from __future__ import division
from sympy import sin, sqrt, pi, Number, Expr, Symbol, lambdify, symbols
from common import function, tensor_product
from lega.integration import Quad1d, Quad2d
from scipy.sparse import eye, diags
from math import pi as PI, sqrt as Sqrt
from sympy.mpmath import quad
from itertools import product
import numpy as np
SQRT_PI = Sqrt(pi)
def sine_basis(n, symbol='x'):
'''
Functions sin(k*x), k = 1, 2, ..., n, normalized to have unit L^2 norm over [0, pi].
Note that (sin(k*x), k**2) are all the solutions of
-u`` = lambda u in (0, pi)
u(0) = u(pi) = 0
'''
x = Symbol(symbol)
return [sin(k*x)*sqrt(2/pi) for k in range(1, n+1)]
def sine_function(F):
'''
Return a linear combination of sine basis functions with coefficients given
by F (a 1d coefficient vector, or a 2d array for the tensor-product basis).
'''
if len(F.shape) == 1:
basis = sine_basis(F.shape[0], 'x')
return function(basis, F)
elif len(F.shape) == 2:
basis_x = sine_basis(F.shape[0], 'x')
basis_y = sine_basis(F.shape[1], 'y')
basis = tensor_product([basis_x, basis_y])
# Collapse to coefs by row
F = F.flatten()
return function(basis, F)
else:
raise NotImplementedError
def mass_matrix(n):
'''inner(u, v) for u, v in sine_basis(n).'''
return eye(n)
def stiffness_matrix(n):
'''inner(u`, v`) for u, v in sine_basis(n).'''
return diags(np.arange(1, n+1)**2, 0, shape=(n, n))
def bending_matrix(n):
'''inner(u``, v``) for u, v in sine_basis(n).'''
return diags(np.arange(1, n+1)**4, 0, shape=(n, n))
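# Why these matrices are diagonal: for the normalized basis phi_k = sqrt(2/pi)*sin(k*x)
# on [0, pi] one has phi_k'' = -k**2*phi_k, so
#     (phi_k,  phi_j)   = delta_kj,
#     (phi_k', phi_j')  = k**2 * delta_kj,
#     (phi_k'', phi_j'') = k**4 * delta_kj,
# which is exactly what mass_matrix, stiffness_matrix and bending_matrix return above.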
# Suppose we have V_n = sine_basis(n) and for some function f we want to compute
# (f, v) for v in V_n, where (o, o) is the L^2 inner product over [0, pi].
# The idea is that if f is extended oddly to [0, 2*pi] then all the terms (f, v)
# can be computed at once by fft.
#
# The flow is: eval -> (extend oddly -> fft) -> (keep only the imaginary part of the result = sine coefficients)
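# Concretely: take f(x) = c*sqrt(2/pi)*sin(k*x) sampled at x_j = pi*j/N, j = 0, ..., N-1,
# and extend it oddly to 2*N points. The DFT of the extended samples satisfies
# F_k = -1j*N*c*sqrt(2/pi) for 0 < k < N, so scaling by -2/(2*N)/sqrt(2/pi) and keeping
# the imaginary part recovers the coefficient c -- this is the normalization used in
# sine_fft below.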
def sine_points(N):
'''Points where the function is sampled for sine transformation'''
# 1d
if isinstance(N, int):
points = np.linspace(0, 2*PI, 2*N, endpoint=False)[:N]
return points
# 2d
elif hasattr(N, '__len__'):
assert len(N) == 2
# X and Y coordinates of the tensor product
X, Y = [sine_points(N[0]), sine_points(N[1])]
XY = np.array([list(xy) for xy in product(X, Y)])
X, Y = XY[:, 0], XY[:, 1]
return X, Y
def sine_eval(N, f):
'''
Sample f at the N sine points, i.e. the first half of a uniform 2*N-point grid
on [0, 2*pi), or at the cartesian product of two such grids in 2d.
'''
# Symbolic is evaluated in [0, PI]
assert isinstance(f, (Expr, Number))
# 1d
if isinstance(N, int):
points = sine_points(N)
# Numbers are special
if isinstance(f, Number):
return float(f)*np.ones(len(points))
x = Symbol('x')
flambda = lambdify(x, f, 'numpy')
f_values = flambda(points)
return f_values
# 2d
elif hasattr(N, '__len__'):
assert len(N) == 2
X, Y = sine_points(N)
# Numbers are special
if isinstance(f, Number):
return float(f)*np.ones(len(X)).reshape(N)
x, y = symbols('x, y')
flambda = lambdify([x, y], f, 'numpy')
f_values = flambda(X, Y)
return f_values.reshape(N)
def sine_fft(f_vec):
'''
Get the sine expansion coefficients of f sampled on [0, pi) and extended oddly to [0, 2*pi).
'''
# 1d
if f_vec.shape == (len(f_vec), ):
f_vec = np.r_[f_vec, f_vec[0], -f_vec[1:][::-1]]
F_vec = np.fft.rfft(f_vec)
# These are the coefficient values
n_points = len(f_vec)
F_vec[1:] *= -2./n_points/Sqrt(2/PI)
return F_vec.imag[1:]
#2d
elif len(f_vec.shape) == 2:
F_vec = np.zeros_like(f_vec)
# Do sine_fft on rows
for i, row in enumerate(f_vec):
F_vec[i, :] = sine_fft(row)
# Do sine_fft on cols
for j, col in enumerate(F_vec.T):
F_vec[:, j] = sine_fft(col)
return F_vec
def sine_ifft(F_vec):
'''Point values from coefficients'''
if F_vec.shape == (len(F_vec), ):
# Rescale
N = len(F_vec)
n_points = 2*len(F_vec)
F_vec /= -2./n_points/Sqrt(2/PI)
# Fake complex
F_vec = np.r_[0, F_vec]*1j
f_vec = np.fft.irfft(F_vec)
return f_vec[:N]
#2d
elif len(F_vec.shape) == 2:
f_vec = np.zeros_like(F_vec)
# Do sine_ifft on rows
for i, row in enumerate(F_vec):
f_vec[i, :] = sine_ifft(row)
# Do sine_ifft on cols
for j, col in enumerate(f_vec.T):
f_vec[:, j] = sine_ifft(col)
return f_vec
def load_vector(f, n, n_quad=0, n_fft=0):
'''(f, v) for v in sine_basis(n).'''
# Compute the integral by numeric/symbolic integration
if n_fft == 0:
# 1d
if isinstance(n, int):
x = Symbol('x')
# Integration by sympy
if n_quad == 0:
quadrature = lambda v, f=f: quad(lambdify(x, f*v), [0, PI])
# My custom quadrature with fixed degree
else:
Q1 = Quad1d(n_quad)
quadrature = lambda v, f=f: Q1(f*v, [0, PI])
return np.array(map(quadrature, sine_basis(n)), dtype=float)
# 2d
elif hasattr(n, '__len__'):
assert len(n) == 2, 'Only 2d'
# Basis in 2d is a tensor product of basis in each directions
basis_x = sine_basis(n[0], 'x')
basis_y = sine_basis(n[1], 'y')
basis = tensor_product([basis_x, basis_y])
x, y = symbols('x, y')
# Integration by sympy
if n_quad == 0:
quadrature = \
lambda v, f=f: quad(lambdify([x, y], f*v), [0, PI], [0, PI])
# My custom quadrature with fixed degree
else:
Q2 = Quad2d(n_quad)
quadrature = lambda v, f=f: Q2(f*v, [0, PI], [0, PI])
return np.array(map(quadrature, basis), dtype=float).reshape(n)
# Integral by fft only approximate!
else:
# 1d
if isinstance(n, int):
# If f is constant this is the minimal requirement for sensible results
assert n_fft >= n
f_vec = sine_eval(n_fft, f)
F_vec = sine_fft(f_vec)[:n]
return F_vec
# 2d
elif hasattr(n, '__len__'):
assert len(n) == 2, 'Only 2d'
f_vec = sine_eval([n_fft, n_fft], f)
F_vec = sine_fft(f_vec)[:n[0], :n[1]]
return F_vec
# -----------------------------------------------------------------------------
if __name__ == '__main__':
from sympy import lambdify, Symbol, cos, exp, S
from sympy.plotting import plot3d
n = 8
basis = sine_basis(n)
domain = [0, pi.n()]
x = Symbol('x')
# Test bending matrix
mat_value = lambda u, v: quad(lambdify(x, u.diff(x, 2)*v.diff(x, 2)), domain)
mat = np.zeros((len(basis), len(basis)))
for i, u in enumerate(basis):
mat[i, i] = mat_value(u, u)
for j, v in enumerate(basis[i+1:], i+1):
mat[i, j] = mat_value(u, v)
mat[j, i] = mat[i, j]
B = bending_matrix(n)
assert B.shape == mat.shape
assert np.allclose(B.toarray(), mat)
# Test mass
mat_value = lambda u, v: quad(lambdify(x, u*v), domain)
mat = np.zeros((len(basis), len(basis)))
for i, u in enumerate(basis):
mat[i, i] = mat_value(u, u)
for j, v in enumerate(basis[i+1:], i+1):
mat[i, j] = mat_value(u, v)
mat[j, i] = mat[i, j]
B = mass_matrix(n)
assert B.shape == mat.shape
assert np.allclose(B.toarray(), mat)
# Test stiffness
mat_value = lambda u, v: quad(lambdify(x, u.diff(x, 1)*v.diff(x, 1)), domain)
mat = np.zeros((len(basis), len(basis)))
for i, u in enumerate(basis):
mat[i, i] = mat_value(u, u)
for j, v in enumerate(basis[i+1:], i+1):
mat[i, j] = mat_value(u, v)
mat[j, i] = mat[i, j]
B = stiffness_matrix(n)
assert B.shape == mat.shape
assert np.allclose(B.toarray(), mat)
# sine FFT 1d
f = S(1)
# f = 1*sin(x) - 2*sin(2*x)
f_vec = sine_eval(N=1000, f=f)
F_vec = sine_fft(f_vec)
f_vec_ = sine_ifft(F_vec)
print 'ifft(fft(f)) - f', np.linalg.norm(f_vec - f_vec_), f
import matplotlib.pyplot as plt
points = sine_points(len(f_vec))
plt.figure()
plt.plot(points, f_vec, 'x', label='one')
plt.plot(points, f_vec_, 'o', label='two')
plt.xlim((0, np.pi))
plt.legend()
# sine FFT 2d
y = Symbol('y')
h = x*(x-pi)*sin(x+y)*y**2*(y-pi)**2
f_vec = sine_eval(N=[100, 100], f=h)
F_vec = sine_fft(f_vec)
f_vec_ = sine_ifft(F_vec)
print 'ifft(fft(f)) - f', np.linalg.norm(f_vec - f_vec_), h
X, Y = sine_points(f_vec.shape)
X = X.reshape(f_vec.shape)
Y = Y.reshape(f_vec.shape)
# print f_vec
fig, (ax0, ax1) = plt.subplots(1, 2)
ax0.pcolor(X, Y, f_vec)
ax0.set_xlim((0, np.pi))
ax0.set_ylim((0, np.pi))
ax1.pcolor(X, Y, f_vec_)
ax1.set_xlim((0, np.pi))
ax1.set_ylim((0, np.pi))
plot3d(h, (x, 0, np.pi), (y, 0, np.pi))
plt.show()
# f = sin(x) + 7*sin(2*x) - sin(4*x) # Exact
# f = sin(x)*cos(2*pi*x)*exp(x**2)
# f = exp(x)*(sum(i*x**i for i in range(1, 4)))
load_exact = np.array([quad(lambdify(x, f*v), [0, PI]) for v in basis],
dtype=float)
b = load_vector(f, len(basis))
b_ = load_vector(f, len(basis), n_fft=2**14)
print '1d error', np.linalg.norm(b - b_)
b__ = load_vector(f, len(basis), n_quad=200)
print '1d error', np.linalg.norm(b - b__)
# y = Symbol('y')
# g = sin(x)*(y**2-1)
# print load_vector(g, [2, 2])
# # How many sines you need to get the n integrals in the load vector right
# N = n
# for k in range(1, 11):
# f_vec = sine_eval(N, g)
# load_num = sine_fft(f_vec)[:n]
#
# print N, np.linalg.norm(load_exact - load_num)
# N *= 2
# y = Symbol('y')
# f = sin(x)*sin(y)
# sine_eval(N=[4, 4], f=f)
x, y = symbols('x, y')
f = x*(x-pi)*y*(y-pi)*sin(x)
import time
start = time.time()
b = load_vector(f, [5, 5])
print 'QUAD sympy', time.time() - start
start = time.time()
b_ = load_vector(f, [5, 5], n_fft=64)
print 'FFT', time.time() - start
print '2d error', np.linalg.norm(b - b_)
start = time.time()
b__ = load_vector(f, [5, 5], n_quad=200)
print 'QUAD me', time.time() - start
print '2d error', np.linalg.norm(b - b__)
# -*- coding: utf-8 -*-
"""
pygments.lexers.dylan
~~~~~~~~~~~~~~~~~~~~~
Lexers for the Dylan language.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Literal
__all__ = ['DylanLexer', 'DylanConsoleLexer', 'DylanLidLexer']
class DylanLexer(RegexLexer):
"""
For the `Dylan <http://www.opendylan.org/>`_ language.
.. versionadded:: 0.7
"""
name = 'Dylan'
aliases = ['dylan']
filenames = ['*.dylan', '*.dyl', '*.intr']
mimetypes = ['text/x-dylan']
flags = re.IGNORECASE
builtins = set((
'subclass', 'abstract', 'block', 'concrete', 'constant', 'class',
'compiler-open', 'compiler-sideways', 'domain', 'dynamic',
'each-subclass', 'exception', 'exclude', 'function', 'generic',
'handler', 'inherited', 'inline', 'inline-only', 'instance',
'interface', 'import', 'keyword', 'library', 'macro', 'method',
'module', 'open', 'primary', 'required', 'sealed', 'sideways',
'singleton', 'slot', 'thread', 'variable', 'virtual'))
keywords = set((
'above', 'afterwards', 'begin', 'below', 'by', 'case', 'cleanup',
'create', 'define', 'else', 'elseif', 'end', 'export', 'finally',
'for', 'from', 'if', 'in', 'let', 'local', 'otherwise', 'rename',
'select', 'signal', 'then', 'to', 'unless', 'until', 'use', 'when',
'while'))
operators = set((
'~', '+', '-', '*', '|', '^', '=', '==', '~=', '~==', '<', '<=',
'>', '>=', '&', '|'))
functions = set((
'abort', 'abs', 'add', 'add!', 'add-method', 'add-new', 'add-new!',
'all-superclasses', 'always', 'any?', 'applicable-method?', 'apply',
'aref', 'aref-setter', 'as', 'as-lowercase', 'as-lowercase!',
'as-uppercase', 'as-uppercase!', 'ash', 'backward-iteration-protocol',
'break', 'ceiling', 'ceiling/', 'cerror', 'check-type', 'choose',
'choose-by', 'complement', 'compose', 'concatenate', 'concatenate-as',
'condition-format-arguments', 'condition-format-string', 'conjoin',
'copy-sequence', 'curry', 'default-handler', 'dimension', 'dimensions',
'direct-subclasses', 'direct-superclasses', 'disjoin', 'do',
'do-handlers', 'element', 'element-setter', 'empty?', 'error', 'even?',
'every?', 'false-or', 'fill!', 'find-key', 'find-method', 'first',
'first-setter', 'floor', 'floor/', 'forward-iteration-protocol',
'function-arguments', 'function-return-values',
'function-specializers', 'gcd', 'generic-function-mandatory-keywords',
'generic-function-methods', 'head', 'head-setter', 'identity',
'initialize', 'instance?', 'integral?', 'intersection',
'key-sequence', 'key-test', 'last', 'last-setter', 'lcm', 'limited',
'list', 'logand', 'logbit?', 'logior', 'lognot', 'logxor', 'make',
'map', 'map-as', 'map-into', 'max', 'member?', 'merge-hash-codes',
'min', 'modulo', 'negative', 'negative?', 'next-method',
'object-class', 'object-hash', 'odd?', 'one-of', 'pair', 'pop',
'pop-last', 'positive?', 'push', 'push-last', 'range', 'rank',
'rcurry', 'reduce', 'reduce1', 'remainder', 'remove', 'remove!',
'remove-duplicates', 'remove-duplicates!', 'remove-key!',
'remove-method', 'replace-elements!', 'replace-subsequence!',
'restart-query', 'return-allowed?', 'return-description',
'return-query', 'reverse', 'reverse!', 'round', 'round/',
'row-major-index', 'second', 'second-setter', 'shallow-copy',
'signal', 'singleton', 'size', 'size-setter', 'slot-initialized?',
'sort', 'sort!', 'sorted-applicable-methods', 'subsequence-position',
'subtype?', 'table-protocol', 'tail', 'tail-setter', 'third',
'third-setter', 'truncate', 'truncate/', 'type-error-expected-type',
'type-error-value', 'type-for-copy', 'type-union', 'union', 'values',
'vector', 'zero?'))
valid_name = '\\\\?[\\w!&*<>|^$%@\\-+~?/=]+'
def get_tokens_unprocessed(self, text):
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
lowercase_value = value.lower()
if lowercase_value in self.builtins:
yield index, Name.Builtin, value
continue
if lowercase_value in self.keywords:
yield index, Keyword, value
continue
if lowercase_value in self.functions:
yield index, Name.Builtin, value
continue
if lowercase_value in self.operators:
yield index, Operator, value
continue
yield index, token, value
tokens = {
'root': [
# Whitespace
(r'\s+', Text),
# single line comment
(r'//.*?\n', Comment.Single),
# lid header
(r'([a-z0-9-]+)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
bygroups(Name.Attribute, Operator, Text, String)),
default('code') # no header match, switch to code
],
'code': [
# Whitespace
(r'\s+', Text),
# single line comment
(r'//.*?\n', Comment.Single),
# multi-line comment
(r'/\*', Comment.Multiline, 'comment'),
# strings and characters
(r'"', String, 'string'),
(r"'(\\.|\\[0-7]{1,3}|\\x[a-f0-9]{1,2}|[^\\\'\n])'", String.Char),
# binary integer
(r'#b[01]+', Number.Bin),
# octal integer
(r'#o[0-7]+', Number.Oct),
# floating point
(r'[-+]?(\d*\.\d+(e[-+]?\d+)?|\d+(\.\d*)?e[-+]?\d+)', Number.Float),
# decimal integer
(r'[-+]?\d+', Number.Integer),
# hex integer
(r'#x[0-9a-f]+', Number.Hex),
# Macro parameters
(r'(\?' + valid_name + ')(:)'
r'(token|name|variable|expression|body|case-body|\*)',
bygroups(Name.Tag, Operator, Name.Builtin)),
(r'(\?)(:)(token|name|variable|expression|body|case-body|\*)',
bygroups(Name.Tag, Operator, Name.Builtin)),
(r'\?' + valid_name, Name.Tag),
# Punctuation
(r'(=>|::|#\(|#\[|##|\?\?|\?=|\?|[(){}\[\],.;])', Punctuation),
# Most operators are picked up as names and then re-flagged.
# This one isn't valid in a name though, so we pick it up now.
(r':=', Operator),
# Pick up #t / #f before we match other stuff with #.
(r'#[tf]', Literal),
# #"foo" style keywords
(r'#"', String.Symbol, 'keyword'),
# #rest, #key, #all-keys, etc.
(r'#[a-z0-9-]+', Keyword),
# required-init-keyword: style keywords.
(valid_name + ':', Keyword),
# class names
(r'<' + valid_name + '>', Name.Class),
# define variable forms.
(r'\*' + valid_name + '\*', Name.Variable.Global),
# define constant forms.
(r'\$' + valid_name, Name.Constant),
# everything else. We re-flag some of these in the method above.
(valid_name, Name),
],
'comment': [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
'keyword': [
(r'"', String.Symbol, '#pop'),
(r'[^\\"]+', String.Symbol), # all other characters
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-f0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
]
}
class DylanLidLexer(RegexLexer):
"""
For Dylan LID (Library Interchange Definition) files.
.. versionadded:: 1.6
"""
name = 'DylanLID'
aliases = ['dylan-lid', 'lid']
filenames = ['*.lid', '*.hdp']
mimetypes = ['text/x-dylan-lid']
flags = re.IGNORECASE
tokens = {
'root': [
# Whitespace
(r'\s+', Text),
# single line comment
(r'//.*?\n', Comment.Single),
# lid header
(r'(.*?)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
bygroups(Name.Attribute, Operator, Text, String)),
]
}
class DylanConsoleLexer(Lexer):
"""
For Dylan interactive console output like:
.. sourcecode:: dylan-console
? let a = 1;
=> 1
? a
=> 1
This is based on a copy of the RubyConsoleLexer.
.. versionadded:: 1.6
"""
name = 'Dylan session'
aliases = ['dylan-console', 'dylan-repl']
filenames = ['*.dylan-console']
mimetypes = ['text/x-dylan-console']
_line_re = re.compile('.*?\n')
_prompt_re = re.compile('\?| ')
def get_tokens_unprocessed(self, text):
dylexer = DylanLexer(**self.options)
curcode = ''
insertions = []
for match in self._line_re.finditer(text):
line = match.group()
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(insertions,
dylexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
dylexer.get_tokens_unprocessed(curcode)):
yield item
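# A minimal usage sketch (illustrative only, not part of the lexer module): highlighting a
# short, made-up Dylan snippet with DylanLexer through the standard pygments API.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    sample_source = 'define method double (n :: <integer>) => (r :: <integer>) n * 2 end;'
    print(highlight(sample_source, DylanLexer(), TerminalFormatter()))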
import time
sttime= time.clock()
print("Importing essential modules...")
import numpy as np
import matplotlib.pyplot as plt
#from sklearn import svm
import sys
caffe_root = "/root/py-faster-rcnn/caffe-fast-rcnn/"
import sys
#sys.path.insert(0, caffe_root + "python")
import caffe
print("\t\tsuccessfully imported in "+str(time.clock()- sttime)+" Secs.\n")
####################
## Parameters ##
####################
#model net to extract features
mynet= "caffenet" #accepted values are dbhnet, caffenet, googlenet
if mynet== "dbhnet":
deploy= "/root/py-faster-rcnn/caffe-fast-rcnn/examples/cvprw15-imagenet-3/KevinNet_imagenet3_128.prototxt"
model= "/root/py-faster-rcnn/caffe-fast-rcnn/examples/cvprw15-imagenet-3/KevinNet_imagenet3_128_iter_392.caffemodel"
elif mynet== "caffenet":
deploy= "/root/py-faster-rcnn/caffe-fast-rcnn/models/bvlc_reference_caffenet/deploy.prototxt"
model= "/root/py-faster-rcnn/caffe-fast-rcnn/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"
elif mynet== "googlenet":
deploy= "/root/py-faster-rcnn/caffe-fast-rcnn/models/bvlc_googlenet/deploy.prototxt"
model= "/root/py-faster-rcnn/caffe-fast-rcnn/models/bvlc_googlenet/bvlc_googlenet.caffemodel"
else:
print("Please give a proper net to extract!")
#########################
## Global settings ##
#########################
plt.rcParams["figure.figsize"] = (10, 10)
plt.rcParams["image.interpolation"] = "nearest"
plt.rcParams["image.cmap"] = "gray"
caffe.set_mode_cpu()
net = caffe.Net(deploy,
model,
caffe.TEST)
########################
## Pre-Processing ##
########################
transformer = caffe.io.Transformer({"data": net.blobs["data"].data.shape})
transformer.set_transpose("data", (2,0,1))
transformer.set_mean("data", np.load(caffe_root + "python/caffe/imagenet/ilsvrc_2012_mean.npy").mean(1).mean(1)) # mean pixel
transformer.set_raw_scale("data", 255) # the reference model operates on images in [0,255] range instead of [0,1]
transformer.set_channel_swap("data", (2,1,0)) # the reference model has channels in BGR order instead of RGB
net.blobs["data"].reshape(1,3,227,227)
train_feats_fc7= []
test_feats_fc7= []
train_feats_fc8= []
test_feats_fc8= []
train_feats_fc7_fc8= []
test_feats_fc7_fc8= []
#####################
## Load Labels ##
#####################
train_labels= []
test_labels= []
print("Loading labels from files...")
base_data = sys.argv[1]
search_data = sys.argv[2]
rdfile1= open(base_data, "r")
rdfile2= open(search_data, "r")
sttime= time.clock()
#to load train_labels from file
for line in rdfile1:
line= line[:-1]
train_labels.append(int(line.split()[1]))
train_labels= np.array(train_labels)
np.savetxt(base_data.split('.')[0]+'_label.txt',train_labels)
print("\t\t"+ str(len(train_labels))+" train_labels successfully loaded in "+str(time.clock()- sttime)+" Secs.\n")
sttime= time.clock()
#to load test labels from file
for line in rdfile2:
line= line[:-1]
test_labels.append(int(line.split()[1]))
test_labels= np.array(test_labels)
print("\t\t"+ str(len(test_labels))+" test_labels successfully loaded in "+str(time.clock()- sttime)+" Secs.\n")
##########################
## Extract Features ##
##########################
#train features
print("Extracting train features...")
sttime= time.clock()
train_file= open(base_data,"r")
total_train_images= str(len(train_labels))
current_image= 1
for line in train_file:
temp_print= (str(current_image)+ "/"+ total_train_images+ "... ")
sys.stdout.write(temp_print)
sys.stdout.flush()
current_image+= 1
line1= line.split()
net.blobs["data"].data[...]= transformer.preprocess("data", caffe.io.load_image(line1[0]))
out= net.forward()
feat_fc7= np.array(net.blobs["fc7"].data[0])
train_feats_fc7.append(feat_fc7)
train_feats_fc7= np.array(train_feats_fc7)
np.savetxt(base_data.split('.')[0]+'_feature.txt', train_feats_fc7)
print("\n\t\tsuccessfully extracted in "+str(time.clock()- sttime)+" Secs.")
#test features
print("Extracting test features...")
sttime= time.clock()
test_file= open(search_data, "r")
total_test_images= str(len(test_labels))
current_image= 1
for line in test_file:
temp_print= (str(current_image)+ "/"+ total_test_images+ "... ")
sys.stdout.write(temp_print)
sys.stdout.flush()
current_image+= 1
line1= line.split()
net.blobs["data"].data[...]= transformer.preprocess("data", caffe.io.load_image(line1[0]))
out= net.forward()
feat_fc7= np.array(net.blobs["fc7"].data[0])
test_feats_fc7.append(feat_fc7)
test_feats_fc7= np.array(test_feats_fc7)
np.savetxt(search_data.split('.')[0]+'_feature.txt', test_feats_fc7)
print("\n\t\tsuccessfully extracted in "+str(time.clock()- sttime)+" Secs.")
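#NOTE: the features and labels saved above with np.savetxt can be reloaded later for the
#SVM experiments kept commented out below -- a sketch assuming the same file-name convention:
#    train_feats_fc7 = np.loadtxt(base_data.split('.')[0]+'_feature.txt')
#    train_labels = np.loadtxt(base_data.split('.')[0]+'_label.txt').astype(int)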
######################
## SVM Training ##
######################
'''
#1st svm fit
print("Svm Classification with first 500 features of layer fc7")
C=1.0
print("\tfitting svc...")
sttime= time.clock()
svc = svm.SVC(kernel='linear', C=C).fit(train_feats_fc7[:,:500], train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting rbf_svc...")
sttime= time.clock()
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(train_feats_fc7[:,:500], train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting poly_svc...")
sttime= time.clock()
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(train_feats_fc7[:,:500], train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting lin_svc...")
sttime= time.clock()
lin_svc = svm.LinearSVC(C=C).fit(train_feats_fc7[:,:500], train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\n\nPredicting values...")
for i, clf in enumerate((svc, rbf_svc, poly_svc, lin_svc)):
sttime= time.clock()
print("\nClassifier: ")
print(clf)
Z = clf.predict(test_feats_fc7[:,:500])
#print(Z)
#print(test_labels)
scores = (Z== test_labels)
total = np.sum(scores)
print("Accuracy: ")
print("\t\t"+str(total/500.0))
print("Time: ")
print("\t\t"+ str(time.clock()- sttime)+ " Secs.\n")
#2nd svm fit
print("Svm Classification with first half (2048) features of layer fc7")
C=1.0
print("\tfitting svc...")
sttime= time.clock()
svc = svm.SVC(kernel='linear', C=C).fit(train_feats_fc7[:,:2048], train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting rbf_svc...")
sttime= time.clock()
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(train_feats_fc7[:,:2048], train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting poly_svc...")
sttime= time.clock()
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(train_feats_fc7[:,:2048], train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting lin_svc...")
sttime= time.clock()
lin_svc = svm.LinearSVC(C=C).fit(train_feats_fc7[:,:2048], train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\n\nPredicting values...")
for i, clf in enumerate((svc, rbf_svc, poly_svc, lin_svc)):
sttime= time.clock()
print("\nClassifier: ")
print(clf)
Z = clf.predict(test_feats_fc7[:,:2048])
#print(Z)
#print(test_labels)
scores = (Z== test_labels)
total = np.sum(scores)
print("Accuracy: ")
print("\t\t"+str(total/500.0))
print("Time: ")
print("\t\t"+ str(time.clock()- sttime)+ " Secs.\n")
#3rd svm fit
print("Svm Classification with all the features of layer fc7")
C=1.0
print("\tfitting svc...")
sttime= time.clock()
svc = svm.SVC(kernel='linear', C=C).fit(train_feats_fc7, train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting rbf_svc...")
sttime= time.clock()
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(train_feats_fc7, train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting poly_svc...")
sttime= time.clock()
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(train_feats_fc7, train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting lin_svc...")
sttime= time.clock()
lin_svc = svm.LinearSVC(C=C).fit(train_feats_fc7, train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\n\nPredicting values...")
for i, clf in enumerate((svc, rbf_svc, poly_svc, lin_svc)):
sttime= time.clock()
print("\nClassifier: ")
print(clf)
Z = clf.predict(test_feats_fc7)
#print(Z)
#print(test_labels)
scores = (Z== test_labels)
total = np.sum(scores)
print("Accuracy: ")
print("\t\t"+str(total/500.0))
print("Time: ")
print("\t\t"+ str(time.clock()- sttime)+ " Secs.\n")
#4th svm fit
print("Svm Classification with first half (500) features of layer fc8")
C=1.0
print("\tfitting svc...")
sttime= time.clock()
svc = svm.SVC(kernel='linear', C=C).fit(train_feats_fc8[:,:500], train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting rbf_svc...")
sttime= time.clock()
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(train_feats_fc8[:,:500], train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting poly_svc...")
sttime= time.clock()
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(train_feats_fc8[:,:500], train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting lin_svc...")
sttime= time.clock()
lin_svc = svm.LinearSVC(C=C).fit(train_feats_fc8[:,:500], train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\n\nPredicting values...")
for i, clf in enumerate((svc, rbf_svc, poly_svc, lin_svc)):
sttime= time.clock()
print("\nClassifier: ")
print(clf)
Z = clf.predict(test_feats_fc8[:,:500])
#print(Z)
#print(test_labels)
scores = (Z== test_labels)
total = np.sum(scores)
print("Accuracy: ")
print("\t\t"+str(total/500.0))
print("Time: ")
print("\t\t"+ str(time.clock()- sttime)+ " Secs.\n")
#5th svm fit
print("Svm Classification with all the features of layer fc8")
C=1.0
print("\tfitting svc...")
sttime= time.clock()
svc = svm.SVC(kernel='linear', C=C).fit(train_feats_fc8, train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting rbf_svc...")
sttime= time.clock()
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(train_feats_fc8, train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting poly_svc...")
sttime= time.clock()
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(train_feats_fc8, train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting lin_svc...")
sttime= time.clock()
lin_svc = svm.LinearSVC(C=C).fit(train_feats_fc8, train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\n\nPredicting values...")
for i, clf in enumerate((svc, rbf_svc, poly_svc, lin_svc)):
sttime= time.clock()
print("\nClassifier: ")
print(clf)
Z = clf.predict(test_feats_fc8)
#print(Z)
#print(test_labels)
scores = (Z== test_labels)
total = np.sum(scores)
print("Accuracy: ")
print("\t\t"+str(total/500.0))
print("Time: ")
print("\t\t"+ str(time.clock()- sttime)+ " Secs.\n")
#6th svm fit
print("Svm Classification with all the features of layer fc7_fc8")
C=1.0
print("\tfitting svc...")
sttime= time.clock()
svc = svm.SVC(kernel='linear', C=C).fit(train_feats_fc7_fc8, train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting rbf_svc...")
sttime= time.clock()
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(train_feats_fc7_fc8, train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting poly_svc...")
sttime= time.clock()
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(train_feats_fc7_fc8, train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\tfitting lin_svc...")
sttime= time.clock()
lin_svc = svm.LinearSVC(C=C).fit(train_feats_fc7_fc8, train_labels)
print("\t\ttook "+str(time.clock()- sttime)+ " Secs.")
print("\n\nPredicting values...")
for i, clf in enumerate((svc, rbf_svc, poly_svc, lin_svc)):
sttime= time.clock()
print("\nClassifier: ")
print(clf)
Z = clf.predict(test_feats_fc7_fc8)
#print(Z)
#print(test_labels)
scores = (Z== test_labels)
total = np.sum(scores)
print("Accuracy: ")
print("\t\t"+str(total/500.0))
print("Time: ")
print("\t\t"+ str(time.clock()- sttime)+ " Secs.\n")
'''
"""Easy to use ETA calculation and progress bar library.
https://github.com/Robpol86/etaprogress
https://pypi.python.org/pypi/etaprogress
"""
from __future__ import division
from decimal import Decimal, ROUND_DOWN
import locale
from etaprogress.components.bars import Bar, BarDoubled, BarUndefinedAnimated, BarUndefinedEmpty
from etaprogress.components.base_progress_bar import BaseProgressBar
from etaprogress.components.eta_conversions import eta_hms, eta_letters
from etaprogress.components.misc import get_remaining_width, SPINNER
from etaprogress.components.units import UnitBit, UnitByte
__all__ = ('ProgressBar', 'ProgressBarBits', 'ProgressBarBytes', 'ProgressBarWget', 'ProgressBarYum')
class ProgressBar(BaseProgressBar):
"""Draw a progress bar showing the ETA, percentage, done/total items, and a spinner.
Looks like one of these:
8% ( 8/100) [## ] eta 00:24 /
100% (100/100) [####################################] eta 00:01 -
23 [ ? ] eta --:-- |
Positional arguments:
denominator -- the final/total number of units (like the expected file size of a download). 0 if unknown.
Keyword arguments:
max_width -- limit the number of characters shown (by default the full progress bar takes up the entire terminal width).
Instance variables:
template -- string template of the full progress bar.
bar -- class instance of the 'bar' part of the full progress bar.
More instance variables in etaprogress.components.base_progress_bar.BaseProgressBar.
"""
def __init__(self, denominator, max_width=None):
super(ProgressBar, self).__init__(denominator, max_width=max_width)
if self.undefined:
self.template = '{numerator} {bar} eta --:-- {spinner}'
self.bar = BarUndefinedAnimated()
else:
self.template = '{percent:3d}% ({fraction}) {bar} eta {eta} {spinner}'
self.bar = Bar()
def __str__(self):
"""Returns the fully-built progress bar and other data."""
# Partially build out template.
bar = '{bar}'
spinner = next(SPINNER)
if self.undefined:
numerator = self.str_numerator
template = self.template.format(numerator=numerator, bar=bar, spinner=spinner)
else:
percent = int(self.percent)
fraction = self.str_fraction
eta = self._eta_string or '--:--'
template = self.template.format(percent=percent, fraction=fraction, bar=bar, eta=eta, spinner=spinner)
# Determine bar width and finish.
width = get_remaining_width(template.format(bar=''), self.max_width or None)
bar = self.bar.bar(width, percent=self.percent)
return template.format(bar=bar)
@staticmethod
def _generate_eta(seconds):
"""Returns a human readable ETA string."""
return '' if seconds is None else eta_hms(seconds, always_show_minutes=True)
@property
def str_fraction(self):
"""Returns the fraction with additional whitespace."""
if self.undefined:
return None
denominator = locale.format('%d', self.denominator, grouping=True)
numerator = self.str_numerator.rjust(len(denominator))
return '{0}/{1}'.format(numerator, denominator)
@property
def str_numerator(self):
"""Returns the numerator as a formatted string."""
return locale.format('%d', self.numerator, grouping=True)
class ProgressBarBits(ProgressBar):
"""Draw a progress bar showing the ETA, percentage, done/total items, a spinner, and units in bits.
Looks like one of these:
7% ( 7.40/100.00 mb) [# ] eta 00:20 \
100% (100.00/100.00 mb) [###########################] eta 00:00 \
62.96 mb [ ? ] eta --:-- |
Positional arguments:
denominator -- the final/total number of units (like the expected file size of a download). 0 if unknown.
Keyword arguments:
max_width -- limit the number of characters shown (by default the full progress bar takes up the entire terminal width).
Instance variables:
_unit_class -- class object responsible for converting bits into megabits/etc.
More instance variables in etaprogress.progress.ProgressBar.
"""
def __init__(self, denominator, max_width=None):
super(ProgressBarBits, self).__init__(denominator, max_width)
self._unit_class = UnitBit
@property
def str_fraction(self):
"""Returns the fraction with additional whitespace."""
if self._eta.undefined:
return None
# Determine denominator and its unit.
unit_denominator, unit = self._unit_class(self.denominator).auto
formatter = '%d' if unit_denominator == self.denominator else '%0.2f'
denominator = locale.format(formatter, unit_denominator, grouping=True)
# Determine numerator.
unit_numerator = getattr(self._unit_class(self.numerator), unit)
if self.done:
rounded_numerator = unit_numerator
else:
rounded_numerator = float(Decimal(str(unit_numerator)).quantize(Decimal('.01'), rounding=ROUND_DOWN))
numerator = locale.format(formatter, rounded_numerator, grouping=True).rjust(len(denominator))
return '{0}/{1} {2}'.format(numerator, denominator, unit)
@property
def str_numerator(self):
"""Returns the numerator with formatting."""
if not self.undefined:
return None
unit_numerator, unit = self._unit_class(self.numerator).auto
formatter = '%d' if unit_numerator == self.numerator else '%0.2f'
numerator = locale.format(formatter, unit_numerator, grouping=True)
return '{0} {1}'.format(numerator, unit)
class ProgressBarBytes(ProgressBarBits):
"""Draw a progress bar showing the ETA, percentage, done/total items, a spinner, and units in bytes.
Looks like one of these:
7% ( 7.06/95.37 MiB) [## ] eta 00:20 \
100% (95.37/95.37 MiB) [############################] eta 00:00 |
24.72 MiB [ ? ] eta --:-- -
Positional arguments:
denominator -- the final/total number of units (like the expected file size of a download). 0 if unknown.
Keyword arguments:
max_width -- limit the number of characters shown (by default the full progress bar takes up the entire terminal width).
Instance variables:
_unit_class -- class object responsible for converting bytes into mebibytes/etc.
More instance variables in etaprogress.progress.ProgressBarBits.
"""
def __init__(self, denominator, max_width=None):
super(ProgressBarBytes, self).__init__(denominator, max_width)
self._unit_class = UnitByte
class ProgressBarWget(BaseProgressBar):
"""Progress bar modeled after the one in wget.
Looks like one of these:
35% [=======> ] 35,802,443 4.66MiB/s eta 14s
100%[======================>] 100,000,000 4.59MiB/s in 21s
[ <=> ] 22,222,206 4.65MiB/s
[ <=> ] 100,000,000 4.59MiB/s in 21s
Positional arguments:
denominator -- the final/total number of units (like the expected file size of a download). 0 if unknown.
Keyword arguments:
max_width -- limit the number of characters shown (by default the full progress bar takes up the entire terminal width).
eta_every -- calculate and cache the ETA string only after this many numerator-setting iterations. Default is every iteration.
Instance variables:
template -- string template of the full progress bar.
bar -- class instance of the 'bar' part of the full progress bar.
More instance variables in etaprogress.components.base_progress_bar.BaseProgressBar.
"""
def __init__(self, denominator, max_width=None, eta_every=1):
super(ProgressBarWget, self).__init__(denominator, max_width=max_width, eta_every=eta_every)
if self.undefined:
self.template = ' {bar} {numerator:<11s} {rate:>9s} {eta:<12s}'
BarUndefinedAnimated.CHAR_ANIMATED = '<=>'
self.bar = BarUndefinedAnimated()
else:
self.template = '{percent:^4s}{bar} {numerator:<11s} {rate:>9s} {eta:<12s}'
Bar.CHAR_FULL = '='
Bar.CHAR_LEADING = '>'
self.bar = Bar()
def __str__(self):
"""Returns the fully-built progress bar and other data."""
# Partially build out template.
bar = '{bar}'
numerator = locale.format('%d', self.numerator, grouping=True)
rate = self.str_rate
eta = self.str_eta
if self.undefined:
template = self.template.format(bar=bar, numerator=numerator, rate=rate, eta=eta)
else:
percent = '{0}%'.format(int(self.percent))
template = self.template.format(percent=percent, bar=bar, numerator=numerator, rate=rate, eta=eta)
# Determine bar width and finish.
width = get_remaining_width(template.format(bar=''), self.max_width or None)
bar = self.bar.bar(width, percent=self.percent)
return template.format(bar=bar)
@staticmethod
def _generate_eta(seconds):
"""Returns a human readable ETA string."""
return '' if seconds is None else eta_letters(seconds)
@property
def str_eta(self):
"""Returns a formatted ETA value for the progress bar."""
eta = eta_letters(self._eta.elapsed) if self.done else self._eta_string
if not eta:
return ''
if eta.count(' ') > 1:
eta = ' '.join(eta.split(' ')[:2]) # Only show up to two units (h and m, no s for example).
return (' in {0}' if self.done else 'eta {0}').format(eta)
@property
def str_rate(self):
"""Returns the rate with formatting. If done, returns the overall rate instead."""
# Handle special cases.
if not self._eta.started or self._eta.stalled or not self.rate:
return '--.-KiB/s'
unit_rate, unit = UnitByte(self._eta.rate_overall if self.done else self.rate).auto
if unit_rate >= 100:
formatter = '%d'
elif unit_rate >= 10:
formatter = '%.1f'
else:
formatter = '%.2f'
return '{0}{1}/s'.format(locale.format(formatter, unit_rate, grouping=False), unit)
class ProgressBarYum(BaseProgressBar):
"""Progress bar modeled after the one in YUM.
Looks like one of these:
CentOS-7.0 27% [===- ] 265 MiB/s | 1.8 GiB 00:00:19 ETA
CentOS-7.0-1406-x86_64-Everything.iso | 6.6 GiB 00:00:26
CentOS-7.0 [ ] 265 MiB/s | 2.8 GiB
Positional arguments:
denominator -- the final/total number of units (like the expected file size of a download). 0 if unknown.
filename -- the string to display before the progress bar. Limited to whatever space is available in the terminal.
Keyword arguments:
max_width -- limit the number of characters shown (by default the full progress bar takes up the entire terminal width).
Instance variables:
template -- string template of the full progress bar.
template_completed -- string template of the full progress bar at 100% or force_done = True.
bar -- class instance of the 'bar' part of the full progress bar.
More instance variables in etaprogress.components.base_progress_bar.BaseProgressBar.
"""
def __init__(self, denominator, filename, max_width=None):
super(ProgressBarYum, self).__init__(denominator, max_width=max_width)
self.filename = filename
self.template = '{filename} {percent:>4s} {bar} {rate:>9s} | {numerator:>7s} {eta:<12s}'
self.template_completed = '{filename} | {numerator:>7s} {eta:<12s}'
if self.undefined:
self.bar = BarUndefinedEmpty()
else:
self.bar = BarDoubled()
def __str__(self):
"""Returns the fully-built progress bar and other data."""
# Partially build out template.
filename = '{filename}'
numerator = self.str_numerator
eta = self.str_eta
if self.done:
template = self.template_completed.format(filename=filename, numerator=numerator, eta=eta)
else:
bar = '{bar}'
percent = '' if self.undefined else '{0}%'.format(int(self.percent))
rate = self.str_rate
template = self.template.format(filename=filename, percent=percent, bar=bar, rate=rate, numerator=numerator,
eta=eta)
width = get_remaining_width(template.format(bar='', filename=''), self.max_width or None)
# Filename will have 40% of the available width if not done.
if self.done:
filename = self.filename[:width].ljust(width) if width > 0 else ''
bar = None
else:
width_filename = int(width * 0.4)
filename = self.filename[:width_filename].ljust(width_filename) if width_filename > 0 else ''
bar = self.bar.bar(width - width_filename, percent=self.percent)
return template.format(bar=bar, filename=filename)
@staticmethod
def _generate_eta(seconds):
"""Returns a human readable ETA string."""
return '' if seconds is None else eta_hms(seconds, always_show_hours=True, hours_leading_zero=True)
@property
def str_eta(self):
"""Returns a formatted ETA value for the progress bar."""
if self.done:
return eta_hms(self._eta.elapsed, always_show_hours=True, hours_leading_zero=True)
if not self._eta_string:
return ''
return '{0} ETA'.format(self._eta_string)
@property
def str_numerator(self):
"""Returns the numerator with formatting."""
unit_numerator, unit = UnitByte(self.numerator).auto_no_thousands
if unit_numerator >= 10:
formatter = '%d'
else:
formatter = '%0.1f'
return '{0} {1}'.format(locale.format(formatter, unit_numerator, grouping=False), unit)
@property
def str_rate(self):
"""Returns the rate with formatting."""
# Handle special cases.
if not self._eta.started or self._eta.stalled or not self.rate:
return '--- KiB/s'
unit_rate, unit = UnitByte(self.rate).auto_no_thousands
if unit_rate >= 10:
formatter = '%d'
else:
formatter = '%0.1f'
return '{0} {1}/s'.format(locale.format(formatter, unit_rate, grouping=False), unit)
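# A minimal usage sketch, assuming the usual etaprogress calling convention where the
# caller assigns to `numerator` and prints the bar on each iteration. The total of 100
# and the sleep are made up for illustration.
if __name__ == '__main__':
    import sys
    import time

    locale.setlocale(locale.LC_ALL, '')  # so thousands grouping in locale.format works
    demo_bar = ProgressBar(100, max_width=60)
    for i in range(101):
        demo_bar.numerator = i
        sys.stdout.write('\r' + str(demo_bar))
        sys.stdout.flush()
        time.sleep(0.02)
    sys.stdout.write('\n')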
"""
A build class.
"""
import os
import signal
import subprocess
import math
from datetime import datetime
from contextlib import contextmanager
from threading import Thread
from .memoized import MemoizedObject
from .log import LOGGER, Highlight as hl
from .log import print_normal, print_error
from .filters import FilteredObject
from .environment import Environment
from .prefix import PrefixedObject
from .signature import SignableObject
from .command import Command
from .path import chdir
class Build(MemoizedObject, FilteredObject, PrefixedObject, SignableObject):
"""
Represents a build.
"""
memoization_keys = ('attendee', 'name')
propagate_memoization_keys = True
signature_fields = ('environment', 'subdir', 'commands', 'filter')
@classmethod
def transform_memoization_keys(cls, attendee, name):
"""
Make sure the attendee parameter is a real Attendee instance.
"""
if isinstance(attendee, basestring):
from .attendee import Attendee
attendee = Attendee(attendee)
return attendee, name
def __init__(self, attendee, name, environment, subdir=None, commands=None, *args, **kwargs):
super(Build, self).__init__(*args, **kwargs)
self._environment = environment
self.subdir = subdir
self._commands = commands or []
# Register the build in the Attendee.
self.attendee = attendee
self.name = name
attendee.add_build(self)
@property
def environment(self):
if not isinstance(self._environment, Environment):
self._environment = Environment.get_instance(self._environment)
return self._environment
@property
def commands(self):
return [command.resolve(self) for command in self._commands if command.enabled]
@commands.setter
def commands(self, value):
def make_command(command):
if isinstance(command, Command):
return command
else:
return Command(command)
self._commands = map(make_command, value)
def __repr__(self):
"""
Get a representation of the build.
"""
return 'Build(%r, %r)' % (self.attendee, self.name)
def __str__(self):
"""
Get a string representation of the build.
"""
return '%s_%s' % (self.attendee, self.name)
def add_command(self, command, *args, **kwargs):
"""
Add a command to the builder.
"""
self._commands.append(Command(command, *args, **kwargs))
@contextmanager
def create_log_file(self, log_path):
"""
Create a log file object and yield it.
`log_path` is the path to the log file to write to.
"""
try:
LOGGER.debug('Opening log file at: %s', hl(log_path))
with open(log_path, 'w') as log_file:
yield log_file
finally:
LOGGER.info('Log file written to: %s', hl(log_path))
@contextmanager
def handle_interruptions(self, callable=None):
"""
Handle interruptions.
"""
def handler(signum, frame):
LOGGER.warning('The building process was interrupted by the user.')
if callable:
callable()
previous_handler = signal.signal(signal.SIGINT, handler)
try:
yield
finally:
signal.signal(signal.SIGINT, previous_handler)
def build(self, path, log_path, verbose=False):
"""
Launch the build in the specified `path`.
`log_path` is the path to the log file to create.
"""
working_dir = os.path.join(path, self.subdir if self.subdir else '')
with self.create_log_file(log_path) as log_file:
with chdir(working_dir):
with self.environment.enable() as env:
LOGGER.info("Build started in %s at %s.", hl(working_dir), hl(datetime.now().strftime('%c')))
log_file.write("Build started in %s at %s.\n" % (working_dir, datetime.now().strftime('%c')))
if env.shell:
LOGGER.info('Building within: %s', hl(' '.join(env.shell)))
log_file.write('Using "%s" as a shell.\n' % ' '.join(env.shell))
else:
LOGGER.info('Building within %s.', hl('the default system shell'))
log_file.write('Using system shell.\n')
for key, value in os.environ.iteritems():
LOGGER.debug('%s: %s', key, hl(value))
log_file.write('%s: %s\n' % (key, value))
for index, command in enumerate(self.commands):
numbered_prefix = ('%%0%sd' % int(math.ceil(math.log10(len(self.commands))))) % index
LOGGER.important('%s: %s', numbered_prefix, hl(command))
log_file.write('%s: %s\n' % (numbered_prefix, command))
if env.shell:
process = subprocess.Popen(env.shell + [command], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
mixed_output = []
with self.handle_interruptions(process.terminate):
def read_stdout():
for line in iter(process.stdout.readline, ''):
mixed_output.append((print_normal, line))
log_file.write(line)
if verbose:
print_normal(line)
def read_stderr():
for line in iter(process.stderr.readline, ''):
mixed_output.append((print_error, line))
log_file.write(line)
if verbose:
print_error(line)
stdout_thread = Thread(target=read_stdout)
stdout_thread.daemon = True
stdout_thread.start()
stderr_thread = Thread(target=read_stderr)
stderr_thread.daemon = True
stderr_thread.start()
map(Thread.join, [stdout_thread, stderr_thread])
process.wait()
log_file.write('\n')
if process.returncode != 0:
if not verbose:
for func, line in mixed_output:
func(line)
log_file.write('Command failed with status: %s\n' % process.returncode)
log_file.write('Build failed at %s.\n' % datetime.now().strftime('%c'))
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=command)
LOGGER.info("Build succeeded at %s.", hl(datetime.now().strftime('%c')))
log_file.write("Build succeeded at %s.\n" % datetime.now().strftime('%c'))
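# A hypothetical usage sketch (the attendee/environment names below are illustrative,
# not taken from the real project): register a build for an attendee, queue its shell
# commands, then run it against a source tree.
#
#     build = Build('libfoo', 'release', environment='system')
#     build.add_command('./configure')
#     build.add_command('make')
#     build.build(path='/tmp/src/libfoo', log_path='/tmp/logs/libfoo.log', verbose=True)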
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import SQLBaseStore
from twisted.internet import defer
from synapse.util.caches.descriptors import cached, cachedList, cachedInlineCallbacks
import ujson as json
import logging
logger = logging.getLogger(__name__)
class AccountDataStore(SQLBaseStore):
@cached()
def get_account_data_for_user(self, user_id):
"""Get all the client account_data for a user.
Args:
user_id(str): The user to get the account_data for.
Returns:
A deferred pair of a dict of global account_data and a dict
mapping from room_id string to per room account_data dicts.
"""
def get_account_data_for_user_txn(txn):
rows = self._simple_select_list_txn(
txn, "account_data", {"user_id": user_id},
["account_data_type", "content"]
)
global_account_data = {
row["account_data_type"]: json.loads(row["content"]) for row in rows
}
rows = self._simple_select_list_txn(
txn, "room_account_data", {"user_id": user_id},
["room_id", "account_data_type", "content"]
)
by_room = {}
for row in rows:
room_data = by_room.setdefault(row["room_id"], {})
room_data[row["account_data_type"]] = json.loads(row["content"])
return (global_account_data, by_room)
return self.runInteraction(
"get_account_data_for_user", get_account_data_for_user_txn
)
@cachedInlineCallbacks(num_args=2)
def get_global_account_data_by_type_for_user(self, data_type, user_id):
"""
Returns:
Deferred: A dict
"""
result = yield self._simple_select_one_onecol(
table="account_data",
keyvalues={
"user_id": user_id,
"account_data_type": data_type,
},
retcol="content",
desc="get_global_account_data_by_type_for_user",
allow_none=True,
)
if result:
defer.returnValue(json.loads(result))
else:
defer.returnValue(None)
@cachedList(cached_method_name="get_global_account_data_by_type_for_user",
num_args=2, list_name="user_ids", inlineCallbacks=True)
def get_global_account_data_by_type_for_users(self, data_type, user_ids):
rows = yield self._simple_select_many_batch(
table="account_data",
column="user_id",
iterable=user_ids,
keyvalues={
"account_data_type": data_type,
},
retcols=("user_id", "content",),
desc="get_global_account_data_by_type_for_users",
)
defer.returnValue({
row["user_id"]: json.loads(row["content"]) if row["content"] else None
for row in rows
})
def get_account_data_for_room(self, user_id, room_id):
"""Get all the client account_data for a user for a room.
Args:
user_id(str): The user to get the account_data for.
room_id(str): The room to get the account_data for.
Returns:
A deferred dict of the room account_data
"""
def get_account_data_for_room_txn(txn):
rows = self._simple_select_list_txn(
txn, "room_account_data", {"user_id": user_id, "room_id": room_id},
["account_data_type", "content"]
)
return {
row["account_data_type"]: json.loads(row["content"]) for row in rows
}
return self.runInteraction(
"get_account_data_for_room", get_account_data_for_room_txn
)
def get_all_updated_account_data(self, last_global_id, last_room_id,
current_id, limit):
"""Get all the client account_data that has changed on the server
Args:
last_global_id(int): The position to fetch from for top level data
last_room_id(int): The position to fetch from for per room data
current_id(int): The position to fetch up to.
limit(int): The maximum number of rows to fetch from each table.
Returns:
A deferred pair of lists of tuples of stream_id int, user_id string,
room_id string, type string, and content string.
"""
if last_room_id == current_id and last_global_id == current_id:
return defer.succeed(([], []))
def get_updated_account_data_txn(txn):
sql = (
"SELECT stream_id, user_id, account_data_type, content"
" FROM account_data WHERE ? < stream_id AND stream_id <= ?"
" ORDER BY stream_id ASC LIMIT ?"
)
txn.execute(sql, (last_global_id, current_id, limit))
global_results = txn.fetchall()
sql = (
"SELECT stream_id, user_id, room_id, account_data_type, content"
" FROM room_account_data WHERE ? < stream_id AND stream_id <= ?"
" ORDER BY stream_id ASC LIMIT ?"
)
txn.execute(sql, (last_room_id, current_id, limit))
room_results = txn.fetchall()
return (global_results, room_results)
return self.runInteraction(
"get_all_updated_account_data_txn", get_updated_account_data_txn
)
def get_updated_account_data_for_user(self, user_id, stream_id):
"""Get all the client account_data that has changed for a user
Args:
user_id(str): The user to get the account_data for.
stream_id(int): The point in the stream since which to get updates
Returns:
A deferred pair of a dict of global account_data and a dict
mapping from room_id string to per room account_data dicts.
"""
def get_updated_account_data_for_user_txn(txn):
sql = (
"SELECT account_data_type, content FROM account_data"
" WHERE user_id = ? AND stream_id > ?"
)
txn.execute(sql, (user_id, stream_id))
global_account_data = {
row[0]: json.loads(row[1]) for row in txn.fetchall()
}
sql = (
"SELECT room_id, account_data_type, content FROM room_account_data"
" WHERE user_id = ? AND stream_id > ?"
)
txn.execute(sql, (user_id, stream_id))
account_data_by_room = {}
for row in txn.fetchall():
room_account_data = account_data_by_room.setdefault(row[0], {})
room_account_data[row[1]] = json.loads(row[2])
return (global_account_data, account_data_by_room)
changed = self._account_data_stream_cache.has_entity_changed(
user_id, int(stream_id)
)
if not changed:
return ({}, {})
return self.runInteraction(
"get_updated_account_data_for_user", get_updated_account_data_for_user_txn
)
@defer.inlineCallbacks
def add_account_data_to_room(self, user_id, room_id, account_data_type, content):
"""Add some account_data to a room for a user.
Args:
user_id(str): The user to add the account_data for.
room_id(str): The room to add the account_data for.
account_data_type(str): The type of account_data to add.
content(dict): A json object to associate with the account_data.
Returns:
A deferred that completes once the account_data has been added.
"""
content_json = json.dumps(content)
def add_account_data_txn(txn, next_id):
self._simple_upsert_txn(
txn,
table="room_account_data",
keyvalues={
"user_id": user_id,
"room_id": room_id,
"account_data_type": account_data_type,
},
values={
"stream_id": next_id,
"content": content_json,
}
)
txn.call_after(
self._account_data_stream_cache.entity_has_changed,
user_id, next_id,
)
txn.call_after(self.get_account_data_for_user.invalidate, (user_id,))
self._update_max_stream_id(txn, next_id)
with self._account_data_id_gen.get_next() as next_id:
yield self.runInteraction(
"add_room_account_data", add_account_data_txn, next_id
)
result = self._account_data_id_gen.get_current_token()
defer.returnValue(result)
@defer.inlineCallbacks
def add_account_data_for_user(self, user_id, account_data_type, content):
"""Add some global account_data for a user.
Args:
user_id(str): The user to add the account_data for.
account_data_type(str): The type of account_data to add.
content(dict): A json object to associate with the account_data.
Returns:
A deferred that completes once the account_data has been added.
"""
content_json = json.dumps(content)
def add_account_data_txn(txn, next_id):
self._simple_upsert_txn(
txn,
table="account_data",
keyvalues={
"user_id": user_id,
"account_data_type": account_data_type,
},
values={
"stream_id": next_id,
"content": content_json,
}
)
txn.call_after(
self._account_data_stream_cache.entity_has_changed,
user_id, next_id,
)
txn.call_after(self.get_account_data_for_user.invalidate, (user_id,))
txn.call_after(
self.get_global_account_data_by_type_for_user.invalidate,
(account_data_type, user_id,)
)
self._update_max_stream_id(txn, next_id)
with self._account_data_id_gen.get_next() as next_id:
yield self.runInteraction(
"add_user_account_data", add_account_data_txn, next_id
)
result = self._account_data_id_gen.get_current_token()
defer.returnValue(result)
def _update_max_stream_id(self, txn, next_id):
"""Update the max stream_id
Args:
txn: The database cursor
next_id(int): The revision to advance to.
"""
update_max_id_sql = (
"UPDATE account_data_max_stream_id"
" SET stream_id = ?"
" WHERE stream_id < ?"
)
txn.execute(update_max_id_sql, (next_id, next_id))
from __future__ import unicode_literals
import logging
import operator
import os
import re
import socket
from decimal import Decimal
from itertools import islice, zip_longest
from urllib.parse import parse_qsl
import pkg_resources
from classytags.arguments import Argument
from classytags.core import Options
from classytags.helpers import AsTag
from django.conf import settings
from django.db.models import Model, Q
from django.db.models.query import QuerySet
from django.forms.boundfield import BoundField
from django.forms.widgets import (
CheckboxInput, CheckboxSelectMultiple, FileInput, MultiWidget, PasswordInput,
RadioSelect, Select, Textarea, TextInput,
)
from django.template import Library, Node
from django.template.loader import get_template, render_to_string
from django.urls import Resolver404, resolve, reverse
from django.utils import timezone
from django.utils.encoding import smart_str
from django.utils.html import escape
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.text import slugify
from guardian.core import ObjectPermissionChecker
from namedentities import named_entities
from touchtechnology.common.default_settings import CURRENCY_SYMBOL
from touchtechnology.common.exceptions import NotModelManager
from touchtechnology.common.models import SitemapNode
from touchtechnology.common.utils import (
create_exclude_filter, get_all_perms_for_model_cached, model_and_manager,
tree_for_node,
)
from tournamentcontrol.competition.utils import FauxQueryset
logger = logging.getLogger(__name__)
camel_case_re = re.compile(r"(?P<new_word>((?<![A-Z])[A-Z]|[A-Z](?![A-Z0-9])))")
version_re = re.compile(
r"""^
(?P<version>\d+\.\d+) # minimum 'N.N'
(?P<extraversion>(?:\.\d+)*) # any number of extra '.N' segments
(?:
(?P<prerel>[abc]|rc) # 'a' = alpha, 'b' = beta
# 'c' or 'rc' = release candidate
(?P<prerelversion>\d+(?:\.\d+)*)
)?
(?P<postdev>(\.post(?P<post>\d+))?(\.dev(?P<dev>\d+))?)?
$""",
re.VERBOSE,
)
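# Added note (hedged, inferred from the pattern above rather than from any
# external spec): for a version string such as "2.0.3rc1" the regex yields
# version='2.0', extraversion='.3', prerel='rc' and prerelversion='1'; plain
# releases like "1.11" match with the optional pre-release and post/dev groups
# left empty.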
FORM_FIELD_TEMPLATE = get_template("touchtechnology/common/templatetags/field.html")
register = Library()
@register.filter
def at_a_time(i, n):
"""
Turn a list into smaller lists of n items. For example, if we have:
object_list = [1, 2, 3, 4, 5, 6, 7, 8]
The {% object_list|at_a_time:2 %} will result in:
[[1, 2], [3, 4], [5, 6], [7, 8]
Or {% object_list|at_a_time:3 %} will result in:
[[1, 2, 3], [4, 5, 6], [7, 8, None]]
We don't filter out None items, so check when iterating in your template.
"""
return zip_longest(*[islice(i, x, None, n) for x in range(n)])
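# Added illustration (not part of the original library): the filter is built
# only from islice/zip_longest, so its behaviour can be checked outside a
# template for any re-iterable sequence, e.g.:
#   list(zip_longest(*[islice(range(1, 9), x, None, 3) for x in range(3)]))
#   -> [(1, 2, 3), (4, 5, 6), (7, 8, None)]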
@register.filter
def camel_case_to_underscores(s):
"""Converts a CamelCase name to use lowercase_and_underscores.
e.g. "ConvertHTMLToText" -> "convert_html_to_text"
"""
def convert(match):
content = match.group().lower()
if match.start() == 0:
return content
else:
return "_%s" % content
return camel_case_re.sub(convert, s).lower()
@register.filter
def camel_case_split(s):
"""Converts a CamelCase name to be split by spaces.
e.g. "CovertHTMLToText" -> "Convert HTML To Text"
"""
def convert(match):
content = match.group()
if match.start() == 0:
return content
else:
return " %s" % content
return camel_case_re.sub(convert, s)
@register.filter
def cssify(s):
return slugify(s or "").replace("-", "_")
@register.filter
def future(d):
if d > timezone.now():
return True
return False
@register.filter
def split(st, on):
return st.split(on)
@register.filter
def twittify(s):
twitter_re = re.compile(r"(@(?P<username>[a-z0-9_]+)?)", re.I)
return mark_safe(
twitter_re.sub(
r'@<a class="twitter user" '
'target="_blank" '
'href="http://twitter.com/\\2">\\2</a>',
s,
)
)
@register.filter
def count(queryset):
return queryset.count()
@register.filter
def disabled(queryset):
return queryset.exclude(enabled=True)
@register.filter
def enabled(queryset):
return queryset.filter(enabled=True)
@register.filter
def invisible(queryset):
return queryset.exclude(hidden_from_navigation=False)
@register.filter
def visible(queryset):
return queryset.filter(hidden_from_navigation=False)
@register.tag
def navigation(parser, token):
"""
Build a navigational structure.
Examples:
{% navigation %}
{% navigation current_node=node %}
{% navigation start_at=1 stop_at=3 %}
{% navigation root='page_3' %}
"""
args = token.split_contents()[1:]
kwargs = {k: parser.compile_filter(v) for k, v in [a.split("=", 1) for a in args]}
return NavigationNode(kwargs)
class NavigationNode(Node):
def __init__(self, kwargs):
self.kwargs = kwargs
def render(self, context):
r_kwargs = dict(
[
(str(k), v.resolve(context, ignore_failures=True))
for k, v in self.kwargs.items()
]
)
return do_navigation(**r_kwargs)
def do_navigation(
root=None,
start_at=None,
stop_at=None,
current_node=None,
expand_all_nodes=None,
template_name=None,
**kwargs
):
nodes = SitemapNode._tree_manager.select_related("content_type", "parent")
if template_name is None:
template_name = "touchtechnology/common/templatetags/navigation.html"
if expand_all_nodes is None:
expand_all_nodes = False
logger.debug("======== do_navigation ========")
logger.debug("template_name: %r", template_name)
logger.debug("expand_all_nodes: %r", expand_all_nodes)
logger.debug("current_node: %r", current_node)
if root is not None:
if isinstance(root, str):
try:
root = resolve(root).kwargs.get("node")
except Resolver404:
root = resolve(reverse(root)).kwargs.get("node")
logger.debug("root: %r", root)
try:
nodes = root.get_descendants(include_self=True)
except AttributeError:
nodes = SitemapNode.objects.none()
if (
current_node is not None
and current_node.tree_id == root.tree_id
and root.lft < current_node.lft
and not expand_all_nodes
):
nodes = tree_for_node(current_node)
elif current_node is not None and not expand_all_nodes:
nodes = tree_for_node(current_node)
logger.debug("nodes: %r", nodes)
# make sure we hide any nodes that are in a hidden part of the tree
nodes_hidden_from_navigation = nodes.filter(
Q(hidden_from_navigation=True) | Q(enabled=False)
)
hidden_from_navigation = create_exclude_filter(nodes_hidden_from_navigation)
nodes = nodes.exclude(hidden_from_navigation)
logger.debug("nodes[cleaned]: %r", nodes)
# flatten the list of nodes to a list
tree = list(nodes)
if current_node is None and not expand_all_nodes:
stop_at = max(start_at or 0, stop_at or 0, 0)
if start_at is not None:
tree = [n for n in tree if n.level >= start_at]
if stop_at is not None:
tree = [n for n in tree if n.level <= stop_at]
if not expand_all_nodes and current_node is not None:
parents = []
n = current_node
while n.parent is not None:
parents.append(n.parent)
n = n.parent
fmt = "[{rel}] {url} {node}"
def func(node):
"""
Filter function to determine if a node should appear in the
navigation tree or not.
"""
rel = current_node.rel(node)
url = node.get_absolute_url()
logger.debug(fmt.format(node=node, rel=rel, url=url))
return rel in {
"ROOT",
"ANCESTOR",
"PARENT",
"UNCLE",
"ME",
"SIBLING",
"DESCENDANT",
}
log = {
"rel": "NODE",
"node": repr(current_node),
"url": current_node.get_absolute_url(),
}
logger.debug(fmt.format(**log))
tree = [t for t in tree if func(t)]
# re-sort the queryset to get our correct tree structure back
tree = sorted(tree, key=operator.attrgetter("tree_id", "lft"))
context = {
"nodes": tree,
"current_node": current_node,
"hidden_nodes": nodes_hidden_from_navigation,
"start_at": start_at,
"stop_at": stop_at,
}
return render_to_string(template_name, context)
@register.simple_tag
def field(bf, label=None):
if not isinstance(bf, BoundField):
raise TypeError(
"{{% field %}} tag can only be used with " "BoundFields ({0})".format(bf)
)
if bf.is_hidden:
return smart_str(bf)
widget = bf.field.widget
widget_class_name = camel_case_to_underscores(widget.__class__.__name__)
if label is None:
label = bf.label
if isinstance(widget, (CheckboxInput,)):
radio_checkbox_input = True
else:
radio_checkbox_input = False
if label:
if isinstance(
widget, (TextInput, PasswordInput, FileInput, Textarea, Select)
) and not isinstance(
widget, (CheckboxInput, RadioSelect, CheckboxSelectMultiple, MultiWidget)
):
# Use a <label> tag
caption = bf.label_tag(label, attrs={"class": "field_name"})
else:
# Don't use a <label> tag
label = label.decode("utf8") if type(label) is bytes else label
label_suffix = bf.form.label_suffix or ""
caption = '<span class="field_name">' "%s%s</span>" % (label, label_suffix)
else:
caption = ""
context = {
"f": bf,
"caption": caption,
"widget_class_name": widget_class_name,
"radio_checkbox_input": radio_checkbox_input,
"no_label": not bool(label),
}
return FORM_FIELD_TEMPLATE.render(context)
@register.inclusion_tag("touchtechnology/common/templatetags/analytics.html")
def analytics(code=None):
if code is None:
code = getattr(settings, "GOOGLE_ANALYTICS", code)
context = {
"code": code,
"debug": not os.environ.get("SITE_ENV", "dev") == "live",
}
return context
@register.inclusion_tag(
"touchtechnology/common/templatetags/pagination.html", takes_context=True
)
def pagination(context):
query_string = context.get("QUERY_STRING", "")
query = parse_qsl(query_string, True)
query = [q for q in query if q[0] != "page"]
query_string = urlencode(query, True)
context.update({"QUERY_STRING": query_string})
return context
@register.filter("type")
def get_type(obj):
if isinstance(obj, QuerySet):
return obj.model._meta.verbose_name
elif isinstance(obj, Model):
return obj._meta.verbose_name
else:
return obj.__class__.__name__
@register.filter("types")
def get_type_plural(obj):
if isinstance(obj, (QuerySet, FauxQueryset)):
return obj.model._meta.verbose_name_plural
elif isinstance(obj, Model):
return obj._meta.verbose_name_plural
else:
# extremely naive
return obj.__class__.__name__ + "s"
@register.filter
def htmlentities(s):
replaced_entities = named_entities(
escape(s).encode("ascii", "xmlcharrefreplace").decode("utf8")
)
return mark_safe(replaced_entities)
@register.filter("abs")
def absolute_value(value):
return abs(value)
@register.inclusion_tag("touchtechnology/common/templatetags/price.html")
def price(value, extra=""):
context = {
"SYMBOL": CURRENCY_SYMBOL,
"AMOUNT": value or Decimal("0.00"),
"EXTRA": extra,
}
return context
@register.filter("islice", is_safe=True)
def islice_(value, arg):
"""
Returns an iterator slice of the list.
"""
try:
bits = []
for x in arg.split(":"):
if len(x) == 0:
bits.append(None)
else:
bits.append(int(x))
return islice(value, *bits)
except (ValueError, TypeError):
return value # Fail silently.
@register.inclusion_tag("touchtechnology/common/templatetags/version.html")
def version(package, url=None):
environment = pkg_resources.Environment()
try:
if package == "python":
context = {
"name": "Python",
"project": "Python",
"version": environment.python,
"release": None,
}
else:
distribution = environment[package][-1]
release = version_re.match(distribution.version).groupdict()
context = {
"name": distribution.key,
"project": distribution.project_name,
"version": distribution.version,
"release": release.get("prerel"),
}
except IndexError:
logger.exception(
'Attempting to lookup version for package "%s" not in pkg_resources',
package,
)
# Fall back to a minimal context so the tag still renders rather than
# raising UnboundLocalError on the return below.
context = {"name": package, "project": package, "version": None, "release": None}
if url:
context["url"] = url
return context
@register.filter("permchecker")
def create_permission_checker(model_or_manager, user):
"""
Create an :class:`ObjectPermissionChecker` for optimal permission checking
of related objects.
http://django-guardian.readthedocs.io/en/stable/userguide/performance.html
"""
model, manager = model_and_manager(model_or_manager)
checker = ObjectPermissionChecker(user)
try:
checker.prefetch_perms(manager)
except UnboundLocalError:
logger.exception(
"https://github.com/django-guardian/django-guardian/issues/519"
)
return checker
@register.filter("checkperm")
def check_permission(obj, checker):
"""
For a given model instance and a :class:`ObjectPermissionChecker` return
which permissions the associated user has.
"""
return [
model_perm
for model_perm in get_all_perms_for_model_cached(obj._meta.model)
if checker.has_perm(model_perm, obj)
]
@register.filter("hasperm")
def has_permission(obj, user):
try:
model, manager = model_and_manager(obj)
except NotModelManager:
logger.exception('error="model cannot be determined"')
return set()
logger.debug(
'model="%s.%s", object_id="%s", user="%s"',
model._meta.app_label,
model._meta.model_name,
getattr(obj, "pk", ""),
user.get_username(),
)
# Calculate the permissions this user has for the given object, both
# directly and inferred by group memberships.
perms = user.get_all_permissions(obj)
perms |= user.get_group_permissions(obj)
# What are the permissions that this model accepts?
model_perms = get_all_perms_for_model_cached(model, ttl=300)
# Superusers have all permissions, so add each permission that the model
# accepts for this user to the set that has been explicitly cast.
if user.is_superuser:
perms.update(model_perms)
# Otherwise we need to iterate the groups that the user belongs to and see
# if they transfer permission to the user.
else:
for group in user.groups.all():
group_perms = group.permissions.all()
group_model_perms = [p.codename for p in model_perms if p in group_perms]
logger.debug(
'user="%s", group="%s", permissions="%s"',
user.get_username(),
group,
", ".join(group_model_perms),
)
perms.update(group_model_perms)
# Log the resulting set of permissions for this user for this object.
logger.debug('user="%s", permissions=%r', user.get_username(), perms)
return perms
@register.simple_tag
def host():
return socket.gethostname()
@register.inclusion_tag("touchtechnology/common/templatetags/hostname.html")
def hostname():
hostname = socket.gethostname()
host, domain = hostname.split(".", 1)
return dict(hostname=hostname, host=host, domain=domain)
class Login(AsTag):
options = Options("as", Argument("varname", required=False, resolve=False))
def get_value(self, context):
request = context.get("request")
url = reverse("accounts:login")
if request:
url += "?next=" + request.path
return url
register.tag(Login)
|
|
# Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.serialization import jsonutils as json
from sahara.i18n import _
from sahara.openstack.common import log as logging
from sahara.plugins.general import exceptions as ex
from sahara.plugins.hdp.versions import versionhandlerfactory as vhf
LOG = logging.getLogger(__name__)
class ClusterSpec():
def __init__(self, config, version='1.3.2'):
self._config_template = config
self.services = []
self.configurations = {}
self.node_groups = {}
self.version = version
self.user_input_handlers = {}
cluster_template = json.loads(config)
self._parse_services(cluster_template)
self._parse_configurations(cluster_template)
self._process_node_groups(template_json=cluster_template)
def create_operational_config(self, cluster, user_inputs,
scaled_groups=None):
if scaled_groups is None:
scaled_groups = {}
self._determine_deployed_services(cluster)
self._process_node_groups(cluster=cluster)
for ng_id in scaled_groups:
existing = next(group for group in self.node_groups.values()
if group.id == ng_id)
existing.count = scaled_groups[ng_id]
self.validate_node_groups(cluster)
self._finalize_ng_components()
self._parse_configurations(json.loads(self._config_template))
self._process_user_inputs(user_inputs)
self._replace_config_tokens()
def scale(self, updated_groups):
for ng_id in updated_groups:
existing = next(group for group in self.node_groups.values()
if group.id == ng_id)
existing.count = updated_groups[ng_id]
def validate_node_groups(self, cluster):
for service in self.services:
if service.deployed:
service.validate(self, cluster)
elif service.is_mandatory():
raise ex.RequiredServiceMissingException(service.name)
def get_deployed_configurations(self):
configs = set()
for service in self.services:
if service.deployed:
configs |= service.configurations
return configs
def determine_component_hosts(self, component):
hosts = set()
for ng in self.node_groups.values():
if component in ng.components:
hosts |= ng.instances
return hosts
def normalize(self):
return NormalizedClusterConfig(self)
def get_deployed_node_group_count(self, name):
count = 0
for ng in self.get_node_groups_containing_component(name):
count += ng.count
return count
def get_node_groups_containing_component(self, component):
found_node_groups = []
for ng in self.node_groups.values():
if component in ng.components:
found_node_groups.append(ng)
return found_node_groups
def get_components_for_type(self, type):
components = set()
for service in self.services:
for component in service.components:
if component.type == type:
components.add(component.name)
return components
def _parse_services(self, template_json):
handler = (vhf.VersionHandlerFactory.get_instance().
get_version_handler(self.version))
sp = handler.get_services_processor()
for s in template_json['services']:
name = s['name']
service = sp.create_service(name)
self.services.append(service)
for c in s['components']:
component = Component(c['name'], c['type'], c['cardinality'])
service.add_component(component)
if 'users' in s:
for u in s['users']:
user = User(u['name'], u['password'], u['groups'])
service.add_user(user)
configs = self._parse_configurations(s)
for config in configs:
service.add_configuration(config)
def _parse_configurations(self, template_json):
config_names = []
for config in template_json['configurations']:
config_props = {}
name = config['name']
config_names.append(name)
if name in self.configurations:
config_props = self.configurations[name]
else:
self.configurations[name] = config_props
if 'properties' in config:
for prop in config['properties']:
config_props[prop['name']] = prop['value']
return config_names
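# Added note (hedged, inferred from the loop above rather than taken from a
# real Ambari template): each entry of template_json['configurations'] is
# expected to look roughly like
#   {"name": "some-config",
#    "properties": [{"name": "some.property", "value": "..."}]}
# and repeated configuration names are merged into the same property dict.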
def _process_node_groups(self, template_json=None, cluster=None):
# get node_groups from config
if template_json and not cluster:
for group in template_json['host_role_mappings']:
node_group = NodeGroup(group['name'].lower())
for component in group['components']:
node_group.add_component(component['name'])
for host in group['hosts']:
if 'predicate' in host:
node_group.predicate = host['predicate']
if 'cardinality' in host:
node_group.cardinality = host['cardinality']
if 'default_count' in host:
node_group.count = host['default_count']
self.node_groups[node_group.name] = node_group
if cluster:
self.node_groups = {}
node_groups = cluster.node_groups
for ng in node_groups:
node_group = NodeGroup(ng.name.lower())
node_group.count = ng.count
node_group.id = ng.id
node_group.components = ng.node_processes[:]
node_group.ng_storage_paths = ng.storage_paths()
for instance in ng.instances:
node_group.instances.add(Instance(instance))
self.node_groups[node_group.name] = node_group
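# Added note (hedged, inferred from the template branch above): each entry of
# template_json['host_role_mappings'] is expected to provide a group name, a
# list of components and one or more host blocks, e.g.
#   {"name": "MASTER",
#    "components": [{"name": "NAMENODE"}],
#    "hosts": [{"cardinality": "1", "default_count": 1, "predicate": "..."}]}
# where "predicate", "cardinality" and "default_count" are all optional.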
def _determine_deployed_services(self, cluster):
for ng in cluster.node_groups:
for service in self.services:
if service.deployed:
continue
for sc in service.components:
if sc.name in ng.node_processes:
service.deployed = True
service.register_user_input_handlers(
self.user_input_handlers)
break
def _process_user_inputs(self, user_inputs):
for ui in user_inputs:
user_input_handler = self.user_input_handlers.get(
'{0}/{1}'.format(ui.config.tag, ui.config.name),
self._default_user_input_handler)
user_input_handler(ui, self.configurations)
def _replace_config_tokens(self):
for service in self.services:
if service.deployed:
service.finalize_configuration(self)
def _finalize_ng_components(self):
for service in self.services:
if service.deployed:
service.finalize_ng_components(self)
def _default_user_input_handler(self, user_input, configurations):
config_map = configurations[user_input.config.tag]
config_map[user_input.config.name] = user_input.value
class Component():
def __init__(self, name, component_type, cardinality):
self.name = name
self.type = component_type
self.cardinality = cardinality
class NodeGroup():
def __init__(self, name):
self.id = None
self.name = name
self.components = []
self.predicate = None
self.cardinality = None
self.count = None
self.instances = set()
self.ng_storage_paths = []
def add_component(self, component):
self.components.append(component)
def storage_paths(self):
return self.ng_storage_paths
class User():
def __init__(self, name, password, groups):
self.name = name
self.password = password
self.groups = groups
class Instance():
def __init__(self, sahara_instance):
self.inst_fqdn = sahara_instance.fqdn()
self.management_ip = sahara_instance.management_ip
self.internal_ip = sahara_instance.internal_ip
self.sahara_instance = sahara_instance
def fqdn(self):
return self.inst_fqdn
def remote(self):
return self.sahara_instance.remote()
def __hash__(self):
return hash(self.fqdn())
def __eq__(self, other):
return self.fqdn() == other.fqdn()
class NormalizedClusterConfig():
def __init__(self, cluster_spec):
self.hadoop_version = cluster_spec.version
self.cluster_configs = []
self.node_groups = []
self.handler = (vhf.VersionHandlerFactory.get_instance().
get_version_handler(self.hadoop_version))
self._parse_configurations(cluster_spec.configurations)
self._parse_node_groups(cluster_spec.node_groups)
def _parse_configurations(self, configurations):
for config_name, properties in configurations.items():
for prop, value in properties.items():
target = self._get_property_target(prop)
if target:
prop_type = self._get_property_type(prop, value)
# TODO(sdpeidel): should we supply a scope?
self.cluster_configs.append(
NormalizedConfigEntry(NormalizedConfig(
prop, prop_type, value, target, 'cluster'),
value))
def _parse_node_groups(self, node_groups):
for node_group in node_groups.values():
self.node_groups.append(NormalizedNodeGroup(node_group))
def _get_property_target(self, prop):
return self.handler.get_applicable_target(prop)
def _get_property_type(self, prop, value):
# TODO(jspeidel): seems that all numeric prop values in default config
# are encoded as strings. This may be incorrect.
# TODO(jspeidel): should probably analyze string value to determine if
# it is numeric
# TODO(jspeidel): would then need to know whether Ambari expects a
# string or a numeric value
prop_type = type(value).__name__
# print 'Type: {0}'.format(prop_type)
if prop_type == 'str' or prop_type == 'unicode' or value == '':
return 'string'
elif prop_type == 'int':
return 'integer'
elif prop_type == 'bool':
return 'boolean'
else:
raise ValueError(
_("Could not determine property type for property "
"'%(property)s' with value: %(value)s") %
{"property": prop, "value": value})
class NormalizedConfig():
def __init__(self, name, config_type, default_value, target, scope):
self.name = name
self.description = None
self.type = config_type
self.default_value = default_value
self.is_optional = False
self.applicable_target = target
self.scope = scope
class NormalizedConfigEntry():
def __init__(self, config, value):
self.config = config
self.value = value
class NormalizedNodeGroup():
def __init__(self, node_group):
self.name = node_group.name
self.node_processes = node_group.components
self.node_configs = None
# TODO(jpseidel): should not have to specify img/flavor
self.img = None
# TODO(jmaron) the flavor will be set via an ambari blueprint setting,
# but that setting doesn't exist yet. It will be addressed by a bug
# fix shortly
self.flavor = 3
self.count = node_group.count
self.id = node_group.id
|
|
import sys
if sys.version_info >= (3, 8):
from unittest import IsolatedAsyncioTestCase as TestCase
from unittest.mock import AsyncMock
else:
from unittest import TestCase
from asynctest.mock import CoroutineMock as AsyncMock
from unittest.mock import Mock, call, sentinel
import pytest
from jj.apps import create_app
from jj.resolvers import Registry, Resolver
class TestResolver(TestCase):
def setUp(self):
self.default_handler = AsyncMock(return_value=sentinel.default_response)
self.default_app = create_app()
self.resolver = Resolver(Registry(), self.default_app, self.default_handler)
# Apps
def test_get_apps(self):
apps = self.resolver.get_apps()
self.assertEqual(apps, [])
def test_register_app(self):
res = self.resolver.register_app(type(self.default_app))
self.assertIsNone(res)
apps = self.resolver.get_apps()
self.assertEqual(apps, [type(self.default_app)])
def test_register_another_app(self):
self.resolver.register_app(type(self.default_app))
app = create_app()
self.resolver.register_app(type(app))
apps = self.resolver.get_apps()
self.assertEqual(apps, [type(self.default_app), type(app)])
def test_register_app_twice(self):
self.resolver.register_app(type(self.default_app))
res = self.resolver.register_app(type(self.default_app))
self.assertIsNone(res)
apps = self.resolver.get_apps()
self.assertEqual(apps, [type(self.default_app)])
def test_deregister_single_app(self):
self.resolver.register_app(type(self.default_app))
res = self.resolver.deregister_app(type(self.default_app))
self.assertIsNone(res)
apps = self.resolver.get_apps()
self.assertEqual(apps, [])
def test_deregister_app(self):
app1, app2 = create_app(), create_app()
self.resolver.register_app(type(app1))
self.resolver.register_app(type(app2))
self.resolver.deregister_app(type(app1))
apps = self.resolver.get_apps()
self.assertEqual(apps, [type(app2)])
def test_deregister_nonexisting_app(self):
app = create_app()
res = self.resolver.deregister_app(type(app))
self.assertIsNone(res)
# Handlers
def test_get_handlers(self):
handlers = self.resolver.get_handlers(type(self.default_app))
self.assertEqual(handlers, [])
def test_get_handlers_with_nonexisting_app(self):
app = create_app()
handlers = self.resolver.get_handlers(type(app))
self.assertEqual(handlers, [])
def test_register_handler(self):
handler = AsyncMock(return_value=sentinel.response)
res = self.resolver.register_handler(handler, type(self.default_app))
self.assertIsNone(res)
handlers = self.resolver.get_handlers(type(self.default_app))
self.assertEqual(handlers, [handler])
def test_register_another_handler(self):
handler1 = AsyncMock(return_value=sentinel.response)
handler2 = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler1, type(self.default_app))
self.resolver.register_handler(handler2, type(self.default_app))
handlers = self.resolver.get_handlers(type(self.default_app))
self.assertEqual(handlers, [handler1, handler2])
def test_register_handler_twice(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
res = self.resolver.register_handler(handler, type(self.default_app))
self.assertIsNone(res)
handlers = self.resolver.get_handlers(type(self.default_app))
self.assertEqual(handlers, [handler])
def test_deregister_single_handler(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
res = self.resolver.deregister_handler(handler, type(self.default_app))
self.assertIsNone(res)
handlers = self.resolver.get_handlers(type(self.default_app))
self.assertEqual(handlers, [])
def test_deregister_handler(self):
handler1 = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler1, type(self.default_app))
handler2 = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler2, type(self.default_app))
self.resolver.deregister_handler(handler1, type(self.default_app))
handlers = self.resolver.get_handlers(type(self.default_app))
self.assertEqual(handlers, [handler2])
def test_deregister_nonexisting_handler(self):
handler = AsyncMock(return_value=sentinel.response)
res = self.resolver.deregister_handler(handler, type(self.default_app))
self.assertIsNone(res)
def test_deregister_handler_with_nonexisting_app(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
app = create_app()
res = self.resolver.deregister_handler(handler, type(app))
self.assertIsNone(res)
# Attributes
def test_get_nonexisting_attribute(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
attribute_value = self.resolver.get_attribute(sentinel.name, handler)
self.assertEqual(attribute_value, sentinel)
def test_get_attribute_with_non_existing_handler(self):
handler = AsyncMock(return_value=sentinel.response)
default = None
attribute_value = self.resolver.get_attribute(sentinel.name, handler, default)
self.assertEqual(attribute_value, default)
def test_register_attribute(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
res = self.resolver.register_attribute(sentinel.name, sentinel.value, handler)
self.assertIsNone(res)
attribute_value = self.resolver.get_attribute(sentinel.name, handler)
self.assertEqual(attribute_value, sentinel.value)
def test_register_another_attribute(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
self.resolver.register_attribute(sentinel.name1, sentinel.value1, handler)
self.resolver.register_attribute(sentinel.name2, sentinel.value2, handler)
attribute_value2 = self.resolver.get_attribute(sentinel.name2, handler)
self.assertEqual(attribute_value2, sentinel.value2)
attribute_value1 = self.resolver.get_attribute(sentinel.name1, handler)
self.assertEqual(attribute_value1, sentinel.value1)
def test_register_attribute_twice(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
self.resolver.register_attribute(sentinel.name1, sentinel.value1, handler)
res = self.resolver.register_attribute(sentinel.name, sentinel.value, handler)
self.assertIsNone(res)
attribute_value = self.resolver.get_attribute(sentinel.name, handler)
self.assertEqual(attribute_value, sentinel.value)
def test_deregister_attribute(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
self.resolver.register_attribute(sentinel.name, sentinel.value, handler)
res = self.resolver.deregister_attribute(sentinel.name, handler)
self.assertIsNone(res)
attribute_value = self.resolver.get_attribute(sentinel.name, handler, default=None)
self.assertEqual(attribute_value, None)
def test_deregister_nonexisting_attribute(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
res = self.resolver.deregister_attribute(sentinel.name, handler)
self.assertIsNone(res)
def test_deregister_attribute_with_nonexisting_handler(self):
handler = AsyncMock(return_value=sentinel.response)
res = self.resolver.deregister_attribute(sentinel.name, handler)
self.assertIsNone(res)
# Matchers
def test_get_matchers_without_matchers(self):
matchers = self.resolver.get_matchers(self.default_handler)
self.assertEqual(matchers, [])
def test_get_matchers_with_one_matcher(self):
matcher = AsyncMock(return_value=True)
self.resolver.register_matcher(matcher, self.default_handler)
matchers = self.resolver.get_matchers(self.default_handler)
self.assertEqual(matchers, [matcher])
def test_get_matchers_with_multiple_matchers(self):
matcher1 = AsyncMock(return_value=True)
matcher2 = AsyncMock(return_value=True)
self.resolver.register_matcher(matcher1, self.default_handler)
self.resolver.register_matcher(matcher2, self.default_handler)
matchers = self.resolver.get_matchers(self.default_handler)
self.assertEqual(matchers, [matcher1, matcher2])
def test_register_matcher(self):
handler = AsyncMock(return_value=sentinel.response)
matcher = AsyncMock(return_value=True)
self.assertIsNone(self.resolver.register_matcher(matcher, handler))
def test_deregister_matcher(self):
handler = AsyncMock(return_value=sentinel.response)
matcher = AsyncMock(return_value=True)
self.resolver.register_matcher(matcher, handler)
res = self.resolver.deregister_matcher(matcher, handler)
self.assertIsNone(res)
matchers = self.resolver.get_matchers(handler)
self.assertEqual(matchers, [])
def test_deregister_matcher_with_nonexisting_handler(self):
handler = AsyncMock(return_value=sentinel.response)
matcher = AsyncMock(return_value=True)
res = self.resolver.deregister_matcher(matcher, handler)
self.assertIsNone(res)
# Resolver
@pytest.mark.asyncio
async def test_resolve_request_with_all_truthy_matchers(self):
handler = AsyncMock(return_value=sentinel.response)
matcher1 = AsyncMock(return_value=True)
matcher2 = AsyncMock(return_value=True)
self.resolver.register_matcher(matcher1, handler)
self.resolver.register_matcher(matcher2, handler)
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, handler)
matcher1.assert_called_once_with(request)
matcher2.assert_called_once_with(request)
handler.assert_not_called()
@pytest.mark.asyncio
async def test_resolve_request_with_all_falsy_matchers(self):
handler = AsyncMock(return_value=sentinel.response)
matcher1 = AsyncMock(return_value=False)
matcher2 = AsyncMock(return_value=False)
self.resolver.register_matcher(matcher1, handler)
self.resolver.register_matcher(matcher2, handler)
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, self.default_handler)
matcher1.assert_called_once_with(request)
matcher2.assert_not_called()
handler.assert_not_called()
@pytest.mark.asyncio
async def test_resolve_request_with_first_falsy_matcher(self):
handler = AsyncMock(return_value=sentinel.response)
matcher1 = AsyncMock(return_value=False)
matcher2 = AsyncMock(return_value=True)
self.resolver.register_matcher(matcher1, handler)
self.resolver.register_matcher(matcher2, handler)
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, self.default_handler)
matcher1.assert_called_once_with(request)
matcher2.assert_not_called()
handler.assert_not_called()
@pytest.mark.asyncio
async def test_resolve_request_with_last_falsy_matcher(self):
handler = AsyncMock(return_value=sentinel.response)
matcher1 = AsyncMock(return_value=True)
matcher2 = AsyncMock(return_value=False)
self.resolver.register_matcher(matcher1, handler)
self.resolver.register_matcher(matcher2, handler)
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, self.default_handler)
matcher1.assert_called_once_with(request)
matcher2.assert_called_once_with(request)
handler.assert_not_called()
@pytest.mark.asyncio
async def test_resolve_request_without_handlers(self):
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, self.default_handler)
@pytest.mark.asyncio
async def test_resolve_request_with_nonexisting_app(self):
app = create_app()
request = Mock()
response = await self.resolver.resolve(request, app)
self.assertEqual(response, self.default_handler)
@pytest.mark.asyncio
async def test_resolve_request_without_matchers(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, self.default_handler)
@pytest.mark.asyncio
async def test_resolve_request_with_single_matcher(self):
handler = AsyncMock(return_value=sentinel.response)
matcher = AsyncMock(return_value=True)
self.resolver.register_matcher(matcher, handler)
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, handler)
matcher.assert_called_once_with(request)
handler.assert_not_called()
@pytest.mark.asyncio
async def test_resolve_request_with_multiple_handlers(self):
matcher = AsyncMock(side_effect=(False, True))
handler1 = AsyncMock(return_value=sentinel.response1)
handler2 = AsyncMock(return_value=sentinel.response2)
self.resolver.register_matcher(matcher, handler1)
self.resolver.register_matcher(matcher, handler2)
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, handler1)
handler1.assert_not_called()
handler2.assert_not_called()
matcher.assert_has_calls([call(request)] * 2, any_order=True)
self.assertEqual(matcher.call_count, 2)
@pytest.mark.asyncio
async def test_resolve_request_priority(self):
matcher = AsyncMock(side_effect=(True, True))
handler1 = AsyncMock(return_value=sentinel.response1)
handler2 = AsyncMock(return_value=sentinel.response2)
self.resolver.register_matcher(matcher, handler1)
self.resolver.register_matcher(matcher, handler2)
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, handler2)
handler1.assert_not_called()
handler2.assert_not_called()
matcher.assert_called_once_with(request)
|
|
#!/usr/local/bin/python
# -*- coding: latin-1
import binascii, os, stat, sys
CRCFILE = ".crcs"
chunk = 1048576
#flags = { 'quiet' : False, 'recurse' : False, 'verbose' : False, 'clear' : False, 'nofile' : False, 'pretty' : False, 'fullpath' : False }
class crc_file:
def __init__(self, directory_path, transparent=False, outfiledir=None, flags={}):
self.verbose = flags.get('verbose', False)
self.Verbose('starting crc_file: dp', directory_path, 'tr', transparent, 'ofd', outfiledir, 'fl', flags)
self.changed = False
self.transparent = transparent
self.nofile = flags.get('nofile', False)
self.quiet = flags.get('quiet', False)
self.pretty = flags.get('pretty', False)
self.path = None
self.crc_file_path = os.path.join(directory_path, CRCFILE)
if flags.get('clear'):
self.Clear()
else:
self.files = self.Read()
if transparent and not self.files:
self.nofile = True
self.path = directory_path
self.outfiledir = outfiledir
self.out_file_path = self.crc_file_path
if outfiledir:
self.out_file_path = os.path.join(outfiledir, CRCFILE)
if self.verbose:
print 'self.changed', self.changed
print 'self.nofile', self.nofile
print 'self.quiet', self.quiet
print 'self.verbose', self.verbose
print 'self.pretty', self.pretty
print 'self.path', self.path
print 'self.crc_file_path', self.crc_file_path
print 'self.files', self.files
print 'self.path', self.path
print 'self.outfiledir', self.outfiledir
print 'self.out_file_path', self.out_file_path
def __del__(self):
if self.path:
if self.nofile:
try:
os.remove(self.crc_file_path)
except:
pass
elif self.changed:
self.Write()
# tracker file manipulations
def Read(self):
self.Verbose('Reading', self.crc_file_path)
if os.path.exists(self.crc_file_path):
try:
return eval(open(self.crc_file_path).read())
except:
pass
return dict()
def Write(self):
self.Verbose('Writing', self.out_file_path)
try:
os.remove(self.out_file_path)
except:
pass
if self.files:
try:
open(self.out_file_path, 'w').write(str(self.files))
except:
self.Error("*** Writing", self.out_file_path, "failed")
else:
self.Verbose('No files to write.')
# directory manipulations
def Create(self, force=False, fullpath=False, subdirs=None):
counter_types = [('same', '='), ('diff', '~'), ('add', '+'), ('gone', '-'), ('unkn', '?'), ('exc', '!')]
counters = dict(map(lambda x: (x[0], 0), counter_types))
self.changed = force
fl = os.listdir(self.path)
self.Normal('+', self.path, "(%d)" % len(fl))
files = dict()
fullfiles = dict()
for file_name in fl:
full = os.path.join(self.path, file_name)
if file_name.startswith('.') or not os.path.exists(full) or os.path.islink(full):
continue
if os.path.isdir(full):
if subdirs != None:
subdirs.append(file_name)
continue
result, info = self.CreateFile(file_name, force)
self.Verbose('calculated', full, result, info)
if info:
files[file_name] = fullfiles[full] = info
counters[result] += 1
counters['gone'] = len(self.files.keys())
if self.verbose:
keys = self.files.keys()
keys.sort()
for k in keys:
self.Verbose(" ", k, "-")
elif not self.quiet and self.pretty:
for k in self.files.keys():
self.Pretty(os.path.join(self.path, k), 'removed')
self.changed = self.changed or counters['diff'] or counters['add'] or counters['gone']
cnts = reduce(lambda s, x: s + " %s %d" % x, filter(lambda x: x[1], map(lambda x: (x[1], counters[x[0]]), counter_types)), '')
if cnts:
self.Normal(' ', cnts)
else:
self.Normal(' no files')
self.files = files
if fullpath:
return fullfiles
return files
def CreateFile(self, file_name, force):
full = os.path.join(self.path, file_name)
st = os.lstat(full)
sig = (st.st_mtime, st.st_size)
self.Verbose(' ', file_name)
if file_name in self.files:
info = self.files[file_name][0:3]
if self.files[file_name][0:2] == sig:
result = 'same'
self.Verbose('=')
elif not force:
result = 'unkn'
self.Verbose('?')
else:
try:
info = self.ScanFile(full)
except KeyboardInterrupt:
raise
except:
self.Error("*** Can't CRC", full)
return 'exc', None
result = 'diff'
self.Verbose('~')
self.Pretty(full, 'changed')
del self.files[file_name]
else:
try:
info = self.ScanFile(full)
except KeyboardInterrupt:
raise
except:
self.Error("*** Can't CRC", full)
return 'exc', None
result = 'add'
self.Verbose('+')
self.Pretty(full, 'added')
return result, info
def Clear(self):
self.changed = True
self.files = dict()
def GetFileList(self):
keys = self.files.keys()
keys.sort()
return keys
def CleanFileList(self):
keys = self.GetFileList()
for k in keys:
if not os.path.exists(os.path.join(self.path, k)):
self.RemoveFile(k)
# low level single file
def ScanFile(self, full):
try:
st = os.lstat(full)
except:
self.Verbose("can't stat", full)
return ()
#print ".", full,
file_handle = open(full, 'rb')
crc = 0
while 1:
contents = file_handle.read(16777216)
#print ".",
if contents:
crc = binascii.crc32(contents, crc)
else:
break
#print (st.st_mtime, st.st_size, crc)
return (st.st_mtime, st.st_size, crc)
# file manipulations
def AddFile(self, file_name, crc=None):
if crc:
self.changed = True
self.files[file_name] = crc
elif not self.transparent:
self.changed = True
self.files[file_name] = self.ScanFile(os.path.join(self.path, file_name))
def RemoveFile(self, file_name):
self.changed = True
if file_name in self.files:
del self.files[file_name]
def RenameFile(self, old_name, new_name):
if old_name != new_name:
self.changed = True
if old_name in self.files:
self.files[new_name] = self.files[old_name]
del self.files[old_name]
elif not self.transparent:
self.files[new_name] = self.ScanFile(os.path.join(self.path, new_name))
def CopyFile(self, old_name, new_name):
self.changed = True
if old_name in self.files:
self.files[new_name] = self.files[old_name]
elif not self.transparent:
self.files[new_name] = self.ScanFile(os.path.join(self.path, new_name))
def GetFile(self, file_name):
return self.files.get(file_name)
# messaging
def Verbose(self, *args):
if self.verbose:
print ' '.join(map(str, args))
def Error(self, *args):
sys.stderr.write(' '.join(map(str, args)) + '\n')
def Normal(self, *args):
if not self.quiet and not self.pretty:
print ' '.join(map(str, args))
def Pretty(self, *args):
if not self.quiet and self.pretty:
print ' '.join(map(str, args))
#========================================================================
def FileCRC(fn):
#print ".",fn,
f = open(fn, 'rb')
crc = 0
while 1:
contents = f.read(16777216)
#print ".",
if contents:
crc = binascii.crc32(contents, crc)
else:
break
#print
return (crc,)
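# Added note (not in the original script): passing the running value back into
# binascii.crc32 makes the chunked loop equivalent to checksumming the file in
# one call, e.g.
#   binascii.crc32(b"spam" + b"eggs") == binascii.crc32(b"eggs", binascii.crc32(b"spam"))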
def ReadCRCFile(destdir):
try:
return eval(open(os.path.join(destdir, CRCFILE)).read())
except:
return dict()
def WriteCRCFile(destdir, files):
try:
os.remove(os.path.join(destdir, CRCFILE))
except:
pass
if files:
try:
open(os.path.join(destdir, CRCFILE), 'w').write(str(files))
except:
print "*** Writing", os.path.join(destdir, CRCFILE), "failed"
def CRCDir(destdir, flags={}, outfiledir=None, subdirs=None):
# recurse is cheap
same = diff = add = gone = unkn = 0
if flags.get('clear'):
saved = dict()
else:
saved = ReadCRCFile(destdir)
if not flags.get('quiet') and not flags.get('pretty'):
print "+",destdir,
fl = os.listdir(destdir)
if not flags.get('quiet') and not flags.get('pretty'):
print "(%d)" % len(fl)
files = dict()
fullfiles = dict()
for f in fl:
full = os.path.join(destdir, f)
try:
st = os.lstat(full)
except:
continue
sig = (st.st_mtime, st.st_size)
if stat.S_ISLNK(st.st_mode):
continue
elif stat.S_ISDIR(st.st_mode):
if subdirs != None:
subdirs.append(f)
# if flags.get('recurse'):
# CRCDir(full, flags)
continue
elif f[0] == '.':
continue
if flags.get('verbose'):
print " ",f,
if f in saved:
if saved[f][0:2] == sig:
same += 1
info = saved[f][0:3]
if flags.get('verbose'):
print "="
elif not flags.get('force'):
unkn += 1
info = saved[f][0:3]
if flags.get('verbose'):
print "?"
else:
try:
info = sig + FileCRC(full)
except KeyboardInterrupt:
raise
except:
print "*** Can't CRC", full
continue
diff += 1
if flags.get('verbose'):
print "~"
if not flags.get('quiet') and flags.get('pretty'):
print os.path.join(destdir, f),"changed"
del saved[f]
# elif f.endswith('~') and flags.backups:
# os.unlink(
else:
try:
info = sig + FileCRC(full)
except KeyboardInterrupt:
raise
except:
print "*** Can't CRC", full
continue
add += 1
if flags.get('verbose'):
print "+"
if not flags.get('quiet') and flags.get('pretty'):
print os.path.join(destdir, f),"added"
files[f] = info
fullfiles[os.path.join(destdir, f)] = info
gone = len(saved.keys())
if flags.get('verbose'):
keys = saved.keys()
keys.sort()
for k in keys:
print " ", k, "-"
elif not flags.get('quiet') and flags.get('pretty'):
for k in saved.keys():
print os.path.join(destdir, k),"removed"
if flags.get('nofile'):
try:
os.remove(os.path.join(destdir, CRCFILE))
except:
pass
elif flags.get('force') or diff or add or gone:
try:
if outfiledir:
WriteCRCFile(outfiledir, files)
else:
WriteCRCFile(destdir, files)
except:
pass
if not flags.get('quiet') and not flags.get('pretty'):
print " -",gone," +",add," ~",diff," =",same," ?",unkn
if flags.get('fullpath'):
return fullfiles
return files
if __name__ == '__main__':
pass
'''
import sys,os
dirlist = os.walk(sys.argv[1])
files = {}
for d in dirlist:
files.update(CRCDir(d[0], {'fullpath':True}))
print files.keys()
'''
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Niam Moltta
# UY - 2017
# MIT License
# Measures of Central Tendency with Python
import math
import numpy as np
import pandas as pd
from scipy import stats
from scipy.stats import norm
import scipy.stats as st
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import re
import seaborn
import scipy.stats
print(' ')
print(' ')
print(' Welcome to MoCT.py')
print(' --by Niam Moltta--')
print (' ~~/\//V\ ')
print(' ')
print(' ')
print("Application: MEASURES OF CENTRAL TENDENCY.\n\nINSTRUCTIONS:\n\n- Make sure that the .csv file is in the same folder of this script.\n- To start, enter the name of the file without 'quotes' and ending with .csv\n Example: scores.csv\n- Enter 'ya' to analyze more columns or quit.\n- Returns measures of central tendency:\n N, mean, standard deviation, variance, standard error, etc...\n- Returns Normal Distribution graph.\n- Select sample, select point estimate for sampling distribution.\n- Returns z-score and p-value from z-table.\n- Returns Sampling Distribution graph.\n- Returns One tailed T-test: default alpha = 0.05\n- Returns acceptance/rejection of the null hypothesis.\n")
fhand = raw_input('Enter file name: ')
filecsv = str(fhand)
if filecsv == '':
print(' ')
print ('Ciao, human!')
print(' ')
exit()
data = pd.read_csv(filecsv)
print ' '
frame = pd.DataFrame(data)
coolist = frame.columns
columns = np.asarray(coolist)
while True:
print ' '
print 'Columns in', re.findall('(.+?).csv', filecsv), 'are:\n'
print columns
print ' '
hand = raw_input('Enter column header:\n\n')
column = str(hand)
if (column == 'ya') | (column == ''):
print ' '
print 'Hasta la vista, human.'
print ' '
exit()
else:
numbers = data[column]
data[column].fillna(0,inplace=True) # Missing values to zeros.
A = list()
for number in numbers :
value = float(number)
A.append(value)
sigma = sum(A) #summation
n = len(A) #total of elements
mean = sigma / n
Dev = list()
AbsDev = list()
SqDev = list()
for number in A :
val = number - mean
Dev.append(val) #Deviation from the mean
for element in Dev :
val = abs(element)
AbsDev.append(val) # Absolute Deviation
for element in AbsDev :
val = (element**2)
SqDev.append(val) #Square Deviations
SS = sum(SqDev) #Sum of Squares
Var = SS / n #Variance
StdDev = math.sqrt(Var) #Standard Deviation
StdE = StdDev / math.sqrt(n) #Standard Error
print ('---------------------------------------------')
print ('MEASURES OF CENTRAL TENDENCY for:'), column
print (' ')
print ('N ='), n
print ('Mean ='), mean
print ('Sum of Squares ='), SS
print ('Variance ='), Var
print ('Standard Deviation ='), StdDev
print ('Standard Error ='), StdE
print ('---------------------------------------------')
print(' ')
Array = np.asarray(A)
Array.sort() # sort in place; list.sort() returns None
legend = str(column) + ' distribution'
Mean = np.mean(Array)
Variance = np.var(Array)
Sigma = np.sqrt(Variance)
plt.figure(1)
plt.hist(Array, bins=20, normed=True)
plt.xlim((min(Array), max(Array)))
x = np.linspace(min(Array), max(Array), n)
fig = plt.plot(x, mlab.normpdf(x,Mean,Sigma))
plt.title(legend)
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.show(fig)
print ('To continue, save the figure if you want to keep it, then close it. \nYou can also zoom in or pan the graph to see it better \nusing the toolbar buttons.\n')
print ' '
while True:
sample = raw_input('Enter n value for sample distribution: ')
fh = str(sample)
if (fh == 'ya') | (fh == ''):
print ' '
break
if fh[0] == '#':
continue
newn = float(fh)
standarderror = StdDev / math.sqrt(newn)
print('----------------------------------------------------------------')
print 'The Standard Error for', newn,'is: ', standarderror
print('----------------------------------------------------------------')
print ' '
anyvalue = raw_input('Enter point estimate: ')
if anyvalue == '':
print ' '
break
if anyvalue[0] == '#':
continue
newmean = float(anyvalue)
rest = newmean - mean
zscore = rest / standarderror
print ' '
print '| The z-score for', anyvalue, 'is:', zscore, '|'
pvalue = st.norm.cdf(zscore)
print('---------------------------------------------')
print '| Z table value =', pvalue, '|'
print('---------------------------------------------')
prob = 1 - pvalue
print 'The probability of getting at least', anyvalue,'is:'
print ' '
print 'p =', prob
print ' '
print '-------------------------------------------------------------'
Devs = (1.96 * standarderror)
print 'Approximately 95% of the sample means fall within', Devs, '\nof', mean, '(Population mean)'
Dev1 = mean - Devs
Dev2 = mean + Devs
print ' '
print 'The 95% confidence interval is:', Dev1,'<', newmean, '<', Dev2
print ' '
Devss = (2.33 * standarderror)
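# Added note: 1.96 and 2.33 are the approximate standard-normal critical
# values used for the two-sided 95% and 98% intervals above (more precisely
# 1.960 and 2.326).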
''' Some notes for T-test:
Ho = (null hypothesis):
Mean = Intervention Mean:
- The sample mean falls somewhere outside the critical region.
Ha = (alternative hypothesis):
Mean != Intervention Mean (two-tailed),
Mean < Intervention Mean (one-tailed - right side),
Mean > Intervention Mean (one-tailed - left side):
- The sample mean falls somewhere in the critical region.'''
Dev3 = mean - Devss
Dev4 = mean + Devss
print 'The 98% confidence interval is:', Dev3,'<', newmean, '<', Dev4
print ' '
print 'The margin of error is:', Devs, 'for 95% \nand:', Devss,'for 98%'
print '--------------------------------------------------------------'
print ' '
if (zscore < 1.96) and (zscore > -1.96):
print 'Alpha = 0.05\nHo = Accepted' # Alpha is 0.05 because it is the most commonly used level
print ' '
print '--------------------------------------------------------------'
else:
print 'Alpha = 0.05\nHo = Rejected'
print ' '
print '--------------------------------------------------------------'
pdf2 = stats.norm.pdf(Array, mean, standarderror)
altn = int(newn)
legen = ("n = "+str(altn))
fig2 = plt.plot(Array, pdf2, label=legen)
plt.title("Sampling distribution\nalpha = 0.05")
plt.xlabel("Value")
plt.ylabel("Frequency")
altm = str(mean)
legenda = ("Mean =\n "+altm)
plt.axvline(x= mean, color='r', linestyle='dashed', label=legenda)
plt.axvline(x= Dev1, color ='g', linestyle='dashed', label=Dev1)
plt.axvline(x= Dev2, color = 'g', linestyle='dashed', label=Dev2)
zscorev = mean+(zscore*standarderror)
zscor = ("z-score =\n"+str(zscore))
plt.axvline(x= zscorev, color = 'purple', label=zscor)
print ('To continue, close the figure (save it first if you want to keep it). \nYou can also zoom in or pan the graph to see it better \nusing the toolbar buttons.\n')
plt.legend()
plt.show(fig2)
continue
print(' ')
print 'Ciao, human!'
print(' ')
|
|
import sys
gui = sys.modules["__main__"]
from utils.debug import debug
from utils.string_handling import *
if sys.platform.startswith('win') :
SYSTEM_PATH_SEPERATOR = '\\'
else :
# darwin, linux and other POSIX platforms all use '/'
SYSTEM_PATH_SEPERATOR = '/'
LOOKUP_OUTPUT = [0,0,0]
DYNA_1_POS = 0
DYNA_2_POS = 0
#POSITION_ARRAY = [[[-15,-105,-15,-105,38,83,1],[-60,80,2,-40,48,84,1],[80,-92,-2,85,58,85,1]]]
POSITION_ARRAY = []
POSITION_ARRAY_FLAGS = []
max_scalable_area = 3072
@debug()
def lookup(letter,directive):
#directive = 0 for pick and 1 for place
###direction = 0 for fwd and 1 for bckwrd
#letter needs to be local
#directive needs to be local
global LOOKUP_OUTPUT #delete later
if (letter == "C"):
sort(0,directive)
# for A, index is 0
if (letter == "A"):
sort(1,directive)
if (letter == "S"):
sort(2,directive)
if (letter == "R"):
sort(3,directive)
# CHANGE CHANGE CHANGE
@debug()
def sort(index,directive):
#directive needs to be local
global POSITION_ARRAY
global POSITION_KEY
# len_letters = len(POSITION_ARRAY) #length of letters
no_of_instances = len(POSITION_ARRAY[index])
maximum = []
for i in range(no_of_instances*2) :
maximum.append([])
#loop to find maximums
for i in range (no_of_instances):
for j in range (2):
if ( POSITION_ARRAY_FLAGS[ index ][ i ] != directive ):
#checking availability
#print("i = ",i," j = ",j)
x = POSITION_ARRAY[ index ][ i ][ (j*2) +0 ]
y = POSITION_ARRAY[ index ][ i ][ (j*2) +1 ]
maximum[(2*i)+j] = max_of_two(x,y)
#print("max[",(2*i)+j,"] = ",maximum[(2*i)+j])
else :
maximum[(2*i)+j] = max_scalable_area
#max value possible
# to find minimum
i_min = 0
j_min = 0
for i in range (no_of_instances):
for j in range (2):
if(maximum[(2*i)+j] < maximum[(2*i_min)+j_min]):
i_min = i
j_min = j
LOOKUP_OUTPUT[0] = POSITION_ARRAY[ index ][ i_min ][ (2*j_min) ]
LOOKUP_OUTPUT[1] = POSITION_ARRAY[ index ][ i_min ][ (2*j_min) + 1 ]
LOOKUP_OUTPUT[2] = POSITION_ARRAY[ index ][ i_min ][ j_min + 4 ]
POSITION_ARRAY_FLAGS[ index ][ i_min ] = directive
change_array(POSITION_ARRAY_FLAGS,0)
#@debug()
def max_of_two(x,y):
global DYNA_1_POS
global DYNA_2_POS
a = mod(DYNA_1_POS - x) #difference 1
b = mod(DYNA_2_POS - y) #difference 2
if (a<b):
a=b #if b is greater
return a
#@debug()
def mod(s):
if (s<0):
s*=-1
return s #make positive
######### ARRAY OPERATIONS ###########
def change_array(array,num):
#num=0 ==> POSITION_ARRAY_FLAGS
#num=1 ==> DISPLAY_AREA_POSITIONS
with open(gui.WORKING_DIRECTORY + 'core' + SYSTEM_PATH_SEPERATOR + 'variable_array.txt','r') as variable_array:
k=[]
for line in variable_array:
k.append(line)
s = str(array)
if(num != len(k)-1):
s += '\n'
k[num] = s
with open(gui.WORKING_DIRECTORY + 'core' + SYSTEM_PATH_SEPERATOR + 'variable_array.txt','w') as variable_array :
for i in range(len(k)):
variable_array.write(k[i])
########### RIYANSH CODES ##########
def init_lookup() :
def edit_position_array(logs) :
global POSITION_ARRAY
global POSITION_ARRAY_FLAGS
character_array = [] # list of characters : "a" or "b" or ...
array = [] # list of strings in the lookup.txt corresponding to
#characters in character_array
i = 0
while(i < len(logs)) :
i += skip_useless(logs,i)
#if reached end of file/
if(logs[i] == 'eof') :
break
#if commented line
if(logs[i] == '#') :
i += skip_until_character(logs,'\n',i)
break
#first character of a line : "a" or "b" or ...
character_array.append(logs[i])
i += skip_until_character(logs,'{',i)
i += 1
string = ''
while(logs[i] != '}') :
string += logs[i]
i += 1
i += 1
array.append(string)
if(i < len(logs)) :
i += skip_useless(logs,i)
else :
break
#character_array and array lists are filled
for i in range(len(array)) :
array[i] = remove_useless(array[i])
def decode_array(array) :
'''
isolate strings pertaining to each block for a character
returns an array of arrays that consist of strings, corresponding
to each block
'''
return_array = []
for i in range(len(array)) :
return_array.append([])
for i in range(len(array)) :
j = 0
while(j < len(array[i])) :
skip_useless(array[i],j)
skip_character(array[i],',',j)
j += skip_until_character(array[i],'[',j)
j += 1
string = ''
while(array[i][j] != ']') :
string += array[i][j]
j += 1
j += 1
return_array[i].append(string)
return(return_array)
array = decode_array(array)
#create a list of lists consisting of empty lists
#[ [ [],[],...],...]
return_array = []
for i in range(len(array)) :
return_array.append([])
for j in range(len(array[i])) :
return_array[i].append([])
#fill the return_array
for i in range(len(array)) :
for j in range(len(array[i])) :
k = 0
#fill the empty arrays with numbers (as strings)
while(k < len(array[i][j])) :
string = ''
while((k < len(array[i][j])) and (array[i][j][k] != ',')and(array[i][j][k] != '\n')) :
string += array[i][j][k]
k += 1
k += 1
return_array[i][j].append(string)
array = return_array
#convert the integers (as strings) in return array to integers
return_array = []
for i in range(len(array)) :
return_array.append([])
for j in range(len(array[i])) :
return_array[i].append([])
for i in range(len(array)) :
for j in range(len(array[i])) :
for k in range(len(array[i][j])) :
return_array[i][j].append(string_to_int(array[i][j][k]))
#------
POSITION_ARRAY = return_array
for character in POSITION_ARRAY :
array = []
for element in character :
array.append(element.pop())
POSITION_ARRAY_FLAGS.append(array)
with open(gui.WORKING_DIRECTORY + 'core' + SYSTEM_PATH_SEPERATOR + 'lookup.txt','r') as logs :
logs_ = logs.read()
edit_position_array(logs_)
logs.close()
print("Position array :- ",POSITION_ARRAY)
print
print('position array flags : ',POSITION_ARRAY_FLAGS)
print
change_array(POSITION_ARRAY_FLAGS,0)
|
|
# Hungarian algorithm (Kuhn-Munkres) for solving the linear sum assignment
# problem. Taken from scikit-learn. Based on original code by Brian Clapper,
# adapted to NumPy by Gael Varoquaux.
# Further improvements by Ben Root, Vlad Niculae and Lars Buitinck.
#
# Copyright (c) 2008 Brian M. Clapper <[email protected]>, Gael Varoquaux
# Author: Brian M. Clapper, Gael Varoquaux
# License: 3-clause BSD
import numpy as np
def linear_sum_assignment(cost_matrix):
"""Solve the linear sum assignment problem.
The linear sum assignment problem is also known as minimum weight matching
in bipartite graphs. A problem instance is described by a matrix C, where
each C[i,j] is the cost of matching vertex i of the first partite set
(a "worker") and vertex j of the second set (a "job"). The goal is to find
a complete assignment of workers to jobs of minimal cost.
Formally, let X be a boolean matrix where :math:`X[i,j] = 1` iff row i is
assigned to column j. Then the optimal assignment has cost
.. math::
\\min \\sum_i \\sum_j C_{i,j} X_{i,j}
    s.t. each row is assigned to at most one column, and each column to at
most one row.
This function can also solve a generalization of the classic assignment
problem where the cost matrix is rectangular. If it has more rows than
columns, then not every row needs to be assigned to a column, and vice
versa.
The method used is the Hungarian algorithm, also known as the Munkres or
Kuhn-Munkres algorithm.
Parameters
----------
cost_matrix : array
The cost matrix of the bipartite graph.
Returns
-------
row_ind, col_ind : array
An array of row indices and one of corresponding column indices giving
the optimal assignment. The cost of the assignment can be computed
as ``cost_matrix[row_ind, col_ind].sum()``. The row indices will be
sorted; in the case of a square cost matrix they will be equal to
``numpy.arange(cost_matrix.shape[0])``.
Notes
-----
.. versionadded:: 0.17.0
Examples
--------
::
>>> cost = np.array([[4, 1, 3], [2, 0, 5], [3, 2, 2]])
>>> row_ind, col_ind = linear_sum_assignment(cost)
>>> col_ind
array([1, 0, 2])
>>> cost[row_ind, col_ind].sum()
5
References
----------
    .. [991] `R.A. Pilgrim's page on Munkres' Assignment Algorithm
<http://csclab.murraystate.edu/bob.pilgrim/445/munkres.html>`_
.. [992] `Harold W. Kuhn. The Hungarian Method for the assignment problem.
Naval Research Logistics Quarterly, 2:83-97, 1955.
<http://doi.org/10.1002/nav.3800020109>`_
.. [993] `Harold W. Kuhn. Variants of the Hungarian method for assignment
problems. Naval Research Logistics Quarterly, 3: 253-258, 1956.
<http://doi.org/10.1002/nav.3800030404>`_
.. [994] `Munkres, J. Algorithms for the Assignment and Transportation Problems.
J. SIAM, 5(1):32-38, March, 1957.
<http://doi.org/10.1137/0105003>`_
.. [995] `Wikipedia entry for the Hungarian algorithm
<https://en.wikipedia.org/wiki/Hungarian_algorithm>`_
"""
cost_matrix = np.asarray(cost_matrix)
if len(cost_matrix.shape) != 2:
raise ValueError("expected a matrix (2-d array), got a %r array"
% (cost_matrix.shape,))
# The algorithm expects more columns than rows in the cost matrix.
if cost_matrix.shape[1] < cost_matrix.shape[0]:
cost_matrix = cost_matrix.T
transposed = True
else:
transposed = False
state = _Hungary(cost_matrix)
# No need to bother with assignments if one of the dimensions
# of the cost matrix is zero-length.
step = None if 0 in cost_matrix.shape else _step1
while step is not None:
step = step(state)
if transposed:
marked = state.marked.T
else:
marked = state.marked
return np.where(marked == 1)
class _Hungary(object):
"""State of the Hungarian algorithm.
Parameters
----------
cost_matrix : 2D matrix
The cost matrix. Must have shape[1] >= shape[0].
"""
def __init__(self, cost_matrix):
self.C = cost_matrix.copy()
n, m = self.C.shape
self.row_uncovered = np.ones(n, dtype=bool)
self.col_uncovered = np.ones(m, dtype=bool)
self.Z0_r = 0
self.Z0_c = 0
self.path = np.zeros((n + m, 2), dtype=int)
self.marked = np.zeros((n, m), dtype=int)
def clear_covers(self):
"""Clear all covered matrix cells"""
self.row_uncovered[:] = True
self.col_uncovered[:] = True
# Individual steps of the algorithm follow, as a state machine: they return
# the next step to be taken (function to be called), if any.
def _step1(state):
"""Steps 1 and 2 in the Wikipedia page."""
# Step 1: For each row of the matrix, find the smallest element and
# subtract it from every element in its row.
state.C -= state.C.min(axis=1)[:, np.newaxis]
# Step 2: Find a zero (Z) in the resulting matrix. If there is no
# starred zero in its row or column, star Z. Repeat for each element
# in the matrix.
for i, j in zip(*np.where(state.C == 0)):
if state.col_uncovered[j] and state.row_uncovered[i]:
state.marked[i, j] = 1
state.col_uncovered[j] = False
state.row_uncovered[i] = False
state.clear_covers()
return _step3
def _step3(state):
"""
Cover each column containing a starred zero. If n columns are covered,
the starred zeros describe a complete set of unique assignments.
In this case, Go to DONE, otherwise, Go to Step 4.
"""
marked = (state.marked == 1)
state.col_uncovered[np.any(marked, axis=0)] = False
if marked.sum() < state.C.shape[0]:
return _step4
def _step4(state):
"""
Find a noncovered zero and prime it. If there is no starred zero
in the row containing this primed zero, Go to Step 5. Otherwise,
cover this row and uncover the column containing the starred
zero. Continue in this manner until there are no uncovered zeros
left. Save the smallest uncovered value and Go to Step 6.
"""
# We convert to int as numpy operations are faster on int
C = (state.C == 0).astype(int)
covered_C = C * state.row_uncovered[:, np.newaxis]
covered_C *= np.asarray(state.col_uncovered, dtype=int)
n = state.C.shape[0]
m = state.C.shape[1]
while True:
# Find an uncovered zero
row, col = np.unravel_index(np.argmax(covered_C), (n, m))
if covered_C[row, col] == 0:
return _step6
else:
state.marked[row, col] = 2
# Find the first starred element in the row
star_col = np.argmax(state.marked[row] == 1)
if state.marked[row, star_col] != 1:
# Could not find one
state.Z0_r = row
state.Z0_c = col
return _step5
else:
col = star_col
state.row_uncovered[row] = False
state.col_uncovered[col] = True
covered_C[:, col] = C[:, col] * (
np.asarray(state.row_uncovered, dtype=int))
covered_C[row] = 0
def _step5(state):
"""
Construct a series of alternating primed and starred zeros as follows.
Let Z0 represent the uncovered primed zero found in Step 4.
Let Z1 denote the starred zero in the column of Z0 (if any).
Let Z2 denote the primed zero in the row of Z1 (there will always be one).
Continue until the series terminates at a primed zero that has no starred
zero in its column. Unstar each starred zero of the series, star each
primed zero of the series, erase all primes and uncover every line in the
matrix. Return to Step 3
"""
count = 0
path = state.path
path[count, 0] = state.Z0_r
path[count, 1] = state.Z0_c
while True:
# Find the first starred element in the col defined by
# the path.
row = np.argmax(state.marked[:, path[count, 1]] == 1)
if state.marked[row, path[count, 1]] != 1:
# Could not find one
break
else:
count += 1
path[count, 0] = row
path[count, 1] = path[count - 1, 1]
# Find the first prime element in the row defined by the
# first path step
col = np.argmax(state.marked[path[count, 0]] == 2)
if state.marked[row, col] != 2:
col = -1
count += 1
path[count, 0] = path[count - 1, 0]
path[count, 1] = col
# Convert paths
for i in range(count + 1):
if state.marked[path[i, 0], path[i, 1]] == 1:
state.marked[path[i, 0], path[i, 1]] = 0
else:
state.marked[path[i, 0], path[i, 1]] = 1
state.clear_covers()
# Erase all prime markings
state.marked[state.marked == 2] = 0
return _step3
def _step6(state):
"""
Add the value found in Step 4 to every element of each covered row,
and subtract it from every element of each uncovered column.
Return to Step 4 without altering any stars, primes, or covered lines.
"""
# the smallest uncovered value in the matrix
if np.any(state.row_uncovered) and np.any(state.col_uncovered):
minval = np.min(state.C[state.row_uncovered], axis=0)
minval = np.min(minval[state.col_uncovered])
state.C[~state.row_uncovered] += minval
state.C[:, state.col_uncovered] -= minval
return _step4
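# Hedged usage sketch (added for illustration; not part of the original
# scikit-learn code). A small rectangular instance of the assignment problem
# described in the docstring above; the cost values are made up.
if __name__ == '__main__':
    demo_cost = np.array([[4, 1, 3],
                          [2, 0, 5]])  # 2 workers, 3 jobs
    demo_rows, demo_cols = linear_sum_assignment(demo_cost)
    # With more columns than rows, every row is assigned; the optimal total
    # cost for this matrix is 3.
    print(demo_rows, demo_cols, demo_cost[demo_rows, demo_cols].sum())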
|
|
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2011, Evan Leis
#
# Distributed under the terms of the Lesser GNU General Public License (LGPL)
#-----------------------------------------------------------------------------
'''
Created on May 9, 2011
@author: evan
'''
from kayako.tests import KayakoAPITest
class TestStaff(KayakoAPITest):
EMAIL = '[email protected]'
def tearDown(self):
from kayako.objects import Staff
api = self.api
test_staff = api.filter(Staff, email=self.EMAIL)
for staff in test_staff:
staff.delete()
super(TestStaff, self).tearDown()
def test_add_get_nonexistant(self):
from kayako.objects import Staff
obj = self.api.get(Staff, '123123')
assert obj is None
def test_add_get_bare(self):
from kayako.objects import Staff
obj = self.api.create(Staff, firstname='DELETEME', lastname='DELETEME', username='DELETEME', email=self.EMAIL, password='DELETEME', staffgroupid=1)
obj.add()
obj2 = self.api.get(Staff, obj.id) # Shouldn't raise errors
obj.delete()
assert obj2 is not None
def test_add_get_full(self):
from kayako.objects import Staff
obj = self.api.create(Staff, firstname='DELETEME', lastname='DELETEME', username='DELETEME', email=self.EMAIL, password='DELETEME', staffgroupid=1,
designation='TEST', mobilenumber='123-456-7890', signature='TEST', isenabled=False, greeting='Mr.', timezone='MST', enabledst=True)
obj.add()
obj2 = self.api.get(Staff, obj.id) # Shouldn't raise errors
obj.delete()
assert obj2 is not None
def test_get_staff(self):
from kayako.objects import Staff
d = self.api.get(Staff, 1)
assert 'Staff ' in str(d)
self.assertEqual(d.id, 1)
def test_add_staff_missing_firstname(self):
from kayako.exception import KayakoRequestError
from kayako.objects import Staff
staff = self.api.create(Staff)
staff.lastname = 'test_lastname'
staff.username = 'test_username'
staff.password = 'test_password'
staff.staffgroupid = 1
staff.email = self.EMAIL
self.assertRaises(KayakoRequestError, staff.add)
def test_add_staff_missing_lastname(self):
from kayako.exception import KayakoRequestError
from kayako.objects import Staff
staff = self.api.create(Staff)
staff.firstname = 'test_firstname'
staff.username = 'test_username'
staff.password = 'test_password'
staff.staffgroupid = 1
staff.email = self.EMAIL
self.assertRaises(KayakoRequestError, staff.add)
def test_add_staff_missing_username(self):
from kayako.exception import KayakoRequestError
from kayako.objects import Staff
staff = self.api.create(Staff)
staff.firstname = 'test_firstname'
staff.lastname = 'test_lastname'
staff.password = 'test_password'
staff.staffgroupid = 1
staff.email = self.EMAIL
self.assertRaises(KayakoRequestError, staff.add)
def test_add_staff_missing_password(self):
from kayako.exception import KayakoRequestError
from kayako.objects import Staff
staff = self.api.create(Staff)
staff.firstname = 'test_firstname'
staff.lastname = 'test_lastname'
staff.username = 'test_username'
staff.staffgroupid = 1
staff.email = self.EMAIL
self.assertRaises(KayakoRequestError, staff.add)
def test_add_staff_missing_staffgroupid(self):
from kayako.exception import KayakoRequestError
from kayako.objects import Staff
staff = self.api.create(Staff)
staff.firstname = 'test_firstname'
staff.lastname = 'test_lastname'
staff.username = 'test_username'
staff.password = 'test_password'
staff.email = self.EMAIL
self.assertRaises(KayakoRequestError, staff.add)
def test_add_staff_missing_email(self):
from kayako.exception import KayakoRequestError
from kayako.objects import Staff
staff = self.api.create(Staff)
staff.firstname = 'test_firstname'
staff.lastname = 'test_lastname'
staff.username = 'test_username'
staff.password = 'test_password'
staff.staffgroupid = 1
self.assertRaises(KayakoRequestError, staff.add)
def test_get_all(self):
from kayako.objects import Staff
result = self.api.get_all(Staff)
assert len(result)
def test_add_save_delete(self):
from kayako.core.lib import UnsetParameter
from kayako.objects import Staff
staff = self.api.create(Staff)
staff.firstname = 'DELETEME'
staff.lastname = 'test_lastname'
staff.username = 'test_username'
staff.password = 'test_password'
staff.staffgroupid = 1
staff.email = self.EMAIL
staff.add()
assert staff.id is not UnsetParameter
staff.firstname = 'DELETEME2'
staff.save()
staff.delete()
found_error = False
all_staff = self.api.get_all(Staff)
for staff in all_staff:
if staff.email == self.EMAIL:
staff.delete()
found_error = True
if found_error:
assert False, 'Found an error, Staff did not delete correctly.'
def test_delete_unadded(self):
from kayako.exception import KayakoRequestError
from kayako.objects import Staff
staff = self.api.create(Staff)
self.assertRaises(KayakoRequestError, staff.delete)
class TestStaffGroup(KayakoAPITest):
def tearDown(self):
from kayako.objects import StaffGroup
all_groups = self.api.get_all(StaffGroup)
for group in all_groups:
if group.title == 'DELETEME' or group.title == 'DELETEME2':
group.delete()
super(TestStaffGroup, self).tearDown()
def test_add_get_nonexistant(self):
from kayako.objects import StaffGroup
obj = self.api.get(StaffGroup, 123123123)
assert obj is None
def test_add_get_bare(self):
from kayako.objects import StaffGroup
obj = self.api.create(StaffGroup, title='DELETEME', isadmin=False)
obj.add()
obj2 = self.api.get(StaffGroup, obj.id) # Shouldn't raise errors
obj.delete()
assert obj2 is not None
def test_add_get_full(self):
from kayako.objects import StaffGroup
obj = self.api.create(StaffGroup, title='DELETEME', isadmin=False)
obj.add()
obj2 = self.api.get(StaffGroup, obj.id) # Shouldn't raise errors
obj.delete()
assert obj2 is not None
def test_get_staffgroup(self):
from kayako.objects import StaffGroup
d = self.api.get(StaffGroup, 1)
self.assertEqual(d.id, 1)
def test_add_staff_missing_title(self):
from kayako.exception import KayakoRequestError
from kayako.objects import StaffGroup
staffgroup = self.api.create(StaffGroup)
staffgroup.isadmin = 0
self.assertRaises(KayakoRequestError, staffgroup.add)
def test_add_staff_missing_isadmin(self):
from kayako.exception import KayakoRequestError
from kayako.objects import StaffGroup
staffgroup = self.api.create(StaffGroup)
staffgroup.title = 'test_title'
self.assertRaises(KayakoRequestError, staffgroup.add)
def test_get_all(self):
from kayako.objects import StaffGroup
result = self.api.get_all(StaffGroup)
assert len(result)
def test_add_save_delete(self):
from kayako.core.lib import UnsetParameter
from kayako.objects import StaffGroup
staffgroup = self.api.create(StaffGroup)
staffgroup.title = 'DELETEME'
staffgroup.isadmin = 0
staffgroup.add()
assert staffgroup.id is not UnsetParameter
staffgroup.title = 'DELETEME2'
staffgroup.save()
staffgroup.delete()
found_error = False
all_staff_groups = self.api.get_all(StaffGroup)
for staffgroup in all_staff_groups:
if staffgroup.title == 'DELETEME' or staffgroup.title == 'DELETEME2':
staffgroup.delete()
found_error = True
if found_error:
assert False, 'Found an error, StaffGroup did not delete correctly.'
def test_delete_unadded(self):
from kayako.exception import KayakoRequestError
from kayako.objects import StaffGroup
staffgroup = self.api.create(StaffGroup)
self.assertRaises(KayakoRequestError, staffgroup.delete)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various high level TF models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.ops import autoencoder_ops
from tensorflow.contrib.learn.python.learn.ops import dnn_ops
from tensorflow.contrib.learn.python.learn.ops import losses_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops as array_ops_
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope as vs
def linear_regression_zero_init(X, y):
# pylint: disable=invalid-name
"""Linear regression subgraph with zero-value initial weights and bias.
Args:
X: tensor or placeholder for input features.
y: tensor or placeholder for target.
Returns:
Predictions and loss tensors.
"""
return linear_regression(X, y, init_mean=0.0, init_stddev=0.0)
def logistic_regression_zero_init(X, y):
# pylint: disable=invalid-name
"""Logistic regression subgraph with zero-value initial weights and bias.
Args:
X: tensor or placeholder for input features.
y: tensor or placeholder for target.
Returns:
Predictions and loss tensors.
"""
return logistic_regression(X, y, init_mean=0.0, init_stddev=0.0)
def linear_regression(X, y, init_mean=None, init_stddev=1.0):
# pylint: disable=invalid-name
"""Creates linear regression TensorFlow subgraph.
Args:
X: tensor or placeholder for input features.
y: tensor or placeholder for target.
init_mean: the mean value to use for initialization.
    init_stddev: the standard deviation to use for initialization.
Returns:
Predictions and loss tensors.
Side effects:
The variables linear_regression.weights and linear_regression.bias are
initialized as follows. If init_mean is not None, then initialization
will be done using a random normal initializer with the given init_mean
    and init_stddev. (These may be set to 0.0 each if a zero initialization
    is desirable for convex use cases.) If init_mean is None, then the
    uniform_unit_scaling_initializer will be used.
"""
with vs.variable_scope('linear_regression'):
logging_ops.histogram_summary('linear_regression.X', X)
logging_ops.histogram_summary('linear_regression.y', y)
y_shape = y.get_shape()
if len(y_shape) == 1:
output_shape = 1
else:
output_shape = y_shape[1]
# Set up the requested initialization.
if init_mean is None:
weights = vs.get_variable('weights', [X.get_shape()[1], output_shape])
bias = vs.get_variable('bias', [output_shape])
else:
weights = vs.get_variable('weights', [X.get_shape()[1], output_shape],
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev))
bias = vs.get_variable('bias', [output_shape],
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev))
logging_ops.histogram_summary('linear_regression.weights', weights)
logging_ops.histogram_summary('linear_regression.bias', bias)
return losses_ops.mean_squared_error_regressor(X, y, weights, bias)
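# Hedged usage sketch (not part of the original module; the placeholder calls
# below assume the tf.placeholder API of that TensorFlow generation):
#   X = tf.placeholder(dtypes.float32, [None, n_features])
#   y = tf.placeholder(dtypes.float32, [None, 1])
#   predictions, loss = linear_regression(X, y, init_mean=0.0, init_stddev=0.01)
# linear_regression_zero_init above is simply this call with both parameters
# fixed at 0.0.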
def logistic_regression(X,
y,
class_weight=None,
init_mean=None,
init_stddev=1.0):
# pylint: disable=invalid-name
"""Creates logistic regression TensorFlow subgraph.
Args:
X: tensor or placeholder for input features,
shape should be [batch_size, n_features].
y: tensor or placeholder for target,
shape should be [batch_size, n_classes].
class_weight: tensor, [n_classes], where for each class
it has weight of the class. If not provided
will check if graph contains tensor `class_weight:0`.
If that is not provided either all ones are used.
init_mean: the mean value to use for initialization.
    init_stddev: the standard deviation to use for initialization.
Returns:
Predictions and loss tensors.
Side effects:
The variables linear_regression.weights and linear_regression.bias are
initialized as follows. If init_mean is not None, then initialization
will be done using a random normal initializer with the given init_mean
    and init_stddev. (These may be set to 0.0 each if a zero initialization
    is desirable for convex use cases.) If init_mean is None, then the
    uniform_unit_scaling_initializer will be used.
"""
with vs.variable_scope('logistic_regression'):
logging_ops.histogram_summary('%s.X' % vs.get_variable_scope().name, X)
logging_ops.histogram_summary('%s.y' % vs.get_variable_scope().name, y)
# Set up the requested initialization.
if init_mean is None:
weights = vs.get_variable('weights',
[X.get_shape()[1], y.get_shape()[-1]])
bias = vs.get_variable('bias', [y.get_shape()[-1]])
else:
weights = vs.get_variable('weights',
[X.get_shape()[1], y.get_shape()[-1]],
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev))
bias = vs.get_variable('bias', [y.get_shape()[-1]],
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev))
logging_ops.histogram_summary('%s.weights' % vs.get_variable_scope().name,
weights)
logging_ops.histogram_summary('%s.bias' % vs.get_variable_scope().name,
bias)
# If no class weight provided, try to retrieve one from pre-defined
# tensor name in the graph.
if not class_weight:
try:
class_weight = ops.get_default_graph().get_tensor_by_name(
'class_weight:0')
except KeyError:
pass
return losses_ops.softmax_classifier(X,
y,
weights,
bias,
class_weight=class_weight)
def get_dnn_model(hidden_units, target_predictor_fn, dropout=None):
"""Returns a function that creates a DNN TensorFlow subgraph.
Args:
hidden_units: List of values of hidden units for layers.
target_predictor_fn: Function that will predict target from input
features. This can be logistic regression,
linear regression or any other model,
that takes X, y and returns predictions and loss
tensors.
dropout: When not none, causes dropout regularization to be used,
with the specified probability of removing a given coordinate.
Returns:
A function that creates the subgraph.
"""
def dnn_estimator(X, y):
# pylint: disable=invalid-name
"""DNN estimator with target predictor function on top."""
layers = dnn_ops.dnn(X, hidden_units, dropout=dropout)
return target_predictor_fn(layers, y)
return dnn_estimator
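# Hedged usage sketch (not part of the original module): get_dnn_model only
# builds a closure, so wiring it into a graph looks roughly like
#   model_fn = get_dnn_model([128, 64], logistic_regression, dropout=0.5)
#   predictions, loss = model_fn(X, y)
# where X and y are the caller's feature and target tensors.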
def get_autoencoder_model(hidden_units, target_predictor_fn,
activation, add_noise=None, dropout=None):
"""Returns a function that creates a Autoencoder TensorFlow subgraph.
Args:
hidden_units: List of values of hidden units for layers.
target_predictor_fn: Function that will predict target from input
features. This can be logistic regression,
linear regression or any other model,
that takes X, y and returns predictions and loss
tensors.
activation: activation function used to map inner latent layer onto
reconstruction layer.
add_noise: a function that adds noise to tensor_in,
e.g. def add_noise(x):
return(x + np.random.normal(0, 0.1, (len(x), len(x[0]))))
dropout: When not none, causes dropout regularization to be used,
with the specified probability of removing a given coordinate.
Returns:
A function that creates the subgraph.
"""
def dnn_autoencoder_estimator(X):
# pylint: disable=invalid-name
"""Autoencoder estimator with target predictor function on top."""
encoder, decoder = autoencoder_ops.dnn_autoencoder(
X, hidden_units, activation,
add_noise=add_noise, dropout=dropout)
return encoder, decoder, target_predictor_fn(X, decoder)
return dnn_autoencoder_estimator
## This will be in Tensorflow 0.7.
## TODO(ilblackdragon): Clean this up when it's released
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
lengths: A tensor of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply
reverses the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
for input_ in input_seq:
input_.set_shape(input_.get_shape().with_rank(2))
# Join into (time, batch_size, depth)
s_joined = array_ops_.pack(input_seq)
# Reverse along dimension 0
s_reversed = array_ops_.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops_.unpack(s_reversed)
return result
def bidirectional_rnn(cell_fw,
cell_bw,
inputs,
initial_state_fw=None,
initial_state_bw=None,
dtype=None,
sequence_length=None,
scope=None):
"""Creates a bidirectional recurrent neural network.
Similar to the unidirectional case (rnn) but takes input and builds
independent forward and backward RNNs with the final forward and backward
outputs depth-concatenated, such that the output will have the format
[time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states
are ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not
given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, cell.input_size].
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
[batch_size x cell.state_size].
initial_state_bw: (optional) Same as for initial_state_fw.
dtype: (optional) The data type for the initial state. Required if
either of the initial states are not provided.
sequence_length: (optional) An int64 vector (tensor) of size
[batch_size],
containing the actual lengths for each of the sequences.
scope: VariableScope for the created subgraph; defaults to "BiRNN"
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input), which
are depth-concatenated forward and backward outputs
state is the concatenated final state of the forward and backward RNN
Raises:
TypeError: If "cell_fw" or "cell_bw" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell_fw, nn.rnn_cell.RNNCell):
raise TypeError('cell_fw must be an instance of RNNCell')
if not isinstance(cell_bw, nn.rnn_cell.RNNCell):
raise TypeError('cell_bw must be an instance of RNNCell')
if not isinstance(inputs, list):
raise TypeError('inputs must be a list')
if not inputs:
raise ValueError('inputs must not be empty')
name = scope or 'BiRNN'
# Forward direction
with vs.variable_scope(name + '_FW'):
output_fw, state_fw = nn.rnn(cell_fw, inputs, initial_state_fw, dtype,
sequence_length)
# Backward direction
with vs.variable_scope(name + '_BW'):
tmp, state_bw = nn.rnn(cell_bw, _reverse_seq(inputs, sequence_length),
initial_state_bw, dtype, sequence_length)
output_bw = _reverse_seq(tmp, sequence_length)
# Concat each of the forward/backward outputs
outputs = [array_ops_.concat(1, [fw, bw])
for fw, bw in zip(output_fw, output_bw)]
return outputs, array_ops_.concat(1, [state_fw, state_bw])
# End of Tensorflow 0.7
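# Hedged usage sketch (not part of the original module; cell construction uses
# the nn.rnn_cell API already imported above):
#   cell_fw = nn.rnn_cell.GRUCell(rnn_size)
#   cell_bw = nn.rnn_cell.GRUCell(rnn_size)
#   outputs, state = bidirectional_rnn(cell_fw, cell_bw, inputs,
#                                      dtype=dtypes.float32,
#                                      sequence_length=sequence_length)
# Each element of outputs then has depth
# cell_fw.output_size + cell_bw.output_size, as described in the docstring.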
def get_rnn_model(rnn_size, cell_type, num_layers, input_op_fn, bidirectional,
target_predictor_fn, sequence_length, initial_state):
"""Returns a function that creates a RNN TensorFlow subgraph.
Args:
rnn_size: The size for rnn cell, e.g. size of your word embeddings.
cell_type: The type of rnn cell, including rnn, gru, and lstm.
num_layers: The number of layers of the rnn model.
input_op_fn: Function that will transform the input tensor, such as
creating word embeddings, byte list, etc. This takes
an argument X for input and returns transformed X.
    bidirectional: boolean, whether this is a bidirectional rnn.
target_predictor_fn: Function that will predict target from input
features. This can be logistic regression,
linear regression or any other model,
that takes X, y and returns predictions and loss
tensors.
sequence_length: If sequence_length is provided, dynamic calculation is
performed.
This saves computational time when unrolling past max
sequence length.
Required for bidirectional RNNs.
initial_state: An initial state for the RNN. This must be a tensor of
appropriate type and shape [batch_size x cell.state_size].
Returns:
A function that creates the subgraph.
"""
def rnn_estimator(X, y):
# pylint: disable=invalid-name
"""RNN estimator with target predictor function on top."""
X = input_op_fn(X)
if cell_type == 'rnn':
cell_fn = nn.rnn_cell.BasicRNNCell
elif cell_type == 'gru':
cell_fn = nn.rnn_cell.GRUCell
elif cell_type == 'lstm':
cell_fn = nn.rnn_cell.BasicLSTMCell
else:
raise ValueError('cell_type {} is not supported. '.format(cell_type))
if bidirectional:
# forward direction cell
rnn_fw_cell = nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
# backward direction cell
rnn_bw_cell = nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
# pylint: disable=unexpected-keyword-arg, no-value-for-parameter
_, encoding = bidirectional_rnn(rnn_fw_cell,
rnn_bw_cell,
X,
dtype=dtypes.float32,
sequence_length=sequence_length,
initial_state_fw=initial_state,
initial_state_bw=initial_state)
else:
cell = nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
_, encoding = nn.rnn(cell,
X,
dtype=dtypes.float32,
sequence_length=sequence_length,
initial_state=initial_state)
return target_predictor_fn(encoding, y)
return rnn_estimator
|
|
from django.db import NotSupportedError
from django.db.models import Index
from django.utils.functional import cached_property
__all__ = [
'BloomIndex', 'BrinIndex', 'BTreeIndex', 'GinIndex', 'GistIndex',
'HashIndex', 'SpGistIndex',
]
class PostgresIndex(Index):
@cached_property
def max_name_length(self):
# Allow an index name longer than 30 characters when the suffix is
# longer than the usual 3 character limit. The 30 character limit for
# cross-database compatibility isn't applicable to PostgreSQL-specific
# indexes.
return Index.max_name_length - len(Index.suffix) + len(self.suffix)
def create_sql(self, model, schema_editor, using='', **kwargs):
self.check_supported(schema_editor)
statement = super().create_sql(model, schema_editor, using=' USING %s' % self.suffix, **kwargs)
with_params = self.get_with_params()
if with_params:
statement.parts['extra'] = 'WITH (%s) %s' % (
', '.join(with_params),
statement.parts['extra'],
)
return statement
def check_supported(self, schema_editor):
pass
def get_with_params(self):
return []
class BloomIndex(PostgresIndex):
suffix = 'bloom'
def __init__(self, *, length=None, columns=(), **kwargs):
super().__init__(**kwargs)
if len(self.fields) > 32:
raise ValueError('Bloom indexes support a maximum of 32 fields.')
if not isinstance(columns, (list, tuple)):
raise ValueError('BloomIndex.columns must be a list or tuple.')
if len(columns) > len(self.fields):
raise ValueError(
'BloomIndex.columns cannot have more values than fields.'
)
if not all(0 < col <= 4095 for col in columns):
raise ValueError(
'BloomIndex.columns must contain integers from 1 to 4095.',
)
if length is not None and not 0 < length <= 4096:
raise ValueError(
'BloomIndex.length must be None or an integer from 1 to 4096.',
)
self.length = length
self.columns = columns
def deconstruct(self):
path, args, kwargs = super().deconstruct()
if self.length is not None:
kwargs['length'] = self.length
if self.columns:
kwargs['columns'] = self.columns
return path, args, kwargs
def check_supported(self, schema_editor):
if not schema_editor.connection.features.has_bloom_index:
raise NotSupportedError('Bloom indexes require PostgreSQL 9.6+.')
def get_with_params(self):
with_params = []
if self.length is not None:
with_params.append('length = %d' % self.length)
if self.columns:
with_params.extend(
'col%d = %d' % (i, v)
for i, v in enumerate(self.columns, start=1)
)
return with_params
class BrinIndex(PostgresIndex):
suffix = 'brin'
def __init__(self, *, autosummarize=None, pages_per_range=None, **kwargs):
if pages_per_range is not None and pages_per_range <= 0:
raise ValueError('pages_per_range must be None or a positive integer')
self.autosummarize = autosummarize
self.pages_per_range = pages_per_range
super().__init__(**kwargs)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
if self.autosummarize is not None:
kwargs['autosummarize'] = self.autosummarize
if self.pages_per_range is not None:
kwargs['pages_per_range'] = self.pages_per_range
return path, args, kwargs
def check_supported(self, schema_editor):
if self.autosummarize and not schema_editor.connection.features.has_brin_autosummarize:
raise NotSupportedError('BRIN option autosummarize requires PostgreSQL 10+.')
def get_with_params(self):
with_params = []
if self.autosummarize is not None:
with_params.append('autosummarize = %s' % ('on' if self.autosummarize else 'off'))
if self.pages_per_range is not None:
with_params.append('pages_per_range = %d' % self.pages_per_range)
return with_params
class BTreeIndex(PostgresIndex):
suffix = 'btree'
def __init__(self, *, fillfactor=None, **kwargs):
self.fillfactor = fillfactor
super().__init__(**kwargs)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
if self.fillfactor is not None:
kwargs['fillfactor'] = self.fillfactor
return path, args, kwargs
def get_with_params(self):
with_params = []
if self.fillfactor is not None:
with_params.append('fillfactor = %d' % self.fillfactor)
return with_params
class GinIndex(PostgresIndex):
suffix = 'gin'
def __init__(self, *, fastupdate=None, gin_pending_list_limit=None, **kwargs):
self.fastupdate = fastupdate
self.gin_pending_list_limit = gin_pending_list_limit
super().__init__(**kwargs)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
if self.fastupdate is not None:
kwargs['fastupdate'] = self.fastupdate
if self.gin_pending_list_limit is not None:
kwargs['gin_pending_list_limit'] = self.gin_pending_list_limit
return path, args, kwargs
def get_with_params(self):
with_params = []
if self.gin_pending_list_limit is not None:
with_params.append('gin_pending_list_limit = %d' % self.gin_pending_list_limit)
if self.fastupdate is not None:
with_params.append('fastupdate = %s' % ('on' if self.fastupdate else 'off'))
return with_params
class GistIndex(PostgresIndex):
suffix = 'gist'
def __init__(self, *, buffering=None, fillfactor=None, **kwargs):
self.buffering = buffering
self.fillfactor = fillfactor
super().__init__(**kwargs)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
if self.buffering is not None:
kwargs['buffering'] = self.buffering
if self.fillfactor is not None:
kwargs['fillfactor'] = self.fillfactor
return path, args, kwargs
def get_with_params(self):
with_params = []
if self.buffering is not None:
with_params.append('buffering = %s' % ('on' if self.buffering else 'off'))
if self.fillfactor is not None:
with_params.append('fillfactor = %d' % self.fillfactor)
return with_params
class HashIndex(PostgresIndex):
suffix = 'hash'
def __init__(self, *, fillfactor=None, **kwargs):
self.fillfactor = fillfactor
super().__init__(**kwargs)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
if self.fillfactor is not None:
kwargs['fillfactor'] = self.fillfactor
return path, args, kwargs
def get_with_params(self):
with_params = []
if self.fillfactor is not None:
with_params.append('fillfactor = %d' % self.fillfactor)
return with_params
class SpGistIndex(PostgresIndex):
suffix = 'spgist'
def __init__(self, *, fillfactor=None, **kwargs):
self.fillfactor = fillfactor
super().__init__(**kwargs)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
if self.fillfactor is not None:
kwargs['fillfactor'] = self.fillfactor
return path, args, kwargs
def get_with_params(self):
with_params = []
if self.fillfactor is not None:
with_params.append('fillfactor = %d' % self.fillfactor)
return with_params
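# Hedged usage sketch (not part of the original module; the model and field
# names are illustrative only): these index classes are declared through a
# model's Meta options, e.g.
#
#   from django.contrib.postgres.fields import JSONField
#   from django.contrib.postgres.indexes import BrinIndex, GinIndex
#
#   class Measurement(models.Model):
#       tags = JSONField()
#       recorded_at = models.DateTimeField()
#
#       class Meta:
#           indexes = [
#               GinIndex(fields=['tags'], name='measurement_tags_gin'),
#               BrinIndex(fields=['recorded_at'], pages_per_range=64,
#                         name='measurement_recorded_brin'),
#           ]
#
# The values produced by get_with_params() end up in the WITH (...) clause
# that create_sql() appends to the CREATE INDEX statement.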
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilities to aid in testing mapreduces."""
import base64
import collections
import logging
import os
import re
from google.appengine.ext.mapreduce import main
from google.appengine.ext.mapreduce import model
from google.appengine.ext.webapp import mock_webapp
_LOGGING_LEVEL = logging.ERROR
logging.getLogger().setLevel(_LOGGING_LEVEL)
def decode_task_payload(task):
"""Decodes POST task payload.
This can only decode POST payload for a normal task. For huge task,
use model.HugeTask.decode_payload.
Args:
task: a dict representing a taskqueue task as documented in taskqueue_stub.
Returns:
parameter_name -> parameter_value dict. If multiple parameter values are
present, then parameter_value will be a list.
"""
if not task:
return {}
body = base64.b64decode(task["body"])
return model.HugeTask._decode_payload(body)
def execute_task(task, retries=0, handlers_map=None):
"""Execute mapper's executor task.
This will try to determine the correct mapper handler for the task, will set
up all mock environment necessary for task execution, and execute the task
itself.
This function can be used for functional-style testing of functionality
depending on mapper framework.
Args:
task: a taskqueue task.
retries: the current retry of this task.
handlers_map: a dict from url regex to handler.
Returns:
the handler instance used for this task.
Raises:
Exception: whatever the task raises.
"""
if not handlers_map:
handlers_map = main.create_handlers_map()
url = task["url"]
handler = None
params = []
for (re_str, handler_class) in handlers_map:
re_str = "^" + re_str + "($|\\?)"
m = re.match(re_str, url)
if m:
params = m.groups()[:-1]
break
else:
raise Exception("Can't determine handler for %s" % task)
request = mock_webapp.MockRequest()
request.set_url(url)
version = "mr-test-support-version.1"
module = "mr-test-support-module"
default_version_hostname = "mr-test-support.appspot.com"
host = "%s.%s.%s" % (version.split(".")[0],
module,
default_version_hostname)
if "CURRENT_VERSION_ID" not in os.environ:
request.environ["CURRENT_VERSION_ID"] = version
if "DEFAULT_VERSION_HOSTNAME" not in os.environ:
request.environ["DEFAULT_VERSION_HOSTNAME"] = (
default_version_hostname)
if "CURRENT_MODULE_ID" not in os.environ:
request.environ["CURRENT_MODULE_ID"] = module
if "HTTP_HOST" not in os.environ:
request.environ["HTTP_HOST"] = host
for k, v in task.get("headers", []):
request.headers[k] = v
environ_key = "HTTP_" + k.replace("-", "_").upper()
request.environ[environ_key] = v
request.headers["X-AppEngine-TaskExecutionCount"] = retries
request.environ["HTTP_X_APPENGINE_TASKNAME"] = (
task.get("name", "default_task_name"))
request.environ["HTTP_X_APPENGINE_QUEUENAME"] = (
task.get("queue_name", "default"))
request.environ["PATH_INFO"] = request.path
if task["method"] == "POST":
request.body = base64.b64decode(task["body"])
for k, v in decode_task_payload(task).iteritems():
request.set(k, v)
response = mock_webapp.MockResponse()
saved_os_environ = os.environ
copy_os_environ = dict(os.environ)
copy_os_environ.update(request.environ)
try:
os.environ = copy_os_environ
handler = handler_class(request, response)
except TypeError:
handler = handler_class()
handler.initialize(request, response)
finally:
os.environ = saved_os_environ
try:
os.environ = copy_os_environ
if task["method"] == "POST":
handler.post(*params)
elif task["method"] == "GET":
handler.get(*params)
else:
raise Exception("Unsupported method: %s" % task.method)
finally:
os.environ = saved_os_environ
if handler.response.status != 200:
raise Exception("Handler failure: %s (%s). \nTask: %s\nHandler: %s" %
(handler.response.status,
handler.response.status_message,
task,
handler))
return handler
def execute_all_tasks(taskqueue, queue="default", handlers_map=None):
"""Run and remove all tasks in the taskqueue.
Args:
taskqueue: An instance of taskqueue stub.
queue: Queue name to run all tasks from.
    handlers_map: see main.create_handlers_map.
Returns:
task_run_counts: a dict from handler class to the number of tasks
it handled.
"""
tasks = taskqueue.GetTasks(queue)
taskqueue.FlushQueue(queue)
task_run_counts = collections.defaultdict(lambda: 0)
for task in tasks:
retries = 0
while True:
try:
handler = execute_task(task, retries, handlers_map=handlers_map)
task_run_counts[handler.__class__] += 1
break
except Exception, e:
retries += 1
if retries > 100:
logging.debug("Task %s failed for too many times. Giving up.",
task["name"])
raise
logging.debug(
"Task %s is being retried for the %s time",
task["name"],
retries)
logging.debug(e)
return task_run_counts
def execute_until_empty(taskqueue, queue="default", handlers_map=None):
"""Execute taskqueue tasks until it becomes empty.
Args:
taskqueue: An instance of taskqueue stub.
queue: Queue name to run all tasks from.
    handlers_map: see main.create_handlers_map.
Returns:
task_run_counts: a dict from handler class to the number of tasks
it handled.
"""
task_run_counts = collections.defaultdict(lambda: 0)
while taskqueue.GetTasks(queue):
new_counts = execute_all_tasks(taskqueue, queue, handlers_map)
for handler_cls in new_counts:
task_run_counts[handler_cls] += new_counts[handler_cls]
return task_run_counts
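# Hedged usage sketch (not part of the original module; the testbed setup is
# an assumption about the calling test case):
#   taskqueue_stub = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
#   counts = execute_until_empty(taskqueue_stub)
# after kicking off a mapreduce; the returned dict maps each handler class to
# the number of tasks it processed.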
|
|
# $Filename$
# $Authors$
#
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements the data adapter to access files/directories via SFTP.
"""
__version__ = "$Revision-Id:$"
import errno
import stat
import StringIO
import sys
import tempfile
from paramiko.ssh_exception import SSHException
from datafinder.persistence.error import PersistenceError
from datafinder.persistence.data import datastorer
from datafinder.persistence.adapters.sftp import constants
class SftpDataAdapter(datastorer.NullDataStorer):
"""
@note: Links are not supported.
@note: Copying of large collections might be inefficient
because files are transferred to the client and then
back to the server. However, this is a limitation of SFTP.
@see: For interface details see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>}
"""
def __init__(self, identifier, persistenceIdentifier,
connectionPool, factory, idMapper):
datastorer.NullDataStorer.__init__(self, identifier)
self._connectionPool = connectionPool
self._persistenceIdentifier = persistenceIdentifier
self._factory = factory
self._idMapper = idMapper
@property
def isCollection(self):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
connection = self._connectionPool.acquire()
try:
return stat.S_ISDIR(connection.stat(self._persistenceIdentifier).st_mode)
except (IOError, EOFError, SSHException):
message = "Cannot determine item type (file or collection) of '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
@staticmethod
def _reRaiseError(message):
_, value, traceback = sys.exc_info()
raise PersistenceError, u"%s.\nReason: '%s'" % (message, value), traceback
@property
def isLeaf(self):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
return not self.isCollection
@property
def canAddChildren(self):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
return self.isCollection
def createCollection(self, recursively=False):
"""
@see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>}
        @note: Currently, parent collections are created recursively, i.e.
        this might lead to problems when creating deeply nested collections.
"""
if recursively:
self._createMissingParents()
self._createSingleCollection()
def _createMissingParents(self):
parentId = self._idMapper.determineParentId(self.identifier)
parent = self._factory.createDataStorer(parentId)
if not parent.exists():
try:
parent.createCollection(recursively=True)
except RuntimeError:
                raise PersistenceError(
                    "Cannot create collection '%s'.\n" % self.identifier
                    + "The collection path is too deeply nested.")
def _createSingleCollection(self):
connection = self._connectionPool.acquire()
try:
connection.mkdir(self._persistenceIdentifier)
except (IOError, EOFError, SSHException):
message = "Cannot create collection '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
# Set the directory permissions because the mode parameter of
# mkdir did not work for rwxrws--T (=> x instead of s)
self._setPermissions(constants.DEFAULT_DIRECTORY_PERMISSIONS)
def _setPermissions(self, mode):
""" Helper method which sets the permissions of a dirctory/file to the given mode.
See os.chmode for details on the mode parameter (octal).
"""
connection = self._connectionPool.acquire()
try:
connection.chmod(self._persistenceIdentifier, mode)
except (IOError, EOFError, SSHException):
message = "Cannot set default permissions of file '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
def createResource(self):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
self.writeData(StringIO.StringIO(""))
self._setPermissions(constants.DEFAULT_FILE_PERMISSIONS)
def createLink(self, destination):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
raise PersistenceError("Not implemented.")
def getChildren(self):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
connection = self._connectionPool.acquire()
try:
children = list()
for name in connection.listdir(self._persistenceIdentifier):
name = name.decode(constants.FILE_NAME_ENCODING, "replace")
child_id = self._idMapper.determineChildId(self.identifier, name)
children.append(child_id)
return children
except (IOError, EOFError, SSHException):
message = "Cannot retrieve children of item '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
def exists(self):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
connection = self._connectionPool.acquire()
try:
connection.stat(self._persistenceIdentifier)
return True
except IOError, error:
if error.errno == errno.ENOENT:
return False
message = "Cannot determine existence of '%s'!" % self.identifier
self._reRaiseError(message)
except (EOFError, SSHException):
message = "Cannot determine existence of '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
def delete(self):
"""
@see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>}
@note: As there is no library function to delete complete directories,
we implemented it on our own.
"""
isCollection = self.isCollection
connection = self._connectionPool.acquire()
try:
if isCollection:
self._deleteCollection(connection)
else:
self._deleteLeaf(connection)
except (IOError, EOFError, SSHException):
message = "Cannot delete item '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
def _deleteCollection(self, connection):
emptiedCollections = self._emptyAllCollections(connection)
self._deleteEmptiedCollections(connection, emptiedCollections)
def _emptyAllCollections(self, connection):
collections = [self._persistenceIdentifier]
emptiedCollections = list()
while collections:
currentCollection = collections[0]
for attrs in connection.listdir_attr(currentCollection):
persistenceId = self._idMapper.determinePersistenceChildId(
currentCollection, attrs.filename)
if not stat.S_ISDIR(attrs.st_mode):
connection.remove(persistenceId)
else:
collections.append(persistenceId)
collections.remove(currentCollection)
emptiedCollections.append(currentCollection)
return emptiedCollections
@staticmethod
def _deleteEmptiedCollections(connection, emptiedCollections):
emptiedCollections.reverse()
for collection in emptiedCollections:
connection.rmdir(collection)
def _deleteLeaf(self, connection):
connection.remove(self._persistenceIdentifier)
def copy(self, destination):
"""
@see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>}
@note: There is no library function to copy complete directories.
Additionally, every file needs to be transferred to the client
and back to the server. Thus, it takes some time to copy large data sets.
Unfortunately, this is a limitation of SFTP.
"""
isCollection = self.isCollection
connection = self._connectionPool.acquire()
try:
if isCollection:
self._copyCollection(connection, destination)
else:
self._copyLeaf(destination)
except (IOError, EOFError, SSHException):
message = "Cannot copy item '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
def _copyCollection(self, connection, destination):
collections = [self]
baseOrginalId = self.identifier
baseDestinationId = destination.identifier
while collections:
currentCollection = collections[0]
self._createDestinationCollection(currentCollection, baseOrginalId, baseDestinationId)
self._copyCollectionContent(currentCollection, connection, collections, baseOrginalId, baseDestinationId)
def _createDestinationCollection(self, orgCollection, baseOrginalId, baseDestinationId):
destCollectionId = orgCollection.identifier.replace(baseOrginalId, baseDestinationId)
destCollection = self._factory.createDataStorer(destCollectionId)
destCollection.createCollection()
def _copyCollectionContent(self, orgCollection, connection, collections, baseOrginalId, baseDestinationId):
orgPersistenceId = self._idMapper.determinePeristenceId(orgCollection.identifier)
for attrs in connection.listdir_attr(orgPersistenceId):
name = attrs.filename.decode(constants.FILE_NAME_ENCODING, "replace")
itemId = self._idMapper.determineChildId(orgCollection.identifier, name)
itemStorer = self._factory.createDataStorer(itemId)
if stat.S_ISDIR(attrs.st_mode):
collections.append(itemStorer)
else:
destItemId = itemId.replace(baseOrginalId, baseDestinationId)
destItemStorer = self._factory.createDataStorer(destItemId)
data = itemStorer.readData()
destItemStorer.writeData(data)
collections.remove(orgCollection)
def _copyLeaf(self, destination):
data = self.readData()
destination.writeData(data)
def move(self, destination):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
connection = self._connectionPool.acquire()
destPersistenceId = self._idMapper.determinePeristenceId(destination.identifier)
try:
connection.rename(self._persistenceIdentifier, destPersistenceId)
except (IOError, EOFError, SSHException):
message = "Cannot move/rename item '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
def readData(self):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
connection = self._connectionPool.acquire()
temporaryFileObject = tempfile.TemporaryFile()
try:
temporaryFileObject.seek(0)
remoteFileObject = connection.open(self._persistenceIdentifier)
block = remoteFileObject.read(constants.BLOCK_SIZE)
while block:
temporaryFileObject.write(block)
block = remoteFileObject.read(constants.BLOCK_SIZE)
temporaryFileObject.seek(0)
return temporaryFileObject
except (IOError, EOFError, SSHException):
message = "Cannot read data of item '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
def writeData(self, data):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
connection = self._connectionPool.acquire()
try:
remoteFileObject = connection.open(self._persistenceIdentifier, "w")
block = data.read(constants.BLOCK_SIZE)
while block:
remoteFileObject.write(block)
block = data.read(constants.BLOCK_SIZE)
except (IOError, EOFError, SSHException):
message = "Cannot write data to item '%s'!" % self.identifier
self._reRaiseError(message)
finally:
data.close()
self._connectionPool.release(connection)
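    # Hedged usage note (the identifiers below are illustrative, not from this
    # module): given two adapters produced by the same factory, a plain file
    # copy reduces to
    #   source = factory.createDataStorer('/projects/report.txt')
    #   target = factory.createDataStorer('/archive/report.txt')
    #   target.writeData(source.readData())
    # which is exactly the pattern _copyLeaf() implements above.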
|
|
# -*- coding: utf-8 -*-
################################################################################
# _____ _ _____ _ #
# / ____(_) / ____| | | #
# | | _ ___ ___ ___ | (___ _ _ ___| |_ ___ _ __ ___ ___ #
# | | | / __|/ __/ _ \ \___ \| | | / __| __/ _ \ '_ ` _ \/ __| #
# | |____| \__ \ (_| (_) | ____) | |_| \__ \ || __/ | | | | \__ \ #
# \_____|_|___/\___\___/ |_____/ \__, |___/\__\___|_| |_| |_|___/ #
# __/ | #
# |___/ #
# _ __ _____ _ _____ ______ #
# | |/ / / ____| | |/ ____| ____| #
# | ' / ___ _ __ ___ __ _ | (___ ___ | | (___ | |__ #
# | < / _ \| '__/ _ \/ _` | \___ \ / _ \| |\___ \| __| #
# | . \ (_) | | | __/ (_| | ____) | (_) | |____) | |____ #
# |_|\_\___/|_| \___|\__,_| |_____/ \___/|_|_____/|______| #
# #
################################################################################
# #
# Copyright (c) 2016 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
import re
import time
import pygics
import acidipy
import archon
from archon import *
from models import *
from .settings import *
#===============================================================================
# Create your manager here.
#===============================================================================
class HealthMonitor(pygics.Task):
def __init__(self, manager):
pygics.Task.__init__(self, tick=HEALTH_MON_SEC)
self.manager = manager
self.health = {'_tstamp' : []}
for i in reversed(range(0, HEALTH_MON_CNT)):
self.health['_tstamp'].append('00:00:00')
self.start()
def getNewHealthHist(self, dn, score):
if dn in self.health:
ret = self.health[dn][1:]
ret.append(score)
return ret
else:
ret = []
for i in range(0, HEALTH_MON_CNT - 1): ret.append(0)
ret.append(score)
return ret
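    # Illustrative note (HEALTH_MON_CNT == 4 is assumed only for this example):
    # with a prior history [90, 95, 100, 100] and a new score of 98,
    # getNewHealthHist returns [95, 100, 100, 98], i.e. a fixed-length sliding
    # window of the most recent scores for each dn.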
def getHealth(self):
return self.health
def run(self):
now = time.strftime("%H:%M:%S", time.localtime(time.time()))
total, pod, node, tenant, appprof, epg = Burst(
)(self.manager.health
)(self.manager.Pod.health
)(self.manager.Node.health
)(self.manager.Tenant.health
)(self.manager.AppProfile.health
)(self.manager.EPG.health
).do()
health = {'_tstamp' : self.health['_tstamp'][1:]}
health['_tstamp'].append(now)
for dom_name in total: health[dom_name] = self.getNewHealthHist(dom_name, total[dom_name]['score'])
for dom_name in pod:
for dp in pod[dom_name]:
dn = dom_name + '/' + dp['dn']
health[dn] = self.getNewHealthHist(dn, dp['score'])
for dom_name in node:
for dp in node[dom_name]:
dn = dom_name + '/' + dp['dn']
health[dn] = self.getNewHealthHist(dn, dp['score'])
for dom_name in tenant:
for dp in tenant[dom_name]:
dn = dom_name + '/' + dp['dn']
health[dn] = self.getNewHealthHist(dn, dp['score'])
for dom_name in appprof:
for dp in appprof[dom_name]:
dn = dom_name + '/' + dp['dn']
health[dn] = self.getNewHealthHist(dn, dp['score'])
for dom_name in epg:
for dp in epg[dom_name]:
dn = dom_name + '/' + dp['dn']
health[dn] = self.getNewHealthHist(dn, dp['score'])
self.health = health
class EndpointTracker(acidipy.SubscribeHandler):
@classmethod
def initDatabase(cls):
EPTracker.objects.all().delete()
pass
def __init__(self, manager, domain_name):
self.manager = manager
self.domain_name = domain_name
self.getInitData()
def convertTstamp(self, tstamp):
(resp_ts, remaining) = tstamp.split('T')
resp_ts += ' '
resp_ts = resp_ts + remaining.split('+')[0].split('.')[0]
return resp_ts
def getIfName(self, ep):
epcs = ep.Class('fvRsCEpToPathEp').list(sort='dn')
if_dn = []
for epc in epcs:
if_dn.append(re.sub('(topology/|pod-|protpaths-|paths-|pathep-|\[|\])', '', epc['tDn']))
return if_dn
def getInitData(self):
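        # Seed the EPTracker table from the current endpoint list: close any 'Active'
        # record whose start timestamp has changed, then create a fresh record for
        # endpoints that are new or were just closed.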
ep_list = self.manager[self.domain_name].Endpoint.list(detail=True)
for ep in ep_list:
sdn = ep['dn'].split('/')
start = self.convertTstamp(ep['modTs'])
create = False
epts = EPTracker.objects.filter(domain=self.domain_name,
dn=ep['dn'],
stop='Active')
if len(epts) != 0:
for ept in epts:
if ept.start != start:
ept.stop = start
ept.save()
create = True
else: create = True
if create:
EPTracker.objects.create(mac=ep['mac'],
ip=ep['ip'],
domain=self.domain_name,
tenant=sdn[1].replace('tn-', ''),
app=sdn[2].replace('ap-', ''),
epg=sdn[3].replace('epg-', ''),
dn=ep['dn'],
intf=','.join(self.getIfName(ep)),
start=self.convertTstamp(ep['modTs']),
stop='Active')
def subscribe(self, status, obj):
if self.manager.debug: print('[Info]Archon:ACI:Manager:EPTracker:%s:%s:%s' % (obj.class_name, status, obj))
sdn = obj['dn'].split('/')
start = self.convertTstamp(obj['modTs'])
epts = EPTracker.objects.filter(domain=self.domain_name,
dn=obj['dn'],
stop='Active')
for ept in epts:
if ept.start != start:
ept.stop = start
ept.save()
if status != 'deleted':
EPTracker.objects.create(mac=obj['mac'],
ip=obj['ip'],
domain=self.domain_name,
tenant=sdn[1].replace('tn-', ''),
app=sdn[2].replace('ap-', ''),
epg=sdn[3].replace('epg-', ''),
dn=obj['dn'],
intf=','.join(self.getIfName(obj)),
start=self.convertTstamp(obj['modTs']),
stop='Active')
class Manager(archon.ManagerAbstraction, acidipy.MultiDomain):
def __init__(self):
acidipy.MultiDomain.__init__(self, debug=MANAGER_DEBUG)
domains = Domain.objects.all()
for domain in domains:
ret = acidipy.MultiDomain.addDomain(self, domain.name, domain.controllers, domain.user, domain.password)
if ret:
self[domain.name].eptracker = EndpointTracker(self, domain.name)
self[domain.name].Endpoint.subscribe(self[domain.name].eptracker)
self.healthmon = HealthMonitor(self)
def addDomain(self, domain_name, ip, user, pwd):
try: Domain.objects.get(name=domain_name)
except:
ret = acidipy.MultiDomain.addDomain(self, domain_name, ip, user, pwd)
if ret:
Domain.objects.create(name=domain_name, controllers=ip, user=user, password=pwd)
self[domain_name].eptracker = EndpointTracker(self, domain_name)
self[domain_name].Endpoint.subscribe(self[domain_name].eptracker)
return ret
return False
def delDomain(self, domain_name):
try: domain = Domain.objects.get(name=domain_name)
except: return False
acidipy.MultiDomain.delDomain(self, domain_name)
domain.delete()
return True
def initEndpoint(self):
EndpointTracker.initDatabase()
for domain_name in self: self[domain_name].eptracker.getInitData()
def getHealth(self):
return self.healthmon.getHealth()
def getSummary(self, R, M, V):
return {
'name' : 'ACI',
'icon' : 'aci/APIC_Default.png',
'desc' : 'Application Centric Infrastructure Monitoring Application',
'link' : '/aci/overview',
'view' : DIV()
}
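# Illustrative usage sketch (not executed here; assumes Django settings and the archon
# runtime are configured, and uses placeholder credentials):
#
#   manager = Manager()
#   manager.addDomain('fabric1', '10.0.0.1', 'admin', 'password')
#   history = manager.getHealth()   # {'_tstamp': [...], '<domain or dn>': [scores, ...]}
#   manager.delDomain('fabric1')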
|
|
import logging
import keyedcache
import xlivesettings
from django.conf import settings as djangosettings
from django.core.urlresolvers import reverse
from django.test import TestCase
from xlivesettings import *
log = logging.getLogger(__name__)
class ConfigurationFunctionTest(TestCase):
def testSetSingleConfigItem(self):
value = IntegerValue(BASE_GROUP, 'SingleItem')
config_register(value)
self.assert_(config_exists(BASE_GROUP, 'SingleItem'))
def testSetTwoConfigItems(self):
s = [IntegerValue(BASE_GROUP, 'testTwoA'), StringValue(BASE_GROUP, 'testTwoB')]
config_register_list(*s)
self.assert_(config_exists(BASE_GROUP, 'testTwoA'))
self.assert_(config_exists(BASE_GROUP, 'testTwoB'))
def testSetGroup(self):
g1 = ConfigurationGroup('test1','test1')
value = IntegerValue(g1, 'SingleGroupedItem')
config_register(value)
self.assertFalse(config_exists(BASE_GROUP, 'SingleGroupedItem'))
self.assert_(config_exists(g1, 'SingleGroupedItem'))
class ConfigurationTestSettings(TestCase):
def setUp(self):
# clear out cache from previous runs
keyedcache.cache_delete()
g = ConfigurationGroup('test2','test2')
self.g = g
config_register(StringValue(g, 's1'))
config_register(IntegerValue(g, 's2', default=10))
config_register(IntegerValue(g, 's3', default=10))
def testSetSetting(self):
c = config_get('test2', 's1')
c.update('test')
self.assertEqual(c.value, 'test')
self.assertEqual(c.setting.value, 'test')
def testSettingDefault(self):
c = config_get('test2', 's2')
self.assertEqual(c.value, 10)
def testSetAndReset(self):
"""Test setting one value and then updating"""
c = config_get('test2', 's1')
c.update('test1')
self.assertEqual(c.value, 'test1')
# should be true, since it is an update
self.assert_(c.update('test2'))
self.assertEqual(c.value, 'test2')
def testTwice(self):
"""Config items should respond False to duplicate requests to update."""
c = config_get('test2', 's1')
c.update('test1')
self.assertFalse(c.update('test1'))
def testDeletesDefault(self):
c = config_get('test2', 's3')
# false because it isn't saving a default value
self.assertFalse(c.update(10))
self.assert_(c.update(20))
self.assertEqual(c.value, 20)
try:
s = c.setting
except SettingNotSet:
self.fail("Should have a setting now")
# now delete and go back to no setting by setting the default
self.assert_(c.update(10))
self.assertEqual(c.value, 10)
try:
s = c.setting
self.fail('Should throw SettingNotSet')
except SettingNotSet:
pass
class ConfigTestDotAccess(TestCase):
def setUp(self):
# clear out cache from previous runs
keyedcache.cache_delete()
g = ConfigurationGroup('test3','test3')
self.g = g
c1 = config_register(BooleanValue(g, 's1', default=True))
c2 = config_register(IntegerValue(g, 's2', default=10))
c2.update(100)
def testDotAccess(self):
self.assert_(ConfigurationSettings().test3.s1.value)
self.assertEqual(ConfigurationSettings().test3.s2.value, 100)
def testSettingProperty(self):
c = config_get('test3','s2')
s = c.setting
self.assert_(s.value, 100)
def testDictValues(self):
d = self.g.dict_values()
self.assertEqual(d, {'s1': True, 's2' : 100})
class ConfigTestModuleValue(TestCase):
def setUp(self):
# clear out cache from previous runs
keyedcache.cache_delete()
g = ConfigurationGroup('modules','module test')
self.g = g
self.c = config_register(ModuleValue(g, 'test'))
def testModule(self):
c = config_get('modules', 'test')
c.update('django')
self.assert_(hasattr(self.c.value, 'get_version'))
class ConfigTestSortOrder(TestCase):
def setUp(self):
# clear out cache from previous runs
keyedcache.cache_delete()
g1 = ConfigurationGroup('group1', 'Group 1', ordering=-1001)
g2 = ConfigurationGroup('group2', 'Group 2', ordering=-1002)
g3 = ConfigurationGroup('group3', 'Group 3', ordering=-1003)
self.g1 = g1
self.g2 = g2
self.g3 = g3
self.g1c1 = config_register(IntegerValue(g1, 'c1'))
self.g1c2 = config_register(IntegerValue(g1, 'c2'))
self.g1c3 = config_register(IntegerValue(g1, 'c3'))
self.g2c1 = config_register(IntegerValue(g2, 'c1'))
self.g2c2 = config_register(IntegerValue(g2, 'c2'))
self.g2c3 = config_register(IntegerValue(g2, 'c3'))
self.g3c1 = config_register(IntegerValue(g3, 'c1'))
self.g3c2 = config_register(IntegerValue(g3, 'c2'))
self.g3c3 = config_register(IntegerValue(g3, 'c3'))
def testGroupOrdering(self):
mgr = ConfigurationSettings()
self.assertEqual(mgr[2].key, self.g1.key)
self.assertEqual(mgr[1].key, self.g2.key)
self.assertEqual(mgr[0].key, self.g3.key)
class TestMultipleValues(TestCase):
def setUp(self):
# clear out cache from previous runs
keyedcache.cache_delete()
g1 = ConfigurationGroup('m1', 'Multiple Group 1', ordering=1000)
self.g1 = g1
self.g1c1 = config_register(MultipleStringValue(g1,
'c1',
choices=((1,'one'),(2,'two'),(3,'three'))))
def testSave(self):
c = config_get('m1','c1')
c.update([1,2])
self.assertEqual(c.value, [1,2])
def testAddChoice(self):
config_add_choice('m1','c1',(4, 'four'))
c = config_get('m1','c1')
self.assertEqual(c.choices, ((1,'one'),(2,'two'),(3,'three'),(4,'four')))
def testChoiceValues(self):
self.g1c1.update([1,2])
self.assertEqual(self.g1c1.value, [1,2])
self.assertEqual(self.g1c1.choice_values, [(1, 'one'),(2, 'two')])
choices = config_choice_values('m1', 'c1')
self.assertEqual(choices, [(1, 'one'),(2, 'two')])
class TestMultipleValuesWithDefault(TestCase):
def setUp(self):
# clear out cache from previous runs
keyedcache.cache_delete()
g1 = ConfigurationGroup('mv2', 'Multiple Group 2', ordering=1000)
self.g1 = g1
self.g1c1 = config_register(MultipleStringValue(g1,
'c1',
choices=((1,'one'),(2,'two'),(3,'three')),
default=[1,2]))
def testDefault(self):
c = config_get('mv2','c1')
self.assertEqual(c.value, [1,2])
c.update([1,2,3])
self.assertEqual(c.value, [1,2,3])
class ConfigTestChoices(TestCase):
def testAddPreregisteredChoice(self):
"""Test that we can register choices before the config is actually set up."""
config_add_choice('ctg1', 'c1', ('a', 'Item A'))
config_add_choice('ctg1', 'c1', ('b', 'Item B'))
config_add_choice('ctg1', 'c1', ('c', 'Item C'))
g1 = ConfigurationGroup('ctg1', 'Choice 1', ordering=1000)
config_register(StringValue(g1, 'c1'))
c = config_get('ctg1','c1')
self.assertEqual(c.choices, [('a','Item A'), ('b','Item B'), ('c','Item C')])
class ConfigTestRequires(TestCase):
def setUp(self):
# clear out cache from previous runs
keyedcache.cache_delete()
g1 = ConfigurationGroup('req1', 'Requirements 1', ordering=1000)
self.g1 = g1
bool1 = config_register(BooleanValue(g1, 'bool1', default=False, ordering=1))
bool2 = config_register(BooleanValue(g1, 'bool2', ordering=2))
self.g1c1 = config_register(IntegerValue(g1, 'c1', requires=bool1, ordering=3))
self.g1c2 = config_register(IntegerValue(g1, 'c2', requires=bool2, ordering=4))
self.g1c3 = config_register(IntegerValue(g1, 'c3', ordering=5))
bool2.update(True)
def testSimpleRequires(self):
v = config_value('req1', 'bool2')
self.assertTrue(v)
keys = [cfg.key for cfg in self.g1]
self.assertEqual(keys, ['bool1', 'bool2', 'c2','c3'])
c = config_get('req1','bool1')
c.update(True)
keys = [cfg.key for cfg in self.g1]
self.assertEqual(keys, ['bool1', 'bool2', 'c1', 'c2', 'c3'])
class ConfigTestRequiresChoices(TestCase):
def setUp(self):
# clear out cache from previous runs
keyedcache.cache_delete()
g1 = ConfigurationGroup('req2', 'Requirements 2', ordering=1000)
self.g1 = g1
choices1 = config_register(MultipleStringValue(BASE_GROUP, 'rc1', ordering=1))
self.g1c1 = config_register(IntegerValue(g1, 'c1', requires=choices1, ordering=3))
self.g1c2 = config_register(IntegerValue(g1, 'c2', requires=choices1, ordering=4))
self.g1c3 = config_register(IntegerValue(g1, 'c3', ordering=5))
choices1.update('c1')
g2 = ConfigurationGroup('req3', 'Requirements 3', ordering=1000)
self.g2 = g2
choices2 = config_register(StringValue(BASE_GROUP, 'choices2', ordering=1))
self.g2c1 = config_register(IntegerValue(g2, 'c1', requires=choices2, ordering=3))
self.g2c2 = config_register(IntegerValue(g2, 'c2', requires=choices2, ordering=4))
self.g2c3 = config_register(IntegerValue(g2, 'c3', requires=choices2, ordering=5))
choices2.update('c1')
def testSimpleRequiresChoices(self):
v = config_value('BASE', 'rc1')
self.assertEquals(v, ['c1'])
g = config_get_group('req2')
keys = [cfg.key for cfg in g]
self.assertEqual(keys, ['c1','c3'])
c = config_get('BASE', 'rc1')
c.update(['c1','c2'])
g = config_get_group('req2')
keys = [cfg.key for cfg in g]
self.assertEqual(keys, ['c1', 'c2', 'c3'])
def testRequiresSingleValue(self):
v = config_value('BASE', 'choices2')
self.assertEquals(v, 'c1')
keys = [cfg.key for cfg in self.g2]
self.assertEqual(keys, ['c1'])
c = config_get('BASE', 'choices2')
c.update('c2')
keys = [cfg.key for cfg in self.g2]
self.assertEqual(keys, ['c2'])
class ConfigTestRequiresValue(TestCase):
def setUp(self):
# clear out cache from previous runs
keyedcache.cache_delete()
g1 = ConfigurationGroup('reqval', 'Requirements 3', ordering=1000)
self.g1 = g1
choices1 = config_register(MultipleStringValue(BASE_GROUP, 'valchoices', ordering=1))
self.g1c1 = config_register(IntegerValue(g1, 'c1', requires=choices1, requiresvalue='foo', ordering=3))
self.g1c2 = config_register(IntegerValue(g1, 'c2', requires=choices1, requiresvalue='bar', ordering=4))
self.g1c3 = config_register(IntegerValue(g1, 'c3', ordering=5))
choices1.update('foo')
g2 = ConfigurationGroup('reqval2', 'Requirements 4', ordering=1000)
self.g2 = g2
choices2 = config_register(StringValue(BASE_GROUP, 'valchoices2', ordering=1,
choices=(('a','test a'),('b', 'test b'),('c', 'test c'))))
self.g2c1 = config_register(IntegerValue(g2, 'c1', requires=choices2, requiresvalue='a', ordering=3))
self.g2c2 = config_register(IntegerValue(g2, 'c2', requires=choices2, requiresvalue='b', ordering=4))
self.g2c3 = config_register(IntegerValue(g2, 'c3', requires=choices2, requiresvalue='c', ordering=5))
choices2.update('a')
def testRequiresValue(self):
v = config_value('BASE', 'valchoices')
self.assertEquals(v, ['foo'])
g = config_get_group('reqval')
keys = [cfg.key for cfg in g]
self.assertEqual(keys, ['c1','c3'])
c = config_get('BASE', 'valchoices')
c.update(['foo','bar'])
g = config_get_group('reqval')
keys = [cfg.key for cfg in g]
self.assertEqual(keys, ['c1', 'c2', 'c3'])
def testRequiresSingleValue(self):
v = config_value('BASE', 'valchoices2')
self.assertEquals(v, 'a')
keys = [cfg.key for cfg in self.g2]
self.assertEqual(keys, ['c1'])
c = config_get('BASE', 'valchoices2')
c.update('b')
keys = [cfg.key for cfg in self.g2]
self.assertEqual(keys, ['c2'])
class ConfigTestGroupRequires(TestCase):
def setUp(self):
# clear out cache from previous runs
keyedcache.cache_delete()
choices1 = config_register(MultipleStringValue(BASE_GROUP, 'groupchoice', ordering=1))
choices2 = config_register(MultipleStringValue(BASE_GROUP, 'groupchoice2', ordering=1))
g1 = ConfigurationGroup('groupreq', 'Requirements 4', ordering=1000, requires=choices1)
self.g1 = g1
self.g1c1 = config_register(IntegerValue(g1, 'c1', ordering=3))
self.g1c2 = config_register(IntegerValue(g1, 'c2', requires=choices2, requiresvalue='bar', ordering=4))
self.g1c3 = config_register(IntegerValue(g1, 'c3', ordering=5))
def testRequiresValue(self):
c = config_get('BASE', 'groupchoice')
self.assertEquals(c.value, [])
keys = [cfg.key for cfg in self.g1]
self.assertEqual(keys, [])
c2 = config_get('BASE', 'groupchoice2')
c2.update('bar')
keys = [cfg.key for cfg in self.g1]
self.assertEqual(keys, ['c2'])
c.update(['groupreq'])
keys = [cfg.key for cfg in self.g1]
self.assertEqual(keys, ['c1', 'c2', 'c3'])
class ConfigCollectGroup(TestCase):
def setUp(self):
keyedcache.cache_delete()
choices = config_register(MultipleStringValue(BASE_GROUP, 'collect', ordering=1))
self.choices = choices
g1 = ConfigurationGroup('coll1', 'Collection 1')
g2 = ConfigurationGroup('coll2', 'Collection 2')
g3 = ConfigurationGroup('coll3', 'Collection 3')
g1c1 = config_register(StringValue(g1, 'test'))
g1c2 = config_register(StringValue(g1, 'test1'))
g2c1 = config_register(StringValue(g2, 'test'))
g3c1 = config_register(StringValue(g3, 'test'))
g1c1.update('set a')
g1c2.update('set b')
g2c1.update('set a')
g3c1.update('set d')
choices.update(['coll1','coll3'])
def testCollectSimple(self):
v = config_collect_values('BASE', 'collect', 'test')
self.assertEqual(v, ['set a', 'set d'])
def testCollectUnique(self):
self.choices.update(['coll1','coll2','coll3'])
v = config_collect_values('BASE', 'collect', 'test', unique=False)
self.assertEqual(v, ['set a', 'set a', 'set d'])
v = config_collect_values('BASE', 'collect', 'test', unique=True)
self.assertEqual(v, ['set a', 'set d'])
class LongSettingTest(TestCase):
def setUp(self):
keyedcache.cache_delete()
wide = config_register(LongStringValue(BASE_GROUP, 'LONG', ordering=1, default="woot"))
self.wide = wide
self.wide.update('*' * 1000)
def testLongStorage(self):
w = config_value('BASE', 'LONG')
self.assertEqual(len(w), 1000)
self.assertEqual(w, '*'*1000)
def testShortInLong(self):
self.wide.update("test")
w = config_value('BASE', 'LONG')
self.assertEqual(len(w), 4)
self.assertEqual(w, 'test')
def testDelete(self):
remember = self.wide.setting.id
self.wide.update('woot')
try:
q = LongSetting.objects.get(pk = remember)
self.fail("Should be deleted")
except LongSetting.DoesNotExist:
pass
class OverrideTest(TestCase):
"""Test settings overrides"""
def setUp(self):
# clear out cache from previous runs
keyedcache.cache_delete()
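        # LIVESETTINGS_OPTIONS is keyed by site id; 'DB': False together with the
        # 'SETTINGS' dict makes the values below act as read-only overrides, which the
        # tests in this class verify.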
djangosettings.LIVESETTINGS_OPTIONS = {
1 : {
'DB' : False,
'SETTINGS' : {
'overgroup' : {
's2' : '100',
'choices' : '["one","two","three"]'
}
}
}
}
g = ConfigurationGroup('overgroup','Override Group')
self.g = g
config_register(StringValue(g, 's1'))
config_register(IntegerValue(g, 's2', default=10))
config_register(IntegerValue(g, 's3', default=10))
config_register(MultipleStringValue(g, 'choices'))
def tearDown(self):
djangosettings.LIVESETTINGS_OPTIONS = {}
def testOverriddenSetting(self):
"""Accessing an overridden setting should give the override value."""
c = config_get('overgroup', 's2')
self.assertEquals(c.value, 100)
def testCantChangeSetting(self):
"""When overridden, setting a value should not work, should get the overridden value"""
c = config_get('overgroup', 's2')
c.update(1)
c = config_get('overgroup', 's2')
self.assertEquals(c.value, 100)
def testNotOverriddenSetting(self):
"""Settings which are not overridden should return their defaults"""
c = config_get('overgroup', 's3')
self.assertEquals(c.value, 10)
def testOverriddenListSetting(self):
"""Make sure lists work when overridden"""
c = config_get('overgroup', 'choices')
v = c.value
self.assertEqual(len(v), 3)
self.assertEqual(v[0], "one")
self.assertEqual(v[1], "two")
self.assertEqual(v[2], "three")
class PermissionTest(TestCase):
"""Test access permissions"""
urls = 'livesettings.test_urls'
def setUp(self):
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
# Users with different permissions
# staff member
user1 = User.objects.create_user('warehouseman', '[email protected]', 'secret')
user1.is_staff = True
user1.save()
        # developer with limited permissions
user2 = User.objects.create_user('cautious_developer', '[email protected]', 'secret')
user2.is_staff = True
user2.user_permissions.add(Permission.objects.get(codename='change_setting', \
content_type=ContentType.objects.get(app_label='livesettings', model='setting')))
user2.save()
# superuser
user3 = User.objects.create_user('superuser', '[email protected]', 'secret')
user3.is_superuser = True
user3.save()
keyedcache.cache_delete()
# Example config
config_register(IntegerValue(BASE_GROUP, 'SingleItem', default=0))
def test_unauthorized_form(self):
"Testing users without enought additional permission"
# usually login_url_mask % nexturl is '/accounts/login/?next=/settings/'
login_url_mask = '%s?next=%%s' % reverse('django.contrib.auth.views.login')
# unauthorized
response = self.client.get(reverse('satchmo_site_settings')) # usually '/settings/'
        self.assertRedirects(response, login_url_mask % '/settings/', msg_prefix='unauthorized user should first log in')
# few authorized
self.client.login(username='warehouseman', password='secret')
response = self.client.get(reverse('satchmo_site_settings'))
        self.assertRedirects(response, login_url_mask % '/settings/', msg_prefix='user with insufficient permissions should not read normal settings')
# authorized enough but not for secret values
self.client.login(username='cautious_developer', password='secret')
response = self.client.get(reverse('settings_export')) # usually '/settings/export/'
self.assertRedirects(response, login_url_mask % '/settings/export/', msg_prefix='user without superuser permission should not export sensitive settings')
def test_authorized_enough(self):
"Testing a sufficiently authorized user"
self.client.login(username='cautious_developer', password='secret')
response = self.client.get(reverse('satchmo_site_settings'))
self.assertContains(response, 'SingleItem')
self.client.login(username='superuser', password='secret')
response = self.client.get(reverse('settings_export'))
self.assertContains(response, 'LIVESETTINGS_OPTIONS = ')
def test_export(self):
"Details of exported settings"
self.client.login(username='superuser', password='secret')
val2 = IntegerValue(BASE_GROUP, 'ModifiedItem', default=0)
config_register(val2)
val2.update(6789)
response = self.client.get('/settings/export/')
self.assertContains(response, "LIVESETTINGS_OPTIONS =", 1)
self.assertContains(response, "'DB': False", 1)
self.assertContains(response, "u'BASE':",1)
self.assertContains(response, "u'ModifiedItem': u'6789'", 1)
def test_secret_password(self):
"Verify that password is saved but not re-echoed if render_value=False"
        # example of a value where reading is more sensitive than writing
val1 = PasswordValue(BASE_GROUP, 'password_to_reading_external_payment_gateway', render_value=False)
config_register(val1)
val1.update('secret')
val2 = PasswordValue(BASE_GROUP, 'unsecure_password')
config_register(val2)
val2.update('unsecure_pwd')
self.client.login(username='superuser', password='secret')
response = self.client.get('/settings/')
self.assertContains(response, 'password_to_reading_external_payment_gateway')
self.assertNotContains(response, 'secret')
self.assertContains(response, 'unsecure_password')
self.assertContains(response, 'unsecure_pwd')
class WebClientPostTest(TestCase):
"""
Tests of the web interface with POST.
    These tests require temporarily removing all earlier defined values.
    All values are then restored, because that can be important for testing an application which uses livesettings.
"""
urls = 'livesettings.test_urls'
def setUp(self):
from django.contrib.auth.models import User
from django.utils.datastructures import SortedDict
        # The following hack completely replaces the ConfigurationSettings internal state, but only
        # works if the same group name is not used both inside and outside the test.
self.saved_conf_inst = ConfigurationSettings._ConfigurationSettings__instance.settings
ConfigurationSettings.__dict__['_ConfigurationSettings__instance'].settings = SortedDict()
keyedcache.cache_delete()
# set new users and values
user = User.objects.create_user('admin', '[email protected]', 'secret')
user.is_superuser = True
user.save()
self.client.login(username='admin', password='secret')
GROUP2 = ConfigurationGroup('Group2', 'g')
value = IntegerValue(GROUP2, 'SingleItem')
config_register(value)
def tearDown(self):
# restore the original configuration
ConfigurationSettings.__dict__['_ConfigurationSettings__instance'].settings = self.saved_conf_inst
def test_post(self):
"Tests of POST, verify is saved"
response = self.client.post('/settings/', {'Group2__SingleItem': '7890'})
# test can not use assertRedirects because it would consume the next get
self.assertEqual((response.status_code, response.get('Location', '')), (302, 'http://testserver/settings/'))
response = self.client.get('/settings/')
self.assertContains(response, 'Updated')
self.assertContains(response, '7890')
def test_empty_fields(self):
"test an empty value in the form should not raise an exception"
        # Some test features have been temporarily commented out until some ...Value classes are fixed,
        # because I do not want to display many old inconsistencies now. (hynekcer)
def extract_val(content):
regr = re.search(r'SingleItem.*value="([^"]*)"', content, flags=re.MULTILINE)
return regr and regr.group(1) or '' # html value
def get_setting_like_in_db(x):
try:
return x.setting.value
except SettingNotSet:
return 'Error'
def test_empty_value_type(value_type, protocol, reject_empty=False):
"empty value can be accepted or rejected by validation rules"
            value = value_type(GROUP2, 'SingleItem') # created first only to easily get the class name
type_name = value.__class__.__name__
value = value_type(GROUP2, 'SingleItem', description = 'type %s' % type_name)
config_register(value)
response = self.client.get('/settings/')
html_value = extract_val(response.content)
#print '%s "%s"' % (type_name, html_value)
            response = self.client.post('/settings/', {'Group2__SingleItem': ''}) # See the line one level up in the traceback
if reject_empty:
                # The reject_empty option was tested before all Value types were fixed to accept empty values consistently.
                # This is the typical text of a validation warning.
self.assertContains(response, 'Please correct the error below.')
else:
self.assertRedirects(response, '/settings/')
response = self.client.get('/settings/')
html_value = extract_val(response.content)
#print '%s "%s" "%s" "%s"' % (type_name, html_value, value.value, get_setting_like_in_db(value))
#self.assertNotContains(response, '<object object at 0x[0-9a-f]+>') # rendered NOTSET = object()
#if re.search('SingleItem.*value="', response.content):
# self.assertTrue(re.search('SingleItem.*value="([0.]*|\[\])"', response.content))
protocol.add(value_type)
#
import re
GROUP2 = ConfigurationGroup('Group2', 'g')
protocol = set()
# tested values
test_empty_value_type(BooleanValue, protocol)
test_empty_value_type(DecimalValue, protocol)
test_empty_value_type(DurationValue, protocol)
test_empty_value_type(FloatValue, protocol)
test_empty_value_type(IntegerValue, protocol)
test_empty_value_type(PositiveIntegerValue, protocol)
test_empty_value_type(StringValue, protocol)
test_empty_value_type(LongStringValue, protocol)
test_empty_value_type(MultipleStringValue, protocol)
test_empty_value_type(LongMultipleStringValue, protocol)
test_empty_value_type(ModuleValue, protocol)
test_empty_value_type(PasswordValue, protocol)
        # verify completeness of the test
        classes_to_test = set(getattr(xlivesettings.values, k) for k in xlivesettings.values.__all__ if
                              k not in ('BASE_GROUP', 'ConfigurationGroup', 'Value', 'SortedDotDict', 'PercentValue'))
        self.assertEqual(protocol, classes_to_test, msg='The tested classes were not exactly the same as expected')
def test_csrf(self):
"test CSRF"
from django.test import Client
csrf_client = Client(enforce_csrf_checks=True)
csrf_client.login(username='admin', password='secret')
        # get CSRF token
response = csrf_client.get('/settings/')
csrfmiddlewaretoken = response.context['csrf_token'] + ''
        self.assertContains(response, csrfmiddlewaretoken, msg_prefix='csrf token missing')
# expect OK
response = csrf_client.post('/settings/', {'Group2__SingleItem': '1234', 'csrfmiddlewaretoken': csrfmiddlewaretoken})
self.assertRedirects(response, expected_url='/settings/')
# expect 403
response = csrf_client.post('/settings/', {'Group2__SingleItem': '1234'})
self.assertContains(response, 'CSRF', status_code=403, msg_prefix='should require csrf')
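# Quick reference (illustrative only, derived from the tests above) for the configuration
# API exercised in this module:
#
#   g = ConfigurationGroup('mygroup', 'My Group')
#   config_register(IntegerValue(g, 'answer', default=42))
#   c = config_get('mygroup', 'answer')
#   c.update(7)                         # returns True when the stored value changes
#   config_value('mygroup', 'answer')   # -> 7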
|
|
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
try:
    import http.client
except ImportError:
    try:
        import httplib
    except ImportError:
        raise ImportError("There is no http.client or httplib module")
import urllib.parse as urllib
import time
import re
from io import BytesIO
import gzip
from tweepy.error import TweepError
from tweepy.utils import convert_to_utf8_str
from tweepy.models import Model
re_path_template = re.compile(r'{\w+}')
def bind_api(**config):
class APIMethod(object):
path = config['path']
payload_type = config.get('payload_type', None)
payload_list = config.get('payload_list', False)
allowed_param = config.get('allowed_param', [])
method = config.get('method', 'GET')
require_auth = config.get('require_auth', False)
search_api = config.get('search_api', False)
use_cache = config.get('use_cache', True)
def __init__(self, api, args, kargs):
# If authentication is required and no credentials
# are provided, throw an error.
if self.require_auth and not api.auth:
raise TweepError('Authentication required!')
self.api = api
self.post_data = kargs.pop('post_data', None)
self.retry_count = kargs.pop('retry_count', api.retry_count)
self.retry_delay = kargs.pop('retry_delay', api.retry_delay)
self.retry_errors = kargs.pop('retry_errors', api.retry_errors)
self.headers = kargs.pop('headers', {})
self.build_parameters(args, kargs)
# Pick correct URL root to use
if self.search_api:
self.api_root = api.search_root
else:
self.api_root = api.api_root
# Perform any path variable substitution
self.build_path()
if api.secure:
self.scheme = 'https://'
else:
self.scheme = 'http://'
if self.search_api:
self.host = api.search_host
else:
self.host = api.host
# Manually set Host header to fix an issue in python 2.5
# or older where Host is set including the 443 port.
# This causes Twitter to issue 301 redirect.
# See Issue https://github.com/tweepy/tweepy/issues/12
self.headers['Host'] = self.host
def build_parameters(self, args, kargs):
self.parameters = {}
for idx, arg in enumerate(args):
if arg is None:
continue
try:
self.parameters[self.allowed_param[idx]] = convert_to_utf8_str(arg)
except IndexError:
raise TweepError('Too many parameters supplied!')
for k, arg in kargs.items():
if arg is None:
continue
if k in self.parameters:
raise TweepError('Multiple values for parameter %s supplied!' % k)
self.parameters[k] = convert_to_utf8_str(arg)
def build_path(self):
for variable in re_path_template.findall(self.path):
name = variable.strip('{}')
if name == 'user' and 'user' not in self.parameters and self.api.auth:
# No 'user' parameter provided, fetch it from Auth instead.
value = self.api.auth.get_username()
else:
try:
value = urllib.quote(self.parameters[name])
except KeyError:
raise TweepError('No parameter value found for path variable: %s' % name)
del self.parameters[name]
self.path = self.path.replace(variable, value)
def execute(self):
# Build the request URL
url = self.api_root + self.path
if len(self.parameters):
url = '%s?%s' % (url, urllib.urlencode(self.parameters))
# Query the cache if one is available
# and this request uses a GET method.
if self.use_cache and self.api.cache and self.method == 'GET':
cache_result = self.api.cache.get(url)
# if cache result found and not expired, return it
if cache_result:
# must restore api reference
if isinstance(cache_result, list):
for result in cache_result:
if isinstance(result, Model):
result._api = self.api
else:
if isinstance(cache_result, Model):
cache_result._api = self.api
return cache_result
# Continue attempting request until successful
# or maximum number of retries is reached.
retries_performed = 0
while retries_performed < self.retry_count + 1:
# Open connection
if self.api.secure:
conn = http.client.HTTPSConnection(self.host, timeout=self.api.timeout)
else:
conn = http.client.HTTPConnection(self.host, timeout=self.api.timeout)
# Apply authentication
if self.api.auth:
self.api.auth.apply_auth(
self.scheme + self.host + url,
self.method, self.headers, self.parameters
)
# Request compression if configured
if self.api.compression:
self.headers['Accept-encoding'] = 'gzip'
# Execute request
try:
conn.request(self.method, url, headers=self.headers, body=self.post_data)
resp = conn.getresponse()
except Exception as e:
raise TweepError('Failed to send request: %s' % e)
# Exit request loop if non-retry error code
if self.retry_errors:
if resp.status not in self.retry_errors: break
else:
if resp.status == 200: break
# Sleep before retrying request again
time.sleep(self.retry_delay)
retries_performed += 1
# If an error was returned, throw an exception
self.api.last_response = resp
if resp.status != 200:
try:
error_msg = self.api.parser.parse_error(resp.read())
except Exception:
error_msg = "Twitter error response: status code = %s" % resp.status
raise TweepError(error_msg, resp)
# Parse the response payload
body = resp.read()
if resp.getheader('Content-Encoding', '') == 'gzip':
try:
                    zipper = gzip.GzipFile(fileobj=BytesIO(body))
body = zipper.read()
except Exception as e:
raise TweepError('Failed to decompress data: %s' % e)
result = self.api.parser.parse(self, body)
conn.close()
# Store result into cache if one is available.
if self.use_cache and self.api.cache and self.method == 'GET' and result:
self.api.cache.store(url, result)
return result
def _call(api, *args, **kargs):
method = APIMethod(api, args, kargs)
return method.execute()
# Set pagination mode
if 'cursor' in APIMethod.allowed_param:
_call.pagination_mode = 'cursor'
elif 'max_id' in APIMethod.allowed_param and \
'since_id' in APIMethod.allowed_param:
_call.pagination_mode = 'id'
elif 'page' in APIMethod.allowed_param:
_call.pagination_mode = 'page'
return _call
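# Illustrative sketch (this module only defines bind_api; concrete endpoints normally
# live elsewhere, e.g. in Tweepy's API class). A method is declared by describing the
# request as keyword arguments; the path and payload names below are examples only:
#
#   get_status = bind_api(
#       path='/statuses/show.json',
#       payload_type='status',
#       allowed_param=['id'],
#   )
#   # status = get_status(api, id=123)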
|
|
"""
Form classes
"""
from django.utils.datastructures import SortedDict, MultiValueDict
from django.utils.html import escape
from fields import Field
from widgets import TextInput, Textarea, HiddenInput, MultipleHiddenInput
from util import flatatt, StrAndUnicode, ErrorDict, ErrorList, ValidationError
import copy
__all__ = ('BaseForm', 'Form')
NON_FIELD_ERRORS = '__all__'
def pretty_name(name):
"Converts 'first_name' to 'First name'"
name = name[0].upper() + name[1:]
return name.replace('_', ' ')
class SortedDictFromList(SortedDict):
"A dictionary that keeps its keys in the order in which they're inserted."
# This is different than django.utils.datastructures.SortedDict, because
# this takes a list/tuple as the argument to __init__().
def __init__(self, data=None):
if data is None: data = []
self.keyOrder = [d[0] for d in data]
dict.__init__(self, dict(data))
def copy(self):
return SortedDictFromList([(k, copy.copy(v)) for k, v in self.items()])
class DeclarativeFieldsMetaclass(type):
"""
Metaclass that converts Field attributes to a dictionary called
'base_fields', taking into account parent class 'base_fields' as well.
"""
def __new__(cls, name, bases, attrs):
fields = [(field_name, attrs.pop(field_name)) for field_name, obj in attrs.items() if isinstance(obj, Field)]
fields.sort(lambda x, y: cmp(x[1].creation_counter, y[1].creation_counter))
# If this class is subclassing another Form, add that Form's fields.
# Note that we loop over the bases in *reverse*. This is necessary in
# order to preserve the correct order of fields.
for base in bases[::-1]:
if hasattr(base, 'base_fields'):
fields = base.base_fields.items() + fields
attrs['base_fields'] = SortedDictFromList(fields)
return type.__new__(cls, name, bases, attrs)
class BaseForm(StrAndUnicode):
# This is the main implementation of all the Form logic. Note that this
# class is different than Form. See the comments by the Form class for more
# information. Any improvements to the form API should be made to *this*
# class, not to the Form class.
def __init__(self, data=None, auto_id='id_%s', prefix=None, initial=None):
self.is_bound = data is not None
self.data = data or {}
self.auto_id = auto_id
self.prefix = prefix
self.initial = initial or {}
self.__errors = None # Stores the errors after clean() has been called.
# The base_fields class attribute is the *class-wide* definition of
# fields. Because a particular *instance* of the class might want to
# alter self.fields, we create self.fields here by copying base_fields.
# Instances should always modify self.fields; they should not modify
# self.base_fields.
self.fields = self.base_fields.copy()
def __unicode__(self):
return self.as_table()
def __iter__(self):
for name, field in self.fields.items():
yield BoundField(self, field, name)
def __getitem__(self, name):
"Returns a BoundField with the given name."
try:
field = self.fields[name]
except KeyError:
raise KeyError('Key %r not found in Form' % name)
return BoundField(self, field, name)
def _errors(self):
"Returns an ErrorDict for self.data"
if self.__errors is None:
self.full_clean()
return self.__errors
errors = property(_errors)
def is_valid(self):
"""
Returns True if the form has no errors. Otherwise, False. If errors are
being ignored, returns False.
"""
return self.is_bound and not bool(self.errors)
def add_prefix(self, field_name):
"""
Returns the field name with a prefix appended, if this Form has a
prefix set.
Subclasses may wish to override.
"""
return self.prefix and ('%s-%s' % (self.prefix, field_name)) or field_name
def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
"Helper function for outputting HTML. Used by as_table(), as_ul(), as_p()."
top_errors = self.non_field_errors() # Errors that should be displayed above all fields.
output, hidden_fields = [], []
for name, field in self.fields.items():
bf = BoundField(self, field, name)
bf_errors = ErrorList([escape(error) for error in bf.errors]) # Escape and cache in local variable.
if bf.is_hidden:
if bf_errors:
top_errors.extend(['(Hidden field %s) %s' % (name, e) for e in bf_errors])
hidden_fields.append(unicode(bf))
else:
if errors_on_separate_row and bf_errors:
output.append(error_row % bf_errors)
label = bf.label and bf.label_tag(escape(bf.label + ':')) or ''
if field.help_text:
help_text = help_text_html % field.help_text
else:
help_text = u''
output.append(normal_row % {'errors': bf_errors, 'label': label, 'field': unicode(bf), 'help_text': help_text})
if top_errors:
output.insert(0, error_row % top_errors)
if hidden_fields: # Insert any hidden fields in the last row.
str_hidden = u''.join(hidden_fields)
if output:
last_row = output[-1]
# Chop off the trailing row_ender (e.g. '</td></tr>') and insert the hidden fields.
output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender
else: # If there aren't any rows in the output, just append the hidden fields.
output.append(str_hidden)
return u'\n'.join(output)
def as_table(self):
"Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
return self._html_output(u'<tr><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>', u'<tr><td colspan="2">%s</td></tr>', '</td></tr>', u'<br />%s', False)
def as_ul(self):
"Returns this form rendered as HTML <li>s -- excluding the <ul></ul>."
return self._html_output(u'<li>%(errors)s%(label)s %(field)s%(help_text)s</li>', u'<li>%s</li>', '</li>', u' %s', False)
def as_p(self):
"Returns this form rendered as HTML <p>s."
return self._html_output(u'<p>%(label)s %(field)s%(help_text)s</p>', u'<p>%s</p>', '</p>', u' %s', True)
def non_field_errors(self):
"""
Returns an ErrorList of errors that aren't associated with a particular
field -- i.e., from Form.clean(). Returns an empty ErrorList if there
are none.
"""
return self.errors.get(NON_FIELD_ERRORS, ErrorList())
def full_clean(self):
"""
Cleans all of self.data and populates self.__errors and self.clean_data.
"""
errors = ErrorDict()
if not self.is_bound: # Stop further processing.
self.__errors = errors
return
self.clean_data = {}
for name, field in self.fields.items():
# value_from_datadict() gets the data from the dictionary.
# Each widget type knows how to retrieve its own data, because some
# widgets split data over several HTML fields.
value = field.widget.value_from_datadict(self.data, self.add_prefix(name))
try:
value = field.clean(value)
self.clean_data[name] = value
if hasattr(self, 'clean_%s' % name):
value = getattr(self, 'clean_%s' % name)()
self.clean_data[name] = value
except ValidationError, e:
errors[name] = e.messages
try:
self.clean_data = self.clean()
except ValidationError, e:
errors[NON_FIELD_ERRORS] = e.messages
if errors:
delattr(self, 'clean_data')
self.__errors = errors
def clean(self):
"""
        Hook for doing any extra form-wide cleaning after Field.clean() has been
called on every field. Any ValidationError raised by this method will
not be associated with a particular field; it will have a special-case
association with the field named '__all__'.
"""
return self.clean_data
class Form(BaseForm):
"A collection of Fields, plus their associated data."
# This is a separate class from BaseForm in order to abstract the way
# self.fields is specified. This class (Form) is the one that does the
# fancy metaclass stuff purely for the semantic sugar -- it allows one
# to define a form using declarative syntax.
# BaseForm itself has no way of designating self.fields.
__metaclass__ = DeclarativeFieldsMetaclass
class BoundField(StrAndUnicode):
"A Field plus data"
def __init__(self, form, field, name):
self.form = form
self.field = field
self.name = name
self.html_name = form.add_prefix(name)
if self.field.label is None:
self.label = pretty_name(name)
else:
self.label = self.field.label
self.help_text = field.help_text or ''
def __unicode__(self):
"Renders this field as an HTML widget."
# Use the 'widget' attribute on the field to determine which type
# of HTML widget to use.
value = self.as_widget(self.field.widget)
if not isinstance(value, basestring):
# Some Widget render() methods -- notably RadioSelect -- return a
# "special" object rather than a string. Call the __str__() on that
# object to get its rendered value.
value = value.__str__()
return value
def _errors(self):
"""
Returns an ErrorList for this field. Returns an empty ErrorList
if there are none.
"""
return self.form.errors.get(self.name, ErrorList())
errors = property(_errors)
def as_widget(self, widget, attrs=None):
attrs = attrs or {}
auto_id = self.auto_id
if auto_id and not attrs.has_key('id') and not widget.attrs.has_key('id'):
attrs['id'] = auto_id
if not self.form.is_bound:
data = self.form.initial.get(self.name, self.field.initial)
else:
data = self.data
return widget.render(self.html_name, data, attrs=attrs)
def as_text(self, attrs=None):
"""
Returns a string of HTML for representing this as an <input type="text">.
"""
return self.as_widget(TextInput(), attrs)
def as_textarea(self, attrs=None):
"Returns a string of HTML for representing this as a <textarea>."
return self.as_widget(Textarea(), attrs)
def as_hidden(self, attrs=None):
"""
Returns a string of HTML for representing this as an <input type="hidden">.
"""
return self.as_widget(self.field.hidden_widget(), attrs)
def _data(self):
"""
Returns the data for this BoundField, or None if it wasn't given.
"""
return self.field.widget.value_from_datadict(self.form.data, self.html_name)
data = property(_data)
def label_tag(self, contents=None, attrs=None):
"""
Wraps the given contents in a <label>, if the field has an ID attribute.
Does not HTML-escape the contents. If contents aren't given, uses the
field's HTML-escaped label.
If attrs are given, they're used as HTML attributes on the <label> tag.
"""
contents = contents or escape(self.label)
widget = self.field.widget
id_ = widget.attrs.get('id') or self.auto_id
if id_:
attrs = attrs and flatatt(attrs) or ''
contents = '<label for="%s"%s>%s</label>' % (widget.id_for_label(id_), attrs, contents)
return contents
def _is_hidden(self):
"Returns True if this BoundField's widget is hidden."
return self.field.widget.is_hidden
is_hidden = property(_is_hidden)
def _auto_id(self):
"""
Calculates and returns the ID attribute for this BoundField, if the
associated Form has specified auto_id. Returns an empty string otherwise.
"""
auto_id = self.form.auto_id
if auto_id and '%s' in str(auto_id):
return str(auto_id) % self.html_name
elif auto_id:
return self.html_name
return ''
auto_id = property(_auto_id)
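# Illustrative sketch (assumes the companion `fields` module provides CharField, as in
# the newforms library this file belongs to): a form is declared with class-level Field
# attributes, bound to data, validated, and rendered.
#
#   class ContactForm(Form):
#       subject = CharField()
#       message = CharField()
#
#   f = ContactForm({'subject': 'hello', 'message': 'hi there'})
#   if f.is_valid():
#       print f.clean_data['subject']
#   print f.as_p()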
|
|
"""
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================
This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
# Author: Chyi-Kwei Yau
# Author: Matthew D. Hoffman (original onlineldavb implementation)
import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import (check_random_state, check_array,
gen_batches, gen_even_slices, _get_n_jobs)
from ..utils.validation import check_non_negative
from ..utils.extmath import logsumexp
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..exceptions import NotFittedError
from ._online_lda import (mean_change, _dirichlet_expectation_1d,
_dirichlet_expectation_2d)
EPS = np.finfo(np.float).eps
def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
max_iters,
mean_change_tol, cal_sstats, random_state):
"""E-step: update document-topic distribution.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)
        Exponential value of expectation of log topic word distribution.
In the literature, this is `exp(E[log(beta)])`.
doc_topic_prior : float
Prior of document topic distribution `theta`.
max_iters : int
Max number of iterations for updating document topic distribution in
the E-step.
mean_change_tol : float
        Stopping tolerance for updating document topic distribution in E-step.
cal_sstats : boolean
        Parameter that indicates whether to calculate sufficient statistics or not.
        Set `cal_sstats` to `True` when we need to run the M-step.
random_state : RandomState instance or None
        Parameter that indicates how to initialize the document topic distribution.
        Setting `random_state` to None will initialize the document topic distribution
        to a constant number.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is unnormalized topic distribution for each document.
        In the literature, this is `gamma`. We can calculate `E[log(theta)]`
from it.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, this will be None.
"""
is_sparse_x = sp.issparse(X)
n_samples, n_features = X.shape
n_topics = exp_topic_word_distr.shape[0]
if random_state:
doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics))
else:
doc_topic_distr = np.ones((n_samples, n_topics))
# In the literature, this is `exp(E[log(theta)])`
exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
    # sufficient statistics for the M-step (only calculated when `cal_sstats` is True)
suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
for idx_d in xrange(n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
doc_topic_d = doc_topic_distr[idx_d, :]
# The next one is a copy, since the inner loop overwrites it.
exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
exp_topic_word_d = exp_topic_word_distr[:, ids]
# Iterate between `doc_topic_d` and `norm_phi` until convergence
for _ in xrange(0, max_iters):
last_d = doc_topic_d
# The optimal phi_{dwk} is proportional to
# exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
doc_topic_d = (exp_doc_topic_d *
np.dot(cnts / norm_phi, exp_topic_word_d.T))
# Note: adds doc_topic_prior to doc_topic_d, in-place.
_dirichlet_expectation_1d(doc_topic_d, doc_topic_prior,
exp_doc_topic_d)
if mean_change(last_d, doc_topic_d) < mean_change_tol:
break
doc_topic_distr[idx_d, :] = doc_topic_d
# Contribution of document d to the expected sufficient
# statistics for the M step.
if cal_sstats:
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
return (doc_topic_distr, suff_stats)
class LatentDirichletAllocation(BaseEstimator, TransformerMixin):
"""Latent Dirichlet Allocation with online variational Bayes algorithm
.. versionadded:: 0.17
Read more in the :ref:`User Guide <LatentDirichletAllocation>`.
Parameters
----------
n_topics : int, optional (default=10)
Number of topics.
doc_topic_prior : float, optional (default=None)
Prior of document topic distribution `theta`. If the value is None,
defaults to `1 / n_topics`.
In the literature, this is called `alpha`.
topic_word_prior : float, optional (default=None)
Prior of topic word distribution `beta`. If the value is None, defaults
to `1 / n_topics`.
In the literature, this is called `eta`.
learning_method : 'batch' | 'online', default='online'
        Method used to update `components_`. Only used in the `fit` method.
In general, if the data size is large, the online update will be much
faster than the batch update.
Valid options::
'batch': Batch variational Bayes method. Use all training data in
each EM update.
Old `components_` will be overwritten in each iteration.
'online': Online variational Bayes method. In each EM update, use
mini-batch of training data to update the ``components_``
variable incrementally. The learning rate is controlled by the
``learning_decay`` and the ``learning_offset`` parameters.
learning_decay : float, optional (default=0.7)
        It is a parameter that controls the learning rate in the online learning
method. The value should be set between (0.5, 1.0] to guarantee
asymptotic convergence. When the value is 0.0 and batch_size is
        ``n_samples``, the update method is the same as batch learning. In the
literature, this is called kappa.
learning_offset : float, optional (default=10.)
A (positive) parameter that downweights early iterations in online
learning. It should be greater than 1.0. In the literature, this is
called tau_0.
max_iter : integer, optional (default=10)
The maximum number of iterations.
total_samples : int, optional (default=1e6)
Total number of documents. Only used in the `partial_fit` method.
batch_size : int, optional (default=128)
Number of documents to use in each EM iteration. Only used in online
learning.
    evaluate_every : int, optional (default=-1)
        How often to evaluate perplexity. Only used in `fit` method.
        Set it to 0 or a negative number to not evaluate perplexity in
training at all. Evaluating perplexity can help you check convergence
in training process, but it will also increase total training time.
Evaluating perplexity in every iteration might increase training time
up to two-fold.
perp_tol : float, optional (default=1e-1)
Perplexity tolerance in batch learning. Only used when
``evaluate_every`` is greater than 0.
mean_change_tol : float, optional (default=1e-3)
Stopping tolerance for updating document topic distribution in E-step.
max_doc_update_iter : int (default=100)
Max number of iterations for updating document topic distribution in
the E-step.
n_jobs : int, optional (default=1)
The number of jobs to use in the E-step. If -1, all CPUs are used. For
``n_jobs`` below -1, (n_cpus + 1 + n_jobs) are used.
verbose : int, optional (default=0)
Verbosity level.
random_state : int or RandomState instance or None, optional (default=None)
Pseudo-random number generator seed control.
Attributes
----------
components_ : array, [n_topics, n_features]
Topic word distribution. ``components_[i, j]`` represents word j in
topic `i`. In the literature, this is called lambda.
n_batch_iter_ : int
Number of iterations of the EM step.
n_iter_ : int
Number of passes over the dataset.
References
----------
[1] "Online Learning for Latent Dirichlet Allocation", Matthew D. Hoffman,
David M. Blei, Francis Bach, 2010
[2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei,
Chong Wang, John Paisley, 2013
[3] Matthew D. Hoffman's onlineldavb code. Link:
http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
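    Examples
    --------
    A minimal illustrative sketch (toy data; the parameter values are placeholders,
    not recommended settings):
    >>> import numpy as np
    >>> X = np.random.RandomState(0).randint(5, size=(20, 30))
    >>> lda = LatentDirichletAllocation(n_topics=5, random_state=0)
    >>> doc_topic = lda.fit_transform(X)
    >>> doc_topic.shape
    (20, 5)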
"""
def __init__(self, n_topics=10, doc_topic_prior=None,
topic_word_prior=None, learning_method='online',
learning_decay=.7, learning_offset=10., max_iter=10,
batch_size=128, evaluate_every=-1, total_samples=1e6,
perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,
n_jobs=1, verbose=0, random_state=None):
self.n_topics = n_topics
self.doc_topic_prior = doc_topic_prior
self.topic_word_prior = topic_word_prior
self.learning_method = learning_method
self.learning_decay = learning_decay
self.learning_offset = learning_offset
self.max_iter = max_iter
self.batch_size = batch_size
self.evaluate_every = evaluate_every
self.total_samples = total_samples
self.perp_tol = perp_tol
self.mean_change_tol = mean_change_tol
self.max_doc_update_iter = max_doc_update_iter
self.n_jobs = n_jobs
self.verbose = verbose
self.random_state = random_state
def _check_params(self):
"""Check model parameters."""
if self.n_topics <= 0:
raise ValueError("Invalid 'n_topics' parameter: %r"
% self.n_topics)
if self.total_samples <= 0:
raise ValueError("Invalid 'total_samples' parameter: %r"
% self.total_samples)
if self.learning_offset < 0:
raise ValueError("Invalid 'learning_offset' parameter: %r"
% self.learning_offset)
if self.learning_method not in ("batch", "online"):
raise ValueError("Invalid 'learning_method' parameter: %r"
% self.learning_method)
def _init_latent_vars(self, n_features):
"""Initialize latent variables."""
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1. / self.n_topics
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1. / self.n_topics
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.
init_var = 1. / init_gamma
# In the literature, this is called `lambda`
self.components_ = self.random_state_.gamma(
init_gamma, init_var, (self.n_topics, n_features))
# In the literature, this is `exp(E[log(beta)])`
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
def _e_step(self, X, cal_sstats, random_init, parallel=None):
"""E-step in EM update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
cal_sstats : boolean
            Parameter that indicates whether to calculate sufficient statistics
or not. Set ``cal_sstats`` to True when we need to run M-step.
random_init : boolean
            Parameter that indicates whether to initialize the document topic
distribution randomly in the E-step. Set it to True in training
steps.
parallel : joblib.Parallel (optional)
Pre-initialized instance of joblib.Parallel.
Returns
-------
(doc_topic_distr, suff_stats) :
            `doc_topic_distr` is unnormalized topic distribution for each
document. In the literature, this is called `gamma`.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, it will be None.
"""
# Run e-step in parallel
random_state = self.random_state_ if random_init else None
# TODO: make Parallel._effective_n_jobs public instead?
n_jobs = _get_n_jobs(self.n_jobs)
if parallel is None:
parallel = Parallel(n_jobs=n_jobs, verbose=self.verbose)
results = parallel(
delayed(_update_doc_distribution)(X[idx_slice, :],
self.exp_dirichlet_component_,
self.doc_topic_prior_,
self.max_doc_update_iter,
self.mean_change_tol, cal_sstats,
random_state)
for idx_slice in gen_even_slices(X.shape[0], n_jobs))
# merge result
doc_topics, sstats_list = zip(*results)
doc_topic_distr = np.vstack(doc_topics)
if cal_sstats:
# This step finishes computing the sufficient statistics for the
# M-step.
suff_stats = np.zeros(self.components_.shape)
for sstats in sstats_list:
suff_stats += sstats
suff_stats *= self.exp_dirichlet_component_
else:
suff_stats = None
return (doc_topic_distr, suff_stats)
def _em_step(self, X, total_samples, batch_update, parallel=None):
"""EM update for 1 iteration.
        Update `components_` by batch VB or online VB.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
total_samples : integer
            Total number of documents. It is only used when
batch_update is `False`.
batch_update : boolean
Parameter that controls updating method.
`True` for batch learning, `False` for online learning.
parallel : joblib.Parallel
Pre-initialized instance of joblib.Parallel
Returns
-------
doc_topic_distr : array, shape=(n_samples, n_topics)
Unnormalized document topic distribution.
"""
# E-step
_, suff_stats = self._e_step(X, cal_sstats=True, random_init=True,
parallel=parallel)
# M-step
if batch_update:
self.components_ = self.topic_word_prior_ + suff_stats
else:
# online update
# In the literature, the weight is `rho`
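            # i.e. rho_t = (learning_offset + n_batch_iter_) ** (-learning_decay),
            # the (tau_0 + t) ** (-kappa) schedule described in the class docstring.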
weight = np.power(self.learning_offset + self.n_batch_iter_,
-self.learning_decay)
doc_ratio = float(total_samples) / X.shape[0]
self.components_ *= (1 - weight)
self.components_ += (weight * (self.topic_word_prior_
+ doc_ratio * suff_stats))
        # update `components_` related variables
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
return
def _check_non_neg_array(self, X, whom):
"""check X format
check X format and make sure no negative value in X.
Parameters
----------
X : array-like or sparse matrix
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, whom)
return X
def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.partial_fit")
n_samples, n_features = X.shape
batch_size = self.batch_size
# initialize parameters or check
if not hasattr(self, 'components_'):
self._init_latent_vars(n_features)
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=self.verbose) as parallel:
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :],
total_samples=self.total_samples,
batch_update=False,
parallel=parallel)
return self
def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X, "LatentDirichletAllocation.fit")
n_samples, n_features = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
batch_size = self.batch_size
# initialize parameters
self._init_latent_vars(n_features)
# change to perplexity later
last_bound = None
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=self.verbose) as parallel:
for i in xrange(max_iter):
if learning_method == 'online':
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :], total_samples=n_samples,
batch_update=False, parallel=parallel)
else:
# batch update
self._em_step(X, total_samples=n_samples,
batch_update=True, parallel=parallel)
# check perplexity
if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False,
parallel=parallel)
bound = self.perplexity(X, doc_topics_distr,
sub_sampling=False)
if self.verbose:
print('iteration: %d, perplexity: %.4f'
% (i + 1, bound))
if last_bound and abs(last_bound - bound) < self.perp_tol:
break
last_bound = bound
self.n_iter_ += 1
return self
def transform(self, X):
"""Transform data X according to the fitted model.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : shape=(n_samples, n_topics)
Document topic distribution for X.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
# make sure feature size is the same in fitted model and in X
X = self._check_non_neg_array(X, "LatentDirichletAllocation.transform")
n_samples, n_features = X.shape
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
doc_topic_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
# normalize doc_topic_distr
doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
return doc_topic_distr
def _approx_bound(self, X, doc_topic_distr, sub_sampling):
"""Estimate the variational bound.
Estimate the variational bound over "all documents" using only the
documents passed in as X. Since the log-likelihood of each word cannot
be computed directly, we use this bound to estimate it.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
doc_topic_distr : array, shape=(n_samples, n_topics)
Document topic distribution. In the literature, this is called
gamma.
sub_sampling : boolean, optional, (default=False)
Compensate for subsampling of documents.
It is used when calculating the bound in online learning.
Returns
-------
score : float
"""
def _loglikelihood(prior, distr, dirichlet_distr, size):
# calculate log-likelihood
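# (This is the standard Dirichlet ELBO term
#  E_q[log p(. | prior)] - E_q[log q(. | distr)], summed over the rows
#  of `distr`.)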
score = np.sum((prior - distr) * dirichlet_distr)
score += np.sum(gammaln(distr) - gammaln(prior))
score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
return score
is_sparse_x = sp.issparse(X)
n_samples, n_topics = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
# E[log p(docs | theta, beta)]
for idx_d in xrange(0, n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]
+ dirichlet_component_[:, ids])
norm_phi = logsumexp(temp)
score += np.dot(cnts, norm_phi)
# compute E[log p(theta | alpha) - log q(theta | gamma)]
score += _loglikelihood(doc_topic_prior, doc_topic_distr,
dirichlet_doc_topic, self.n_topics)
# Compensate for the subsampling of the population of documents
if sub_sampling:
doc_ratio = float(self.total_samples) / n_samples
score *= doc_ratio
# E[log p(beta | eta) - log q (beta | lambda)]
score += _loglikelihood(topic_word_prior, self.components_,
dirichlet_component_, n_features)
return score
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
score : float
Use approximate bound as score.
"""
X = self._check_non_neg_array(X, "LatentDirichletAllocation.score")
doc_topic_distr = self.transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
def perplexity(self, X, doc_topic_distr=None, sub_sampling=False):
"""Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : array-like or sparse matrix, [n_samples, n_features]
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_topics)
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.perplexity")
if doc_topic_distr is None:
doc_topic_distr = self.transform(X)
else:
n_samples, n_topics = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError("Number of samples in X and doc_topic_distr"
" do not match.")
if n_topics != self.n_topics:
raise ValueError("Number of topics does not match.")
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
if sub_sampling:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
return np.exp(-1.0 * perword_bound)
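if __name__ == "__main__":
    # Minimal usage sketch (illustration only).  The constructor argument
    # names (n_topics, max_iter, learning_method, random_state) are assumed
    # to mirror the attributes used in the methods above.
    import numpy as np
    import scipy.sparse as sp
    rng = np.random.RandomState(0)
    X_demo = sp.csr_matrix(rng.poisson(1.0, size=(20, 50)))  # 20 docs, 50 terms
    lda = LatentDirichletAllocation(n_topics=5, max_iter=5,
                                    learning_method='batch', random_state=0)
    doc_topics = lda.fit(X_demo).transform(X_demo)  # shape (20, 5); rows sum to 1
    print('demo perplexity: %.4f' % lda.perplexity(X_demo))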
|
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
from environment import *
from imports import mpi
import DistGraph as DG
class DistSparseMatrix(object):
# Constructors and destructors
# ============================
lib.ElDistSparseMatrixCreate_i.argtypes = \
lib.ElDistSparseMatrixCreate_s.argtypes = \
lib.ElDistSparseMatrixCreate_d.argtypes = \
lib.ElDistSparseMatrixCreate_c.argtypes = \
lib.ElDistSparseMatrixCreate_z.argtypes = \
[POINTER(c_void_p),mpi.Comm]
def __init__(self,tag=dTag,comm=mpi.COMM_WORLD(),create=True):
self.obj = c_void_p()
self.tag = tag
CheckTag(tag)
if create:
args = [pointer(self.obj),comm]
if tag == iTag: lib.ElDistSparseMatrixCreate_i(*args)
elif tag == sTag: lib.ElDistSparseMatrixCreate_s(*args)
elif tag == dTag: lib.ElDistSparseMatrixCreate_d(*args)
elif tag == cTag: lib.ElDistSparseMatrixCreate_c(*args)
elif tag == zTag: lib.ElDistSparseMatrixCreate_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixDestroy_i.argtypes = \
lib.ElDistSparseMatrixDestroy_s.argtypes = \
lib.ElDistSparseMatrixDestroy_d.argtypes = \
lib.ElDistSparseMatrixDestroy_c.argtypes = \
lib.ElDistSparseMatrixDestroy_z.argtypes = \
[c_void_p]
def Destroy(self):
args = [self.obj]
if self.tag == iTag: lib.ElDistSparseMatrixDestroy_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixDestroy_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixDestroy_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixDestroy_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixDestroy_z(*args)
else: DataExcept()
# Assignment and reconfiguration
# ==============================
lib.ElDistSparseMatrixEmpty_i.argtypes = \
lib.ElDistSparseMatrixEmpty_s.argtypes = \
lib.ElDistSparseMatrixEmpty_d.argtypes = \
lib.ElDistSparseMatrixEmpty_c.argtypes = \
lib.ElDistSparseMatrixEmpty_z.argtypes = \
[c_void_p]
def Empty(self):
args = [self.obj]
if self.tag == iTag: lib.ElDistSparseMatrixEmpty_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixEmpty_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixEmpty_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixEmpty_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixEmpty_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixResize_i.argtypes = \
lib.ElDistSparseMatrixResize_s.argtypes = \
lib.ElDistSparseMatrixResize_d.argtypes = \
lib.ElDistSparseMatrixResize_c.argtypes = \
lib.ElDistSparseMatrixResize_z.argtypes = \
[c_void_p,iType,iType]
def Resize(self,height,width):
args = [self.obj,height,width]
if self.tag == iTag: lib.ElDistSparseMatrixResize_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixResize_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixResize_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixResize_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixResize_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixSetComm_i.argtypes = \
lib.ElDistSparseMatrixSetComm_s.argtypes = \
lib.ElDistSparseMatrixSetComm_d.argtypes = \
lib.ElDistSparseMatrixSetComm_c.argtypes = \
lib.ElDistSparseMatrixSetComm_z.argtypes = \
[c_void_p,mpi.Comm]
def SetComm(self,comm):
args = [self.obj,comm]
if self.tag == iTag: lib.ElDistSparseMatrixSetComm_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixSetComm_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixSetComm_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixSetComm_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixSetComm_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixReserve_i.argtypes = \
lib.ElDistSparseMatrixReserve_s.argtypes = \
lib.ElDistSparseMatrixReserve_d.argtypes = \
lib.ElDistSparseMatrixReserve_c.argtypes = \
lib.ElDistSparseMatrixReserve_z.argtypes = \
[c_void_p,iType,iType]
def Reserve(self,numLocalEntries,numRemoteEntries=0):
args = [self.obj,numLocalEntries,numRemoteEntries]
if self.tag == iTag: lib.ElDistSparseMatrixReserve_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixReserve_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixReserve_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixReserve_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixReserve_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixUpdate_i.argtypes = [c_void_p,iType,iType,iType]
lib.ElDistSparseMatrixUpdate_s.argtypes = [c_void_p,iType,iType,sType]
lib.ElDistSparseMatrixUpdate_d.argtypes = [c_void_p,iType,iType,dType]
lib.ElDistSparseMatrixUpdate_c.argtypes = [c_void_p,iType,iType,cType]
lib.ElDistSparseMatrixUpdate_z.argtypes = [c_void_p,iType,iType,zType]
def Update(self,row,col,value):
args = [self.obj,row,col,value]
if self.tag == iTag: lib.ElDistSparseMatrixUpdate_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixUpdate_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixUpdate_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixUpdate_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixUpdate_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixUpdateLocal_i.argtypes = [c_void_p,iType,iType,iType]
lib.ElDistSparseMatrixUpdateLocal_s.argtypes = [c_void_p,iType,iType,sType]
lib.ElDistSparseMatrixUpdateLocal_d.argtypes = [c_void_p,iType,iType,dType]
lib.ElDistSparseMatrixUpdateLocal_c.argtypes = [c_void_p,iType,iType,cType]
lib.ElDistSparseMatrixUpdateLocal_z.argtypes = [c_void_p,iType,iType,zType]
def UpdateLocal(self,localRow,col,value):
args = [self.obj,localRow,col,value]
if self.tag == iTag: lib.ElDistSparseMatrixUpdateLocal_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixUpdateLocal_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixUpdateLocal_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixUpdateLocal_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixUpdateLocal_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixZero_i.argtypes = \
lib.ElDistSparseMatrixZero_s.argtypes = \
lib.ElDistSparseMatrixZero_d.argtypes = \
lib.ElDistSparseMatrixZero_c.argtypes = \
lib.ElDistSparseMatrixZero_z.argtypes = \
[c_void_p,iType,iType]
def Zero(self,row,col):
args = [self.obj,row,col]
if self.tag == iTag: lib.ElDistSparseMatrixZero_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixZero_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixZero_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixZero_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixZero_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixZeroLocal_i.argtypes = \
lib.ElDistSparseMatrixZeroLocal_s.argtypes = \
lib.ElDistSparseMatrixZeroLocal_d.argtypes = \
lib.ElDistSparseMatrixZeroLocal_c.argtypes = \
lib.ElDistSparseMatrixZeroLocal_z.argtypes = \
[c_void_p,iType,iType]
def ZeroLocal(self,localRow,col):
args = [self.obj,localRow,col]
if self.tag == iTag: lib.ElDistSparseMatrixZeroLocal_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixZeroLocal_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixZeroLocal_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixZeroLocal_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixZeroLocal_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixQueueUpdate_i.argtypes = \
[c_void_p,iType,iType,iType,bType]
lib.ElDistSparseMatrixQueueUpdate_s.argtypes = \
[c_void_p,iType,iType,sType,bType]
lib.ElDistSparseMatrixQueueUpdate_d.argtypes = \
[c_void_p,iType,iType,dType,bType]
lib.ElDistSparseMatrixQueueUpdate_c.argtypes = \
[c_void_p,iType,iType,cType,bType]
lib.ElDistSparseMatrixQueueUpdate_z.argtypes = \
[c_void_p,iType,iType,zType,bType]
def QueueUpdate(self,row,col,value,passive=True):
args = [self.obj,row,col,value,passive]
if self.tag == iTag: lib.ElDistSparseMatrixQueueUpdate_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixQueueUpdate_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixQueueUpdate_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixQueueUpdate_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixQueueUpdate_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixQueueLocalUpdate_i.argtypes = \
[c_void_p,iType,iType,iType]
lib.ElDistSparseMatrixQueueLocalUpdate_s.argtypes = \
[c_void_p,iType,iType,sType]
lib.ElDistSparseMatrixQueueLocalUpdate_d.argtypes = \
[c_void_p,iType,iType,dType]
lib.ElDistSparseMatrixQueueLocalUpdate_c.argtypes = \
[c_void_p,iType,iType,cType]
lib.ElDistSparseMatrixQueueLocalUpdate_z.argtypes = \
[c_void_p,iType,iType,zType]
def QueueLocalUpdate(self,localRow,col,value):
args = [self.obj,localRow,col,value]
if self.tag == iTag: lib.ElDistSparseMatrixQueueLocalUpdate_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixQueueLocalUpdate_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixQueueLocalUpdate_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixQueueLocalUpdate_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixQueueLocalUpdate_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixQueueZero_i.argtypes = \
lib.ElDistSparseMatrixQueueZero_s.argtypes = \
lib.ElDistSparseMatrixQueueZero_d.argtypes = \
lib.ElDistSparseMatrixQueueZero_c.argtypes = \
lib.ElDistSparseMatrixQueueZero_z.argtypes = \
[c_void_p,iType,iType,bType]
def QueueZero(self,row,col,passive=True):
args = [self.obj,row,col,passive]
if self.tag == iTag: lib.ElDistSparseMatrixQueueZero_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixQueueZero_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixQueueZero_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixQueueZero_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixQueueZero_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixQueueLocalZero_i.argtypes = \
lib.ElDistSparseMatrixQueueLocalZero_s.argtypes = \
lib.ElDistSparseMatrixQueueLocalZero_d.argtypes = \
lib.ElDistSparseMatrixQueueLocalZero_c.argtypes = \
lib.ElDistSparseMatrixQueueLocalZero_z.argtypes = \
[c_void_p,iType,iType]
def QueueLocalZero(self,localRow,col):
args = [self.obj,localRow,col]
if self.tag == iTag: lib.ElDistSparseMatrixQueueLocalZero_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixQueueLocalZero_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixQueueLocalZero_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixQueueLocalZero_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixQueueLocalZero_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixProcessQueues_i.argtypes = \
lib.ElDistSparseMatrixProcessQueues_s.argtypes = \
lib.ElDistSparseMatrixProcessQueues_d.argtypes = \
lib.ElDistSparseMatrixProcessQueues_c.argtypes = \
lib.ElDistSparseMatrixProcessQueues_z.argtypes = \
[c_void_p]
def ProcessQueues(self):
args = [self.obj]
if self.tag == iTag: lib.ElDistSparseMatrixProcessQueues_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixProcessQueues_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixProcessQueues_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixProcessQueues_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixProcessQueues_z(*args)
else: DataExcept()
lib.ElDistSparseMatrixProcessLocalQueues_i.argtypes = \
lib.ElDistSparseMatrixProcessLocalQueues_s.argtypes = \
lib.ElDistSparseMatrixProcessLocalQueues_d.argtypes = \
lib.ElDistSparseMatrixProcessLocalQueues_c.argtypes = \
lib.ElDistSparseMatrixProcessLocalQueues_z.argtypes = \
[c_void_p]
def ProcessLocalQueues(self):
args = [self.obj]
if self.tag == iTag: lib.ElDistSparseMatrixProcessLocalQueues_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixProcessLocalQueues_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixProcessLocalQueues_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixProcessLocalQueues_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixProcessLocalQueues_z(*args)
else: DataExcept()
# Queries
# =======
lib.ElDistSparseMatrixHeight_i.argtypes = \
lib.ElDistSparseMatrixHeight_s.argtypes = \
lib.ElDistSparseMatrixHeight_d.argtypes = \
lib.ElDistSparseMatrixHeight_c.argtypes = \
lib.ElDistSparseMatrixHeight_z.argtypes = \
[c_void_p,POINTER(iType)]
def Height(self):
height = iType()
args = [self.obj,pointer(height)]
if self.tag == iTag: lib.ElDistSparseMatrixHeight_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixHeight_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixHeight_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixHeight_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixHeight_z(*args)
else: DataExcept()
return height.value
lib.ElDistSparseMatrixWidth_i.argtypes = \
lib.ElDistSparseMatrixWidth_s.argtypes = \
lib.ElDistSparseMatrixWidth_d.argtypes = \
lib.ElDistSparseMatrixWidth_c.argtypes = \
lib.ElDistSparseMatrixWidth_z.argtypes = \
[c_void_p,POINTER(iType)]
def Width(self):
width = iType()
args = [self.obj,pointer(width)]
if self.tag == iTag: lib.ElDistSparseMatrixWidth_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixWidth_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixWidth_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixWidth_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixWidth_z(*args)
else: DataExcept()
return width.value
lib.ElDistSparseMatrixDistGraph_i.argtypes = \
lib.ElDistSparseMatrixDistGraph_s.argtypes = \
lib.ElDistSparseMatrixDistGraph_d.argtypes = \
lib.ElDistSparseMatrixDistGraph_c.argtypes = \
lib.ElDistSparseMatrixDistGraph_z.argtypes = \
lib.ElDistSparseMatrixLockedDistGraph_i.argtypes = \
lib.ElDistSparseMatrixLockedDistGraph_s.argtypes = \
lib.ElDistSparseMatrixLockedDistGraph_d.argtypes = \
lib.ElDistSparseMatrixLockedDistGraph_c.argtypes = \
lib.ElDistSparseMatrixLockedDistGraph_z.argtypes = \
[c_void_p,POINTER(c_void_p)]
def DistGraph(self,locked=False):
graph = DG.DistGraph(mpi.COMM_WORLD(),False)
args = [self.obj,pointer(graph.obj)]
if locked:
if self.tag == iTag: lib.ElDistSparseMatrixLockedDistGraph_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixLockedDistGraph_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixLockedDistGraph_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixLockedDistGraph_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixLockedDistGraph_z(*args)
else: DataExcept()
else:
if self.tag == iTag: lib.ElDistSparseMatrixDistGraph_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixDistGraph_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixDistGraph_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixDistGraph_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixDistGraph_z(*args)
else: DataExcept()
return graph
lib.ElDistSparseMatrixFirstLocalRow_i.argtypes = \
lib.ElDistSparseMatrixFirstLocalRow_s.argtypes = \
lib.ElDistSparseMatrixFirstLocalRow_d.argtypes = \
lib.ElDistSparseMatrixFirstLocalRow_c.argtypes = \
lib.ElDistSparseMatrixFirstLocalRow_z.argtypes = \
[c_void_p,POINTER(iType)]
def FirstLocalRow(self):
firstLocalRow = iType()
args = [self.obj,pointer(firstLocalRow)]
if self.tag == iTag: lib.ElDistSparseMatrixFirstLocalRow_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixFirstLocalRow_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixFirstLocalRow_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixFirstLocalRow_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixFirstLocalRow_z(*args)
else: DataExcept()
return firstLocalRow.value
lib.ElDistSparseMatrixLocalHeight_i.argtypes = \
lib.ElDistSparseMatrixLocalHeight_s.argtypes = \
lib.ElDistSparseMatrixLocalHeight_d.argtypes = \
lib.ElDistSparseMatrixLocalHeight_c.argtypes = \
lib.ElDistSparseMatrixLocalHeight_z.argtypes = \
[c_void_p,POINTER(iType)]
def LocalHeight(self):
localHeight = iType()
args = [self.obj,pointer(localHeight)]
if self.tag == iTag: lib.ElDistSparseMatrixLocalHeight_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixLocalHeight_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixLocalHeight_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixLocalHeight_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixLocalHeight_z(*args)
else: DataExcept()
return localHeight.value
lib.ElDistSparseMatrixNumLocalEntries_i.argtypes = \
lib.ElDistSparseMatrixNumLocalEntries_s.argtypes = \
lib.ElDistSparseMatrixNumLocalEntries_d.argtypes = \
lib.ElDistSparseMatrixNumLocalEntries_c.argtypes = \
lib.ElDistSparseMatrixNumLocalEntries_z.argtypes = \
[c_void_p,POINTER(iType)]
def NumLocalEntries(self):
numLocalEntries = iType()
args = [self.obj,pointer(numLocalEntries)]
if self.tag == iTag: lib.ElDistSparseMatrixNumLocalEntries_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixNumLocalEntries_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixNumLocalEntries_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixNumLocalEntries_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixNumLocalEntries_z(*args)
else: DataExcept()
return numLocalEntries.value
lib.ElDistSparseMatrixCapacity_i.argtypes = \
lib.ElDistSparseMatrixCapacity_s.argtypes = \
lib.ElDistSparseMatrixCapacity_d.argtypes = \
lib.ElDistSparseMatrixCapacity_c.argtypes = \
lib.ElDistSparseMatrixCapacity_z.argtypes = \
[c_void_p,POINTER(iType)]
def Capacity(self):
capacity = iType()
args = [self.obj,pointer(capacity)]
if self.tag == iTag: lib.ElDistSparseMatrixCapacity_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixCapacity_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixCapacity_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixCapacity_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixCapacity_z(*args)
else: DataExcept()
return capacity.value
lib.ElDistSparseMatrixLocallyConsistent_i.argtypes = \
lib.ElDistSparseMatrixLocallyConsistent_s.argtypes = \
lib.ElDistSparseMatrixLocallyConsistent_d.argtypes = \
lib.ElDistSparseMatrixLocallyConsistent_c.argtypes = \
lib.ElDistSparseMatrixLocallyConsistent_z.argtypes = \
[c_void_p,POINTER(bType)]
def LocallyConsistent(self):
consistent = bType()
args = [self.obj,pointer(consistent)]
if self.tag == iTag: lib.ElDistSparseMatrixLocallyConsistent_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixLocallyConsistent_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixLocallyConsistent_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixLocallyConsistent_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixLocallyConsistent_z(*args)
else: DataExcept()
return consistent.value
lib.ElDistSparseMatrixComm_i.argtypes = \
lib.ElDistSparseMatrixComm_s.argtypes = \
lib.ElDistSparseMatrixComm_d.argtypes = \
lib.ElDistSparseMatrixComm_c.argtypes = \
lib.ElDistSparseMatrixComm_z.argtypes = \
[c_void_p,POINTER(mpi.Comm)]
def Comm(self):
comm = mpi.Comm()
args = [self.obj,pointer(comm)]
if self.tag == iTag: lib.ElDistSparseMatrixComm_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixComm_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixComm_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixComm_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixComm_z(*args)
else: DataExcept()
return comm
lib.ElDistSparseMatrixBlocksize_i.argtypes = \
lib.ElDistSparseMatrixBlocksize_s.argtypes = \
lib.ElDistSparseMatrixBlocksize_d.argtypes = \
lib.ElDistSparseMatrixBlocksize_c.argtypes = \
lib.ElDistSparseMatrixBlocksize_z.argtypes = \
[c_void_p,POINTER(iType)]
def Blocksize(self):
blocksize = iType()
args = [self.obj,pointer(blocksize)]
if self.tag == iTag: lib.ElDistSparseMatrixBlocksize_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixBlocksize_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixBlocksize_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixBlocksize_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixBlocksize_z(*args)
else: DataExcept()
return blocksize.value
lib.ElDistSparseMatrixRowOwner_i.argtypes = \
lib.ElDistSparseMatrixRowOwner_s.argtypes = \
lib.ElDistSparseMatrixRowOwner_d.argtypes = \
lib.ElDistSparseMatrixRowOwner_c.argtypes = \
lib.ElDistSparseMatrixRowOwner_z.argtypes = \
[c_void_p,iType,POINTER(c_int)]
def RowOwner(self,i):
owner = c_int()
args = [self.obj,i,pointer(owner)]
if self.tag == iTag: lib.ElDistSparseMatrixRowOwner_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixRowOwner_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixRowOwner_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixRowOwner_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixRowOwner_z(*args)
else: DataExcept()
return owner.value
lib.ElDistSparseMatrixGlobalRow_i.argtypes = \
lib.ElDistSparseMatrixGlobalRow_s.argtypes = \
lib.ElDistSparseMatrixGlobalRow_d.argtypes = \
lib.ElDistSparseMatrixGlobalRow_c.argtypes = \
lib.ElDistSparseMatrixGlobalRow_z.argtypes = \
[c_void_p,iType,POINTER(iType)]
def GlobalRow(self,iLoc):
i = iType()
args = [self.obj,iLoc,pointer(i)]
if self.tag == iTag: lib.ElDistSparseMatrixGlobalRow_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixGlobalRow_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixGlobalRow_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixGlobalRow_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixGlobalRow_z(*args)
else: DataExcept()
return i.value
lib.ElDistSparseMatrixRow_i.argtypes = \
lib.ElDistSparseMatrixRow_s.argtypes = \
lib.ElDistSparseMatrixRow_d.argtypes = \
lib.ElDistSparseMatrixRow_c.argtypes = \
lib.ElDistSparseMatrixRow_z.argtypes = \
[c_void_p,iType,POINTER(iType)]
def Row(self,localInd):
row = iType()
args = [self.obj,localInd,pointer(row)]
if self.tag == iTag: lib.ElDistSparseMatrixRow_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixRow_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixRow_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixRow_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixRow_z(*args)
else: DataExcept()
return row.value
lib.ElDistSparseMatrixCol_i.argtypes = \
lib.ElDistSparseMatrixCol_s.argtypes = \
lib.ElDistSparseMatrixCol_d.argtypes = \
lib.ElDistSparseMatrixCol_c.argtypes = \
lib.ElDistSparseMatrixCol_z.argtypes = \
[c_void_p,iType,POINTER(iType)]
def Col(self,localInd):
col = iType()
args = [self.obj,localInd,pointer(col)]
if self.tag == iTag: lib.ElDistSparseMatrixCol_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixCol_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixCol_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixCol_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixCol_z(*args)
else: DataExcept()
return col.value
lib.ElDistSparseMatrixValue_i.argtypes = [c_void_p,iType,POINTER(iType)]
lib.ElDistSparseMatrixValue_s.argtypes = [c_void_p,iType,POINTER(sType)]
lib.ElDistSparseMatrixValue_d.argtypes = [c_void_p,iType,POINTER(dType)]
lib.ElDistSparseMatrixValue_c.argtypes = [c_void_p,iType,POINTER(cType)]
lib.ElDistSparseMatrixValue_z.argtypes = [c_void_p,iType,POINTER(zType)]
def Value(self,localInd):
value = TagToType(self.tag)()
args = [self.obj,localInd,pointer(value)]
if self.tag == iTag: lib.ElDistSparseMatrixValue_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixValue_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixValue_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixValue_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixValue_z(*args)
else: DataExcept()
return value.value
lib.ElDistSparseMatrixRowOffset_i.argtypes = \
lib.ElDistSparseMatrixRowOffset_s.argtypes = \
lib.ElDistSparseMatrixRowOffset_d.argtypes = \
lib.ElDistSparseMatrixRowOffset_c.argtypes = \
lib.ElDistSparseMatrixRowOffset_z.argtypes = \
[c_void_p,iType,POINTER(iType)]
def RowOffset(self,localRow):
offset = iType()
args = [self.obj,localRow,pointer(offset)]
if self.tag == iTag: lib.ElDistSparseMatrixRowOffset_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixRowOffset_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixRowOffset_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixRowOffset_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixRowOffset_z(*args)
else: DataExcept()
return offset.value
lib.ElDistSparseMatrixOffset_i.argtypes = \
lib.ElDistSparseMatrixOffset_s.argtypes = \
lib.ElDistSparseMatrixOffset_d.argtypes = \
lib.ElDistSparseMatrixOffset_c.argtypes = \
lib.ElDistSparseMatrixOffset_z.argtypes = \
[c_void_p,iType,iType,POINTER(iType)]
def Offset(self,localRow,col):
offset = iType()
args = [self.obj,localRow,col,pointer(offset)]
if self.tag == iTag: lib.ElDistSparseMatrixOffset_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixOffset_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixOffset_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixOffset_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixOffset_z(*args)
else: DataExcept()
return offset.value
lib.ElDistSparseMatrixNumConnections_i.argtypes = \
lib.ElDistSparseMatrixNumConnections_s.argtypes = \
lib.ElDistSparseMatrixNumConnections_d.argtypes = \
lib.ElDistSparseMatrixNumConnections_c.argtypes = \
lib.ElDistSparseMatrixNumConnections_z.argtypes = \
[c_void_p,iType,POINTER(iType)]
def NumConnections(self,localRow):
numConnections = iType()
args = [self.obj,localRow,pointer(numConnections)]
if self.tag == iTag: lib.ElDistSparseMatrixNumConnections_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixNumConnections_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixNumConnections_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixNumConnections_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixNumConnections_z(*args)
else: DataExcept()
return numConnections.value
lib.ElDistSparseMatrixSourceBuffer_i.argtypes = \
lib.ElDistSparseMatrixSourceBuffer_s.argtypes = \
lib.ElDistSparseMatrixSourceBuffer_d.argtypes = \
lib.ElDistSparseMatrixSourceBuffer_c.argtypes = \
lib.ElDistSparseMatrixSourceBuffer_z.argtypes = \
lib.ElDistSparseMatrixLockedSourceBuffer_i.argtypes = \
lib.ElDistSparseMatrixLockedSourceBuffer_s.argtypes = \
lib.ElDistSparseMatrixLockedSourceBuffer_d.argtypes = \
lib.ElDistSparseMatrixLockedSourceBuffer_c.argtypes = \
lib.ElDistSparseMatrixLockedSourceBuffer_z.argtypes = \
[c_void_p,POINTER(POINTER(iType))]
def SourceBuffer(self,locked=False):
sourceBuf = POINTER(iType)()
args = [self.obj,pointer(sourceBuf)]
if locked:
if self.tag == iTag: lib.ElDistSparseMatrixLockedSourceBuffer_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixLockedSourceBuffer_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixLockedSourceBuffer_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixLockedSourceBuffer_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixLockedSourceBuffer_z(*args)
else: DataExcept()
else:
if self.tag == iTag: lib.ElDistSparseMatrixSourceBuffer_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixSourceBuffer_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixSourceBuffer_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixSourceBuffer_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixSourceBuffer_z(*args)
else: DataExcept()
return sourceBuf
lib.ElDistSparseMatrixTargetBuffer_i.argtypes = \
lib.ElDistSparseMatrixTargetBuffer_s.argtypes = \
lib.ElDistSparseMatrixTargetBuffer_d.argtypes = \
lib.ElDistSparseMatrixTargetBuffer_c.argtypes = \
lib.ElDistSparseMatrixTargetBuffer_z.argtypes = \
lib.ElDistSparseMatrixLockedTargetBuffer_i.argtypes = \
lib.ElDistSparseMatrixLockedTargetBuffer_s.argtypes = \
lib.ElDistSparseMatrixLockedTargetBuffer_d.argtypes = \
lib.ElDistSparseMatrixLockedTargetBuffer_c.argtypes = \
lib.ElDistSparseMatrixLockedTargetBuffer_z.argtypes = \
[c_void_p,POINTER(POINTER(iType))]
def TargetBuffer(self,locked=False):
targetBuf = POINTER(iType)()
args = [self.obj,pointer(targetBuf)]
if locked:
if self.tag == iTag: lib.ElDistSparseMatrixLockedTargetBuffer_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixLockedTargetBuffer_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixLockedTargetBuffer_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixLockedTargetBuffer_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixLockedTargetBuffer_z(*args)
else: DataExcept()
else:
if self.tag == iTag: lib.ElDistSparseMatrixTargetBuffer_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixTargetBuffer_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixTargetBuffer_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixTargetBuffer_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixTargetBuffer_z(*args)
else: DataExcept()
return targetBuf
lib.ElDistSparseMatrixValueBuffer_i.argtypes = \
lib.ElDistSparseMatrixLockedValueBuffer_i.argtypes = \
[c_void_p,POINTER(POINTER(iType))]
lib.ElDistSparseMatrixValueBuffer_s.argtypes = \
lib.ElDistSparseMatrixLockedValueBuffer_s.argtypes = \
[c_void_p,POINTER(POINTER(sType))]
lib.ElDistSparseMatrixValueBuffer_d.argtypes = \
lib.ElDistSparseMatrixLockedValueBuffer_d.argtypes = \
[c_void_p,POINTER(POINTER(dType))]
lib.ElDistSparseMatrixValueBuffer_c.argtypes = \
lib.ElDistSparseMatrixLockedValueBuffer_c.argtypes = \
[c_void_p,POINTER(POINTER(cType))]
lib.ElDistSparseMatrixValueBuffer_z.argtypes = \
lib.ElDistSparseMatrixLockedValueBuffer_z.argtypes = \
[c_void_p,POINTER(POINTER(zType))]
def ValueBuffer(self,locked=False):
valueBuf = POINTER(TagToType(self.tag))()
args = [self.obj,pointer(valueBuf)]
if locked:
if self.tag == iTag: lib.ElDistSparseMatrixLockedValueBuffer_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixLockedValueBuffer_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixLockedValueBuffer_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixLockedValueBuffer_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixLockedValueBuffer_z(*args)
else: DataExcept()
else:
if self.tag == iTag: lib.ElDistSparseMatrixValueBuffer_i(*args)
elif self.tag == sTag: lib.ElDistSparseMatrixValueBuffer_s(*args)
elif self.tag == dTag: lib.ElDistSparseMatrixValueBuffer_d(*args)
elif self.tag == cTag: lib.ElDistSparseMatrixValueBuffer_c(*args)
elif self.tag == zTag: lib.ElDistSparseMatrixValueBuffer_z(*args)
else: DataExcept()
return valueBuf
lib.ElGetContigSubmatrixDistSparse_i.argtypes = \
lib.ElGetContigSubmatrixDistSparse_s.argtypes = \
lib.ElGetContigSubmatrixDistSparse_d.argtypes = \
lib.ElGetContigSubmatrixDistSparse_c.argtypes = \
lib.ElGetContigSubmatrixDistSparse_z.argtypes = \
[c_void_p,IndexRange,IndexRange,c_void_p]
def __getitem__(self,indTup):
iInd, jInd = indTup
if isinstance(iInd,slice):
if iInd.start == None:
iInd = slice(0,iInd.stop,iInd.step)
if iInd.stop == None:
iInd = slice(iInd.start,self.Height(),iInd.step)
if isinstance(jInd,slice):
if jInd.start == None:
jInd = slice(0,jInd.stop,jInd.step)
if jInd.stop == None:
jInd = slice(jInd.start,self.Width(),jInd.step)
iRan = IndexRange(iInd)
jRan = IndexRange(jInd)
ASub = DistSparseMatrix(self.tag,self.Comm())
args = [self.obj,iRan,jRan,ASub.obj]
if self.tag == iTag: lib.ElGetContigSubmatrixDistSparse_i(*args)
elif self.tag == sTag: lib.ElGetContigSubmatrixDistSparse_s(*args)
elif self.tag == dTag: lib.ElGetContigSubmatrixDistSparse_d(*args)
elif self.tag == cTag: lib.ElGetContigSubmatrixDistSparse_c(*args)
elif self.tag == zTag: lib.ElGetContigSubmatrixDistSparse_z(*args)
else: DataExcept()
return ASub
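# ---------------------------------------------------------------------------
# Minimal usage sketch (comments only, since it needs a live MPI/Elemental
# run).  The assumed workflow is Resize -> Reserve -> QueueLocalUpdate ->
# ProcessLocalQueues, here building a distributed identity matrix:
#
#   A = DistSparseMatrix(dTag)              # double-precision entries
#   A.Resize(100, 100)
#   A.Reserve(A.LocalHeight())              # one entry per locally owned row
#   for iLoc in xrange(A.LocalHeight()):
#     A.QueueLocalUpdate(iLoc, A.GlobalRow(iLoc), 1.0)
#   A.ProcessLocalQueues()                  # assemble the locally queued entries
# ---------------------------------------------------------------------------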
|
|
import os
import cPickle as pickle
from datetime import datetime
from globalvars import GlobalVars
import metasmoke
import requests
import json
import time
import math
# methods to load files and filter data in them:
def load_files():
if os.path.isfile("falsePositives.txt"):
with open("falsePositives.txt", "rb") as f:
GlobalVars.false_positives = pickle.load(f)
if os.path.isfile("whitelistedUsers.txt"):
with open("whitelistedUsers.txt", "rb") as f:
GlobalVars.whitelisted_users = pickle.load(f)
if os.path.isfile("blacklistedUsers.txt"):
with open("blacklistedUsers.txt", "rb") as f:
GlobalVars.blacklisted_users = pickle.load(f)
if os.path.isfile("ignoredPosts.txt"):
with open("ignoredPosts.txt", "rb") as f:
GlobalVars.ignored_posts = pickle.load(f)
if os.path.isfile("autoIgnoredPosts.txt"):
with open("autoIgnoredPosts.txt", "rb") as f:
GlobalVars.auto_ignored_posts = pickle.load(f)
if os.path.isfile("notifications.txt"):
with open("notifications.txt", "rb") as f:
GlobalVars.notifications = pickle.load(f)
if os.path.isfile("whyData.txt"):
with open("whyData.txt", "rb") as f:
GlobalVars.why_data = pickle.load(f)
if os.path.isfile("whyDataAllspam.txt"):
with open("whyDataAllspam.txt") as f:
GlobalVars.why_data_allspam = pickle.load(f)
if os.path.isfile("latestMessages.txt"):
try:
with open("latestMessages.txt", "rb") as f:
GlobalVars.latest_smokedetector_messages = pickle.load(f)
except EOFError:
os.remove("latestMessages.txt")
raise
if os.path.isfile("apiCalls.txt"):
try:
with open("apiCalls.txt", "rb") as f:
GlobalVars.api_calls_per_site = pickle.load(f)
except EOFError:
os.remove("apiCalls.txt")
raise
if os.path.isfile("bodyfetcherQueue.txt"):
try:
with open("bodyfetcherQueue.txt", "rb") as f:
GlobalVars.bodyfetcher.queue = pickle.load(f)
except EOFError:
os.remove("bodyfetcherQueue.txt")
raise
def filter_auto_ignored_posts():
today_date = datetime.today()
to_remove = []
for aip in GlobalVars.auto_ignored_posts:
day_ignored = aip[2]
day_diff = (today_date - day_ignored).days
if day_diff > 7:
to_remove.append(aip)
for tr in to_remove:
GlobalVars.auto_ignored_posts.remove(tr)
with open("autoIgnoredPosts.txt", "wb") as f:
pickle.dump(GlobalVars.auto_ignored_posts, f, protocol=pickle.HIGHEST_PROTOCOL)
# methods to check whether a post/user is whitelisted/blacklisted/...
def is_false_positive(postid_site_tuple):
return postid_site_tuple in GlobalVars.false_positives
def is_whitelisted_user(user):
return user in GlobalVars.whitelisted_users
def is_blacklisted_user(user):
for blacklisted_user in GlobalVars.blacklisted_users:
if user == blacklisted_user[0]:
return True
return False
def get_blacklisted_user_data(user):
for blacklisted_user in GlobalVars.blacklisted_users:
if user == blacklisted_user[0]:
return blacklisted_user
return ()
def is_ignored_post(postid_site_tuple):
return postid_site_tuple in GlobalVars.ignored_posts
def is_auto_ignored_post(postid_site_tuple):
for p in GlobalVars.auto_ignored_posts:
if p[0] == postid_site_tuple[0] and p[1] == postid_site_tuple[1]:
return True
return False
def is_privileged(room_id_str, user_id_str, wrap2):
if room_id_str in GlobalVars.privileged_users and user_id_str in GlobalVars.privileged_users[room_id_str]:
return True
user = wrap2.get_user(user_id_str)
return user.is_moderator
def is_code_privileged(room_id_str, user_id_str, wrap2):
if GlobalVars.code_privileged_users is None:
metasmoke.Metasmoke.update_code_privileged_users_list()
if room_id_str in GlobalVars.code_privileged_users and int(user_id_str) in GlobalVars.code_privileged_users[room_id_str]:
return True
return False # For now, disable the moderator override on code/blacklist changes
# methods to add/remove whitelisted/blacklisted users, ignored posts, ...
def add_whitelisted_user(user):
if user in GlobalVars.whitelisted_users or user is None:
return
GlobalVars.whitelisted_users.append(user)
with open("whitelistedUsers.txt", "wb") as f:
pickle.dump(GlobalVars.whitelisted_users, f, protocol=pickle.HIGHEST_PROTOCOL)
def add_blacklisted_user(user, message_url, post_url):
if is_blacklisted_user(user) or user is None:
return
GlobalVars.blacklisted_users.append((user, message_url, post_url))
with open("blacklistedUsers.txt", "wb") as f:
pickle.dump(GlobalVars.blacklisted_users, f, protocol=pickle.HIGHEST_PROTOCOL)
def add_auto_ignored_post(postid_site_tuple):
if postid_site_tuple is None or is_auto_ignored_post(postid_site_tuple):
return
GlobalVars.auto_ignored_posts.append(postid_site_tuple)
with open("autoIgnoredPosts.txt", "wb") as f:
pickle.dump(GlobalVars.auto_ignored_posts, f, protocol=pickle.HIGHEST_PROTOCOL)
def add_false_positive(site_post_id_tuple):
if site_post_id_tuple is None or site_post_id_tuple in GlobalVars.false_positives:
return
GlobalVars.false_positives.append(site_post_id_tuple)
with open("falsePositives.txt", "wb") as f:
pickle.dump(GlobalVars.false_positives, f, protocol=pickle.HIGHEST_PROTOCOL)
def add_ignored_post(postid_site_tuple):
if postid_site_tuple is None or postid_site_tuple in GlobalVars.ignored_posts:
return
GlobalVars.ignored_posts.append(postid_site_tuple)
with open("ignoredPosts.txt", "wb") as f:
pickle.dump(GlobalVars.ignored_posts, f, protocol=pickle.HIGHEST_PROTOCOL)
def remove_blacklisted_user(user):
blacklisted_user_data = get_blacklisted_user_data(user)
if not blacklisted_user_data:
return False
GlobalVars.blacklisted_users.remove(blacklisted_user_data)
with open("blacklistedUsers.txt", "wb") as f:
pickle.dump(GlobalVars.blacklisted_users, f, protocol=pickle.HIGHEST_PROTOCOL)
return True
def remove_whitelisted_user(user):
if user not in GlobalVars.whitelisted_users:
return False
GlobalVars.whitelisted_users.remove(user)
with open("whitelistedUsers.txt", "wb") as f:
pickle.dump(GlobalVars.whitelisted_users, f, protocol=pickle.HIGHEST_PROTOCOL)
return True
def add_why(site, post_id, why):
key = site + "/" + str(post_id)
why_data_tuple = (key, why)
GlobalVars.why_data.append(why_data_tuple)
filter_why()
with open("whyData.txt", "wb") as f:
pickle.dump(GlobalVars.why_data, f, protocol=pickle.HIGHEST_PROTOCOL)
def get_why(site, post_id):
key = site + "/" + str(post_id)
for post in GlobalVars.why_data:
if post[0] == key:
return post[1]
return None
def filter_why(max_size=50):
GlobalVars.why_data = GlobalVars.why_data[-max_size:]
def add_why_allspam(user, why):
GlobalVars.why_data_allspam.append((user, why))
filter_why_allspam()
with open("whyDataAllspam.txt", "wb") as f:
pickle.dump(GlobalVars.why_data_allspam, f, protocol=pickle.HIGHEST_PROTOCOL)
def get_why_allspam(user):
for post in GlobalVars.why_data_allspam:
if post[0] == user:
return post[1]
return None
def add_post_site_id_link(post_site_id, question_id):
GlobalVars.post_site_id_to_question[post_site_id] = question_id
def get_post_site_id_link(post_site_id):
if post_site_id in GlobalVars.post_site_id_to_question:
return GlobalVars.post_site_id_to_question[post_site_id]
return None
def filter_why_allspam(max_size=50):
GlobalVars.why_data_allspam = GlobalVars.why_data_allspam[-max_size:]
def add_latest_smokedetector_message(room, message_id):
GlobalVars.latest_smokedetector_messages[room].append(message_id)
# Keep the last 100 messages
max_size = 100
GlobalVars.latest_smokedetector_messages[room] = GlobalVars.latest_smokedetector_messages[room][-max_size:]
with open("latestMessages.txt", "wb") as f:
pickle.dump(GlobalVars.latest_smokedetector_messages, f, protocol=pickle.HIGHEST_PROTOCOL)
def add_or_update_api_data(site):
if site in GlobalVars.api_calls_per_site:
GlobalVars.api_calls_per_site[site] += 1
else:
GlobalVars.api_calls_per_site[site] = 1
with open("apiCalls.txt", "wb") as f:
pickle.dump(GlobalVars.api_calls_per_site, f, protocol=pickle.HIGHEST_PROTOCOL)
def clear_api_data():
GlobalVars.api_calls_per_site = {}
with open("apiCalls.txt", "wb") as f:
pickle.dump(GlobalVars.api_calls_per_site, f, protocol=pickle.HIGHEST_PROTOCOL)
def store_bodyfetcher_queue():
with open("bodyfetcherQueue.txt", "wb") as f:
pickle.dump(GlobalVars.bodyfetcher.queue, f, protocol=pickle.HIGHEST_PROTOCOL)
# methods that help avoid reposting alerts:
def append_to_latest_questions(host, post_id, title):
GlobalVars.latest_questions.insert(0, (host, str(post_id), title))
if len(GlobalVars.latest_questions) > 15:
GlobalVars.latest_questions.pop()
def has_already_been_posted(host, post_id, title):
for post in GlobalVars.latest_questions:
if post[0] == host and post[1] == str(post_id) and post[2] == title:
return True
return False
# method to get data from the error log:
def fetch_lines_from_error_log(line_count):
if not os.path.isfile("errorLogs.txt"):
return "The error log file does not exist."
if line_count <= 0:
return "Please request a line count greater than zero."
lines = []
with open("errorLogs.txt", "r") as f:
lines = f.readlines()[-line_count:]
formatted_lines = []
for line in lines:
formatted_lines.append(" " + line.rstrip())
fetched = os.linesep.join(formatted_lines)
if fetched.rstrip() == "":
return "The fetched part is empty. Please try another line count."
return fetched
# method to check whether a SE site exists:
def refresh_sites():
has_more = True
page = 1
while has_more:
response = requests.get("https://api.stackexchange.com/2.2/sites?filter=!%29Qpa1bTB_jCkeaZsqiQ8pDwI&pagesize=500&page=" + str(page) + "&key=lKfzuApO1ASY*NegoDzU0g((")
data = json.loads(response.text)
if "error_message" in data:
return False, data["error_message"]
if "items" not in data:
return False, "`items` not in JSON data"
if "has_more" not in data:
return False, "`has_more` not in JSON data"
GlobalVars.se_sites.extend(data["items"])
has_more = data["has_more"]
page += 1
return True, "OK"
def check_site_and_get_full_name(site):
if len(GlobalVars.se_sites) == 0:
refreshed, msg = refresh_sites()
if not refreshed:
return False, "Could not fetch sites: " + msg
for item in GlobalVars.se_sites:
full_name = item["site_url"].replace("http://", "")
short_name = item["api_site_parameter"]
if site == full_name or site == short_name:
return True, full_name
return False, "Could not find the given site."
# methods to add/remove/check users on the "notification" list
# (that is, being pinged when Smokey reports something on a specific site)
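# add_to_notification_list returns (-2, None) when the site cannot be found,
# (-1, None) when the notification is already registered, and
# (0, full_site_name) after a new notification has been stored.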
def add_to_notification_list(user_id, chat_site, room_id, se_site):
exists, site = check_site_and_get_full_name(se_site)
if not exists:
return -2, None
notification_tuple = (int(user_id), chat_site, int(room_id), site)
if notification_tuple in GlobalVars.notifications:
return -1, None
GlobalVars.notifications.append(notification_tuple)
with open("notifications.txt", "wb") as f:
pickle.dump(GlobalVars.notifications, f, protocol=pickle.HIGHEST_PROTOCOL)
return 0, site
def remove_from_notification_list(user_id, chat_site, room_id, se_site):
notification_tuple = (int(user_id), chat_site, int(room_id), se_site)
if notification_tuple not in GlobalVars.notifications:
return False
GlobalVars.notifications.remove(notification_tuple)
with open("notifications.txt", "wb") as f:
pickle.dump(GlobalVars.notifications, f, protocol=pickle.HIGHEST_PROTOCOL)
return True
def will_i_be_notified(user_id, chat_site, room_id, se_site):
notification_tuple = (int(user_id), chat_site, int(room_id), se_site)
return notification_tuple in GlobalVars.notifications
def get_all_notification_sites(user_id, chat_site, room_id):
sites = []
for notification in GlobalVars.notifications:
if notification[0] == int(user_id) and notification[1] == chat_site and notification[2] == int(room_id):
sites.append(notification[3])
return sites
def get_user_ids_on_notification_list(chat_site, room_id, se_site):
uids = []
for notification in GlobalVars.notifications:
if notification[1] == chat_site and notification[2] == int(room_id) and notification[3] == se_site:
uids.append(notification[0])
return uids
def get_user_names_on_notification_list(chat_site, room_id, se_site, client):
return [client.get_user(i).name for i in get_user_ids_on_notification_list(chat_site, room_id, se_site)]
def append_pings(original_message, names):
if len(names) != 0:
new_message = u"{0} ({1})".format(original_message, " ".join(["@" + x.replace(" ", "") for x in names]))
if len(new_message) <= 500:
return new_message
return original_message
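# e.g. append_pings("Report", ["John Doe"]) -> u"Report (@JohnDoe)"; the
# original message is returned unchanged when the pinged version would exceed
# 500 characters.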
# methods to check if someone waited long enough to use another !!/report with multiple URLs
# (to avoid SmokeDetector's chat messages being rate-limited too much)
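# can_report_now (below) returns (True, True) when the user may report again,
# otherwise (False, n) with n the number of seconds remaining in the 30-second
# cooldown.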
def add_or_update_multiple_reporter(user_id, chat_host, time_integer):
user_id = str(user_id)
for i in xrange(len(GlobalVars.multiple_reporters)):
if GlobalVars.multiple_reporters[i][0] == user_id and GlobalVars.multiple_reporters[i][1] == chat_host:
GlobalVars.multiple_reporters[i] = (GlobalVars.multiple_reporters[i][0], GlobalVars.multiple_reporters[i][1], time_integer)
return 1
GlobalVars.multiple_reporters.append((user_id, chat_host, time_integer))
def can_report_now(user_id, chat_host):
user_id = str(user_id)
for reporter in GlobalVars.multiple_reporters:
if reporter[0] == user_id and reporter[1] == chat_host:
now = time.time()
latest_report = reporter[2]
can_report_again = latest_report + 30
if now > can_report_again:
return True, True
return False, math.ceil(can_report_again - now)
return True, True
|
|
"""Support for remote Python debugging.
Some ASCII art to describe the structure:
IN PYTHON SUBPROCESS # IN IDLE PROCESS
#
# oid='gui_adapter'
+----------+ # +------------+ +-----+
| GUIProxy |--remote#call-->| GUIAdapter |--calls-->| GUI |
+-----+--calls-->+----------+ # +------------+ +-----+
| Idb | # /
+-----+<-calls--+------------+ # +----------+<--calls-/
| IdbAdapter |<--remote#call--| IdbProxy |
+------------+ # +----------+
oid='idb_adapter' #
The purpose of the Proxy and Adapter classes is to translate certain
arguments and return values that cannot be transported through the RPC
barrier, in particular frame and traceback objects.
"""
import types
import rpc
import Debugger
debugging = 0
idb_adap_oid = "idb_adapter"
gui_adap_oid = "gui_adapter"
#=======================================
#
# In the PYTHON subprocess:
frametable = {}
dicttable = {}
codetable = {}
tracebacktable = {}
def wrap_frame(frame):
fid = id(frame)
frametable[fid] = frame
return fid
def wrap_info(info):
"replace info[2], a traceback instance, by its ID"
if info is None:
return None
else:
traceback = info[2]
assert isinstance(traceback, types.TracebackType)
traceback_id = id(traceback)
tracebacktable[traceback_id] = traceback
modified_info = (info[0], info[1], traceback_id)
return modified_info
class GUIProxy:
def __init__(self, conn, gui_adap_oid):
self.conn = conn
self.oid = gui_adap_oid
def interaction(self, message, frame, info=None):
# calls rpc.SocketIO.remotecall() via run.MyHandler instance
# pass frame and traceback object IDs instead of the objects themselves
self.conn.remotecall(self.oid, "interaction",
(message, wrap_frame(frame), wrap_info(info)),
{})
class IdbAdapter:
def __init__(self, idb):
self.idb = idb
#----------called by an IdbProxy----------
def set_step(self):
self.idb.set_step()
def set_quit(self):
self.idb.set_quit()
def set_continue(self):
self.idb.set_continue()
def set_next(self, fid):
frame = frametable[fid]
self.idb.set_next(frame)
def set_return(self, fid):
frame = frametable[fid]
self.idb.set_return(frame)
def get_stack(self, fid, tbid):
##print >>sys.__stderr__, "get_stack(%r, %r)" % (fid, tbid)
frame = frametable[fid]
if tbid is None:
tb = None
else:
tb = tracebacktable[tbid]
stack, i = self.idb.get_stack(frame, tb)
##print >>sys.__stderr__, "get_stack() ->", stack
stack = [(wrap_frame(frame), k) for frame, k in stack]
##print >>sys.__stderr__, "get_stack() ->", stack
return stack, i
def run(self, cmd):
import __main__
self.idb.run(cmd, __main__.__dict__)
def set_break(self, filename, lineno):
msg = self.idb.set_break(filename, lineno)
return msg
def clear_break(self, filename, lineno):
msg = self.idb.clear_break(filename, lineno)
return msg
def clear_all_file_breaks(self, filename):
msg = self.idb.clear_all_file_breaks(filename)
return msg
#----------called by a FrameProxy----------
def frame_attr(self, fid, name):
frame = frametable[fid]
return getattr(frame, name)
def frame_globals(self, fid):
frame = frametable[fid]
dict = frame.f_globals
did = id(dict)
dicttable[did] = dict
return did
def frame_locals(self, fid):
frame = frametable[fid]
dict = frame.f_locals
did = id(dict)
dicttable[did] = dict
return did
def frame_code(self, fid):
frame = frametable[fid]
code = frame.f_code
cid = id(code)
codetable[cid] = code
return cid
#----------called by a CodeProxy----------
def code_name(self, cid):
code = codetable[cid]
return code.co_name
def code_filename(self, cid):
code = codetable[cid]
return code.co_filename
#----------called by a DictProxy----------
def dict_keys(self, did):
dict = dicttable[did]
return dict.keys()
def dict_item(self, did, key):
dict = dicttable[did]
value = dict[key]
value = repr(value)
return value
#----------end class IdbAdapter----------
def start_debugger(rpchandler, gui_adap_oid):
"""Start the debugger and its RPC link in the Python subprocess
Start the subprocess side of the split debugger and set up that side of the
RPC link by instantiating the GUIProxy, Idb debugger, and IdbAdapter
objects and linking them together. Register the IdbAdapter with the
RPCServer to handle RPC requests from the split debugger GUI via the
IdbProxy.
"""
gui_proxy = GUIProxy(rpchandler, gui_adap_oid)
idb = Debugger.Idb(gui_proxy)
idb_adap = IdbAdapter(idb)
rpchandler.register(idb_adap_oid, idb_adap)
return idb_adap_oid
#=======================================
#
# In the IDLE process:
class FrameProxy:
def __init__(self, conn, fid):
self._conn = conn
self._fid = fid
self._oid = "idb_adapter"
self._dictcache = {}
def __getattr__(self, name):
if name[:1] == "_":
raise AttributeError, name
if name == "f_code":
return self._get_f_code()
if name == "f_globals":
return self._get_f_globals()
if name == "f_locals":
return self._get_f_locals()
return self._conn.remotecall(self._oid, "frame_attr",
(self._fid, name), {})
def _get_f_code(self):
cid = self._conn.remotecall(self._oid, "frame_code", (self._fid,), {})
return CodeProxy(self._conn, self._oid, cid)
def _get_f_globals(self):
did = self._conn.remotecall(self._oid, "frame_globals",
(self._fid,), {})
return self._get_dict_proxy(did)
def _get_f_locals(self):
did = self._conn.remotecall(self._oid, "frame_locals",
(self._fid,), {})
return self._get_dict_proxy(did)
def _get_dict_proxy(self, did):
if self._dictcache.has_key(did):
return self._dictcache[did]
dp = DictProxy(self._conn, self._oid, did)
self._dictcache[did] = dp
return dp
class CodeProxy:
def __init__(self, conn, oid, cid):
self._conn = conn
self._oid = oid
self._cid = cid
def __getattr__(self, name):
if name == "co_name":
return self._conn.remotecall(self._oid, "code_name",
(self._cid,), {})
if name == "co_filename":
return self._conn.remotecall(self._oid, "code_filename",
(self._cid,), {})
class DictProxy:
def __init__(self, conn, oid, did):
self._conn = conn
self._oid = oid
self._did = did
def keys(self):
return self._conn.remotecall(self._oid, "dict_keys", (self._did,), {})
def __getitem__(self, key):
return self._conn.remotecall(self._oid, "dict_item",
(self._did, key), {})
def __getattr__(self, name):
##print >>sys.__stderr__, "failed DictProxy.__getattr__:", name
raise AttributeError, name
class GUIAdapter:
def __init__(self, conn, gui):
self.conn = conn
self.gui = gui
def interaction(self, message, fid, modified_info):
##print "interaction: (%s, %s, %s)" % (message, fid, modified_info)
frame = FrameProxy(self.conn, fid)
self.gui.interaction(message, frame, modified_info)
class IdbProxy:
def __init__(self, conn, shell, oid):
self.oid = oid
self.conn = conn
self.shell = shell
def call(self, methodname, *args, **kwargs):
##print "**IdbProxy.call %s %s %s" % (methodname, args, kwargs)
value = self.conn.remotecall(self.oid, methodname, args, kwargs)
##print "**IdbProxy.call %s returns %r" % (methodname, value)
return value
def run(self, cmd, locals):
# Ignores locals on purpose!
seq = self.conn.asyncqueue(self.oid, "run", (cmd,), {})
self.shell.interp.active_seq = seq
def get_stack(self, frame, tbid):
# passing frame and traceback IDs, not the objects themselves
stack, i = self.call("get_stack", frame._fid, tbid)
stack = [(FrameProxy(self.conn, fid), k) for fid, k in stack]
return stack, i
def set_continue(self):
self.call("set_continue")
def set_step(self):
self.call("set_step")
def set_next(self, frame):
self.call("set_next", frame._fid)
def set_return(self, frame):
self.call("set_return", frame._fid)
def set_quit(self):
self.call("set_quit")
def set_break(self, filename, lineno):
msg = self.call("set_break", filename, lineno)
return msg
def clear_break(self, filename, lineno):
msg = self.call("clear_break", filename, lineno)
return msg
def clear_all_file_breaks(self, filename):
msg = self.call("clear_all_file_breaks", filename)
return msg
def start_remote_debugger(rpcclt, pyshell):
"""Start the subprocess debugger, initialize the debugger GUI and RPC link
Request the RPCServer start the Python subprocess debugger and link. Set
up the Idle side of the split debugger by instantiating the IdbProxy,
debugger GUI, and debugger GUIAdapter objects and linking them together.
Register the GUIAdapter with the RPCClient to handle debugger GUI
interaction requests coming from the subprocess debugger via the GUIProxy.
The IdbAdapter will pass execution and environment requests coming from the
Idle debugger GUI to the subprocess debugger via the IdbProxy.
"""
global idb_adap_oid
idb_adap_oid = rpcclt.remotecall("exec", "start_the_debugger",\
(gui_adap_oid,), {})
idb_proxy = IdbProxy(rpcclt, pyshell, idb_adap_oid)
gui = Debugger.Debugger(pyshell, idb_proxy)
gui_adap = GUIAdapter(rpcclt, gui)
rpcclt.register(gui_adap_oid, gui_adap)
return gui
def close_remote_debugger(rpcclt):
"""Shut down subprocess debugger and Idle side of debugger RPC link
Request that the RPCServer shut down the subprocess debugger and link.
Unregister the GUIAdapter, which will cause a GC on the Idle process
debugger and RPC link objects. (The second reference to the debugger GUI
is deleted in PyShell.close_remote_debugger().)
"""
close_subprocess_debugger(rpcclt)
rpcclt.unregister(gui_adap_oid)
def close_subprocess_debugger(rpcclt):
rpcclt.remotecall("exec", "stop_the_debugger", (idb_adap_oid,), {})
def restart_subprocess_debugger(rpcclt):
idb_adap_oid_ret = rpcclt.remotecall("exec", "start_the_debugger",\
(gui_adap_oid,), {})
assert idb_adap_oid_ret == idb_adap_oid, 'Idb restarted with different oid'
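# --- Illustrative note (added; not part of the original module) ---
# Expected call order on the IDLE side, assuming an established RPCClient
# (rpcclt) and a PyShell instance (pyshell); both come from other IDLE
# modules and are not defined here:
#
#     gui = start_remote_debugger(rpcclt, pyshell)   # build proxies, open the GUI
#     ...                                            # debug session runs
#     close_remote_debugger(rpcclt)                  # tear the link back down
#
# restart_subprocess_debugger(rpcclt) is meant for use after the subprocess
# has been restarted; it asserts that the IdbAdapter keeps the same oid.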
|
|
import os
import json
import time
import uuid
import signal
import logging
from threading import Thread
from addict import Dict
from six.moves.http_client import HTTPConnection
from .process import Process
from .interface import ExecutorDriver
from .utils import parse_duration, encode_data, decode_data
logger = logging.getLogger(__name__)
class MesosExecutorDriver(Process, ExecutorDriver):
_timeout = 10
def __init__(self, executor, use_addict=False):
env = os.environ
agent_endpoint = env['MESOS_AGENT_ENDPOINT']
super(MesosExecutorDriver, self).__init__(master=agent_endpoint)
framework_id = env['MESOS_FRAMEWORK_ID']
assert framework_id
self.framework_id = dict(value=framework_id)
executor_id = env['MESOS_EXECUTOR_ID']
self.executor_id = dict(value=executor_id)
grace_shutdown_period = env.get('MESOS_EXECUTOR_SHUTDOWN_GRACE_PERIOD')
if grace_shutdown_period:
self.grace_shutdown_period = parse_duration(grace_shutdown_period)
else:
self.grace_shutdown_period = 0.0
self.checkpoint = bool(env.get('MESOS_CHECKPOINT'))
self.local = bool(env.get('MESOS_LOCAL'))
self.executor = executor
self.framework_info = None
self.executor_info = None
self.tasks = {}
self.updates = {}
self._conn = None
self._dict_cls = Dict if use_addict else dict
def _delay_kill(self):
def _():
try:
time.sleep(self.grace_shutdown_period)
os.killpg(0, signal.SIGKILL)
except Exception:
logger.exception('Failed to force kill executor')
t = Thread(target=_)
t.daemon = True
t.start()
def gen_request(self):
body = json.dumps(dict(
type='SUBSCRIBE',
framework_id=self.framework_id,
executor_id=self.executor_id,
subscribe=dict(
unacknowledged_tasks=list(self.tasks.values()),
unacknowledged_updates=list(self.updates.values()),
),
))
request = ('POST /api/v1/executor HTTP/1.1\r\nHost: %s\r\n'
'Content-Type: application/json\r\n'
'Accept: application/json\r\n'
'Connection: close\r\nContent-Length: %s\r\n\r\n%s') % (
self.master, len(body), body
)
return request.encode('utf-8')
def on_close(self):
if self._conn is not None:
self._conn.close()
self._conn = None
self.version = None
self.executor.disconnected(self)
if not self.checkpoint:
if not self.local:
self._delay_kill()
self.executor.shutdown(self)
self.abort()
def on_event(self, event):
if 'type' in event:
_type = event['type'].lower()
if _type == 'shutdown':
self.on_shutdown()
return
if _type == 'heartbeat':
return
if _type not in event:
logger.error(
'Missing `%s` in event %s' %
(_type, event))
return
event = event[_type]
func_name = 'on_%s' % (_type,)
func = getattr(self, func_name, None)
if func is not None:
func(event)
else:
logger.error('Unknown type:%s, event:%s' % (_type, event))
else:
logger.error('Unknown event:%s' % (event,))
def on_subscribed(self, info):
executor_info = info['executor_info']
framework_info = info['framework_info']
agent_info = info['agent_info']
assert executor_info['executor_id'] == self.executor_id
assert framework_info['id'] == self.framework_id
if self.executor_info is None or self.framework_info is None:
self.executor_info = executor_info
self.framework_info = framework_info
self.executor.registered(
self, self._dict_cls(executor_info),
self._dict_cls(framework_info), self._dict_cls(agent_info)
)
else:
self.executor.reregistered(self, self._dict_cls(agent_info))
def on_launch(self, event):
task_info = event['task']
task_id = task_info['task_id']['value']
assert task_id not in self.tasks
self.tasks[task_id] = task_info
self.executor.launchTask(self, self._dict_cls(task_info))
def on_kill(self, event):
task_id = event['task_id']
self.executor.killTask(self, self._dict_cls(task_id))
def on_acknowledged(self, event):
task_id = event['task_id']['value']
uuid_ = uuid.UUID(bytes=decode_data(event['uuid']))
self.updates.pop(uuid_, None)
self.tasks.pop(task_id, None)
def on_message(self, event):
data = event['data']
self.executor.frameworkMessage(self, data)
def on_error(self, event):
message = event['message']
self.executor.error(self, message)
def on_shutdown(self):
if not self.local:
self._delay_kill()
self.executor.shutdown(self)
self.abort()
def _get_conn(self):
if not self.connected:
return None
if self._conn is not None:
return self._conn
host, port = self.master.split(':', 2)
port = int(port)
self._conn = HTTPConnection(host, port, timeout=self._timeout)
return self._conn
    def _send(self, body, path='/api/v1/executor', method='POST', headers=None):
        # Use a fresh dict per call; a mutable default argument would be
        # shared across calls and keep accumulating headers.
        headers = dict(headers) if headers else {}
        with self._lock:
conn = self._get_conn()
if conn is None:
raise RuntimeError('Not connected yet')
if body != '':
data = json.dumps(body).encode('utf-8')
headers['Content-Type'] = 'application/json'
else:
data = ''
stream_id = self.stream_id
if stream_id:
headers['Mesos-Stream-Id'] = stream_id
try:
conn.request(method, path, body=data, headers=headers)
resp = conn.getresponse()
except Exception:
self._conn.close()
self._conn = None
raise
if resp.status < 200 or resp.status >= 300:
raise RuntimeError(
'Failed to send request code=%s, message=%s' % (
resp.status, resp.read()
)
)
result = resp.read()
if not result:
return {}
try:
return json.loads(result.decode('utf-8'))
except Exception:
return {}
def sendStatusUpdate(self, status):
if 'timestamp' not in status:
status['timestamp'] = int(time.time())
if 'uuid' not in status:
status['uuid'] = encode_data(uuid.uuid4().bytes)
if 'source' not in status:
status['source'] = 'SOURCE_EXECUTOR'
body = dict(
type='UPDATE',
executor_id=self.executor_id,
framework_id=self.framework_id,
update=dict(
status=status,
),
)
self._send(body)
def sendFrameworkMessage(self, data):
body = dict(
type='MESSAGE',
executor_id=self.executor_id,
framework_id=self.framework_id,
message=dict(
data=data,
),
)
self._send(body)
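# --- Illustrative sketch (added; not part of the original module) ---
# A minimal executor showing how the callbacks invoked by the driver above
# (registered, launchTask, killTask, frameworkMessage, error, shutdown, ...)
# fit together with sendStatusUpdate().  MinimalExecutor and the task states
# used below are assumptions based on the Mesos executor HTTP API, and
# start()/join() are assumed to come from the Process base class, which is
# not shown in this module.
if __name__ == '__main__':
    class MinimalExecutor(object):
        def registered(self, driver, executor_info, framework_info, agent_info):
            logger.info('registered with agent')

        def reregistered(self, driver, agent_info):
            logger.info('re-registered with agent')

        def disconnected(self, driver):
            logger.info('disconnected from agent')

        def launchTask(self, driver, task):
            # Report the task as running and then immediately finished;
            # a real executor would do its work in between.
            driver.sendStatusUpdate(dict(task_id=task['task_id'],
                                         state='TASK_RUNNING'))
            driver.sendStatusUpdate(dict(task_id=task['task_id'],
                                         state='TASK_FINISHED'))

        def killTask(self, driver, task_id):
            driver.sendStatusUpdate(dict(task_id=task_id, state='TASK_KILLED'))

        def frameworkMessage(self, driver, data):
            logger.info('framework message: %s', data)

        def error(self, driver, message):
            logger.error('error from agent: %s', message)

        def shutdown(self, driver):
            logger.info('shutdown requested')

    # Requires the MESOS_* environment variables that __init__ reads above,
    # i.e. this only makes sense when launched by a Mesos agent.
    driver = MesosExecutorDriver(MinimalExecutor(), use_addict=False)
    driver.start()
    driver.join()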
|
|
# $Id$
#
# Copyright (C) 2000-2008 greg Landrum and Rational Discovery LLC
# All Rights Reserved
#
""" code for dealing with composite models
For a model to be usable here, it should support the following API:
- _ClassifyExample(example)_, returns a classification
Other compatibility notes:
1) To use _Composite.Grow_ there must be some kind of builder
functionality which returns a 2-tuple containing (model,percent accuracy).
2) The models should be pickleable
  3) It would be helpful if the models support the __cmp__ method so that
     the membership tests used to ensure models are unique will work.
"""
from rdkit.ML.Data import DataUtils
import cPickle
import math
import numpy
class Composite(object):
"""a composite model
**Notes**
- adding a model which is already present just results in its count
field being incremented and the errors being averaged.
- typical usage:
1) grow the composite with AddModel until happy with it
2) call AverageErrors to calculate the average error values
3) call SortModels to put things in order by either error or count
- Composites can support individual models requiring either quantized or
nonquantized data. This is done by keeping a set of quantization bounds
(_QuantBounds_) in the composite and quantizing data passed in when required.
Quantization bounds can be set and interrogated using the
_Get/SetQuantBounds()_ methods. When models are added to the composite,
it can be indicated whether or not they require quantization.
- Composites are also capable of extracting relevant variables from longer lists.
This is accessible using _SetDescriptorNames()_ to register the descriptors about
which the composite cares and _SetInputOrder()_ to tell the composite what the
ordering of input vectors will be. **Note** there is a limitation on this: each
model needs to take the same set of descriptors as inputs. This could be changed.
"""
def __init__(self):
self.modelList=[]
self.errList=[]
self.countList=[]
self.modelVotes=[]
self.quantBounds = None
self.nPossibleVals = None
self.quantizationRequirements=[]
self._descNames = []
self._mapOrder = None
self.activityQuant=[]
def SetModelFilterData(self, modelFilterFrac=0.0, modelFilterVal=0.0) :
self._modelFilterFrac = modelFilterFrac
self._modelFilterVal = modelFilterVal
def SetDescriptorNames(self,names):
""" registers the names of the descriptors this composite uses
**Arguments**
- names: a list of descriptor names (strings).
**NOTE**
the _names_ list is not
copied, so if you modify it later, the composite itself will also be modified.
"""
self._descNames = names
def GetDescriptorNames(self):
""" returns the names of the descriptors this composite uses
"""
return self._descNames
def SetQuantBounds(self,qBounds,nPossible=None):
""" sets the quantization bounds that the composite will use
**Arguments**
- qBounds: a list of quantization bounds, each quantbound is a
list of boundaries
- nPossible: a list of integers indicating how many possible values
each descriptor can take on.
**NOTE**
- if the two lists are of different lengths, this will assert out
- neither list is copied, so if you modify it later, the composite
itself will also be modified.
"""
if nPossible is not None:
assert len(qBounds)==len(nPossible),'qBounds/nPossible mismatch'
self.quantBounds = qBounds
self.nPossibleVals = nPossible
def GetQuantBounds(self):
""" returns the quantization bounds
**Returns**
a 2-tuple consisting of:
1) the list of quantization bounds
2) the nPossibleVals list
"""
return self.quantBounds,self.nPossibleVals
def GetActivityQuantBounds(self):
if not hasattr(self,'activityQuant'):
self.activityQuant=[]
return self.activityQuant
def SetActivityQuantBounds(self,bounds):
self.activityQuant=bounds
def QuantizeActivity(self,example,activityQuant=None,actCol=-1):
if activityQuant is None:
activityQuant=self.activityQuant
if activityQuant:
example = example[:]
act = example[actCol]
for box in xrange(len(activityQuant)):
if act < activityQuant[box]:
act = box
break
else:
act = box + 1
example[actCol] = act
return example
def QuantizeExample(self,example,quantBounds=None):
""" quantizes an example
**Arguments**
- example: a data point (list, tuple or numpy array)
- quantBounds: a list of quantization bounds, each quantbound is a
list of boundaries. If this argument is not provided, the composite
will use its own quantBounds
**Returns**
the quantized example as a list
**Notes**
- If _example_ is different in length from _quantBounds_, this will
assert out.
- This is primarily intended for internal use
"""
if quantBounds is None:
quantBounds = self.quantBounds
assert len(example)==len(quantBounds),'example/quantBounds mismatch'
quantExample = [None]*len(example)
for i in xrange(len(quantBounds)):
bounds = quantBounds[i]
p = example[i]
if len(bounds):
for box in xrange(len(bounds)):
if p < bounds[box]:
p = box
break
else:
p = box + 1
else:
if i != 0:
p = int(p)
quantExample[i] = p
return quantExample
def MakeHistogram(self):
""" creates a histogram of error/count pairs
**Returns**
the histogram as a series of (error, count) 2-tuples
"""
nExamples = len(self.modelList)
histo = []
i = 1
lastErr = self.errList[0]
countHere = self.countList[0]
eps = 0.001
while i < nExamples:
if self.errList[i]-lastErr > eps:
histo.append((lastErr,countHere))
lastErr = self.errList[i]
countHere = self.countList[i]
else:
countHere = countHere + self.countList[i]
i = i + 1
return histo
def CollectVotes(self,example,quantExample,appendExample=0,
onlyModels=None):
""" collects votes across every member of the composite for the given example
**Arguments**
- example: the example to be voted upon
- quantExample: the quantized form of the example
- appendExample: toggles saving the example on the models
- onlyModels: if provided, this should be a sequence of model
indices. Only the specified models will be used in the
prediction.
**Returns**
a list with a vote from each member
"""
if not onlyModels:
onlyModels = range(len(self))
nModels = len(onlyModels)
votes = [-1]*len(self)
for i in onlyModels:
if self.quantizationRequirements[i]:
votes[i] = int(round(self.modelList[i].ClassifyExample(quantExample,
appendExamples=appendExample)))
else:
votes[i] = int(round(self.modelList[i].ClassifyExample(example,
appendExamples=appendExample)))
return votes
def ClassifyExample(self,example,threshold=0,appendExample=0,
onlyModels=None):
""" classifies the given example using the entire composite
**Arguments**
- example: the data to be classified
- threshold: if this is a number greater than zero, then a
classification will only be returned if the confidence is
above _threshold_. Anything lower is returned as -1.
- appendExample: toggles saving the example on the models
- onlyModels: if provided, this should be a sequence of model
indices. Only the specified models will be used in the
prediction.
**Returns**
a (result,confidence) tuple
**FIX:**
statistics sucks... I'm not seeing an obvious way to get
the confidence intervals. For that matter, I'm not seeing
an unobvious way.
For now, this is just treated as a voting problem with the confidence
measure being the percent of models which voted for the winning result.
"""
if self._mapOrder is not None:
example = self._RemapInput(example)
if self.GetActivityQuantBounds():
example = self.QuantizeActivity(example)
if self.quantBounds is not None and 1 in self.quantizationRequirements:
quantExample = self.QuantizeExample(example,self.quantBounds)
else:
quantExample = []
if not onlyModels:
onlyModels = range(len(self))
self.modelVotes = self.CollectVotes(example,quantExample,appendExample=appendExample,
onlyModels=onlyModels)
votes = [0]*self.nPossibleVals[-1]
for i in onlyModels:
res = self.modelVotes[i]
votes[res] = votes[res] + self.countList[i]
totVotes = sum(votes)
res = numpy.argmax(votes)
conf = float(votes[res])/float(totVotes)
if conf > threshold:
return res,conf
else:
return -1,conf
def GetVoteDetails(self):
""" returns the votes from the last classification
    This will be _None_ if nothing has yet been classified
"""
return self.modelVotes
def _RemapInput(self,inputVect):
""" remaps the input so that it matches the expected internal ordering
**Arguments**
- inputVect: the input to be reordered
**Returns**
      - a list with the reordered (and possibly shorter) data
**Note**
- you must call _SetDescriptorNames()_ and _SetInputOrder()_ for this to work
- this is primarily intended for internal use
"""
order = self._mapOrder
if order is None:
return inputVect
remappedInput = [None]*len(order)
for i in xrange(len(order)-1):
remappedInput[i] = inputVect[order[i]]
if order[-1] == -1:
remappedInput[-1] = 0
else:
remappedInput[-1] = inputVect[order[-1]]
return remappedInput
def GetInputOrder(self):
""" returns the input order (used in remapping inputs)
"""
return self._mapOrder
def SetInputOrder(self,colNames):
""" sets the input order
**Arguments**
- colNames: a list of the names of the data columns that will be passed in
**Note**
- you must call _SetDescriptorNames()_ first for this to work
- if the local descriptor names do not appear in _colNames_, this will
raise an _IndexError_ exception.
"""
import types
if type(colNames)!=types.ListType:
colNames = list(colNames)
descs = [x.upper() for x in self.GetDescriptorNames()]
self._mapOrder = [None]*len(descs)
colNames = [x.upper() for x in colNames]
# FIX: I believe that we're safe assuming that field 0
# is always the label, and therefore safe to ignore errors,
# but this may not be the case
try:
self._mapOrder[0] = colNames.index(descs[0])
except ValueError:
self._mapOrder[0] = 0
for i in xrange(1,len(descs)-1):
try:
self._mapOrder[i] = colNames.index(descs[i])
except ValueError:
raise ValueError,'cannot find descriptor name: %s in set %s'%(repr(descs[i]),repr(colNames))
try:
self._mapOrder[-1] = colNames.index(descs[-1])
except ValueError:
# ok, there's no obvious match for the final column (activity)
# We'll take the last one:
#self._mapOrder[-1] = len(descs)-1
self._mapOrder[-1] = -1
def Grow(self,examples,attrs,nPossibleVals,buildDriver,pruner=None,
nTries=10,pruneIt=0,
needsQuantization=1,progressCallback=None,
**buildArgs):
""" Grows the composite
**Arguments**
- examples: a list of examples to be used in training
- attrs: a list of the variables to be used in training
- nPossibleVals: this is used to provide a list of the number
of possible values for each variable. It is used if the
local quantBounds have not been set (for example for when you
are working with data which is already quantized).
- buildDriver: the function to call to build the new models
- pruner: a function used to "prune" (reduce the complexity of)
the resulting model.
- nTries: the number of new models to add
- pruneIt: toggles whether or not pruning is done
- needsQuantization: used to indicate whether or not this type of model
requires quantized data
- **buildArgs: all other keyword args are passed to _buildDriver_
**Note**
- new models are *added* to the existing ones
"""
try:
silent = buildArgs['silent']
except:
silent = 0
buildArgs['silent']=1
buildArgs['calcTotalError']=1
if self._mapOrder is not None:
examples = map(self._RemapInput,examples)
if self.GetActivityQuantBounds():
for i in xrange(len(examples)):
examples[i] = self.QuantizeActivity(examples[i])
nPossibleVals[-1]=len(self.GetActivityQuantBounds())+1
if self.nPossibleVals is None:
self.nPossibleVals = nPossibleVals[:]
if needsQuantization:
trainExamples = [None]*len(examples)
nPossibleVals = self.nPossibleVals
for i in xrange(len(examples)):
trainExamples[i] = self.QuantizeExample(examples[i],self.quantBounds)
else:
trainExamples = examples
for i in xrange(nTries):
trainSet = None
if (hasattr(self, '_modelFilterFrac')) and (self._modelFilterFrac != 0) :
trainIdx, temp = DataUtils.FilterData(trainExamples, self._modelFilterVal,
self._modelFilterFrac,-1, indicesOnly=1)
trainSet = [trainExamples[x] for x in trainIdx]
else:
trainSet = trainExamples
#print "Training model %i with %i out of %i examples"%(i, len(trainSet), len(trainExamples))
model,frac = apply(buildDriver,(trainSet,attrs,nPossibleVals),
buildArgs)
if pruneIt:
model,frac2 = pruner(model,model.GetTrainingExamples(),
model.GetTestExamples(),
minimizeTestErrorOnly=0)
frac = frac2
if hasattr(self, '_modelFilterFrac') and self._modelFilterFrac!=0 and \
hasattr(model,'_trainIndices'):
# correct the model's training indices:
trainIndices = [trainIdx[x] for x in model._trainIndices]
model._trainIndices = trainIndices
self.AddModel(model,frac,needsQuantization)
if not silent and (nTries < 10 or i % (nTries/10) == 0):
print 'Cycle: % 4d'%(i)
if progressCallback is not None:
progressCallback(i)
def ClearModelExamples(self):
for i in range(len(self)):
m = self.GetModel(i)
try:
m.ClearExamples()
except AttributeError:
pass
def Pickle(self,fileName='foo.pkl',saveExamples=0):
""" Writes this composite off to a file so that it can be easily loaded later
**Arguments**
- fileName: the name of the file to be written
- saveExamples: if this is zero, the individual models will have
their stored examples cleared.
"""
if not saveExamples:
self.ClearModelExamples()
pFile = open(fileName,'wb+')
cPickle.dump(self,pFile,1)
pFile.close()
def AddModel(self,model,error,needsQuantization=1):
""" Adds a model to the composite
**Arguments**
- model: the model to be added
- error: the model's error
- needsQuantization: a toggle to indicate whether or not this model
requires quantized inputs
**NOTE**
- this can be used as an alternative to _Grow()_ if you already have
some models constructed
      - the errList is used as an accumulator;
        you probably want to call _AverageErrors_ once the forest is finished
"""
if model in self.modelList:
try:
idx = self.modelList.index(model)
except ValueError:
# FIX: we should never get here, but sometimes we do anyway
self.modelList.append(model)
self.errList.append(error)
self.countList.append(1)
self.quantizationRequirements.append(needsQuantization)
else:
self.errList[idx] = self.errList[idx]+error
self.countList[idx] = self.countList[idx] + 1
else:
self.modelList.append(model)
self.errList.append(error)
self.countList.append(1)
self.quantizationRequirements.append(needsQuantization)
def AverageErrors(self):
""" convert local summed error to average error
"""
self.errList = map(lambda x,y:x/y,self.errList,self.countList)
def SortModels(self,sortOnError=1):
""" sorts the list of models
**Arguments**
sortOnError: toggles sorting on the models' errors rather than their counts
"""
if sortOnError:
order = numpy.argsort(self.errList)
else:
order = numpy.argsort(self.countList)
# these elaborate contortions are required because, at the time this
# code was written, Numeric arrays didn't unpickle so well...
self.modelList = [self.modelList[x] for x in order]
self.countList = [self.countList[x] for x in order]
self.errList = [self.errList[x] for x in order]
def GetModel(self,i):
""" returns a particular model
"""
return self.modelList[i]
def SetModel(self,i,val):
""" replaces a particular model
**Note**
This is included for the sake of completeness, but you need to be
*very* careful when you use it.
"""
self.modelList[i] = val
def GetCount(self,i):
""" returns the count of the _i_th model
"""
return self.countList[i]
def SetCount(self,i,val):
""" sets the count of the _i_th model
"""
self.countList[i] = val
def GetError(self,i):
""" returns the error of the _i_th model
"""
return self.errList[i]
def SetError(self,i,val):
""" sets the error of the _i_th model
"""
self.errList[i] = val
def GetDataTuple(self,i):
""" returns all relevant data about a particular model
**Arguments**
i: an integer indicating which model should be returned
**Returns**
a 3-tuple consisting of:
1) the model
2) its count
3) its error
"""
return (self.modelList[i],self.countList[i],self.errList[i])
def SetDataTuple(self,i,tup):
""" sets all relevant data for a particular tree in the forest
**Arguments**
- i: an integer indicating which model should be returned
- tup: a 3-tuple consisting of:
1) the model
2) its count
3) its error
**Note**
This is included for the sake of completeness, but you need to be
*very* careful when you use it.
"""
self.modelList[i],self.countList[i],self.errList[i] = tup
def GetAllData(self):
""" Returns everything we know
**Returns**
a 3-tuple consisting of:
1) our list of models
2) our list of model counts
3) our list of model errors
"""
return (self.modelList,self.countList,self.errList)
def __len__(self):
""" allows len(composite) to work
"""
return len(self.modelList)
def __getitem__(self,which):
""" allows composite[i] to work, returns the data tuple
"""
return self.GetDataTuple(which)
def __str__(self):
""" returns a string representation of the composite
"""
outStr= 'Composite\n'
for i in xrange(len(self.modelList)):
outStr = outStr + \
               ' Model % 4d: % 5d occurrences %%% 5.2f average error\n'%(i,self.countList[i],
100.*self.errList[i])
return outStr
if __name__ == '__main__':
if 0:
from rdkit.ML.DecTree import DecTree
c = Composite()
n = DecTree.DecTreeNode(None,'foo')
c.AddModel(n,0.5)
c.AddModel(n,0.5)
c.AverageErrors()
c.SortModels()
print c
qB = [[],[.5,1,1.5]]
exs = [['foo',0],['foo',.4],['foo',.6],['foo',1.1],['foo',2.0]]
print 'quantBounds:',qB
for ex in exs:
q = c.QuantizeExample(ex,qB)
print ex,q
else:
pass
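# --- Illustrative sketch (added; not part of the original RDKit module) ---
# Minimal demonstration of the AddModel / AverageErrors / SortModels /
# ClassifyExample flow described in the class docstring, using a stand-in
# model that satisfies the API from the module docstring (a ClassifyExample()
# method returning a classification).  _ConstantModel and the numbers below
# are hypothetical; the guard mirrors the `if 0:` block above so the sketch
# never runs on import.
if __name__ == '__main__' and 0:
  class _ConstantModel(object):
    def __init__(self, answer):
      self.answer = answer
    def ClassifyExample(self, example, appendExamples=0):
      return self.answer
  comp = Composite()
  # the last entry of nPossible is the number of result classes
  comp.SetQuantBounds([[], [.5, 1.0]], [0, 2])
  modelA = _ConstantModel(1)
  comp.AddModel(modelA, 0.5, needsQuantization=0)
  # adding the same model again just bumps its count and accumulates its error
  comp.AddModel(modelA, 0.5, needsQuantization=0)
  comp.AddModel(_ConstantModel(0), 0.4, needsQuantization=0)
  comp.AverageErrors()
  comp.SortModels()
  res, conf = comp.ClassifyExample(['label', 0.7], appendExample=0)
  print 'prediction: %s  confidence: %.2f' % (res, conf)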
|
|
__file__ = 'OffSystem_v1'
__date__ = '5/29/14'
__author__ = 'ABREZNIC'
import os, arcpy, xlwt, datetime, math
#date
now = datetime.datetime.now()
curMonth = now.strftime("%m")
curDay = now.strftime("%d")
curYear = now.strftime("%Y")
today = curYear + "_" + curMonth + "_" + curDay
#variables
# check0 = arcpy.GetParameterAsText(0)
# check1 = arcpy.GetParameterAsText(1)
# check2 = arcpy.GetParameterAsText(2)
# check3 = arcpy.GetParameterAsText(3)
# check4 = arcpy.GetParameterAsText(4)
# check5 = arcpy.GetParameterAsText(5)
# check6 = arcpy.GetParameterAsText(6)
check0 = "C:\\TxDOT\\Scripts\\QC\\Error Checks\\GOOD"
check1 = "true"
check2 = "false"
check3 = "false"
check4 = "false"
check5 = "false"
check6 = "true"
if check1 != "true":
check1 = "false"
if check2 != "true":
check2 = "false"
if check3 != "true":
check3 = "false"
if check4 != "true":
check4 = "false"
if check5 != "true":
check5 = "false"
if check6 != "true":
check6 = "false"
qcfolder = check0
workspace = qcfolder + "\\" + today
where = """ ( RDBD_TYPE = 'CNCTR-GS' AND RTE_CLASS = '1' ) OR( (RTE_CLASS = '2' OR RTE_CLASS = '3') AND RDBD_TYPE = 'KG' AND RTE_CLASS <> '8' ) """
database = workspace + "\\Comanche_Copy.gdb"
roadways = database + "\\TXDOT_Roadways"
subfiles = database + "\\SUBFILES"
cities = database + "\\City"
districts = database + "\\District"
if not os.path.exists(workspace):
os.makedirs(workspace)
else:
arcpy.Delete_management(database)
for file in os.listdir(workspace):
thefile = os.path.join(workspace, file)
os.remove(thefile)
txdotroads = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.Roadways\\TPP_GIS.APP_TPP_GIS_ADMIN.TXDOT_Roadways"
txdotsubs = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.SUBFILES"
# txdotroads = "T:\\DATAMGT\\MAPPING\\_Comanche Backup\\Comanche Backup July 9th.gdb\\Roadways\\TXDOT_Roadways_July_9th"
# txdotsubs = "T:\\DATAMGT\\MAPPING\\_Comanche Backup\\Comanche Backup July 9th.gdb\\SUBFILES_July_9th"
txdotcity = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.City\\TPP_GIS.APP_TPP_GIS_ADMIN.City"
txdotdist = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.District\\TPP_GIS.APP_TPP_GIS_ADMIN.District"
def copylocal():
arcpy.CreateFileGDB_management(workspace, "Comanche_Copy.gdb")
arcpy.TableSelect_analysis(txdotsubs, subfiles, "(SUBFILE = 2 AND ADMIN_SYSTEM <> 8) OR SUBFILE = 3 OR SUBFILE = 1")
arcpy.Copy_management(txdotcity, cities)
arcpy.Copy_management(txdotdist, districts)
arcpy.SpatialJoin_analysis(txdotroads, districts, roadways)
def overlap():
arcpy.Select_analysis(roadways, database + "\\FC_Streets", """ RTE_CLASS = '3' """)
arcpy.Erase_analysis(database + "\\FC_Streets", cities, database + "\\FC_Streets_Errors")
arcpy.Clip_analysis(roadways, cities, database + "\\City_Roads")
arcpy.Select_analysis(database + "\\City_Roads", database + "\\County_Roads_Errors", """ RTE_CLASS = '2' """)
arcpy.Merge_management([database + "\\County_Roads_Errors", database + "\\FC_Streets_Errors"],
database + "\\MergedErrors")
arcpy.SpatialJoin_analysis(database + "\\MergedErrors", districts, workspace + "\\City_OverlapErrors.shp")
arcpy.Delete_management(database + "\\City_Roads")
arcpy.Delete_management(database + "\\FC_Streets")
arcpy.Delete_management(database + "\\County_Roads_Errors")
arcpy.Delete_management(database + "\\FC_Streets_Errors")
arcpy.Delete_management(database + "\\MergedErrors")
errors = []
cursor = arcpy.UpdateCursor(workspace + "\\City_OverlapErrors.shp")
counter = 0
for row in cursor:
geom = row.shape
len = geom.length * .000621371
if len < .003:
cursor.deleteRow(row)
else:
row.setValue("RTE_LEN", len)
cursor.updateRow(row)
rowinfo = [row.RTE_ID, row.RTE_LEN, row.DIST_NM, row.DIST_NBR]
errors.append(rowinfo)
counter += 1
arcpy.AddMessage(str(counter) + " overlap errors.")
del cursor
del row
return errors
def routeopen():
errors = []
openstatus = {}
counter = 0
whereto = """ ( RDBD_TYPE = 'CNCTR-GS' AND RTE_CLASS = '1' ) OR( (RTE_CLASS = '2' OR RTE_CLASS = '3') AND RDBD_TYPE = 'KG' AND RTE_CLASS <> '8' ) OR( RTE_NM = '183A' AND RTE_ID LIKE '183A-%') """
cursor = arcpy.SearchCursor(roadways, whereto)
for row in cursor:
id = row.RTE_ID
if id is not None and id != "":
open = str(row.RTE_OPEN)
length = row.RTE_LEN
key = id + "=" + open
if key not in openstatus.keys():
openstatus[key] = length
else:
openstatus[key] = openstatus[key] + length
else:
errorinfo = []
oid = str(row.OBJECTID)
errorinfo.append("OID: " + oid)
errorinfo.append("N/A")
errorinfo.append("N/A")
errorinfo.append("BAD RTE_ID")
errorinfo.append(row.DIST_NBR)
errors.append(errorinfo)
counter += 1
arcpy.AddMessage(str(counter) + " null/bad RouteID errors.")
del cursor
counter = 0
hwystatus = {}
cursor = arcpy.SearchCursor(subfiles)
for row in cursor:
id = row.RTE_ID
length = row.LEN_OF_SECTION
status = row.HIGHWAY_STATUS
if status == 4 or status == 1:
thiskey = id + "=" + str(1)
if thiskey in openstatus.keys():
if thiskey in hwystatus:
hwystatus[thiskey] = hwystatus[thiskey] + length
else:
hwystatus[thiskey] = length
else:
errorinfo = []
errorinfo.append(id)
errorinfo.append("N/A")
errorinfo.append(status)
errorinfo.append("RTE_ID has SUBFILES with status which does not match TxDOT_Roadways' RTE_OPEN")
errorinfo.append(row.DISTRICT)
errors.append(errorinfo)
counter += 1
elif status == 0:
thiskey = id + "=" + str(0)
if thiskey in openstatus.keys():
if thiskey in hwystatus:
hwystatus[thiskey] = hwystatus[thiskey] + length
else:
hwystatus[thiskey] = length
else:
errorinfo = []
errorinfo.append(id)
errorinfo.append("N/A")
errorinfo.append(status)
errorinfo.append("RTE_ID has SUBFILES with status which does not match TxDOT_Roadways' RTE_OPEN")
errorinfo.append(row.DISTRICT)
errors.append(errorinfo)
counter += 1
else:
errorinfo = []
errorinfo.append(id)
errorinfo.append("N/A")
errorinfo.append(status)
errorinfo.append("HIGHWAY_STATUS must be 0 or 4")
errorinfo.append(row.DISTRICT)
errors.append(errorinfo)
counter += 1
del cursor
for key in openstatus.keys():
if key in hwystatus.keys():
linelen = openstatus[key]
sublen = hwystatus[key]
id = key.split("=")[0]
open = key.split("=")[1]
if abs(linelen - sublen) > .004:
cursor = arcpy.SearchCursor(subfiles,"RTE_ID = '"+id+"'")
for row in cursor:
Dist_Num = row.DISTRICT
try:
errorinfo = []
errorinfo.append(id)
errorinfo.append(open)
errorinfo.append("N/A")
errorinfo.append("Length error. SUBFILES LEN_OF_SECTIONS does not match ROADWAYS Route_Length")
errorinfo.append(Dist_Num)
errors.append(errorinfo)
except:
errorinfo = []
errorinfo.append(id)
errorinfo.append(open)
errorinfo.append("N/A")
errorinfo.append("RTE_ID does not exist in SUBFILES")
errorinfo.append("")
errors.append(errorinfo)
arcpy.AddMessage("check out: " + str(id))
counter += 1
else:
pass
else:
id = key.split("=")[0]
open = key.split("=")[1]
cursor = arcpy.SearchCursor(subfiles,"RTE_ID = '"+id+"'")
for row in cursor:
Dist_Num = row.DISTRICT
try:
errorinfo = []
errorinfo.append(id)
errorinfo.append(open)
errorinfo.append("N/A")
errorinfo.append("RTE_ID in TxDOT_Roadways with this RTE_OPEN does not match SUBFILES' HIGHWAY_STATUS")
errorinfo.append(Dist_Num)
errors.append(errorinfo)
except:
errorinfo = []
errorinfo.append(id)
errorinfo.append(open)
errorinfo.append("N/A")
errorinfo.append("RTE_ID does not exist in SUBFILES")
errorinfo.append("")
errors.append(errorinfo)
arcpy.AddMessage("check out: " + str(id))
counter += 1
arcpy.AddMessage(str(counter) + " subfile vs roadways Route Open errors.")
return errors
def measurelength():
counter = 0
cursor = arcpy.SearchCursor(roadways, where)
errors = []
for row in cursor:
errorinfo = []
id = row.RTE_ID
geom = row.shape
ext = geom.extent
Mmin = round(ext.MMin, 3)
Mmax = round(ext.MMax, 3)
Mdiff = abs(Mmax - Mmin)
wholelen = geom.length * .000621371
shp_len = round(wholelen, 3)
rte_len = row.RTE_LEN
testlen = abs(shp_len - Mdiff)
if rte_len is not None and id is not None:
if testlen <= .003 and abs(rte_len - shp_len) > .003:
oid = str(row.OBJECTID)
arcpy.AddMessage("RTE_LEN replaced: " + str(oid) + "," + str(rte_len) + "," + str(shp_len) + "," + str(Mdiff))
# cur = arcpy.UpdateCursor(txdotroads, "OBJECTID = " + oid)
# for i in cur:
# i.setValue("RTE_LEN", wholelen)
# cur.updateRow(i)
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append(abs(shp_len - Mdiff))
errors.append(errorinfo)
counter += 1
elif abs(shp_len - Mdiff) > .003:
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append(abs(shp_len - Mdiff))
errors.append(errorinfo)
counter += 1
elif abs(rte_len - Mdiff) > .003:
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append(abs(rte_len - Mdiff))
errors.append(errorinfo)
counter += 1
elif abs(shp_len - rte_len) > .003:
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append(abs(shp_len - rte_len))
errors.append(errorinfo)
counter += 1
else:
pass
else:
oid = str(row.OBJECTID)
errorinfo.append("OID: " + oid)
errorinfo.append(str(Mdiff))
errorinfo.append(str(shp_len))
errorinfo.append(str(rte_len))
errorinfo.append(row.DIST_NM)
errorinfo.append(row.DIST_NBR)
errorinfo.append("")
errors.append(errorinfo)
counter += 1
arcpy.AddMessage(str(counter) + " measure length errors.")
del cursor
del row
return errors
def roadwaydict():
errors = []
counter = 0
arcpy.AddMessage("Creating dictionary")
dictionary = {}
cursor = arcpy.SearchCursor(roadways, where)
for row in cursor:
        id = row.RTE_ID
        # Compute the extent first so Mmin/Mmax are always defined before they
        # are written into an error record below (previously they carried over
        # from the prior row, or raised a NameError on the first row).
        geom = row.shape
        ext = geom.extent
        Mmin = round(ext.MMin, 3)
        Mmax = round(ext.MMax, 3)
        if row.RTE_LEN is not None:
            len = row.RTE_LEN
        else:
            len = 0
            oid = str(row.OBJECTID)
            errorinfo = []
            errorinfo.append("OBJECTID: " + oid)
            errorinfo.append("")
            errorinfo.append("")
            errorinfo.append(Mmin)
            errorinfo.append("")
            errorinfo.append(Mmax)
            errorinfo.append("")
            errorinfo.append(len)
            errorinfo.append("NO RTE_LEN POPULATED. OBJECTID: " + oid)
            errors.append(errorinfo)
            counter += 1
if id not in dictionary.keys() and id is not None:
dictionary[str(id)] = [len, Mmin, Mmax]
elif id in dictionary.keys() and id is not None:
currentrecord = dictionary[id]
currentlength = currentrecord[0]
currentmin = currentrecord[1]
currentmax = currentrecord[2]
newlen = currentlength + len
if Mmin < currentmin:
currentmin = Mmin
if Mmax > currentmax:
currentmax = Mmax
dictionary[str(id)] = [newlen, currentmin, currentmax]
else:
oid = str(row.OBJECTID)
errorinfo = []
errorinfo.append("OBJECTID: " + oid)
errorinfo.append("")
errorinfo.append("")
errorinfo.append(Mmin)
errorinfo.append("")
errorinfo.append(Mmax)
errorinfo.append("")
errorinfo.append(len)
errorinfo.append("NO ROUTE ID. OBJECTID: " + oid)
errors.append(errorinfo)
counter += 1
del cursor
del row
arcpy.AddMessage("Dictionary complete")
arcpy.AddMessage(str(counter) + " null RTE_ID and RTE_LEN errors")
theball = [errors, dictionary]
return theball
def subfilelength():
theball = roadwaydict()
errors = theball[0]
dictionary = theball[1]
now1 = datetime.datetime.now()
counto = int(arcpy.GetCount_management(subfiles).getOutput(0))
total = counto - 1
starter = 0
counter = 0
previous = ""
cursor = arcpy.SearchCursor(subfiles, "", "", "", "RTE_ID A; BMP A")
for row in cursor:
id = row.RTE_ID
if id in dictionary.keys():
current = id
if starter == 0:
sublength = 0
linevalues = dictionary[current]
linelen = linevalues[0]
linemin = linevalues[1]
linemax = linevalues[2]
bmp1 = row.BMP
bmp = row.BMP
emp = row.EMP
sublength += row.LEN_OF_SECTION
dist = row.DISTRICT
if abs((emp - bmp) - row.LEN_OF_SECTION) > .001:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append("")
errorinfo.append(sublength)
errorinfo.append("")
errorinfo.append("BMP and EMP difference does not equal the LEN_OF_SECTION. SUBFILE OID: " + str(row.OBJECTID))
errors.append(errorinfo)
counter += 1
previous = current
elif current != previous and starter != total:
if abs(linelen - sublength) > .003:
errorinfo = []
errorinfo.append(previous)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append(sublength)
errorinfo.append(linelen)
errorinfo.append("RTE_LEN does not equal SUBFILES total LEN_OF_SECTION")
errors.append(errorinfo)
counter += 1
if abs(linemin - bmp1) > .002:
errorinfo = []
errorinfo.append(previous)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line minimum measure does not equal starting BMP")
errors.append(errorinfo)
counter += 1
if abs(linemax - emp) > .003:
errorinfo = []
errorinfo.append(previous)
errorinfo.append(dist)
errorinfo.append("")
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line maximum measure does not equal ending EMP")
errors.append(errorinfo)
counter += 1
sublength = 0
linevalues = dictionary[current]
linelen = linevalues[0]
linemin = linevalues[1]
linemax = linevalues[2]
bmp1 = row.BMP
bmp = row.BMP
emp = row.EMP
sublength += row.LEN_OF_SECTION
dist = row.DISTRICT
if abs((emp - bmp) - row.LEN_OF_SECTION) > .001:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append("")
errorinfo.append(sublength)
errorinfo.append("")
errorinfo.append("BMP and EMP difference does not equal the LEN_OF_SECTION. SUBFILE OID: " + str(row.OBJECTID))
errors.append(errorinfo)
counter += 1
previous = current
elif current == previous and starter != total:
bmp = row.BMP
emp = row.EMP
sublength += row.LEN_OF_SECTION
dist = row.DISTRICT
if abs((emp - bmp) - row.LEN_OF_SECTION) > .001:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append("")
errorinfo.append(sublength)
errorinfo.append("")
errorinfo.append("BMP and EMP difference does not equal the LEN_OF_SECTION. SUBFILE OID: " + str(row.OBJECTID))
errors.append(errorinfo)
counter += 1
previous = current
elif current != previous and starter == total:
if abs(linelen - sublength) > .003:
errorinfo = []
errorinfo.append(previous)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append(sublength)
errorinfo.append(linelen)
errorinfo.append("RTE_LEN does not equal SUBFILES total LEN_OF_SECTION")
errors.append(errorinfo)
counter += 1
if abs(linemin - bmp1) > .002:
errorinfo = []
errorinfo.append(previous)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line minimum measure does not equal starting BMP")
errors.append(errorinfo)
counter += 1
if abs(linemax - emp) > .003:
errorinfo = []
errorinfo.append(previous)
errorinfo.append(dist)
errorinfo.append("")
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line maximum measure does not equal ending EMP")
errors.append(errorinfo)
counter += 1
sublength = 0
linevalues = dictionary[current]
linelen = linevalues[0]
linemin = linevalues[1]
linemax = linevalues[2]
bmp1 = row.BMP
bmp = row.BMP
emp = row.EMP
sublength += row.LEN_OF_SECTION
dist = row.DISTRICT
if abs((emp - bmp) - row.LEN_OF_SECTION) > .001:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append("")
errorinfo.append(sublength)
errorinfo.append("")
errorinfo.append("BMP and EMP difference does not equal the LEN_OF_SECTION. SUBFILE OID: " + str(row.OBJECTID))
errors.append(errorinfo)
counter += 1
if abs(linelen - sublength) > .003:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append(sublength)
errorinfo.append(linelen)
errorinfo.append("RTE_LEN does not equal SUBFILES total LEN_OF_SECTION")
errors.append(errorinfo)
counter += 1
if abs(linemin - bmp1) > .002:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line minimum measure does not equal starting BMP")
errors.append(errorinfo)
counter += 1
if abs(linemax - emp) > .003:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append("")
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line maximum measure does not equal ending EMP")
errors.append(errorinfo)
counter += 1
elif current == previous and starter == total:
bmp = row.BMP
emp = row.EMP
sublength += row.LEN_OF_SECTION
dist = row.DISTRICT
if abs((emp - bmp) - row.LEN_OF_SECTION) > .001:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append("")
errorinfo.append(sublength)
errorinfo.append("")
errorinfo.append("BMP and EMP difference does not equal the LEN_OF_SECTION. SUBFILE OID: " + str(row.OBJECTID))
errors.append(errorinfo)
counter += 1
if abs(linelen - sublength) > .003:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append(sublength)
errorinfo.append(linelen)
errorinfo.append("RTE_LEN does not equal SUBFILES total LEN_OF_SECTION")
errors.append(errorinfo)
counter += 1
if abs(linemin - bmp1) > .002:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line minimum measure does not equal starting BMP")
errors.append(errorinfo)
counter += 1
if abs(linemax - emp) > .003:
errorinfo = []
errorinfo.append(current)
errorinfo.append(dist)
errorinfo.append("")
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line maximum measure does not equal ending EMP")
errors.append(errorinfo)
counter += 1
starter += 1
arcpy.AddMessage(str(starter) + "/" + str(total))
else:
starter += 1
arcpy.AddMessage(str(starter) + "/" + str(total))
pass
then = datetime.datetime.now()
arcpy.AddMessage(str(now1))
arcpy.AddMessage("Update Done. " + str(then))
arcpy.AddMessage(str(counter) + " subfile length errors.")
return errors
def removevertices():
counter = 0
errors = []
spatialRef = arcpy.Describe(txdotroads).spatialReference
query = """ RDBD_TYPE <> 'CNCTR-GS' AND RDBD_TYPE <> 'CONNECTOR' AND RDBD_TYPE <> 'OTHER' AND RDBD_TYPE <> 'RAMP' AND RDBD_TYPE <> 'TURNAROUND' """
cursor = arcpy.SearchCursor(txdotroads, query)
for row in cursor:
geom = row.shape
allparts = geom.getPart()
if allparts.count > 1:
errorinfo = []
errorinfo.append(row.OBJECTID)
errorinfo.append(row.RTE_ID)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Multipart feature.")
errors.append(errorinfo)
counter += 1
try:
lastX = 0
lastY = 0
for part in allparts:
srtpnt = 0
for pnt in part:
if srtpnt == 0:
x = pnt.X
y = pnt.Y
m = pnt.M
lastX = x
lastY = y
lastM = m
srtpnt += 1
else:
x = pnt.X
y = pnt.Y
m = pnt.M
if row.RDBD_TYPE == "LG" or row.RDBD_TYPE == "XG":
if math.isnan(m):
errorinfo = []
errorinfo.append(row.OBJECTID)
errorinfo.append(row.RTE_ID)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Has vertex with zero measure, apply measures.")
if errorinfo not in errors:
errors.append(errorinfo)
counter += 1
if m >= lastM:
errorinfo = []
errorinfo.append(row.OBJECTID)
errorinfo.append(row.RTE_ID)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("LG or XG with non-decreasing measure, Re-apply measures.")
if errorinfo not in errors:
errors.append(errorinfo)
counter += 1
elif row.RDBD_TYPE == "KG" or row.RDBD_TYPE == "AG" or row.RDBD_TYPE == "XG":
if math.isnan(m):
errorinfo = []
errorinfo.append(row.OBJECTID)
errorinfo.append(row.RTE_ID)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Has vertex with zero measure, apply measures.")
if errorinfo not in errors:
errors.append(errorinfo)
counter += 1
if m <= lastM:
errorinfo = []
errorinfo.append(row.OBJECTID)
errorinfo.append(row.RTE_ID)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("KG, AG, or XG with non-increasing measure, Re-apply measures.")
if errorinfo not in errors:
errors.append(errorinfo)
counter += 1
except:
errorinfo = []
errorinfo.append(row.OBJECTID)
errorinfo.append(row.RTE_ID)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Geometry Error. Please check geometry.")
errors.append(errorinfo)
counter += 1
    arcpy.AddMessage(str(counter) + " multipart and vertex measure errors.")
return errors
def proc2():
arcpy.AddMessage("Overlap Errors...")
overlapsheet = book.add_sheet("City Boundary Overlap")
line = 0
overlapsheet.write(line, 0, "RTE_ID")
overlapsheet.write(line, 1, "Overlap Length")
overlapsheet.write(line, 2, "District Name")
overlapsheet.write(line, 3, "District Number")
overlapsheet.write(line, 5,
"The following Route IDs are County Roads and FC Streets which cross a City Boundary as found in City_OverlapErrors.shp")
line += 1
overlaplist = overlap()
for i in overlaplist:
overlapsheet.write(line, 0, i[0])
overlapsheet.write(line, 1, i[1])
overlapsheet.write(line, 2, i[2])
overlapsheet.write(line, 3, i[3])
line += 1
def proc3():
arcpy.AddMessage("Route Open Errors...")
opensheet = book.add_sheet("Route Open")
line = 0
opensheet.write(line, 0, "RTE_ID")
opensheet.write(line, 1, "RTE_OPEN")
opensheet.write(line, 2, "HIGHWAY_STATUS")
opensheet.write(line, 3, "Description")
opensheet.write(line, 4, "District Number")
opensheet.write(line, 6,
"The following Route IDs contain an error between RTE_OPEN in TxDOT_Roadways and ROADWAY_STATUS in SUBFILES")
line += 1
openlist = routeopen()
for i in openlist:
opensheet.write(line, 0, i[0])
opensheet.write(line, 1, i[1])
opensheet.write(line, 2, i[2])
opensheet.write(line, 3, i[3])
opensheet.write(line, 4, i[4])
line += 1
def proc4():
arcpy.AddMessage("Geometry and Measure Errors...")
geomsheet = book.add_sheet("Geometry and Measures")
line = 0
geomsheet.write(line, 0, "RTE_ID")
geomsheet.write(line, 1, "Measures' Length")
geomsheet.write(line, 2, "Shape Length")
geomsheet.write(line, 3, "RTE_LEN")
geomsheet.write(line, 4, "District Name")
geomsheet.write(line, 5, "District Number")
geomsheet.write(line, 6, "Difference")
geomsheet.write(line, 8,
"The following Route IDs contain an error between their measures' length, shape length, and RTE_LEN")
line += 1
geomlist = measurelength()
for i in geomlist:
geomsheet.write(line, 0, i[0])
geomsheet.write(line, 1, i[1])
geomsheet.write(line, 2, i[2])
geomsheet.write(line, 3, i[3])
geomsheet.write(line, 4, i[4])
geomsheet.write(line, 5, i[5])
geomsheet.write(line, 6, i[6])
line += 1
def proc5():
arcpy.AddMessage("Subfile Length Errors...")
subsheet = book.add_sheet("Subfile Lengths")
line = 0
subsheet.write(line, 0, "RTE_ID")
subsheet.write(line, 1, "District Number")
subsheet.write(line, 2, "BMP")
subsheet.write(line, 3, "Min Measure")
subsheet.write(line, 4, "EMP")
subsheet.write(line, 5, "Max Measure")
subsheet.write(line, 6, "Subfile Len")
subsheet.write(line, 7, "RTE_LEN")
subsheet.write(line, 8, "Description")
subsheet.write(line, 10, "The following Route IDs contain an error between their line and SUBFILES lengths")
line += 1
sublist = subfilelength()
for i in sublist:
subsheet.write(line, 0, i[0])
subsheet.write(line, 1, i[1])
subsheet.write(line, 2, i[2])
subsheet.write(line, 3, i[3])
subsheet.write(line, 4, i[4])
subsheet.write(line, 5, i[5])
subsheet.write(line, 6, i[6])
subsheet.write(line, 7, i[7])
subsheet.write(line, 8, i[8])
line += 1
def proc6():
arcpy.AddMessage("Multipart Errors and Removing Verticies...")
multisheet = book.add_sheet("Multipart & Measure Errors")
line = 0
multisheet.write(line, 0, "OBJECTID")
multisheet.write(line, 1, "RTE_ID")
multisheet.write(line, 2, "District Name")
multisheet.write(line, 3, "District Number")
multisheet.write(line, 4, "Description")
multisheet.write(line, 6, "The following Object IDs are multipart features or have measure errors.")
line += 1
multilist = removevertices()
for i in multilist:
multisheet.write(line, 0, i[0])
multisheet.write(line, 1, i[1])
multisheet.write(line, 2, i[2])
multisheet.write(line, 3, i[3])
multisheet.write(line, 4, i[4])
line += 1
def assemblereport(check1, check2, check3, check4, check5, check6):
if check1 == "false" and check2 == "false" and check3 == "false" and check4 == "false" and check5 == "false":
pass
else:
arcpy.AddMessage("Copying data local...")
copylocal()
arcpy.AddMessage("Beginning error checks...")
if check1 == "true":
check2 = "true"
check3 = "true"
check4 = "true"
check5 = "true"
check6 = "true"
if check2 == "true":
proc2()
if check3 == "true":
proc3()
if check4 == "true":
proc4()
if check5 == "true":
proc5()
if check6 == "true":
proc6()
book.save(workspace + "\\ErrorReport_" + today + ".xls")
nowS = datetime.datetime.now()
arcpy.AddMessage("and away we go... " + str(nowS))
book = xlwt.Workbook()
assemblereport(check1, check2, check3, check4, check5, check6)
now = datetime.datetime.now()
arcpy.AddMessage("started " + str(nowS))
arcpy.AddMessage("that's all folks!" + str(now))
|
|
#!/usr/bin/python
"""User Database"""
from userbase_config import config
import psycopg2
import cStringIO
import traceback
import userbase_models
import userbase_validations
import bcrypt
import datetime
import uuid
class Database:
def __init__(self, salt, fn_log_error, config_path, config_section='postgresql'):
self.salt = salt
self.grp_admin = "Admin"
self.fn_log_error = fn_log_error
self.config = config(config_path, config_section)
#salt = "DummyUserbaseSalt"
#grp_admin = "Admin"
def id_from_username(self, username):
""" finds id_user from username """
conn = None
id = 0
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("SELECT id FROM users WHERE name = %(name)s AND active = true",
{
"name": username
})
row = cur.fetchone()
if row is not None:
id = int(row[0])
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error("There was an error: {0}".format(traceback.format_exc()))
finally:
if conn is not None:
conn.close()
return id
def id_from_email(self, email):
""" finds id_user from email """
conn = None
id = 0
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("SELECT u.id FROM user_authentications ua JOIN users u ON u.id = ua.id WHERE recovery_email = %(email)s AND active = true",
{
"email": email
})
row = cur.fetchone()
if row is not None:
id = int(row[0])
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error("There was an error: {0}".format(traceback.format_exc()))
finally:
if conn is not None:
conn.close()
return id
def username_exists(self, username):
""" checks if username exists """
return self.id_from_username(username) > 0
def email_exists(self, email):
""" checks if email exists """
return self.id_from_email(email) > 0
def create_user(self, rq):
""" creates user """
if self.username_exists(rq.Username) or self.email_exists(rq.Authentication.Email):
return userbase_models.ConnectUserResponse.invalid()
if (
not userbase_validations.username(rq.Username) or
not userbase_validations.email(rq.Authentication.Email) or
not userbase_validations.password(rq.Authentication.Password) or
not userbase_validations.display_name(rq.Profile.DisplayName)
):
return userbase_models.ConnectUserResponse.invalid()
id_user = self.__create_user_entity(rq)
self.__create_authentication_entity(rq, id_user)
self.__create_profile_entity(rq, id_user)
self.__create_setting_entity(rq, id_user)
return self.__get_summary(self.__create_connection_token_response(id_user))
def validate_credentials(self, username, password):
""" validate user credentials """
resp = userbase_models.ConnectUserResponse.invalid()
id_user = self.id_from_username(username)
if id_user == 0:
return resp
conn = None
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("SELECT password FROM user_authentications WHERE id = %(id)s",
{
"id": id_user
})
row = cur.fetchone()
if row is not None:
dbpass = row[0]
if bcrypt.checkpw(bytes(self.__salt_password(password)), bytes(dbpass)):
resp = self.__create_connection_token_response(id_user)
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error("There was an error: {0}".format(traceback.format_exc()))
finally:
if conn is not None:
conn.close()
return self.__get_summary(resp)
def disconnect(self, username, token):
""" disconnect """
resp = False
conn_resp = self.__validate_connection_token_response(username, token)
if not conn_resp.Success:
return resp
conn = None
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("UPDATE user_tokens SET Expiration = %(expiration)s WHERE id_user = %(id)s AND token = %(token)s",
{
"id": conn_resp.IdUser,
"token": token,
"expiration": datetime.datetime.now()
})
cur.close()
resp = True
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error("There was an error: {0}".format(traceback.format_exc()))
finally:
if conn is not None:
conn.close()
return resp
def deactivate(self, username, token):
""" deactivate """
resp = False
conn_resp = self.__validate_connection_token_response(username, token)
if not conn_resp.Success:
return resp
self.disconnect(username, token)
conn = None
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("UPDATE users SET Active = false WHERE id = %(id)s",
{
"id": conn_resp.IdUser
})
cur.close()
resp = True
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error("There was an error: {0}".format(traceback.format_exc()))
finally:
if conn is not None:
conn.close()
return resp
def modify_credentials(self, username, token, creds):
""" modify_credentials """
conn_resp = self.__validate_connection_token_response(username, token)
resp = userbase_models.TokenSuccessResponse(conn_resp.Success, conn_resp.Token)
if not resp.Success:
return resp
if len(creds.Password) > 0:
if not userbase_validations.password(creds.Password):
resp = userbase_models.TokenSuccessResponse.invalid()
else:
conn = None
try:
hashed_password = bcrypt.hashpw(bytes(self.__salt_password(creds.Password)), bcrypt.gensalt())
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("UPDATE user_authentications SET password = %(password)s WHERE id = %(id)s",
{
"id": conn_resp.IdUser,
"password": hashed_password
})
cur.close()
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error("There was an error: {0}".format(traceback.format_exc()))
resp = userbase_models.TokenSuccessResponse.invalid()
finally:
if conn is not None:
conn.close()
if not resp.Success:
return resp
if len(creds.Email) > 0:
if not userbase_validations.email(creds.Email):
resp = userbase_models.TokenSuccessResponse.invalid()
else:
conn = None
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("UPDATE user_authentications SET recovery_email = %(email)s WHERE id = %(id)s",
{
"id": conn_resp.IdUser,
"email": creds.Email
})
cur.close()
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error("There was an error: {0}".format(traceback.format_exc()))
resp = userbase_models.TokenSuccessResponse.invalid()
finally:
if conn is not None:
conn.close()
return resp
def modify_profile(self, username, token, prof):
""" modify_profile """
conn_resp = self.__validate_connection_token_response(username, token)
resp = userbase_models.TokenSuccessResponse(conn_resp.Success, conn_resp.Token)
if not resp.Success:
return resp
if len(prof.DisplayName) > 0:
if not userbase_validations.display_name(prof.DisplayName):
resp = userbase_models.TokenSuccessResponse.invalid()
else:
conn = None
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("UPDATE user_profiles SET display_name = %(display)s WHERE id = %(id)s",
{
"id": conn_resp.IdUser,
"display": prof.DisplayName
})
cur.close()
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error("There was an error: {0}".format(traceback.format_exc()))
resp = userbase_models.TokenSuccessResponse.invalid()
finally:
if conn is not None:
conn.close()
return resp
def send_recovery(self, username, send_email_func):
""" send_recovery """
resp = False
id_user = self.id_from_username(username)
if id_user == 0:
return resp
conn = None
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("SELECT recovery_email FROM user_authentications WHERE id = %(id)s",
{
"id": id_user
})
row = cur.fetchone()
if row is not None:
email = row[0]
cur.close()
send_email_func(self.__create_recovery_token_response(id_user).Token, username, email, self.fn_log_error)
resp = True
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error("There was an error: {0}".format(traceback.format_exc()))
finally:
if conn is not None:
conn.close()
return resp
def reset_password(self, username, token, password):
""" reset_password """
id_user = self.id_from_username(username)
if id_user == 0:
return userbase_models.ConnectUserResponse.invalid()
if not userbase_validations.password(password):
return userbase_models.ConnectUserResponse.invalid()
conn = None
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("SELECT expiration FROM user_recovery_tokens WHERE id_user = %(id)s AND token = %(token)s",
{
"id": id_user,
"token": token
})
row = cur.fetchone()
cur.close()
if row is not None:
expiration = row[0]
if expiration > datetime.datetime.now():
new_token = userbase_models.Token(token, datetime.datetime.now())
cur2 = conn.cursor()
cur2.execute("UPDATE user_recovery_tokens SET Expiration = %(expiration)s WHERE id_user = %(id)s AND token = %(token)s",
{
"id": id_user,
"token": new_token.Id,
"expiration": new_token.ValidUntil
})
cur2.close()
hashed_password = bcrypt.hashpw(bytes(self.__salt_password(password)), bcrypt.gensalt())
cur3 = conn.cursor()
cur3.execute("UPDATE user_authentications SET password = %(password)s WHERE id = %(id)s",
{
"id": id_user,
"password": hashed_password
})
cur3.close()
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error("There was an error: {0}".format(traceback.format_exc()))
finally:
if conn is not None:
conn.close()
return self.validate_credentials(username, password)
def list_users(self, username, token):
""" list_users """
resp = userbase_models.ConnectUserResponse.invalid()
conn_resp = self.__validate_connection_token_response(username, token)
if not conn_resp.Success:
return resp
if not self.__is_user_in_group(conn_resp.IdUser, self.grp_admin):
return resp
conn = None
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("SELECT u.id, u.name, up.display_name FROM users u NATURAL JOIN user_profiles up WHERE u.active = true")
row = cur.fetchone()
users = []
while row is not None:
users.append(userbase_models.User(int(row[0]), row[1], row[2], self.__get_groups(int(row[0]))))
row = cur.fetchone()
resp = userbase_models.UserListResponse(True, conn_resp.Token, users)
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error("There was an error: {0}".format(traceback.format_exc()))
finally:
if conn is not None:
conn.close()
return resp
def include_in_group(self, rq):
""" include_in_group """
resp = userbase_models.ConnectUserResponse.invalid()
conn_resp = self.__validate_connection_token_response(rq.Username, rq.Token)
if not conn_resp.Success:
return resp
if not self.__is_user_in_group(conn_resp.IdUser, self.grp_admin):
return resp
id_user_to_add = self.id_from_username(rq.UserToAdd)
if id_user_to_add == 0:
return resp
if self.__is_user_in_groupid(id_user_to_add, rq.IdGroup):
return resp
conn = None
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("INSERT INTO user_groups (id_user, id_user_group_type) VALUES (%(id_user)s,%(id_group)s)",
{
"id_user": id_user_to_add,
"id_group": rq.IdGroup
})
cur.close()
conn.commit()
resp = conn_resp
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error("There was an error: {0}".format(traceback.format_exc()))
finally:
if conn is not None:
conn.close()
return self.__get_summary(resp)
def exclude_from_group(self, username, token, user_to_remove, id_group):
""" exclude_from_group """
resp = userbase_models.ConnectUserResponse.invalid()
conn_resp = self.__validate_connection_token_response(username, token)
if not conn_resp.Success:
return resp
if not self.__is_user_in_group(conn_resp.IdUser, self.grp_admin):
return resp
        id_user_to_remove = self.id_from_username(user_to_remove)
        if id_user_to_remove == 0:
return resp
        if not self.__is_user_in_groupid(id_user_to_remove, id_group):
return resp
conn = None
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("DELETE FROM user_groups WHERE id_user = %(id_user)s AND id_user_group_type = %(id_group)s",
{
"id_user": id_user_to_add,
"id_group": id_group
})
cur.close()
conn.commit()
resp = conn_resp
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error("There was an error: {0}".format(traceback.format_exc()))
finally:
if conn is not None:
conn.close()
return self.__get_summary(resp)
def __create_user_entity(self, rq):
conn = None
id = 0
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("INSERT INTO users (name,active) VALUES (%(name)s, true) RETURNING id;",
{
"name": rq.Username
})
row = cur.fetchone()
if row is not None:
id = int(row[0])
cur.close()
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error(error)
print(error)
finally:
if conn is not None:
conn.close()
return id
def __create_authentication_entity(self, rq, id_user):
hashed_password = bcrypt.hashpw(bytes(self.__salt_password(rq.Authentication.Password)), bcrypt.gensalt())
conn = None
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("INSERT INTO user_authentications (id,password,recovery_email) VALUES (%(id)s, %(pass)s, %(email)s);",
{
"id": id_user,
"pass": hashed_password,
"email": rq.Authentication.Email
})
cur.close()
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error(error)
print(error)
finally:
if conn is not None:
conn.close()
def __create_profile_entity(self, rq, id_user):
conn = None
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("INSERT INTO user_profiles (id,display_name) VALUES (%(id)s, %(display)s);",
{
"id": id_user,
"display": rq.Profile.DisplayName
})
cur.close()
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error(error)
print(error)
finally:
if conn is not None:
conn.close()
def __create_setting_entity(self, rq, id_user):
conn = None
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("INSERT INTO user_settings (id,id_user_access_type_list_friends) VALUES (%(id)s, 1);",
{
"id": id_user
})
cur.close()
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error(error)
print(error)
finally:
if conn is not None:
conn.close()
def __create_connection_token_response(self, id_user):
resp = userbase_models.ConnectUserResponse.invalid()
conn = None
try:
token = userbase_models.Token(str(uuid.uuid4()), datetime.datetime.now() + datetime.timedelta(minutes=10))
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("INSERT INTO user_tokens (id_user,token,expiration) VALUES (%(id)s, %(token)s, %(expiration)s);",
{
"id": id_user,
"token": token.Id,
"expiration": token.ValidUntil
})
cur.close()
conn.commit()
resp = userbase_models.ConnectUserResponse(True, token, id_user)
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error(error)
print(error)
finally:
if conn is not None:
conn.close()
return resp
def __create_recovery_token_response(self, id_user):
resp = userbase_models.ConnectUserResponse.invalid()
conn = None
try:
token = userbase_models.Token(str(uuid.uuid4())[:8].upper(), datetime.datetime.now() + datetime.timedelta(days=1))
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("INSERT INTO user_recovery_tokens (id_user,token,expiration) VALUES (%(id)s, %(token)s, %(expiration)s);",
{
"id": id_user,
"token": token.Id,
"expiration": token.ValidUntil
})
cur.close()
conn.commit()
resp = userbase_models.ConnectUserResponse(True, token, id_user)
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error(error)
print(error)
finally:
if conn is not None:
conn.close()
return resp
def __validate_connection_token_response(self, username, token):
""" validate user credentials """
resp = userbase_models.ConnectUserResponse.invalid()
id_user = self.id_from_username(username)
if id_user == 0:
return resp
conn = None
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("SELECT expiration FROM user_tokens WHERE id_user = %(id)s AND token = %(token)s",
{
"id": id_user,
"token": token
})
row = cur.fetchone()
cur.close()
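            # A matching, unexpired token is refreshed for another 10 minutes
            # and returned as a successful connection response.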
if row is not None:
expiration = row[0]
if expiration > datetime.datetime.now():
new_token = userbase_models.Token(token, datetime.datetime.now() + datetime.timedelta(minutes=10))
cur2 = conn.cursor()
cur2.execute("UPDATE user_tokens SET Expiration = %(expiration)s WHERE id_user = %(id)s AND token = %(token)s",
{
"id": id_user,
"token": new_token.Id,
"expiration": new_token.ValidUntil
})
cur2.close()
resp = userbase_models.ConnectUserResponse(True, new_token, id_user)
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error("There was an error: {0}".format(traceback.format_exc()))
finally:
if conn is not None:
conn.close()
return resp
def __get_summary(self, conn_resp):
""" validate user credentials """
return userbase_models.UserSummaryResponse(conn_resp.Success, conn_resp.Token, self.__get_profile(conn_resp.IdUser), self.__get_groups(conn_resp.IdUser))
def __get_profile(self, id_user):
""" validate user credentials """
profile = userbase_models.ProfileInfo(None)
conn = None
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("SELECT display_name FROM user_profiles WHERE id = %(id)s",
{
"id": id_user
})
row = cur.fetchone()
cur.close()
if row is not None:
profile = userbase_models.ProfileInfo(row[0])
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error("There was an error: {0}".format(traceback.format_exc()))
finally:
if conn is not None:
conn.close()
return profile
def __get_groups(self, id_user):
""" __get_groups """
groups = []
conn = None
try:
conn = psycopg2.connect(**self.config)
cur = conn.cursor()
cur.execute("SELECT ugt.id, name FROM user_groups ug JOIN user_group_types ugt ON ug.id_user_group_type = ugt.id WHERE id_user = %(id)s",
{
"id": id_user
})
row = cur.fetchone()
while row is not None:
groups.append(userbase_models.Group(int(row[0]), row[1]))
row = cur.fetchone()
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
self.fn_log_error("There was an error: {0}".format(traceback.format_exc()))
finally:
if conn is not None:
conn.close()
return groups
def __is_user_in_group(self, id_user, group):
""" __is_user_in_group """
groups = self.__get_groups(id_user)
for grp in groups:
if grp.Name == group:
return True
return False
def __is_user_in_groupid(self, id_user, id_group):
""" __is_user_in_groupid """
groups = self.__get_groups(id_user)
for grp in groups:
if grp.Id == id_group:
return True
return False
def __salt_password(self, password):
return self.salt + password
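# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only). The class name, constructor
# signature and request model below are assumptions that do not appear in
# this excerpt; the method calls mirror the public API defined above.
#
#   svc = UserbaseService(config=pg_config, salt="pepper",       # hypothetical
#                         grp_admin="admin", fn_log_error=print)
#   rq = userbase_models.ConnectUserRequest(...)                  # hypothetical
#   summary = svc.create_user(rq)             # UserSummaryResponse on success
#   summary = svc.validate_credentials("alice", "s3cret-pass")
#   svc.disconnect("alice", summary.Token.Id)
# ---------------------------------------------------------------------------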
|
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_nutnr_b_dcl_conc
@file mi/dataset/parser/test/test_nutnr_b_dcl_conc.py
@author Steve Myerson (Raytheon), Mark Worden
@brief Test code for a nutnr_b_dcl_conc data parser
Files used for testing:
Filename Blocks Instrument Records
20010101.nutnr_b_dcl_conc.log 1 0
20020125.nutnr_b_dcl_conc.log 1 25
20031129.nutnr_b_dcl_conc.log 2 11, 29
20040509.nutnr_b_dcl_conc.log 3 4, 5, 9
20051220.nutnr_b_dcl_conc.log 2 40, 75
20061225.nutnr_b_dcl_conc.log 3 50, 80, 125
19990401.nutnr_b_dcl_conc.log 1 19 valid, 1 invalid frame type
19980401.nutnr_b_dcl_conc.log 1 20, metadata incomplete
19970401.nutnr_b_dcl_conc.log 1 No valid records (various fields incorrect)
Real file:
20140430.nutnr_b_dcl_conc.log has 8 NDC records and 56 NLC records.
Should generate 8 metadata particles and 64 instrument particles.
Each block produces 1 metadata particle unless there are no instrument records.
Each instrument record produces 1 instrument particle.
"""
import os
from nose.plugins.attrib import attr
from mi.core.log import get_logger
log = get_logger()
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.nutnr_b_dcl_conc import \
NutnrBDclConcRecoveredParser, \
NutnrBDclConcTelemeteredParser
from mi.dataset.parser.nutnr_b_particles import \
NutnrBDclConcRecoveredInstrumentDataParticle, \
NutnrBDclDarkConcRecoveredInstrumentDataParticle, \
NutnrBDclConcTelemeteredInstrumentDataParticle, \
NutnrBDclDarkConcTelemeteredInstrumentDataParticle, \
NutnrBDclConcRecoveredMetadataDataParticle, \
NutnrBDclConcTelemeteredMetadataDataParticle
from mi.idk.config import Config
RESOURCE_PATH = os.path.join(Config().base_dir(), 'mi', 'dataset', 'driver',
'nutnr_b', 'dcl_conc', 'resource')
MODULE_NAME = 'mi.dataset.parser.nutnr_b_particles'
FILE1 = '20010101.nutnr_b_dcl_conc.log'
FILE2 = '20020125.nutnr_b_dcl_conc.log'
FILE3 = '20031129.nutnr_b_dcl_conc.log'
FILE4 = '20040509.nutnr_b_dcl_conc.log'
FILE5 = '20051220.nutnr_b_dcl_conc.log'
FILE6 = '20061225.nutnr_b_dcl_conc.log'
FILE_INVALID_FRAME_TYPE = '19990401.nutnr_b_dcl_conc.log'
FILE_MISSING_METADATA = '19980401.nutnr_b_dcl_conc.log'
FILE_INVALID_FIELDS = '19970401.nutnr_b_dcl_conc.log'
FILE_REAL = '20140430.nutnr_b_dcl_conc.log'
SECOND_BLOCK_IN_DATA_BLOCK_FILE = '20040901.nutnr_b_dcl_conc.log'
EXPECTED_PARTICLES1 = 0
EXPECTED_PARTICLES2 = 26
EXPECTED_PARTICLES3 = 42
EXPECTED_PARTICLES4 = 21
EXPECTED_PARTICLES5 = 117
EXPECTED_PARTICLES6 = 258
EXPECTED_META_PARTICLES_REAL = 8
EXPECTED_INST_PARTICLES_REAL = 64
EXPECTED_PARTICLES_INVALID_FRAME_TYPE = 19
EXPECTED_EXCEPTIONS_INVALID_FRAME_TYPE = 3
EXPECTED_PARTICLES_MISSING_METADATA = 20
EXPECTED_EXCEPTIONS_MISSING_METADATA = 2
EXPECTED_PARTICLES_INVALID_FIELDS = 2
EXPECTED_EXCEPTIONS_INVALID_FIELDS = 20
EXPECTED_PARTICLES_SECOND_BLOCK_IN_DATA_BLOCK = 7
EXPECTED_EXCEPTIONS_SECOND_BLOCK_IN_DATA_BLOCK = 0
REC_YML2 = 'rec_20020125.nutnr_b_dcl_conc.yml'
REC_YML3 = 'rec_20031129.nutnr_b_dcl_conc.yml'
REC_YML4 = 'rec_20040509.nutnr_b_dcl_conc.yml'
REC_YML5 = 'rec_20051220.nutnr_b_dcl_conc.yml'
REC_YML6 = 'rec_20061225.nutnr_b_dcl_conc.yml'
REC_YML_INVALID_FIELDS = 'rec_19970401.nutnr_b_dcl_conc.yml'
TEL_YML2 = 'tel_20020125.nutnr_b_dcl_conc.yml'
TEL_YML3 = 'tel_20031129.nutnr_b_dcl_conc.yml'
TEL_YML4 = 'tel_20040509.nutnr_b_dcl_conc.yml'
TEL_YML5 = 'tel_20051220.nutnr_b_dcl_conc.yml'
TEL_YML6 = 'tel_20061225.nutnr_b_dcl_conc.yml'
TEL_YML_INVALID_FIELDS = 'tel_19970401.nutnr_b_dcl_conc.yml'
HAPPY_PATH_TABLE = [
(FILE2, EXPECTED_PARTICLES2, REC_YML2, TEL_YML2),
(FILE3, EXPECTED_PARTICLES3, REC_YML3, TEL_YML3),
(FILE4, EXPECTED_PARTICLES4, REC_YML4, TEL_YML4),
(FILE5, EXPECTED_PARTICLES5, REC_YML5, TEL_YML5),
(FILE6, EXPECTED_PARTICLES6, REC_YML6, TEL_YML6)
]
@attr('UNIT', group='mi')
class NutnrBDclConcParserUnitTestCase(ParserUnitTestCase):
"""
nutnr_b_dcl_conc Parser unit test suite
"""
def create_rec_parser(self, file_handle, new_state=None):
"""
This function creates a NutnrBDclConc parser for recovered data.
"""
return NutnrBDclConcRecoveredParser(
self.rec_config,
file_handle, lambda state, ingested: None,
lambda data: None, self.rec_exception_callback)
def create_tel_parser(self, file_handle, new_state=None):
"""
This function creates a NutnrBDclConc parser for telemetered data.
"""
return NutnrBDclConcTelemeteredParser(
self.tel_config,
file_handle, lambda state, ingested: None,
lambda data: None, self.tel_exception_callback)
def open_file(self, filename):
return open(os.path.join(RESOURCE_PATH, filename), mode='r')
def rec_state_callback(self, state, file_ingested):
""" Call back method to watch what comes in via the position callback """
self.rec_state_callback_value = state
self.rec_file_ingested_value = file_ingested
def tel_state_callback(self, state, file_ingested):
""" Call back method to watch what comes in via the position callback """
self.tel_state_callback_value = state
self.tel_file_ingested_value = file_ingested
def rec_pub_callback(self, pub):
""" Call back method to watch what comes in via the publish callback """
self.rec_publish_callback_value = pub
def tel_pub_callback(self, pub):
""" Call back method to watch what comes in via the publish callback """
self.tel_publish_callback_value = pub
def rec_exception_callback(self, exception):
""" Call back method to watch what comes in via the exception callback """
self.rec_exception_callback_value = exception
self.rec_exceptions_detected += 1
def tel_exception_callback(self, exception):
""" Call back method to watch what comes in via the exception callback """
self.tel_exception_callback_value = exception
self.tel_exceptions_detected += 1
def setUp(self):
ParserUnitTestCase.setUp(self)
self.rec_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME,
DataSetDriverConfigKeys.PARTICLE_CLASS: None
}
self.tel_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME,
DataSetDriverConfigKeys.PARTICLE_CLASS: None
}
self.rec_state_callback_value = None
self.rec_file_ingested_value = False
self.rec_publish_callback_value = None
self.rec_exception_callback_value = None
self.rec_exceptions_detected = 0
self.tel_state_callback_value = None
self.tel_file_ingested_value = False
self.tel_publish_callback_value = None
self.tel_exception_callback_value = None
self.tel_exceptions_detected = 0
self.maxDiff = None
def test_happy_path(self):
"""
Read files and verify that all expected particles can be read.
Verify that the contents of the particles are correct.
"""
log.debug('===== START TEST HAPPY PATH =====')
for input_file, expected_particles, rec_yml_file, tel_yml_file in HAPPY_PATH_TABLE:
in_file = self.open_file(input_file)
parser = self.create_rec_parser(in_file)
particles = parser.get_records(expected_particles)
self.assert_particles(particles, rec_yml_file, RESOURCE_PATH)
self.assertEqual(self.rec_exceptions_detected, 0)
in_file.close()
in_file = self.open_file(input_file)
parser = self.create_tel_parser(in_file)
particles = parser.get_records(expected_particles)
self.assert_particles(particles, tel_yml_file, RESOURCE_PATH)
self.assertEqual(self.tel_exceptions_detected, 0)
in_file.close()
log.debug('===== END TEST HAPPY PATH =====')
def test_invalid_fields(self):
"""
The file used in this test has errors in every instrument record
except the first NDC record.
This results in 1 metadata particle and 1 instrument particle.
"""
log.debug('===== START TEST INVALID FIELDS =====')
input_file = FILE_INVALID_FIELDS
expected_particles = EXPECTED_PARTICLES_INVALID_FIELDS
expected_exceptions = EXPECTED_EXCEPTIONS_INVALID_FIELDS
total_records = expected_particles + expected_exceptions + 1
in_file = self.open_file(input_file)
parser = self.create_rec_parser(in_file)
particles = parser.get_records(total_records)
self.assertEqual(len(particles), expected_particles)
self.assert_particles(particles, REC_YML_INVALID_FIELDS, RESOURCE_PATH)
self.assertEqual(self.rec_exceptions_detected, expected_exceptions)
in_file.close()
in_file = self.open_file(input_file)
parser = self.create_tel_parser(in_file)
particles = parser.get_records(total_records)
self.assertEqual(len(particles), expected_particles)
self.assert_particles(particles, TEL_YML_INVALID_FIELDS, RESOURCE_PATH)
self.assertEqual(self.tel_exceptions_detected, expected_exceptions)
in_file.close()
log.debug('===== END TEST INVALID FIELDS =====')
def test_invalid_frame_type(self):
"""
        The file used in this test has an invalid frame type instead
of the NDC (dark) type and 1 other invalid frame type.
        This results in 1 metadata particle,
instrument particles for the other valid instrument types,
plus 2 Recoverable exceptions.
"""
log.debug('===== START TEST INVALID FRAME TYPE =====')
input_file = FILE_INVALID_FRAME_TYPE
expected_particles = EXPECTED_PARTICLES_INVALID_FRAME_TYPE
expected_exceptions = EXPECTED_EXCEPTIONS_INVALID_FRAME_TYPE
total_records = expected_particles + expected_exceptions
in_file = self.open_file(input_file)
parser = self.create_rec_parser(in_file)
particles = parser.get_records(total_records)
self.assertEqual(len(particles), expected_particles)
self.assertEqual(self.rec_exceptions_detected, expected_exceptions)
in_file.close()
in_file = self.open_file(input_file)
parser = self.create_tel_parser(in_file)
particles = parser.get_records(total_records)
self.assertEqual(len(particles), expected_particles)
self.assertEqual(self.tel_exceptions_detected, expected_exceptions)
in_file.close()
log.debug('===== END TEST INVALID FRAME TYPE =====')
def test_missing_metadata(self):
"""
The file used in this test is missing one of the required
metadata records.
        As a result, no metadata particles are generated, only the 20 science particles.
"""
log.debug('===== START TEST MISSING METADATA =====')
input_file = FILE_MISSING_METADATA
expected_particles = EXPECTED_PARTICLES_MISSING_METADATA
expected_exceptions = EXPECTED_EXCEPTIONS_MISSING_METADATA
total_records = expected_particles + expected_exceptions
in_file = self.open_file(input_file)
parser = self.create_rec_parser(in_file)
particles = parser.get_records(total_records)
self.assertEqual(len(particles), expected_particles)
self.assertEqual(self.rec_exceptions_detected, expected_exceptions)
inst_particles = 0
meta_particles = 0
for particle in particles:
if isinstance(particle, NutnrBDclConcRecoveredInstrumentDataParticle) or \
isinstance(particle, NutnrBDclDarkConcRecoveredInstrumentDataParticle):
inst_particles += 1
elif isinstance(particle, NutnrBDclConcRecoveredMetadataDataParticle):
meta_particles += 1
self.assertEqual(inst_particles, expected_particles)
self.assertEqual(meta_particles, 0)
in_file.close()
in_file = self.open_file(input_file)
parser = self.create_tel_parser(in_file)
particles = parser.get_records(total_records)
self.assertEqual(len(particles), expected_particles)
self.assertEqual(self.tel_exceptions_detected, expected_exceptions)
inst_particles = 0
meta_particles = 0
for particle in particles:
if isinstance(particle, NutnrBDclConcTelemeteredInstrumentDataParticle) or \
isinstance(particle, NutnrBDclDarkConcTelemeteredInstrumentDataParticle):
inst_particles += 1
elif isinstance(particle, NutnrBDclConcTelemeteredMetadataDataParticle):
meta_particles += 1
self.assertEqual(inst_particles, expected_particles)
self.assertEqual(meta_particles, 0)
in_file.close()
log.debug('===== END TEST MISSING METADATA =====')
def test_second_nitrate_dark_in_data_block(self):
"""
        Verify the particles produced when a second nitrate dark
        record occurs within a single data block.
"""
log.debug('===== START TEST SECOND DARK IN DATA BLOCK =====')
input_file = SECOND_BLOCK_IN_DATA_BLOCK_FILE
expected_particles = EXPECTED_PARTICLES_SECOND_BLOCK_IN_DATA_BLOCK
expected_exceptions = EXPECTED_EXCEPTIONS_SECOND_BLOCK_IN_DATA_BLOCK
total_records = expected_particles
in_file = self.open_file(input_file)
parser = self.create_rec_parser(in_file)
particles = parser.get_records(total_records)
self.assertEqual(len(particles), expected_particles)
self.assertEqual(self.rec_exceptions_detected, expected_exceptions)
in_file.close()
in_file = self.open_file(input_file)
parser = self.create_tel_parser(in_file)
particles = parser.get_records(total_records)
self.assertEqual(len(particles), expected_particles)
self.assertEqual(self.tel_exceptions_detected, expected_exceptions)
in_file.close()
log.debug('===== END TEST SECOND DARK IN DATA BLOCK =====')
def test_no_particles(self):
"""
Verify that no particles are produced if the input file
has no instrument records.
"""
log.debug('===== START TEST NO PARTICLES =====')
input_file = FILE1
expected_particles = EXPECTED_PARTICLES1
total_records = expected_particles + 1
in_file = self.open_file(input_file)
parser = self.create_rec_parser(in_file)
particles = parser.get_records(total_records)
self.assertEqual(len(particles), expected_particles)
self.assertEqual(self.rec_exceptions_detected, 0)
in_file.close()
in_file = self.open_file(input_file)
parser = self.create_tel_parser(in_file)
particles = parser.get_records(total_records)
self.assertEqual(len(particles), expected_particles)
self.assertEqual(self.tel_exceptions_detected, 0)
in_file.close()
log.debug('===== END TEST NO PARTICLES =====')
def test_real_file(self):
"""
Verify that the correct number of particles are generated
from a real file.
"""
log.debug('===== START TEST REAL FILE =====')
input_file = FILE_REAL
expected_inst_particles = EXPECTED_INST_PARTICLES_REAL
expected_meta_particles = EXPECTED_META_PARTICLES_REAL
expected_particles = expected_meta_particles + expected_inst_particles
in_file = self.open_file(input_file)
parser = self.create_rec_parser(in_file)
particles = parser.get_records(expected_particles)
self.assertEqual(len(particles), expected_particles)
inst_particles = 0
meta_particles = 0
for particle in particles:
if isinstance(particle, NutnrBDclConcRecoveredInstrumentDataParticle) or \
isinstance(particle, NutnrBDclDarkConcRecoveredInstrumentDataParticle):
inst_particles += 1
elif isinstance(particle, NutnrBDclConcRecoveredMetadataDataParticle):
meta_particles += 1
self.assertEqual(inst_particles, expected_inst_particles)
self.assertEqual(meta_particles, expected_meta_particles)
in_file.close()
in_file = self.open_file(input_file)
parser = self.create_tel_parser(in_file)
particles = parser.get_records(expected_particles)
self.assertEqual(len(particles), expected_particles)
inst_particles = 0
meta_particles = 0
for particle in particles:
if isinstance(particle, NutnrBDclConcTelemeteredInstrumentDataParticle) or \
isinstance(particle, NutnrBDclDarkConcTelemeteredInstrumentDataParticle):
inst_particles += 1
elif isinstance(particle, NutnrBDclConcTelemeteredMetadataDataParticle):
meta_particles += 1
self.assertEqual(inst_particles, expected_inst_particles)
self.assertEqual(meta_particles, expected_meta_particles)
in_file.close()
log.debug('===== END TEST REAL FILE =====')
|
|
"""
Copyright 2016-2017 Ellation, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import base64
import os
import unittest
from botocore.exceptions import ClientError
from mock import Mock, patch, mock_open
import context_paths
import ef_password
class TestEFPassword(unittest.TestCase):
def setUp(self):
self.service = "test-service"
self.env = "test"
self.secret = "secret"
self.secret_file = os.path.join(os.path.dirname(__file__), '../test_data/parameters/test.cnf.parameters.json')
self.error_response = {'Error': {'Code': 'FakeError', 'Message': 'Testing catch of all ClientErrors'}}
self.client_error = ClientError(self.error_response, "boto3")
self.mock_kms = Mock(name="mocked kms client")
self.bytes_return = "cipher_blob".encode()
self.key_id = "AWS_KMS_KEY_ID"
self.mock_kms.encrypt.return_value = {"CiphertextBlob": self.bytes_return, "KeyId": self.key_id}
self.mock_kms.decrypt.return_value = {"Plaintext": self.bytes_return, "KeyId": self.key_id}
self.mock_kms.re_encrypt.return_value = { 'CiphertextBlob': self.bytes_return }
self.mock_kms.list_aliases.return_value = {
'Aliases': [ { 'AliasName': 'alias/staging-data-flow' } ]
}
def test_generate_secret(self):
"""Check that generated secret matches the length specified and doesn't contain any special characters"""
random_secret = ef_password.generate_secret(24)
self.assertEqual(len(random_secret), 24)
assert not set('[~!@#$%^&*()_+{}":;\']+$').intersection(random_secret)
def test_args_decrypt(self):
"""Test parsing args with all valid values (decrypt)"""
args = [self.service, self.env, "--length", "10", "--decrypt", "test"]
context = ef_password.handle_args_and_set_context(args)
self.assertEqual(context.env, self.env)
self.assertEqual(context.service, self.service)
self.assertEqual(context.length, 10)
self.assertEqual(context.decrypt, "test")
def test_args_plaintext(self):
"""Test parsing args with all valid values (plaintext)"""
args = [self.service, self.env, "--length", "10", "--plaintext", "test"]
context = ef_password.handle_args_and_set_context(args)
self.assertEqual(context.env, self.env)
self.assertEqual(context.service, self.service)
self.assertEqual(context.length, 10)
self.assertEqual(context.plaintext, "test")
def test_args_plaintext_escape_sequences(self):
"""
Test parsing args with all valid values and plaintext with escape sequences
    When called from bash in this form `ef-password --plaintext "hello\nworld"`,
    the OS transforms the plaintext argument into `"hello\\nworld"`,
    which would produce unexpected values on decryption
"""
expected_plaintext = "hello\nworld"
args = [self.service, self.env, "--length", "10", "--plaintext", "hello\\nworld"]
context = ef_password.handle_args_and_set_context(args)
self.assertEqual(context.env, self.env)
self.assertEqual(context.service, self.service)
self.assertEqual(context.length, 10)
self.assertEqual(context.plaintext, expected_plaintext)
def test_args_plaintext_lbv1_escape_sequences(self):
"""
Test parsing args with all valid values and plaintext with latebindv1
    compatible escape sequences. When called from bash in this form
    `ef-password --plaintext "hello\nworld" --lbv1_escapes`, the OS transforms the plaintext
    argument into `"hello\\nworld"`. The decrypted value should have the same form
"""
expected_plaintext = "hello\\nworld"
args = [self.service, self.env, "--lbv1_escapes", "--plaintext", "hello\\nworld"]
context = ef_password.handle_args_and_set_context(args)
self.assertEqual(context.env, self.env)
self.assertEqual(context.service, self.service)
self.assertEqual(context.plaintext, expected_plaintext)
def test_args_secret_file(self):
"""Test parsing args with all valid values (secret file)"""
args = [self.service, self.env, "--length", "10", "--secret_file",
"test_data/parameters/test.cnf.parameters.json", "--match", "test"]
context = ef_password.handle_args_and_set_context(args)
self.assertEqual(context.env, self.env)
self.assertEqual(context.service, self.service)
self.assertEqual(context.length, 10)
self.assertEqual(context.secret_file, "test_data/parameters/test.cnf.parameters.json")
self.assertEqual(context.match, "test")
def test_args_invalid_env(self):
"""Verify that an invalid environment arg raises an exception"""
args = [self.service, "invalid_env"]
with self.assertRaises(SystemExit):
ef_password.handle_args_and_set_context(args)
def test_args_nonint_length(self):
"""A non-integer value for the length param should raise an exception"""
args = [self.service, self.env, "--length", "8a"]
with self.assertRaises(ValueError):
ef_password.handle_args_and_set_context(args)
def test_args_length_too_small(self):
"""A length value less than 10 should raise an exception"""
args = [self.service, self.env, "--length", "5"]
with self.assertRaises(ValueError):
ef_password.handle_args_and_set_context(args)
def test_args_without_secret_file(self):
"""Without the --secret_file flag"""
args = [self.service, self.env, "--match", "test"]
with self.assertRaises(ValueError):
ef_password.handle_args_and_set_context(args)
def test_args_without_match(self):
"""Without the --match flag"""
args = [self.service, self.env, "--secret_file", "test_data/parameters/test.cnf.parameters.json"]
with self.assertRaises(ValueError):
ef_password.handle_args_and_set_context(args)
@patch('ef_password.generate_secret', return_value="mock_secret")
@patch('ef_utils.create_aws_clients')
@patch('ef_password.handle_args_and_set_context')
def test_main(self, mock_context, mock_create_aws, mock_gen):
"""Test valid main() call with just service and env.
Ensure generate_password and encrypt are called with the correct parameters"""
context = ef_password.EFPWContext()
context.env, context.service, context.length = self.env, self.service, 24
mock_context.return_value = context
mock_create_aws.return_value = {"kms": self.mock_kms}
ef_password.main()
mock_gen.assert_called_once_with(24)
self.mock_kms.decrypt.assert_not_called()
self.mock_kms.encrypt.assert_called_once_with(
KeyId='alias/{}-{}'.format(self.env, self.service),
Plaintext="mock_secret".encode()
)
@patch('ef_password.generate_secret', return_value="mock_secret")
@patch('ef_utils.create_aws_clients')
@patch('ef_password.handle_args_and_set_context')
def test_main_plaintext(self, mock_context, mock_create_aws, mock_gen):
"""Test valid main() call with service, env, and --plaintext.
Ensure generate_password and encrypt are called with the correct parameters"""
context = ef_password.EFPWContext()
context.env, context.service, context.plaintext = self.env, self.service, self.secret
mock_context.return_value = context
mock_create_aws.return_value = {"kms": self.mock_kms}
ef_password.main()
mock_gen.assert_not_called()
self.mock_kms.decrypt.assert_not_called()
self.mock_kms.encrypt.assert_called_once_with(
KeyId='alias/{}-{}'.format(self.env, self.service),
Plaintext=self.secret.encode()
)
@patch('ef_password.generate_secret')
@patch('ef_utils.create_aws_clients')
@patch('ef_password.handle_args_and_set_context')
def test_main_decrypt(self, mock_context, mock_create_aws, mock_gen):
"""Test valid main() call with service, env, and --decrypt.
Ensure decrypt is called with the correct parameters"""
context = ef_password.EFPWContext()
context.env, context.service, context.decrypt = self.env, self.service, base64.b64encode(self.secret)
mock_context.return_value = context
mock_create_aws.return_value = {"kms": self.mock_kms}
ef_password.main()
mock_gen.assert_not_called()
self.mock_kms.encrypt.assert_not_called()
self.mock_kms.decrypt.assert_called_once_with(CiphertextBlob=self.secret)
@patch('ef_password.generate_secret_file')
@patch('ef_utils.create_aws_clients')
@patch('ef_password.handle_args_and_set_context')
def test_main_secret_file_parameters(self, mock_context, mock_create_aws, mock_gen):
"""Test valid main() call with service, env, --secret_file, and --match.
Ensure generate_secret_file is called with the correct parameters"""
context = ef_password.EFPWContext()
context.env, context.service = self.env, self.service
context.secret_file = self.secret_file
context.match = 'password'
mock_context.return_value = context
mock_create_aws.return_value = {"kms": self.mock_kms}
ef_password.main()
mock_gen.assert_called_once_with(context.secret_file, context.match, context.service, context.env, mock_create_aws.return_value)
@patch('json.dump')
@patch('json.load')
@patch('__builtin__.open', new_callable=mock_open)
@patch('ef_utils.create_aws_clients')
@patch('ef_password.handle_args_and_set_context')
def test_generate_secret_file(self, mock_context, mock_create_aws, mock_file_open, mock_json, mock_dump):
"""Test generate_secret_file and ensure encrypt is called with the correct parameters"""
context = ef_password.EFPWContext()
context.env, context.service = self.env, self.service
context.secret_file = self.secret_file
context.match = 'password'
mock_context.return_value = context
mock_create_aws.return_value = {"kms": self.mock_kms}
mock_json.return_value = {"params": {"test": {"password": "mock_secret1"}}}
ef_password.main()
self.mock_kms.decrypt.assert_not_called()
self.mock_kms.encrypt.assert_called_once_with(
KeyId='alias/{}-{}'.format(self.env, self.service),
Plaintext="mock_secret1".encode()
)
mock_file_open.assert_called_with(self.secret_file, 'w')
handle = mock_file_open()
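    # 'Y2lwaGVyX2Jsb2I=' is base64.b64encode("cipher_blob"), i.e. the
    # CiphertextBlob returned by the mocked kms.encrypt, wrapped in the
    # {{aws:kms:decrypt,...}} late-binding token written back to the file.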
mock_dump.assert_called_once_with({'params': {'test': {'password': '{{aws:kms:decrypt,Y2lwaGVyX2Jsb2I=}}'}}},
handle, indent=2, separators=(',', ': '))
handle.write.assert_called_with('\n')
@patch('ef_password.generate_secret')
@patch('ef_utils.create_aws_clients')
@patch('ef_password.handle_args_and_set_context')
  def test_main_re_encrypt(self, mock_context, mock_create_aws, mock_gen):
    """Test valid main() call with service, env, and --re_encrypt.
    Ensure re_encrypt is called with the correct parameters"""
context = ef_password.EFPWContext()
context.env, context.service, context.re_encrypt = self.env, self.service, base64.b64encode(self.secret)
mock_context.return_value = context
mock_create_aws.return_value = {"kms": self.mock_kms}
ef_password.main()
mock_gen.assert_not_called()
self.mock_kms.encrypt.assert_not_called()
expected_destination_key_id = "alias/{}-{}".format(context.env, context.service)
self.mock_kms.re_encrypt.assert_called_once_with(CiphertextBlob=self.secret, DestinationKeyId=expected_destination_key_id)
|
|
#!/usr/bin/env python
"""
This is a REST server that accept requests to control the PiGlow board.
A few interesting things about this server:
* It is designed with a RESTful Api
* It uses a global lock to queue up operations to the PiGlow
Run this server like this:
python pg_rest_server.py
Example in using the API:
# set arm 3 to brightness 50
curl -X PUT -d brightness=50 http://localhost:5000/arms/3
# switch on and off LED 7
curl -X PUT -d brightness=100 http://localhost:5000/leds/7
curl -X PUT -d brightness=0 http://localhost:5000/leds/7
# switch on led 3 and 5 with brightness 10 and 200
curl -X PUT -H 'Content-Type: application/json' \
-d '[{"led_id":3, "brightness": 10}, {"led_id":5, "brightness":200 }]' \
http://localhost:5000/leds
"""
import threading
from time import sleep
from flask import Flask
from flask import request
from flask.ext.restful import (Resource, Api, reqparse, abort)
# Support a dummy PyGlow class so that we can test this code
# on something other than a real RPi
try:
from PyGlow import (PyGlow, ARM_LED_LIST, COLOR_LED_LIST)
except ImportError:
    print 'Cannot import PyGlow library, using dummy interface for testing'
from dummy_pyglow import (PyGlow, ARM_LED_LIST, COLOR_LED_LIST)
app = Flask(__name__)
api = Api(app)
@app.after_request
def add_cors_headers(response):
""" To allow CORS """
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Methods', 'PUT, POST, GET')
response.headers.add('Access-Control-Allow-Credentials', 'true')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type')
return response
# internal cache of LED status
led_list = [{'led_id': i, 'brightness': 0} for i in range(1, 19)]
# global lock
lock = threading.Lock()
pyglow = PyGlow()
# interface to the h/w layer
def set_led(num, brightness):
"""
Set one LED
:param num: is the LED number, from 1 to 18
:param brightness: is the light level, from 0-255
"""
global lock
# do this one at a time
with lock:
led_list[num - 1]['brightness'] = brightness
pyglow.led(num, brightness=brightness)
def set_arm(num, brightness):
"""
Set one arm of the PiGlow
:param num: is the arm number, from 1 to 3
:param brightness: is the light level, from 0-255
"""
global lock
# do this one at a time
with lock:
for i in ARM_LED_LIST[num - 1]:
led_list[i - 1]['brightness'] = brightness
pyglow.arm(num, brightness=brightness)
def set_color(num, brightness):
"""
Set one color ring of the PiGlow
:param num: is the color/ring number, from 1 to 6
:param brightness: is the light level, from 0-255
"""
global lock
# do this one at a time
with lock:
for i in COLOR_LED_LIST[num - 1]:
led_list[i - 1]['brightness'] = brightness
pyglow.color(num, brightness=brightness)
def set_clear():
"""
Turn off all LEDs.
"""
global lock
# do this one at a time
with lock:
for i in range(1, 19):
led_list[i - 1]['brightness'] = 0
pyglow.all(brightness=0)
def set_starburst(brightness, speed_ms):
"""
Execute starburst pattern
:param brightness: is the light level, from 0-255
"""
global lock
# do this one at a time
with lock:
# clear first
pyglow.all(brightness=0)
for i in range(1, 7):
pyglow.color(i, brightness=brightness)
sleep(speed_ms / 1000.0)
pyglow.color(i, brightness=0)
# interface to the h/w layer
def set_leds(set_list):
"""
Set list of LED
:param set_list: is a list of (id, brightness)
"""
global lock
# do this one at a time
with lock:
for num, b in set_list:
led_list[num - 1]['brightness'] = b
pyglow.led(num, brightness=b)
class PiGlowResourceMixin(object):
"""
Mixin provide some helper functions.
"""
def validate_led_id(self, led_id):
if led_id is None or not led_id in range(1, 19):
abort(404, message='LED id must be in the range of 1 to 18')
def validate_brightness(self, b):
if b is None or not b in range(0, 256):
abort(400, message='brightness must be in the range of 0 to 255')
def validate_arm_id(self, arm_id):
if arm_id is None or not arm_id in range(1, 4):
abort(404, message='arm id must be in the range of 1 to 3')
def validate_color_id(self, color_id):
if color_id is None or not color_id in range(1, 7):
abort(404, message='color id must be in the range of 1 to 6')
def validate_speed(self, msec):
""" speed is in millseconds """
if msec is None or not msec in range(0, 5000):
abort(404, message='speed must be milliseconds in the range of 0 to 5000')
def queue_command(self, func, *args):
"""
Queue function with optional args in a separate thread.
"""
h = threading.Thread(target=func, args=args)
h.setDaemon(True)
h.start()
return h
class LedListAPI(PiGlowResourceMixin, Resource):
"""
REST interface to the list of LED as a whole.
Set the brightness of one or more LEDs
PUT /leds
URL Parameters:
None
Data Parameters (required), list of one or more dictionaries:
[
{
"led_id": [integer 1-18],
"brightness": [integer 0-255]
},
{
"led_id": [integer 1-18],
"brightness": [integer 0-255]
}
]
curl example
curl -X PUT -H 'Content-Type: application/json'
-d '[{"led_id":1,\"brightness":100}, {"led_id":2, "brightness":100} ]' localhost:5000/leds
"""
def get(self):
return led_list
def put(self):
"""
Accept JSON [ {led_id:n, brightness:b}, ...]
"""
set_list = []
for d in request.json:
n = d['led_id']
b = d['brightness']
self.validate_brightness(b)
self.validate_led_id(n)
set_list.append((n, b))
self.queue_command(set_leds, set_list)
return led_list
class LedAPI(PiGlowResourceMixin, Resource):
"""
REST interface to control individual LED.
"""
def get(self, led_id):
"""
Get the brightness of a LED.
        (These are cached values, not necessarily correct!)
"""
        self.validate_led_id(led_id)
        return led_list[led_id - 1]
def put(self, led_id):
"""
Set the brightness of a LED
PUT /leds/:id
URL Parameters:
id=[integer] in the range of 1-18
Optional:
brightness=[integer 0-255]
Data Parameters (optional):
{
"brightness": [integer 0-255]
}
"""
self.validate_led_id(led_id)
parser = reqparse.RequestParser()
parser.add_argument('brightness', type=int, default=0,
                            help='Brightness for this LED')
args = parser.parse_args()
b = args.get('brightness')
self.validate_brightness(b)
self.queue_command(set_led, led_id, b)
return led_list[led_id - 1]
class ArmAPI(PiGlowResourceMixin, Resource):
"""
Control a single arm on the PiGlow.
/arms/:arm_id/
The brightness value can be specified as json or form data in the request,
or directly on the URL.
:param arm_id: on the URL is 1 to 3
:param brightness: brightness=0..255
"""
def get(self, arm_id):
return led_list
def put(self, arm_id):
parser = reqparse.RequestParser()
parser.add_argument('brightness', type=int, default=0,
help='Brightness for this arm of LED')
args = parser.parse_args()
self.validate_arm_id(arm_id)
b = args.get('brightness')
self.validate_brightness(b)
self.queue_command(set_arm, arm_id, b)
return led_list
class ColorAPI(PiGlowResourceMixin, Resource):
"""
Control a single color ring on the PiGlow.
/colors/:color_id/
The brightness value can be specified as json or form data in the request,
or directly on the URL.
:param color_id: on the URL is 1 to 6
:param brightness: brightness=0..255
"""
def get(self, color_id):
return led_list
def put(self, color_id):
parser = reqparse.RequestParser()
parser.add_argument('brightness', type=int, default=0,
                            help='Brightness for this color ring')
args = parser.parse_args()
self.validate_color_id(color_id)
b = args.get('brightness')
self.validate_brightness(b)
self.queue_command(set_color, color_id, b)
return led_list
class PatternAPI(PiGlowResourceMixin, Resource):
"""
This API allows display of patterns as a whole.
/pattern/:pattern_name/
:param brightness: brightness=0..255
"""
    def get(self, pattern_name):
return led_list
def put(self, pattern_name):
parser = reqparse.RequestParser()
parser.add_argument('brightness', type=int, default=0,
help='Brightness for the pattern')
parser.add_argument('speed', type=int, default=0,
help='Speed for the pattern')
args = parser.parse_args()
b = args.get('brightness')
self.validate_brightness(b)
s = args.get('speed')
self.validate_speed(s)
if pattern_name == 'clear':
self.queue_command(set_clear)
if pattern_name == 'starburst':
self.queue_command(set_starburst, b, s)
return led_list
api.add_resource(LedListAPI, '/leds')
api.add_resource(LedAPI, '/leds/<int:led_id>')
api.add_resource(ArmAPI, '/arms/<int:arm_id>')
api.add_resource(ColorAPI, '/colors/<int:color_id>')
api.add_resource(PatternAPI, '/patterns/<pattern_name>')
@app.route('/', methods=['GET', ])
def index():
return 'PiGlow RESTful API Server.<br />See http://github.com/pkshiu/piglowserver for info'
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
|
from neo.Network.common import blocking_prompt as prompt
from neo.Prompt.InputParser import InputParser
from neo.SmartContract.ContractParameter import ContractParameter
from neo.SmartContract.ContractParameterType import ContractParameterType
from boa.compiler import Compiler
import pprint
import pdb
import dis
import json
from neo.Settings import settings
from neo.logging import log_manager
logger = log_manager.getLogger()
class DebugContext:
start = None
end = None
line = None
file_id = None
file_url = None
files = None
file_lines = None
method = None
method_name = None
def __init__(self, ctx, files):
self.start = ctx['start']
self.end = ctx['end']
self.line = ctx['file_line_no']
self.file_id = ctx['file']
self.method_name = ctx['method']
for file in files:
if file['id'] == self.file_id:
self.file_url = file['url']
self.file_lines = []
try:
default_module = Compiler.load(self.file_url, use_nep8=settings.COMPILER_NEP_8).default
self.method = default_module.method_by_name(self.method_name)
except Exception as e:
logger.error('Could not load module %s %s ' % (self.file_url, e))
try:
with open(self.file_url, 'r') as dbg_file:
for ln in dbg_file:
self.file_lines.append(ln.replace('\n', ''))
except Exception as e:
logger.error("Could not open file %s : %s " % (self.file_url, e))
def print_context(self):
myrange = range(self.line - 3, self.line + 3)
for index, ln in enumerate(self.file_lines):
idx = index + 1
if idx in myrange:
if idx == self.line:
print('[%s] %s <<<<<<<<<<<<' % (idx, ln))
else:
print('[%s] %s' % (idx, ln))
def print_file(self):
for index, ln in enumerate(self.file_lines):
idx = index + 1
if idx == self.line:
print('[%s] %s <<<<<<<<<<<<' % (idx, ln))
else:
print('[%s] %s' % (idx, ln))
def print(self):
print("%s -> %s " % (self.file_url, self.method_name))
self.print_context()
def print_method_ops(self):
dis.dis(self.method.code_object)
class VMDebugger:
engine = None
parser = None
debug_map = None
debug_context = None
index = None
continue_debug = False
def __init__(self, engine):
self.engine = engine
self.parser = InputParser()
self.debug_map = engine._debug_map
self.index = engine.CurrentContext.InstructionPointer
def end(self):
self.continue_debug = False
def start(self):
self.continue_debug = True
# pprint.pprint(self.debug_map)
dbg_title = self.debug_map['avm']['name']
print("\n")
print("======= debug engine enter =======")
ctx = self.get_context()
ctx.print()
while self.continue_debug:
try:
result = prompt("[%s debug]> " % dbg_title)
            except EOFError:
                # Control-D pressed: leave the debug prompt
                self.continue_debug = False
                continue
            except KeyboardInterrupt:
                # Control-C pressed: leave the debug prompt
                self.continue_debug = False
                continue
command, arguments = self.parser.parse_input(result)
if command is not None and len(command) > 0:
command = command.lower()
if command in ['quit', 'exit', 'cont']:
self.continue_debug = False
elif command == 'estack':
if self.engine._InvocationStack.Count > 0: # eval stack now only available via ExecutionContext objects in the istack
if len(self.engine.CurrentContext.EvaluationStack.Items):
for item in self.engine.CurrentContext.EvaluationStack.Items:
print(ContractParameter.ToParameter(item).ToJson())
else:
print("Evaluation stack empty")
else:
print("Evaluation stack empty")
elif command == 'istack':
print("Invocation Stack:")
for item in self.engine.InvocationStack.Items:
pprint.pprint(item)
print(vars(item))
elif command == 'astack':
if len(self.engine.AltStack.Items):
for item in self.engine.AltStack.Items:
print(ContractParameter.ToParameter(item).ToJson())
else:
print("Alt Stack Empty")
elif command == 'rstack':
items = self.engine.ResultStack.Items
if len(items):
for item in items:
pprint.pprint(item)
else:
print("Result stack empty")
elif command == 'ctx':
ctx.print()
elif command == 'file':
ctx.print_file()
elif command == 'ops':
ctx.print_method_ops()
elif command == 'pdb':
pdb.set_trace()
elif command == 'help':
print("Use one of [estack, istack, astack, exit, quit, ctx, file, ops, pdb, or any local variable]")
elif command in ctx.method.scope:
try:
idx = ctx.method.scope[command]
value = self.engine.AltStack.Items[-1].GetArray()[idx]
param = ContractParameter.ToParameter(value)
print("\n")
if param.Type == ContractParameterType.InteropInterface:
cmd_value = json.dumps(param.Value.ToJson(), indent=4)
else:
cmd_value = param.Value
print(f"{command} = {cmd_value} [{param.Type}]")
print("\n")
except Exception as e:
logger.error("Could not lookup item %s: %s " % (command, e))
else:
print("unknown command: %s " % command)
print("======= debug engine exit =======")
print("\n")
def get_context(self):
files = self.debug_map['files']
for item in self.debug_map['map']:
if item['start'] == self.index:
ctx = DebugContext(item, files)
return ctx
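# Hedged usage sketch (illustration only): given an execution engine of the
# kind assumed above (exposing _debug_map, CurrentContext and the stacks),
# the debugger is typically driven as:
#
#   debugger = VMDebugger(engine)
#   debugger.start()   # opens the interactive "[<avm name> debug]>" prompt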
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for print_selective_registration_header."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.tools import selective_registration_header_lib
# Note that this graph def is not valid to be loaded - its inputs are not
# assigned correctly in all cases.
GRAPH_DEF_TXT = """
node: {
name: "node_1"
op: "Reshape"
input: [ "none", "none" ]
device: "/cpu:0"
attr: { key: "T" value: { type: DT_FLOAT } }
}
node: {
name: "node_2"
op: "MatMul"
input: [ "none", "none" ]
device: "/cpu:0"
attr: { key: "T" value: { type: DT_FLOAT } }
attr: { key: "transpose_a" value: { b: false } }
attr: { key: "transpose_b" value: { b: false } }
}
node: {
name: "node_3"
op: "MatMul"
input: [ "none", "none" ]
device: "/cpu:0"
attr: { key: "T" value: { type: DT_DOUBLE } }
attr: { key: "transpose_a" value: { b: false } }
attr: { key: "transpose_b" value: { b: false } }
}
"""
# AccumulateNV2 is included because it should be included in the header despite
# lacking a kernel (it's rewritten by AccumulateNV2RemovePass; see
# core/common_runtime/accumulate_n_optimizer.cc).
GRAPH_DEF_TXT_2 = """
node: {
name: "node_4"
op: "BiasAdd"
input: [ "none", "none" ]
device: "/cpu:0"
attr: { key: "T" value: { type: DT_FLOAT } }
}
node: {
name: "node_5"
op: "AccumulateNV2"
attr: { key: "T" value: { type: DT_INT32 } }
attr: { key : "N" value: { i: 3 } }
}
"""
class PrintOpFilegroupTest(test.TestCase):
def setUp(self):
_, self.script_name = os.path.split(sys.argv[0])
def WriteGraphFiles(self, graphs):
fnames = []
for i, graph in enumerate(graphs):
fname = os.path.join(self.get_temp_dir(), 'graph%s.pb' % i)
with gfile.GFile(fname, 'wb') as f:
f.write(graph.SerializeToString())
fnames.append(fname)
return fnames
def testGetOps(self):
default_ops = 'NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'
graphs = [
text_format.Parse(d, graph_pb2.GraphDef())
for d in [GRAPH_DEF_TXT, GRAPH_DEF_TXT_2]
]
ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels(
'rawproto', self.WriteGraphFiles(graphs), default_ops)
matmul_prefix = ''
self.assertListEqual(
[
('AccumulateNV2', None), #
('BiasAdd', 'BiasOp<CPUDevice, float>'), #
('MatMul',
matmul_prefix + 'MatMulOp<CPUDevice, double, false >'), #
('MatMul', matmul_prefix + 'MatMulOp<CPUDevice, float, false >'), #
('NoOp', 'NoOp'), #
('Reshape', 'ReshapeOp'), #
('_Recv', 'RecvOp'), #
('_Send', 'SendOp'), #
],
ops_and_kernels)
graphs[0].node[0].ClearField('device')
graphs[0].node[2].ClearField('device')
ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels(
'rawproto', self.WriteGraphFiles(graphs), default_ops)
self.assertListEqual(
[
('AccumulateNV2', None), #
('BiasAdd', 'BiasOp<CPUDevice, float>'), #
('MatMul',
matmul_prefix + 'MatMulOp<CPUDevice, double, false >'), #
('MatMul', matmul_prefix + 'MatMulOp<CPUDevice, float, false >'), #
('NoOp', 'NoOp'), #
('Reshape', 'ReshapeOp'), #
('_Recv', 'RecvOp'), #
('_Send', 'SendOp'), #
],
ops_and_kernels)
def testAll(self):
default_ops = 'all'
graphs = [
text_format.Parse(d, graph_pb2.GraphDef())
for d in [GRAPH_DEF_TXT, GRAPH_DEF_TXT_2]
]
ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels(
'rawproto', self.WriteGraphFiles(graphs), default_ops)
header = selective_registration_header_lib.get_header_from_ops_and_kernels(
ops_and_kernels, include_all_ops_and_kernels=True)
self.assertListEqual(
[
'// This file was autogenerated by %s' % self.script_name,
'#ifndef OPS_TO_REGISTER', #
'#define OPS_TO_REGISTER', #
'#define SHOULD_REGISTER_OP(op) true', #
'#define SHOULD_REGISTER_OP_KERNEL(clz) true', #
'#define SHOULD_REGISTER_OP_GRADIENT true', #
'#endif'
],
header.split('\n'))
self.assertListEqual(
header.split('\n'),
selective_registration_header_lib.get_header(
self.WriteGraphFiles(graphs), 'rawproto', default_ops).split('\n'))
def testGetSelectiveHeader(self):
default_ops = ''
graphs = [text_format.Parse(GRAPH_DEF_TXT_2, graph_pb2.GraphDef())]
expected = """// This file was autogenerated by %s
#ifndef OPS_TO_REGISTER
#define OPS_TO_REGISTER
namespace {
constexpr const char* skip(const char* x) {
return (*x) ? (*x == ' ' ? skip(x + 1) : x) : x;
}
constexpr bool isequal(const char* x, const char* y) {
return (*skip(x) && *skip(y))
? (*skip(x) == *skip(y) && isequal(skip(x) + 1, skip(y) + 1))
: (!*skip(x) && !*skip(y));
}
template<int N>
struct find_in {
static constexpr bool f(const char* x, const char* const y[N]) {
return isequal(x, y[0]) || find_in<N - 1>::f(x, y + 1);
}
};
template<>
struct find_in<0> {
static constexpr bool f(const char* x, const char* const y[]) {
return false;
}
};
} // end namespace
constexpr const char* kNecessaryOpKernelClasses[] = {
"BiasOp<CPUDevice, float>",
};
#define SHOULD_REGISTER_OP_KERNEL(clz) (find_in<sizeof(kNecessaryOpKernelClasses) / sizeof(*kNecessaryOpKernelClasses)>::f(clz, kNecessaryOpKernelClasses))
constexpr inline bool ShouldRegisterOp(const char op[]) {
return false
|| isequal(op, "AccumulateNV2")
|| isequal(op, "BiasAdd")
;
}
#define SHOULD_REGISTER_OP(op) ShouldRegisterOp(op)
#define SHOULD_REGISTER_OP_GRADIENT false
#endif""" % self.script_name
header = selective_registration_header_lib.get_header(
self.WriteGraphFiles(graphs), 'rawproto', default_ops)
print(header)
self.assertListEqual(expected.split('\n'), header.split('\n'))
if __name__ == '__main__':
test.main()
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) The James Hutton Institute 2017-2019
# (c) University of Strathclyde 2019-2020
# Author: Leighton Pritchard
#
# Contact:
# [email protected]
#
# Leighton Pritchard,
# Strathclyde Institute for Pharmacy and Biomedical Sciences,
# 161 Cathedral Street,
# Glasgow,
# G4 0RE
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2017-2019 The James Hutton Institute
# Copyright (c) 2019-2020 University of Strathclyde
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Script to download from NCBI all genomes in a specified taxon subtree.
This script takes an NCBI taxonomy identifier (or string, though this is
not always reliable for taxonomy tree subgraphs...) and downloads all genomes
it can find from NCBI in the corresponding taxon subgraph that has
the passed argument as root.
"""
import logging
import re
import shutil
import subprocess # nosec
import sys
import time
import traceback
from argparse import ArgumentParser, Namespace
from collections import defaultdict
from pathlib import Path
from socket import timeout
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
from Bio import Entrez, SeqIO
from pyani import __version__
from pyani.download import create_hash
from pyani.logger import config_logger
class NCBIDownloadException(Exception):
"""General exception for failed NCBI download."""
def __init__(self):
"""Instantiate exception."""
Exception.__init__(self, "Error downloading file from NCBI")
# Parse command-line
def parse_cmdline(argv=None):
"""Parse command-line arguments.
:param argv: list of command-line arguments
"""
parser = ArgumentParser(prog="genbank_get_genomes_by_taxon.py")
parser.add_argument(
"-o",
"--outdir",
dest="outdirname",
required=True,
action="store",
default=None,
type=Path,
help="Output directory (required)",
)
parser.add_argument(
"-t",
"--taxon",
dest="taxon",
action="store",
default=None,
help="NCBI taxonomy ID",
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
action="store_true",
default=False,
help="Give verbose output",
)
parser.add_argument(
"--debug",
dest="debug",
action="store_true",
default=False,
help="Report debugging output",
)
parser.add_argument(
"-f",
"--force",
dest="force",
action="store_true",
default=False,
help="Force file overwriting",
)
parser.add_argument(
"--noclobber",
dest="noclobber",
action="store_true",
default=False,
help="Don't nuke existing files",
)
parser.add_argument(
"-l",
"--logfile",
dest="logfile",
action="store",
default=None,
type=Path,
help="Logfile location",
)
parser.add_argument(
"--format",
dest="format",
action="store",
default="fasta",
help="Output file format [gbk|fasta]",
)
parser.add_argument(
"--email",
dest="email",
required=True,
action="store",
default=None,
help="Email associated with NCBI queries (required)",
)
parser.add_argument(
"--retries",
dest="retries",
action="store",
default=20,
help="Number of Entrez retry attempts per request.",
)
parser.add_argument(
"--batchsize",
dest="batchsize",
action="store",
default=10000,
help="Entrez record return batch size",
)
parser.add_argument(
"--timeout",
dest="timeout",
action="store",
default=10,
help="Timeout for URL connection (s)",
)
# Parse arguments
if argv is None:
argv = sys.argv[1:]
return parser.parse_args(argv)
# Report last exception as string
def last_exception():
"""Return last exception as a string, or use in logging."""
exc_type, exc_value, exc_traceback = sys.exc_info()
return "".join(traceback.format_exception(exc_type, exc_value, exc_traceback))
# Set contact email for NCBI
def set_ncbi_email(args: Namespace) -> None:
"""Set contact email for NCBI.
:param args: Namespace, command-line arguments
"""
logger = logging.getLogger(__name__)
Entrez.email = args.email
logger.info("Set NCBI contact email to %s", args.email)
Entrez.tool = "genbank_get_genomes_by_taxon.py"
# Create output directory if it doesn't exist
def make_outdir(args: Namespace) -> None:
"""Make the output directory, if required.
:param args: Namespace, command-line arguments
This is a little involved. If the output directory already exists,
we take the safe option by default, and stop with an error. We can,
however, choose to force the program to go on, in which case we can
either clobber the existing directory, or not. The options turn out
as the following, if the directory exists:
DEFAULT: stop and report the collision
FORCE: continue, and remove the existing output directory
    NOCLOBBER+FORCE: continue, but do not remove the existing output directory
"""
logger = logging.getLogger(__name__)
if args.outdirname.exists():
if not args.force:
logger.error(
"Output directory %s would overwrite existing files (exiting)",
args.outdirname,
)
raise SystemExit(1)
logger.info("--force output directory use")
if args.noclobber:
logger.warning("--noclobber: existing output directory kept")
else:
logger.info(
"Removing directory %s and everything below it", args.outdirname
)
shutil.rmtree(args.outdirname)
logger.info("Creating directory %s", args.outdirname)
try:
        args.outdirname.mkdir(exist_ok=True)  # No error if the directory already exists
except OSError:
# This gets thrown if the directory exists. If we've forced overwrite/
# delete and we're not clobbering, we let things slide
if args.noclobber and args.force:
logger.info("NOCLOBBER+FORCE: not creating directory")
else:
            logger.error(last_exception())
raise SystemExit(1)
# Retry Entrez requests (or any other function)
def entrez_retry(args, func, *fnargs, **fnkwargs):
"""Retry the passed function a defined number of times.
:param args: Namespace, command-line arguments
:param func: func, Entrez function to attempt
:param *fnargs: tuple, arguments to the Entrez function
:param **fnkwargs: dict, keyword arguments to the Entrez function
"""
logger = logging.getLogger(__name__)
tries, success = 0, False
while not success and tries < args.retries:
try:
output = func(*fnargs, **fnkwargs)
success = True
except (HTTPError, URLError):
tries += 1
logger.warning(
"Entrez query %s(%s, %s) failed (%d/%d)",
func,
fnargs,
fnkwargs,
                tries,
args.retries,
)
logger.warning(last_exception())
if not success:
logger.error("Too many Entrez failures (exiting)")
raise SystemExit(1)
return output
# Get results from NCBI web history, in batches
def entrez_batch_webhistory(args, record, expected, batchsize, *fnargs, **fnkwargs):
"""Recover Entrez data from a prior NCBI webhistory search.
:param args: Namespace, command-line arguments
:param record: Entrez webhistory record
:param expected: int, number of expected search returns
:param batchsize: int, number of search returns to retrieve in each batch
:param *fnargs: tuple, arguments to Efetch
:param **fnkwargs: dict, keyword arguments to Efetch
    Recovery is performed in batches of defined size, using Efetch.
Returns all results as a list.
"""
results = []
for start in range(0, expected, batchsize):
batch_handle = entrez_retry(
args,
Entrez.efetch,
retstart=start,
retmax=batchsize,
webenv=record["WebEnv"],
query_key=record["QueryKey"],
*fnargs,
**fnkwargs,
)
batch_record = Entrez.read(batch_handle, validate=False)
results.extend(batch_record)
return results
# Get assembly UIDs for the root taxon
def get_asm_uids(args, taxon_uid):
"""Return a set of NCBI UIDs associated with the passed taxon.
:param args: Namespace, command-line arguments
:param taxon_uid: str, NCBI taxon ID
This query at NCBI returns all assemblies for the taxon subtree
rooted at the passed taxon_uid.
"""
logger = logging.getLogger(__name__)
query = f"txid{taxon_uid}[Organism:exp]"
logger.info("Entrez ESearch with query: %s", query)
# Perform initial search for assembly UIDs with taxon ID as query.
# Use NCBI history for the search.
handle = entrez_retry(
args,
Entrez.esearch,
db="assembly",
term=query,
format="xml",
usehistory="y",
)
record = Entrez.read(handle, validate=False)
result_count = int(record["Count"])
logger.info("Entrez ESearch returns %d assembly IDs", result_count)
# Recover assembly UIDs from the web history
asm_ids = entrez_batch_webhistory(
args, record, result_count, 250, db="assembly", retmode="xml"
)
logger.info("Identified %d unique assemblies", len(asm_ids))
return asm_ids
# Extract filestem from Entrez eSummary
def extract_filestem(data):
"""Extract filestem from Entrez eSummary data.
:param data: Entrez eSummary
Function expects esummary['DocumentSummarySet']['DocumentSummary'][0]
Some illegal characters may occur in AssemblyName - for these, a more
robust regex replace/escape may be required. Sadly, NCBI don't just
use standard percent escapes, but instead replace certain
characters with underscores: white space, slash, comma, hash, brackets.
"""
escapes = re.compile(r"[\s/,#\(\)]")
escname = re.sub(escapes, "_", data["AssemblyName"])
return "_".join([data["AssemblyAccession"], escname])
# Download NCBI assembly file for a passed Assembly UID
def get_ncbi_asm(args, asm_uid, fmt="fasta"):
"""Return the NCBI AssemblyAccession and AssemblyName for an assembly.
:param args: Namespace, command-line arguments
:param asm_uid: NCBI assembly UID
:param fmt: str, format to retrieve assembly information
    Returns organism data for class/label files, as well as the accession,
    so we can track whether downloads fail because only the most recent
    version is available.
AssemblyAccession and AssemblyName are data fields in the eSummary record,
and correspond to downloadable files for each assembly at
ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GC[AF]/nnn/nnn/nnn/<AA>_<AN>
where <AA> is AssemblyAccession, and <AN> is AssemblyName, and the choice
of GCA vs GCF, and the three values of nnn are taken from <AA>
"""
logger = logging.getLogger(__name__)
logger.info("Identifying assembly information from NCBI for %s", asm_uid)
# Obtain full eSummary data for the assembly
summary = Entrez.read(
entrez_retry(args, Entrez.esummary, db="assembly", id=asm_uid, report="full"),
validate=False,
)
# Extract filestem from assembly data
data = summary["DocumentSummarySet"]["DocumentSummary"][0]
filestem = extract_filestem(data)
# Report interesting things from the summary for those interested
logger.info("\tOrganism: %s", data["Organism"])
logger.info("\tTaxid: %s", data["SpeciesTaxid"])
logger.info("\tAccession: %s", data["AssemblyAccession"])
logger.info("\tName: %s", data["AssemblyName"])
# NOTE: Maybe parse out the assembly stats here, in future?
# Get class and label text
organism = data["SpeciesName"]
try:
strain = data["Biosource"]["InfraspeciesList"][0]["Sub_value"]
except (KeyError, IndexError):
# we consider this an error/incompleteness in the NCBI metadata
strain = ""
# Download and extract genome assembly
hash_md5 = None
try:
fastafname = retrieve_asm_contigs(args, filestem, fmt=fmt)
hash_md5 = create_hash(fastafname)
except NCBIDownloadException:
# This is a little hacky. Sometimes, RefSeq assemblies are
# suppressed (presumably because they are non-redundant),
# but the GenBank assembly persists. In those cases, we
# *assume* (because it may not be true) that the corresponding
# genbank sequence shares the same accession number, except
# that GCF is replaced by GCA
gbfilestem = re.sub("^GCF_", "GCA_", filestem)
logger.warning("Could not download %s, trying %s", filestem, gbfilestem)
try:
fastafname = retrieve_asm_contigs(args, gbfilestem, fmt=fmt)
hash_md5 = create_hash(fastafname)
except NCBIDownloadException:
fastafname = None
# Create label and class strings
genus, species = organism.split(" ", 1)
lbltxt = "%s\t%s_genomic\t%s %s %s" % (
hash_md5,
filestem,
genus[0] + ".",
species,
strain,
)
clstxt = "%s\t%s_genomic\t%s" % (hash_md5, filestem, organism)
logger.info("\tLabel: %s", lbltxt)
logger.info("\tClass: %s", clstxt)
return (fastafname, clstxt, lbltxt, data["AssemblyAccession"])
# Download and extract an NCBI assembly file, given a filestem
def retrieve_asm_contigs(
args,
filestem,
ftpstem="ftp://ftp.ncbi.nlm.nih.gov/genomes/all",
fmt="fasta",
):
"""Download assembly sequence to a local directory.
:param args: Namespace, command-line arguments
:param filestem: str, filestem for output file
:param ftpstem: str, URI stem for NCBI FTP site
:param fmt: str, format for output file
The filestem corresponds to <AA>_<AN>, where <AA> and <AN> are
AssemblyAccession and AssemblyName: data fields in the eSummary record.
These correspond to downloadable files for each assembly at
ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GC[AF]/nnn/nnn/nnn/<AA>_<AN>/
where <AA> is AssemblyAccession, and <AN> is AssemblyName. The choice
of GCA vs GCF, and the values of nnn, are derived from <AA>
The files in this directory all have the stem <AA>_<AN>_<suffix>, where
suffixes are:
assembly_report.txt
assembly_stats.txt
feature_table.txt.gz
genomic.fna.gz
genomic.gbff.gz
genomic.gff.gz
protein.faa.gz
protein.gpff.gz
rm_out.gz
rm.run
wgsmaster.gbff.gz
    This function downloads the genomic.fna.gz file (or genomic.gbff.gz when
    GenBank format is requested), and extracts it in the output directory
    specified when the script is called.
"""
logger = logging.getLogger(__name__)
logger.info("Retrieving assembly sequence for %s", filestem)
# Define format suffix
logger.info("%s format requested", fmt)
if fmt == "fasta":
suffix = "genomic.fna.gz"
    elif fmt == "gbk":
        suffix = "genomic.gbff.gz"
    else:
        logger.error("Unrecognised format: %s (exiting)", fmt)
        raise SystemExit(1)
# Compile URL
fnameparts = tuple(filestem.split("_", 2)) # three elements: GC*, AA, discard
subdirs = "/".join(
[
fnameparts[1][i : i + 3]
for i in range(0, len(fnameparts[1].split(".")[0]), 3)
]
)
asmurl = "{0}/{1}/{2}/{3}/{3}_{4}".format(
ftpstem, fnameparts[0], subdirs, filestem, suffix
)
logger.info("Using URL: %s", asmurl)
# Get data info
try:
response = urlopen(asmurl, timeout=args.timeout)
except HTTPError:
logger.error("Download failed for URL: %s", asmurl, exc_info=True)
raise NCBIDownloadException()
except URLError as err:
if isinstance(err.reason, timeout):
logger.error("Download timed out for URL: %s", asmurl, exc_info=True)
else:
logger.error("Download failed for URL: %s", asmurl, exc_info=True)
raise NCBIDownloadException()
except timeout:
logger.error("Download timed out for URL: %s", asmurl, exc_info=True)
raise NCBIDownloadException()
fsize = int(response.info().get("Content-length"))
logger.info("Opened URL and parsed metadata.")
# Download data
outfname = args.outdirname / f"{filestem}_{suffix}"
if outfname.exists():
logger.warning("Output file %s exists, not downloading", outfname)
else:
logger.info("Downloading %s (%s bytes)", asmurl, fsize)
bsize = 1_048_576 # buffer size
fsize_dl = 0 # bytes downloaded
try:
with open(outfname, "wb") as outfh:
while True:
buffer = response.read(bsize)
if not buffer:
break
elif buffer == b"status=replaced\n":
logger.warning(
"Could not download %s; file has been replaced", asmurl
)
fsize_dl += len(buffer)
outfh.write(buffer)
status = r"%10d [%3.2f%%]" % (fsize_dl, fsize_dl * 100.0 / fsize)
logger.info(status)
except IOError:
logger.error("Download failed for %s", asmurl)
logger.error(last_exception())
raise NCBIDownloadException()
# Extract gzip archive and return path to extracted file
return extract_archive(outfname)
def extract_archive(archivepath):
"""Return path to extracted gzip file.
    :param archivepath: Path, path to gzipped file with ".gz" suffix
    """
    logger = logging.getLogger(__name__)
    # Extract data from the gzipped file.
    if archivepath.suffix == ".gz":
        ename = archivepath.with_suffix("")  # Strips only .gz from filename
    else:
        logger.warning("Expected .gz file, got %s - not extracting", archivepath)
        return archivepath
if ename.exists():
logger.warning("Output file %s exists - not extracting", ename)
else:
logger.info("Extracting archive %s to %s", archivepath, ename)
try:
with open(ename, "w") as efh:
subprocess.call(
["gunzip", "-c", archivepath], stdout=efh
) # can be subprocess.run in Py3.5
logger.info("Archive extracted to %s", ename)
except IOError:
logger.error("Extracting archive %s failed", archivepath, exc_info=True)
raise NCBIDownloadException()
return ename
# Write contigs for a single assembly out to file
def write_contigs(args, asm_uid, contig_uids, batchsize=10000):
"""Write assembly contigs to a single FASTA file.
:param args: Namespace, command-line arguments
:param asm_uid: str, NCBI assembly UID
    :param contig_uids: list, NCBI nucleotide UIDs for the assembly's contigs
    :param batchsize: int, number of contig records to fetch per Efetch batch
FASTA records are returned, as GenBank and even GenBankWithParts format
records don't reliably give correct sequence in all cases.
The script returns two strings for each assembly, a 'class' and a 'label'
string - this is for use with, e.g. pyani.
"""
logger = logging.getLogger(__name__)
# Has duplicate code with get_class_label_info() - needs refactoring
logger.info("Collecting contig data for %s", asm_uid)
# Assembly record - get binomial and strain names
asm_record = Entrez.read(
entrez_retry(args, Entrez.esummary, db="assembly", id=asm_uid, rettype="text"),
validate=False,
)
asm_smry = asm_record["DocumentSummarySet"]["DocumentSummary"][0]
asm_organism = asm_smry["SpeciesName"]
try:
asm_strain = asm_smry["Biosource"]["InfraspeciesList"][0]["Sub_value"]
    except (KeyError, IndexError):
asm_strain = ""
# Assembly UID (long form) for the output filename
outfilename = f"args.outdirname, {asm_smry['AssemblyAccession']}.fasta"
# Create label and class strings
genus, species = asm_organism.split(" ", 1)
# Get FASTA records for contigs
logger.info(
"Downloading FASTA records for assembly %s (%s)",
asm_uid,
" ".join([genus[0] + ".", species, asm_strain]),
)
# We're doing an explicit outer retry loop here because we want to confirm
# we have the correct data, as well as test for Entrez connection errors,
# which is all the entrez_retry function does.
tries, success = 0, False
while not success and tries < args.retries:
records = [] # Holds all return records
# We may need to batch contigs
query_uids = ",".join(contig_uids)
try:
for start in range(0, len(contig_uids), batchsize):
logger.info("Batch: %d-%d", start, start + batchsize)
records.extend(
list(
SeqIO.parse(
entrez_retry(
args,
Entrez.efetch,
db="nucleotide",
id=query_uids,
rettype="fasta",
retmode="text",
retstart=start,
retmax=batchsize,
),
"fasta",
)
)
)
tries += 1
# Check only that correct number of records returned.
if len(records) == len(contig_uids):
success = True
else:
logger.warning(
"%d contigs expected, %d contigs returned",
len(contig_uids),
len(records),
)
logger.warning("FASTA download for assembly %s failed", asm_uid)
logger.warning("try %d/20", tries)
# Could also check expected assembly sequence length?
logger.info("Downloaded genome size: %d", sum([len(r) for r in records]))
except HTTPError:
logger.warning("FASTA download for assembly %s failed", asm_uid)
logger.warning(last_exception())
logger.warning("try %d/20", tries)
if not success:
# Could place option on command-line to stop or continue here.
logger.error("Failed to download records for %s (continuing)", asm_uid)
# Write contigs to file
retval = SeqIO.write(records, outfilename, "fasta")
logger.info("Wrote %d contigs to %s", retval, outfilename)
# Function to report whether an accession has been downloaded
def logreport_downloaded(accn, skiplist, accndict, uidaccndict):
"""Report to logger if alternative assemblies were downloaded.
    :param accn: str, assembly accession to report on
    :param skiplist: list, assembly UIDs that were skipped (not downloaded)
    :param accndict: dict, assembly UIDs keyed by accession stem
    :param uidaccndict: dict, accessions keyed by assembly UID
"""
logger = logging.getLogger(__name__)
for vid in accndict[accn.split(".")[0]]:
if vid in skiplist:
status = "NOT DOWNLOADED"
else:
status = "DOWNLOADED"
logger.warning("\t\t%s: %s - %s", vid, uidaccndict[vid], status)
# Run as script
def run_main(args=None):
"""Run main process for average_nucleotide_identity.py script.
:param args: Namespace, command-line arguments
"""
logger = logging.getLogger(__name__)
# If we need to (i.e. a namespace isn't passed), parse the command-line
if args is None:
args = parse_cmdline()
config_logger(args)
# Catch execution with no arguments
if len(sys.argv) == 1:
sys.stderr.write("pyani version: {0}\n".format(__version__))
return 0
# Have we got an email address? If not, exit.
if args.email is None:
logger.error("No email contact address provided (exiting)")
raise SystemExit(1)
set_ncbi_email(args)
# Have we got an output directory? If not, exit.
if args.outdirname is None:
logger.error("No output directory name (exiting)")
sys.exit(1)
make_outdir(args)
logger.info("Output directory: %s", args.outdirname)
# We might have more than one taxon in a comma-separated list
taxon_ids = args.taxon.split(",")
logger.info("Passed taxon IDs: %s", ", ".join(taxon_ids))
# Get all NCBI assemblies for each taxon UID
asm_dict = defaultdict(set)
for tid in taxon_ids:
asm_dict[tid] = get_asm_uids(args, tid)
for tid, asm_uids in list(asm_dict.items()):
logger.info("Taxon %s: %d assemblies", tid, len(asm_uids))
# Download contigs for each assembly UID
classes, labels = [], []
contig_dict = defaultdict(set)
accessiondict = defaultdict(list) # UIDs, keyed by accession
uidaccdict = {} # accessions, keyed by UID
skippedlist = []
for tid, asm_uids in list(asm_dict.items()):
for uid in asm_uids:
(fastafilename, classtxt, labeltxt, accession) = get_ncbi_asm(
args, uid, args.format
)
# fastafilename is None if there was an error thrown
if fastafilename is not None:
contig_dict[uid] = fastafilename
else:
logger.error("Skipping download for %s", uid)
skippedlist.append(uid)
# Populate dictionaries for all attempted downloads
classes.append(classtxt)
labels.append(labeltxt)
accessiondict[accession.split(".")[0]].append(uid)
uidaccdict[uid] = accession
# Write class and label files
classfilename = args.outdirname / "classes.txt"
labelfilename = args.outdirname / "labels.txt"
logger.info("Writing classes file to %s", classfilename)
with open(classfilename, "w") as ofh:
ofh.write("\n".join(classes) + "\n")
logger.info("Writing labels file to %s", labelfilename)
with open(labelfilename, "w") as ofh:
ofh.write("\n".join(labels) + "\n")
# How many downloads did we do/have to skip?
logger.info("Obtained %d assemblies", len(contig_dict))
if skippedlist:
logger.warning("Skipped %d downloads through error", len(skippedlist))
for uid in sorted(skippedlist):
logger.warning("Assembly UID %s skipped", uid)
acc = uidaccdict[uid]
logger.warning("\tUID: %s - accession: %s", uid, acc)
            # Has another version of this genome been successfully downloaded?
logger.warning("\tAccession %s has versions:", acc.split(".")[0])
logreport_downloaded(acc, skippedlist, accessiondict, uidaccdict)
url = "http://www.ncbi.nlm.nih.gov/assembly/%s" % uid
# Is this a GenBank sequence with no RefSeq counterpart?
# e.g. http://www.ncbi.nlm.nih.gov/assembly/196191/
if acc.startswith("GCA"):
logger.warning("\tAccession is GenBank: does RefSeq exist?")
logger.warning("\tCheck under 'history' at %s", url)
# Check for RefSeq counterparts
rsacc = re.sub("^GCA_", "GCF_", uidaccdict[uid])
logger.warning(
"\tAlternative RefSeq candidate accession: %s", rsacc.split(".")[0]
)
logger.warning("\tWere alternative assemblies downloaded?")
logreport_downloaded(rsacc, skippedlist, accessiondict, uidaccdict)
# Is this a suppressed RefSeq sequence?
if acc.startswith("GCF"):
logger.warning("\tAccession is RefSeq: is it suppressed?")
logger.warning("\tCheck under 'history' at %s", url)
# Check for GenBank counterparts
gbacc = re.sub("^GCF_", "GCA_", uidaccdict[uid])
logger.warning(
"\tAlternative GenBank candidate accession: %s", gbacc.split(".")[0]
)
logger.warning("\tWere alternative assemblies downloaded?")
logreport_downloaded(gbacc, skippedlist, accessiondict, uidaccdict)
logger.info("Skipped assembly UIDs: %s", skippedlist)
# Let the user know we're done
logger.info(time.asctime())
logger.info("Done.")
# Exit
return 0
|
|
import __builtin__
import os
import unittest
import shutil
import tempfile
from .buck import BuildFileProcessor, add_rule
def foo_rule(name, srcs=[], visibility=[], build_env=None):
add_rule({
'buck.type': 'foo',
'name': name,
'srcs': srcs,
'visibility': visibility,
}, build_env)
class ProjectFile(object):
def __init__(self, path, contents):
self.path = path
self.name = '//{0}'.format(path)
if isinstance(contents, (tuple, list)):
contents = os.linesep.join(contents) + os.linesep
self.contents = contents
class BuckTest(unittest.TestCase):
def setUp(self):
self.project_root = tempfile.mkdtemp()
self.allow_empty_globs = False
self.build_file_name = 'BUCK'
self.watchman_client = None
self.watchman_error = None
def tearDown(self):
shutil.rmtree(self.project_root, True)
def write_file(self, pfile):
with open(os.path.join(self.project_root, pfile.path), 'w') as f:
f.write(pfile.contents)
def write_files(self, *pfiles):
for pfile in pfiles:
self.write_file(pfile)
def create_build_file_processor(self, *includes, **kwargs):
return BuildFileProcessor(
self.project_root,
self.project_root, # watchman_watch_root
None, # watchman_project_prefix
self.build_file_name,
self.allow_empty_globs,
self.watchman_client,
self.watchman_error,
includes,
**kwargs)
def test_sibling_includes_use_separate_globals(self):
"""
        Test that consecutive includes can't see each other's globals.
If a build file includes two include defs, one after another, verify
that the first's globals don't pollute the second's (e.g. the second
cannot implicitly reference globals from the first without including
it itself).
"""
        # Set up the include defs. The first one defines a variable that the
# second one (incorrectly) implicitly references.
include_def1 = ProjectFile(path='inc_def1', contents=('FOO = 1',))
include_def2 = ProjectFile(path='inc_def2', contents=('BAR = FOO',))
self.write_files(include_def1, include_def2)
# Construct a processor using the above as default includes, and verify
# that the second one can't use the first's globals.
build_file = ProjectFile(path='BUCK', contents='')
self.write_file(build_file)
build_file_processor = self.create_build_file_processor(
include_def1.name,
include_def2.name)
self.assertRaises(
NameError,
build_file_processor.process,
build_file.path)
# Construct a processor with no default includes, have a generated
# build file include the include defs one after another, and verify
# that the second one can't use the first's globals.
build_file = ProjectFile(
path='BUCK',
contents=(
'include_defs({0!r})'.format(include_def1.name),
'include_defs({0!r})'.format(include_def2.name),
))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor()
self.assertRaises(
NameError,
build_file_processor.process,
build_file.path)
def test_lazy_include_defs(self):
"""
Tests bug reported in https://github.com/facebook/buck/issues/182.
        If an include def references another include def via a lazy include_defs
        call in some defined function, verify that it can correctly access the
latter's globals after the import.
"""
        # Set up the include defs. The first one defines a variable that the
# second one references after a local 'include_defs' call.
include_def1 = ProjectFile(path='inc_def1', contents=('FOO = 1',))
include_def2 = ProjectFile(
path='inc_def2',
contents=(
'def test():',
' include_defs({0!r})'.format(include_def1.name),
' FOO',
))
self.write_files(include_def1, include_def2)
# Construct a processor using the above as default includes, and verify
# that the function 'test' can use 'FOO' after including the first
# include def.
build_file = ProjectFile(path='BUCK', contents=('test()',))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor(
include_def1.name,
include_def2.name)
build_file_processor.process(build_file.path)
# Construct a processor with no default includes, have a generated
# build file include the include defs one after another, and verify
# that the function 'test' can use 'FOO' after including the first
# include def.
build_file = ProjectFile(
path='BUCK',
contents=(
'include_defs({0!r})'.format(include_def1.name),
'include_defs({0!r})'.format(include_def2.name),
'test()',
))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor()
build_file_processor.process(build_file.path)
def test_private_globals_are_ignored(self):
"""
Verify globals prefixed with '_' don't get imported via 'include_defs'.
"""
include_def = ProjectFile(path='inc_def1', contents=('_FOO = 1',))
self.write_file(include_def)
# Test we don't get private module attributes from default includes.
build_file = ProjectFile(path='BUCK', contents=('_FOO',))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor(
include_def.name)
self.assertRaises(
NameError,
build_file_processor.process,
build_file.path)
# Test we don't get private module attributes from explicit includes.
build_file = ProjectFile(
path='BUCK',
contents=(
'include_defs({0!r})'.format(include_def.name),
'_FOO',
))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor()
self.assertRaises(
NameError,
build_file_processor.process,
build_file.path)
def test_implicit_includes_apply_to_explicit_includes(self):
"""
        Verify that implicit includes are applied to explicit includes.
"""
# Setup an implicit include that defines a variable, another include
# that uses it, and a build file that uses the explicit include.
implicit_inc = ProjectFile(path='implicit', contents=('FOO = 1',))
explicit_inc = ProjectFile(path='explicit', contents=('FOO',))
build_file = ProjectFile(
path='BUCK',
contents=(
'include_defs({0!r})'.format(explicit_inc.name),
))
self.write_files(implicit_inc, explicit_inc, build_file)
# Run the processor to verify that the explicit include can use the
# variable in the implicit include.
build_file_processor = self.create_build_file_processor(
implicit_inc.name)
build_file_processor.process(build_file.path)
def test_all_list_is_respected(self):
"""
Verify that the `__all__` list in included files can be used to narrow
what gets pulled in.
"""
include_def = ProjectFile(
path='inc_def1',
contents=('__all__ = []', 'FOO = 1'))
self.write_file(include_def)
# Test we don't get non-whitelisted attributes from default includes.
build_file = ProjectFile(path='BUCK', contents=('FOO',))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor(
include_def.name)
self.assertRaises(
NameError,
build_file_processor.process,
build_file.path)
# Test we don't get non-whitelisted attributes from explicit includes.
build_file = ProjectFile(
path='BUCK',
contents=(
'include_defs({0!r})'.format(include_def.name),
'FOO',
))
self.write_file(build_file)
build_file_processor = self.create_build_file_processor()
self.assertRaises(
NameError,
build_file_processor.process,
build_file.path)
def test_do_not_override_overridden_builtins(self):
"""
We want to ensure that if you override something like java_binary, and then use
include_defs to get another file, you don't end up clobbering your override.
"""
        # Override get_base_path and check that include_defs doesn't clobber the override
build_defs = ProjectFile(
path='BUILD_DEFS',
contents=(
# While not strictly needed for this test, we want to make sure we are overriding
# a provided method and not just defining it ourselves.
'old_get_base_path = get_base_path',
'def get_base_path(*args, **kwargs):',
' raise ValueError()',
'include_defs("//OTHER_DEFS")',
))
other_defs = ProjectFile(path='OTHER_DEFS', contents=())
build_file = ProjectFile(
path='BUCK',
contents=(
'get_base_path()',
))
self.write_files(build_defs, other_defs, build_file)
build_file_processor = self.create_build_file_processor(build_defs.name)
build_file_processor.install_builtins(__builtin__.__dict__)
self.assertRaises(
ValueError,
build_file_processor.process,
build_file.path)
def test_watchman_glob_failure_falls_back_to_regular_glob(self):
class FakeWatchmanError(Exception):
pass
class FakeWatchmanClient:
            def __init__(self):
self.query_invoked = False
def query(self, *args):
self.query_invoked = True
raise FakeWatchmanError("whoops")
def close(self):
pass
self.watchman_client = FakeWatchmanClient()
self.watchman_error = FakeWatchmanError
build_file = ProjectFile(
path='BUCK',
contents=(
'foo_rule(',
' name="foo",'
' srcs=glob(["*.java"]),',
')'
))
java_file = ProjectFile(path='Foo.java', contents=())
self.write_files(build_file, java_file)
build_file_processor = self.create_build_file_processor(extra_funcs=[foo_rule])
build_file_processor.install_builtins(__builtin__.__dict__)
rules = build_file_processor.process(build_file.path)
self.assertTrue(self.watchman_client.query_invoked)
self.assertEqual(['Foo.java'], rules[0]['srcs'])
|
|
from __future__ import unicode_literals
from mopidy.models import Playlist, Track
from tests.mpd import protocol
class PlaylistsHandlerTest(protocol.BaseTestCase):
def test_listplaylist(self):
self.backend.playlists.playlists = [
Playlist(
name='name', uri='dummy:name', tracks=[Track(uri='dummy:a')])]
self.sendRequest('listplaylist "name"')
self.assertInResponse('file: dummy:a')
self.assertInResponse('OK')
def test_listplaylist_without_quotes(self):
self.backend.playlists.playlists = [
Playlist(
name='name', uri='dummy:name', tracks=[Track(uri='dummy:a')])]
self.sendRequest('listplaylist name')
self.assertInResponse('file: dummy:a')
self.assertInResponse('OK')
def test_listplaylist_fails_if_no_playlist_is_found(self):
self.sendRequest('listplaylist "name"')
self.assertEqualResponse('ACK [50@0] {listplaylist} No such playlist')
def test_listplaylist_duplicate(self):
playlist1 = Playlist(name='a', uri='dummy:a1', tracks=[Track(uri='b')])
playlist2 = Playlist(name='a', uri='dummy:a2', tracks=[Track(uri='c')])
self.backend.playlists.playlists = [playlist1, playlist2]
self.sendRequest('listplaylist "a [2]"')
self.assertInResponse('file: c')
self.assertInResponse('OK')
def test_listplaylistinfo(self):
self.backend.playlists.playlists = [
Playlist(
name='name', uri='dummy:name', tracks=[Track(uri='dummy:a')])]
self.sendRequest('listplaylistinfo "name"')
self.assertInResponse('file: dummy:a')
self.assertInResponse('Track: 0')
self.assertNotInResponse('Pos: 0')
self.assertInResponse('OK')
def test_listplaylistinfo_without_quotes(self):
self.backend.playlists.playlists = [
Playlist(
name='name', uri='dummy:name', tracks=[Track(uri='dummy:a')])]
self.sendRequest('listplaylistinfo name')
self.assertInResponse('file: dummy:a')
self.assertInResponse('Track: 0')
self.assertNotInResponse('Pos: 0')
self.assertInResponse('OK')
def test_listplaylistinfo_fails_if_no_playlist_is_found(self):
self.sendRequest('listplaylistinfo "name"')
self.assertEqualResponse(
'ACK [50@0] {listplaylistinfo} No such playlist')
def test_listplaylistinfo_duplicate(self):
playlist1 = Playlist(name='a', uri='dummy:a1', tracks=[Track(uri='b')])
playlist2 = Playlist(name='a', uri='dummy:a2', tracks=[Track(uri='c')])
self.backend.playlists.playlists = [playlist1, playlist2]
self.sendRequest('listplaylistinfo "a [2]"')
self.assertInResponse('file: c')
self.assertInResponse('Track: 0')
self.assertNotInResponse('Pos: 0')
self.assertInResponse('OK')
def test_listplaylists(self):
last_modified = 1390942873222
self.backend.playlists.playlists = [
Playlist(name='a', uri='dummy:a', last_modified=last_modified)]
self.sendRequest('listplaylists')
self.assertInResponse('playlist: a')
# Date without milliseconds and with time zone information
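        # (1390942873222 ms since the epoch is 1390942873 s, i.e.
        # 2014-01-28T21:01:13Z in UTC.)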
self.assertInResponse('Last-Modified: 2014-01-28T21:01:13Z')
self.assertInResponse('OK')
def test_listplaylists_duplicate(self):
playlist1 = Playlist(name='a', uri='dummy:a1')
playlist2 = Playlist(name='a', uri='dummy:a2')
self.backend.playlists.playlists = [playlist1, playlist2]
self.sendRequest('listplaylists')
self.assertInResponse('playlist: a')
self.assertInResponse('playlist: a [2]')
self.assertInResponse('OK')
def test_listplaylists_ignores_playlists_without_name(self):
last_modified = 1390942873222
self.backend.playlists.playlists = [
Playlist(name='', uri='dummy:', last_modified=last_modified)]
self.sendRequest('listplaylists')
self.assertNotInResponse('playlist: ')
self.assertInResponse('OK')
def test_listplaylists_replaces_newline_with_space(self):
self.backend.playlists.playlists = [
Playlist(name='a\n', uri='dummy:')]
self.sendRequest('listplaylists')
self.assertInResponse('playlist: a ')
self.assertNotInResponse('playlist: a\n')
self.assertInResponse('OK')
def test_listplaylists_replaces_carriage_return_with_space(self):
self.backend.playlists.playlists = [
Playlist(name='a\r', uri='dummy:')]
self.sendRequest('listplaylists')
self.assertInResponse('playlist: a ')
self.assertNotInResponse('playlist: a\r')
self.assertInResponse('OK')
def test_listplaylists_replaces_forward_slash_with_pipe(self):
self.backend.playlists.playlists = [
Playlist(name='a/b', uri='dummy:')]
self.sendRequest('listplaylists')
self.assertInResponse('playlist: a|b')
self.assertNotInResponse('playlist: a/b')
self.assertInResponse('OK')
def test_load_appends_to_tracklist(self):
self.core.tracklist.add([Track(uri='a'), Track(uri='b')])
self.assertEqual(len(self.core.tracklist.tracks.get()), 2)
self.backend.playlists.playlists = [
Playlist(name='A-list', uri='dummy:A-list', tracks=[
Track(uri='c'), Track(uri='d'), Track(uri='e')])]
self.sendRequest('load "A-list"')
tracks = self.core.tracklist.tracks.get()
self.assertEqual(5, len(tracks))
self.assertEqual('a', tracks[0].uri)
self.assertEqual('b', tracks[1].uri)
self.assertEqual('c', tracks[2].uri)
self.assertEqual('d', tracks[3].uri)
self.assertEqual('e', tracks[4].uri)
self.assertInResponse('OK')
def test_load_with_range_loads_part_of_playlist(self):
self.core.tracklist.add([Track(uri='a'), Track(uri='b')])
self.assertEqual(len(self.core.tracklist.tracks.get()), 2)
self.backend.playlists.playlists = [
Playlist(name='A-list', uri='dummy:A-list', tracks=[
Track(uri='c'), Track(uri='d'), Track(uri='e')])]
self.sendRequest('load "A-list" "1:2"')
tracks = self.core.tracklist.tracks.get()
self.assertEqual(3, len(tracks))
self.assertEqual('a', tracks[0].uri)
self.assertEqual('b', tracks[1].uri)
self.assertEqual('d', tracks[2].uri)
self.assertInResponse('OK')
def test_load_with_range_without_end_loads_rest_of_playlist(self):
self.core.tracklist.add([Track(uri='a'), Track(uri='b')])
self.assertEqual(len(self.core.tracklist.tracks.get()), 2)
self.backend.playlists.playlists = [
Playlist(name='A-list', uri='dummy:A-list', tracks=[
Track(uri='c'), Track(uri='d'), Track(uri='e')])]
self.sendRequest('load "A-list" "1:"')
tracks = self.core.tracklist.tracks.get()
self.assertEqual(4, len(tracks))
self.assertEqual('a', tracks[0].uri)
self.assertEqual('b', tracks[1].uri)
self.assertEqual('d', tracks[2].uri)
self.assertEqual('e', tracks[3].uri)
self.assertInResponse('OK')
def test_load_unknown_playlist_acks(self):
self.sendRequest('load "unknown playlist"')
self.assertEqual(0, len(self.core.tracklist.tracks.get()))
self.assertEqualResponse('ACK [50@0] {load} No such playlist')
def test_playlistadd(self):
self.sendRequest('playlistadd "name" "dummy:a"')
self.assertEqualResponse('ACK [0@0] {playlistadd} Not implemented')
def test_playlistclear(self):
self.sendRequest('playlistclear "name"')
self.assertEqualResponse('ACK [0@0] {playlistclear} Not implemented')
def test_playlistdelete(self):
self.sendRequest('playlistdelete "name" "5"')
self.assertEqualResponse('ACK [0@0] {playlistdelete} Not implemented')
def test_playlistmove(self):
self.sendRequest('playlistmove "name" "5" "10"')
self.assertEqualResponse('ACK [0@0] {playlistmove} Not implemented')
def test_rename(self):
self.sendRequest('rename "old_name" "new_name"')
self.assertEqualResponse('ACK [0@0] {rename} Not implemented')
def test_rm(self):
self.sendRequest('rm "name"')
self.assertEqualResponse('ACK [0@0] {rm} Not implemented')
def test_save(self):
self.sendRequest('save "name"')
self.assertEqualResponse('ACK [0@0] {save} Not implemented')
|
|
"""The tests for the Radarr platform."""
import unittest
import unittest.mock
import pytest
import homeassistant.components.radarr.sensor as radarr
from tests.common import get_test_home_assistant
def mocked_exception(*args, **kwargs):
"""Mock exception thrown by requests.get."""
raise OSError
def mocked_requests_get(*args, **kwargs):
"""Mock requests.get invocations."""
class MockResponse:
"""Class to represent a mocked response."""
def __init__(self, json_data, status_code):
"""Initialize the mock response class."""
self.json_data = json_data
self.status_code = status_code
def json(self):
"""Return the json of the response."""
return self.json_data
url = str(args[0])
if 'api/calendar' in url:
return MockResponse([
{
"title": "Resident Evil",
"sortTitle": "resident evil final chapter",
"sizeOnDisk": 0,
"status": "announced",
"overview": "Alice, Jill, Claire, Chris, Leon, Ada, and...",
"inCinemas": "2017-01-25T00:00:00Z",
"physicalRelease": "2017-01-27T00:00:00Z",
"images": [
{
"coverType": "poster",
"url": ("/radarr/MediaCover/12/poster.jpg"
"?lastWrite=636208663600000000")
},
{
"coverType": "banner",
"url": ("/radarr/MediaCover/12/banner.jpg"
"?lastWrite=636208663600000000")
}
],
"website": "",
"downloaded": "false",
"year": 2017,
"hasFile": "false",
"youTubeTrailerId": "B5yxr7lmxhg",
"studio": "Impact Pictures",
"path": "/path/to/Resident Evil The Final Chapter (2017)",
"profileId": 3,
"monitored": "false",
"runtime": 106,
"lastInfoSync": "2017-01-24T14:52:40.315434Z",
"cleanTitle": "residentevilfinalchapter",
"imdbId": "tt2592614",
"tmdbId": 173897,
"titleSlug": "resident-evil-the-final-chapter-2017",
"genres": [
"Action",
"Horror",
"Science Fiction"
],
"tags": [],
"added": "2017-01-24T14:52:39.989964Z",
"ratings": {
"votes": 363,
"value": 4.3
},
"alternativeTitles": [
"Resident Evil: Rising"
],
"qualityProfileId": 3,
"id": 12
}
], 200)
if 'api/command' in url:
return MockResponse([
{
"name": "RescanMovie",
"startedOn": "0001-01-01T00:00:00Z",
"stateChangeTime": "2014-02-05T05:09:09.2366139Z",
"sendUpdatesToClient": "true",
"state": "pending",
"id": 24
}
], 200)
if 'api/movie' in url:
return MockResponse([
{
"title": "Assassin's Creed",
"sortTitle": "assassins creed",
"sizeOnDisk": 0,
"status": "released",
"overview": "Lynch discovers he is a descendant of...",
"inCinemas": "2016-12-21T00:00:00Z",
"images": [
{
"coverType": "poster",
"url": ("/radarr/MediaCover/1/poster.jpg"
"?lastWrite=636200219330000000")
},
{
"coverType": "banner",
"url": ("/radarr/MediaCover/1/banner.jpg"
"?lastWrite=636200219340000000")
}
],
"website": "https://www.ubisoft.com/en-US/",
"downloaded": "false",
"year": 2016,
"hasFile": "false",
"youTubeTrailerId": "pgALJgMjXN4",
"studio": "20th Century Fox",
"path": "/path/to/Assassin's Creed (2016)",
"profileId": 6,
"monitored": "true",
"runtime": 115,
"lastInfoSync": "2017-01-23T22:05:32.365337Z",
"cleanTitle": "assassinscreed",
"imdbId": "tt2094766",
"tmdbId": 121856,
"titleSlug": "assassins-creed-121856",
"genres": [
"Action",
"Adventure",
"Fantasy",
"Science Fiction"
],
"tags": [],
"added": "2017-01-14T20:18:52.938244Z",
"ratings": {
"votes": 711,
"value": 5.2
},
"alternativeTitles": [
"Assassin's Creed: The IMAX Experience"
],
"qualityProfileId": 6,
"id": 1
}
], 200)
if 'api/diskspace' in url:
return MockResponse([
{
"path": "/data",
"label": "",
"freeSpace": 282500067328,
"totalSpace": 499738734592
}
], 200)
if 'api/system/status' in url:
return MockResponse({
"version": "0.2.0.210",
"buildTime": "2017-01-22T23:12:49Z",
"isDebug": "false",
"isProduction": "true",
"isAdmin": "false",
"isUserInteractive": "false",
"startupPath": "/path/to/radarr",
"appData": "/path/to/radarr/data",
"osVersion": "4.8.13.1",
"isMonoRuntime": "true",
"isMono": "true",
"isLinux": "true",
"isOsx": "false",
"isWindows": "false",
"branch": "develop",
"authentication": "forms",
"sqliteVersion": "3.16.2",
"urlBase": "",
"runtimeVersion": ("4.6.1 "
"(Stable 4.6.1.3/abb06f1 "
"Mon Oct 3 07:57:59 UTC 2016)")
}, 200)
return MockResponse({
"error": "Unauthorized"
}, 401)
class TestRadarrSetup(unittest.TestCase):
"""Test the Radarr platform."""
# pylint: disable=invalid-name
DEVICES = []
def add_entities(self, devices, update):
"""Mock add devices."""
for device in devices:
self.DEVICES.append(device)
def setUp(self):
"""Initialize values for this testcase class."""
self.DEVICES = []
self.hass = get_test_home_assistant()
self.hass.config.time_zone = 'America/Los_Angeles'
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
@unittest.mock.patch('requests.get', side_effect=mocked_requests_get)
def test_diskspace_no_paths(self, req_mock):
"""Test getting all disk space."""
config = {
'platform': 'radarr',
'api_key': 'foo',
'days': '2',
'unit': 'GB',
"include_paths": [],
'monitored_conditions': [
'diskspace'
]
}
radarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
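            # The expected values below derive from the mocked diskspace payload:
            # 282500067328 B free ~= 263.10 GB and 499738734592 B total ~= 465.42 GB
            # (1 GB = 1024**3 bytes here), giving 263.10/465.42 ~= 56.53%.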
assert '263.10' == device.state
assert 'mdi:harddisk' == device.icon
assert 'GB' == device.unit_of_measurement
assert 'Radarr Disk Space' == device.name
assert '263.10/465.42GB (56.53%)' == \
device.device_state_attributes["/data"]
@unittest.mock.patch('requests.get', side_effect=mocked_requests_get)
def test_diskspace_paths(self, req_mock):
"""Test getting diskspace for included paths."""
config = {
'platform': 'radarr',
'api_key': 'foo',
'days': '2',
'unit': 'GB',
"include_paths": [
'/data'
],
'monitored_conditions': [
'diskspace'
]
}
radarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert '263.10' == device.state
assert 'mdi:harddisk' == device.icon
assert 'GB' == device.unit_of_measurement
assert 'Radarr Disk Space' == device.name
assert '263.10/465.42GB (56.53%)' == \
device.device_state_attributes["/data"]
@unittest.mock.patch('requests.get', side_effect=mocked_requests_get)
def test_commands(self, req_mock):
"""Test getting running commands."""
config = {
'platform': 'radarr',
'api_key': 'foo',
'days': '2',
'unit': 'GB',
"include_paths": [
'/data'
],
'monitored_conditions': [
'commands'
]
}
radarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert 1 == device.state
assert 'mdi:code-braces' == device.icon
assert 'Commands' == device.unit_of_measurement
assert 'Radarr Commands' == device.name
assert 'pending' == \
device.device_state_attributes["RescanMovie"]
@unittest.mock.patch('requests.get', side_effect=mocked_requests_get)
def test_movies(self, req_mock):
"""Test getting the number of movies."""
config = {
'platform': 'radarr',
'api_key': 'foo',
'days': '2',
'unit': 'GB',
"include_paths": [
'/data'
],
'monitored_conditions': [
'movies'
]
}
radarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert 1 == device.state
assert 'mdi:television' == device.icon
assert 'Movies' == device.unit_of_measurement
assert 'Radarr Movies' == device.name
assert 'false' == \
device.device_state_attributes["Assassin's Creed (2016)"]
@unittest.mock.patch('requests.get', side_effect=mocked_requests_get)
def test_upcoming_multiple_days(self, req_mock):
"""Test the upcoming movies for multiple days."""
config = {
'platform': 'radarr',
'api_key': 'foo',
'days': '2',
'unit': 'GB',
"include_paths": [
'/data'
],
'monitored_conditions': [
'upcoming'
]
}
radarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert 1 == device.state
assert 'mdi:television' == device.icon
assert 'Movies' == device.unit_of_measurement
assert 'Radarr Upcoming' == device.name
assert '2017-01-27T00:00:00Z' == \
device.device_state_attributes["Resident Evil (2017)"]
@pytest.mark.skip
@unittest.mock.patch('requests.get', side_effect=mocked_requests_get)
def test_upcoming_today(self, req_mock):
"""Test filtering for a single day.
Radarr needs to respond with at least 2 days.
"""
config = {
'platform': 'radarr',
'api_key': 'foo',
'days': '1',
'unit': 'GB',
"include_paths": [
'/data'
],
'monitored_conditions': [
'upcoming'
]
}
radarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert 1 == device.state
assert 'mdi:television' == device.icon
assert 'Movies' == device.unit_of_measurement
assert 'Radarr Upcoming' == device.name
assert '2017-01-27T00:00:00Z' == \
device.device_state_attributes["Resident Evil (2017)"]
@unittest.mock.patch('requests.get', side_effect=mocked_requests_get)
def test_system_status(self, req_mock):
"""Test the getting of the system status."""
config = {
'platform': 'radarr',
'api_key': 'foo',
'days': '2',
'unit': 'GB',
"include_paths": [
'/data'
],
'monitored_conditions': [
'status'
]
}
radarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert '0.2.0.210' == device.state
assert 'mdi:information' == device.icon
assert 'Radarr Status' == device.name
assert '4.8.13.1' == device.device_state_attributes['osVersion']
@pytest.mark.skip
@unittest.mock.patch('requests.get', side_effect=mocked_requests_get)
def test_ssl(self, req_mock):
"""Test SSL being enabled."""
config = {
'platform': 'radarr',
'api_key': 'foo',
'days': '1',
'unit': 'GB',
"include_paths": [
'/data'
],
'monitored_conditions': [
'upcoming'
],
"ssl": "true"
}
radarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert 1 == device.state
assert 's' == device.ssl
assert 'mdi:television' == device.icon
assert 'Movies' == device.unit_of_measurement
assert 'Radarr Upcoming' == device.name
assert '2017-01-27T00:00:00Z' == \
device.device_state_attributes["Resident Evil (2017)"]
@unittest.mock.patch('requests.get', side_effect=mocked_exception)
def test_exception_handling(self, req_mock):
"""Test exception being handled."""
config = {
'platform': 'radarr',
'api_key': 'foo',
'days': '1',
'unit': 'GB',
"include_paths": [
'/data'
],
'monitored_conditions': [
'upcoming'
]
}
radarr.setup_platform(self.hass, config, self.add_entities, None)
for device in self.DEVICES:
device.update()
assert device.state is None
|
|
#!/usr/bin/env python
"""
@package ion.agents.platform.platform_driver
@file ion/agents/platform/platform_driver.py
@author Carlos Rueda
@brief Base classes supporting platform drivers.
"""
__author__ = 'Carlos Rueda'
__license__ = 'Apache 2.0'
from pyon.public import log
import logging
from copy import deepcopy
from mi.platform.platform_driver_event import StateChangeDriverEvent
from mi.platform.platform_driver_event import AsyncAgentEvent
from mi.platform.exceptions import PlatformDriverException
from mi.platform.platform_agent_enums import PlatformAgentEvent
from mi.core.common import BaseEnum
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.instrument_fsm import FSMError
from mi.platform.exceptions import PlatformConnectionException
from mi.platform.util.network_util import NetworkUtil
class PlatformDriverState(BaseEnum):
"""
Platform driver states
"""
UNCONFIGURED = 'PLATFORM_DRIVER_STATE_UNCONFIGURED'
DISCONNECTED = 'PLATFORM_DRIVER_STATE_DISCONNECTED'
CONNECTED = 'PLATFORM_DRIVER_STATE_CONNECTED'
class PlatformDriverEvent(BaseEnum):
"""
Base events for driver state machines.
Subclasses will typically extend this class to add events for the
CONNECTED state.
"""
ENTER = 'PLATFORM_DRIVER_EVENT_ENTER'
EXIT = 'PLATFORM_DRIVER_EVENT_EXIT'
CONFIGURE = 'PLATFORM_DRIVER_EVENT_CONFIGURE'
CONNECT = 'PLATFORM_DRIVER_EVENT_CONNECT'
CONNECTION_LOST = 'PLATFORM_DRIVER_CONNECTION_LOST'
DISCONNECT = 'PLATFORM_DRIVER_EVENT_DISCONNECT'
# Events for the CONNECTED state:
PING = 'PLATFORM_DRIVER_PING'
GET = 'PLATFORM_DRIVER_GET'
SET = 'PLATFORM_DRIVER_SET'
EXECUTE = 'PLATFORM_DRIVER_EXECUTE'
class PlatformDriverCapability(BaseEnum):
"""
Subclasses will indicate the particular set of capabilities to be exposed.
"""
pass
class PlatformDriver(object):
"""
A platform driver handles a particular platform in a platform network.
This base class provides a common interface and supporting functionality.
"""
def __init__(self, pnode, event_callback,
create_event_subscriber, destroy_event_subscriber):
"""
Creates a PlatformDriver instance.
@param pnode Root PlatformNode defining the platform network
rooted at this platform.
@param event_callback Callback to notify platform agent about events
generated by this driver.
This is captured in self._send_event for this
class and subclasses to call as needed.
@param create_event_subscriber
@param destroy_event_subscriber
functions to create/destroy any needed EventSubscriber's,
in particular regarding the Managed Endpoint API.
"""
#
        # NOTE the "pnode" parameter may not be very "standard" but it is the
# current convenient mechanism that captures the overall definition
# of the corresponding platform (most of which coming from configuration)
#
self._pnode = pnode
self._send_event = event_callback
self._create_event_subscriber = create_event_subscriber
self._destroy_event_subscriber = destroy_event_subscriber
self._platform_id = self._pnode.platform_id
if self._pnode.parent:
self._parent_platform_id = self._pnode.parent.platform_id
else:
self._parent_platform_id = None
self._platform_attributes = \
dict((a.attr_id, a.defn) for a in self._pnode.attrs.itervalues())
if log.isEnabledFor(logging.DEBUG):
log.debug("%r: PlatformDriver constructor called: pnode:\n%s\n"
"_platform_attributes=%s",
self._platform_id,
NetworkUtil._dump_pnode(self._pnode, include_subplatforms=False),
self._platform_attributes)
self._driver_config = None
self._resource_schema = {}
# The parameter dictionary.
self._param_dict = {}
# construct FSM and start it with initial state UNCONFIGURED:
self._construct_fsm()
self._fsm.start(PlatformDriverState.UNCONFIGURED)
def get_platform_driver_event_class(self):
"""
Returns PlatformDriverEvent in this base class, but this is typically
        overridden.
"""
return PlatformDriverEvent
def get_platform_driver_capability_class(self):
"""
Returns PlatformDriverCapability in this base class, but this is typically
        overridden.
"""
return PlatformDriverCapability
def get_resource_capabilities(self, current_state=True, cmd_attrs=False):
"""
@param current_state
@param cmd_attrs If true, the returned commands will be the actual
attributes of the associated capability class (or
subclass) instead of the associated values.
"""
res_cmds = self._fsm.get_events(current_state)
res_cmds = self._filter_capabilities(res_cmds, cmd_attrs=cmd_attrs)
res_params = self._param_dict.keys()
return [res_cmds, res_params]
def _filter_capabilities(self, events, cmd_attrs=False):
"""
@param events the events to filter
@param cmd_attrs If true, then the actual attributes of the
PlatformDriverCapability class (or subclass) are
returned instead of the associated values.
"""
capability_class = self.get_platform_driver_capability_class()
event_values = [x for x in events if capability_class.has(x)]
if not cmd_attrs:
return event_values
# map event_values to the actual enum attributes:
event_attrs = []
for attr in dir(capability_class):
# first two checks below similar to BaseEnum.list()
if attr.startswith('__'):
continue
val = getattr(capability_class, attr)
if callable(val):
continue
if val in event_values:
event_attrs.append(attr)
return event_attrs
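# Illustrative sketch (names hypothetical, not part of this base class):
# assuming get_platform_driver_capability_class returns a subclass such as
#
#   class MyCapability(PlatformDriverCapability):
#       TURN_ON = 'DRIVER_EVENT_TURN_ON'
#
# then a call like self._filter_capabilities(['DRIVER_EVENT_TURN_ON', 'OTHER'])
# returns the matching values ['DRIVER_EVENT_TURN_ON'] by default, whereas
# cmd_attrs=True maps them back to the attribute names, i.e. ['TURN_ON'].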
def get_resource_state(self, *args, **kwargs):
"""
Return the current state of the driver.
@retval str current driver state.
"""
return self._fsm.get_current_state()
def get_resource(self, *args, **kwargs):
"""
"""
return self._fsm.on_event(PlatformDriverEvent.GET, *args, **kwargs)
def set_resource(self, *args, **kwargs):
"""
"""
return self._fsm.on_event(PlatformDriverEvent.SET, *args, **kwargs)
def execute_resource(self, resource_cmd, *args, **kwargs):
"""
Platform agent calls this directly to trigger the execution of a
resource command. The actual action occurs in execute.
"""
return self._fsm.on_event(PlatformDriverEvent.EXECUTE, resource_cmd, *args, **kwargs)
def _get_platform_attributes(self):
"""
Gets a dict of the attribute definitions in this platform as given at
construction time (from pnode parameter).
"""
return self._platform_attributes
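# For illustration only (keys and values hypothetical): the dict built in the
# constructor from pnode.attrs has the shape
#
#   {'input_voltage': {'attr_id': 'input_voltage', 'type': 'float', ...},
#    ...}
#
# that is, it is keyed by attribute ID, with whatever attribute definition dict
# the configuration provides as the value.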
def validate_driver_configuration(self, driver_config):
"""
Called by configure so a subclass can perform any needed additional
validation of the provided configuration.
Nothing is done in this base class. Note that basic validation is
done by PlatformAgent prior to creating/configuring the driver.
@param driver_config Driver configuration.
@raise PlatformDriverException Error in driver configuration.
"""
pass
def configure(self, driver_config):
"""
Configures this driver. In this base class it basically
calls validate_driver_configuration and then assigns the given
config to self._driver_config.
@param driver_config Driver configuration.
"""
if log.isEnabledFor(logging.DEBUG):
log.debug("%r: configure: %s" % (self._platform_id, str(driver_config)))
self.validate_driver_configuration(driver_config)
self._driver_config = driver_config
#self._param_dict = deepcopy(self._driver_config.get('attributes',{}))
def get_config_metadata(self):
"""
"""
return deepcopy(self._resource_schema)
def connect(self, recursion=None):
"""
To be implemented by subclass.
Establishes communication with the platform device.
@raise PlatformConnectionException
"""
raise NotImplementedError() #pragma: no cover
def disconnect(self, recursion=None):
"""
To be implemented by subclass.
Ends communication with the platform device.
@raise PlatformConnectionException
"""
raise NotImplementedError() #pragma: no cover
def ping(self):
"""
To be implemented by subclass.
Verifies communication with the external platform, returning "PONG" if
the verification completes OK.
@retval "PONG"
@raise PlatformConnectionException If the connection to the external
platform is lost.
"""
raise NotImplementedError() #pragma: no cover
def get_attributes(self):
"""
To be implemented by subclass.
Returns the attributes of this platform. This is used by the agent
for attribute monitoring purposes.
@retval {attr_id: dict, ...}
dict indexed by attribute ID with associated properties.
@raise PlatformConnectionException If the connection to the external
platform is lost.
"""
raise NotImplementedError() #pragma: no cover
def get_attribute_values(self, attrs):
"""
To be implemented by subclass.
Returns the values for specific attributes since a given time for
each attribute.
@param attrs [(attrName, from_time), ...] desired attributes.
from_time Assumed to be in the format basically described by
pyon's get_ion_ts function, "a str representing an
integer number, the millis in UNIX epoch."
@retval {attrName : [(attrValue, timestamp), ...], ...}
dict indexed by attribute name with list of (value, timestamp)
pairs. Timestamps in same format as from_time.
@raise PlatformConnectionException If the connection to the external
platform is lost.
"""
raise NotImplementedError() #pragma: no cover
def supports_set_operation(self):
"""
@return True only if the SET operation is supported by this driver.
"""
return False
def set_attribute_values(self, attrs):
"""
Sets values for writable attributes in this platform.
Only called by SET handler when supports_set_operation() returns True.
@param attrs [(attrName, attrValue), ...] List of attribute values
@retval {attrName : [(attrValue, timestamp), ...], ...}
dict with a list of (value,timestamp) pairs for each attribute
indicated in the input. Returned timestamps indicate the time when the
value was set. Each timestamp is "a str representing an
integer number, the millis in UNIX epoch" to
align with description of pyon's get_ion_ts function.
@raise PlatformConnectionException If the connection to the external
platform is lost.
"""
#
# TODO Any needed alignment with the instrument case?
#
raise NotImplementedError() #pragma: no cover
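# A concrete (hypothetical) illustration of the shapes described above: an
# input of
#
#   attrs = [('input_voltage', 12.5), ('port_on', True)]
#
# would yield a return value along the lines of
#
#   {'input_voltage': [(12.5, '1431454185000')],
#    'port_on':       [(True, '1431454185000')]}
#
# with each timestamp being the string form of the millis-since-epoch at which
# the value was set.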
def execute(self, cmd, *args, **kwargs):
"""
Executes the given command.
Subclasses can override this method to execute particular commands or
delegate to their superclass. Note, however, that this base class raises
NotImplementedError.
@param cmd command
@param args command's args
@param kwargs command's kwargs
@return result of the execution
@raise PlatformConnectionException If the connection to the external
platform is lost.
"""
raise NotImplementedError() # pragma: no cover
def get(self, *args, **kwargs):
"""
Gets the values of the requested attributes.
Subclasses can override to get particular attributes and
delegate to this base implementation to handle common attributes.
@param args get's args
@param kwargs get's kwargs
@return result of the retrieval.
@raise PlatformConnectionException If the connection to the external
platform is lost.
"""
raise NotImplementedError() # pragma: no cover
def destroy(self):
"""
Stops all activity done by the driver. Nothing done in this class.
"""
pass
def get_driver_state(self):
"""
Returns the current FSM state.
"""
return self._fsm.get_current_state()
#####################################################################
# Supporting method for handling connection lost in CONNECT handlers
#####################################################################
def _connection_lost(self, cmd, args, kwargs, exc=None):
"""
Supporting method to be called by any CONNECTED handler right after
detecting that the connection with the external platform device has
been lost. It does a regular disconnect() and notifies the agent about
the lost connection. Note that the call to disconnect() itself may
raise an additional exception, very likely because the connection is
lost; such an exception is logged but otherwise ignored.
All parameters are for logging purposes.
@param cmd string indicating the command that was attempted
@param args args of the command that was attempted
@param kwargs kwargs of the command that was attempted
@param exc associated exception (if any),
@return (next_state, result) suitable as the return of the FSM
handler where the connection lost was detected. The
next_state will always be PlatformDriverState.DISCONNECTED.
"""
log.debug("%r: (LC) _connection_lost: cmd=%s, args=%s, kwargs=%s, exc=%s",
self._platform_id, cmd, args, kwargs, exc)
result = None
try:
result = self.disconnect()
except Exception as e:
# just log a message
log.debug("%r: (LC) ignoring exception while calling disconnect upon"
" lost connection: %s", self._platform_id, e)
# in any case, notify the agent about the lost connection and
# transition to DISCONNECTED:
self._send_event(AsyncAgentEvent(PlatformAgentEvent.LOST_CONNECTION))
next_state = PlatformDriverState.DISCONNECTED
return next_state, result
##############################################################
# FSM event handlers.
##############################################################
def _common_state_enter(self, *args, **kwargs):
"""
Common work upon every state entry.
"""
state = self.get_driver_state()
log.debug('%r: driver entering state: %s', self._platform_id, state)
self._send_event(StateChangeDriverEvent(state))
def _common_state_exit(self, *args, **kwargs):
"""
Common work upon every state exit.
Nothing done in this base class.
"""
##############################################################
# UNCONFIGURED event handlers.
##############################################################
def _handler_unconfigured_configure(self, *args, **kwargs):
"""
"""
if log.isEnabledFor(logging.TRACE): # pragma: no cover
log.trace("%r/%s args=%s kwargs=%s" % (
self._platform_id, self.get_driver_state(),
str(args), str(kwargs)))
driver_config = kwargs.get('driver_config', None)
if driver_config is None:
raise FSMError('configure: missing driver_config argument')
try:
result = self.configure(driver_config)
next_state = PlatformDriverState.DISCONNECTED
except PlatformDriverException as e:
result = None
next_state = None
log.error("Error in platform driver configuration", e)
return next_state, result
##############################################################
# DISCONNECTED event handlers.
##############################################################
def _handler_disconnected_connect(self, *args, **kwargs):
"""
"""
if log.isEnabledFor(logging.TRACE): # pragma: no cover
log.trace("%r/%s args=%s kwargs=%s" % (
self._platform_id, self.get_driver_state(),
str(args), str(kwargs)))
recursion = kwargs.get('recursion', None)
self.connect(recursion=recursion)
result = next_state = PlatformDriverState.CONNECTED
return next_state, result
def _handler_disconnected_disconnect(self, *args, **kwargs):
"""
We allow the DISCONNECT event in the DISCONNECTED state for convenience;
in particular, it facilitates the overall handling of the connection_lost
event, which is processed by a subsequent call to disconnect from the
platform agent. The handler here does nothing.
"""
if log.isEnabledFor(logging.TRACE): # pragma: no cover
log.trace("%r/%s args=%s kwargs=%s" % (
self._platform_id, self.get_driver_state(),
str(args), str(kwargs)))
return None, None
###########################################################################
# CONNECTED event handlers.
# Except for the explicit disconnect and connection_lost handlers, the
# CONNECTED handlers (here and in subclasses) should directly catch any
# PlatformConnectionException to call _connection_lost.
###########################################################################
def _handler_connected_disconnect(self, *args, **kwargs):
"""
"""
if log.isEnabledFor(logging.TRACE): # pragma: no cover
log.trace("%r/%s args=%s kwargs=%s" % (
self._platform_id, self.get_driver_state(),
str(args), str(kwargs)))
recursion = kwargs.get('recursion', None)
result = self.disconnect(recursion=recursion)
next_state = PlatformDriverState.DISCONNECTED
return next_state, result
def _handler_connected_connection_lost(self, *args, **kwargs):
"""
The connection was lost (as opposed to a normal disconnect request).
Here we do the regular disconnect but also notify the platform agent
about the lost connection.
NOTE: this handler in the FSM is provided in case there is a need to
directly trigger the associated transition along with the associated
notification to the agent. However, the typical case is that a CONNECTED
handler dealing with commands will catch any PlatformConnectionException
to call _connection_lost directly.
"""
if log.isEnabledFor(logging.TRACE): # pragma: no cover
log.trace("%r/%s args=%s kwargs=%s" % (
self._platform_id, self.get_driver_state(),
str(args), str(kwargs)))
# just use our supporting method:
return self._connection_lost(PlatformDriverEvent.CONNECTION_LOST, args, kwargs)
def _handler_connected_ping(self, *args, **kwargs):
"""
"""
if log.isEnabledFor(logging.TRACE): # pragma: no cover
log.trace("%r/%s args=%s kwargs=%s" % (
self._platform_id, self.get_driver_state(),
str(args), str(kwargs)))
try:
result = self.ping()
return None, result
except PlatformConnectionException as e:
return self._connection_lost(PlatformDriverEvent.PING, args, kwargs, e)
def _handler_connected_get(self, *args, **kwargs):
"""
"""
if log.isEnabledFor(logging.TRACE): # pragma: no cover
log.trace("%r/%s args=%s kwargs=%s" % (
self._platform_id, self.get_driver_state(),
str(args), str(kwargs)))
try:
result = self.get(*args, **kwargs)
return None, result
except PlatformConnectionException as e:
return self._connection_lost(PlatformDriverEvent.GET, args, kwargs, e)
def _handler_connected_set(self, *args, **kwargs):
"""
"""
if log.isEnabledFor(logging.TRACE): # pragma: no cover
log.trace("%r/%s args=%s kwargs=%s" % (
self._platform_id, self.get_driver_state(),
str(args), str(kwargs)))
if not self.supports_set_operation():
raise FSMError('Unsupported operation: %s' % PlatformDriverEvent.SET)
attrs = kwargs.get('attrs', None)
if attrs is None:
raise FSMError('set_attribute_values: missing attrs argument')
try:
result = self.set_attribute_values(attrs)
return None, result
except PlatformConnectionException as e:
return self._connection_lost(PlatformDriverEvent.SET, args, kwargs, e)
def _handler_connected_execute(self, *args, **kwargs):
"""
"""
if log.isEnabledFor(logging.TRACE): # pragma: no cover
log.trace("%r/%s args=%s kwargs=%s" % (
self._platform_id, self.get_driver_state(),
str(args), str(kwargs)))
if len(args) == 0:
raise FSMError('execute_resource: missing resource_cmd argument')
try:
result = self.execute(*args, **kwargs)
return None, result
except PlatformConnectionException as e:
return self._connection_lost(PlatformDriverEvent.EXECUTE, args, kwargs, e)
##############################################################
# Platform driver FSM setup
##############################################################
def _construct_fsm(self, states=PlatformDriverState,
events=PlatformDriverEvent,
enter_event=PlatformDriverEvent.ENTER,
exit_event=PlatformDriverEvent.EXIT):
"""
Constructs the FSM for the driver. The preparations here mostly concern
the UNCONFIGURED, DISCONNECTED, and CONNECTED state transitions, with
some common handlers for the CONNECTED state.
Subclasses can override to indicate specific parameters and add new
handlers (typically for the CONNECTED state).
"""
log.debug("constructing base platform driver FSM")
self._fsm = ThreadSafeFSM(states, events, enter_event, exit_event)
for state in PlatformDriverState.list():
self._fsm.add_handler(state, enter_event, self._common_state_enter)
self._fsm.add_handler(state, exit_event, self._common_state_exit)
# UNCONFIGURED state event handlers:
self._fsm.add_handler(PlatformDriverState.UNCONFIGURED, PlatformDriverEvent.CONFIGURE, self._handler_unconfigured_configure)
# DISCONNECTED state event handlers:
self._fsm.add_handler(PlatformDriverState.DISCONNECTED, PlatformDriverEvent.CONNECT, self._handler_disconnected_connect)
self._fsm.add_handler(PlatformDriverState.DISCONNECTED, PlatformDriverEvent.DISCONNECT, self._handler_disconnected_disconnect)
# CONNECTED state event handlers:
self._fsm.add_handler(PlatformDriverState.CONNECTED, PlatformDriverEvent.DISCONNECT, self._handler_connected_disconnect)
self._fsm.add_handler(PlatformDriverState.CONNECTED, PlatformDriverEvent.CONNECTION_LOST, self._handler_connected_connection_lost)
self._fsm.add_handler(PlatformDriverState.CONNECTED, PlatformDriverEvent.PING, self._handler_connected_ping)
self._fsm.add_handler(PlatformDriverState.CONNECTED, PlatformDriverEvent.GET, self._handler_connected_get)
self._fsm.add_handler(PlatformDriverState.CONNECTED, PlatformDriverEvent.SET, self._handler_connected_set)
self._fsm.add_handler(PlatformDriverState.CONNECTED, PlatformDriverEvent.EXECUTE, self._handler_connected_execute)
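# A minimal sketch of how a concrete driver might extend this FSM (the event
# name, capability and handler below are hypothetical, not defined by this
# base class; a real driver would declare the event in a PlatformDriverEvent
# subclass):
#
#   class MyPlatformDriver(PlatformDriver):
#       def _construct_fsm(self, states=PlatformDriverState,
#                          events=PlatformDriverEvent,
#                          enter_event=PlatformDriverEvent.ENTER,
#                          exit_event=PlatformDriverEvent.EXIT):
#           super(MyPlatformDriver, self)._construct_fsm(states, events,
#                                                        enter_event, exit_event)
#           self._fsm.add_handler(PlatformDriverState.CONNECTED,
#                                 'DRIVER_EVENT_TURN_ON',
#                                 self._handler_connected_turn_on)
#
# where _handler_connected_turn_on would follow the same pattern as the
# GET/SET handlers above: do the work and catch PlatformConnectionException
# in order to delegate to _connection_lost.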
# tempfile.py unit tests.
import tempfile
import errno
import io
import os
import pathlib
import signal
import sys
import re
import warnings
import contextlib
import stat
import weakref
from unittest import mock
import unittest
from test import support
from test.support import script_helper
has_textmode = (tempfile._text_openflags != tempfile._bin_openflags)
has_spawnl = hasattr(os, 'spawnl')
# TEST_FILES may need to be tweaked for systems depending on the maximum
# number of files that can be opened at one time (see ulimit -n)
if sys.platform.startswith('openbsd'):
TEST_FILES = 48
else:
TEST_FILES = 100
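# As a rough guide for tuning TEST_FILES (assuming a POSIX system), the current
# per-process limit on open file descriptors can be inspected with:
#
#   import resource
#   soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
#
# which corresponds to the value reported by `ulimit -n`.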
# This is organized as one test for each chunk of code in tempfile.py,
# in order of their appearance in the file. Testing which requires
# threads is not done here.
class TestLowLevelInternals(unittest.TestCase):
def test_infer_return_type_singles(self):
self.assertIs(str, tempfile._infer_return_type(''))
self.assertIs(bytes, tempfile._infer_return_type(b''))
self.assertIs(str, tempfile._infer_return_type(None))
def test_infer_return_type_multiples(self):
self.assertIs(str, tempfile._infer_return_type('', ''))
self.assertIs(bytes, tempfile._infer_return_type(b'', b''))
with self.assertRaises(TypeError):
tempfile._infer_return_type('', b'')
with self.assertRaises(TypeError):
tempfile._infer_return_type(b'', '')
def test_infer_return_type_multiples_and_none(self):
self.assertIs(str, tempfile._infer_return_type(None, ''))
self.assertIs(str, tempfile._infer_return_type('', None))
self.assertIs(str, tempfile._infer_return_type(None, None))
self.assertIs(bytes, tempfile._infer_return_type(b'', None))
self.assertIs(bytes, tempfile._infer_return_type(None, b''))
with self.assertRaises(TypeError):
tempfile._infer_return_type('', None, b'')
with self.assertRaises(TypeError):
tempfile._infer_return_type(b'', None, '')
def test_infer_return_type_pathlib(self):
self.assertIs(str, tempfile._infer_return_type(pathlib.Path('/')))
# Common functionality.
class BaseTestCase(unittest.TestCase):
str_check = re.compile(r"^[a-z0-9_-]{8}$")
b_check = re.compile(br"^[a-z0-9_-]{8}$")
def setUp(self):
self._warnings_manager = support.check_warnings()
self._warnings_manager.__enter__()
warnings.filterwarnings("ignore", category=RuntimeWarning,
message="mktemp", module=__name__)
def tearDown(self):
self._warnings_manager.__exit__(None, None, None)
def nameCheck(self, name, dir, pre, suf):
(ndir, nbase) = os.path.split(name)
npre = nbase[:len(pre)]
nsuf = nbase[len(nbase)-len(suf):]
if dir is not None:
self.assertIs(
type(name),
str
if type(dir) is str or isinstance(dir, os.PathLike) else
bytes,
"unexpected return type",
)
if pre is not None:
self.assertIs(type(name), str if type(pre) is str else bytes,
"unexpected return type")
if suf is not None:
self.assertIs(type(name), str if type(suf) is str else bytes,
"unexpected return type")
if (dir, pre, suf) == (None, None, None):
self.assertIs(type(name), str, "default return type must be str")
# check for equality of the absolute paths!
self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir),
"file %r not in directory %r" % (name, dir))
self.assertEqual(npre, pre,
"file %r does not begin with %r" % (nbase, pre))
self.assertEqual(nsuf, suf,
"file %r does not end with %r" % (nbase, suf))
nbase = nbase[len(pre):len(nbase)-len(suf)]
check = self.str_check if isinstance(nbase, str) else self.b_check
self.assertTrue(check.match(nbase),
"random characters %r do not match %r"
% (nbase, check.pattern))
class TestExports(BaseTestCase):
def test_exports(self):
# There are no surprising symbols in the tempfile module
dict = tempfile.__dict__
expected = {
"NamedTemporaryFile" : 1,
"TemporaryFile" : 1,
"mkstemp" : 1,
"mkdtemp" : 1,
"mktemp" : 1,
"TMP_MAX" : 1,
"gettempprefix" : 1,
"gettempprefixb" : 1,
"gettempdir" : 1,
"gettempdirb" : 1,
"tempdir" : 1,
"template" : 1,
"SpooledTemporaryFile" : 1,
"TemporaryDirectory" : 1,
}
unexp = []
for key in dict:
if key[0] != '_' and key not in expected:
unexp.append(key)
self.assertTrue(len(unexp) == 0,
"unexpected keys: %s" % unexp)
class TestRandomNameSequence(BaseTestCase):
"""Test the internal iterator object _RandomNameSequence."""
def setUp(self):
self.r = tempfile._RandomNameSequence()
super().setUp()
def test_get_six_char_str(self):
# _RandomNameSequence returns an eight-character string
s = next(self.r)
self.nameCheck(s, '', '', '')
def test_many(self):
# _RandomNameSequence returns no duplicate strings (stochastic)
dict = {}
r = self.r
for i in range(TEST_FILES):
s = next(r)
self.nameCheck(s, '', '', '')
self.assertNotIn(s, dict)
dict[s] = 1
def supports_iter(self):
# _RandomNameSequence supports the iterator protocol
i = 0
r = self.r
for s in r:
i += 1
if i == 20:
break
@unittest.skipUnless(hasattr(os, 'fork'),
"os.fork is required for this test")
def test_process_awareness(self):
# ensure that the random source differs between
# child and parent.
read_fd, write_fd = os.pipe()
pid = None
try:
pid = os.fork()
if not pid:
# child process
os.close(read_fd)
os.write(write_fd, next(self.r).encode("ascii"))
os.close(write_fd)
# bypass the normal exit handlers; leave those to
# the parent.
os._exit(0)
# parent process
parent_value = next(self.r)
child_value = os.read(read_fd, len(parent_value)).decode("ascii")
finally:
if pid:
# best effort to ensure the process can't bleed out
# via any bugs above
try:
os.kill(pid, signal.SIGKILL)
except OSError:
pass
# Read the process exit status to avoid zombie process
os.waitpid(pid, 0)
os.close(read_fd)
os.close(write_fd)
self.assertNotEqual(child_value, parent_value)
class TestCandidateTempdirList(BaseTestCase):
"""Test the internal function _candidate_tempdir_list."""
def test_nonempty_list(self):
# _candidate_tempdir_list returns a nonempty list of strings
cand = tempfile._candidate_tempdir_list()
self.assertFalse(len(cand) == 0)
for c in cand:
self.assertIsInstance(c, str)
def test_wanted_dirs(self):
# _candidate_tempdir_list contains the expected directories
# Make sure the interesting environment variables are all set.
with support.EnvironmentVarGuard() as env:
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname:
env[envname] = os.path.abspath(envname)
cand = tempfile._candidate_tempdir_list()
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname: raise ValueError
self.assertIn(dirname, cand)
try:
dirname = os.getcwd()
except (AttributeError, OSError):
dirname = os.curdir
self.assertIn(dirname, cand)
# Not practical to try to verify the presence of OS-specific
# paths in this list.
# We test _get_default_tempdir some more by testing gettempdir.
class TestGetDefaultTempdir(BaseTestCase):
"""Test _get_default_tempdir()."""
def test_no_files_left_behind(self):
# use a private empty directory
with tempfile.TemporaryDirectory() as our_temp_directory:
# force _get_default_tempdir() to consider our empty directory
def our_candidate_list():
return [our_temp_directory]
with support.swap_attr(tempfile, "_candidate_tempdir_list",
our_candidate_list):
# verify our directory is empty after _get_default_tempdir()
tempfile._get_default_tempdir()
self.assertEqual(os.listdir(our_temp_directory), [])
def raise_OSError(*args, **kwargs):
raise OSError()
with support.swap_attr(io, "open", raise_OSError):
# test again with failing io.open()
with self.assertRaises(FileNotFoundError):
tempfile._get_default_tempdir()
self.assertEqual(os.listdir(our_temp_directory), [])
def bad_writer(*args, **kwargs):
fp = orig_open(*args, **kwargs)
fp.write = raise_OSError
return fp
with support.swap_attr(io, "open", bad_writer) as orig_open:
# test again with failing write()
with self.assertRaises(FileNotFoundError):
tempfile._get_default_tempdir()
self.assertEqual(os.listdir(our_temp_directory), [])
class TestGetCandidateNames(BaseTestCase):
"""Test the internal function _get_candidate_names."""
def test_retval(self):
# _get_candidate_names returns a _RandomNameSequence object
obj = tempfile._get_candidate_names()
self.assertIsInstance(obj, tempfile._RandomNameSequence)
def test_same_thing(self):
# _get_candidate_names always returns the same object
a = tempfile._get_candidate_names()
b = tempfile._get_candidate_names()
self.assertTrue(a is b)
@contextlib.contextmanager
def _inside_empty_temp_dir():
dir = tempfile.mkdtemp()
try:
with support.swap_attr(tempfile, 'tempdir', dir):
yield
finally:
support.rmtree(dir)
def _mock_candidate_names(*names):
return support.swap_attr(tempfile,
'_get_candidate_names',
lambda: iter(names))
class TestBadTempdir:
def test_read_only_directory(self):
with _inside_empty_temp_dir():
oldmode = mode = os.stat(tempfile.tempdir).st_mode
mode &= ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
os.chmod(tempfile.tempdir, mode)
try:
if os.access(tempfile.tempdir, os.W_OK):
self.skipTest("can't set the directory read-only")
with self.assertRaises(PermissionError):
self.make_temp()
self.assertEqual(os.listdir(tempfile.tempdir), [])
finally:
os.chmod(tempfile.tempdir, oldmode)
def test_nonexisting_directory(self):
with _inside_empty_temp_dir():
tempdir = os.path.join(tempfile.tempdir, 'nonexistent')
with support.swap_attr(tempfile, 'tempdir', tempdir):
with self.assertRaises(FileNotFoundError):
self.make_temp()
def test_non_directory(self):
with _inside_empty_temp_dir():
tempdir = os.path.join(tempfile.tempdir, 'file')
open(tempdir, 'wb').close()
with support.swap_attr(tempfile, 'tempdir', tempdir):
with self.assertRaises((NotADirectoryError, FileNotFoundError)):
self.make_temp()
class TestMkstempInner(TestBadTempdir, BaseTestCase):
"""Test the internal function _mkstemp_inner."""
class mkstemped:
_bflags = tempfile._bin_openflags
_tflags = tempfile._text_openflags
_close = os.close
_unlink = os.unlink
def __init__(self, dir, pre, suf, bin):
if bin: flags = self._bflags
else: flags = self._tflags
output_type = tempfile._infer_return_type(dir, pre, suf)
(self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags, output_type)
def write(self, str):
os.write(self.fd, str)
def __del__(self):
self._close(self.fd)
self._unlink(self.name)
def do_create(self, dir=None, pre=None, suf=None, bin=1):
output_type = tempfile._infer_return_type(dir, pre, suf)
if dir is None:
if output_type is str:
dir = tempfile.gettempdir()
else:
dir = tempfile.gettempdirb()
if pre is None:
pre = output_type()
if suf is None:
suf = output_type()
file = self.mkstemped(dir, pre, suf, bin)
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
# _mkstemp_inner can create files
self.do_create().write(b"blat")
self.do_create(pre="a").write(b"blat")
self.do_create(suf="b").write(b"blat")
self.do_create(pre="a", suf="b").write(b"blat")
self.do_create(pre="aa", suf=".txt").write(b"blat")
def test_basic_with_bytes_names(self):
# _mkstemp_inner can create files when given name parts all
# specified as bytes.
dir_b = tempfile.gettempdirb()
self.do_create(dir=dir_b, suf=b"").write(b"blat")
self.do_create(dir=dir_b, pre=b"a").write(b"blat")
self.do_create(dir=dir_b, suf=b"b").write(b"blat")
self.do_create(dir=dir_b, pre=b"a", suf=b"b").write(b"blat")
self.do_create(dir=dir_b, pre=b"aa", suf=b".txt").write(b"blat")
# Can't mix str & binary types in the args.
with self.assertRaises(TypeError):
self.do_create(dir="", suf=b"").write(b"blat")
with self.assertRaises(TypeError):
self.do_create(dir=dir_b, pre="").write(b"blat")
with self.assertRaises(TypeError):
self.do_create(dir=dir_b, pre=b"", suf="").write(b"blat")
def test_basic_many(self):
# _mkstemp_inner can create many files (stochastic)
extant = list(range(TEST_FILES))
for i in extant:
extant[i] = self.do_create(pre="aa")
def test_choose_directory(self):
# _mkstemp_inner can create files in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir).write(b"blat")
self.do_create(dir=pathlib.Path(dir)).write(b"blat")
finally:
os.rmdir(dir)
def test_file_mode(self):
# _mkstemp_inner creates files with the proper mode
file = self.do_create()
mode = stat.S_IMODE(os.stat(file.name).st_mode)
expected = 0o600
if sys.platform == 'win32':
# There's no distinction among 'user', 'group' and 'world';
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
@unittest.skipUnless(has_spawnl, 'os.spawnl not available')
def test_noinherit(self):
# _mkstemp_inner file handles are not inherited by child processes
if support.verbose:
v="v"
else:
v="q"
file = self.do_create()
self.assertEqual(os.get_inheritable(file.fd), False)
fd = "%d" % file.fd
try:
me = __file__
except NameError:
me = sys.argv[0]
# We have to exec something, so that FD_CLOEXEC will take
# effect. The core of this test is therefore in
# tf_inherit_check.py, which see.
tester = os.path.join(os.path.dirname(os.path.abspath(me)),
"tf_inherit_check.py")
# On Windows a spawn* /path/ with embedded spaces shouldn't be quoted,
# but an arg with embedded spaces should be decorated with double
# quotes on each end
if sys.platform == 'win32':
decorated = '"%s"' % sys.executable
tester = '"%s"' % tester
else:
decorated = sys.executable
retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd)
self.assertFalse(retval < 0,
"child process caught fatal signal %d" % -retval)
self.assertFalse(retval > 0, "child process reports failure %d"%retval)
@unittest.skipUnless(has_textmode, "text mode not available")
def test_textmode(self):
# _mkstemp_inner can create files in text mode
# A text file is truncated at the first Ctrl+Z byte
f = self.do_create(bin=0)
f.write(b"blat\x1a")
f.write(b"extra\n")
os.lseek(f.fd, 0, os.SEEK_SET)
self.assertEqual(os.read(f.fd, 20), b"blat")
def make_temp(self):
return tempfile._mkstemp_inner(tempfile.gettempdir(),
tempfile.gettempprefix(),
'',
tempfile._bin_openflags,
str)
def test_collision_with_existing_file(self):
# _mkstemp_inner tries another name when a file with
# the chosen name already exists
with _inside_empty_temp_dir(), \
_mock_candidate_names('aaa', 'aaa', 'bbb'):
(fd1, name1) = self.make_temp()
os.close(fd1)
self.assertTrue(name1.endswith('aaa'))
(fd2, name2) = self.make_temp()
os.close(fd2)
self.assertTrue(name2.endswith('bbb'))
def test_collision_with_existing_directory(self):
# _mkstemp_inner tries another name when a directory with
# the chosen name already exists
with _inside_empty_temp_dir(), \
_mock_candidate_names('aaa', 'aaa', 'bbb'):
dir = tempfile.mkdtemp()
self.assertTrue(dir.endswith('aaa'))
(fd, name) = self.make_temp()
os.close(fd)
self.assertTrue(name.endswith('bbb'))
class TestGetTempPrefix(BaseTestCase):
"""Test gettempprefix()."""
def test_sane_template(self):
# gettempprefix returns a nonempty prefix string
p = tempfile.gettempprefix()
self.assertIsInstance(p, str)
self.assertGreater(len(p), 0)
pb = tempfile.gettempprefixb()
self.assertIsInstance(pb, bytes)
self.assertGreater(len(pb), 0)
def test_usable_template(self):
# gettempprefix returns a usable prefix string
# Create a temp directory, avoiding use of the prefix.
# Then attempt to create a file whose name is
# prefix + 'xxxxxx.xxx' in that directory.
p = tempfile.gettempprefix() + "xxxxxx.xxx"
d = tempfile.mkdtemp(prefix="")
try:
p = os.path.join(d, p)
fd = os.open(p, os.O_RDWR | os.O_CREAT)
os.close(fd)
os.unlink(p)
finally:
os.rmdir(d)
class TestGetTempDir(BaseTestCase):
"""Test gettempdir()."""
def test_directory_exists(self):
# gettempdir returns a directory which exists
for d in (tempfile.gettempdir(), tempfile.gettempdirb()):
self.assertTrue(os.path.isabs(d) or d == os.curdir,
"%r is not an absolute path" % d)
self.assertTrue(os.path.isdir(d),
"%r is not a directory" % d)
def test_directory_writable(self):
# gettempdir returns a directory writable by the user
# sneaky: just instantiate a NamedTemporaryFile, which
# defaults to writing into the directory returned by
# gettempdir.
with tempfile.NamedTemporaryFile() as file:
file.write(b"blat")
def test_same_thing(self):
# gettempdir always returns the same object
a = tempfile.gettempdir()
b = tempfile.gettempdir()
c = tempfile.gettempdirb()
self.assertTrue(a is b)
self.assertNotEqual(type(a), type(c))
self.assertEqual(a, os.fsdecode(c))
def test_case_sensitive(self):
# gettempdir should not flatten its case
# even on a case-insensitive file system
case_sensitive_tempdir = tempfile.mkdtemp("-Temp")
_tempdir, tempfile.tempdir = tempfile.tempdir, None
try:
with support.EnvironmentVarGuard() as env:
# Fake the first env var which is checked as a candidate
env["TMPDIR"] = case_sensitive_tempdir
self.assertEqual(tempfile.gettempdir(), case_sensitive_tempdir)
finally:
tempfile.tempdir = _tempdir
support.rmdir(case_sensitive_tempdir)
class TestMkstemp(BaseTestCase):
"""Test mkstemp()."""
def do_create(self, dir=None, pre=None, suf=None):
output_type = tempfile._infer_return_type(dir, pre, suf)
if dir is None:
if output_type is str:
dir = tempfile.gettempdir()
else:
dir = tempfile.gettempdirb()
if pre is None:
pre = output_type()
if suf is None:
suf = output_type()
(fd, name) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf)
(ndir, nbase) = os.path.split(name)
adir = os.path.abspath(dir)
self.assertEqual(adir, ndir,
"Directory '%s' incorrectly returned as '%s'" % (adir, ndir))
try:
self.nameCheck(name, dir, pre, suf)
finally:
os.close(fd)
os.unlink(name)
def test_basic(self):
# mkstemp can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
self.do_create(dir=".")
def test_basic_with_bytes_names(self):
# mkstemp can create files when given name parts all
# specified as bytes.
d = tempfile.gettempdirb()
self.do_create(dir=d, suf=b"")
self.do_create(dir=d, pre=b"a")
self.do_create(dir=d, suf=b"b")
self.do_create(dir=d, pre=b"a", suf=b"b")
self.do_create(dir=d, pre=b"aa", suf=b".txt")
self.do_create(dir=b".")
with self.assertRaises(TypeError):
self.do_create(dir=".", pre=b"aa", suf=b".txt")
with self.assertRaises(TypeError):
self.do_create(dir=b".", pre="aa", suf=b".txt")
with self.assertRaises(TypeError):
self.do_create(dir=b".", pre=b"aa", suf=".txt")
def test_choose_directory(self):
# mkstemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir)
self.do_create(dir=pathlib.Path(dir))
finally:
os.rmdir(dir)
class TestMkdtemp(TestBadTempdir, BaseTestCase):
"""Test mkdtemp()."""
def make_temp(self):
return tempfile.mkdtemp()
def do_create(self, dir=None, pre=None, suf=None):
output_type = tempfile._infer_return_type(dir, pre, suf)
if dir is None:
if output_type is str:
dir = tempfile.gettempdir()
else:
dir = tempfile.gettempdirb()
if pre is None:
pre = output_type()
if suf is None:
suf = output_type()
name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf)
try:
self.nameCheck(name, dir, pre, suf)
return name
except:
os.rmdir(name)
raise
def test_basic(self):
# mkdtemp can create directories
os.rmdir(self.do_create())
os.rmdir(self.do_create(pre="a"))
os.rmdir(self.do_create(suf="b"))
os.rmdir(self.do_create(pre="a", suf="b"))
os.rmdir(self.do_create(pre="aa", suf=".txt"))
def test_basic_with_bytes_names(self):
# mkdtemp can create directories when given all binary parts
d = tempfile.gettempdirb()
os.rmdir(self.do_create(dir=d))
os.rmdir(self.do_create(dir=d, pre=b"a"))
os.rmdir(self.do_create(dir=d, suf=b"b"))
os.rmdir(self.do_create(dir=d, pre=b"a", suf=b"b"))
os.rmdir(self.do_create(dir=d, pre=b"aa", suf=b".txt"))
with self.assertRaises(TypeError):
os.rmdir(self.do_create(dir=d, pre="aa", suf=b".txt"))
with self.assertRaises(TypeError):
os.rmdir(self.do_create(dir=d, pre=b"aa", suf=".txt"))
with self.assertRaises(TypeError):
os.rmdir(self.do_create(dir="", pre=b"aa", suf=b".txt"))
def test_basic_many(self):
# mkdtemp can create many directories (stochastic)
extant = list(range(TEST_FILES))
try:
for i in extant:
extant[i] = self.do_create(pre="aa")
finally:
for i in extant:
if isinstance(i, str):
os.rmdir(i)
def test_choose_directory(self):
# mkdtemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
os.rmdir(self.do_create(dir=dir))
os.rmdir(self.do_create(dir=pathlib.Path(dir)))
finally:
os.rmdir(dir)
def test_mode(self):
# mkdtemp creates directories with the proper mode
dir = self.do_create()
try:
mode = stat.S_IMODE(os.stat(dir).st_mode)
mode &= 0o777 # Mask off sticky bits inherited from /tmp
expected = 0o700
if sys.platform == 'win32':
# There's no distinction among 'user', 'group' and 'world';
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
finally:
os.rmdir(dir)
def test_collision_with_existing_file(self):
# mkdtemp tries another name when a file with
# the chosen name already exists
with _inside_empty_temp_dir(), \
_mock_candidate_names('aaa', 'aaa', 'bbb'):
file = tempfile.NamedTemporaryFile(delete=False)
file.close()
self.assertTrue(file.name.endswith('aaa'))
dir = tempfile.mkdtemp()
self.assertTrue(dir.endswith('bbb'))
def test_collision_with_existing_directory(self):
# mkdtemp tries another name when a directory with
# the chosen name already exists
with _inside_empty_temp_dir(), \
_mock_candidate_names('aaa', 'aaa', 'bbb'):
dir1 = tempfile.mkdtemp()
self.assertTrue(dir1.endswith('aaa'))
dir2 = tempfile.mkdtemp()
self.assertTrue(dir2.endswith('bbb'))
class TestMktemp(BaseTestCase):
"""Test mktemp()."""
# For safety, all use of mktemp must occur in a private directory.
# We must also suppress the RuntimeWarning it generates.
def setUp(self):
self.dir = tempfile.mkdtemp()
super().setUp()
def tearDown(self):
if self.dir:
os.rmdir(self.dir)
self.dir = None
super().tearDown()
class mktemped:
_unlink = os.unlink
_bflags = tempfile._bin_openflags
def __init__(self, dir, pre, suf):
self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf)
# Create the file. This will raise an exception if it's
# mysteriously appeared in the meanwhile.
os.close(os.open(self.name, self._bflags, 0o600))
def __del__(self):
self._unlink(self.name)
def do_create(self, pre="", suf=""):
file = self.mktemped(self.dir, pre, suf)
self.nameCheck(file.name, self.dir, pre, suf)
return file
def test_basic(self):
# mktemp can choose usable file names
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_many(self):
# mktemp can choose many usable file names (stochastic)
extant = list(range(TEST_FILES))
for i in extant:
extant[i] = self.do_create(pre="aa")
## def test_warning(self):
## # mktemp issues a warning when used
## warnings.filterwarnings("error",
## category=RuntimeWarning,
## message="mktemp")
## self.assertRaises(RuntimeWarning,
## tempfile.mktemp, dir=self.dir)
# We test _TemporaryFileWrapper by testing NamedTemporaryFile.
class TestNamedTemporaryFile(BaseTestCase):
"""Test NamedTemporaryFile()."""
def do_create(self, dir=None, pre="", suf="", delete=True):
if dir is None:
dir = tempfile.gettempdir()
file = tempfile.NamedTemporaryFile(dir=dir, prefix=pre, suffix=suf,
delete=delete)
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
# NamedTemporaryFile can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_method_lookup(self):
# Issue #18879: Looking up a temporary file method should keep it
# alive long enough.
f = self.do_create()
wr = weakref.ref(f)
write = f.write
write2 = f.write
del f
write(b'foo')
del write
write2(b'bar')
del write2
if support.check_impl_detail(cpython=True):
# No reference cycle was created.
self.assertIsNone(wr())
def test_iter(self):
# Issue #23700: getting iterator from a temporary file should keep
# it alive as long as it's being iterated over
lines = [b'spam\n', b'eggs\n', b'beans\n']
def make_file():
f = tempfile.NamedTemporaryFile(mode='w+b')
f.write(b''.join(lines))
f.seek(0)
return f
for i, l in enumerate(make_file()):
self.assertEqual(l, lines[i])
self.assertEqual(i, len(lines) - 1)
def test_creates_named(self):
# NamedTemporaryFile creates files with names
f = tempfile.NamedTemporaryFile()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s does not exist" % f.name)
def test_del_on_close(self):
# A NamedTemporaryFile is deleted when closed
dir = tempfile.mkdtemp()
try:
with tempfile.NamedTemporaryFile(dir=dir) as f:
f.write(b'blat')
self.assertFalse(os.path.exists(f.name),
"NamedTemporaryFile %s exists after close" % f.name)
finally:
os.rmdir(dir)
def test_dis_del_on_close(self):
# Tests that delete-on-close can be disabled
dir = tempfile.mkdtemp()
tmp = None
try:
f = tempfile.NamedTemporaryFile(dir=dir, delete=False)
tmp = f.name
f.write(b'blat')
f.close()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s missing after close" % f.name)
finally:
if tmp is not None:
os.unlink(tmp)
os.rmdir(dir)
def test_multiple_close(self):
# A NamedTemporaryFile can be closed many times without error
f = tempfile.NamedTemporaryFile()
f.write(b'abc\n')
f.close()
f.close()
f.close()
def test_context_manager(self):
# A NamedTemporaryFile can be used as a context manager
with tempfile.NamedTemporaryFile() as f:
self.assertTrue(os.path.exists(f.name))
self.assertFalse(os.path.exists(f.name))
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_no_leak_fd(self):
# Issue #21058: don't leak file descriptor when io.open() fails
closed = []
os_close = os.close
def close(fd):
closed.append(fd)
os_close(fd)
with mock.patch('os.close', side_effect=close):
with mock.patch('io.open', side_effect=ValueError):
self.assertRaises(ValueError, tempfile.NamedTemporaryFile)
self.assertEqual(len(closed), 1)
def test_bad_mode(self):
dir = tempfile.mkdtemp()
self.addCleanup(support.rmtree, dir)
with self.assertRaises(ValueError):
tempfile.NamedTemporaryFile(mode='wr', dir=dir)
with self.assertRaises(TypeError):
tempfile.NamedTemporaryFile(mode=2, dir=dir)
self.assertEqual(os.listdir(dir), [])
# How to test the mode and bufsize parameters?
class TestSpooledTemporaryFile(BaseTestCase):
"""Test SpooledTemporaryFile()."""
def do_create(self, max_size=0, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf)
return file
def test_basic(self):
# SpooledTemporaryFile can create files
f = self.do_create()
self.assertFalse(f._rolled)
f = self.do_create(max_size=100, pre="a", suf=".txt")
self.assertFalse(f._rolled)
def test_del_on_close(self):
# A SpooledTemporaryFile is deleted when closed
dir = tempfile.mkdtemp()
try:
f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir)
self.assertFalse(f._rolled)
f.write(b'blat ' * 5)
self.assertTrue(f._rolled)
filename = f.name
f.close()
self.assertFalse(isinstance(filename, str) and os.path.exists(filename),
"SpooledTemporaryFile %s exists after close" % filename)
finally:
os.rmdir(dir)
def test_rewrite_small(self):
# A SpooledTemporaryFile can be written to multiple times within the max_size
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
for i in range(5):
f.seek(0, 0)
f.write(b'x' * 20)
self.assertFalse(f._rolled)
def test_write_sequential(self):
# A SpooledTemporaryFile should hold exactly max_size bytes, and roll
# over afterward
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.write(b'x' * 20)
self.assertFalse(f._rolled)
f.write(b'x' * 10)
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_writelines(self):
# Verify writelines with a SpooledTemporaryFile
f = self.do_create()
f.writelines((b'x', b'y', b'z'))
f.seek(0)
buf = f.read()
self.assertEqual(buf, b'xyz')
def test_writelines_sequential(self):
# A SpooledTemporaryFile should hold exactly max_size bytes, and roll
# over afterward
f = self.do_create(max_size=35)
f.writelines((b'x' * 20, b'x' * 10, b'x' * 5))
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_sparse(self):
# A SpooledTemporaryFile seeked past max_size does not roll over until a
# write actually occurs at that position
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.seek(100, 0)
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_fileno(self):
# A SpooledTemporaryFile should roll over to a real file on fileno()
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
self.assertTrue(f.fileno() > 0)
self.assertTrue(f._rolled)
def test_multiple_close_before_rollover(self):
# A SpooledTemporaryFile can be closed many times without error
f = tempfile.SpooledTemporaryFile()
f.write(b'abc\n')
self.assertFalse(f._rolled)
f.close()
f.close()
f.close()
def test_multiple_close_after_rollover(self):
# A SpooledTemporaryFile can be closed many times without error
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write(b'abc\n')
self.assertTrue(f._rolled)
f.close()
f.close()
f.close()
def test_bound_methods(self):
# It should be OK to steal a bound method from a SpooledTemporaryFile
# and use it independently; when the file rolls over, those bound
# methods should continue to function
f = self.do_create(max_size=30)
read = f.read
write = f.write
seek = f.seek
write(b"a" * 35)
write(b"b" * 35)
seek(0, 0)
self.assertEqual(read(70), b'a'*35 + b'b'*35)
def test_properties(self):
f = tempfile.SpooledTemporaryFile(max_size=10)
f.write(b'x' * 10)
self.assertFalse(f._rolled)
self.assertEqual(f.mode, 'w+b')
self.assertIsNone(f.name)
with self.assertRaises(AttributeError):
f.newlines
with self.assertRaises(AttributeError):
f.encoding
with self.assertRaises(AttributeError):
f.errors
f.write(b'x')
self.assertTrue(f._rolled)
self.assertEqual(f.mode, 'rb+')
self.assertIsNotNone(f.name)
with self.assertRaises(AttributeError):
f.newlines
with self.assertRaises(AttributeError):
f.encoding
with self.assertRaises(AttributeError):
f.errors
def test_text_mode(self):
# Creating a SpooledTemporaryFile with a text mode should produce
# a file object reading and writing (Unicode) text strings.
f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10)
f.write("abc\n")
f.seek(0)
self.assertEqual(f.read(), "abc\n")
f.write("def\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\n")
self.assertFalse(f._rolled)
self.assertEqual(f.mode, 'w+')
self.assertIsNone(f.name)
self.assertIsNone(f.newlines)
self.assertIsNone(f.encoding)
self.assertIsNone(f.errors)
f.write("xyzzy\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\nxyzzy\n")
# Check that Ctrl+Z doesn't truncate the file
f.write("foo\x1abar\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\nxyzzy\nfoo\x1abar\n")
self.assertTrue(f._rolled)
self.assertEqual(f.mode, 'w+')
self.assertIsNotNone(f.name)
self.assertEqual(f.newlines, os.linesep)
self.assertIsNotNone(f.encoding)
self.assertIsNotNone(f.errors)
def test_text_newline_and_encoding(self):
f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10,
newline='', encoding='utf-8',
errors='ignore')
f.write("\u039B\r\n")
f.seek(0)
self.assertEqual(f.read(), "\u039B\r\n")
self.assertFalse(f._rolled)
self.assertEqual(f.mode, 'w+')
self.assertIsNone(f.name)
self.assertIsNone(f.newlines)
self.assertIsNone(f.encoding)
self.assertIsNone(f.errors)
f.write("\u039B" * 20 + "\r\n")
f.seek(0)
self.assertEqual(f.read(), "\u039B\r\n" + ("\u039B" * 20) + "\r\n")
self.assertTrue(f._rolled)
self.assertEqual(f.mode, 'w+')
self.assertIsNotNone(f.name)
self.assertIsNotNone(f.newlines)
self.assertEqual(f.encoding, 'utf-8')
self.assertEqual(f.errors, 'ignore')
def test_context_manager_before_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_during_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
f.write(b'abc\n')
f.flush()
self.assertTrue(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_after_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write(b'abc\n')
f.flush()
self.assertTrue(f._rolled)
with f:
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_truncate_with_size_parameter(self):
# A SpooledTemporaryFile can be truncated to zero size
f = tempfile.SpooledTemporaryFile(max_size=10)
f.write(b'abcdefg\n')
f.seek(0)
f.truncate()
self.assertFalse(f._rolled)
self.assertEqual(f._file.getvalue(), b'')
# A SpooledTemporaryFile can be truncated to a specific size
f = tempfile.SpooledTemporaryFile(max_size=10)
f.write(b'abcdefg\n')
f.truncate(4)
self.assertFalse(f._rolled)
self.assertEqual(f._file.getvalue(), b'abcd')
# A SpooledTemporaryFile rolls over if truncated to large size
f = tempfile.SpooledTemporaryFile(max_size=10)
f.write(b'abcdefg\n')
f.truncate(20)
self.assertTrue(f._rolled)
self.assertEqual(os.fstat(f.fileno()).st_size, 20)
if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:
class TestTemporaryFile(BaseTestCase):
"""Test TemporaryFile()."""
def test_basic(self):
# TemporaryFile can create files
# No point in testing the name params - the file has no name.
tempfile.TemporaryFile()
def test_has_no_name(self):
# TemporaryFile creates files with no names (on this system)
dir = tempfile.mkdtemp()
f = tempfile.TemporaryFile(dir=dir)
f.write(b'blat')
# Sneaky: because this file has no name, it should not prevent
# us from removing the directory it was created in.
try:
os.rmdir(dir)
except:
# cleanup
f.close()
os.rmdir(dir)
raise
def test_multiple_close(self):
# A TemporaryFile can be closed many times without error
f = tempfile.TemporaryFile()
f.write(b'abc\n')
f.close()
f.close()
f.close()
# How to test the mode and bufsize parameters?
def test_mode_and_encoding(self):
def roundtrip(input, *args, **kwargs):
with tempfile.TemporaryFile(*args, **kwargs) as fileobj:
fileobj.write(input)
fileobj.seek(0)
self.assertEqual(input, fileobj.read())
roundtrip(b"1234", "w+b")
roundtrip("abdc\n", "w+")
roundtrip("\u039B", "w+", encoding="utf-16")
roundtrip("foo\r\n", "w+", newline="")
def test_no_leak_fd(self):
# Issue #21058: don't leak file descriptor when io.open() fails
closed = []
os_close = os.close
def close(fd):
closed.append(fd)
os_close(fd)
with mock.patch('os.close', side_effect=close):
with mock.patch('io.open', side_effect=ValueError):
self.assertRaises(ValueError, tempfile.TemporaryFile)
self.assertEqual(len(closed), 1)
# Helper for test_del_on_shutdown
class NulledModules:
def __init__(self, *modules):
self.refs = [mod.__dict__ for mod in modules]
self.contents = [ref.copy() for ref in self.refs]
def __enter__(self):
for d in self.refs:
for key in d:
d[key] = None
def __exit__(self, *exc_info):
for d, c in zip(self.refs, self.contents):
d.clear()
d.update(c)
class TestTemporaryDirectory(BaseTestCase):
"""Test TemporaryDirectory()."""
def do_create(self, dir=None, pre="", suf="", recurse=1, dirs=1, files=1):
if dir is None:
dir = tempfile.gettempdir()
tmp = tempfile.TemporaryDirectory(dir=dir, prefix=pre, suffix=suf)
self.nameCheck(tmp.name, dir, pre, suf)
self.do_create2(tmp.name, recurse, dirs, files)
return tmp
def do_create2(self, path, recurse=1, dirs=1, files=1):
# Create subdirectories and some files
if recurse:
for i in range(dirs):
name = os.path.join(path, "dir%d" % i)
os.mkdir(name)
self.do_create2(name, recurse-1, dirs, files)
for i in range(files):
with open(os.path.join(path, "test%d.txt" % i), "wb") as f:
f.write(b"Hello world!")
def test_mkdtemp_failure(self):
# Check no additional exception if mkdtemp fails
# Previously would raise AttributeError instead
# (noted as part of Issue #10188)
with tempfile.TemporaryDirectory() as nonexistent:
pass
with self.assertRaises(FileNotFoundError) as cm:
tempfile.TemporaryDirectory(dir=nonexistent)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def test_explicit_cleanup(self):
# A TemporaryDirectory is deleted when cleaned up
dir = tempfile.mkdtemp()
try:
d = self.do_create(dir=dir)
self.assertTrue(os.path.exists(d.name),
"TemporaryDirectory %s does not exist" % d.name)
d.cleanup()
self.assertFalse(os.path.exists(d.name),
"TemporaryDirectory %s exists after cleanup" % d.name)
finally:
os.rmdir(dir)
@support.skip_unless_symlink
def test_cleanup_with_symlink_to_a_directory(self):
# cleanup() should not follow symlinks to directories (issue #12464)
d1 = self.do_create()
d2 = self.do_create(recurse=0)
# Symlink d1/foo -> d2
os.symlink(d2.name, os.path.join(d1.name, "foo"))
# This call to cleanup() should not follow the "foo" symlink
d1.cleanup()
self.assertFalse(os.path.exists(d1.name),
"TemporaryDirectory %s exists after cleanup" % d1.name)
self.assertTrue(os.path.exists(d2.name),
"Directory pointed to by a symlink was deleted")
self.assertEqual(os.listdir(d2.name), ['test0.txt'],
"Contents of the directory pointed to by a symlink "
"were deleted")
d2.cleanup()
@support.cpython_only
def test_del_on_collection(self):
# A TemporaryDirectory is deleted when garbage collected
dir = tempfile.mkdtemp()
try:
d = self.do_create(dir=dir)
name = d.name
del d # Rely on refcounting to invoke __del__
self.assertFalse(os.path.exists(name),
"TemporaryDirectory %s exists after __del__" % name)
finally:
os.rmdir(dir)
def test_del_on_shutdown(self):
# A TemporaryDirectory may be cleaned up during shutdown
with self.do_create() as dir:
for mod in ('builtins', 'os', 'shutil', 'sys', 'tempfile', 'warnings'):
code = """if True:
import builtins
import os
import shutil
import sys
import tempfile
import warnings
tmp = tempfile.TemporaryDirectory(dir={dir!r})
sys.stdout.buffer.write(tmp.name.encode())
tmp2 = os.path.join(tmp.name, 'test_dir')
os.mkdir(tmp2)
with open(os.path.join(tmp2, "test0.txt"), "w") as f:
f.write("Hello world!")
{mod}.tmp = tmp
warnings.filterwarnings("always", category=ResourceWarning)
""".format(dir=dir, mod=mod)
rc, out, err = script_helper.assert_python_ok("-c", code)
tmp_name = out.decode().strip()
self.assertFalse(os.path.exists(tmp_name),
"TemporaryDirectory %s exists after cleanup" % tmp_name)
err = err.decode('utf-8', 'backslashreplace')
self.assertNotIn("Exception ", err)
self.assertIn("ResourceWarning: Implicitly cleaning up", err)
def test_exit_on_shutdown(self):
# Issue #22427
with self.do_create() as dir:
code = """if True:
import sys
import tempfile
import warnings
def generator():
with tempfile.TemporaryDirectory(dir={dir!r}) as tmp:
yield tmp
g = generator()
sys.stdout.buffer.write(next(g).encode())
warnings.filterwarnings("always", category=ResourceWarning)
""".format(dir=dir)
rc, out, err = script_helper.assert_python_ok("-c", code)
tmp_name = out.decode().strip()
self.assertFalse(os.path.exists(tmp_name),
"TemporaryDirectory %s exists after cleanup" % tmp_name)
err = err.decode('utf-8', 'backslashreplace')
self.assertNotIn("Exception ", err)
self.assertIn("ResourceWarning: Implicitly cleaning up", err)
def test_warnings_on_cleanup(self):
# ResourceWarning will be triggered by __del__
with self.do_create() as dir:
d = self.do_create(dir=dir, recurse=3)
name = d.name
# Check for the resource warning
with support.check_warnings(('Implicitly', ResourceWarning), quiet=False):
warnings.filterwarnings("always", category=ResourceWarning)
del d
support.gc_collect()
self.assertFalse(os.path.exists(name),
"TemporaryDirectory %s exists after __del__" % name)
def test_multiple_close(self):
# Can be cleaned-up many times without error
d = self.do_create()
d.cleanup()
d.cleanup()
d.cleanup()
def test_context_manager(self):
# Can be used as a context manager
d = self.do_create()
with d as name:
self.assertTrue(os.path.exists(name))
self.assertEqual(name, d.name)
self.assertFalse(os.path.exists(name))
def test_modes(self):
for mode in range(8):
mode <<= 6
with self.subTest(mode=format(mode, '03o')):
d = self.do_create(recurse=3, dirs=2, files=2)
with d:
# Change files and directories mode recursively.
for root, dirs, files in os.walk(d.name, topdown=False):
for name in files:
os.chmod(os.path.join(root, name), mode)
os.chmod(root, mode)
d.cleanup()
self.assertFalse(os.path.exists(d.name))
@unittest.skipUnless(hasattr(os, 'chflags'), 'requires os.chflags')
def test_flags(self):
flags = stat.UF_IMMUTABLE | stat.UF_NOUNLINK
d = self.do_create(recurse=3, dirs=2, files=2)
with d:
# Change files and directories flags recursively.
for root, dirs, files in os.walk(d.name, topdown=False):
for name in files:
os.chflags(os.path.join(root, name), flags)
os.chflags(root, flags)
d.cleanup()
self.assertFalse(os.path.exists(d.name))
if __name__ == "__main__":
unittest.main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from buildbot import locks
import factories
reload(factories)
from factories import self_update
from factories import buildslaves
# from factories import cpp_ethereum
# from factories import cpp_ethereum_osx
# from factories import cpp_ethereum_brew
# from factories import cpp_ethereum_windows
from factories import go_ethereum
from factories import go_ethereum_arm
from factories import go_ethereum_osx
from factories import go_ethereum_brew
from factories import go_ethereum_windows
from factories import mist
from factories import ethereumj
from factories import pyethereum
from factories import pyethapp
from factories import serpent
from factories import debian
from factories import debian_backport
from factories import poc_servers
from factories import integration
reload(self_update)
reload(buildslaves)
# reload(cpp_ethereum)
# reload(cpp_ethereum_osx)
# reload(cpp_ethereum_brew)
# reload(cpp_ethereum_windows)
reload(go_ethereum)
reload(go_ethereum_arm)
reload(go_ethereum_osx)
reload(go_ethereum_brew)
reload(go_ethereum_windows)
reload(mist)
reload(ethereumj)
reload(pyethereum)
reload(pyethapp)
reload(serpent)
reload(debian)
reload(debian_backport)
reload(poc_servers)
reload(integration)
from factories.factory import distributions
from factories.self_update import *
from factories.buildslaves import *
# from factories.cpp_ethereum import *
# from factories.cpp_ethereum_osx import *
# from factories.cpp_ethereum_brew import *
# from factories.cpp_ethereum_windows import *
from factories.go_ethereum import *
from factories.go_ethereum_arm import *
from factories.go_ethereum_osx import *
from factories.go_ethereum_brew import *
from factories.go_ethereum_windows import *
from factories.mist import *
from factories.ethereumj import *
from factories.pyethereum import *
from factories.pyethapp import *
from factories.serpent import *
from factories.debian import *
from factories.debian_backport import *
from factories.poc_servers import *
from factories.integration import *
# ###### BUILDERS
# The 'builders' list defines the Builders, which tell Buildbot how to perform a build:
# what steps, and which slaves can execute them. Note that any particular build will
# only take place on one slave.
builders = []
self_lock = locks.SlaveLock("self_update", maxCount=1)
build_lock = locks.SlaveLock("slave_builds", maxCount=2)
# package_lock = locks.SlaveLock("slave_packaging",
# maxCount=4,
# maxCountForSlave = {
# 'slave-cpp-one-deb': 2,
# 'slave-cpp-two-deb': 2,
# 'slave-go-one-deb': 2,
# 'slave-go-two-deb': 2 })
go_lock = locks.SlaveLock("go_builds", maxCount=1)
arm_lock = locks.SlaveLock("arm_builds", maxCount=1)
osx_lock = locks.SlaveLock("osx_builds", maxCount=2)
brew_lock = locks.SlaveLock("brew_builds", maxCount=1)
win_lock = locks.SlaveLock("win_builds", maxCount=2)
win_lock_go = locks.SlaveLock("win_go_builds", maxCount=1)
# Latent slaves for builders
max_latents = 20
latentslaves = []
maxperslave = {}
for n in range(1, max_latents + 1):
name = "latentslave%s" % n
latentslaves.append(name)
maxperslave[name] = 1 # One build per latent buildslave
latent_lock = locks.SlaveLock("latent_builds",
maxCount=max_latents,
maxCountForSlave=maxperslave)
#
# Builders
#
from buildbot.config import BuilderConfig
builders = []
# Self-update builder
for builder in [
BuilderConfig(
name="buildbot",
builddir="build-self",
slavenames=["selfslave"],
factory=self_update_factory(),
locks=[self_lock.access('exclusive')])
]: builders.append(builder)
# Buildslave builders
for buildslave in ['five', 'six']:
for builder in [
# BuilderConfig(
# name="buildslave-cpp-%s" % buildslave,
# builddir="build-buildslave-cpp-%s" % buildslave,
# slavenames=["buildslave-%s" % buildslave],
# factory=buildslave_factory("cpp", "cpp-ethereum"),
# locks=[build_lock.access('counting')]),
BuilderConfig(
name="buildslave-go-%s" % buildslave,
builddir="build-buildslave-go-%s" % buildslave,
slavenames=["buildslave-%s" % buildslave],
factory=buildslave_factory("go", "go-ethereum"),
locks=[build_lock.access('counting')]),
BuilderConfig(
name="buildslave-python-%s" % buildslave,
builddir="build-buildslave-python-%s" % buildslave,
slavenames=["buildslave-%s" % buildslave],
factory=buildslave_factory("python", "pyethereum"),
locks=[build_lock.access('counting')]),
# BuilderConfig(
# name="buildslave-java-%s" % buildslave,
# builddir="build-buildslave-java-%s" % buildslave,
# slavenames=["buildslave-%s" % buildslave],
# factory=buildslave_factory("java", "ethereumj"),
# locks=[build_lock.access('counting')])
]: builders.append(builder)
# Main builders
for branch in ['master', 'develop']:
for builder in [
# BuilderConfig(
# name="Linux C++ %s branch" % branch,
# builddir="build-cpp-ethereum-%s-docker" % branch,
# slavenames=[
# "slave-cpp-three%s" % ("" if branch == 'master' else "-develop"),
# "slave-cpp-four%s" % ("" if branch == 'master' else "-develop")
# ],
# factory=cpp_ethereum_factory(branch=branch, deb=True),
# locks=[build_lock.access('counting')]),
# BuilderConfig(
# name="Linux C++ GUI %s branch" % branch,
# builddir="build-cpp-ethereum-gui-%s" % branch,
# slavenames=[
# "slave-cpp-three%s" % ("" if branch == 'master' else "-develop"),
# "slave-cpp-four%s" % ("" if branch == 'master' else "-develop")
# ],
# factory=cpp_ethereum_factory(branch=branch, deb=True, headless=False),
# locks=[build_lock.access('counting')]),
# BuilderConfig(
# name="Linux C++ %s server" % branch,
# builddir="build-cpp-ethereum-%s-server" % branch,
# slavenames=["poc-server-%s" % branch],
# factory=cpp_ethereum_server_factory(branch=branch),
# locks=[build_lock.access('counting')]),
# BuilderConfig(
# name="Linux C++ %s evmjit" % branch,
# builddir="build-cpp-ethereum-%s-evmjit" % branch,
# slavenames=[
# "slave-cpp-three%s" % ("" if branch == 'master' else "-develop"),
# "slave-cpp-four%s" % ("" if branch == 'master' else "-develop")
# ],
# factory=cpp_ethereum_factory(branch=branch, deb=True, evmjit=True),
# locks=[build_lock.access('counting')]),
BuilderConfig(
name="Linux Go %s branch" % branch,
builddir="build-go-ethereum-%s-docker" % branch,
slavenames=[
"slave-go-five%s" % ("" if branch == 'master' else "-develop"),
"slave-go-six%s" % ("" if branch == 'master' else "-develop")
],
factory=go_ethereum_factory(branch=branch, deb=True),
locks=[go_lock.access('counting')]),
BuilderConfig(
name="ARM Go %s branch" % branch,
builddir="build-go-ethereum-%s-arm" % branch,
slavenames=[
"slave-go-five-arm",
"slave-go-six-arm"
],
factory=arm_go_factory(branch=branch),
locks=[arm_lock.access('counting')]),
# BuilderConfig(
# name="OSX C++ %s branch" % branch,
# builddir="build-cpp-osx-%s" % branch,
# slavenames=["osx", "osx-two"],
# factory=osx_cpp_factory(branch=branch),
# locks=[osx_lock.access('counting')]),
# BuilderConfig(
# name="OSX C++ GUI %s branch" % branch,
# builddir="build-cpp-gui-osx-%s" % branch,
# slavenames=["osx"],
# factory=osx_cpp_factory(branch=branch, headless=False),
# locks=[osx_lock.access('counting')]),
# BuilderConfig(
# name="OSX C++ %s evmjit" % branch,
# builddir="build-cpp-osx-%s-evmjit" % branch,
# slavenames=["osx"],
# factory=osx_cpp_factory(branch=branch, evmjit=True),
# locks=[osx_lock.access('counting')]),
BuilderConfig(
name="OSX Go %s branch" % branch,
builddir="build-go-osx-%s" % branch,
slavenames=["osx", "osx-two"],
factory=osx_go_factory(branch=branch),
locks=[osx_lock.access('counting')]),
# BuilderConfig(
# name="OSX C++ %s brew" % branch,
# builddir="build-cpp-osx-%s-brew" % branch,
# slavenames=["osx", "osx-two"],
# factory=brew_cpp_factory(branch=branch),
# locks=[brew_lock.access('counting')]),
# BuilderConfig(
# name="OSX C++ GUI %s brew" % branch,
# builddir="build-cpp-gui-osx-%s-brew" % branch,
# slavenames=["osx"],
# factory=brew_cpp_factory(branch=branch, headless=False),
# locks=[brew_lock.access('counting')]),
BuilderConfig(
name="OSX Go %s El Capitan" % branch,
builddir="build-go-ethereum-%s-el-capitan" % branch,
slavenames=["osx"],
factory=brew_go_factory(branch=branch, release='el_capitan'),
locks=[brew_lock.access('counting')]),
BuilderConfig(
name="OSX Go %s Yosemite" % branch,
builddir="build-go-ethereum-%s-yosemite" % branch,
slavenames=["osx-two"],
factory=brew_go_factory(branch=branch, release='yosemite'),
locks=[brew_lock.access('counting')]),
# BuilderConfig(
# name="Windows C++ %s branch" % branch,
# builddir="build-cpp-ethereum-%s-win" % branch,
# slavenames=["winslave"],
# factory=win_cpp_factory(branch=branch),
# locks=[win_lock.access('counting')]),
BuilderConfig(
name="Windows Go %s branch" % branch,
builddir="build-go-win-%s" % branch,
slavenames=["winslave-go"],
factory=windows_go_factory(branch=branch),
locks=[win_lock_go.access('counting')]),
BuilderConfig(
name="Mist %s branch" % branch,
builddir="build-mist-%s" % branch,
slavenames=["osx", "osx-two"],
factory=mist_factory(branch=branch),
locks=[osx_lock.access('counting')]),
BuilderConfig(
name="Linux PyEthereum %s" % branch,
builddir="build-pyethereum-%s" % branch,
slavenames=["slave-python-five", "slave-python-six"],
factory=pyethereum_factory(branch=branch),
locks=[build_lock.access('counting')]),
BuilderConfig(
name="Linux Serpent %s" % branch,
builddir="build-serpent-%s" % branch,
slavenames=["slave-python-five", "slave-python-six"],
factory=serpent_factory(branch=branch),
locks=[build_lock.access('counting')]),
BuilderConfig(
name="OSX PyEthereum %s" % branch,
builddir="build-pyethereum-osx-%s" % branch,
slavenames=["osx", "osx-two"],
factory=pyethereum_factory(branch=branch),
locks=[osx_lock.access('counting')]),
BuilderConfig(
name="OSX Serpent %s" % branch,
builddir="build-serpent-osx-%s" % branch,
slavenames=["osx", "osx-two"],
factory=serpent_factory(branch=branch),
locks=[osx_lock.access('counting')]),
# Extra checks
# BuilderConfig(
# name="Linux C++ %s check" % branch,
# builddir="build-cpp-ethereum-%s-check" % branch,
# slavenames=[
# "slave-cpp-one",
# "slave-cpp-two"
# ],
# factory=cpp_check_factory(branch=branch),
# locks=[build_lock.access('counting')]),
# BuilderConfig(
# name="OSX C++ %s check" % branch,
# builddir="build-cpp-ethereum-%s-osx-check" % branch,
# slavenames=["osx"],
# factory=osx_cpp_check_factory(branch=branch),
# locks=[osx_lock.access('counting')])
]: builders.append(builder)
# deb packaging
for architecture in ['i386', 'amd64']:
for distribution in distributions:
for builder in [
# BuilderConfig(
# name="Linux C++ %s deb %s-%s" % (branch, architecture, distribution),
# builddir="build-cpp-ethereum-%s-%s-%s" % (branch, architecture, distribution),
# slavenames=latentslaves,
# factory=deb_factory(
# name="cpp-ethereum",
# repourl="https://github.com/ethereum/cpp-ethereum.git",
# ppabranch=branch,
# branch=branch,
# architecture=architecture,
# distribution=distribution),
# locks=[latent_lock.access('counting')]),
BuilderConfig(
name="Linux Go %s deb %s-%s" % (branch, architecture, distribution),
builddir="build-go-ethereum-%s-%s-%s" % (branch, architecture, distribution),
slavenames=latentslaves,
factory=deb_factory(
name="ethereum",
repourl="https://github.com/ethereum/go-ethereum.git",
ppabranch="go-ethereum%s" % ("-develop" if branch == 'develop' else ""),
branch=branch,
architecture=architecture,
distribution=distribution),
locks=[latent_lock.access('counting')])
]: builders.append(builder)
# deps deb packaging
# for architecture in ['i386', 'amd64']:
for distribution in distributions:
for builder in [
# BuilderConfig(
# name="libcryptopp %s-%s" % ("amd64", distribution),
# builddir="build-libcryptopp-%s-%s" % ("amd64", distribution),
# slavenames=["slave-cpp-one-deb", "slave-cpp-two-deb"],
# factory=deb_factory(
# name="libcryptopp",
# repourl="https://github.com/mmoss/cryptopp.git",
# ppabranch="libcrypto++",
# branch="master",
# architecture="amd64",
# distribution=distribution),
# locks=[latent_lock.access('counting')]),
# BuilderConfig(
# name="libjson-rpc-cpp %s-%s" % ("amd64", distribution),
# builddir="build-libjson-rpc-cpp-%s-%s" % ("amd64", distribution),
# slavenames=["slave-cpp-one-deb", "slave-cpp-two-deb"],
# factory=deb_factory(
# name="libjson-rpc-cpp",
# repourl="https://github.com/cinemast/libjson-rpc-cpp.git",
# ppabranch="libjson-rpc-cpp",
# branch="master",
# architecture="amd64",
# distribution=distribution),
# locks=[latent_lock.access('counting')]),
# BuilderConfig(
# name="qtwebengine %s-%s" % ("amd64", distribution),
# builddir="build-qtwebengine-%s-%s" % ("amd64", distribution),
# slavenames=["slave-cpp-one-deb", "slave-cpp-two-deb"],
# factory=deb_factory(
# name="qtwebengine-opensource-src",
# repourl="https://github.com/qtproject/qtwebengine.git",
# ppabranch="qt5webengine",
# branch="5.4.1",
# architecture="amd64",
# distribution=distribution),
# locks=[latent_lock.access('counting')]),
BuilderConfig(
name="golang %s-%s" % ("amd64", distribution),
builddir="build-golang-%s-%s" % ("amd64", distribution),
slavenames=["slave-go-five-deb", "slave-go-six-deb"],
factory=backport_factory(
name="golang",
setVersion=True,
repo="ethereum",
architecture="amd64",
distribution=distribution,
packages=["golang"]),
locks=[latent_lock.access('counting')]),
# BuilderConfig(
# name="cmake %s-%s" % ("amd64", distribution),
# builddir="build-cmake-%s-%s" % ("amd64", distribution),
# slavenames=["slave-cpp-one-deb", "slave-cpp-two-deb"],
# factory=backport_factory(
# name="cmake",
# setVersion=True,
# repo="ethereum",
# architecture="amd64",
# distribution=distribution,
# packages=["cmake"]),
# locks=[latent_lock.access('counting')])
]: builders.append(builder)
# if distribution in ['trusty']:
# for builder in [
# BuilderConfig(
# name="qt5 %s" % distribution,
# builddir="build-qt-%s" % distribution,
# slavenames=["slave-cpp-one-deb", "slave-cpp-two-deb"],
# factory=backport_factory(
# name="qt5",
# repo="ethereum-qt",
# architecture="amd64",
# distribution=distribution,
# packages=[
# "harfbuzz",
# "libinput",
# "qtbase-opensource-src",
# "qtxmlpatterns-opensource-src",
# "qtdeclarative-opensource-src",
# "qtscript-opensource-src",
# "qtwebsockets-opensource-src",
# "qtwebkit-opensource-src",
# "qttools-opensource-src",
# "qtquick1-opensource-src",
# "qtquickcontrols-opensource-src",
# "qtlocation-opensource-src"
# ]),
# locks=[latent_lock.access('counting')])
# ]: builders.append(builder)
for builder in [
BuilderConfig(
name="Linux PyEthApp",
builddir="build-pyethapp",
slavenames=["slave-python-five", "slave-python-six"],
factory=pyethapp_factory(branch='master'),
locks=[build_lock.access('counting')]),
BuilderConfig(
name="OSX PyEthApp",
builddir="build-pyethapp-osx",
slavenames=["osx", "osx-two"],
factory=pyethapp_factory(branch='master'),
locks=[osx_lock.access('counting')]),
# BuilderConfig(
# name="Linux EthereumJ",
# builddir="build-ethereumj-docker",
# slavenames=["slave-java-one", "slave-java-two"],
# factory=ethereumj_factory(),
# locks=[build_lock.access('counting')]),
# Pull requests
# Linux
# BuilderConfig(
# name="Linux C++ pull requests",
# builddir="build-cpp-ethereum-pr",
# slavenames=[
# "slave-cpp-five-pr",
# "slave-cpp-six-pr"
# ],
# factory=cpp_ethereum_factory(branch='develop', headless=False),
# locks=[build_lock.access('counting')]),
# BuilderConfig(
# name="Linux C++ evmjit pull requests",
# builddir="build-cpp-ethereum-evmjit-pr",
# slavenames=[
# "slave-cpp-five-pr",
# "slave-cpp-six-pr"
# ],
# factory=cpp_ethereum_factory(branch='develop', evmjit=True, headless=False),
# locks=[build_lock.access('counting')]),
BuilderConfig(
name="Linux Go pull requests",
builddir="build-go-ethereum-pr",
slavenames=[
"slave-go-five-pr",
"slave-go-six-pr"
],
factory=go_ethereum_factory(branch='develop'),
locks=[build_lock.access('counting')]),
BuilderConfig(
name="ARM Go pull requests",
builddir="build-go-ethereum-arm-pr",
slavenames=[
"slave-go-five-arm",
"slave-go-six-arm"
],
factory=arm_go_factory(branch='develop', isPullRequest=True),
locks=[arm_lock.access('counting')]),
BuilderConfig(
name="Linux PyEthereum PRs",
builddir="build-pyethereum-pr",
slavenames=["slave-python-five-pr", "slave-python-six-pr"],
factory=pyethereum_factory(branch='develop'),
locks=[build_lock.access('counting')]),
BuilderConfig(
name="Linux PyEthApp PRs",
builddir="build-pyethapp-pr",
slavenames=["slave-python-five-pr", "slave-python-six-pr"],
factory=pyethapp_factory(branch='master'),
locks=[build_lock.access('counting')]),
BuilderConfig(
name="Linux Serpent PRs",
builddir="build-serpent-pr",
slavenames=["slave-python-five-pr", "slave-python-six-pr"],
factory=serpent_factory(branch='develop'),
locks=[build_lock.access('counting')]),
# BuilderConfig(
# name="Linux EthereumJ PRs",
# builddir="build-ethereumj-pr",
# slavenames=["slave-java-one-pr", "slave-java-two-pr"],
# factory=ethereumj_factory(branch='master'),
# locks=[build_lock.access('counting')]),
# OSX
# BuilderConfig(
# name="OSX C++ pull requests",
# builddir="build-cpp-ethereum-osx-pr",
# slavenames=["osx", "osx-two"],
# factory=osx_cpp_factory(branch='develop', isPullRequest=True, headless=False),
# locks=[osx_lock.access('counting')]),
# BuilderConfig(
# name="OSX C++ evmjit pull requests",
# builddir="build-cpp-ethereum-osx-evmjit-pr",
# slavenames=["osx"],
# factory=osx_cpp_factory(branch=branch, isPullRequest=True, evmjit=True, headless=False),
# locks=[osx_lock.access('counting')]),
BuilderConfig(
name="OSX Go pull requests",
builddir="build-go-ethereum-osx-pr",
slavenames=["osx", "osx-two"],
factory=osx_go_factory(branch='develop', isPullRequest=True),
locks=[osx_lock.access('counting')]),
BuilderConfig(
name="Mist pull requests",
builddir="build-mist-pr",
slavenames=["osx", "osx-two"],
factory=mist_factory(branch='develop', isPullRequest=True),
locks=[osx_lock.access('counting')]),
BuilderConfig(
name="OSX PyEthereum PRs",
builddir="build-pyethereum-osx-pr",
slavenames=["osx", "osx-two"],
factory=pyethereum_factory(branch='develop'),
locks=[osx_lock.access('counting')]),
BuilderConfig(
name="OSX PyEthApp PRs",
builddir="build-pyethapp-osx-pr",
slavenames=["osx", "osx-two"],
factory=pyethapp_factory(branch='master'),
locks=[osx_lock.access('counting')]),
BuilderConfig(
name="OSX Serpent PRs",
builddir="build-serpent-osx-pr",
slavenames=["osx", "osx-two"],
factory=serpent_factory(branch='develop'),
locks=[osx_lock.access('counting')]),
# Windows
# BuilderConfig(
# name="Windows C++ pull requests",
# builddir="build-cpp-ethereum-win-pr",
# slavenames=["winslave"],
# factory=win_cpp_factory(branch='develop', isPullRequest=True),
# locks=[win_lock.access('counting')]),
BuilderConfig(
name="Windows Go pull requests",
builddir="build-go-ethereum-win-pr",
slavenames=["winslave-go"],
factory=windows_go_factory(branch='develop', isPullRequest=True),
locks=[win_lock_go.access('counting')]),
# Integration
# BuilderConfig(
# name="Linux C++ integration",
# builddir="build-cpp-ethereum-integration",
# slavenames=[
# "slave-cpp-five-integration"
# ],
# factory=integration_factory(),
# locks=[build_lock.access('counting')]),
# BuilderConfig(
# name="Linux C++ deb tester",
# builddir="build-cpp-ethereum-deb-tester",
# slavenames=latentslaves,
# factory=deb_factory(
# name="cpp-ethereum",
# repourl="https://github.com/ethereum/cpp-ethereum.git",
# ppabranch="libethereum-lite",
# branch="master",
# architecture="amd64",
# distribution="vivid",
# testdeb=True),
# locks=[latent_lock.access('counting')]),
]: builders.append(builder)
|
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
__author__ = 'John Sirois'
from collections import defaultdict
import os
from twitter.common import log
from twitter.common.dirutil import safe_open, safe_mkdir
from twitter.pants import is_apt
from twitter.pants.base.target import Target
from twitter.pants.targets import JavaLibrary, JavaTests
from twitter.pants.tasks import TaskError, Task
from twitter.pants.tasks.binary_utils import nailgun_profile_classpath
from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies
from twitter.pants.tasks.nailgun_task import NailgunTask
# Well known metadata file to auto-register annotation processors with a java 1.6+ compiler
_PROCESSOR_INFO_FILE = 'META-INF/services/javax.annotation.processing.Processor'
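# For reference (illustrative class names, not from this repo): that services file
# simply lists fully-qualified annotation processor class names, one per line, e.g.
#   com.example.apt.AutoConfigProcessor
#   com.example.apt.ThriftWrapperProcessor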
_JMAKE_MAIN = 'com.sun.tools.jmake.Main'
# From http://kenai.com/projects/jmake/sources/mercurial/content/src/com/sun/tools/jmake/Main.java?rev=26
# Main.mainExternal docs.
_JMAKE_ERROR_CODES = {
-1: 'invalid command line option detected',
-2: 'error reading command file',
-3: 'project database corrupted',
-4: 'error initializing or calling the compiler',
-5: 'compilation error',
-6: 'error parsing a class file',
-7: 'file not found',
-8: 'I/O exception',
-9: 'internal jmake exception',
-10: 'deduced and actual class name mismatch',
-11: 'invalid source file extension',
-12: 'a class in a JAR is found dependent on a class with the .java source',
-13: 'more than one entry for the same class is found in the project',
-20: 'internal Java error (caused by java.lang.InternalError)',
-30: 'internal Java error (caused by java.lang.RuntimeException).'
}
# When executed via a subprocess, return codes will be treated as unsigned
_JMAKE_ERROR_CODES.update((256+code, msg) for code, msg in _JMAKE_ERROR_CODES.items())
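# For example, a compilation error (-5) surfaces from a subprocess as 256 - 5 = 251.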
class JavaCompile(NailgunTask):
@staticmethod
def _has_java_sources(target):
return is_apt(target) or isinstance(target, JavaLibrary) or isinstance(target, JavaTests)
@classmethod
def setup_parser(cls, option_group, args, mkflag):
NailgunTask.setup_parser(option_group, args, mkflag)
option_group.add_option(mkflag("warnings"), mkflag("warnings", negate=True),
dest="java_compile_warnings", default=True,
action="callback", callback=mkflag.set_bool,
help="[%default] Compile java code with all configured warnings "
"enabled.")
option_group.add_option(mkflag("partition-size-hint"), dest="java_compile_partition_size_hint",
action="store", type="int", default=-1,
help="Roughly how many source files to attempt to compile together. Set to a large number to compile "\
"all sources together. Set this to 0 to compile target-by-target. Default is set in pants.ini.")
def __init__(self, context):
NailgunTask.__init__(self, context, workdir=context.config.get('java-compile', 'nailgun_dir'))
self._partition_size_hint = \
context.options.java_compile_partition_size_hint \
if context.options.java_compile_partition_size_hint != -1 \
else context.config.getint('java-compile', 'partition_size_hint')
workdir = context.config.get('java-compile', 'workdir')
self._classes_dir = os.path.join(workdir, 'classes')
self._resources_dir = os.path.join(workdir, 'resources')
self._depfile_dir = os.path.join(workdir, 'depfiles')
self._deps = Dependencies(self._classes_dir)
self._jmake_profile = context.config.get('java-compile', 'jmake-profile')
self._compiler_profile = context.config.get('java-compile', 'compiler-profile')
self._args = context.config.getlist('java-compile', 'args')
self._jvm_args = context.config.getlist('java-compile', 'jvm_args')
if context.options.java_compile_warnings:
self._args.extend(context.config.getlist('java-compile', 'warning_args'))
else:
self._args.extend(context.config.getlist('java-compile', 'no_warning_args'))
self._confs = context.config.getlist('java-compile', 'confs')
# The artifact cache to read from/write to.
artifact_cache_spec = context.config.getlist('java-compile', 'artifact_caches')
self.setup_artifact_cache(artifact_cache_spec)
def product_type(self):
return 'classes'
def can_dry_run(self):
return True
def execute(self, targets):
java_targets = filter(JavaCompile._has_java_sources, targets)
if java_targets:
safe_mkdir(self._classes_dir)
safe_mkdir(self._depfile_dir)
with self.context.state('classpath', []) as cp:
for conf in self._confs:
cp.insert(0, (conf, self._resources_dir))
cp.insert(0, (conf, self._classes_dir))
with self.invalidated(java_targets, invalidate_dependents=True,
partition_size_hint=self._partition_size_hint) as invalidation_check:
for vt in invalidation_check.invalid_vts_partitioned:
# Compile, using partitions for efficiency.
self.execute_single_compilation(vt, cp)
if not self.dry_run:
vt.update()
for vt in invalidation_check.all_vts:
depfile = self.create_depfile_path(vt.targets)
if not self.dry_run and os.path.exists(depfile):
# Read in the deps created either just now or by a previous run on these targets.
deps = Dependencies(self._classes_dir)
deps.load(depfile)
self._deps.merge(deps)
if not self.dry_run:
if self.context.products.isrequired('classes'):
genmap = self.context.products.get('classes')
# Map generated classes to the owning targets and sources.
for target, classes_by_source in self._deps.findclasses(java_targets).items():
for source, classes in classes_by_source.items():
genmap.add(source, self._classes_dir, classes)
genmap.add(target, self._classes_dir, classes)
# TODO(John Sirois): Map target.resources in the same way
# 'Map' (rewrite) annotation processor service info files to the owning targets.
for target in java_targets:
if is_apt(target) and target.processors:
basedir = os.path.join(self._resources_dir, target.id)
processor_info_file = os.path.join(basedir, _PROCESSOR_INFO_FILE)
self.write_processor_info(processor_info_file, target.processors)
genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])
def execute_single_compilation(self, vt, cp):
depfile = self.create_depfile_path(vt.targets)
self.merge_depfile(vt) # Get what we can from previous builds.
self.context.log.info('Compiling targets %s' % str(vt.targets))
sources_by_target, processors, fingerprint = self.calculate_sources(vt.targets)
if sources_by_target:
sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
if not sources:
self.context.log.warn('Skipping java compile for targets with no sources:\n %s' %
'\n '.join(str(t) for t in sources_by_target.keys()))
else:
classpath = [jar for conf, jar in cp if conf in self._confs]
result = self.compile(classpath, sources, fingerprint, depfile)
if result != 0:
default_message = 'Unexpected error - %s returned %d' % (_JMAKE_MAIN, result)
raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))
self.split_depfile(vt)
# NOTE: Currently all classfiles go into one global classes_dir. If we compile in
# multiple partitions, the second one will cache all the classes of the first one.
# This won't result in an error, but it is wasteful. Currently, however, Java
# compilation is done in a single pass, so this won't occur in practice.
# TODO: Handle this case better. Separate classes dirs for each partition, like for scala?
artifact_files = [self._classes_dir, depfile]
if processors and not self.dry_run:
# Produce a monolithic apt processor service info file for further compilation rounds
# and the unit test classpath.
processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
if os.path.exists(processor_info_file):
with safe_open(processor_info_file, 'r') as f:
for processor in f:
processors.add(processor.strip())
self.write_processor_info(processor_info_file, processors)
artifact_files.append(processor_info_file)
if self._artifact_cache and self.context.options.write_to_artifact_cache:
self.update_artifact_cache(vt, artifact_files)
def create_depfile_path(self, targets):
compilation_id = Target.maybe_readable_identify(targets)
return os.path.join(self._depfile_dir, compilation_id) + '.dependencies'
def calculate_sources(self, targets):
sources = defaultdict(set)
processors = set()
def collect_sources(target):
src = (os.path.join(target.target_base, source)
for source in target.sources if source.endswith('.java'))
if src:
sources[target].update(src)
if is_apt(target) and target.processors:
processors.update(target.processors)
for target in targets:
collect_sources(target)
return sources, processors, Target.identify(targets)
def compile(self, classpath, sources, fingerprint, depfile):
jmake_classpath = nailgun_profile_classpath(self, self._jmake_profile)
args = [
'-classpath', ':'.join(classpath),
'-d', self._classes_dir,
'-pdb', os.path.join(self._classes_dir, '%s.dependencies.pdb' % fingerprint),
]
compiler_classpath = nailgun_profile_classpath(self, self._compiler_profile)
args.extend([
'-jcpath', ':'.join(compiler_classpath),
'-jcmainclass', 'com.twitter.common.tools.Compiler',
'-C-Tdependencyfile', '-C%s' % depfile,
])
args.extend(self._args)
args.extend(sources)
log.debug('Executing: %s %s' % (_JMAKE_MAIN, ' '.join(args)))
return self.runjava(_JMAKE_MAIN, classpath=jmake_classpath, args=args, jvmargs=self._jvm_args)
def check_artifact_cache(self, vts):
# Special handling for java artifacts.
cached_vts, uncached_vts = Task.check_artifact_cache(self, vts)
for vt in cached_vts:
self.split_depfile(vt)
return cached_vts, uncached_vts
def split_depfile(self, vt):
depfile = self.create_depfile_path(vt.targets)
if len(vt.targets) <= 1 or not os.path.exists(depfile) or self.dry_run:
return
deps = Dependencies(self._classes_dir)
deps.load(depfile)
classes_by_source_by_target = deps.findclasses(vt.targets)
for target in vt.targets:
classes_by_source = classes_by_source_by_target.get(target, {})
dst_depfile = self.create_depfile_path([target])
dst_deps = Dependencies(self._classes_dir)
for source, classes in classes_by_source.items():
src = os.path.join(target.target_base, source)
dst_deps.add(src, classes)
dst_deps.save(dst_depfile)
# Merges individual target depfiles into a single one for all those targets.
# Note that the merged depfile may be incomplete (e.g., if the previous build was aborted).
# TODO: Is this even necessary? JMake will stomp these anyway on success.
def merge_depfile(self, versioned_target_set):
if len(versioned_target_set.targets) <= 1:
return
dst_depfile = self.create_depfile_path(versioned_target_set.targets)
dst_deps = Dependencies(self._classes_dir)
for target in versioned_target_set.targets:
src_depfile = self.create_depfile_path([target])
if os.path.exists(src_depfile):
src_deps = Dependencies(self._classes_dir)
src_deps.load(src_depfile)
dst_deps.merge(src_deps)
dst_deps.save(dst_depfile)
def write_processor_info(self, processor_info_file, processors):
with safe_open(processor_info_file, 'w') as f:
for processor in processors:
f.write('%s\n' % processor)
|
|
#!/usr/bin/env python
#===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""
test_tile_record.py - tests for the TileRecord class
"""
# pylint: disable=too-many-public-methods
import re
import random
import os
import numpy as np
from EOtools.execute import execute
import logging
import sys
import unittest
from agdc import dbutil
#import landsat_bandstack
from agdc.abstract_ingester import AbstractIngester
from agdc.abstract_ingester import IngesterDataCube
from landsat_dataset import LandsatDataset
from test_landsat_tiler import TestLandsatTiler
import ingest_test_data as TestIngest
from test_tile_contents import TestTileContents
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
#
# Constants
#
class TestArgs(object):
"""The sole instance of this class stores the config_path and debug
arguments for passing to the datacube constructor."""
pass
class TestIngester(AbstractIngester):
"""An ingester class from which to get a datacube object"""
def __init__(self, datacube):
AbstractIngester.__init__(self, datacube)
def find_datasets(self, source_dir):
pass
def open_dataset(self, dataset_path):
pass
class TestTileRecord(unittest.TestCase):
"""Unit tests for the TileRecord class"""
# pylint: disable=too-many-instance-attributes
############################### User area #################################
MODULE = 'tile_record'
SUITE = 'TileRecord3'
# Set to True if we want to populate the expected directory with results,
# without doing a comparison. Set to False if we want to put (often
# a subset of) results in the output directory and compare them against the
# previously populated expected directory.
POPULATE_EXPECTED = True
############################################
INPUT_DIR = dbutil.input_directory(MODULE, SUITE)
OUTPUT_DIR = dbutil.output_directory(MODULE, SUITE)
EXPECTED_DIR = dbutil.expected_directory(MODULE, SUITE)
if POPULATE_EXPECTED:
destination_dir = 'expected'
else:
destination_dir = 'output'
TEMP_DIR = dbutil.temp_directory(MODULE, SUITE, destination_dir)
TILE_ROOT_DIR = dbutil.tile_root_directory(MODULE, SUITE, destination_dir)
def setUp(self):
#
# Parse out the name of the test case and use it to name a logfile
#
match = re.search(r'\.([^\.]+)$', self.id())
if match:
name = match.group(1)
else:
name = 'TestIngester'
logfile_name = "%s.log" % name
self.logfile_path = os.path.join(self.OUTPUT_DIR, logfile_name)
self.expected_path = os.path.join(self.EXPECTED_DIR, logfile_name)
if self.POPULATE_EXPECTED:
self.logfile_path = os.path.join(self.EXPECTED_DIR, logfile_name)
#
# Set up a handler to log to the logfile, and attach it to the
# root logger.
#
#logging.basicConfig()
self.handler = logging.FileHandler(self.logfile_path, mode='w')
self.handler.setLevel(logging.INFO)
self.handler.setFormatter(logging.Formatter('%(message)s'))
LOGGER.addHandler(self.handler)
# Add a streamhandler to write output to console
self.stream_handler = logging.StreamHandler(stream=sys.stdout)
self.stream_handler.setLevel(logging.INFO)
self.stream_handler.setFormatter(logging.Formatter('%(message)s'))
LOGGER.addHandler(self.stream_handler)
# Create an empty database
self.test_conn = None
self.test_dbname = dbutil.random_name("test_tile_record")
LOGGER.info('Creating %s', self.test_dbname)
dbutil.TESTSERVER.create(self.test_dbname,
self.INPUT_DIR, "hypercube_empty.sql")
# Set the datacube configuration file to point to the empty database
configuration_dict = {'dbname': self.test_dbname,
'temp_dir': self.TEMP_DIR,
'tile_root': self.TILE_ROOT_DIR}
config_file_path = dbutil.update_config_file2(configuration_dict,
self.INPUT_DIR,
self.OUTPUT_DIR,
"test_datacube.conf")
# Set an instance of the datacube and pass it to an ingester instance
test_args = TestArgs()
test_args.config_file = config_file_path
test_args.debug = False
test_datacube = IngesterDataCube(test_args)
self.ingester = TestIngester(datacube=test_datacube)
self.collection = self.ingester.collection
def tearDown(self):
#
# Flush the handler and remove it from the root logger.
#
self.handler.flush()
self.stream_handler.flush()
if self.test_dbname:
if self.POPULATE_EXPECTED:
dbutil.TESTSERVER.save(self.test_dbname, self.EXPECTED_DIR,
'hypercube_tile_record.sql')
else:
# TODO: compare the database against the expected benchmark
pass
LOGGER.info('About to drop %s', self.test_dbname)
dbutil.TESTSERVER.drop(self.test_dbname)
LOGGER.removeHandler(self.handler)
LOGGER.removeHandler(self.stream_handler)
def xxxtest_insert_tile_record(self):
"""Test the Landsat tiling process method by comparing output to a
file on disk."""
# pylint: disable=too-many-locals
# Test a single dataset for tile_record creation
processing_level = 'PQA'
dataset_path = TestIngest.DATASETS_TO_INGEST[processing_level][0]
LOGGER.info('Testing Dataset %s', dataset_path)
dset = LandsatDataset(dataset_path)
# Create a DatasetRecord instance so that we can access its
# list_tile_types() method. In doing this we need to create a
# collection object and entries on the acquisition and dataset
# tables of the database.
self.collection.begin_transaction()
acquisition = \
self.collection.create_acquisition_record(dset)
dset_record = acquisition.create_dataset_record(dset)
# Get tile types
dummy_tile_type_list = dset_record.list_tile_types()
# Assume dataset has tile_type = 1 only:
tile_type_id = 1
dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
ls_bandstack = dset.stack_bands(dataset_bands_dict)
temp_dir = os.path.join(self.ingester.datacube.tile_root,
'ingest_temp')
# Form scene vrt
ls_bandstack.buildvrt(self.collection.get_temp_tile_directory())
# Reproject scene data onto selected tile coverage
tile_footprint_list = dset_record.get_coverage(tile_type_id)
LOGGER.info('coverage=%s', str(tile_footprint_list))
for tile_footprint in tile_footprint_list:
tile_contents = \
self.collection.create_tile_contents(tile_type_id,
tile_footprint,
ls_bandstack)
LOGGER.info('reprojecting for %s tile %s',
processing_level, str(tile_footprint))
#Need to call reproject to set tile_contents.tile_extents
tile_contents.reproject()
if tile_contents.has_data():
dummy_tile_record = \
dset_record.create_tile_record(tile_contents)
self.collection.commit_transaction()
#TODO compare database with expected
def test_aaa(self):
pass
def test_bbb(self):
pass
def test_make_mosaics(self):
"""Make mosaic tiles from two adjoining scenes."""
# pylint: disable=too-many-locals
dataset_list = \
[TestIngest.DATASETS_TO_INGEST[level][i] for i in range(6)
for level in ['PQA', 'NBAR', 'ORTHO']]
dataset_list.extend(TestIngest.MOSAIC_SOURCE_NBAR)
dataset_list.extend(TestIngest.MOSAIC_SOURCE_PQA)
dataset_list.extend(TestIngest.MOSAIC_SOURCE_ORTHO)
random.shuffle(dataset_list)
LOGGER.info("Ingesting following datasets:")
for dset in dataset_list:
LOGGER.info('%d) %s', dataset_list.index(dset), dset)
for dataset_path in dataset_list:
LOGGER.info('Ingesting Dataset %d:\n%s',
dataset_list.index(dataset_path), dataset_path)
dset = LandsatDataset(dataset_path)
self.collection.begin_transaction()
acquisition = \
self.collection.create_acquisition_record(dset)
dset_record = acquisition.create_dataset_record(dset)
# Get tile types
dummy_tile_type_list = dset_record.list_tile_types()
# Assume dataset has tile_type = 1 only:
tile_type_id = 1
dataset_bands_dict = dset_record.get_tile_bands(tile_type_id)
ls_bandstack = dset.stack_bands(dataset_bands_dict)
temp_dir = os.path.join(self.ingester.datacube.tile_root,
'ingest_temp')
# Form scene vrt
ls_bandstack.buildvrt(temp_dir)
# Reproject scene data onto selected tile coverage
tile_footprint_list = dset_record.get_coverage(tile_type_id)
LOGGER.info('coverage=%s', str(tile_footprint_list))
for tile_ftprint in tile_footprint_list:
#Only do that footprint for which we have benchmark mosaics
if tile_ftprint not in [(141, -38)]:
continue
tile_contents = \
self.collection.create_tile_contents(tile_type_id,
tile_ftprint,
ls_bandstack)
LOGGER.info('Calling reproject for %s tile %s...',
dset_record.mdd['processing_level'], tile_ftprint)
tile_contents.reproject()
LOGGER.info('...finished')
if tile_contents.has_data():
LOGGER.info('tile %s has data',
tile_contents.temp_tile_output_path)
tile_record = dset_record.create_tile_record(tile_contents)
mosaic_required = tile_record.make_mosaics()
if not mosaic_required:
continue
# Test mosaic tiles against the benchmark.
# At this stage, the transaction for this dataset has not yet been
# committed, so the tiles from this dataset, including
# any mosaics, are still in the temporary location.
if self.POPULATE_EXPECTED:
continue
mosaic_benchmark = \
TestTileContents.swap_dir_in_path(tile_contents
.mosaic_final_pathname,
'output',
'expected')
mosaic_new = tile_contents.mosaic_temp_pathname
LOGGER.info("Comparing test output with benchmark:\n"\
"benchmark: %s\ntest output: %s",
mosaic_benchmark, mosaic_new)
if dset_record.mdd['processing_level'] == 'PQA':
LOGGER.info("For PQA mosaic, calling load_and_check...")
([data1, data2], dummy_nlayers) = \
TestLandsatTiler.load_and_check(
mosaic_benchmark,
mosaic_new,
tile_contents.band_stack.band_dict,
tile_contents.band_stack.band_dict)
LOGGER.info('Checking arrays ...')
if ~(data1 == data2).all():
self.fail("Difference in PQA mosaic "
"from expected result: %s and %s"
%(mosaic_benchmark, mosaic_new))
# Check that differences are due to differing treatment
# of contiguity bit.
else:
diff_cmd = ["diff",
"-I",
"[Ff]ilename",
"%s" %mosaic_benchmark,
"%s" %mosaic_new
]
result = execute(diff_cmd, shell=False)
assert result['stdout'] == '', \
"Differences between vrt files"
assert result['stderr'] == '', \
"Error in system diff command"
else:
LOGGER.info('... tile has no data')
tile_contents.remove()
self.collection.commit_transaction()
def the_suite():
"Runs the tests"""
test_classes = [TestTileRecord]
suite_list = map(unittest.defaultTestLoader.loadTestsFromTestCase,
test_classes)
suite = unittest.TestSuite(suite_list)
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(the_suite())
|
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
import _ni_support
import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64):
"""
Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
Parameters
----------
input : array_like
The input array.
order : int, optional
The order of the spline, default is 3.
axis : int, optional
The axis along which the spline filter is applied. Default is the last
axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array. Default is `numpy.float64`.
Returns
-------
return_value : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
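# Hedged usage sketch (not part of the original module); only the shape is asserted,
# the filtered coefficient values themselves are an implementation detail.
# >>> import numpy as np
# >>> from scipy import ndimage
# >>> a = np.arange(12.).reshape((4, 3))
# >>> ndimage.spline_filter1d(a, order=3, axis=-1).shape
# (4, 3)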
def spline_filter(input, order=3, output = numpy.float64):
"""
Multi-dimensional spline filter.
For more details, see `spline_filter1d`.
See Also
--------
spline_filter1d
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
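# Hedged usage sketch: spline_filter is spline_filter1d applied along every axis
# in turn, so the output again matches the input shape.
# >>> import numpy as np
# >>> from scipy import ndimage
# >>> a = np.arange(12.).reshape((4, 3))
# >>> ndimage.spline_filter(a, order=3).shape
# (4, 3)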
def geometric_transform(input, mapping, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True,
extra_arguments=(), extra_keywords={}):
"""
Apply an arbitrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
Parameters
----------
input : array_like
The input array.
mapping : callable
A callable object that accepts a tuple of length equal to the output
array rank, and returns the corresponding input coordinates as a tuple
of length equal to the input array rank.
output_shape : tuple of ints
Shape tuple.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
extra_arguments : tuple, optional
Extra arguments passed to `mapping`.
extra_keywords : dict, optional
Extra keywords passed to `mapping`.
Returns
-------
return_value : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
See Also
--------
map_coordinates, affine_transform, spline_filter1d
Examples
--------
>>> a = np.arange(12.).reshape((4, 3))
>>> def shift_func(output_coords):
... return (output_coords[0] - 0.5, output_coords[1] - 0.5)
...
>>> sp.ndimage.geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.362, 2.738],
[ 0. , 4.812, 6.187],
[ 0. , 8.263, 9.637]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array.
coordinates : array_like
The coordinates at which `input` is evaluated.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the output is
derived from that of `coordinates` by dropping the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> import scipy.ndimage
>>> a = np.arange(12.).reshape((4, 3))
>>> a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
array([ 2.,  7.])
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset=0.0, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode.
Parameters
----------
input : ndarray
The input array.
matrix : ndarray
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is assumed
that the matrix is diagonal. A more efficient algorithm is then
applied that exploits the separability of the problem.
offset : float or sequence, optional
The offset into the array where the transform is applied. If a float,
`offset` is the same for each axis. If a sequence, `offset` should
contain one value for each axis.
output_shape : tuple of ints, optional
Shape tuple.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The transformed input. If `output` is given as a parameter, None is
returned.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
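# Hedged usage sketch (assumed inputs, output values not asserted): an identity
# matrix combined with an offset behaves like a pure translation, and with the
# default output_shape the result keeps the input shape.
# >>> import numpy as np
# >>> from scipy import ndimage
# >>> a = np.arange(12.).reshape((4, 3))
# >>> ndimage.affine_transform(a, np.eye(2), offset=(1, 0), order=1).shape
# (4, 3)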
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Shift an array.
The array is shifted using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode.
Parameters
----------
input : ndarray
The input array.
shift : float or sequence, optional
The shift along the axes. If a float, `shift` is the same for each
axis. If a sequence, `shift` should contain one value for each axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The shifted input. If `output` is given as a parameter, None is
returned.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
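# Hedged usage sketch: shifting by one row pushes the first output row outside
# the input, so it is filled with cval (0.0 by default).
# >>> import numpy as np
# >>> from scipy import ndimage
# >>> a = np.arange(12.).reshape((4, 3))
# >>> shifted = ndimage.shift(a, (1, 0), order=1)
# >>> shifted[0]
# array([ 0.,  0.,  0.])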
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Parameters
----------
input : ndarray
The input array.
zoom : float or sequence, optional
The zoom factor along the axes. If a float, `zoom` is the same for each
axis. If a sequence, `zoom` should contain one value for each axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The zoomed input. If `output` is given as a parameter, None is
returned.
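    Examples
    --------
    A small, illustrative sketch (hypothetical values; only the output shape is
    shown, which follows directly from ``int(size * zoom)`` per axis):

    >>> import numpy as np
    >>> a = np.arange(9, dtype=np.float64).reshape(3, 3)
    >>> zoom(a, 2.0, order=1).shape
    (6, 6)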
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes=(1, 0), reshape=True,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Rotate an array.
The array is rotated in the plane defined by the two axes given by the
`axes` parameter using spline interpolation of the requested order.
Parameters
----------
input : ndarray
The input array.
angle : float
The rotation angle in degrees.
axes : tuple of 2 ints, optional
The two axes that define the plane of rotation. Default is the first
two axes.
reshape : bool, optional
If `reshape` is true, the output shape is adapted so that the input
array is contained completely in the output. Default is True.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is pre-filtered with
`spline_filter` before interpolation (necessary for spline
interpolation of order > 1). If False, it is assumed that the input is
already filtered. Default is True.
Returns
-------
return_value : ndarray or None
The rotated input. If `output` is given as a parameter, None is
returned.
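    Examples
    --------
    A small, illustrative sketch (hypothetical values; only output shapes are
    shown):

    >>> import numpy as np
    >>> a = np.zeros((2, 3), dtype=np.float64)
    >>> rotate(a, 90).shape             # reshape=True grows the output to fit
    (3, 2)
    >>> rotate(a, 90, reshape=False).shape
    (2, 3)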
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
    if axes[0] < 0 or axes[1] < 0 or axes[0] >= rank or axes[1] >= rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
shape=output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, output,
order, mode, cval, prefilter)
else:
coordinates = []
size = numpy.product(input.shape,axis=0)
size //= input.shape[axes[0]]
size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = range(input.ndim)
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
|
|
"""
============================================
Tractography Clustering - Available Features
============================================
This page lists available features that can be used by the tractography
clustering framework. For every feature a brief description is provided
explaining: what it does, when it's useful and how to use it. If you are not
familiar with the tractography clustering framework, read the
:ref:`clustering-framework` first.
.. contents:: Available Features
:local:
:depth: 1
**Note:**
All examples assume a function `get_streamlines` exists. We define such a
function here; it imports the necessary modules and loads a small
streamline bundle.
"""
def get_streamlines():
from nibabel import trackvis as tv
from dipy.data import get_data
fname = get_data('fornix')
streams, hdr = tv.read(fname)
streamlines = [i[0] for i in streams]
return streamlines
"""
.. _clustering-examples-IdentityFeature:
Identity Feature
================
**What:** Instances of `IdentityFeature` simply return the streamlines
unaltered. In other words the features are the original data.
**When:** The QuickBundles algorithm requires streamlines to have the same
number of points. If this is the case for your streamlines, you can tell
QuickBundles not to perform resampling (see the following example). Clustering
should then be faster than with the default behaviour of QuickBundles, since no
resampling is needed; how much faster depends strongly on the number of points
your streamlines have. By default, QuickBundles resamples
streamlines so that they have 12 points each [Garyfallidis12]_.
*Unless stated otherwise, it is the default feature used by `Metric` objects
in the clustering framework.*
"""
from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import IdentityFeature
from dipy.segment.metric import AveragePointwiseEuclideanMetric
# Get some streamlines.
streamlines = get_streamlines() # Previously defined.
# Make sure our streamlines have the same number of points.
from dipy.tracking.streamline import set_number_of_points
streamlines = set_number_of_points(streamlines, nb_points=12)
# Create an instance of `IdentityFeature` and tell metric to use it.
feature = IdentityFeature()
metric = AveragePointwiseEuclideanMetric(feature=feature)
qb = QuickBundles(threshold=10., metric=metric)
clusters = qb.cluster(streamlines)
print("Nb. clusters:", len(clusters))
print("Cluster sizes:", list(map(len, clusters)))
"""
::
Nb. clusters: 4
Cluster sizes: [64, 191, 47, 1]
.. _clustering-examples-ResampleFeature:
Resample Feature
================
**What:** Instances of `ResampleFeature` resample streamlines to a
predetermined number of points. The resampling is done on the fly such that
there are no permanent modifications made to your streamlines.
**When:** The QuickBundles algorithm requires streamlines to have the same
number of points. By default, QuickBundles uses `ResampleFeature` to resample
streamlines so that they have 12 points each [Garyfallidis12]_. If you want to
use a different number of points for the resampling, you should provide your
own instance of `ResampleFeature` (see following example).
**Note:** Resampling streamlines has an impact on clustering results both in
terms of speed and quality. Setting the number of points too low will result in
a loss of information about the shape of the streamlines. Conversely,
setting the number of points too high will slow down the clustering process.
"""
from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import ResampleFeature
from dipy.segment.metric import AveragePointwiseEuclideanMetric
# Get some streamlines.
streamlines = get_streamlines() # Previously defined.
# Streamlines will be resampled to 24 points on the fly.
feature = ResampleFeature(nb_points=24)
metric = AveragePointwiseEuclideanMetric(feature=feature) # a.k.a. MDF
qb = QuickBundles(threshold=10., metric=metric)
clusters = qb.cluster(streamlines)
print("Nb. clusters:", len(clusters))
print("Cluster sizes:", list(map(len, clusters)))
"""
::
Nb. clusters: 4
Cluster sizes: [64, 191, 44, 1]
.. _clustering-examples-CenterOfMassFeature:
Center of Mass Feature
======================
**What:** Instances of `CenterOfMassFeature` compute the center of mass (also
known as center of gravity) of a set of points. This is achieved by taking the
mean of every coordinate independently (for more information see the
`wiki page <https://en.wikipedia.org/wiki/Center_of_mass>`_).
**When:** This feature can be useful when you *only* need information about the
spatial position of a streamline.
**Note:** The computed center is not guaranteed to be an existing point in the
streamline.
"""
import numpy as np
from dipy.viz import fvtk
from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import CenterOfMassFeature
from dipy.segment.metric import EuclideanMetric
# Get some streamlines.
streamlines = get_streamlines() # Previously defined.
feature = CenterOfMassFeature()
metric = EuclideanMetric(feature)
qb = QuickBundles(threshold=5., metric=metric)
clusters = qb.cluster(streamlines)
# Extract feature of every streamline.
centers = np.asarray(list(map(feature.extract, streamlines)))
# Color each center of mass according to the cluster it belongs to.
rng = np.random.RandomState(42)
colormap = fvtk.create_colormap(np.arange(len(clusters)))
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
colormap_full[cluster.indices] = color
# Visualization
ren = fvtk.ren()
fvtk.clear(ren)
ren.SetBackground(0, 0, 0)
fvtk.add(ren, fvtk.streamtube(streamlines, fvtk.colors.white, opacity=0.05))
fvtk.add(ren, fvtk.point(centers[:, 0, :], colormap_full, point_radius=0.2))
fvtk.record(ren, n_frames=1, out_path='center_of_mass_feature.png', size=(600, 600))
"""
.. figure:: center_of_mass_feature.png
:align: center
   **Showing the center of mass of each streamline, colored according to
   the QuickBundles results**.
.. _clustering-examples-MidpointFeature:
Midpoint Feature
================
**What:** Instances of `MidpointFeature` extract the middle point of a
streamline. If there is an even number of points, the feature will then
correspond to the point halfway between the two middle points.
**When:** This feature can be useful when you *only* need information about the
spatial position of a streamline. This can also be an alternative to the
`CenterOfMassFeature` if the point extracted must be on the streamline.
"""
import numpy as np
from dipy.viz import fvtk
from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import MidpointFeature
from dipy.segment.metric import EuclideanMetric
# Get some streamlines.
streamlines = get_streamlines() # Previously defined.
feature = MidpointFeature()
metric = EuclideanMetric(feature)
qb = QuickBundles(threshold=5., metric=metric)
clusters = qb.cluster(streamlines)
# Extract feature of every streamline.
midpoints = np.asarray(list(map(feature.extract, streamlines)))
# Color each midpoint according to the cluster it belongs to.
rng = np.random.RandomState(42)
colormap = fvtk.create_colormap(np.arange(len(clusters)))
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
colormap_full[cluster.indices] = color
# Visualization
ren = fvtk.ren()
fvtk.clear(ren)
ren.SetBackground(0, 0, 0)
fvtk.add(ren, fvtk.point(midpoints[:, 0, :], colormap_full, point_radius=0.2))
fvtk.add(ren, fvtk.streamtube(streamlines, fvtk.colors.white, opacity=0.05))
fvtk.record(ren, n_frames=1, out_path='midpoint_feature.png', size=(600, 600))
"""
.. figure:: midpoint_feature.png
:align: center
   **Showing the middle point of each streamline, colored according to the
   QuickBundles results**.
.. _clustering-examples-ArcLengthFeature:
ArcLength Feature
=================
**What:** Instances of `ArcLengthFeature` compute the length of a streamline.
More specifically, this feature corresponds to the sum of the lengths of all the
streamline's segments.
**When:** This feature can be useful when you *only* need information about the
length of a streamline.
"""
import numpy as np
from dipy.viz import fvtk
from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import ArcLengthFeature
from dipy.segment.metric import EuclideanMetric
# Get some streamlines.
streamlines = get_streamlines() # Previously defined.
feature = ArcLengthFeature()
metric = EuclideanMetric(feature)
qb = QuickBundles(threshold=2., metric=metric)
clusters = qb.cluster(streamlines)
# Color each streamline according to the cluster it belongs to.
colormap = fvtk.create_colormap(np.ravel(clusters.centroids))
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
colormap_full[cluster.indices] = color
# Visualization
ren = fvtk.ren()
fvtk.clear(ren)
ren.SetBackground(0, 0, 0)
fvtk.add(ren, fvtk.streamtube(streamlines, colormap_full))
fvtk.record(ren, n_frames=1, out_path='arclength_feature.png', size=(600, 600))
"""
.. figure:: arclength_feature.png
:align: center
**Showing the streamlines colored according to their length**.
.. _clustering-examples-VectorOfEndpointsFeature:
Vector Between Endpoints Feature
================================
**What:** Instances of `VectorOfEndpointsFeature` extract the vector going
from one extremity of the streamline to the other. In other words, this feature
represents the vector beginning at the first point and ending at the last point
of the streamline.
**When:** This feature can be useful when you *only* need information about the
orientation of a streamline.
**Note:** Since streamline endpoints are ambiguous (e.g. the first point could
be either the beginning or the end of the streamline), one must be careful when
using this feature.
"""
import numpy as np
from dipy.viz import fvtk
from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import VectorOfEndpointsFeature
from dipy.segment.metric import CosineMetric
# Get some streamlines.
streamlines = get_streamlines() # Previously defined.
feature = VectorOfEndpointsFeature()
metric = CosineMetric(feature)
qb = QuickBundles(threshold=0.1, metric=metric)
clusters = qb.cluster(streamlines)
# Color each streamline according to the cluster it belongs to.
colormap = fvtk.create_colormap(np.arange(len(clusters)))
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
colormap_full[cluster.indices] = color
# Visualization
ren = fvtk.ren()
fvtk.clear(ren)
ren.SetBackground(0, 0, 0)
fvtk.add(ren, fvtk.streamtube(streamlines, colormap_full))
fvtk.record(ren, n_frames=1, out_path='vector_of_endpoints_feature.png', size=(600, 600))
"""
.. figure:: vector_of_endpoints_feature.png
:align: center
**Showing the streamlines colored according to their orientation**.
.. include:: ../links_names.inc
.. [Garyfallidis12] Garyfallidis E. et al., QuickBundles, a method for
tractography simplification, Frontiers in Neuroscience, vol
6, no 175, 2012.
"""
|
|
"""
A set of utility functions to estimate the computational cost (operation counts) of various neural-network layers
"""
import math
import warnings
oneGiga = 1e9
def convolutional_layer_direct(imageSize, filterSize, padding='SAME', stride=(1, 1), inGFLOP=True,
result_format='mac'):
""" The input conforms to tensorflow tf.nn.conv2d format
:param imageSize: (image_height, image_width, in_channels), represented as a tuple
:param filterSize: (filter_height, filter_width, in_channels, out_channels)
    :param padding: 'SAME' or 'VALID'
    :param stride: (height_stride, width_stride)
    :param inGFLOP: if True, express the result in units of 1e9 operations
    :param result_format: 'mac' counts multiply-accumulates, 'op' counts individual
        operations (multiplications, additions and the bias)
    :return: The total number of operations
"""
image_height, image_width, in_channels = imageSize
assert in_channels == filterSize[2], "image size and filter size must have the same depth."
filter_height, filter_width, _, out_channels = filterSize
height_stride, width_stride = stride
if padding == 'SAME':
height_padding = (image_height - 1) * height_stride - image_height + filter_height
width_padding = (image_width - 1) * width_stride - image_width + filter_width
elif padding == 'VALID':
height_padding = 0
width_padding = 0
else:
raise ValueError('Unknown padding')
out_height = (image_height - filter_height + height_padding) // height_stride + 1
out_width = (image_width - filter_width + width_padding) // width_stride + 1
# number of operations to get one result
numOpMac = filter_height * filter_width * in_channels
numOpPerResult = 2 * numOpMac + in_channels + 1 # 1 is the bias
total_num_result = out_height * out_width * out_channels
if result_format == 'mac':
total_num_ops = total_num_result * numOpMac
elif result_format == 'op':
total_num_ops = total_num_result * numOpPerResult
else:
raise ValueError('Unknown result format')
if inGFLOP:
total_num_ops /= oneGiga
return total_num_ops
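# Illustrative usage sketch (hypothetical layer sizes, not taken from any
# particular model): a 3x3 convolution over a 224x224x64 feature map producing
# 128 output channels, with 'SAME' padding and unit stride, comes to roughly
# 3.7 GMAC when counting multiply-accumulates directly:
#
#     convolutional_layer_direct((224, 224, 64), (3, 3, 64, 128),
#                                padding='SAME', stride=(1, 1),
#                                inGFLOP=True, result_format='mac')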
def isPowerOfTwo(num):
return ((num & (num - 1)) == 0) and num > 0
def findUnitSize(size):
"""
>>> findUnitSize(3)
2
>>> findUnitSize(7)
2
"""
assert size >= 3, "It is not valid to use fft when size < 3"
F = size
start = int(math.ceil(math.log(F, 2)))
while True:
total = 2 ** start
unitSize = total + 1 - F
if unitSize >= 2:
break
start += 1
return unitSize
def convolutional_layer_fft(imageSize, filterSize, padding='SAME', stride=(1, 1), inGFLOP=True,
result_format='mac', fft_size=None):
""" The input conforms to tensorflow tf.nn.conv2d format
:param imageSize: (image_height, image_width, in_channels), represented as a tuple
:param filterSize: (filter_height, filter_width, in_channels, out_channels)
    :param padding: 'SAME' or 'VALID'
:param stride: (height_stride, width_stride)
:param fft_size: find the minimum feasible fft size if it is None, (height_fft_size, width_fft_size)
    :return: A 4-tuple of operation counts: (image FFT, element-wise image-filter
        multiply, inverse FFT, overlap-add)
"""
image_height, image_width, in_channels = imageSize
filter_height, filter_width, _, out_channels = filterSize
assert in_channels == filterSize[2], "image size and filter size must have the same depth."
if fft_size is None:
height_unit_size = findUnitSize(filter_height)
width_unit_size = findUnitSize(filter_width)
height_fft_size = height_unit_size + filter_height - 1
width_fft_size = width_unit_size + filter_width - 1
else:
height_fft_size, width_fft_size = fft_size
height_unit_size = height_fft_size + 1 - filter_height
width_unit_size = width_fft_size + 1 - filter_width
# number of operations
height_logFFTUnitSize = int(math.log(height_fft_size, 2))
width_logFFTUnitSize = int(math.log(width_fft_size, 2))
height_stride, width_stride = stride
if padding == 'SAME':
height_padding = (image_height - 1) * height_stride - image_height + filter_height
width_padding = (image_width - 1) * width_stride - image_width + filter_width
elif padding == 'VALID':
height_padding = 0
width_padding = 0
else:
raise ValueError('Unknown padding')
numTilt = int(math.ceil((image_height + height_padding) / float(height_unit_size))) * \
int(math.ceil((image_width + width_padding) / float(width_unit_size)))
    # Note that this is complex multiplication: 1 complex multiplication = 3 real multiplications
num_multiply_per_complex_multiply = 3
num_add_per_complex_add = 2
numMultImageFFT = (height_fft_size * width_fft_size * height_logFFTUnitSize +
width_fft_size * height_fft_size * width_logFFTUnitSize) * numTilt * in_channels * \
num_multiply_per_complex_multiply / 2.0
numMultIFFT = (height_fft_size * width_fft_size * height_logFFTUnitSize +
width_fft_size * height_fft_size * width_logFFTUnitSize) * numTilt * out_channels * \
num_multiply_per_complex_multiply / 2.0
# numAddImageFFT = (height_fft_size * height_logFFTUnitSize * width_fft_size +
# width_fft_size * width_logFFTUnitSize * height_fft_size) * numTilt * in_channels * \
# num_add_per_complex_add
numMultImageFilter = height_fft_size * width_fft_size * in_channels * out_channels * numTilt * \
num_multiply_per_complex_multiply
numAddInDepth = numMultImageFilter
numAddOverlap = ((filter_height - 1) * width_fft_size + (filter_width - 1) * height_fft_size) * \
                    numTilt * out_channels * 2  # multiply by 2 because each tile overlaps neighbouring tiles on its boundaries
if inGFLOP:
numMultImageFFT /= oneGiga
numMultImageFilter /= oneGiga
numMultIFFT /= oneGiga
numAddInDepth /= oneGiga
numAddOverlap /= oneGiga
if result_format == 'mac':
return numMultImageFFT, numMultImageFilter, numMultIFFT, numAddOverlap
elif result_format == 'op':
return numMultImageFFT, numMultImageFilter + numAddInDepth, numMultIFFT, numAddOverlap
else:
raise ValueError('Unknown result format')
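# Illustrative usage sketch (same hypothetical layer as above): the FFT-based
# estimate is returned split into its stages (image FFT, element-wise multiply
# with depth accumulation, inverse FFT, overlap-add), so the parts are summed
# for a total:
#
#     parts = convolutional_layer_fft((224, 224, 64), (3, 3, 64, 128),
#                                     padding='SAME', stride=(1, 1),
#                                     inGFLOP=True, result_format='op')
#     total_gflop = sum(parts)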
def convolutional_layer_winograd(imageSize, filterSize, padding='SAME', stride=(1, 1), inGFLOP=True,
result_format='mac'):
""" The input conforms to tensorflow tf.nn.conv2d format
:param imageSize: (image_height, image_width, in_channels), represented as a tuple
:param filterSize: (filter_height, filter_width, in_channels, out_channels)
    :param padding: 'SAME' or 'VALID'
    :param stride: (height_stride, width_stride)
    :return: A 3-tuple of operation counts: (input transform additions,
        element-wise multiply-accumulates, inverse transform additions)
"""
image_height, image_width, in_channels = imageSize
filter_height, filter_width, _, out_channels = filterSize
assert in_channels == filterSize[2], "image size and filter size must have the same depth."
assert filter_height == 3 and filter_width == 3, 'Winograd is only applicable to 3x3 filters'
if stride != (1, 1):
        warnings.warn('For strides other than 1, using the Winograd algorithm is generally not recommended')
height_stride, width_stride = stride
if padding == 'SAME':
height_padding = (image_height - 1) * height_stride - image_height + filter_height
width_padding = (image_width - 1) * width_stride - image_width + filter_width
elif padding == 'VALID':
height_padding = 0
width_padding = 0
else:
raise ValueError('Unknown padding')
num_tile = ((image_height + height_padding) / 2 - 1) * ((image_width + width_padding) / 2 - 1)
# for each 4x4 transform, B'dB
num_accumulation_per_transform_result = 2 # based on the paper, intermediate result can be reused
num_transform_result = 16
transform_add_per_tile = num_accumulation_per_transform_result * num_transform_result
total_transform_add = transform_add_per_tile * num_tile * in_channels
# mac
total_num_mac = num_transform_result * in_channels * num_tile * out_channels
# inverse transform
num_accumulation_per_inverse_transform_result = 6 # based on the paper, intermediate result can be reused
num_inverse_transform_result = 4
total_inverse_transform_add = num_accumulation_per_inverse_transform_result * \
num_inverse_transform_result * num_tile * out_channels
if inGFLOP:
total_transform_add /= oneGiga
total_num_mac /= oneGiga
total_inverse_transform_add /= oneGiga
if result_format == 'mac':
return total_transform_add, total_num_mac, total_inverse_transform_add
elif result_format == 'op':
return total_transform_add, total_num_mac * 2, total_inverse_transform_add
else:
raise ValueError('Unknown result format')
def fc_layer(input_width, output_width, inGFLOP=True, result_format='mac'):
total_num_mac = input_width * output_width
if inGFLOP:
total_num_mac /= oneGiga
if result_format == 'mac':
return total_num_mac
elif result_format == 'op':
return total_num_mac * 2
else:
raise ValueError('Unknown result format')
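# Illustrative usage sketch (hypothetical sizes): a 4096 -> 1000 fully connected
# layer costs about 0.004 GMAC, i.e. roughly 0.008 GFLOP when multiplications and
# additions are counted separately:
#
#     fc_layer(4096, 1000, inGFLOP=True, result_format='op')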
def batch_normalization_layer(input_shape, inGFLOP=True):
N, D = input_shape
total_num_add = 2 * N * D
total_num_multiplication = 2 * N * D
    if inGFLOP:
total_num_add /= oneGiga
total_num_multiplication /= oneGiga
return total_num_add, total_num_multiplication
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test suite for XenAPI."""
import ast
import contextlib
import datetime
import functools
import os
import re
from nova import db
from nova import context
from nova import flags
from nova import log as logging
from nova import test
from nova import utils
from nova.compute import aggregate_states
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import utils as compute_utils
from nova import exception
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import vm_utils
from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
from nova.tests.glance import stubs as glance_stubs
from nova.tests import fake_network
from nova.tests import fake_utils
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
"""
vm_utils.with_vdi_attached_here needs to be stubbed out because it
calls down to the filesystem to attach a vdi. This provides a
decorator to handle that.
"""
@functools.wraps(function)
def decorated_function(self, *args, **kwargs):
@contextlib.contextmanager
def fake_vdi_attached_here(*args, **kwargs):
fake_dev = 'fakedev'
yield fake_dev
def fake_stream_disk(*args, **kwargs):
pass
def fake_is_vdi_pv(*args, **kwargs):
return should_return
orig_vdi_attached_here = vm_utils.vdi_attached_here
orig_stream_disk = vm_utils._stream_disk
orig_is_vdi_pv = vm_utils._is_vdi_pv
try:
vm_utils.vdi_attached_here = fake_vdi_attached_here
vm_utils._stream_disk = fake_stream_disk
vm_utils._is_vdi_pv = fake_is_vdi_pv
return function(self, *args, **kwargs)
finally:
vm_utils._is_vdi_pv = orig_is_vdi_pv
vm_utils._stream_disk = orig_stream_disk
vm_utils.vdi_attached_here = orig_vdi_attached_here
return decorated_function
class XenAPIVolumeTestCase(test.TestCase):
"""Unit tests for Volume operations."""
def setUp(self):
super(XenAPIVolumeTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
self.instance_values = {'id': 1,
                                'project_id': self.project_id,
'user_id': 'fake',
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
def _create_volume(self, size='0'):
"""Create a volume object."""
vol = {}
vol['size'] = size
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['host'] = 'localhost'
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
return db.volume_create(self.context, vol)
@staticmethod
def _make_info():
return {
'driver_volume_type': 'iscsi',
'data': {
'volume_id': 1,
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
'target_portal': '127.0.0.1:3260,fake',
'target_lun': None,
                'auth_method': 'CHAP',
                'auth_username': 'fake',
                'auth_password': 'fake',
}
}
def test_mountpoint_to_number(self):
cases = {
'sda': 0,
'sdp': 15,
'hda': 0,
'hdp': 15,
'vda': 0,
'xvda': 0,
'0': 0,
'10': 10,
'vdq': -1,
'sdq': -1,
'hdq': -1,
'xvdq': -1,
}
for (input, expected) in cases.iteritems():
func = volume_utils.VolumeHelper.mountpoint_to_number
actual = func(input)
self.assertEqual(actual, expected,
'%s yielded %s, not %s' % (input, actual, expected))
def test_parse_volume_info_raise_exception(self):
"""This shows how to test helper classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = volume_utils.VolumeHelper
helper.XenAPI = session.get_imported_xenapi()
vol = self._create_volume()
# oops, wrong mount point!
self.assertRaises(volume_utils.StorageError,
helper.parse_volume_info,
self._make_info(),
'dev/sd'
)
db.volume_destroy(context.get_admin_context(), vol['id'])
def test_attach_volume(self):
"""This shows how to test Ops classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
vm = xenapi_fake.create_vm(instance.name, 'Running')
result = conn.attach_volume(self._make_info(),
instance.name, '/dev/sdc')
# check that the VM has a VBD attached to it
# Get XenAPI record for VBD
vbds = xenapi_fake.get_all('VBD')
vbd = xenapi_fake.get_record('VBD', vbds[0])
vm_ref = vbd['VM']
self.assertEqual(vm_ref, vm)
def test_attach_volume_raise_exception(self):
"""This shows how to test when exceptions are raised."""
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance.name, 'Running')
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
{'driver_volume_type': 'nonexist'},
instance.name,
'/dev/sdc')
def configure_instance(*args):
pass
class XenAPIVMTestCase(test.TestCase):
"""Unit tests for VM operations."""
def setUp(self):
super(XenAPIVMTestCase, self).setUp()
self.network = utils.import_object(FLAGS.network_manager)
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
xenapi_fake.reset()
xenapi_fake.create_local_srs()
xenapi_fake.create_local_pifs()
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_stream_disk(self.stubs)
stubs.stubout_is_vdi_pv(self.stubs)
self.stubs.Set(vmops.VMOps, '_configure_instance',
configure_instance)
stubs.stub_out_vm_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
fake_utils.stub_out_utils_execute(self.stubs)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.conn = xenapi_conn.get_connection(False)
def test_list_instances_0(self):
instances = self.conn.list_instances()
self.assertEquals(instances, [])
def test_get_rrd_server(self):
self.flags(xenapi_connection_url='myscheme://myaddress/')
server_info = vm_utils.get_rrd_server()
self.assertEqual(server_info[0], 'myscheme')
self.assertEqual(server_info[1], 'myaddress')
def test_get_diagnostics(self):
def fake_get_rrd(host, vm_uuid):
with open('xenapi/vm_rrd.xml') as f:
return re.sub(r'\s', '', f.read())
self.stubs.Set(vm_utils, 'get_rrd', fake_get_rrd)
fake_diagnostics = {
'vbd_xvdb_write': '0.0',
'memory_target': '10961792000.0000',
'memory_internal_free': '3612860.6020',
'memory': '10961792000.0000',
'vbd_xvda_write': '0.0',
'cpu0': '0.0110',
'vif_0_tx': '752.4007',
'vbd_xvda_read': '0.0',
'vif_0_rx': '4837.8805'
}
instance = self._create_instance()
expected = self.conn.get_diagnostics(instance)
self.assertDictMatch(fake_diagnostics, expected)
def test_instance_snapshot_fails_with_no_primary_vdi(self):
def create_bad_vbd(vm_ref, vdi_ref):
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': 'fake',
'currently_attached': False}
vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
self.stubs.Set(xenapi_fake, 'create_vbd', create_bad_vbd)
stubs.stubout_instance_snapshot(self.stubs)
        # Stubbing out firewall driver, as the previous stub set alters
        # XML-RPC result parsing
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
name = "MySnapshot"
self.assertRaises(exception.Error, self.conn.snapshot,
self.context, instance, name)
def test_instance_snapshot(self):
stubs.stubout_instance_snapshot(self.stubs)
stubs.stubout_is_snapshot(self.stubs)
        # Stubbing out firewall driver, as the previous stub set alters
        # XML-RPC result parsing
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
name = "MySnapshot"
template_vm_ref = self.conn.snapshot(self.context, instance, name)
# Ensure VM was torn down
vm_labels = []
for vm_ref in xenapi_fake.get_all('VM'):
vm_rec = xenapi_fake.get_record('VM', vm_ref)
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
self.assertEquals(vm_labels, [instance.name])
# Ensure VBDs were torn down
vbd_labels = []
for vbd_ref in xenapi_fake.get_all('VBD'):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
self.assertEquals(vbd_labels, [instance.name])
# Ensure VDIs were torn down
for vdi_ref in xenapi_fake.get_all('VDI'):
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
name_label = vdi_rec["name_label"]
self.assert_(not name_label.endswith('snapshot'))
def create_vm_record(self, conn, os_type, instance_id=1):
instances = conn.list_instances()
self.assertEquals(instances, [str(instance_id)])
# Get Nova record for VM
vm_info = conn.get_info({'name': instance_id})
# Get XenAPI record for VM
vms = [rec for ref, rec
in xenapi_fake.get_all_records('VM').iteritems()
if not rec['is_control_domain']]
vm = vms[0]
self.vm_info = vm_info
self.vm = vm
def check_vm_record(self, conn, check_injection=False):
# Check that m1.large above turned into the right thing.
instance_type = db.instance_type_get_by_name(conn, 'm1.large')
mem_kib = long(instance_type['memory_mb']) << 10
mem_bytes = str(mem_kib << 10)
vcpus = instance_type['vcpus']
self.assertEquals(self.vm_info['max_mem'], mem_kib)
self.assertEquals(self.vm_info['mem'], mem_kib)
self.assertEquals(self.vm['memory_static_max'], mem_bytes)
self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes)
self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes)
self.assertEquals(self.vm['VCPUs_max'], str(vcpus))
self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus))
# Check that the VM is running according to Nova
self.assertEquals(self.vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to XenAPI.
self.assertEquals(self.vm['power_state'], 'Running')
if check_injection:
xenstore_data = self.vm['xenstore_data']
key = 'vm-data/networking/DEADBEEF0000'
xenstore_value = xenstore_data[key]
tcpip_data = ast.literal_eval(xenstore_value)
self.assertEquals(tcpip_data,
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'dhcp_server': '192.168.0.1',
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})
def check_vm_params_for_windows(self):
self.assertEquals(self.vm['platform']['nx'], 'true')
self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'})
self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order')
# check that these are not set
self.assertEquals(self.vm['PV_args'], '')
self.assertEquals(self.vm['PV_bootloader'], '')
self.assertEquals(self.vm['PV_kernel'], '')
self.assertEquals(self.vm['PV_ramdisk'], '')
def check_vm_params_for_linux(self):
self.assertEquals(self.vm['platform']['nx'], 'false')
self.assertEquals(self.vm['PV_args'], '')
self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
# check that these are not set
self.assertEquals(self.vm['PV_kernel'], '')
self.assertEquals(self.vm['PV_ramdisk'], '')
self.assertEquals(self.vm['HVM_boot_params'], {})
self.assertEquals(self.vm['HVM_boot_policy'], '')
def check_vm_params_for_linux_with_external_kernel(self):
self.assertEquals(self.vm['platform']['nx'], 'false')
self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1')
self.assertNotEquals(self.vm['PV_kernel'], '')
self.assertNotEquals(self.vm['PV_ramdisk'], '')
# check that these are not set
self.assertEquals(self.vm['HVM_boot_params'], {})
self.assertEquals(self.vm['HVM_boot_policy'], '')
def _list_vdis(self):
url = FLAGS.xenapi_connection_url
username = FLAGS.xenapi_connection_username
password = FLAGS.xenapi_connection_password
session = xenapi_conn.XenAPISession(url, username, password)
return session.call_xenapi('VDI.get_all')
def _check_vdis(self, start_list, end_list):
for vdi_ref in end_list:
            if vdi_ref not in start_list:
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
# If the cache is turned on then the base disk will be
# there even after the cleanup
if 'other_config' in vdi_rec:
if vdi_rec['other_config']['image-id'] is None:
self.fail('Found unexpected VDI:%s' % vdi_ref)
else:
self.fail('Found unexpected VDI:%s' % vdi_ref)
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
hostname="test", architecture="x86-64", instance_id=1,
check_injection=False,
create_record=True, empty_dns=False):
stubs.stubout_loopingcall_start(self.stubs)
if create_record:
instance_values = {'id': instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': image_ref,
'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id,
'root_gb': 20,
'instance_type_id': instance_type_id,
'os_type': os_type,
'hostname': hostname,
'architecture': architecture}
instance = db.instance_create(self.context, instance_values)
else:
instance = db.instance_get(self.context, instance_id)
network_info = [({'bridge': 'fa0', 'id': 0,
'injected': True,
'cidr': '192.168.0.0/24',
'cidr_v6': 'dead:beef::1/120',
},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'dhcp_server': '192.168.0.1',
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
if empty_dns:
network_info[0][1]['dns'] = []
image_meta = {'id': glance_stubs.FakeGlance.IMAGE_VHD,
'disk_format': 'vhd'}
self.conn.spawn(self.context, instance, image_meta, network_info)
self.create_vm_record(self.conn, os_type, instance_id)
self.check_vm_record(self.conn, check_injection)
self.assertTrue(instance.os_type)
self.assertTrue(instance.architecture)
def test_spawn_empty_dns(self):
"""Test spawning with an empty dns list"""
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
empty_dns=True)
self.check_vm_params_for_linux()
def test_spawn_not_enough_memory(self):
self.assertRaises(exception.InsufficientFreeMemory,
self._test_spawn,
1, 2, 3, "4") # m1.xlarge
def test_spawn_fail_cleanup_1(self):
"""Simulates an error while downloading an image.
Verifies that VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
stubs.stubout_fetch_image_glance_disk(self.stubs, raise_failure=True)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
self._check_vdis(vdi_recs_start, vdi_recs_end)
def test_spawn_fail_cleanup_2(self):
"""Simulates an error while creating VM record.
It verifies that VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
stubs.stubout_create_vm(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
self._check_vdis(vdi_recs_start, vdi_recs_end)
@stub_vm_utils_with_vdi_attached_here
def test_spawn_raw_glance(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_linux(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_swapdisk(self):
# Change the default host_call_plugin to one that'll return
# a swap disk
orig_func = stubs.FakeSessionForVMTests.host_call_plugin
_host_call_plugin = stubs.FakeSessionForVMTests.host_call_plugin_swap
stubs.FakeSessionForVMTests.host_call_plugin = _host_call_plugin
# Stubbing out firewall driver as previous stub sets a particular
# stub for async plugin calls
stubs.stubout_firewall_driver(self.stubs, self.conn)
try:
# We'll steal the above glance linux test
self.test_spawn_vhd_glance_linux()
finally:
# Make sure to put this back
stubs.FakeSessionForVMTests.host_call_plugin = orig_func
# We should have 2 VBDs.
self.assertEqual(len(self.vm['VBDs']), 2)
# Now test that we have 1.
self.tearDown()
self.setUp()
self.test_spawn_vhd_glance_linux()
self.assertEqual(len(self.vm['VBDs']), 1)
def test_spawn_vhd_glance_windows(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_iso_glance(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_ISO, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
stubs.stubout_fetch_image_glance_disk(self.stubs)
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK)
self.check_vm_params_for_linux_with_external_kernel()
def test_spawn_netinject_file(self):
self.flags(flat_injected=True)
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _tee_handler(cmd, **kwargs):
input = kwargs.get('process_input', None)
self.assertNotEqual(input, None)
config = [line.strip() for line in input.split("\n")]
# Find the start of eth0 configuration and check it
index = config.index('auto eth0')
self.assertEquals(config[index + 1:index + 8], [
'iface eth0 inet static',
'address 192.168.0.100',
'netmask 255.255.255.0',
'broadcast 192.168.0.255',
'gateway 192.168.0.1',
'dns-nameservers 192.168.0.1',
''])
self._tee_executed = True
return '', ''
fake_utils.fake_execute_set_repliers([
# Capture the tee .../etc/network/interfaces command
(r'tee.*interfaces', _tee_handler),
])
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK,
check_injection=True)
self.assertTrue(self._tee_executed)
def test_spawn_netinject_xenstore(self):
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
# When mounting, create real files under the mountpoint to simulate
# files in the mounted filesystem
# mount point will be the last item of the command list
self._tmpdir = cmd[len(cmd) - 1]
LOG.debug(_('Creating files in %s to simulate guest agent') %
self._tmpdir)
os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
# Touch the file using open
open(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'), 'w').close()
return '', ''
def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
            # Umount would normally make files in the mounted filesystem
            # disappear, so do that here
LOG.debug(_('Removing simulated guest agent files in %s') %
self._tmpdir)
os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'))
os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
os.rmdir(os.path.join(self._tmpdir, 'usr'))
return '', ''
def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
self._tee_executed = True
return '', ''
fake_utils.fake_execute_set_repliers([
(r'mount', _mount_handler),
(r'umount', _umount_handler),
(r'tee.*interfaces', _tee_handler)])
self._test_spawn(1, 2, 3, check_injection=True)
# tee must not run in this case, where an injection-capable
# guest agent is detected
self.assertFalse(self._tee_executed)
def test_spawn_vlanmanager(self):
self.flags(image_service='nova.image.glance.GlanceImageService',
network_manager='nova.network.manager.VlanManager',
vlan_interface='fake0')
def dummy(*args, **kwargs):
pass
self.stubs.Set(vmops.VMOps, 'create_vifs', dummy)
# Reset network table
xenapi_fake.reset_table('network')
# Instance id = 2 will use vlan network (see db/fakes.py)
ctxt = self.context.elevated()
instance = self._create_instance(2, False)
networks = self.network.db.network_get_all(ctxt)
for network in networks:
self.network.set_network_host(ctxt, network)
self.network.allocate_for_instance(ctxt,
instance_id=2,
instance_uuid="00000000-0000-0000-0000-000000000000",
host=FLAGS.host,
vpn=None,
rxtx_factor=3,
project_id=self.project_id)
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK,
instance_id=2,
create_record=False)
# TODO(salvatore-orlando): a complete test here would require
# a check for making sure the bridge for the VM's VIF is
# consistent with bridge specified in nova db
def test_spawn_with_network_qos(self):
self._create_instance()
for vif_ref in xenapi_fake.get_all('VIF'):
vif_rec = xenapi_fake.get_record('VIF', vif_ref)
self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit')
self.assertEquals(vif_rec['qos_algorithm_params']['kbps'],
str(3 * 1024))
def test_rescue(self):
def _find_rescue_vbd_ref(*args):
return vbd
self.stubs.Set(vmops.VMOps, '_find_rescue_vbd_ref',
_find_rescue_vbd_ref)
instance = self._create_instance()
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
vm = vm_utils.VMHelper.lookup(session, instance.name)
vbd = xenapi_fake.create_vbd(vm, None)
conn = xenapi_conn.get_connection(False)
conn.rescue(self.context, instance, [], None)
def test_unrescue(self):
instance = self._create_instance()
conn = xenapi_conn.get_connection(False)
# Ensure that it will not unrescue a non-rescued instance.
self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
instance, None)
def test_finish_revert_migration(self):
instance = self._create_instance()
class VMOpsMock():
def __init__(self):
self.finish_revert_migration_called = False
def finish_revert_migration(self, instance):
self.finish_revert_migration_called = True
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
conn = xenapi_conn.get_connection(False)
conn._vmops = VMOpsMock()
conn.finish_revert_migration(instance, None)
self.assertTrue(conn._vmops.finish_revert_migration_called)
def _create_instance(self, instance_id=1, spawn=True):
"""Creates and spawns a test instance."""
stubs.stubout_loopingcall_start(self.stubs)
instance_values = {
'id': instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
instance = db.instance_create(self.context, instance_values)
network_info = [({'bridge': 'fa0', 'id': 0,
'injected': False,
'cidr': '192.168.0.0/24',
'cidr_v6': 'dead:beef::1/120',
},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'dhcp_server': '192.168.0.1',
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
image_meta = {'id': glance_stubs.FakeGlance.IMAGE_VHD,
'disk_format': 'vhd'}
if spawn:
self.conn.spawn(self.context, instance, image_meta, network_info)
return instance
class XenAPIDiffieHellmanTestCase(test.TestCase):
"""Unit tests for Diffie-Hellman code."""
def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp()
self.alice = vmops.SimpleDH()
self.bob = vmops.SimpleDH()
def test_shared(self):
alice_pub = self.alice.get_public()
bob_pub = self.bob.get_public()
alice_shared = self.alice.compute_shared(bob_pub)
bob_shared = self.bob.compute_shared(alice_pub)
self.assertEquals(alice_shared, bob_shared)
def _test_encryption(self, message):
enc = self.alice.encrypt(message)
self.assertFalse(enc.endswith('\n'))
dec = self.bob.decrypt(enc)
self.assertEquals(dec, message)
def test_encrypt_simple_message(self):
self._test_encryption('This is a simple message.')
def test_encrypt_message_with_newlines_at_end(self):
self._test_encryption('This message has a newline at the end.\n')
def test_encrypt_many_newlines_at_end(self):
self._test_encryption('Message with lotsa newlines.\n\n\n')
def test_encrypt_newlines_inside_message(self):
self._test_encryption('Message\nwith\ninterior\nnewlines.')
def test_encrypt_with_leading_newlines(self):
self._test_encryption('\n\nMessage with leading newlines.')
def test_encrypt_really_long_message(self):
self._test_encryption(''.join(['abcd' for i in xrange(1024)]))
class XenAPIMigrateInstance(test.TestCase):
"""Unit test for verifying migration-related actions."""
def setUp(self):
super(XenAPIMigrateInstance, self).setUp()
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': None,
'ramdisk_id': None,
'root_gb': 5,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
migration_values = {
'source_compute': 'nova-compute',
'dest_compute': 'nova-compute',
'dest_host': '10.127.5.114',
'status': 'post-migrating',
'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
'old_instance_type_id': 5,
'new_instance_type_id': 1
}
self.migration = db.migration_create(
context.get_admin_context(), migration_values)
fake_utils.stub_out_utils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs)
stubs.stubout_get_this_vm_uuid(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
def test_resize_xenserver_6(self):
instance = db.instance_create(self.context, self.instance_values)
called = {'resize': False}
def fake_vdi_resize(*args, **kwargs):
called['resize'] = True
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize", fake_vdi_resize)
stubs.stubout_session(self.stubs,
stubs.FakeSessionForMigrationTests,
product_version=(6, 0, 0))
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
conn._vmops._resize_instance(instance, '')
self.assertEqual(called['resize'], True)
def test_migrate_disk_and_power_off(self):
instance = db.instance_create(self.context, self.instance_values)
instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
conn = xenapi_conn.get_connection(False)
conn.migrate_disk_and_power_off(self.context, instance,
'127.0.0.1', instance_type, None)
def test_migrate_disk_and_power_off_passes_exceptions(self):
instance = db.instance_create(self.context, self.instance_values)
instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
def fake_raise(*args, **kwargs):
raise exception.MigrationError(reason='test failure')
self.stubs.Set(vmops.VMOps, "_migrate_vhd", fake_raise)
conn = xenapi_conn.get_connection(False)
self.assertRaises(exception.MigrationError,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', instance_type, None)
def test_revert_migrate(self):
instance = db.instance_create(self.context, self.instance_values)
self.called = False
self.fake_vm_start_called = False
self.fake_finish_revert_migration_called = False
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
def fake_finish_revert_migration(*args, **kwargs):
self.fake_finish_revert_migration_called = True
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize_online", fake_vdi_resize)
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
fake_finish_revert_migration)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, True)
conn.finish_revert_migration(instance, network_info)
self.assertEqual(self.fake_finish_revert_migration_called, True)
def test_finish_migrate(self):
instance = db.instance_create(self.context, self.instance_values)
self.called = False
self.fake_vm_start_called = False
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize_online", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, True)
def test_finish_migrate_no_local_storage(self):
tiny_type = instance_types.get_instance_type_by_name('m1.tiny')
tiny_type_id = tiny_type['id']
self.instance_values.update({'instance_type_id': tiny_type_id,
'root_gb': 0})
instance = db.instance_create(self.context, self.instance_values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize_online", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
def test_finish_migrate_no_resize_vdi(self):
instance = db.instance_create(self.context, self.instance_values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize_online", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway_v6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
# Resize instance would be determined by the compute call
image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=False)
class XenAPIImageTypeTestCase(test.TestCase):
"""Test ImageType class."""
def test_to_string(self):
"""Can convert from type id to type string."""
self.assertEquals(
vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
vm_utils.ImageType.KERNEL_STR)
def test_from_string(self):
"""Can convert from string to type id."""
self.assertEquals(
vm_utils.ImageType.from_string(vm_utils.ImageType.KERNEL_STR),
vm_utils.ImageType.KERNEL)
class XenAPIDetermineDiskImageTestCase(test.TestCase):
"""Unit tests for code that detects the ImageType."""
def setUp(self):
super(XenAPIDetermineDiskImageTestCase, self).setUp()
glance_stubs.stubout_glance_client(self.stubs)
class FakeInstance(object):
pass
self.fake_instance = FakeInstance()
self.fake_instance.id = 42
self.fake_instance.os_type = 'linux'
self.fake_instance.architecture = 'x86-64'
def assert_disk_type(self, image_meta, expected_disk_type):
actual = vm_utils.VMHelper.determine_disk_image_type(image_meta)
self.assertEqual(expected_disk_type, actual)
def test_machine(self):
image_meta = {'id': 'a', 'disk_format': 'ami'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK)
def test_raw(self):
image_meta = {'id': 'a', 'disk_format': 'raw'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW)
def test_vhd(self):
image_meta = {'id': 'a', 'disk_format': 'vhd'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD)
class CompareVersionTestCase(test.TestCase):
def test_less_than(self):
"""Test that cmp_version compares a as less than b"""
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0)
def test_greater_than(self):
"""Test that cmp_version compares a as greater than b"""
self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0)
def test_equal(self):
"""Test that cmp_version compares a as equal to b"""
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0)
def test_non_lexical(self):
"""Test that cmp_version compares non-lexically"""
self.assertTrue(vmops.cmp_version('1.2.3.10', '1.2.3.4') > 0)
def test_length(self):
"""Test that cmp_version compares by length as last resort"""
self.assertTrue(vmops.cmp_version('1.2.3', '1.2.3.4') < 0)
class XenAPIHostTestCase(test.TestCase):
"""Tests HostState, which holds metrics from XenServer that get
reported back to the Schedulers."""
def setUp(self):
super(XenAPIHostTestCase, self).setUp()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
xenapi_fake.reset()
xenapi_fake.create_local_srs()
self.conn = xenapi_conn.get_connection(False)
def test_host_state(self):
stats = self.conn.get_host_stats()
self.assertEquals(stats['disk_total'], 10000)
self.assertEquals(stats['disk_used'], 20000)
self.assertEquals(stats['host_memory_total'], 10)
self.assertEquals(stats['host_memory_overhead'], 20)
self.assertEquals(stats['host_memory_free'], 30)
self.assertEquals(stats['host_memory_free_computed'], 40)
def _test_host_action(self, method, action, expected=None):
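# Helper: invoke a host-level driver call with a fake host name and assert
# that the returned status string matches `expected` (defaults to the action).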
result = method('host', action)
if not expected:
expected = action
self.assertEqual(result, expected)
def test_host_reboot(self):
self._test_host_action(self.conn.host_power_action, 'reboot')
def test_host_shutdown(self):
self._test_host_action(self.conn.host_power_action, 'shutdown')
def test_host_startup(self):
self.assertRaises(NotImplementedError,
self.conn.host_power_action, 'host', 'startup')
def test_host_maintenance_on(self):
self._test_host_action(self.conn.host_maintenance_mode,
True, 'on_maintenance')
def test_host_maintenance_off(self):
self._test_host_action(self.conn.host_maintenance_mode,
False, 'off_maintenance')
def test_set_enable_host_enable(self):
self._test_host_action(self.conn.set_host_enabled, True, 'enabled')
def test_set_enable_host_disable(self):
self._test_host_action(self.conn.set_host_enabled, False, 'disabled')
class XenAPIAutoDiskConfigTestCase(test.TestCase):
def setUp(self):
super(XenAPIAutoDiskConfigTestCase, self).setUp()
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
xenapi_fake.reset()
self.conn = xenapi_conn.get_connection(False)
self.user_id = 'fake'
self.project_id = 'fake'
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
self.context = context.RequestContext(self.user_id, self.project_id)
@classmethod
def fake_create_vbd(cls, session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=True):
pass
self.stubs.Set(vm_utils.VMHelper,
"create_vbd",
fake_create_vbd)
def assertIsPartitionCalled(self, called):
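# Helper: stub out vm_utils._resize_part_and_fs, run _attach_disks on a fresh
# instance, and assert whether the partition-resize path was exercised.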
marker = {"partition_called": False}
def fake_resize_part_and_fs(dev, start, old, new):
marker["partition_called"] = True
self.stubs.Set(vm_utils, "_resize_part_and_fs",
fake_resize_part_and_fs)
instance = db.instance_create(self.context, self.instance_values)
disk_image_type = vm_utils.ImageType.DISK_VHD
vm_ref = "blah"
first_vdi_ref = "blah"
vdis = ["blah"]
self.conn._vmops._attach_disks(
instance, disk_image_type, vm_ref, first_vdi_ref, vdis)
self.assertEqual(marker["partition_called"], called)
def test_instance_not_auto_disk_config(self):
"""Should not partition unless instance is marked as
auto_disk_config.
"""
self.instance_values['auto_disk_config'] = False
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_doesnt_pass_fail_safes(self):
"""Should not partition unless fail safes pass"""
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(1, 0, 100, 'ext4'), (2, 100, 200, 'ext4')]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_passes_fail_safes(self):
"""Should partition if instance is marked as auto_disk_config=True and
virt-layer specific fail-safe checks pass.
"""
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(1, 0, 100, 'ext4')]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(True)
class XenAPIGenerateLocal(test.TestCase):
"""Test generating of local disks, like swap and ephemeral"""
def setUp(self):
super(XenAPIGenerateLocal, self).setUp()
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
xenapi_generate_swap=True,
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.reset()
self.conn = xenapi_conn.get_connection(False)
self.user_id = 'fake'
self.project_id = 'fake'
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
self.context = context.RequestContext(self.user_id, self.project_id)
@classmethod
def fake_create_vbd(cls, session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=True):
pass
self.stubs.Set(vm_utils.VMHelper,
"create_vbd",
fake_create_vbd)
def assertCalled(self, instance):
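# Helper: run _attach_disks and assert that the stubbed generate_* helper
# (installed by the calling test) flipped self.called to True.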
disk_image_type = vm_utils.ImageType.DISK_VHD
vm_ref = "blah"
first_vdi_ref = "blah"
vdis = ["blah"]
self.called = False
self.conn._vmops._attach_disks(instance, disk_image_type,
vm_ref, first_vdi_ref, vdis)
self.assertTrue(self.called)
def test_generate_swap(self):
"""Test swap disk generation."""
instance = db.instance_create(self.context, self.instance_values)
instance = db.instance_update(self.context, instance['id'],
{'instance_type_id': 5})
@classmethod
def fake_generate_swap(cls, *args, **kwargs):
self.called = True
self.stubs.Set(vm_utils.VMHelper, 'generate_swap',
fake_generate_swap)
self.assertCalled(instance)
def test_generate_ephemeral(self):
"""Test ephemeral disk generation."""
instance = db.instance_create(self.context, self.instance_values)
instance = db.instance_update(self.context, instance['id'],
{'instance_type_id': 4})
@classmethod
def fake_generate_ephemeral(cls, *args):
self.called = True
self.stubs.Set(vm_utils.VMHelper, 'generate_ephemeral',
fake_generate_ephemeral)
self.assertCalled(instance)
class XenAPIBWUsageTestCase(test.TestCase):
def setUp(self):
super(XenAPIBWUsageTestCase, self).setUp()
self.stubs.Set(vm_utils.VMHelper, "compile_metrics",
XenAPIBWUsageTestCase._fake_compile_metrics)
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
xenapi_fake.reset()
self.conn = xenapi_conn.get_connection(False)
@classmethod
def _fake_compile_metrics(cls, start_time, stop_time=None):
raise exception.CouldNotFetchMetrics()
def test_get_all_bw_usage_in_failure_case(self):
"""Test that get_all_bw_usage returns an empty list when metrics
compilation failed. c.f. bug #910045.
"""
result = self.conn.get_all_bw_usage(datetime.datetime.utcnow())
self.assertEqual(result, [])
# TODO(salvatore-orlando): this class and
# nova.tests.test_libvirt.IPTablesFirewallDriverTestCase share a lot of code.
# Consider abstracting common code in a base class for firewall driver testing.
class XenAPIDom0IptablesFirewallTestCase(test.TestCase):
_in_nat_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
':PREROUTING ACCEPT [1170:189210]',
':INPUT ACCEPT [844:71028]',
':OUTPUT ACCEPT [5149:405186]',
':POSTROUTING ACCEPT [5063:386098]',
]
_in_filter_rules = [
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ',
'-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
]
_in6_filter_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [349256:75777230]',
'COMMIT',
'# Completed on Tue Jan 18 23:47:56 2011',
]
def setUp(self):
super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
xenapi_fake.reset()
xenapi_fake.create_local_srs()
xenapi_fake.create_local_pifs()
self.user_id = 'mappin'
self.project_id = 'fake'
stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
test_case=self)
self.context = context.RequestContext(self.user_id, self.project_id)
self.network = utils.import_object(FLAGS.network_manager)
self.conn = xenapi_conn.get_connection(False)
self.fw = self.conn._vmops.firewall_driver
def _create_instance_ref(self):
return db.instance_create(self.context,
{'user_id': self.user_id,
'project_id': self.project_id,
'instance_type_id': 1})
def _create_test_security_group(self):
admin_ctxt = context.get_admin_context()
secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testgroup',
'description': 'test group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': 8,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'cidr': '192.168.10.0/24'})
return secgroup
def _validate_security_group(self):
in_rules = filter(lambda l: not l.startswith('#'),
self._in_filter_rules)
for rule in in_rules:
if not 'nova' in rule:
self.assertTrue(rule in self._out_rules,
'Rule went missing: %s' % rule)
instance_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
# last two octets change
if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
instance_chain = rule.split(' ')[-1]
break
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
break
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
regex = re.compile('-A .* -j ACCEPT -p icmp -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP acceptance rule wasn't added")
regex = re.compile('-A .* -j ACCEPT -p icmp -m icmp --icmp-type 8'
' -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
regex = re.compile('-A .* -j ACCEPT -p tcp --dport 80:81'
' -s 192.168.10.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
def test_static_filters(self):
instance_ref = self._create_instance_ref()
src_instance_ref = self._create_instance_ref()
admin_ctxt = context.get_admin_context()
secgroup = self._create_test_security_group()
src_secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testsourcegroup',
'description': 'src group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'group_id': src_secgroup['id']})
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
src_secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
network_model = fake_network.fake_get_instance_nw_info(self.stubs,
1, spectacular=True)
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
lambda *a, **kw: network_model)
network_info = compute_utils.legacy_network_info(network_model)
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
self._validate_security_group()
# Extra test for TCP acceptance rules
for ip in network_model.fixed_ips():
if ip['version'] != 4:
continue
regex = re.compile('-A .* -j ACCEPT -p tcp'
' --dport 80:81 -s %s' % ip['address'])
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['id'])
def test_filters_for_instance_with_ip_v6(self):
self.flags(use_ipv6=True)
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 1)
def test_filters_for_instance_without_ip_v6(self):
self.flags(use_ipv6=False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 0)
def test_multinic_iptables(self):
ipv4_rules_per_addr = 1
ipv4_addr_per_network = 2
ipv6_rules_per_addr = 1
ipv6_addr_per_network = 1
networks_count = 5
instance_ref = self._create_instance_ref()
_get_instance_nw_info = fake_network.fake_get_instance_nw_info
network_info = _get_instance_nw_info(self.stubs,
networks_count,
ipv4_addr_per_network)
ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
network_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
ipv4 = self.fw.iptables.ipv4['filter'].rules
ipv6 = self.fw.iptables.ipv6['filter'].rules
ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
self.assertEquals(ipv4_network_rules,
ipv4_rules_per_addr * ipv4_addr_per_network * networks_count)
self.assertEquals(ipv6_network_rules,
ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
def test_do_refresh_security_group_rules(self):
admin_ctxt = context.get_admin_context()
instance_ref = self._create_instance_ref()
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
secgroup = self._create_test_security_group()
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.instances[instance_ref['id']] = instance_ref
self._validate_security_group()
# add a rule to the security group
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'udp',
'from_port': 200,
'to_port': 299,
'cidr': '192.168.99.0/24'})
#validate the extra rule
self.fw.refresh_security_group_rules(secgroup)
regex = re.compile('-A .* -j ACCEPT -p udp --dport 200:299'
' -s 192.168.99.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"Rules were not updated properly."
"The rule for UDP acceptance is missing")
def test_provider_firewall_rules(self):
# setup basic instance data
instance_ref = self._create_instance_ref()
# FRAGILE: as in libvirt tests
# peeks at how the firewall names chains
chain_name = 'inst-%s' % instance_ref['id']
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
self.fw.prepare_instance_filter(instance_ref, network_info)
self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(0, len(rules))
admin_ctxt = context.get_admin_context()
# add a rule and send the update message, check for 1 rule
provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'tcp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
# Add another, refresh, and make sure number of rules goes to two
provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'udp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(2, len(rules))
# create the instance filter and make sure it has a jump rule
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == chain_name]
jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
provjump_rules = []
# IptablesTable doesn't make rules unique internally
for rule in jump_rules:
if 'provider' in rule.rule and rule not in provjump_rules:
provjump_rules.append(rule)
self.assertEqual(1, len(provjump_rules))
# remove a rule from the db, cast to compute to refresh rule
db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
class XenAPISRSelectionTestCase(test.TestCase):
"""Unit tests for testing we find the right SR."""
def setUp(self):
super(XenAPISRSelectionTestCase, self).setUp()
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
def test_safe_find_sr_raise_exception(self):
"""Ensure StorageRepositoryNotFound is raise when wrong filter."""
self.flags(sr_matching_filter='yadayadayada')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = vm_utils.VMHelper
helper.XenAPI = session.get_imported_xenapi()
self.assertRaises(exception.StorageRepositoryNotFound,
helper.safe_find_sr, session)
def test_safe_find_sr_local_storage(self):
"""Ensure the default local-storage is found."""
self.flags(sr_matching_filter='other-config:i18n-key=local-storage')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = vm_utils.VMHelper
helper.XenAPI = session.get_imported_xenapi()
host_ref = xenapi_fake.get_all('host')[0]
local_sr = xenapi_fake.create_sr(
name_label='Fake Storage',
type='lvm',
other_config={'i18n-original-value-name_label':
'Local storage',
'i18n-key': 'local-storage'},
host_ref=host_ref)
expected = helper.safe_find_sr(session)
self.assertEqual(local_sr, expected)
def test_safe_find_sr_by_other_criteria(self):
"""Ensure the SR is found when using a different filter."""
self.flags(sr_matching_filter='other-config:my_fake_sr=true')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = vm_utils.VMHelper
helper.XenAPI = session.get_imported_xenapi()
host_ref = xenapi_fake.get_all('host')[0]
local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
type='lvm',
other_config={'my_fake_sr': 'true'},
host_ref=host_ref)
expected = helper.safe_find_sr(session)
self.assertEqual(local_sr, expected)
def test_safe_find_sr_default(self):
"""Ensure the default SR is found regardless of other-config."""
self.flags(sr_matching_filter='default-sr:true')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = vm_utils.VMHelper
pool_ref = xenapi_fake.create_pool('')
helper.XenAPI = session.get_imported_xenapi()
expected = helper.safe_find_sr(session)
self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
expected)
class XenAPIAggregateTestCase(test.TestCase):
"""Unit tests for aggregate operations."""
def setUp(self):
super(XenAPIAggregateTestCase, self).setUp()
self.flags(xenapi_connection_url='http://test_url',
xenapi_connection_username='test_user',
xenapi_connection_password='test_pass',
instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
host='host')
xenapi_fake.reset()
host_ref = xenapi_fake.get_all('host')[0]
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.context = context.get_admin_context()
self.conn = xenapi_conn.get_connection(False)
self.fake_metadata = {'master_compute': 'host',
'host': xenapi_fake.get_record('host',
host_ref)['uuid']}
def test_add_to_aggregate_called(self):
def fake_add_to_aggregate(context, aggregate, host):
fake_add_to_aggregate.called = True
self.stubs.Set(self.conn._pool,
"add_to_aggregate",
fake_add_to_aggregate)
self.conn.add_to_aggregate(None, None, None)
self.assertTrue(fake_add_to_aggregate.called)
def test_add_to_aggregate_for_first_host_sets_metadata(self):
def fake_init_pool(id, name):
fake_init_pool.called = True
self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool)
aggregate = self._aggregate_setup()
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
result = db.aggregate_get(self.context, aggregate.id)
self.assertTrue(fake_init_pool.called)
self.assertDictMatch(self.fake_metadata, result.metadetails)
self.assertEqual(aggregate_states.ACTIVE, result.operational_state)
def test_join_slave(self):
"""Ensure join_slave gets called when the request gets to master."""
def fake_join_slave(id, compute_uuid, host, url, user, password):
fake_join_slave.called = True
self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
self.conn._pool.add_to_aggregate(self.context, aggregate, "host2",
compute_uuid='fake_uuid',
url='fake_url',
user='fake_user',
passwd='fake_pass',
xenhost_uuid='fake_uuid')
self.assertTrue(fake_join_slave.called)
def test_add_to_aggregate_first_host(self):
def fake_pool_set_name_label(self, session, pool_ref, name):
fake_pool_set_name_label.called = True
self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label",
fake_pool_set_name_label)
self.conn._session.call_xenapi("pool.create", {"name": "asdf"})
values = {"name": 'fake_aggregate',
"availability_zone": 'fake_zone'}
result = db.aggregate_create(self.context, values)
db.aggregate_host_add(self.context, result.id, "host")
aggregate = db.aggregate_get(self.context, result.id)
self.assertEqual(["host"], aggregate.hosts)
self.assertEqual({}, aggregate.metadetails)
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
self.assertTrue(fake_pool_set_name_label.called)
def test_remove_from_aggregate_called(self):
def fake_remove_from_aggregate(context, aggregate, host):
fake_remove_from_aggregate.called = True
self.stubs.Set(self.conn._pool,
"remove_from_aggregate",
fake_remove_from_aggregate)
self.conn.remove_from_aggregate(None, None, None)
self.assertTrue(fake_remove_from_aggregate.called)
def test_remove_from_empty_aggregate(self):
values = {"name": 'fake_aggregate',
"availability_zone": 'fake_zone'}
result = db.aggregate_create(self.context, values)
self.assertRaises(exception.AggregateError,
self.conn._pool.remove_from_aggregate,
None, result, "test_host")
def test_remove_slave(self):
"""Ensure eject slave gets called."""
def fake_eject_slave(id, compute_uuid, host_uuid):
fake_eject_slave.called = True
self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
self.fake_metadata['host2'] = 'fake_host2_uuid'
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
self.assertTrue(fake_eject_slave.called)
def test_remove_master_solo(self):
"""Ensure metadata are cleared after removal."""
def fake_clear_pool(id):
fake_clear_pool.called = True
self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)
aggregate = self._aggregate_setup(aggr_state=aggregate_states.ACTIVE,
metadata=self.fake_metadata)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
result = db.aggregate_get(self.context, aggregate.id)
self.assertTrue(fake_clear_pool.called)
self.assertDictMatch({}, result.metadetails)
self.assertEqual(aggregate_states.ACTIVE, result.operational_state)
def test_remove_master_non_empty_pool(self):
"""Ensure InvalidAggregateAction is raised when removing the master."""
aggregate = self._aggregate_setup(aggr_state=aggregate_states.ACTIVE,
hosts=['host', 'host2'],
metadata=self.fake_metadata)
self.assertRaises(exception.InvalidAggregateAction,
self.conn._pool.remove_from_aggregate,
self.context, aggregate, "host")
def _aggregate_setup(self, aggr_name='fake_aggregate',
aggr_zone='fake_zone',
aggr_state=aggregate_states.CREATED,
hosts=['host'], metadata=None):
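# Helper: create an aggregate with the given state, hosts and metadata, then
# return the freshly loaded DB record.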
values = {"name": aggr_name,
"availability_zone": aggr_zone,
"operational_state": aggr_state, }
result = db.aggregate_create(self.context, values)
for host in hosts:
db.aggregate_host_add(self.context, result.id, host)
if metadata:
db.aggregate_metadata_add(self.context, result.id, metadata)
return db.aggregate_get(self.context, result.id)
|
|
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
import itertools
import sys
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
HTMLAwareEntitySubstitution,
whitespace_re
)
__all__ = [
'HTMLTreeBuilder',
'SAXTreeBuilder',
'TreeBuilder',
'TreeBuilderRegistry',
]
# Some useful features for a TreeBuilder to have.
FAST = 'fast'
PERMISSIVE = 'permissive'
STRICT = 'strict'
XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'
class TreeBuilderRegistry(object):
def __init__(self):
self.builders_for_feature = defaultdict(list)
self.builders = []
def register(self, treebuilder_class):
"""Register a treebuilder based on its advertised features."""
for feature in treebuilder_class.features:
self.builders_for_feature[feature].insert(0, treebuilder_class)
self.builders.insert(0, treebuilder_class)
def lookup(self, *features):
if len(self.builders) == 0:
# There are no builders at all.
return None
if len(features) == 0:
# They didn't ask for any features. Give them the most
# recently registered builder.
return self.builders[0]
# Go down the list of features in order, and eliminate any builders
# that don't match every feature.
features = list(features)
features.reverse()
candidates = None
candidate_set = None
while len(features) > 0:
feature = features.pop()
we_have_the_feature = self.builders_for_feature.get(feature, [])
if len(we_have_the_feature) > 0:
if candidates is None:
candidates = we_have_the_feature
candidate_set = set(candidates)
else:
# Eliminate any candidates that don't have this feature.
candidate_set = candidate_set.intersection(
set(we_have_the_feature))
# The only valid candidates are the ones in candidate_set.
# Go through the original list of candidates and pick the first one
# that's in candidate_set.
if candidate_set is None:
return None
for candidate in candidates:
if candidate in candidate_set:
return candidate
return None
# The BeautifulSoup class will take feature lists from developers and use them
# to look up builders in this registry.
builder_registry = TreeBuilderRegistry()
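# Illustrative sketch (not part of the original module): a parser module
# registers its builder class, and BeautifulSoup later looks one up by the
# features it advertises, e.g.
#
#     builder_registry.register(MyLXMLTreeBuilder)   # hypothetical builder class
#     builder_class = builder_registry.lookup('html', 'fast')
#     if builder_class is not None:
#         soup_builder = builder_class()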
class TreeBuilder(object):
"""Turn a document into a Beautiful Soup object tree."""
NAME = "[Unknown tree builder]"
ALTERNATE_NAMES = []
features = []
is_xml = False
picklable = False
preserve_whitespace_tags = set()
empty_element_tags = None # A tag will be considered an empty-element
# tag when and only when it has no contents.
# A value for these tag/attribute combinations is a space- or
# comma-separated list of CDATA, rather than a single CDATA.
cdata_list_attributes = {}
def __init__(self):
self.soup = None
def reset(self):
pass
def can_be_empty_element(self, tag_name):
"""Might a tag with this name be an empty-element tag?
The final markup may or may not actually present this tag as
self-closing.
For instance: an HTMLBuilder does not consider a <p> tag to be
an empty-element tag (it's not in
HTMLBuilder.empty_element_tags). This means an empty <p> tag
will be presented as "<p></p>", not "<p />".
The default implementation has no opinion about which tags are
empty-element tags, so a tag will be presented as an
empty-element tag if and only if it has no contents.
"<foo></foo>" will become "<foo />", and "<foo>bar</foo>" will
be left alone.
"""
if self.empty_element_tags is None:
return True
return tag_name in self.empty_element_tags
def feed(self, markup):
raise NotImplementedError()
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
return markup, None, None, False
def test_fragment_to_document(self, fragment):
"""Wrap an HTML fragment to make it look like a document.
Different parsers do this differently. For instance, lxml
introduces an empty <head> tag, and html5lib
doesn't. Abstracting this away lets us write simple tests
which run HTML fragments through the parser and compare the
results against other HTML fragments.
This method should not be used outside of tests.
"""
return fragment
def set_up_substitutions(self, tag):
return False
def _replace_cdata_list_attribute_values(self, tag_name, attrs):
"""Replaces class="foo bar" with class=["foo", "bar"]
Modifies its input in place.
"""
if not attrs:
return attrs
if self.cdata_list_attributes:
universal = self.cdata_list_attributes.get('*', [])
tag_specific = self.cdata_list_attributes.get(
tag_name.lower(), None)
for attr in list(attrs.keys()):
if attr in universal or (tag_specific and attr in tag_specific):
# We have a "class"-type attribute whose string
# value is a whitespace-separated list of
# values. Split it into a list.
value = attrs[attr]
if isinstance(value, str):
values = whitespace_re.split(value)
else:
# html5lib sometimes calls setAttributes twice
# for the same tag when rearranging the parse
# tree. On the second call the attribute value
# here is already a list. If this happens,
# leave the value alone rather than trying to
# split it again.
values = value
attrs[attr] = values
return attrs
class SAXTreeBuilder(TreeBuilder):
"""A Beautiful Soup treebuilder that listens for SAX events."""
def feed(self, markup):
raise NotImplementedError()
def close(self):
pass
def startElement(self, name, attrs):
attrs = dict((key[1], value) for key, value in list(attrs.items()))
#print "Start %s, %r" % (name, attrs)
self.soup.handle_starttag(name, attrs)
def endElement(self, name):
#print "End %s" % name
self.soup.handle_endtag(name)
def startElementNS(self, nsTuple, nodeName, attrs):
# Throw away (ns, nodeName) for now.
self.startElement(nodeName, attrs)
def endElementNS(self, nsTuple, nodeName):
# Throw away (ns, nodeName) for now.
self.endElement(nodeName)
#handler.endElementNS((ns, node.nodeName), node.nodeName)
def startPrefixMapping(self, prefix, nodeValue):
# Ignore the prefix for now.
pass
def endPrefixMapping(self, prefix):
# Ignore the prefix for now.
# handler.endPrefixMapping(prefix)
pass
def characters(self, content):
self.soup.handle_data(content)
def startDocument(self):
pass
def endDocument(self):
pass
class HTMLTreeBuilder(TreeBuilder):
"""This TreeBuilder knows facts about HTML.
Such as which tags are empty-element tags.
"""
preserve_whitespace_tags = HTMLAwareEntitySubstitution.preserve_whitespace_tags
empty_element_tags = set([
# These are from HTML5.
'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr',
# These are from earlier versions of HTML and are removed in HTML5.
'basefont', 'bgsound', 'command', 'frame', 'image', 'isindex', 'nextid', 'spacer'
])
# The HTML standard defines these as block-level elements. Beautiful
# Soup does not treat these elements differently from other elements,
# but it may do so eventually, and this information is available if
# you need to use it.
block_elements = set(["address", "article", "aside", "blockquote", "canvas", "dd", "div", "dl", "dt", "fieldset", "figcaption", "figure", "footer", "form", "h1", "h2", "h3", "h4", "h5", "h6", "header", "hr", "li", "main", "nav", "noscript", "ol", "output", "p", "pre", "section", "table", "tfoot", "ul", "video"])
# The HTML standard defines these attributes as containing a
# space-separated list of values, not a single value. That is,
# class="foo bar" means that the 'class' attribute has two values,
# 'foo' and 'bar', not the single value 'foo bar'. When we
# encounter one of these attributes, we will parse its value into
# a list of values if possible. Upon output, the list will be
# converted back into a string.
cdata_list_attributes = {
"*" : ['class', 'accesskey', 'dropzone'],
"a" : ['rel', 'rev'],
"link" : ['rel', 'rev'],
"td" : ["headers"],
"th" : ["headers"],
"td" : ["headers"],
"form" : ["accept-charset"],
"object" : ["archive"],
# These are HTML5 specific, as are *.accesskey and *.dropzone above.
"area" : ["rel"],
"icon" : ["sizes"],
"iframe" : ["sandbox"],
"output" : ["for"],
}
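# Illustrative sketch (mirrors the comment above, not part of the original
# module): for a tag parsed from '<p class="foo bar">', the inherited
# _replace_cdata_list_attribute_values('p', {'class': 'foo bar'}) call returns
# {'class': ['foo', 'bar']}; on output the list is joined back into a string.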
def set_up_substitutions(self, tag):
# We are only interested in <meta> tags
if tag.name != 'meta':
return False
http_equiv = tag.get('http-equiv')
content = tag.get('content')
charset = tag.get('charset')
# We are interested in <meta> tags that say what encoding the
# document was originally in. This means HTML 5-style <meta>
# tags that provide the "charset" attribute. It also means
# HTML 4-style <meta> tags that provide the "content"
# attribute and have "http-equiv" set to "content-type".
#
# In both cases we will replace the value of the appropriate
# attribute with a standin object that can take on any
# encoding.
meta_encoding = None
if charset is not None:
# HTML 5 style:
# <meta charset="utf8">
meta_encoding = charset
tag['charset'] = CharsetMetaAttributeValue(charset)
elif (content is not None and http_equiv is not None
and http_equiv.lower() == 'content-type'):
# HTML 4 style:
# <meta http-equiv="content-type" content="text/html; charset=utf8">
tag['content'] = ContentMetaAttributeValue(content)
return (meta_encoding is not None)
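# Illustrative note (not part of the original module): for a tag parsed from
# '<meta charset="utf8">', the code above swaps tag['charset'] for a
# CharsetMetaAttributeValue stand-in so the declared encoding can be rewritten
# when the document is re-encoded on output.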
def register_treebuilders_from(module):
"""Copy TreeBuilders from the given module into this module."""
# I'm fairly sure this is not the best way to do this.
this_module = sys.modules['bs4.builder']
for name in module.__all__:
obj = getattr(module, name)
if issubclass(obj, TreeBuilder):
setattr(this_module, name, obj)
this_module.__all__.append(name)
# Register the builder while we're at it.
this_module.builder_registry.register(obj)
class ParserRejectedMarkup(Exception):
pass
# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last resort.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:
from . import _html5lib
register_treebuilders_from(_html5lib)
except ImportError:
# They don't have html5lib installed.
pass
try:
from . import _lxml
register_treebuilders_from(_lxml)
except ImportError:
# They don't have lxml installed.
pass
|
|
import errno
import imp
import io
import json
import os
import sys
import textwrap
import warnings
try:
import configparser # py3
except ImportError:
import ConfigParser as configparser
import toml # requires "pip install toml"
import yaml # requires "pip install pyyaml"
import confix
from confix import AlreadyParsedError
from confix import AlreadyRegisteredError
from confix import discard
from confix import Error
from confix import get_parsed_conf
from confix import isemail
from confix import isin
from confix import isip4
from confix import isip46
from confix import isip6
from confix import isnotin
from confix import istrue
from confix import isurl
from confix import NotParsedError
from confix import parse
from confix import parse_with_envvars
from confix import register
from confix import RequiredSettingKeyError
from confix import schema
from confix import TypesMismatchError
from confix import UnrecognizedSettingKeyError
from confix import ValidationError
PY3 = sys.version_info >= (3, )
if PY3:
StringIO = io.StringIO
else:
from cStringIO import StringIO
if PY3:
import unittest
else:
import unittest2 as unittest # requires 'pip install unittest2'
THIS_MODULE = os.path.splitext(os.path.basename(__file__))[0]
TESTFN = '$testfile'
def safe_remove(path):
try:
os.remove(path)
except OSError as err:
if err.errno != errno.ENOENT:
raise
# ===================================================================
# base test case and mixin class
# ===================================================================
class BaseTestCase(unittest.TestCase):
def setUp(self):
discard()
self.original_environ = os.environ.copy()
if getattr(self, 'TESTFN', None) is not None:
safe_remove(self.TESTFN)
def tearDown(self):
discard()
os.environ = self.original_environ
if getattr(self, 'TESTFN', None) is not None:
safe_remove(self.TESTFN)
@classmethod
def write_to_file(cls, content, fname=None):
with open(fname or cls.TESTFN, 'w') as f:
f.write(content)
def parse(self, *args, **kwargs):
parse(*args, **kwargs)
def parse_with_envvars(self, *args, **kwargs):
parse_with_envvars(*args, **kwargs)
class BaseMixin(object):
"""Base class from which mixin classes are derived."""
TESTFN = None
section = None
def setUp(self):
super(BaseMixin, self).setUp()
self.original_section = self.section
def tearDown(self):
super(BaseMixin, self).tearDown()
self.section = self.original_section
def dict_to_file(self, dct):
raise NotImplementedError('must be implemented in subclass')
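# Each concrete mixin below (YAML, JSON, TOML, INI, env vars) overrides
# dict_to_file so the shared test bodies in this class exercise every
# supported configuration format.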
# --- base tests
def test_empty_conf_file(self):
@register(self.section)
class config:
foo = 1
bar = 2
self.write_to_file(" ")
self.parse(self.TESTFN)
assert config.foo == 1
assert config.bar == 2
def test_conf_file_overrides_key(self):
# Conf file overrides one key, other one should be default.
@register(self.section)
class config:
foo = 1
bar = 2
self.dict_to_file(
dict(foo=5)
)
self.parse(self.TESTFN)
assert config.foo == 5
assert config.bar == 2
def test_conf_file_overrides_all_keys(self):
# Conf file overrides both keys.
@register(self.section)
class config:
foo = 1
bar = 2
self.dict_to_file(
dict(foo=5, bar=6)
)
self.parse(self.TESTFN)
assert config.foo == 5
assert config.bar == 6
def test_unrecognized_key(self):
# Conf file has a key which is not specified in the config class.
@register(self.section)
class config:
foo = 1
bar = 2
self.dict_to_file(
dict(foo=5, apple=6)
)
with self.assertRaises(UnrecognizedSettingKeyError) as cm:
self.parse(self.TESTFN)
assert cm.exception.section == self.section
assert cm.exception.key, 'apple'
def test_types_mismatch(self):
# Conf file provides a key whose value type differs from the
# conf class default type.
@register(self.section)
class config:
foo = 1
bar = 2
self.dict_to_file(
dict(foo=5, bar='foo')
)
with self.assertRaises(TypesMismatchError) as cm:
self.parse(self.TESTFN)
assert cm.exception.section == self.section
assert cm.exception.key == 'bar'
assert cm.exception.default_value == 2
assert cm.exception.new_value == 'foo'
# ...Unless we explicitly tell parse() to ignore type mismatch.
self.parse(self.TESTFN, type_check=False)
assert config.foo == 5
assert config.bar == 'foo'
def test_types_mismatch_schema_override(self):
# Same as above but schema(type_check=False) should override
# parse(type_check=True).
@register(self.section)
class config:
foo = schema(default=21, type_check=False)
self.dict_to_file(
dict(foo='aaa')
)
self.parse(self.TESTFN)
discard()
#
@register(self.section)
class config2:
foo = schema(default=21, type_check=True)
self.dict_to_file(
dict(foo='aaa')
)
with self.assertRaises(TypesMismatchError):
self.parse(self.TESTFN)
def test_base_types(self):
# str, int, float, bool are supposed to be supported by all
# file formats.
@register(self.section)
class config:
some_true_bool = True
some_false_bool = False
some_int = 0
some_str = "foo"
self.dict_to_file(dict(
some_true_bool=False,
some_false_bool=True,
some_int=1,
some_str="bar",
))
self.parse(self.TESTFN)
assert config.some_true_bool is False
assert config.some_false_bool is True
assert config.some_int == 1
assert config.some_str == "bar"
# def test_invalid_yaml_file(self):
# self.dict_to_file('?!?')
# with self.assertRaises(Error) as cm:
# self.parse(self.TESTFN)
# --- test schemas
def test_schema_base(self):
# A schema with no constraints is supposed to be converted into
# its default value after parse().
@register(self.section)
class config:
foo = schema(10)
self.dict_to_file({})
self.parse(self.TESTFN)
assert config.foo == 10
def test_schema_required(self):
# If a schema is required and it's not specified in the config
# file, expect an error.
@register(self.section)
class config:
foo = schema(10, required=True)
bar = 2
self.dict_to_file(
dict(bar=2)
)
with self.assertRaises(RequiredSettingKeyError) as cm:
self.parse(self.TESTFN)
assert cm.exception.section == self.section
assert cm.exception.key == 'foo'
def test_schema_required_provided(self):
# If a schema is required and it's provided in the conf file
# everything is cool.
@register(self.section)
class config:
foo = schema(10, required=True)
self.dict_to_file(
dict(foo=5)
)
self.parse(self.TESTFN)
assert config.foo == 5
def test_schemas_w_multi_validators(self):
def fun1(x):
flags.append(1)
return True
def fun2(x):
flags.append(2)
return True
def fun3(x):
flags.append(3)
return True
def fun4(x):
flags.append(4)
return True
@register(self.section)
class config:
overridden = schema(10, validator=[fun1, fun2])
not_overridden = schema(10, validator=[fun3, fun4])
flags = []
self.dict_to_file(
dict(overridden=5)
)
self.parse(self.TESTFN)
assert sorted(flags) == [1, 2, 3, 4]
assert config.overridden == 5
assert config.not_overridden == 10
# --- test validators
def test_validator_ok(self):
@register(self.section)
class config:
foo = schema(10, validator=lambda x: isinstance(x, int))
self.dict_to_file(
dict(foo=5)
)
self.parse(self.TESTFN)
def test_validator_ko(self):
@register(self.section)
class config:
foo = schema(10, validator=lambda x: isinstance(x, str))
self.dict_to_file(
dict(foo=5)
)
with self.assertRaises(ValidationError) as cm:
self.parse(self.TESTFN)
assert cm.exception.section == self.section
assert cm.exception.key == 'foo'
assert cm.exception.value == 5
def test_validator_ko_custom_exc_w_message(self):
def validator(value):
raise ValidationError('message')
@register(self.section)
class config:
foo = schema(10, validator=validator)
self.dict_to_file(
dict(foo=5)
)
with self.assertRaises(ValidationError) as cm:
self.parse(self.TESTFN)
# assert cm.exception.section == 'name'  # TODO
assert cm.exception.key == 'foo'
assert cm.exception.value == 5
assert cm.exception.msg == 'message'
def test_validator_ko_custom_exc_w_no_message(self):
def validator(value):
raise ValidationError
@register(self.section)
class config:
foo = schema(10, validator=validator)
self.dict_to_file(
dict(foo=5)
)
with self.assertRaises(ValidationError) as cm:
self.parse(self.TESTFN)
assert cm.exception.section == self.section
assert cm.exception.key == 'foo'
assert cm.exception.value == 5
assert cm.exception.msg is None
assert 'with value 5' in str(cm.exception)
# --- test parse_with_envvars
def test_envvars_w_file(self):
# Test both config file and env vars are taken into account.
@register(self.section)
class config:
foo = 1
bar = 2
apple = 3
self.dict_to_file(
dict(foo=5)
)
os.environ['APPLE'] = '10'
self.parse_with_envvars(self.TESTFN)
assert config.foo == 5
assert config.bar == 2
assert config.apple == 10
def test_envvars_precedence_order(self):
# Test env var takes precedence over config file.
@register(self.section)
class config:
foo = 1
self.dict_to_file(
dict(foo=5)
)
os.environ['FOO'] = '6'
self.parse_with_envvars(self.TESTFN)
assert config.foo == 6
def test_envvars_case_sensitive(self):
@register(self.section)
class config:
foo = 1
bar = 2
APPLE = 3
# non-uppercase env vars are supposed to be ignored
os.environ['FoO'] = '10'
os.environ['BAR'] = '20'
os.environ['APPLE'] = '30'
parse_with_envvars(case_sensitive=True)
assert config.foo == 1
assert config.bar == 2
assert config.APPLE == 30
def test_envvars_case_insensitive(self):
@register(self.section)
class config:
foo = 1
bar = 2
APPLE = 3
PeAr = 4
# non-uppercase env vars are supposed to be ignored
os.environ['FoO'] = '10'
os.environ['BAR'] = '20'
os.environ['APPLE'] = '30'
os.environ['PEAR'] = '40'
parse_with_envvars(case_sensitive=False)
assert config.foo == 1
assert config.bar == 20
assert config.APPLE == 30
assert config.PeAr == 40
def test_envvars_type_mismatch(self):
@register(self.section)
class config:
some_int = 1
some_float = 0.1
some_bool = True
# int
os.environ['SOME_INT'] = 'foo'
with self.assertRaises(TypesMismatchError) as cm:
parse_with_envvars()
assert cm.exception.section == self.section
assert cm.exception.key == 'some_int'
assert cm.exception.default_value == 1
assert cm.exception.new_value == 'foo'
del os.environ['SOME_INT']
# float
os.environ['SOME_FLOAT'] = 'foo'
with self.assertRaises(TypesMismatchError) as cm:
parse_with_envvars()
assert cm.exception.section == self.section
assert cm.exception.key == 'some_float'
assert cm.exception.default_value == 0.1
assert cm.exception.new_value == 'foo'
del os.environ['SOME_FLOAT']
# bool
os.environ['SOME_BOOL'] = 'foo'
with self.assertRaises(TypesMismatchError) as cm:
parse_with_envvars()
assert cm.exception.section == self.section
assert cm.exception.key == 'some_bool'
assert cm.exception.default_value is True
assert cm.exception.new_value == 'foo'
# --- test multiple sections
def test_multisection_multiple(self):
# Define two configuration classes, control them via a single
# conf file defining separate sections.
self.section = None
@register('ftp')
class ftp_config:
port = 21
username = 'ftp'
@register('http')
class http_config:
port = 80
username = 'www'
self.dict_to_file({
'ftp': dict(username='foo'),
'http': dict(username='bar'),
})
self.parse(self.TESTFN)
assert ftp_config.port == 21
assert ftp_config.username == 'foo'
assert http_config.port == 80
assert http_config.username == 'bar'
def test_multisection_invalid_section(self):
# Config file defines a section which is not defined in the config
# class.
self.section = None
@register('ftp')
class config:
port = 21
username = 'ftp'
self.dict_to_file({
'http': dict(username='bar'),
})
with self.assertRaises(UnrecognizedSettingKeyError) as cm:
self.parse(self.TESTFN)
assert cm.exception.key == 'http'
assert cm.exception.new_value == dict(username='bar')
assert cm.exception.section is None
def test_multisection_unrecognized_key(self):
# Config file defines a section key which is not defined in the config
# class.
self.section = None
@register('ftp')
class config:
port = 21
username = 'ftp'
self.dict_to_file({
'ftp': dict(password='bar'),
})
with self.assertRaises(UnrecognizedSettingKeyError) as cm:
self.parse(self.TESTFN)
assert cm.exception.key == 'password'
assert cm.exception.new_value == 'bar'
assert cm.exception.section == 'ftp'
# ===================================================================
# mixin tests
# ===================================================================
# yaml
class TestYamlMixin(BaseMixin, BaseTestCase):
TESTFN = TESTFN + '.yaml'
def dict_to_file(self, dct):
if self.section:
dct = {self.section: dct}
s = yaml.dump(dct, default_flow_style=False)
self.write_to_file(s)
class TestYamlWithSectionMixin(TestYamlMixin):
section = 'name'
# json
class TestJsonMixin(BaseMixin, BaseTestCase):
TESTFN = TESTFN + '.json'
def dict_to_file(self, dct):
if self.section:
dct = {self.section: dct}
self.write_to_file(json.dumps(dct))
class TestJsonWithSectionMixin(TestJsonMixin):
section = 'name'
# toml
class TestTomlMixin(BaseMixin, BaseTestCase):
TESTFN = TESTFN + '.toml'
def dict_to_file(self, dct):
if self.section:
dct = {self.section: dct}
s = toml.dumps(dct)
self.write_to_file(s)
class TestTomlWithSectionMixin(TestTomlMixin):
section = 'name'
# ini
class TestIniMixin(BaseMixin, BaseTestCase):
TESTFN = TESTFN + 'testfile.ini'
section = 'name'
def dict_to_file(self, dct):
if not self._testMethodName.startswith('test_multisection'):
dct = {self.section: dct}
config = configparser.RawConfigParser()
for section, values in dct.items():
assert isinstance(section, str)
config.add_section(section)
for key, value in values.items():
config.set(section, key, value)
fl = StringIO()
config.write(fl)
fl.seek(0)
content = fl.read()
self.write_to_file(content)
# env vars
class TestEnvVarsMixin(BaseMixin, BaseTestCase):
TESTFN = TESTFN + 'testfile.ini'
def setUp(self):
super(TestEnvVarsMixin, self).setUp()
if self._testMethodName.startswith('test_multisection'):
raise unittest.SkipTest
def parse(self, *args, **kwargs):
parse_with_envvars(**kwargs)
def parse_with_envvars(self, *args, **kwargs):
parse_with_envvars(**kwargs)
def dict_to_file(self, dct):
for k, v in dct.items():
os.environ[k.upper()] = str(v)
@unittest.skip("")
def test_unrecognized_key(self):
# Will fail because var names not matching the default conf
# keys are skipped.
pass
# ===================================================================
# tests for a specific format
# ===================================================================
class TestIni(BaseTestCase):
TESTFN = TESTFN + '.ini'
def test_sectionless_conf(self):
@register()
class config:
foo = 1
self.write_to_file("")
self.assertRaisesRegex(
Error,
"can't parse ini files if a sectionless configuration class",
parse, self.TESTFN)
def test_true_type(self):
for value in ("1", "yes", "true", "on", "YES", "TRUE", "ON"):
@register('name')
class config:
foo = False
self.write_to_file(textwrap.dedent("""
[name]
foo = %s
""" % (value)))
self.parse(self.TESTFN)
assert config.foo is True
discard()
def test_false_type(self):
for value in ("0", "no", "false", "off", "NO", "FALSE", "OFF"):
@register('name')
class config:
foo = True
self.write_to_file(textwrap.dedent("""
[name]
foo = %s
""" % (value)))
self.parse(self.TESTFN)
assert config.foo is False
discard()
class TestEnvVars(BaseTestCase):
def test_true_type(self):
for value in ("1", "yes", "true", "on", "YES", "TRUE", "ON"):
@register()
class config:
foo = False
os.environ['FOO'] = value
self.parse_with_envvars()
assert config.foo is True
discard()
def test_false_type(self):
for value in ("0", "no", "false", "off", "NO", "FALSE", "OFF"):
@register('name')
class config:
foo = True
os.environ['FOO'] = value
self.parse_with_envvars()
assert config.foo is False
discard()
# ===================================================================
# test validators
# ===================================================================
class TestValidators(BaseTestCase):
def test_istrue(self):
assert istrue('foo')
self.assertRaises(ValidationError, istrue, '')
def test_isin(self):
self.assertRaises(TypeError, isin, 1)
fun = isin(('1', '2'))
assert fun('1')
assert fun('2')
self.assertRaises(ValidationError, fun, '3')
self.assertRaises(ValueError, isin, [])
def test_isnotin(self):
self.assertRaises(TypeError, isnotin, 1)
fun = isnotin(('1', '2'))
assert fun('3')
assert fun('4')
self.assertRaises(ValidationError, fun, '2')
self.assertRaisesRegex(
TypeError, "is not iterable", isnotin, None)
self.assertRaisesRegex(
ValueError, "sequence can't be empty", isnotin, [])
def test_isemail(self):
assert isemail("[email protected]")
assert isemail("[email protected]")
self.assertRaises(ValidationError, isemail, "@bar.com")
self.assertRaises(ValidationError, isemail, "foo@bar")
self.assertRaises(ValidationError, isemail, "foo@bar.")
self.assertRaisesRegex(
ValidationError, "expected a string", isemail, None)
assert isemail("[email protected]")
assert isemail("\"email\"@domain.com")
assert isemail("[email protected]")
assert isemail("[email protected]")
assert isemail("[email protected]")
assert isemail("[email protected]")
assert isemail("email@[123.123.123.123]")
assert isemail("[email protected]")
assert isemail("[email protected]")
assert isemail("[email protected]")
assert isemail("[email protected]")
assert isemail("[email protected]")
assert isemail("[email protected]")
def test_isurl(self):
assert isurl("http://google.com")
assert isurl("http://www.google.com")
assert isurl("http://www.google.com/foo/bar")
assert isurl("https://google.com")
assert isurl("https://www.google.com")
assert isurl("https://www.google.com:80")
assert isurl("https://www.google.com:80")
assert isurl("http://www.2google.com")
assert isurl("http://127.0.0.1")
assert isurl("http://127.0.0.1:8000")
self.assertRaises(ValidationError, isurl, "htt://google.com")
self.assertRaises(ValidationError, isurl, "http://google.com:foo")
self.assertRaises(ValidationError, isurl, "ftp://google.com")
self.assertRaises(ValidationError, isurl, "google.com")
self.assertRaises(ValidationError, isurl, None)
def test_isip4(self, fun=isip4):
assert fun("127.0.0.1")
assert fun("10.0.0.1")
assert fun("255.255.255.255")
self.assertRaises(ValidationError, fun, "10.0.0.1/24")
self.assertRaises(ValidationError, fun, "10.0.0")
self.assertRaises(ValidationError, fun, "256.333.333.333")
self.assertRaisesRegex(
ValidationError, "expected a string", fun, None)
self.assertRaises(ValidationError, isip4, "::1")
def test_isip6(self, fun=isip6):
assert fun("::")
assert fun("::1")
assert fun("FE80:0000:0000:0000:0202:B3FF:FE1E:8329")
# http://www.ronnutter.com/ipv6-cheatsheet-on-identifying-valid-
# ipv6-addresses/
self.assertRaises(
ValidationError, fun, "1200::AB00:1234::2552:7777:1313")
self.assertRaises(
ValidationError, fun, "1200:0000:AB00:1234:O000:2552:7777:1313")
self.assertRaisesRegex(
ValidationError, "expected a string", fun, None)
self.assertRaises(ValidationError, isip6, "127.0.0.1")
def test_isip46(self):
self.test_isip4(fun=isip46)
self.test_isip6(fun=isip46)
self.assertRaisesRegex(
ValidationError, "expected a string", isip46, None)
# ===================================================================
# parse() tests
# ===================================================================
class TestParse(BaseTestCase):
def test_no_conf_file(self):
        # parse() is supposed to work even if no conf file is passed
@register()
class config:
foo = 1
bar = schema(10)
parse()
assert config.foo == 1
assert config.bar == 10
def test_conf_file_w_unknown_ext(self):
# Conf file with unsupported extension.
with open(TESTFN, 'w') as f:
f.write('foo')
self.addCleanup(safe_remove, TESTFN)
with self.assertRaises(ValueError) as cm:
parse(TESTFN)
assert "don't know how to parse" in str(cm.exception)
assert "extension not supported" in str(cm.exception)
def test_parser_with_no_file(self):
self.assertRaises(ValueError, parse, file_parser=lambda x: {})
def test_no_registered_class(self):
self.assertRaises(Error, parse)
def test_file_like(self):
@register()
class foo:
foo = 1
file = io.StringIO()
with self.assertRaises(Error) as cm:
parse(file)
assert str(cm.exception) == \
"can't determine file format from a file object with no 'name' " \
"attribute"
file = io.StringIO()
parse(file, file_parser=lambda x: {})
def test_parse_called_twice(self):
@register()
class config:
foo = 1
bar = 2
parse()
self.assertRaises(AlreadyParsedError, parse)
self.assertRaises(AlreadyParsedError, parse_with_envvars)
# ===================================================================
# schema() tests
# ===================================================================
class TestSchema(BaseTestCase):
def test_errors(self):
# no default nor required=True
self.assertRaisesRegex(
ValueError, "specify a default value or set required", schema)
# not callable validator
self.assertRaisesRegex(
TypeError, "not callable", schema, default=10, validator=1)
self.assertRaisesRegex(
TypeError, "not callable", schema, default=10, validator=['foo'])
# ===================================================================
# exception classes tests
# ===================================================================
class TestExceptions(BaseTestCase):
def test_error(self):
exc = Error('foo')
assert str(exc) == 'foo'
assert repr(exc) == 'foo'
def test_already_parsed_error(self):
exc = AlreadyParsedError()
assert 'already parsed' in str(exc)
def test_already_registered_error(self):
exc = AlreadyRegisteredError('foo')
assert 'already registered' in str(exc)
assert 'foo' in str(exc)
def test_not_parsed_error(self):
exc = NotParsedError()
assert 'not parsed' in str(exc)
def test_unrecognized_key_error(self):
exc = UnrecognizedSettingKeyError(
section=None, key='foo', new_value='bar')
assert str(exc) == \
"config file provides setting key 'foo' with value 'bar' but " \
"setting key 'foo' is not defined in any of the config classes"
@register()
class config:
pass
exc = UnrecognizedSettingKeyError(
section=None, key='foo', new_value='bar')
assert str(exc) == \
"config file provides setting key 'foo' with value 'bar' but " \
"setting key 'foo' is not defined in config class %s.%s" % (
config.__module__, config.__name__)
def test_required_key_error(self):
exc = RequiredSettingKeyError(None, key="foo")
assert str(exc) == \
"configuration class requires 'foo' setting key to be specified " \
"via config file or environment variable"
def test_types_mismatch_error(self):
exc = TypesMismatchError(
section=None, key="foo", default_value=1, new_value='bar')
assert str(exc) == \
"type mismatch for setting key 'foo' (default_value=1, %s) got " \
"'bar' (%s)" % (type(1), type(""))
# ===================================================================
# get_parsed_conf() tests
# ===================================================================
class TestGetParsedConf(BaseTestCase):
def test_root_only(self):
@register()
class root_conf:
root_value = 1
self.assertRaises(NotParsedError, get_parsed_conf)
parse()
assert get_parsed_conf() == {'root_value': 1}
def test_root_plus_sub(self):
@register()
class root_conf:
root_value = 1
@register('sub')
class sub_conf:
sub_value = 1
parse()
assert get_parsed_conf() == {'root_value': 1, 'sub': {'sub_value': 1}}
def test_sub_plus_root(self):
@register('sub')
class sub_conf:
sub_value = 1
@register()
class root_conf:
root_value = 1
parse()
assert get_parsed_conf() == {'root_value': 1, 'sub': {'sub_value': 1}}
def test_hidden_key(self):
@register()
class config:
foo = 1
_hidden = 2
parse()
assert get_parsed_conf() == {'foo': 1}
# ===================================================================
# @register() tests
# ===================================================================
class TestRegister(BaseTestCase):
def test_dictify_and_method(self):
@register()
class config:
foo = 1
bar = 2
_hidden = 3
@classmethod
def some_method(cls):
return 1
assert dict(config) == {'foo': 1, 'bar': 2}
assert config.some_method() == 1
parse()
assert dict(config) == {'foo': 1, 'bar': 2}
assert config.some_method() == 1
def test_special_methods(self):
@register()
class config:
"""docstring"""
foo = 1
bar = 2
@classmethod
def some_method(cls):
return 1
assert config.__doc__ == "docstring"
assert config.__name__ == "config"
# __len__
assert len(config) == 2
# __getitem__
assert config['foo'] == 1
# __setitem__
config['foo'] == 33
assert config['foo'] == 1
# __contains__
assert 'foo' in config
# should we allow this?
assert 'some_method' in config
# __delitem__
del config['foo']
assert 'foo' not in config
assert len(config) == 1
# __repr__
repr(config)
def test_register_twice(self):
@register()
class config:
foo = 1
with self.assertRaises(AlreadyRegisteredError):
@register()
class config_2:
foo = 1
def test_decorate_fun(self):
with self.assertRaises(TypeError) as cm:
@register()
def foo():
pass
assert 'register decorator is supposed to be used against a class' in \
str(cm.exception)
def test_override_root_section_key(self):
@register()
class root:
foo = 1
with self.assertRaises(Error) as cm:
@register(section="foo")
class sub:
bar = 2
assert "previously registered root class" in str(cm.exception)
assert "already defines a section with the same name" \
in str(cm.exception)
def test_register_after_parse(self):
@register()
class config:
foo = 1
parse()
with warnings.catch_warnings(record=True) as ws:
@register(section="unparsed")
class unparsed_config:
bar = 1
assert len(ws) == 1
assert 'configuration class defined after parse' in \
str(ws[0].message)
assert ws[0].category is UserWarning
# global conf will not include this
assert get_parsed_conf() == {'foo': 1}
# but it's still a magic object
assert dict(unparsed_config) == {'bar': 1}
def test_invalid_section_type(self):
# this also serves as a test for
with self.assertRaises(TypeError):
@register(section=1)
class config:
foo = 1
def test_invalid_section_str(self):
with self.assertRaises(ValueError):
@register(section="")
class config:
foo = 1
# ===================================================================
# misc tests
# ===================================================================
class TestMisc(BaseTestCase):
def test_mro(self):
# This method is automatically added by the meta class wrapper:
# https://docs.python.org/3/library/stdtypes.html#class.mro
# Make sure we can override it.
@register()
class config:
mro = 2
assert config.mro == 2
discard()
@register()
class config:
pass
config.mro
def test__all__(self):
dir_confix = dir(confix)
for name in dir_confix:
if name in ('configparser', 'logger', 'basestring', 'unicode'):
continue
if not name.startswith('_'):
try:
__import__(name)
except ImportError:
if name not in confix.__all__:
fun = getattr(confix, name)
if fun is None:
continue
if (fun.__doc__ is not None and
'deprecated' not in fun.__doc__.lower()):
self.fail('%r not in confix.__all__' % name)
# Import 'star' will break if __all__ is inconsistent, see:
# https://github.com/giampaolo/psutil/issues/656
# Can't do `from confix import *` as it won't work on python 3
# so we simply iterate over __all__.
for name in confix.__all__:
assert name in dir_confix
def test_version(self):
assert '.'.join([str(x) for x in confix.version_info]) == \
confix.__version__
def test_setup_script(self):
here = os.path.abspath(os.path.dirname(__file__))
setup_py = os.path.realpath(os.path.join(here, 'setup.py'))
module = imp.load_source('setup', setup_py)
self.assertRaises(SystemExit, module.setup)
assert module.get_version() == confix.__version__
def main():
verbosity = 1 if 'TOX' in os.environ else 2
unittest.main(verbosity=verbosity)
if __name__ == '__main__':
main()
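# --- Hedged usage sketch (not part of the original test suite) -------------
# The tests above exercise confix piece by piece; the helper below sketches
# how register(), schema() and parse() are typically combined.  It assumes
# the names imported at the top of this module (register, schema, isin,
# parse, get_parsed_conf), is illustrative only, and is never collected or
# run by the test runner.
def _example_usage():
    @register()
    class config:
        host = "localhost"
        port = schema(8080)  # plain default, overridable via conf file/env
        mode = schema("dev", validator=isin(("dev", "prod")))
    parse()  # no conf file given: the defaults above are kept
    # e.g. {'host': 'localhost', 'port': 8080, 'mode': 'dev'}
    return get_parsed_conf()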
|
|
#!/usr/bin/python
# The following module(s) are required for listing the files in a directory.
from os import listdir
from os.path import isfile, join
# The following module(s) are required for regular expressions.
import re
# Required for sys.exit() used in validateLineConsistency().
import sys
class ParseGTF:
# Function that initializes the ParseGTF class.
def __init__(self, pathToGTF):
# Verbose information for the programmer.
print("Initializing ParseGTF...\n")
# Specified path to all annotated gene files.
self.path = pathToGTF
print("Specified path:\n{}\n".format(self.path))
        # Empty placeholder for the file stream, giving the whole class access to the file.
self.transcriptExpressionCSV = ""
        # Counts the number of processed files.
self.gtf_index_count = 0
# Saves the indexed content for each annotated gene file.
        # self.gtf_index = { file_name: gtf_file_index, ... }
self.gtf_index = {}
# The indexed content for an annotated gene file.
        # self.gtf_file_index = { transcript_id: [gene_id, raw_count], ... }
self.gtf_file_index = {}
# A list with all the 'human readable' sample names.
self.gtf_names = []
self.alternate = {}
# Function that reads in GTF Files one by one, and closes the file stream afterward.
def GTFReader(self):
# Verbose information for the programmer.
print("Reading GTF File...\n")
# Store all files in the specified path.
files = [f for f in listdir(self.path) if isfile(join(self.path, f))]
#files = ['out.batch4_TCC-18-2_t10_150622_SN163_0652_AC7EUNACXX_L6_TGACCA.gtf', 'out.batch5_TCC-20-1_t10_160408_SN163_0708_BHMJTMBCXX_L2_TAGCTT_1.gtf']
        # Filter the files by origin, based on information in the file name.
gtf_files = self.filterByFileOrigin(files)
# For file in filtered gtf_files.
for file in gtf_files:
# Add the file to self.gtf_file_index
self.gtf_index[file] = self.gtf_file_index
# Add the sub-dir to the file.
file = "FluxCapacitor/"+file
# Open file handle on variable gtf.
gtf = open(file, "r")
# Call GTFParser and start parsing the file.
self.GTFParser(gtf)
            # Close the file handle, ending the stream for this particular file in the loop.
gtf.close()
# Function that filters the files in the directory by file name.
def filterByFileOrigin(self, files):
# Verbose information for the programmer.
print("Filtering by File Origin...")
# Create an empty list to append all the 'correct' files.
gtf_files = []
# Compose a pattern to filter by file name, since the sample is in the file name we can easily pick out the samples we won't need.
pattern = re.compile(r'(out.(batch\d{1}_TCC-\d{2}-\d{1}_t\d+).*)')
# For every file in the listed list of files.
for file in files:
# Match each file against the pattern.
m = re.match(pattern, file)
            # If the regex matches the file name, keep this file (this is the actual filtering step).
if m:
# Increase the gtf_index_count with 1, telling us later that we have a total of n files.
self.gtf_index_count += 1
# Append the found files to the gtf_files list.
gtf_files.append(m.group(1))
# Append the found files to the gtf_names list.
self.gtf_names.append(m.group(2))
# return the filtered gtf files.
return gtf_files
# Function that parses the gtf file handle.
def GTFParser(self, fileHandle):
# Verbose information for the programmer.
#print("Parsing GTF File...\n")
# Read in the file handle.
gtf_content = fileHandle.readlines()
# For each line in the gtf file.
for line in gtf_content:
# If the line validates.
if self.validateLineConsistency(line):
                # Split the line on tabs, yielding exactly 9 elements.
bits = line.split("\t")
# Split the 9th element (8) on the ";".
bits = bits[8].split(";")
# Save transcript id, gene id and reads.
transcript_id = bits[0]
gene_id = bits[2]
reads = bits[3]
# Store the transcript id as key in the gtf_file_index variable and assign gene_id and their reads as values.
self.gtf_file_index[transcript_id] = [gene_id, reads]
id = transcript_id + " " + gene_id
if id not in self.alternate.keys():
self.alternate[id] = [reads]
else:
self.alternate[id].append(reads)
def validateLineConsistency(self, line):
# Set a flag for the boolean.
validity = False
# Splits each line, generating exactly 9 elements.
line_elements = line.split("\t")
# Change this according to the TODO below.
if len(line_elements) != 9:
print("something went wrong with:\n")
print(line_elements)
sys.exit(1)
else:
validity = True
# Returns a True or False according to the validity check.
return validity
def assembleTranscriptExpressionMatrix(self):
print("Assembling Transcript Expression Matrix...\n\n")
# Opens a file handle for the new Transcript Expression Matrix.
self.createTranscriptExpressionCSV()
# Store all the keys for the files.
gtf_keys = self.gtf_index.keys()
# Create an empty list that will contain row's.
rows = []
# For each key in the stored keys.
for key in gtf_keys:
# Save the values from the files' keys. This is another dictionary with the content of the file.
content = self.gtf_index[key]
#self.writeToTranscriptExpressionCSV(content)
# Store the keys (Transcript id's) for each file.
content_keys = content.keys()
# For every key in the content of the file.
for c_key in content_keys:
# Save the value pair from the keys (Gene ID and read count).
values = content[c_key]
# Splitting the key results in the "ENSTXXXXX".
c_k = c_key.split(" ")
tr_id = c_k[1]
# Splitting the first element of the values results in a gene id literal and the gene id. We save the third element, which is the gene_id.
gene_id = values[0].split(" ")
gene_id = gene_id[2]
# Splitting the 2nd element of the values results in a reads literal and the read count. We save the third element, which is the read count.
reads = values[1].split(" ")
reads = reads[2]
# Assemble row.
row = tr_id + "\t" + gene_id + "\t" + reads + "\n"
# Add row to rows.
rows.append(row)
#self.writeToTranscriptExpressionCSV(rows)
self.writeToTranscriptExpressionCSV()
def createTranscriptExpressionCSV(self):
print("Creating Transcript Expression CSV...")
        # Create a class-wide file handle named gsTcell_TranscriptExpression.csv.
self.transcriptExpressionCSV = open("gsTcell_TranscriptExpression.csv","w")
# Create a string with sample names.
sample_row = "trId\tgeneId\t"
for i in self.gtf_names:
sample_row += i + "\t"
sample_row = sample_row + "\n"
# Write sample_row to transcript expression file. This happens only once!
self.transcriptExpressionCSV.write(sample_row)
def writeToTranscriptExpressionCSV(self):
#print("Writing to Transcript Expression CSV...")
# Store all the keys from the content of the gtf file.
keys = self.alternate.keys()
# Write row to file.
#self.transcriptExpressionCSV.write()
# For each key in the keys list.
row = ""
for key in keys:
# Get the value pairs from the keys.
#values = content[key]
counts = self.alternate[key]
#print(self.alternate)
# Split the key, so that the transcript id string is separated from the transcript id.
#k = key.split(" ")
id = key.split(" ")
id = id[1] + "\t" + id[4]
row += id + "\t"
# Split the gene id value, to separate the literal from the id.
#gene_id = values[0].split(" ")
#gene_id = gene_id[2]
# Split the reads value, to separate the literal from the reads.
#reads = values[1].split(" ")
#reads = reads[2]
reads = ""
for i in counts:
i = i.split(" ")
i = i[2]
reads += i + "\t"
row += reads + "\n"
#if self.gtf_index_count < 2:
# Create row string.
#row = k[1] + "\t" + gene_id + "\t" + count + "\t"
# Write to the CSV file.
self.transcriptExpressionCSV.write(row)
def main():
p = ParseGTF("/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/output/FluxCapacitor/")
p.GTFReader()
p.assembleTranscriptExpressionMatrix()
if "__main__" == __name__:
main()
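# --- Hedged illustration (not part of the original script) -----------------
# GTFParser above splits the 9th tab-separated column on ";" and keeps the
# transcript id, gene id and read count; filterByFileOrigin keeps only files
# whose names match the batch/sample pattern.  The helpers below sketch both
# steps on hypothetical inputs; the example strings are assumptions, only the
# field positions and the regex come from the code above.  They are never
# called by the script itself.
def _split_attribute_column(attribute_column):
    """Return (transcript_id, gene_id, reads) the way GTFParser slices them."""
    bits = attribute_column.split(";")
    return bits[0], bits[2], bits[3]

def _match_sample_name(file_name):
    """Return (full_name, sample) groups for a FluxCapacitor-style file name."""
    pattern = re.compile(r'(out.(batch\d{1}_TCC-\d{2}-\d{1}_t\d+).*)')
    m = re.match(pattern, file_name)
    return m.groups() if m else None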
|
|
from io import BytesIO
from os.path import getmtime
import tempfile
from time import gmtime
import os
import shutil
import unittest
from webob import static
from webob.compat import bytes_
from webob.request import Request, environ_from_url
from webob.response import Response
def get_response(app, path='/', **req_kw):
"""Convenient function to query an application"""
req = Request(environ_from_url(path), **req_kw)
return req.get_response(app)
def create_file(content, *paths):
"""Convenient function to create a new file with some content"""
path = os.path.join(*paths)
with open(path, 'wb') as fp:
fp.write(bytes_(content))
return path
class TestFileApp(unittest.TestCase):
def setUp(self):
fp = tempfile.NamedTemporaryFile(suffix=".py", delete=False)
self.tempfile = fp.name
fp.write(b"import this\n")
fp.close()
def tearDown(self):
os.unlink(self.tempfile)
def test_fileapp(self):
app = static.FileApp(self.tempfile)
resp1 = get_response(app)
self.assertEqual(resp1.content_type, 'text/x-python')
self.assertEqual(resp1.charset, 'UTF-8')
self.assertEqual(resp1.last_modified.timetuple(), gmtime(getmtime(self.tempfile)))
resp2 = get_response(app)
self.assertEqual(resp2.content_type, 'text/x-python')
self.assertEqual(resp2.last_modified.timetuple(), gmtime(getmtime(self.tempfile)))
resp3 = get_response(app, range=(7, 11))
self.assertEqual(resp3.status_code, 206)
self.assertEqual(tuple(resp3.content_range)[:2], (7, 11))
self.assertEqual(resp3.last_modified.timetuple(), gmtime(getmtime(self.tempfile)))
self.assertEqual(resp3.body, bytes_('this'))
def test_unexisting_file(self):
app = static.FileApp('/tmp/this/doesnt/exist')
self.assertEqual(404, get_response(app).status_code)
def test_allowed_methods(self):
app = static.FileApp(self.tempfile)
# Alias
resp = lambda method: get_response(app, method=method)
self.assertEqual(200, resp(method='GET').status_code)
self.assertEqual(200, resp(method='HEAD').status_code)
self.assertEqual(405, resp(method='POST').status_code)
# Actually any other method is not allowed
self.assertEqual(405, resp(method='xxx').status_code)
def test_exception_while_opening_file(self):
        # Mock the built-in ``open()`` function to allow finer control over
        # what we are testing.
def open_ioerror(*args, **kwargs):
raise IOError()
def open_oserror(*args, **kwargs):
raise OSError()
app = static.FileApp(self.tempfile)
app._open = open_ioerror
self.assertEqual(403, get_response(app).status_code)
app._open = open_oserror
self.assertEqual(403, get_response(app).status_code)
def test_use_wsgi_filewrapper(self):
class TestWrapper(object):
def __init__(self, file, block_size):
self.file = file
self.block_size = block_size
environ = environ_from_url('/')
environ['wsgi.file_wrapper'] = TestWrapper
app = static.FileApp(self.tempfile)
app_iter = Request(environ).get_response(app).app_iter
self.assertTrue(isinstance(app_iter, TestWrapper))
self.assertEqual(bytes_('import this\n'), app_iter.file.read())
self.assertEqual(static.BLOCK_SIZE, app_iter.block_size)
class TestFileIter(unittest.TestCase):
def test_empty_file(self):
fp = BytesIO()
fi = static.FileIter(fp)
self.assertRaises(StopIteration, next, iter(fi))
def test_seek(self):
fp = BytesIO(bytes_("0123456789"))
i = static.FileIter(fp).app_iter_range(seek=4)
self.assertEqual(bytes_("456789"), next(i))
self.assertRaises(StopIteration, next, i)
def test_limit(self):
fp = BytesIO(bytes_("0123456789"))
i = static.FileIter(fp).app_iter_range(limit=4)
self.assertEqual(bytes_("0123"), next(i))
self.assertRaises(StopIteration, next, i)
def test_limit_and_seek(self):
fp = BytesIO(bytes_("0123456789"))
i = static.FileIter(fp).app_iter_range(limit=4, seek=1)
self.assertEqual(bytes_("123"), next(i))
self.assertRaises(StopIteration, next, i)
def test_multiple_reads(self):
fp = BytesIO(bytes_("012"))
i = static.FileIter(fp).app_iter_range(block_size=1)
self.assertEqual(bytes_("0"), next(i))
self.assertEqual(bytes_("1"), next(i))
self.assertEqual(bytes_("2"), next(i))
self.assertRaises(StopIteration, next, i)
def test_seek_bigger_than_limit(self):
fp = BytesIO(bytes_("0123456789"))
i = static.FileIter(fp).app_iter_range(limit=1, seek=2)
# XXX: this should not return anything actually, since we are starting
# to read after the place we wanted to stop.
self.assertEqual(bytes_("23456789"), next(i))
self.assertRaises(StopIteration, next, i)
def test_limit_is_zero(self):
fp = BytesIO(bytes_("0123456789"))
i = static.FileIter(fp).app_iter_range(limit=0)
self.assertRaises(StopIteration, next, i)
class TestDirectoryApp(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_empty_directory(self):
app = static.DirectoryApp(self.test_dir)
self.assertEqual(404, get_response(app).status_code)
self.assertEqual(404, get_response(app, '/foo').status_code)
def test_serve_file(self):
app = static.DirectoryApp(self.test_dir)
create_file('abcde', self.test_dir, 'bar')
self.assertEqual(404, get_response(app).status_code)
self.assertEqual(404, get_response(app, '/foo').status_code)
resp = get_response(app, '/bar')
self.assertEqual(200, resp.status_code)
self.assertEqual(bytes_('abcde'), resp.body)
def test_dont_serve_file_in_parent_directory(self):
# We'll have:
# /TEST_DIR/
# /TEST_DIR/bar
# /TEST_DIR/foo/ <- serve this directory
create_file('abcde', self.test_dir, 'bar')
serve_path = os.path.join(self.test_dir, 'foo')
os.mkdir(serve_path)
app = static.DirectoryApp(serve_path)
# The file exists, but is outside the served dir.
self.assertEqual(403, get_response(app, '/../bar').status_code)
def test_file_app_arguments(self):
app = static.DirectoryApp(self.test_dir, content_type='xxx/yyy')
create_file('abcde', self.test_dir, 'bar')
resp = get_response(app, '/bar')
self.assertEqual(200, resp.status_code)
self.assertEqual('xxx/yyy', resp.content_type)
def test_file_app_factory(self):
def make_fileapp(*args, **kwargs):
make_fileapp.called = True
return Response()
make_fileapp.called = False
app = static.DirectoryApp(self.test_dir)
app.make_fileapp = make_fileapp
create_file('abcde', self.test_dir, 'bar')
get_response(app, '/bar')
self.assertTrue(make_fileapp.called)
def test_must_serve_directory(self):
serve_path = create_file('abcde', self.test_dir, 'bar')
self.assertRaises(IOError, static.DirectoryApp, serve_path)
def test_index_page(self):
os.mkdir(os.path.join(self.test_dir, 'index-test'))
create_file(bytes_('index'), self.test_dir, 'index-test', 'index.html')
app = static.DirectoryApp(self.test_dir)
resp = get_response(app, '/index-test')
self.assertEqual(resp.status_code, 301)
self.assertTrue(resp.location.endswith('/index-test/'))
resp = get_response(app, '/index-test?test')
self.assertTrue(resp.location.endswith('/index-test/?test'))
resp = get_response(app, '/index-test/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.body, bytes_('index'))
self.assertEqual(resp.content_type, 'text/html')
resp = get_response(app, '/index-test/index.html')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.body, bytes_('index'))
redir_app = static.DirectoryApp(self.test_dir, hide_index_with_redirect=True)
resp = get_response(redir_app, '/index-test/index.html')
self.assertEqual(resp.status_code, 301)
self.assertTrue(resp.location.endswith('/index-test/'))
resp = get_response(redir_app, '/index-test/index.html?test')
self.assertTrue(resp.location.endswith('/index-test/?test'))
page_app = static.DirectoryApp(self.test_dir, index_page='something-else.html')
self.assertEqual(get_response(page_app, '/index-test/').status_code, 404)
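# --- Hedged usage sketch (not part of the original test module) ------------
# The tests above drive webob.static through the get_response()/create_file()
# helpers defined at the top of this file.  The function below sketches the
# same pattern for serving a single file out of a directory; it is
# illustrative only and is never collected or run by unittest.
def _example_directory_app(tmp_dir):
    create_file('hello', tmp_dir, 'greeting.txt')
    app = static.DirectoryApp(tmp_dir)
    resp = get_response(app, '/greeting.txt')
    return resp.status_code, resp.body  # expected: (200, b'hello')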
|
|
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Match,
Instruction,
SetNwTTLAction,
DecNwTTLAction,
OutputAction)
from pybvc.common.utils import load_dict_from_file
from pybvc.common.status import STATUS
from pybvc.common.constants import ETH_TYPE_IPv4
def delete_flows(ofswitch, table_id, flow_ids):
for flow_id in flow_ids:
result = ofswitch.delete_flow(table_id, flow_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow with id of '%s' successfully removed "
"from the Controller" % flow_id)
else:
print ("!!!Flow '%s' removal error, reason: %s" %
(flow_id, status.brief()))
def of_demo_41():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit(0)
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 41 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'" %
(ctrlIpAddr, nodeName))
first_flow_id = 110
# ---------------------------------------------------
# First flow entry
# ---------------------------------------------------
table_id = 0
flow_id = first_flow_id
flow_name = "Modify IP TTL example1"
priority = 800
cookie = 1200
match_in_port = 1
match_eth_type = ETH_TYPE_IPv4
match_ipv4_dst_addr = "10.0.0.0/8"
act_mod_ip_ttl = 3
act_out_port = 2
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)\n"
" IPv4 Destination Address (%s)" %
(match_in_port,
hex(match_eth_type),
match_ipv4_dst_addr))
print (" Actions: Set IP TTL (%s)\n"
" Output (%s)" %
(act_mod_ip_ttl, act_out_port))
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry1 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry1.set_flow_table_id(table_id)
flow_entry1.set_flow_name(flow_name)
flow_entry1.set_flow_id(flow_id)
flow_entry1.set_flow_cookie(cookie)
flow_entry1.set_flow_priority(priority)
flow_entry1.set_flow_hard_timeout(0)
flow_entry1.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = SetNwTTLAction(action_order)
action.set_ttl(act_mod_ip_ttl)
instruction.add_apply_action(action)
action_order += 1
action = OutputAction(action_order)
action.set_outport(act_out_port)
instruction.add_apply_action(action)
flow_entry1.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
match.set_ipv4_dst(match_ipv4_dst_addr)
flow_entry1.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry1.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry1)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
# ---------------------------------------------------
# Second flow entry
# ---------------------------------------------------
table_id = 0
flow_id += 1
flow_name = "Modify IP TTL example2"
priority = 800
cookie = 1200
match_in_port = 2
match_eth_type = ETH_TYPE_IPv4
match_ipv4_src_addr = "10.0.0.0/8"
act_out_port = 1
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)\n"
" IPv4 Source Address (%s)" %
(match_in_port,
hex(match_eth_type),
match_ipv4_src_addr))
print (" Actions: Decrement IP TTL\n"
" Output (%s)" %
(act_out_port))
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry2 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry2.set_flow_table_id(table_id)
flow_entry2.set_flow_name(flow_name)
flow_entry2.set_flow_id(flow_id)
flow_entry2.set_flow_cookie(cookie)
flow_entry2.set_flow_priority(priority)
flow_entry2.set_flow_hard_timeout(0)
flow_entry2.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = DecNwTTLAction(action_order)
instruction.add_apply_action(action)
action_order += 1
action = OutputAction(action_order)
action.set_outport(act_out_port)
instruction.add_apply_action(action)
flow_entry2.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
match.set_ipv4_src(match_ipv4_src_addr)
flow_entry2.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry2.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry2)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
print ("\n")
print ("<<< Delete flows from the Controller's cache "
"and from the table '%s' on the '%s' node" % (table_id, nodeName))
time.sleep(rundelay)
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_41()
|
|
"""Convert compiler package AST nodes into Python source code.
The entry point is the L{pp} function.
This is mostly used to present certain nodes in the rendered documentation -- for
example any default values for a function's arguments are rendered using L{pp}.
The code was stolen from exarkun's svn.twistedmatrix.com sandbox:
U{http://twistedmatrix.com/trac/browser/sandbox/exarkun/ast/ast_pp.py}
"""
from cStringIO import StringIO
from compiler import walk
class SourceWriter(object):
_i = 0
def __init__(self):
self.s = StringIO()
def w(self, s):
self.s.write(s)
def nl(self):
self.s.write('\n')
self.s.write(' ' * 4 * self._i)
def indent(self):
self._i += 1
self.nl()
def dedent(self):
self._i -= 1
self.nl()
def visitModule(self, node):
if node.doc is not None:
            self.w(repr(node.doc))
            self.nl()
walk(node.node, self)
def visitStmt(self, node):
for n in node.getChildren():
walk(n, self)
def _functionSignature(self, node, fmt):
if node.defaults:
nargs = len(node.argnames)
ndefs = len(node.defaults)
            noDefaults = nargs - ndefs
s = ', '.join(node.argnames[:noDefaults])
if ndefs < nargs:
argdefs = zip(node.argnames[noDefaults:], node.defaults)
                s = s + ', ' + ', '.join(
                    ['%s=%s' % (name, pp(default)) for (name, default) in argdefs])
else:
s = ', '.join(node.argnames)
self.w(fmt % (s,))
def visitLambda(self, node):
self._functionSignature(node, 'lambda %s: ')
walk(node.code, self)
def visitFunction(self, node):
self._functionSignature(node, 'def %s(%%s):' % node.name)
self.indent()
try:
walk(node.code, self)
finally:
self.dedent()
def visitAssign(self, node):
walk(node.nodes[0], self)
self.w(' = ')
walk(node.expr, self)
self.nl()
def visitAssName(self, node):
self.w(node.name)
def visitCallFunc(self, node):
walk(node.node, self)
self.w('(')
for a in node.args[:-1]:
walk(a, self)
self.w(', ')
for a in node.args[-1:]:
walk(a, self)
self.w(')')
def visitListComp(self, node):
self.w('[')
walk(node.expr, self)
for q in node.quals:
walk(q, self)
self.w(']')
def visitList(self, node):
self.w('[')
for a in node.nodes[:-1]:
walk(a, self)
self.w(', ')
for a in node.nodes[-1:]:
walk(a, self)
self.w(']')
def visitSet(self, node):
self.w('{')
for a in node.nodes[:-1]:
walk(a, self)
self.w(', ')
for a in node.nodes[-1:]:
walk(a, self)
self.w('}')
def visitListCompFor(self, node):
self.w(' for ')
walk(node.assign, self)
self.w(' in ')
walk(node.list, self)
for expr in node.ifs:
self.w(' if ')
walk(expr, self)
def visitName(self, node):
self.w(node.name)
def visitDiscard(self, node):
walk(node.expr, self)
self.nl()
def visitPrintnl(self, node):
self.w('print ')
if node.dest:
self.w('>>')
walk(node.dest, self)
self.w(', ')
for e in node.nodes:
walk(e, self)
self.nl()
def visitGetattr(self, node):
walk(node.expr, self)
self.w('.')
self.w(node.attrname)
def visitImport(self, node):
self.w('import ')
for (mod, as_) in node.names:
self.w(mod)
if as_ is not None:
self.w(' as ')
self.w(as_)
self.w(', ')
self.nl()
def visitFrom(self, node):
self.w('from ')
self.w(node.modname)
self.w(' import ')
for (mod, as_) in node.names:
self.w(mod)
if as_ is not None:
self.w(' as ')
self.w(as_)
self.w(', ')
self.nl()
def visitConst(self, node):
self.w(repr(node.value))
def visitReturn(self, node):
self.w('return ')
walk(node.value, self)
self.nl()
def visitClass(self, node):
self.w('class ')
self.w(node.name)
if node.bases:
self.w('(')
for b in node.bases:
walk(b, self)
self.w(', ')
self.w('):')
self.indent()
try:
if node.doc is not None:
self.w(repr(node.doc))
walk(node.code, self)
finally:
self.dedent()
def visitAssAttr(self, node):
walk(node.expr, self)
self.w('.')
self.w(node.attrname)
def visitMul(self, node):
walk(node.left, self)
self.w(' * ')
walk(node.right, self)
def visitSub(self, node):
walk(node.left, self)
self.w(' - ')
walk(node.right, self)
def visitAdd(self, node):
walk(node.left, self)
self.w(' + ')
walk(node.right, self)
def visitPower(self, node):
walk(node.left, self)
self.w('**')
walk(node.right, self)
def visitMod(self, node):
walk(node.left, self)
self.w(' % ')
walk(node.right, self)
def visitAugAssign(self, node):
walk(node.node, self)
self.w(' ')
self.w(node.op)
self.w(' ')
walk(node.expr, self)
self.nl()
def visitIf(self, node):
keyword = 'if'
for (cond, body) in node.tests:
self.w(keyword)
self.w(' ')
walk(cond, self)
self.w(':')
self.indent()
try:
walk(body, self)
finally:
self.dedent()
keyword = 'elif'
if node.else_:
self.w('else:')
self.indent()
try:
walk(node.else_, self)
finally:
self.dedent()
def visitCompare(self, node):
walk(node.expr, self)
for (op, arg) in node.ops:
self.w(' ')
self.w(op)
self.w(' ')
walk(arg, self)
def visitFor(self, node):
self.w('for ')
walk(node.assign, self)
self.w(' in ')
walk(node.list, self)
self.w(':')
self.indent()
try:
walk(node.body, self)
finally:
self.dedent()
if node.else_:
self.w('else:')
self.indent()
try:
walk(node.else_, self)
finally:
self.dedent()
def visitSlice(self, node):
walk(node.expr, self)
self.w('[')
if node.lower:
walk(node.lower, self)
self.w(':')
if node.upper:
walk(node.upper, self)
self.w(']')
def visitTuple(self, node):
self.w('(')
if len(node.nodes) == 0:
pass
elif len(node.nodes) == 1:
walk(node.nodes[0], self)
self.w(',')
else:
for expr in node.nodes[:-1]:
walk(expr, self)
self.w(', ')
walk(node.nodes[-1], self)
self.w(')')
def visitTryFinally(self, node):
self.w('try:')
self.indent()
try:
walk(node.body, self)
finally:
self.dedent()
self.w('finally:')
self.indent()
try:
walk(node.final, self)
finally:
self.dedent()
def visitSubscript(self, node):
walk(node.expr, self)
self.w('[')
walk(node.subs[0], self)
self.w(']')
def visitUnarySub(self, node):
self.w('-')
walk(node.expr, self)
def visitAssTuple(self, node):
self.w('(')
for expr in node.nodes:
walk(expr, self)
self.w(', ')
self.w(')')
def visitRaise(self, node):
self.w('raise ')
walk(node.expr1, self)
if node.expr2:
self.w(', ')
walk(node.expr2, self)
if node.expr3:
self.w(', ')
walk(node.expr3, self)
self.nl()
def visitDict(self, node):
self.w('{')
for (k, v) in node.items[:-1]:
walk(k, self)
self.w(':')
walk(v, self)
self.w(', ')
for (k, v) in node.items[-1:]:
walk(k, self)
self.w(':')
walk(v, self)
self.w('}')
def __str__(self):
return self.s.getvalue()
def pp(ast):
"""Convert C{ast} to Python source.
@param ast: The node to render into Python source.
"""
sw = SourceWriter()
walk(ast, sw)
return sw.s.getvalue()
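# --- Hedged usage sketch (not part of the original module) -----------------
# pp() is the entry point described in the module docstring.  The helper
# below shows it on a throwaway source string; it is Python 2 only, since
# the `compiler` package does not exist on Python 3, and it is never called
# automatically.
def _pp_example():
    from compiler import parse
    # Hypothetical input exercising assignment, a list comprehension and a call.
    return pp(parse("x = [a * 2 for a in range(3)]"))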
if __name__ == '__main__':
from compiler import parse
import sys
ast = parse(sys.argv[1])
print ast
print pp(ast)
|
|
# Copyright (c) 2012 OpenStack, LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import mock
import webob.exc
from quantum.api.v2 import attributes as attr
from quantum.common.test_lib import test_config
from quantum import context
from quantum.db import db_base_plugin_v2
from quantum.db import securitygroups_db
from quantum.extensions import securitygroup as ext_sg
from quantum.openstack.common import cfg
from quantum.tests.unit import test_db_plugin
DB_PLUGIN_KLASS = ('quantum.tests.unit.test_extension_security_group.'
'SecurityGroupTestPlugin')
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
def etcdir(*p):
return os.path.join(ETCDIR, *p)
class SecurityGroupTestExtensionManager(object):
def get_resources(self):
return ext_sg.Securitygroup.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class SecurityGroupsTestCase(test_db_plugin.QuantumDbPluginV2TestCase):
def _create_security_group(self, fmt, name, description, external_id=None,
**kwargs):
data = {'security_group': {'name': name,
'tenant_id': kwargs.get('tenant_id',
'test_tenant'),
'description': description}}
if external_id:
data['security_group']['external_id'] = external_id
security_group_req = self.new_create_request('security-groups', data,
fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
security_group_req.environ['quantum.context'] = (
context.Context('', kwargs['tenant_id']))
return security_group_req.get_response(self.ext_api)
def _build_security_group_rule(self, security_group_id, direction,
protocol, port_range_min, port_range_max,
source_ip_prefix=None, source_group_id=None,
external_id=None, tenant_id='test_tenant',
ethertype='IPv4'):
data = {'security_group_rule': {'security_group_id': security_group_id,
'direction': direction,
'protocol': protocol,
'ethertype': ethertype,
'port_range_min': port_range_min,
'port_range_max': port_range_max,
                                        'tenant_id': tenant_id}}
if external_id:
data['security_group_rule']['external_id'] = external_id
if source_ip_prefix:
data['security_group_rule']['source_ip_prefix'] = source_ip_prefix
if source_group_id:
data['security_group_rule']['source_group_id'] = source_group_id
return data
def _create_security_group_rule(self, fmt, rules, **kwargs):
security_group_rule_req = self.new_create_request(
'security-group-rules', rules, fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
security_group_rule_req.environ['quantum.context'] = (
context.Context('', kwargs['tenant_id']))
return security_group_rule_req.get_response(self.ext_api)
def _make_security_group(self, fmt, name, description, external_id=None,
**kwargs):
res = self._create_security_group(fmt, name, description,
external_id, **kwargs)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
def _make_security_group_rule(self, fmt, rules, **kwargs):
res = self._create_security_group_rule(self.fmt, rules)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
@contextlib.contextmanager
def security_group(self, name='webservers', description='webservers',
external_id=None, fmt=None, no_delete=False):
if not fmt:
fmt = self.fmt
security_group = self._make_security_group(fmt, name, description,
external_id)
try:
yield security_group
finally:
if not no_delete:
self._delete('security-groups',
security_group['security_group']['id'])
@contextlib.contextmanager
def security_group_rule(self, security_group_id='4cd70774-cc67-4a87-9b39-7'
'd1db38eb087',
direction='ingress', protocol='tcp',
port_range_min='22', port_range_max='22',
source_ip_prefix=None, source_group_id=None,
external_id=None, fmt=None, no_delete=False,
ethertype='IPv4'):
if not fmt:
fmt = self.fmt
rule = self._build_security_group_rule(security_group_id,
direction,
protocol, port_range_min,
port_range_max,
source_ip_prefix,
source_group_id,
external_id,
ethertype=ethertype)
security_group_rule = self._make_security_group_rule(self.fmt, rule)
try:
yield security_group_rule
finally:
if not no_delete:
self._delete('security-group-rules',
security_group_rule['security_group_rule']['id'])
class SecurityGroupsTestCaseXML(SecurityGroupsTestCase):
fmt = 'xml'
class SecurityGroupTestPlugin(db_base_plugin_v2.QuantumDbPluginV2,
securitygroups_db.SecurityGroupDbMixin):
""" Test plugin that implements necessary calls on create/delete port for
associating ports with security groups.
"""
supported_extension_aliases = ["security-group"]
def create_port(self, context, port):
tenant_id = self._get_tenant_id_for_create(context, port['port'])
default_sg = self._ensure_default_security_group(context, tenant_id)
if not attr.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)):
port['port'][ext_sg.SECURITYGROUPS] = [default_sg]
session = context.session
with session.begin(subtransactions=True):
sgids = self._get_security_groups_on_port(context, port)
port = super(SecurityGroupTestPlugin, self).create_port(context,
port)
self._process_port_create_security_group(context, port['id'],
sgids)
self._extend_port_dict_security_group(context, port)
return port
def update_port(self, context, id, port):
session = context.session
with session.begin(subtransactions=True):
if ext_sg.SECURITYGROUPS in port['port']:
port['port'][ext_sg.SECURITYGROUPS] = (
self._get_security_groups_on_port(context, port))
# delete the port binding and read it with the new rules
self._delete_port_security_group_bindings(context, id)
self._process_port_create_security_group(
context, id, port['port'].get(ext_sg.SECURITYGROUPS))
port = super(SecurityGroupTestPlugin, self).update_port(
context, id, port)
self._extend_port_dict_security_group(context, port)
return port
def create_network(self, context, network):
tenant_id = self._get_tenant_id_for_create(context, network['network'])
self._ensure_default_security_group(context, tenant_id)
return super(SecurityGroupTestPlugin, self).create_network(context,
network)
def get_ports(self, context, filters=None, fields=None):
quantum_lports = super(SecurityGroupTestPlugin, self).get_ports(
context, filters)
for quantum_lport in quantum_lports:
self._extend_port_dict_security_group(context, quantum_lport)
return quantum_lports
class SecurityGroupDBTestCase(SecurityGroupsTestCase):
def setUp(self, plugin=None):
test_config['plugin_name_v2'] = DB_PLUGIN_KLASS
ext_mgr = SecurityGroupTestExtensionManager()
test_config['extension_manager'] = ext_mgr
super(SecurityGroupDBTestCase, self).setUp(plugin)
def tearDown(self):
del test_config['plugin_name_v2']
super(SecurityGroupDBTestCase, self).tearDown()
class TestSecurityGroups(SecurityGroupDBTestCase):
def test_create_security_group(self):
name = 'webservers'
description = 'my webservers'
keys = [('name', name,), ('description', description)]
with self.security_group(name, description) as security_group:
for k, v, in keys:
self.assertEqual(security_group['security_group'][k], v)
def test_create_security_group_external_id(self):
cfg.CONF.set_override('proxy_mode', True, 'SECURITYGROUP')
name = 'webservers'
description = 'my webservers'
external_id = 10
keys = [('name', name,), ('description', description),
('external_id', external_id)]
with self.security_group(name, description, external_id) as sg:
for k, v, in keys:
self.assertEqual(sg['security_group'][k], v)
def test_default_security_group(self):
with self.network():
res = self.new_list_request('security-groups')
groups = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(groups['security_groups']), 1)
def test_create_security_group_proxy_mode_not_admin(self):
cfg.CONF.set_override('proxy_mode', True, 'SECURITYGROUP')
res = self._create_security_group(self.fmt, 'webservers',
'webservers', '1',
tenant_id='bad_tenant',
set_context=True)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 403)
def test_create_security_group_no_external_id_proxy_mode(self):
cfg.CONF.set_override('proxy_mode', True, 'SECURITYGROUP')
res = self._create_security_group(self.fmt, 'webservers',
'webservers')
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 400)
def test_create_security_group_no_external_id_not_proxy_mode(self):
res = self._create_security_group(self.fmt, 'webservers',
'webservers', '1')
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 409)
def test_create_default_security_group_fail(self):
name = 'default'
description = 'my webservers'
res = self._create_security_group(self.fmt, name, description)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 409)
def test_create_security_group_duplicate_external_id(self):
cfg.CONF.set_override('proxy_mode', True, 'SECURITYGROUP')
name = 'webservers'
description = 'my webservers'
external_id = 1
with self.security_group(name, description, external_id):
res = self._create_security_group(self.fmt, name, description,
external_id)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 409)
def test_list_security_groups(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description):
res = self.new_list_request('security-groups')
groups = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(groups['security_groups']), 2)
for group in groups['security_groups']:
if group['name'] == 'default':
self.assertEquals(len(group['security_group_rules']), 2)
else:
self.assertEquals(len(group['security_group_rules']), 0)
def test_create_security_group_rule_ethertype_invalid_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
ethertype = 2
rule = self._build_security_group_rule(
security_group_id, 'ingress', 'tcp', '22', '22', None, None,
ethertype=ethertype)
res = self._create_security_group_rule('json', rule)
self.deserialize('json', res)
self.assertEqual(res.status_int, 400)
def test_create_security_group_rule_protocol_invalid_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
protocol = 2
rule = self._build_security_group_rule(
security_group_id, 'ingress', protocol, '22', '22',
None, None)
res = self._create_security_group_rule('json', rule)
self.deserialize('json', res)
self.assertEqual(res.status_int, 400)
def test_create_security_group_rule_case_insensitive(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
source_ip_prefix = "10.0.0.0/24"
protocol = 'TCP'
port_range_min = 22
port_range_max = 22
ethertype = 'ipV4'
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
source_ip_prefix,
ethertype=ethertype) as rule:
                # the lower-case value will be returned
self.assertEquals(rule['security_group_rule']['protocol'],
protocol.lower())
self.assertEquals(rule['security_group_rule']['ethertype'],
'IPv4')
def test_get_security_group(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
source_group_id = sg['security_group']['id']
res = self.new_show_request('security-groups', source_group_id)
security_group_id = sg['security_group']['id']
direction = "ingress"
source_ip_prefix = "10.0.0.0/24"
protocol = 'tcp'
port_range_min = 22
port_range_max = 22
keys = [('source_ip_prefix', source_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
source_ip_prefix):
group = self.deserialize(
self.fmt, res.get_response(self.ext_api))
sg_rule = group['security_group']['security_group_rules']
self.assertEqual(group['security_group']['id'],
source_group_id)
self.assertEqual(len(sg_rule), 1)
for k, v, in keys:
self.assertEqual(sg_rule[0][k], v)
def test_delete_security_group(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description, no_delete=True) as sg:
source_group_id = sg['security_group']['id']
self._delete('security-groups', source_group_id, 204)
def test_delete_default_security_group_fail(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
self._delete('security-groups', sg['security_groups'][0]['id'],
409)
def test_default_security_group_rules(self):
with self.network():
res = self.new_list_request('security-groups')
groups = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(groups['security_groups']), 1)
res = self.new_list_request('security-group-rules')
rules = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(rules['security_group_rules']), 2)
            # just generic rules that allow default egress and
            # inter-group communication
for rule in rules['security_group_rules']:
self.assertEqual(rule['port_range_max'], None)
self.assertEqual(rule['port_range_min'], None)
self.assertEqual(rule['protocol'], None)
def test_create_security_group_rule_source_ip_prefix(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
source_ip_prefix = "10.0.0.0/24"
protocol = 'tcp'
port_range_min = 22
port_range_max = 22
keys = [('source_ip_prefix', source_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
source_ip_prefix) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_rule_group_id(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
with self.security_group(name, description) as sg2:
security_group_id = sg['security_group']['id']
direction = "ingress"
source_group_id = sg2['security_group']['id']
protocol = 'tcp'
port_range_min = 22
port_range_max = 22
keys = [('source_group_id', source_group_id),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
source_group_id=source_group_id
) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_source_group_ip_and_ip_prefix(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
source_ip_prefix = "10.0.0.0/24"
protocol = 'tcp'
port_range_min = 22
port_range_max = 22
source_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
source_ip_prefix,
source_group_id)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 400)
def test_create_security_group_rule_bad_security_group_id(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
source_ip_prefix = "10.0.0.0/24"
protocol = 'tcp'
port_range_min = 22
port_range_max = 22
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
source_ip_prefix)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 404)
def test_create_security_group_rule_bad_tenant(self):
with self.security_group() as sg:
rule = {'security_group_rule':
{'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': '22',
'port_range_max': '22',
'tenant_id': "bad_tenant"}}
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 404)
    def test_create_security_group_rule_external_id_proxy_mode(self):
cfg.CONF.set_override('proxy_mode', True, 'SECURITYGROUP')
with self.security_group(external_id=1) as sg:
rule = {'security_group_rule':
{'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': '22',
'port_range_max': '22',
'external_id': '1',
'tenant_id': 'test_tenant',
'source_group_id': sg['security_group']['id']}}
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 201)
    def test_create_security_group_rule_external_id_not_proxy_mode(self):
with self.security_group() as sg:
rule = {'security_group_rule':
{'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': '22',
'port_range_max': '22',
'external_id': 1,
'tenant_id': 'test_tenant',
'source_group_id': sg['security_group']['id']}}
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 409)
def test_create_security_group_rule_not_admin(self):
cfg.CONF.set_override('proxy_mode', True, 'SECURITYGROUP')
with self.security_group(external_id='1') as sg:
rule = {'security_group_rule':
{'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': '22',
'port_range_max': '22',
'tenant_id': 'bad_tenant',
'external_id': 1,
'source_group_id': sg['security_group']['id']}}
res = self._create_security_group_rule(self.fmt, rule,
tenant_id='bad_tenant',
set_context=True)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 403)
def test_create_security_group_rule_bad_tenant_source_group_id(self):
with self.security_group() as sg:
res = self._create_security_group(self.fmt, 'webservers',
'webservers',
tenant_id='bad_tenant')
sg2 = self.deserialize(self.fmt, res)
rule = {'security_group_rule':
{'security_group_id': sg2['security_group']['id'],
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': '22',
'port_range_max': '22',
'tenant_id': 'bad_tenant',
'source_group_id': sg['security_group']['id']}}
res = self._create_security_group_rule(self.fmt, rule,
tenant_id='bad_tenant',
set_context=True)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 404)
def test_create_security_group_rule_bad_tenant_security_group_rule(self):
with self.security_group() as sg:
res = self._create_security_group(self.fmt, 'webservers',
'webservers',
tenant_id='bad_tenant')
self.deserialize(self.fmt, res)
rule = {'security_group_rule':
{'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': '22',
'port_range_max': '22',
'tenant_id': 'bad_tenant'}}
res = self._create_security_group_rule(self.fmt, rule,
tenant_id='bad_tenant',
set_context=True)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 404)
def test_create_security_group_rule_bad_source_group_id(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
source_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
protocol = 'tcp'
port_range_min = 22
port_range_max = 22
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
source_group_id=source_group_id)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 404)
def test_create_security_group_rule_duplicate_rules(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', 'tcp', '22', '22')
self._create_security_group_rule(self.fmt, rule)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 409)
def test_create_security_group_rule_min_port_greater_max(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', 'tcp', '50', '22')
self._create_security_group_rule(self.fmt, rule)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 400)
def test_create_security_group_rule_ports_but_no_protocol(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
with self.security_group_rule(security_group_id):
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', None, '22', '22')
self._create_security_group_rule(self.fmt, rule)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 400)
def test_list_ports_security_group(self):
with self.network() as n:
with self.subnet(n):
res = self._create_port(self.fmt, n['network']['id'])
self.deserialize(self.fmt, res)
res = self.new_list_request('ports')
ports = self.deserialize(self.fmt,
res.get_response(self.api))
port = ports['ports'][0]
                self.assertEqual(len(port[ext_sg.SECURITYGROUPS]), 1)
self._delete('ports', port['id'])
def test_update_port_with_security_group(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
res = self._create_port(self.fmt, n['network']['id'])
port = self.deserialize(self.fmt, res)
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name'],
ext_sg.SECURITYGROUPS:
[sg['security_group']['id']]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
sg['security_group']['id'])
# Test update port without security group
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name']}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
sg['security_group']['id'])
self._delete('ports', port['port']['id'])
def test_update_port_with_multiple_security_groups(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg1:
with self.security_group() as sg2:
res = self._create_port(
self.fmt, n['network']['id'],
security_groups=[sg1['security_group']['id'],
sg2['security_group']['id']])
port = self.deserialize(self.fmt, res)
self.assertEqual(len(
port['port'][ext_sg.SECURITYGROUPS]), 2)
self._delete('ports', port['port']['id'])
def test_update_port_remove_security_group_empty_list(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
res = self._create_port(self.fmt, n['network']['id'],
security_groups=(
[sg['security_group']['id']]))
port = self.deserialize(self.fmt, res)
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name'],
'security_groups': []}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(res['port'].get(ext_sg.SECURITYGROUPS),
[])
self._delete('ports', port['port']['id'])
def test_update_port_remove_security_group_none(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
res = self._create_port(self.fmt, n['network']['id'],
security_groups=(
[sg['security_group']['id']]))
port = self.deserialize(self.fmt, res)
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name'],
'security_groups': None}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(res['port'].get(ext_sg.SECURITYGROUPS),
[])
self._delete('ports', port['port']['id'])
def test_create_port_with_bad_security_group(self):
with self.network() as n:
with self.subnet(n):
res = self._create_port(self.fmt, n['network']['id'],
security_groups=['bad_id'])
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 400)
def test_create_delete_security_group_port_in_use(self):
with self.network() as n:
with self.subnet(n):
with self.security_group() as sg:
res = self._create_port(self.fmt, n['network']['id'],
security_groups=(
[sg['security_group']['id']]))
port = self.deserialize(self.fmt, res)
self.assertEqual(port['port'][ext_sg.SECURITYGROUPS][0],
sg['security_group']['id'])
# try to delete security group that's in use
res = self._delete('security-groups',
sg['security_group']['id'], 409)
# delete the blocking port
self._delete('ports', port['port']['id'])
def test_create_security_group_rule_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg:
rule1 = self._build_security_group_rule(sg['security_group']['id'],
'ingress', 'tcp', '22',
'22', '10.0.0.1/24')
rule2 = self._build_security_group_rule(sg['security_group']['id'],
'ingress', 'tcp', '23',
'23', '10.0.0.1/24')
rules = {'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 201)
def test_create_security_group_rule_bulk_emulated(self):
real_has_attr = hasattr
        # ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
with self.security_group() as sg:
rule1 = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', 'tcp', '22', '22',
'10.0.0.1/24')
rule2 = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', 'tcp', '23', '23',
'10.0.0.1/24')
rules = {'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]
}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 201)
def test_create_security_group_rule_duplicate_rule_in_post(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg:
rule = self._build_security_group_rule(sg['security_group']['id'],
'ingress', 'tcp', '22',
'22', '10.0.0.1/24')
rules = {'security_group_rules': [rule['security_group_rule'],
rule['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
rule = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 409)
def test_create_security_group_rule_duplicate_rule_in_post_emulated(self):
real_has_attr = hasattr
        # ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
with self.security_group() as sg:
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', 'tcp', '22', '22',
'10.0.0.1/24')
rules = {'security_group_rules': [rule['security_group_rule'],
rule['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
rule = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 409)
def test_create_security_group_rule_duplicate_rule_db(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg:
rule = self._build_security_group_rule(sg['security_group']['id'],
'ingress', 'tcp', '22',
'22', '10.0.0.1/24')
rules = {'security_group_rules': [rule]}
self._create_security_group_rule(self.fmt, rules)
res = self._create_security_group_rule(self.fmt, rules)
rule = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 409)
def test_create_security_group_rule_duplicate_rule_db_emulated(self):
real_has_attr = hasattr
        # ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
with self.security_group() as sg:
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', 'tcp', '22', '22',
'10.0.0.1/24')
rules = {'security_group_rules': [rule]}
self._create_security_group_rule(self.fmt, rules)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 409)
    def test_create_security_group_rule_different_security_group_ids(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk "
"security_group_rule create")
with self.security_group() as sg1:
with self.security_group() as sg2:
rule1 = self._build_security_group_rule(
sg1['security_group']['id'], 'ingress', 'tcp', '22', '22',
'10.0.0.1/24')
rule2 = self._build_security_group_rule(
sg2['security_group']['id'], 'ingress', 'tcp', '23', '23',
'10.0.0.1/24')
rules = {'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]
}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 400)
def test_create_security_group_rule_with_invalid_ethertype(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
source_ip_prefix = "10.0.0.0/24"
protocol = 'tcp'
port_range_min = 22
port_range_max = 22
source_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
source_ip_prefix,
source_group_id,
ethertype='IPv5')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 400)
def test_create_security_group_rule_with_invalid_protocol(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
source_ip_prefix = "10.0.0.0/24"
protocol = 'tcp/ip'
port_range_min = 22
port_range_max = 22
source_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
source_ip_prefix,
source_group_id)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 400)
def test_validate_port_external_id_quantum_id(self):
cfg.CONF.set_override('proxy_mode', True, 'SECURITYGROUP')
with self.network() as n:
with self.subnet(n):
sg1 = (self.deserialize(self.fmt,
self._create_security_group(self.fmt,
'foo', 'bar', '1')))
sg2 = (self.deserialize(self.fmt,
self._create_security_group(self.fmt,
'foo', 'bar', '2')))
res = self._create_port(
self.fmt, n['network']['id'],
security_groups=[sg1['security_group']['id']])
port = self.deserialize(self.fmt, res)
# This request updates the port sending the quantum security
# group id in and a nova security group id.
data = {'port': {'fixed_ips': port['port']['fixed_ips'],
'name': port['port']['name'],
ext_sg.SECURITYGROUPS:
[sg1['security_group']['external_id'],
sg2['security_group']['id']]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
                self.assertEqual(len(res['port'][ext_sg.SECURITYGROUPS]), 2)
for sg_id in res['port'][ext_sg.SECURITYGROUPS]:
# only security group id's should be
# returned and not external_ids
                    self.assertEqual(len(sg_id), 36)
self._delete('ports', port['port']['id'])
def test_validate_port_external_id_string_or_int(self):
cfg.CONF.set_override('proxy_mode', True, 'SECURITYGROUP')
with self.network() as n:
with self.subnet(n):
string_id = '1'
int_id = 2
self.deserialize(
self.fmt, self._create_security_group(self.fmt,
'foo', 'bar',
string_id))
self.deserialize(
self.fmt, self._create_security_group(self.fmt,
'foo', 'bar',
int_id))
res = self._create_port(
self.fmt, n['network']['id'],
security_groups=[string_id, int_id])
port = self.deserialize(self.fmt, res)
self._delete('ports', port['port']['id'])
def test_create_port_with_non_uuid_or_int(self):
with self.network() as n:
with self.subnet(n):
res = self._create_port(self.fmt, n['network']['id'],
security_groups=['not_valid'])
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 400)
def test_validate_port_external_id_fail(self):
cfg.CONF.set_override('proxy_mode', True, 'SECURITYGROUP')
with self.network() as n:
with self.subnet(n):
bad_id = 1
res = self._create_port(
self.fmt, n['network']['id'],
security_groups=[bad_id])
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 404)
class TestSecurityGroupsXML(TestSecurityGroups):
fmt = 'xml'
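# A minimal sketch of the request body these tests build via
# _build_security_group_rule; the field names mirror the payloads asserted
# above, while the UUID, CIDR and tenant values below are placeholders:
#
#   rule = {'security_group_rule': {
#       'security_group_id': '4cd70774-cc67-4a87-9b39-7d1db38eb087',
#       'direction': 'ingress',
#       'protocol': 'tcp',
#       'port_range_min': '22',
#       'port_range_max': '22',
#       'source_ip_prefix': '10.0.0.0/24',
#       'tenant_id': 'test_tenant'}}
#   res = self._create_security_group_rule(self.fmt, rule)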
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import mobile_app_category_constant
from google.ads.googleads.v9.services.types import (
mobile_app_category_constant_service,
)
from .transports.base import (
MobileAppCategoryConstantServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import MobileAppCategoryConstantServiceGrpcTransport
class MobileAppCategoryConstantServiceClientMeta(type):
"""Metaclass for the MobileAppCategoryConstantService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[MobileAppCategoryConstantServiceTransport]]
_transport_registry["grpc"] = MobileAppCategoryConstantServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[MobileAppCategoryConstantServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class MobileAppCategoryConstantServiceClient(
metaclass=MobileAppCategoryConstantServiceClientMeta
):
"""Service to fetch mobile app category constants."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MobileAppCategoryConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MobileAppCategoryConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
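    # Hypothetical usage sketch (the key file path is a placeholder):
    #   client = MobileAppCategoryConstantServiceClient.from_service_account_file(
    #       "/path/to/service-account.json")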
@property
def transport(self) -> MobileAppCategoryConstantServiceTransport:
"""Return the transport used by the client instance.
Returns:
MobileAppCategoryConstantServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def mobile_app_category_constant_path(mobile_app_category_id: str,) -> str:
"""Return a fully-qualified mobile_app_category_constant string."""
return "mobileAppCategoryConstants/{mobile_app_category_id}".format(
mobile_app_category_id=mobile_app_category_id,
)
@staticmethod
def parse_mobile_app_category_constant_path(path: str) -> Dict[str, str]:
"""Parse a mobile_app_category_constant path into its component segments."""
m = re.match(
r"^mobileAppCategoryConstants/(?P<mobile_app_category_id>.+?)$",
path,
)
return m.groupdict() if m else {}
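    # Round-trip sketch for the two helpers above (the category id is a
    # placeholder value):
    #   path = MobileAppCategoryConstantServiceClient.mobile_app_category_constant_path("60000")
    #   # -> "mobileAppCategoryConstants/60000"
    #   MobileAppCategoryConstantServiceClient.parse_mobile_app_category_constant_path(path)
    #   # -> {"mobile_app_category_id": "60000"}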
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[
str, MobileAppCategoryConstantServiceTransport, None
] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the mobile app category constant service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.MobileAppCategoryConstantServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, MobileAppCategoryConstantServiceTransport):
# transport is a MobileAppCategoryConstantServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = MobileAppCategoryConstantServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_mobile_app_category_constant(
self,
request: Union[
mobile_app_category_constant_service.GetMobileAppCategoryConstantRequest,
dict,
] = None,
*,
resource_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> mobile_app_category_constant.MobileAppCategoryConstant:
r"""Returns the requested mobile app category constant.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.GetMobileAppCategoryConstantRequest, dict]):
The request object. Request message for
[MobileAppCategoryConstantService.GetMobileAppCategoryConstant][google.ads.googleads.v9.services.MobileAppCategoryConstantService.GetMobileAppCategoryConstant].
resource_name (:class:`str`):
Required. Resource name of the mobile
app category constant to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.resources.types.MobileAppCategoryConstant:
A mobile application category
constant.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a mobile_app_category_constant_service.GetMobileAppCategoryConstantRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
mobile_app_category_constant_service.GetMobileAppCategoryConstantRequest,
):
request = mobile_app_category_constant_service.GetMobileAppCategoryConstantRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_mobile_app_category_constant
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("MobileAppCategoryConstantServiceClient",)
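# Hypothetical call sketch for the method above (the resource name value is a
# placeholder, and the client here uses default credentials):
#   client = MobileAppCategoryConstantServiceClient()
#   constant = client.get_mobile_app_category_constant(
#       resource_name="mobileAppCategoryConstants/60000")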
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 Grid Dynamics
# Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import utils as compute_utils
from nova import exception
from nova import network
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'floating_ips')
def make_float_ip(elem):
elem.set('id')
elem.set('ip')
elem.set('pool')
elem.set('fixed_ip')
elem.set('instance_id')
class FloatingIPTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('floating_ip',
selector='floating_ip')
make_float_ip(root)
return xmlutil.MasterTemplate(root, 1)
class FloatingIPsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('floating_ips')
elem = xmlutil.SubTemplateElement(root, 'floating_ip',
selector='floating_ips')
make_float_ip(elem)
return xmlutil.MasterTemplate(root, 1)
def _translate_floating_ip_view(floating_ip):
result = {
'id': floating_ip['id'],
'ip': floating_ip['address'],
'pool': floating_ip['pool'],
}
try:
result['fixed_ip'] = floating_ip['fixed_ip']['address']
except (TypeError, KeyError):
result['fixed_ip'] = None
try:
result['instance_id'] = floating_ip['instance']['uuid']
except (TypeError, KeyError):
result['instance_id'] = None
return {'floating_ip': result}
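# Shape of the view produced above, sketched with placeholder values:
#   {'floating_ip': {'id': 1,
#                    'ip': '10.10.10.10',
#                    'pool': 'nova',
#                    'fixed_ip': '192.168.0.3' or None,
#                    'instance_id': '<instance uuid>' or None}}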
def _translate_floating_ips_view(floating_ips):
return {'floating_ips': [_translate_floating_ip_view(ip)['floating_ip']
for ip in floating_ips]}
def get_instance_by_floating_ip_addr(self, context, address):
snagiibfa = self.network_api.get_instance_id_by_floating_address
instance_id = snagiibfa(context, address)
if instance_id:
return self.compute_api.get(context, instance_id)
def disassociate_floating_ip(self, context, instance, address):
try:
self.network_api.disassociate_floating_ip(context, instance, address)
except exception.NotAuthorized:
raise webob.exc.HTTPForbidden()
except exception.FloatingIpNotAssociated:
msg = _('Floating ip is not associated')
raise webob.exc.HTTPBadRequest(explanation=msg)
except exception.CannotDisassociateAutoAssignedFloatingIP:
msg = _('Cannot disassociate auto assigned floating ip')
raise webob.exc.HTTPForbidden(explanation=msg)
class FloatingIPController(object):
"""The Floating IPs API controller for the OpenStack API."""
def __init__(self):
self.compute_api = compute.API()
self.network_api = network.API()
super(FloatingIPController, self).__init__()
def _get_fixed_ip(self, context, fixed_ip_id):
if fixed_ip_id is None:
return None
try:
return self.network_api.get_fixed_ip(context, fixed_ip_id)
except exception.FixedIpNotFound:
return None
def _get_instance(self, context, instance_id):
return self.compute_api.get(context, instance_id)
def _set_metadata(self, context, floating_ip):
# When Quantum v2 API is used, 'fixed_ip' and 'instance' are
# already set. In this case we don't need to update the fields.
if 'fixed_ip' not in floating_ip:
fixed_ip_id = floating_ip['fixed_ip_id']
floating_ip['fixed_ip'] = self._get_fixed_ip(context,
fixed_ip_id)
if 'instance' not in floating_ip:
instance_uuid = None
if floating_ip['fixed_ip']:
instance_uuid = floating_ip['fixed_ip']['instance_uuid']
if instance_uuid:
floating_ip['instance'] = self._get_instance(context,
instance_uuid)
else:
floating_ip['instance'] = None
@wsgi.serializers(xml=FloatingIPTemplate)
def show(self, req, id):
"""Return data about the given floating ip."""
context = req.environ['nova.context']
authorize(context)
try:
floating_ip = self.network_api.get_floating_ip(context, id)
except exception.NotFound:
msg = _("Floating ip not found for id %s") % id
raise webob.exc.HTTPNotFound(explanation=msg)
self._set_metadata(context, floating_ip)
return _translate_floating_ip_view(floating_ip)
@wsgi.serializers(xml=FloatingIPsTemplate)
def index(self, req):
"""Return a list of floating ips allocated to a project."""
context = req.environ['nova.context']
authorize(context)
floating_ips = self.network_api.get_floating_ips_by_project(context)
for floating_ip in floating_ips:
self._set_metadata(context, floating_ip)
return _translate_floating_ips_view(floating_ips)
@wsgi.serializers(xml=FloatingIPTemplate)
def create(self, req, body=None):
context = req.environ['nova.context']
authorize(context)
pool = None
if body and 'pool' in body:
pool = body['pool']
try:
address = self.network_api.allocate_floating_ip(context, pool)
ip = self.network_api.get_floating_ip_by_address(context, address)
        except exception.NoMoreFloatingIps as nmfi:
if pool:
nmfi.message = _("No more floating ips in pool %s.") % pool
else:
nmfi.message = _("No more floating ips available.")
raise
return _translate_floating_ip_view(ip)
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
# get the floating ip object
try:
floating_ip = self.network_api.get_floating_ip(context, id)
except exception.NotFound:
msg = _("Floating ip not found for id %s") % id
raise webob.exc.HTTPNotFound(explanation=msg)
address = floating_ip['address']
# get the associated instance object (if any)
instance = get_instance_by_floating_ip_addr(self, context, address)
# disassociate if associated
if floating_ip.get('fixed_ip_id'):
disassociate_floating_ip(self, context, instance, address)
# release ip from project
self.network_api.release_floating_ip(context, address)
return webob.Response(status_int=202)
def _get_ip_by_id(self, context, value):
"""Checks that value is id and then returns its address."""
return self.network_api.get_floating_ip(context, value)['address']
class FloatingIPActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(FloatingIPActionController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
self.network_api = network.API()
@wsgi.action('addFloatingIp')
def _add_floating_ip(self, req, id, body):
"""Associate floating_ip to an instance."""
context = req.environ['nova.context']
authorize(context)
try:
address = body['addFloatingIp']['address']
except TypeError:
msg = _("Missing parameter dict")
raise webob.exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Address not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
instance = self.compute_api.get(context, id)
cached_nwinfo = compute_utils.get_nw_info_for_instance(instance)
if not cached_nwinfo:
msg = _('No nw_info cache associated with instance')
raise webob.exc.HTTPBadRequest(explanation=msg)
fixed_ips = cached_nwinfo.fixed_ips()
if not fixed_ips:
msg = _('No fixed ips associated to instance')
raise webob.exc.HTTPBadRequest(explanation=msg)
# TODO(tr3buchet): this will associate the floating IP with the
# first fixed_ip an instance has. This should be
# changed to support specifying a particular fixed_ip if
# multiple exist.
if len(fixed_ips) > 1:
msg = _('multiple fixed_ips exist, using the first: %s')
LOG.warning(msg, fixed_ips[0]['address'])
try:
self.network_api.associate_floating_ip(context, instance,
floating_address=address,
fixed_address=fixed_ips[0]['address'])
except exception.FloatingIpAssociated:
msg = _('floating ip is already associated')
raise webob.exc.HTTPBadRequest(explanation=msg)
except exception.NoFloatingIpInterface:
msg = _('l3driver call to add floating ip failed')
raise webob.exc.HTTPBadRequest(explanation=msg)
except (exception.FloatingIpNotFoundForAddress,
exception.NotAuthorized):
msg = _('floating ip not found')
raise webob.exc.HTTPNotFound(explanation=msg)
except Exception:
msg = _('Error. Unable to associate floating ip')
LOG.exception(msg)
raise webob.exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
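    # Hypothetical request body for this action (the address is a placeholder):
    #   {"addFloatingIp": {"address": "10.10.10.10"}}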
@wsgi.action('removeFloatingIp')
def _remove_floating_ip(self, req, id, body):
"""Dissociate floating_ip from an instance."""
context = req.environ['nova.context']
authorize(context)
try:
address = body['removeFloatingIp']['address']
except TypeError:
msg = _("Missing parameter dict")
raise webob.exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Address not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
# get the floating ip object
try:
floating_ip = self.network_api.get_floating_ip_by_address(context,
address)
except exception.FloatingIpNotFoundForAddress:
msg = _("floating ip not found")
raise webob.exc.HTTPNotFound(explanation=msg)
# get the associated instance object (if any)
instance = get_instance_by_floating_ip_addr(self, context, address)
# disassociate if associated
if (instance and
floating_ip.get('fixed_ip_id') and
(uuidutils.is_uuid_like(id) and
[instance['uuid'] == id] or
[instance['id'] == id])[0]):
disassociate_floating_ip(self, context, instance, address)
return webob.Response(status_int=202)
else:
return webob.Response(status_int=404)
class Floating_ips(extensions.ExtensionDescriptor):
"""Floating IPs support."""
name = "FloatingIps"
alias = "os-floating-ips"
namespace = "http://docs.openstack.org/compute/ext/floating_ips/api/v1.1"
updated = "2011-06-16T00:00:00+00:00"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-floating-ips',
FloatingIPController(),
member_actions={})
resources.append(res)
return resources
def get_controller_extensions(self):
controller = FloatingIPActionController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2014 Rackspace, Inc.
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo.utils import excutils
from oslo_config import cfg
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.common import utils
from ironic.drivers import base
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import deploy_utils
from ironic import objects
from ironic.openstack.common import log
agent_opts = [
cfg.IntOpt('heartbeat_timeout',
default=300,
help='Maximum interval (in seconds) for agent heartbeats.'),
]
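# Example (hypothetical) ironic.conf override for the option registered below
# under the [agent] group:
#   [agent]
#   heartbeat_timeout = 600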
CONF = cfg.CONF
CONF.register_opts(agent_opts, group='agent')
LOG = log.getLogger(__name__)
def _time():
"""Broken out for testing."""
return time.time()
def _get_client():
client = agent_client.AgentClient()
return client
class BaseAgentVendor(base.VendorInterface):
def __init__(self):
self.supported_payload_versions = ['2']
self._client = _get_client()
def continue_deploy(self, task, **kwargs):
"""Continues the deployment of baremetal node.
This method continues the deployment of the baremetal node after
        the ramdisk has been booted.
:param task: a TaskManager instance
"""
pass
def deploy_is_done(self, task):
"""Check if the deployment is already completed.
:returns: True if the deployment is completed. False otherwise
"""
pass
def reboot_to_instance(self, task, **kwargs):
"""Method invoked after the deployment is completed.
:param task: a TaskManager instance
"""
pass
def get_properties(self):
"""Return the properties of the interface.
:returns: dictionary of <property name>:<property description> entries.
"""
# NOTE(jroll) all properties are set by the driver,
# not by the operator.
return {}
def validate(self, task, method, **kwargs):
"""Validate the driver-specific Node deployment info.
No validation necessary.
:param task: a TaskManager instance
:param method: method to be validated
"""
pass
def driver_validate(self, method, **kwargs):
"""Validate the driver deployment info.
:param method: method to be validated.
"""
version = kwargs.get('version')
if not version:
raise exception.MissingParameterValue(_('Missing parameter '
'version'))
if version not in self.supported_payload_versions:
raise exception.InvalidParameterValue(_('Unknown lookup '
'payload version: %s')
% version)
@base.passthru(['POST'])
def heartbeat(self, task, **kwargs):
"""Method for agent to periodically check in.
The agent should be sending its agent_url (so Ironic can talk back)
as a kwarg. kwargs should have the following format::
{
'agent_url': 'http://AGENT_HOST:AGENT_PORT'
}
AGENT_PORT defaults to 9999.
"""
node = task.node
driver_internal_info = node.driver_internal_info
LOG.debug(
'Heartbeat from %(node)s, last heartbeat at %(heartbeat)s.',
{'node': node.uuid,
'heartbeat': driver_internal_info.get('agent_last_heartbeat')})
driver_internal_info['agent_last_heartbeat'] = int(_time())
try:
driver_internal_info['agent_url'] = kwargs['agent_url']
except KeyError:
raise exception.MissingParameterValue(_('For heartbeat operation, '
'"agent_url" must be '
'specified.'))
node.driver_internal_info = driver_internal_info
node.save()
# Async call backs don't set error state on their own
# TODO(jimrollenhagen) improve error messages here
msg = _('Failed checking if deploy is done.')
try:
if node.provision_state == states.DEPLOYWAIT:
msg = _('Node failed to get image for deploy.')
self.continue_deploy(task, **kwargs)
elif (node.provision_state == states.DEPLOYING and
self.deploy_is_done(task)):
msg = _('Node failed to move to active state.')
self.reboot_to_instance(task, **kwargs)
except Exception:
LOG.exception(_LE('Async exception for %(node)s: %(msg)s'),
{'node': node,
'msg': msg})
deploy_utils.set_failed_state(task, msg)
@base.driver_passthru(['POST'], async=False)
def lookup(self, context, **kwargs):
"""Find a matching node for the agent.
Method to be called the first time a ramdisk agent checks in. This
can be because this is a node just entering decom or a node that
rebooted for some reason. We will use the mac addresses listed in the
kwargs to find the matching node, then return the node object to the
        agent. The agent can then use that UUID to call the node vendor
passthru method.
        Currently, we don't handle the case where the agent doesn't have
a matching node (i.e. a brand new, never been in Ironic node).
kwargs should have the following format::
{
"version": "2"
"inventory": {
"interfaces": [
{
"name": "eth0",
"mac_address": "00:11:22:33:44:55",
"switch_port_descr": "port24"
"switch_chassis_descr": "tor1"
}, ...
], ...
}
}
        The interfaces list should contain the non-IPMI MAC addresses in
        the form aa:bb:cc:dd:ee:ff.
This method will also return the timeout for heartbeats. The driver
will expect the agent to heartbeat before that timeout, or it will be
considered down. This will be in a root level key called
'heartbeat_timeout'
:raises: NotFound if no matching node is found.
:raises: InvalidParameterValue with unknown payload version
"""
inventory = kwargs.get('inventory')
interfaces = self._get_interfaces(inventory)
mac_addresses = self._get_mac_addresses(interfaces)
node = self._find_node_by_macs(context, mac_addresses)
LOG.debug('Initial lookup for node %s succeeded.', node.uuid)
# Only support additional hardware in v2 and above. Grab all the
# top level keys in inventory that aren't interfaces and add them.
# Nest it in 'hardware' to avoid namespace issues
hardware = {
'hardware': {
'network': interfaces
}
}
for key, value in kwargs.items():
if key != 'interfaces':
hardware['hardware'][key] = value
return {
'heartbeat_timeout': CONF.agent.heartbeat_timeout,
'node': node
}
def _get_interfaces(self, inventory):
interfaces = []
try:
interfaces = inventory['interfaces']
except (KeyError, TypeError):
raise exception.InvalidParameterValue(_(
'Malformed network interfaces lookup: %s') % inventory)
return interfaces
def _get_mac_addresses(self, interfaces):
"""Returns MACs for the network devices."""
mac_addresses = []
for interface in interfaces:
try:
mac_addresses.append(utils.validate_and_normalize_mac(
interface.get('mac_address')))
except exception.InvalidMAC:
LOG.warning(_LW('Malformed MAC: %s'), interface.get(
'mac_address'))
return mac_addresses
def _find_node_by_macs(self, context, mac_addresses):
"""Get nodes for a given list of MAC addresses.
Given a list of MAC addresses, find the ports that match the MACs
and return the node they are all connected to.
:raises: NodeNotFound if the ports point to multiple nodes or no
nodes.
"""
ports = self._find_ports_by_macs(context, mac_addresses)
if not ports:
raise exception.NodeNotFound(_(
                'No ports matching the given MAC addresses %s exist in the '
'database.') % mac_addresses)
node_id = self._get_node_id(ports)
try:
node = objects.Node.get_by_id(context, node_id)
except exception.NodeNotFound:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Could not find matching node for the '
'provided MACs %s.'), mac_addresses)
return node
def _find_ports_by_macs(self, context, mac_addresses):
"""Get ports for a given list of MAC addresses.
Given a list of MAC addresses, find the ports that match the MACs
and return them as a list of Port objects, or an empty list if there
        are no matches.
"""
ports = []
for mac in mac_addresses:
# Will do a search by mac if the mac isn't malformed
try:
port_ob = objects.Port.get_by_address(context, mac)
ports.append(port_ob)
except exception.PortNotFound:
LOG.warning(_LW('MAC address %s not found in database'), mac)
return ports
def _get_node_id(self, ports):
"""Get a node ID for a list of ports.
Given a list of ports, either return the node_id they all share or
raise a NotFound if there are multiple node_ids, which indicates some
ports are connected to one node and the remaining port(s) are connected
to one or more other nodes.
:raises: NodeNotFound if the MACs match multiple nodes. This
could happen if you swapped a NIC from one server to another and
don't notify Ironic about it or there is a MAC collision (since
they're not guaranteed to be unique).
"""
# See if all the ports point to the same node
node_ids = set(port_ob.node_id for port_ob in ports)
if len(node_ids) > 1:
raise exception.NodeNotFound(_(
'Ports matching mac addresses match multiple nodes. MACs: '
'%(macs)s. Port ids: %(port_ids)s') %
{'macs': [port_ob.address for port_ob in ports], 'port_ids':
[port_ob.uuid for port_ob in ports]}
)
# Only have one node_id left, return it.
return node_ids.pop()
|
|
import pytest
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
RegistrationProviderFactory,
AuthUserFactory
)
from django.contrib.auth.models import Group
from osf.models import RegistrationSchema
from waffle.models import Flag
from osf.migrations import update_provider_auth_groups
from osf.features import EGAP_ADMINS
@pytest.mark.django_db
class TestRegistrationProviderSchemas:
@pytest.fixture()
def user(self):
return AuthUserFactory()
@pytest.fixture()
def egap_flag(self):
flag = Flag.objects.get(name='egap_admins')
flag.everyone = True
flag.save()
return flag
@pytest.fixture()
def schema(self):
reg_schema = RegistrationSchema.objects.get(name='OSF Preregistration', schema_version=2)
reg_schema.active = True
reg_schema.save()
return reg_schema
@pytest.fixture()
def egap_schema(self):
schema = RegistrationSchema.objects.get(name='EGAP Registration', schema_version=3)
schema.visible = True
schema.active = True
schema.save()
return schema
@pytest.fixture()
def out_dated_schema(self):
reg_schema = RegistrationSchema(name='Old Schema', schema_version=1)
reg_schema.save()
return reg_schema
@pytest.fixture()
def osf_reg_schema(self):
osf_reg = RegistrationSchema.objects.get(name='OSF Preregistration', schema_version=3)
osf_reg.visible = True
osf_reg.active = True
osf_reg.save()
return osf_reg
@pytest.fixture()
def invisible_schema(self):
reg_schema = RegistrationSchema(name='Test Schema (Invisible)', schema_version=1, visible=False)
reg_schema.save()
return reg_schema
@pytest.fixture()
def inactive_schema(self):
reg_schema = RegistrationSchema(name='Test Schema (Inactive)', schema_version=1, active=False)
reg_schema.save()
return reg_schema
@pytest.fixture()
def provider(self, schema, out_dated_schema, invisible_schema, inactive_schema):
provider = RegistrationProviderFactory()
update_provider_auth_groups()
provider.schemas.add(*[schema, out_dated_schema, invisible_schema, inactive_schema])
provider.save()
return provider
@pytest.fixture()
def provider_with_v2_reg_only(self, schema):
provider = RegistrationProviderFactory()
update_provider_auth_groups()
provider.schemas.add(schema)
provider.save()
return provider
@pytest.fixture()
def provider_with_egap_only(self, egap_schema):
provider = RegistrationProviderFactory()
update_provider_auth_groups()
provider.schemas.add(egap_schema)
provider.save()
return provider
@pytest.fixture()
def provider_with_reg(self, osf_reg_schema, egap_schema, schema, out_dated_schema):
provider = RegistrationProviderFactory()
update_provider_auth_groups()
provider.schemas.add(*[osf_reg_schema, schema, out_dated_schema, egap_schema])
provider.save()
return provider
@pytest.fixture
def egap_admin(self):
user = AuthUserFactory()
user.save()
flag = Flag.objects.get(name=EGAP_ADMINS)
group = Group.objects.create(name=EGAP_ADMINS) # Just using the same name for convenience
flag.groups.add(group)
group.user_set.add(user)
group.save()
flag.save()
return user
@pytest.fixture()
def url(self, provider):
return f'/{API_BASE}providers/registrations/{provider._id}/schemas/'
@pytest.fixture()
def url_with_v2_reg_only(self, provider_with_v2_reg_only):
return f'/{API_BASE}providers/registrations/{provider_with_v2_reg_only._id}/schemas/'
@pytest.fixture()
def url_with_egap_only(self, provider_with_egap_only):
return f'/{API_BASE}providers/registrations/{provider_with_egap_only._id}/schemas/'
@pytest.fixture()
def url_with_reg(self, provider_with_reg):
return f'/{API_BASE}providers/registrations/{provider_with_reg._id}/schemas/'
def test_registration_provider_with_schema(
self,
app,
url,
schema,
egap_schema,
egap_admin,
invisible_schema,
user,
url_with_v2_reg_only,
url_with_egap_only
):
res = app.get(url, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 3
assert schema._id in [item['id'] for item in data]
assert invisible_schema._id in [item['id'] for item in data]
assert schema.name in [item['attributes']['name'] for item in data]
res = app.get(url_with_v2_reg_only, auth=egap_admin.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 1
assert data[0]['id'] == schema._id
assert data[0]['attributes']['name'] == schema.name
res = app.get(url_with_egap_only, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 0
def test_egap_registration_schema(
self,
app,
user,
egap_admin,
egap_schema,
url_with_egap_only
):
res = app.get(url_with_egap_only, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 0
res = app.get(url_with_egap_only, auth=egap_admin.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 1
assert data[0]['id'] == egap_schema._id
assert data[0]['attributes']['name'] == egap_schema.name
def test_registration_provider_with_default_schema(
self,
app,
provider_with_reg,
out_dated_schema,
user,
egap_schema,
schema,
url_with_reg,
osf_reg_schema
):
provider_with_reg.default_schema = osf_reg_schema
provider_with_reg.save()
res = app.get(url_with_reg, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert provider_with_reg.schemas.all().count() == 4
assert len(data) == 2
assert osf_reg_schema._id == data[0]['id']
assert schema.name in [item['attributes']['name'] for item in data]
|
|
# Django settings for tjrapid.
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Serve static files even when DEBUG is False.
SERVE_STATIC = False
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(BASE_DIR, 'db/tjrapid.sqlite'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Bratislava'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'sk'
gettext = lambda s: s
LANGUAGES = (
('sk', gettext('Slovak')),
('en', gettext('English')),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Paths to locale directories.
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, 'tjrapid/static/'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '_((jqebw&(e$hyk^a!f68ct(ylnwy-zhl8sx!50$ccp#yf3h%9'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['tjrapid/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
'main.context_processors.category',
]
}
},
]
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'main.middleware.LanguageMiddleware',
)
ROOT_URLCONF = 'tjrapid.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'tjrapid.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, 'tjrapid/templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
#'django.contrib.admin',
'tjrapid.apps.CustomAdminConfig',
'django.contrib.admindocs',
'django_attach',
'lib',
'main.apps.MainAppConfig',
'ob.apps.ObAppConfig',
'news.apps.NewsAppConfig',
'eventapp.apps.EventAppConfig',
)
APP_LIST = (
('main', ('Page', 'Category')),
('auth', ('User', 'Group')),
('ob', ('Event', 'Member')),
('news', ('Article', 'Comment')),
('eventapp', ('Event', 'ClassFee', 'Accommodation', 'Participant', 'Entry', 'Directory')),
('django_attach', ('Attachment', 'Temporary')),
('sites', ('Site',)),
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'root': {
'handlers': ['console'],
'level': 'INFO',
},
}
}
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.contrib.auth.context_processors.auth',
'main.context_processors.category',
)
SESSION_SAVE_EVERY_REQUEST = True
# Fonts configuration for the eventapp application.
FONTS_DIR = os.path.join(BASE_DIR, 'fonts')
RGFONT = os.path.join(FONTS_DIR, 'LiberationSans-Regular.ttf')
BDFONT = os.path.join(FONTS_DIR, 'LiberationSans-Bold.ttf')
ITFONT = os.path.join(FONTS_DIR, 'LiberationSans-Italic.ttf')
# Write outgoing mail to console if debugging is on.
if DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
# Override the settings with local modifications.
from .settings_local import *
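# A minimal settings_local.py next to this file might look like the following
# sketch (illustrative only; the actual file is deployment specific and the
# values below are assumptions, not part of this repository):
#
#   DEBUG = False
#   SERVE_STATIC = True
#   ALLOWED_HOSTS = ['example.org']
#   SECRET_KEY = 'generate-your-own-secret-key'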
|
|
from __future__ import print_function
import argparse
import ast
import cProfile
import imp
import locale
import os
import select
import sys
from collections import OrderedDict
from contextlib import contextmanager
from copy import deepcopy
from datetime import datetime
from json import dumps, loads
from signal import signal
from signal import SIGTERM, SIGUSR1
from subprocess import Popen
from subprocess import PIPE
from subprocess import call
from tempfile import NamedTemporaryFile
from threading import Event, Thread
from time import sleep, time
from syslog import syslog, LOG_ERR, LOG_INFO, LOG_WARNING
try:
from setproctitle import setproctitle
setproctitle('py3status')
except ImportError:
pass
# Used in development
enable_profiling = False
def profile(thread_run_fn):
if not enable_profiling:
return thread_run_fn
def wrapper_run(self):
"""Wrap the Thread.run() method
"""
profiler = cProfile.Profile()
try:
return profiler.runcall(thread_run_fn, self)
finally:
thread_id = getattr(self, 'ident', 'core')
profiler.dump_stats("py3status-%s.profile" % thread_id)
return wrapper_run
@contextmanager
def jsonify(string):
"""
Transform the given string to a JSON in a context manager fashion.
"""
prefix = ''
if string.startswith(','):
prefix, string = ',', string[1:]
yield (prefix, loads(string))
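# Illustrative usage of jsonify() (a sketch, not executed here): an i3bar line
# such as ',[{"name": "time", "full_text": "12:00"}]' yields the ',' prefix
# and the parsed list of blocks:
#
#   with jsonify(',[{"name": "time", "full_text": "12:00"}]') as (prefix, json_list):
#       assert prefix == ',' and json_list[0]['name'] == 'time'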
def print_line(line):
"""
Print given line to stdout (i3bar).
"""
sys.__stdout__.write('{}\n'.format(line))
sys.__stdout__.flush()
def print_stderr(line):
"""Print line to stderr
"""
print(line, file=sys.stderr)
class IOPoller:
"""
This class implements a predictive and timing-out I/O reader
using select and the poll() mechanism for greater compatibility.
"""
def __init__(self, io, eventmask=select.POLLIN):
"""
Our default is to read (POLLIN) the specified 'io' file descriptor.
"""
self.io = io
self.poller = select.poll()
self.poller.register(io, eventmask)
def readline(self, timeout=0.5):
"""
Try to read our I/O for 'timeout' seconds, return None otherwise.
        This makes calling and reading I/O non-blocking.
"""
poll_result = self.poller.poll(timeout)
if poll_result:
line = self.io.readline().strip()
if self.io == sys.stdin and line == '[':
# skip first event line wrt issue #19
line = self.io.readline().strip()
try:
# python3 compatibility code
line = line.decode()
except (AttributeError, UnicodeDecodeError):
pass
return line
else:
return None
class I3status(Thread):
"""
This class is responsible for spawning i3status and reading its output.
"""
def __init__(self, lock, i3status_config_path, standalone):
"""
Our output will be read asynchronously from 'last_output'.
"""
Thread.__init__(self)
self.error = None
self.i3status_module_names = [
'battery',
'cpu_temperature',
'cpu_usage',
'ddate',
'disk',
'ethernet',
'ipv6',
'load',
'path_exists',
'run_watch',
'time',
'tztime',
'volume',
'wireless'
]
self.json_list = None
self.json_list_ts = None
self.last_output = None
self.last_output_ts = None
self.last_prefix = None
self.lock = lock
self.ready = False
self.standalone = standalone
self.tmpfile_path = None
#
self.config = self.i3status_config_reader(i3status_config_path)
def valid_config_param(self, param_name, cleanup=False):
"""
Check if a given section name is a valid parameter for i3status.
"""
if cleanup:
            valid_config_params = [
                mod for mod in self.i3status_module_names
                if mod not in ['cpu_usage', 'ddate', 'load', 'time']
            ]
else:
valid_config_params = self.i3status_module_names + [
'general', 'order'
]
return param_name.split(' ')[0] in valid_config_params
@staticmethod
def eval_config_parameter(param):
"""
Try to evaluate the given parameter as a string or integer and return
it properly. This is used to parse i3status configuration parameters
such as 'disk "/home" {}' or worse like '"cpu_temperature" 0 {}'.
"""
params = param.split(' ')
result_list = list()
for p in params:
try:
e_value = eval(p)
if isinstance(e_value, str) or isinstance(e_value, int):
p = str(e_value)
else:
raise ValueError()
except (NameError, SyntaxError, ValueError):
pass
finally:
result_list.append(p)
return ' '.join(result_list)
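    # Illustrative behaviour of eval_config_parameter() (assumed inputs):
    # quoted tokens are unquoted while plain words are kept as-is, e.g.
    #
    #   'disk "/home"'        -> 'disk /home'
    #   '"cpu_temperature" 0' -> 'cpu_temperature 0'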
@staticmethod
def eval_config_value(value):
"""
Try to evaluate the given parameter as a string or integer and return
it properly. This is used to parse i3status configuration parameters
such as 'disk "/home" {}' or worse like '"cpu_temperature" 0 {}'.
"""
try:
e_value = eval(value)
if isinstance(e_value, str) or isinstance(e_value, int):
value = e_value
else:
raise ValueError()
except (NameError, ValueError):
pass
finally:
return value
def i3status_config_reader(self, i3status_config_path):
"""
Parse i3status.conf so we can adapt our code to the i3status config.
"""
config = {
'general': {
'color_bad': '#FF0000',
'color_degraded': '#FFFF00',
'color_good': '#00FF00',
'color_separator': '#333333',
'colors': False,
'interval': 5,
'output_format': 'i3bar'
},
'i3s_modules': [],
'on_click': {},
'order': [],
'py3_modules': []
}
# some ugly parsing
in_section = False
section_name = ''
for line in open(i3status_config_path, 'r'):
line = line.strip(' \t\n\r')
if not line or line.startswith('#'):
continue
if line.startswith('order'):
in_section = True
section_name = 'order'
if not in_section:
section_name = line.split('{')[0].strip()
section_name = self.eval_config_parameter(section_name)
if not section_name:
continue
else:
in_section = True
if section_name not in config:
config[section_name] = {}
if '{' in line:
in_section = True
if section_name and '=' in line:
section_line = line
# one liner cases
if line.endswith('}'):
section_line = section_line.split('}', -1)[0].strip()
if line.startswith(section_name + ' {'):
section_line = section_line.split(
section_name + ' {'
)[1].strip()
key = section_line.split('=')[0].strip()
key = self.eval_config_parameter(key)
value = section_line.split('=')[1].strip()
value = self.eval_config_value(value)
if section_name == 'order':
config[section_name].append(value)
line = '}'
# create an empty config for this module
if value not in config:
config[value] = {}
# detect internal modules to be loaded dynamically
if not self.valid_config_param(value):
config['py3_modules'].append(value)
else:
config['i3s_modules'].append(value)
else:
if not key.startswith('on_click'):
config[section_name][key] = value
else:
# on_click special parameters
try:
button = int(key.split()[1])
if button not in range(1, 6):
raise ValueError(
'should be 1, 2, 3, 4 or 5'
)
except IndexError as e:
raise IndexError(
'missing "button id" for "on_click" '
'parameter in section {}'.format(section_name)
)
except ValueError as e:
raise ValueError(
'invalid "button id" '
'for "on_click" parameter '
'in section {} ({})'.format(section_name, e)
)
on_c = config['on_click']
on_c[section_name] = on_c.get(section_name, {})
on_c[section_name][button] = value
if line.endswith('}'):
in_section = False
section_name = ''
# py3status only uses the i3bar protocol because it needs JSON output
if config['general']['output_format'] != 'i3bar':
raise RuntimeError(
'i3status output_format should be set' +
' to "i3bar" on {}'.format(
i3status_config_path,
' or on your own {}/.i3status.conf'.format(
os.path.expanduser('~')
)
if i3status_config_path == '/etc/i3status.conf'
else ''
)
)
# cleanup unconfigured i3status modules that have no default
for module_name in deepcopy(config['order']):
if (self.valid_config_param(module_name, cleanup=True) and
not config.get(module_name)):
config.pop(module_name)
config['i3s_modules'].remove(module_name)
config['order'].remove(module_name)
return config
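    # Illustrative sketch of what i3status_config_reader() produces for a
    # hypothetical i3status.conf (not shipped with this file):
    #
    #   order += "time"
    #   order += "weather_yahoo paris"
    #   time {
    #       format = "%H:%M"
    #   }
    #
    # would roughly yield:
    #
    #   config['order']       == ['time', 'weather_yahoo paris']
    #   config['i3s_modules'] == ['time']
    #   config['py3_modules'] == ['weather_yahoo paris']
    #   config['time']        == {'format': '%H:%M'}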
def set_responses(self, json_list):
"""
Set the given i3status responses on their respective configuration.
"""
for index, item in enumerate(self.json_list):
conf_name = self.config['i3s_modules'][index]
self.config[conf_name]['response'] = item
def set_time_modules(self):
"""
This method is executed only once after the first i3status output.
We parse all the i3status time and tztime modules and generate
a datetime for each of them while preserving (or defaulting) their
configured time format.
We also calculate a timedelta for each of them representing their
        timezone offset. It is this delta that we'll be using from now on,
        as any future time or tztime update from i3status will be overwritten
        thanks to our pre-parsed date here.
"""
default_time_format = '%Y-%m-%d %H:%M:%S'
default_tztime_format = '%Y-%m-%d %H:%M:%S %Z'
utcnow = self.last_output_ts
#
for index, item in enumerate(self.json_list):
if item.get('name') in ['time', 'tztime']:
conf_name = self.config['i3s_modules'][index]
time_name = item.get('name')
# time and tztime have different defaults
if time_name == 'time':
time_format = self.config.get(
conf_name,
{}
).get('format', default_time_format)
else:
time_format = self.config.get(
conf_name,
{}
).get('format', default_tztime_format)
# parse i3status date
i3s_time = item['full_text'].encode('UTF-8', 'replace')
try:
# python3 compatibility code
i3s_time = i3s_time.decode()
except:
pass
try:
                    # add mandatory items to the i3status time format wrt issue #18
time_fmt = time_format
for fmt in ['%Y', '%m', '%d']:
if fmt not in time_format:
time_fmt = '{} {}'.format(time_fmt, fmt)
i3s_time = '{} {}'.format(
i3s_time, datetime.now().strftime(fmt)
)
# get a datetime from the parsed string date
date = datetime.strptime(i3s_time, time_fmt)
except Exception:
err = sys.exc_info()[1]
syslog(
LOG_ERR,
'i3status set_time_modules {} failed ({})'.format(
conf_name,
err
)
)
date = datetime.now()
finally:
self.config[conf_name]['date'] = date
self.config[conf_name]['delta'] = date - utcnow
self.config[conf_name]['time_format'] = time_format
def tick_time_modules(self, json_list, force):
"""
        Adjust the 'time' and 'tztime' objects from the given json_list so
        that they are only updated every py3status interval seconds.
        This method is used to overwrite any i3status time or tztime output
        with respect to the timezone offset parsed and detected on start.
"""
utcnow = datetime.utcnow()
# every whole minute, resync our time from i3status'
# this ensures we will catch any daylight savings time change
if utcnow.second == 0:
self.set_time_modules()
#
for index, item in enumerate(json_list):
if item.get('name') in ['time', 'tztime']:
conf_name = self.config['i3s_modules'][index]
time_module = self.config[conf_name]
if force:
date = utcnow + time_module['delta']
time_module['date'] = date
else:
date = time_module['date']
time_format = self.config[conf_name].get('time_format')
# set the full_text date on the json_list to be returned
item['full_text'] = date.strftime(time_format)
json_list[index] = item
# reset the full_text date on the config object for next
# iteration to be consistent with this one
time_module['response']['full_text'] = item['full_text']
return json_list
def update_json_list(self):
"""
Copy the last json list output from i3status so that any module
can modify it without altering the original output.
This is done so that any module's alteration of a i3status output json
will not be overwritten when the next i3status output gets polled.
"""
self.json_list = deepcopy(self.last_output)
self.json_list_ts = deepcopy(self.last_output_ts)
def get_modules_output(self, json_list, py3_modules):
"""
Return the final json list to be displayed on the i3bar by taking
into account every py3status configured module and i3status'.
Simply put, this method honors the initial 'order' configured by
the user in his i3status.conf.
"""
ordered = []
for module_name in self.config['order']:
if module_name in py3_modules:
for method in py3_modules[module_name].methods.values():
ordered.append(method['last_output'])
else:
if self.config.get(module_name, {}).get('response'):
ordered.append(self.config[module_name]['response'])
return ordered
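    # Illustrative ordering (sketch): with config['order'] set to
    # ['weather_yahoo paris', 'time'] and 'weather_yahoo paris' loaded as a
    # py3status module, the returned list is the last output of every method
    # of 'weather_yahoo paris' followed by the i3status 'time' response.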
@staticmethod
def write_in_tmpfile(text, tmpfile):
"""
Write the given text in the given tmpfile in python2 and python3.
"""
try:
tmpfile.write(text)
except TypeError:
tmpfile.write(str.encode(text))
def write_tmp_i3status_config(self, tmpfile):
"""
Given a temporary file descriptor, write a valid i3status config file
based on the parsed one from 'i3status_config_path'.
"""
for section_name, conf in sorted(self.config.items()):
if section_name in ['i3s_modules', 'py3_modules']:
continue
elif section_name == 'order':
for module_name in conf:
if self.valid_config_param(module_name):
self.write_in_tmpfile(
'order += "%s"\n' % module_name,
tmpfile
)
self.write_in_tmpfile('\n', tmpfile)
elif self.valid_config_param(section_name) and conf:
self.write_in_tmpfile('%s {\n' % section_name, tmpfile)
for key, value in conf.items():
self.write_in_tmpfile(
' %s = "%s"\n' % (key, value),
tmpfile
)
self.write_in_tmpfile('}\n\n', tmpfile)
tmpfile.flush()
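    # Illustrative sketch of the generated temporary config (hypothetical
    # values): for a parsed config holding 'general', 'order' and 'time',
    # the tmpfile would roughly contain:
    #
    #   general {
    #       interval = "5"
    #       output_format = "i3bar"
    #   }
    #
    #   order += "time"
    #
    #   time {
    #       format = "%H:%M"
    #   }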
@profile
def run(self):
"""
Spawn i3status using a self generated config file and poll its output.
"""
try:
with NamedTemporaryFile(prefix='py3status_') as tmpfile:
self.write_tmp_i3status_config(tmpfile)
syslog(
LOG_INFO,
'i3status spawned using config file {}'.format(
tmpfile.name
)
)
i3status_pipe = Popen(
['i3status', '-c', tmpfile.name],
stdout=PIPE,
stderr=PIPE,
)
self.poller_inp = IOPoller(i3status_pipe.stdout)
self.poller_err = IOPoller(i3status_pipe.stderr)
self.tmpfile_path = tmpfile.name
try:
# at first, poll very quickly
# to avoid delay in first i3bar display
timeout = 0.001
# loop on i3status output
while self.lock.is_set():
line = self.poller_inp.readline(timeout)
if line:
if line.startswith('[{'):
print_line(line)
with jsonify(line) as (prefix, json_list):
self.last_output = json_list
self.last_output_ts = datetime.utcnow()
self.last_prefix = ','
self.update_json_list()
self.set_responses(json_list)
# on first i3status output, we parse
# the time and tztime modules
self.set_time_modules()
self.ready = True
elif not line.startswith(','):
if 'version' in line:
header = loads(line)
header.update({'click_events': True})
line = dumps(header)
print_line(line)
else:
timeout = 0.5
with jsonify(line) as (prefix, json_list):
self.last_output = json_list
self.last_output_ts = datetime.utcnow()
self.last_prefix = prefix
self.update_json_list()
self.set_responses(json_list)
else:
err = self.poller_err.readline(timeout)
code = i3status_pipe.poll()
if code is not None:
msg = 'i3status died'
if err:
msg += ' and said: {}'.format(err)
else:
msg += ' with code {}'.format(code)
raise IOError(msg)
else:
# poll is CPU intensive, breath a bit
sleep(timeout)
except IOError:
err = sys.exc_info()[1]
self.error = err
except OSError:
# we cleanup the tmpfile ourselves so when the delete will occur
# it will usually raise an OSError: No such file or directory
pass
def cleanup_tmpfile(self):
"""
Cleanup i3status tmp configuration file.
"""
if os.path.isfile(self.tmpfile_path):
os.remove(self.tmpfile_path)
def mock(self):
"""
Mock i3status behavior, used in standalone mode.
"""
# mock thread is_alive() method
self.is_alive = lambda: True
# mock i3status base output
init_output = [
'{"click_events": true, "version": 1}',
'[',
'[]'
]
for line in init_output:
print_line(line)
# mock i3status output parsing
self.last_output = []
self.last_output_ts = datetime.utcnow()
self.last_prefix = ','
self.update_json_list()
class Events(Thread):
"""
This class is responsible for dispatching event JSONs sent by the i3bar.
"""
def __init__(self, lock, config, modules, i3s_config):
"""
We need to poll stdin to receive i3bar messages.
"""
Thread.__init__(self)
self.config = config
self.i3s_config = i3s_config
self.last_refresh_ts = time()
self.lock = lock
self.modules = modules
self.on_click = i3s_config['on_click']
self.poller_inp = IOPoller(sys.stdin)
def dispatch(self, module, obj, event):
"""
Dispatch the event or enforce the default clear cache action.
"""
module_name = '{} {}'.format(
module.module_name,
module.module_inst
).strip()
#
if module.click_events:
# module accepts click_events, use it
module.click_event(event)
if self.config['debug']:
syslog(LOG_INFO, 'dispatching event {}'.format(event))
else:
# default button 2 action is to clear this method's cache
if self.config['debug']:
syslog(LOG_INFO, 'dispatching default event {}'.format(event))
# to make the bar more responsive to users we ask for a refresh
# of the module or of i3status if the module is an i3status one
self.refresh(module_name)
def i3bar_click_events_module(self):
"""
Detect the presence of the special i3bar_click_events.py module.
When py3status detects a module named 'i3bar_click_events.py',
it will dispatch i3status click events to this module so you can catch
them and trigger any function call based on the event.
"""
for module in self.modules.values():
if not module.click_events:
continue
if module.module_name == 'i3bar_click_events.py':
return module
else:
return False
def refresh(self, module_name):
"""
Force a cache expiration for all the methods of the given module.
We rate limit the i3status refresh to 100ms.
"""
module = self.modules.get(module_name)
if module is not None:
if self.config['debug']:
syslog(LOG_INFO, 'refresh module {}'.format(module_name))
for obj in module.methods.values():
obj['cached_until'] = time()
else:
if time() > (self.last_refresh_ts + 0.1):
if self.config['debug']:
syslog(
LOG_INFO,
'refresh i3status for module {}'.format(module_name)
)
call(['killall', '-s', 'USR1', 'i3status'])
self.last_refresh_ts = time()
def refresh_all(self, module_name):
"""
Force a full refresh of py3status and i3status modules by sending
a SIGUSR1 signal to py3status.
        We rate limit this command to 100ms to prevent abusive behavior.
"""
if time() > (self.last_refresh_ts + 0.1):
call(['killall', '-s', 'USR1', 'py3status'])
self.last_refresh_ts = time()
def on_click_dispatcher(self, module_name, command):
"""
Dispatch on_click config parameters to either:
- Our own methods for special py3status commands (listed below)
- The i3-msg program which is part of i3wm
"""
py3_commands = ['refresh', 'refresh_all']
if command is None:
return
elif command in py3_commands:
# this is a py3status command handled by this class
method = getattr(self, command)
method(module_name)
else:
# this is a i3 message
self.i3_msg(module_name, command)
# to make the bar more responsive to users we ask for a refresh
# of the module or of i3status if the module is an i3status one
self.refresh(module_name)
@staticmethod
def i3_msg(module_name, command):
"""
Execute the given i3 message and log its output.
"""
i3_msg_pipe = Popen(['i3-msg', command], stdout=PIPE)
syslog(
LOG_INFO,
'i3-msg module="{}" command="{}" stdout={}'.format(
module_name,
command,
i3_msg_pipe.stdout.read()
)
)
def i3status_mod_guess(self, instance, name):
"""
Some i3status modules output a name and instance that are different
from the configuration parameters in i3status.conf.
For example the 'disk' module will output with name 'disk_info' so
we try to be clever and figure it out here, case by case.
"""
try:
# disk_info /home
if name == 'disk_info':
name = 'disk'
# /sys/class/power_supply/BAT0/uevent
elif name == 'battery':
instance = str([int(s) for s in instance if s.isdigit()][0])
# /sys/devices/platform/coretemp.0/temp1_input
elif name == 'cpu_temperature':
instance = str([int(s) for s in instance if s.isdigit()][0])
# run_watch /var/run/openvpn.pid
elif name == 'run_watch':
for k, v in self.i3s_config.items():
if (
k.startswith('run_watch')
and isinstance(v, dict)
and v.get('pidfile') == instance
):
instance = k.split(' ', 1)[1]
break
# volume default.Master.0
elif name == 'volume':
device, mixer, mixer_idx = instance.split('.')
for k, v in self.i3s_config.items():
if (
k.startswith('volume')
and isinstance(v, dict)
and v.get('device') == device
and v.get('mixer') == mixer
and str(v.get('mixer_idx')) == mixer_idx
):
instance = k.split(' ', 1)[1]
break
else:
instance = 'master'
except:
pass
finally:
return (instance, name)
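    # Illustrative guesses (sketch): given i3bar event fields
    #
    #   name 'disk_info', instance '/home'
    #       -> ('/home', 'disk')
    #   name 'battery', instance '/sys/class/power_supply/BAT0/uevent'
    #       -> ('0', 'battery')
    #
    # so the matching 'disk /home' or 'battery 0' i3status.conf sections can
    # be looked up afterwards.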
@profile
def run(self):
"""
Wait for an i3bar JSON event, then find the right module to dispatch
the message to based on the 'name' and 'instance' of the event.
In case the module does NOT support click_events, the default
implementation is to clear the module's cache
when the MIDDLE button (2) is pressed on it.
Example event:
{'y': 13, 'x': 1737, 'button': 1, 'name': 'empty', 'instance': 'first'}
"""
while self.lock.is_set():
event = self.poller_inp.readline()
if not event:
sleep(0.20)
continue
try:
with jsonify(event) as (prefix, event):
if self.config['debug']:
syslog(LOG_INFO, 'received event {}'.format(event))
# usage variables
button = event.get('button', 0)
default_event = False
dispatched = False
instance = event.get('instance', '')
name = event.get('name', '')
# i3status module name guess
instance, name = self.i3status_mod_guess(instance, name)
if self.config['debug']:
syslog(
LOG_INFO,
'trying to dispatch event to module "{}"'.format(
'{} {}'.format(name, instance).strip()
)
)
# guess the module config name
module_name = '{} {}'.format(name, instance).strip()
# execute any configured i3-msg command
if self.on_click.get(module_name, {}).get(button):
self.on_click_dispatcher(
module_name,
self.on_click[module_name].get(button)
)
dispatched = True
# otherwise setup default action on button 2 press
elif button == 2:
default_event = True
for module in self.modules.values():
# skip modules not supporting click_events
# unless we have a default_event set
if not module.click_events and not default_event:
continue
# check for the method name/instance
for obj in module.methods.values():
if name == obj['name']:
if instance:
if instance == obj['instance']:
self.dispatch(module, obj, event)
dispatched = True
break
else:
self.dispatch(module, obj, event)
dispatched = True
break
# fall back to i3bar_click_events.py module if present
if not dispatched:
module = self.i3bar_click_events_module()
if module:
if self.config['debug']:
syslog(
LOG_INFO,
'dispatching event to i3bar_click_events'
)
self.dispatch(module, obj, event)
except Exception:
err = sys.exc_info()[1]
syslog(LOG_WARNING, 'event failed ({})'.format(err))
class Module(Thread):
"""
This class represents a user module (imported file).
    It is responsible for executing it every given interval and
caching its output based on user will.
"""
def __init__(self, lock, config, module, i3_thread, user_modules):
"""
        We need quite some stuff to occupy ourselves, don't we?
"""
Thread.__init__(self)
self.click_events = False
self.config = config
self.has_kill = False
self.i3status_thread = i3_thread
self.last_output = []
self.lock = lock
self.methods = OrderedDict()
self.module_class = None
self.module_inst = ''.join(module.split(' ')[1:])
self.module_name = module.split(' ')[0]
#
self.load_methods(module, user_modules)
@staticmethod
def load_from_file(filepath):
"""
Return user-written class object from given path.
"""
class_inst = None
expected_class = 'Py3status'
module_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])
if file_ext.lower() == '.py':
py_mod = imp.load_source(module_name, filepath)
if hasattr(py_mod, expected_class):
class_inst = py_mod.Py3status()
return class_inst
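    # Minimal shape of a user-written module file (illustrative sketch; the
    # file and method names are assumptions): the loaders above only require
    # a 'Py3status' class whose public methods return a dict containing a
    # 'full_text' key, e.g.
    #
    #   class Py3status:
    #       def hello(self, i3s_output_list, i3s_config):
    #           return {'full_text': 'hello', 'cached_until': time() + 10}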
@staticmethod
def load_from_namespace(module_name):
"""
Load a py3status bundled module.
"""
class_inst = None
name = 'py3status.modules.{}'.format(module_name)
py_mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
py_mod = getattr(py_mod, comp)
class_inst = py_mod.Py3status()
return class_inst
def clear_cache(self):
"""
Reset the cache for all methods of this module.
"""
for meth in self.methods:
self.methods[meth]['cached_until'] = time()
if self.config['debug']:
syslog(LOG_INFO, 'clearing cache for method {}'.format(meth))
def load_methods(self, module, user_modules):
"""
Read the given user-written py3status class file and store its methods.
Those methods will be executed, so we will deliberately ignore:
- private methods starting with _
- decorated methods such as @property or @staticmethod
- 'on_click' methods as they'll be called upon a click_event
- 'kill' methods as they'll be called upon this thread's exit
"""
# user provided modules take precedence over py3status provided modules
if self.module_name in user_modules:
include_path, f_name = user_modules[self.module_name]
syslog(
LOG_INFO,
'loading module "{}" from {}{}'.format(
module,
include_path,
f_name
)
)
class_inst = self.load_from_file(include_path + f_name)
# load from py3status provided modules
else:
syslog(
LOG_INFO,
'loading module "{}" from py3status.modules.{}'.format(
module,
self.module_name
)
)
class_inst = self.load_from_namespace(self.module_name)
if class_inst:
self.module_class = class_inst
# apply module configuration from i3status config
mod_config = self.i3status_thread.config.get(module, {})
for config, value in mod_config.items():
setattr(self.module_class, config, value)
# get the available methods for execution
for method in sorted(dir(class_inst)):
if method.startswith('_'):
continue
else:
m_type = type(getattr(class_inst, method))
if 'method' in str(m_type):
if method == 'on_click':
self.click_events = True
elif method == 'kill':
self.has_kill = True
else:
# the method_obj stores infos about each method
# of this module.
method_obj = {
'cached_until': time(),
'instance': None,
'last_output': {
'name': method,
'full_text': ''
},
'method': method,
'name': None,
'position': 0
}
self.methods[method] = method_obj
# done, syslog some debug info
if self.config['debug']:
syslog(
LOG_INFO,
'module "{}" click_events={} has_kill={} methods={}'.format(
module,
self.click_events,
self.has_kill,
self.methods.keys()
)
)
def click_event(self, event):
"""
Execute the 'on_click' method of this module with the given event.
"""
try:
click_method = getattr(self.module_class, 'on_click')
click_method(
self.i3status_thread.json_list,
self.i3status_thread.config['general'],
event
)
except Exception:
err = sys.exc_info()[1]
msg = 'on_click failed with ({}) for event ({})'.format(err, event)
syslog(LOG_WARNING, msg)
@profile
def run(self):
"""
On a timely fashion, execute every method found for this module.
We will respect and set a cache timeout for each method if the user
didn't already do so.
We will execute the 'kill' method of the module when we terminate.
"""
while self.lock.is_set():
# execute each method of this module
for meth, obj in self.methods.items():
my_method = self.methods[meth]
# always check the lock
if not self.lock.is_set():
break
# respect the cache set for this method
if time() < obj['cached_until']:
continue
try:
# execute method and get its output
method = getattr(self.module_class, meth)
response = method(
self.i3status_thread.json_list,
self.i3status_thread.config['general']
)
if isinstance(response, dict):
# this is a shiny new module giving a dict response
position, result = None, response
result['name'] = self.module_name
result['instance'] = self.module_inst
else:
raise TypeError('response should be a dict')
# validate the response
if 'full_text' not in result:
raise KeyError('missing "full_text" key in response')
# initialize method object
if my_method['name'] is None:
my_method['name'] = result['name']
if 'instance' in result:
my_method['instance'] = result['instance']
else:
my_method['instance'] = result['name']
# update method object cache
if 'cached_until' in result:
cached_until = result['cached_until']
else:
cached_until = time() + self.config['cache_timeout']
my_method['cached_until'] = cached_until
# update method object output
my_method['last_output'] = result
# update method object position
my_method['position'] = position
# debug info
if self.config['debug']:
syslog(
LOG_INFO,
'method {} returned {} '.format(meth, result)
)
except Exception:
err = sys.exc_info()[1]
syslog(
LOG_WARNING,
'user method {} failed ({})'.format(meth, err)
)
# don't be hasty mate, let's take it easy for now
sleep(self.config['interval'])
# check and execute the 'kill' method if present
if self.has_kill:
try:
kill_method = getattr(self.module_class, 'kill')
kill_method(
self.i3status_thread.json_list,
self.i3status_thread.config['general']
)
except Exception:
# this would be stupid to die on exit
pass
class Py3statusWrapper():
"""
This is the py3status wrapper.
"""
def __init__(self):
"""
Useful variables we'll need.
"""
self.last_refresh_ts = time()
self.lock = Event()
self.modules = {}
self.py3_modules = []
def get_config(self):
"""
        Create the py3status configuration based on the command line options we received.
"""
# get home path
home_path = os.path.expanduser('~')
# defaults
config = {
'cache_timeout': 60,
'include_paths': ['{}/.i3/py3status/'.format(home_path)],
'interval': 1
}
# package version
try:
import pkg_resources
version = pkg_resources.get_distribution('py3status').version
except:
version = 'unknown'
config['version'] = version
# i3status config file default detection
# respect i3status' file detection order wrt issue #43
i3status_config_file_candidates = [
'{}/.i3status.conf'.format(home_path),
'{}/.config/i3status/config'.format(
os.environ.get('XDG_CONFIG_HOME', home_path)
),
'/etc/i3status.conf',
'{}/i3status/config'.format(
os.environ.get('XDG_CONFIG_DIRS', '/etc/xdg')
)
]
for fn in i3status_config_file_candidates:
if os.path.isfile(fn):
i3status_config_file_default = fn
break
else:
# if none of the default files exists, we will default
# to ~/.i3/i3status.conf
i3status_config_file_default = '{}/.i3/i3status.conf'.format(
home_path
)
# command line options
        parser = argparse.ArgumentParser(
            description='The agile, python-powered, i3status wrapper',
            add_help=True
        )
parser.add_argument('-c', '--config', action="store",
dest="i3status_conf",
type=str,
default=i3status_config_file_default,
help="path to i3status config file")
parser.add_argument('-d', '--debug', action="store_true",
help="be verbose in syslog")
parser.add_argument('-i', '--include', action="append",
dest="include_paths",
help="""include user-written modules from those
directories (default ~/.i3/py3status)""")
parser.add_argument('-n', '--interval', action="store",
dest="interval",
type=float,
default=config['interval'],
help="update interval in seconds (default 1 sec)")
parser.add_argument('-s', '--standalone', action="store_true",
help="standalone mode, do not use i3status")
parser.add_argument('-t', '--timeout', action="store",
dest="cache_timeout",
type=int,
default=config['cache_timeout'],
help="""default injection cache timeout in seconds
(default 60 sec)""")
parser.add_argument('-v', '--version', action="store_true",
help="""show py3status version and exit""")
parser.add_argument('cli_command', nargs='*', help=argparse.SUPPRESS)
options = parser.parse_args()
if options.cli_command:
config['cli_command'] = options.cli_command
# only asked for version
if options.version:
from platform import python_version
print(
'py3status version {} (python {})'.format(
config['version'],
python_version()
)
)
sys.exit(0)
# override configuration and helper variables
config['cache_timeout'] = options.cache_timeout
config['debug'] = options.debug
if options.include_paths:
config['include_paths'] = options.include_paths
config['interval'] = int(options.interval)
config['standalone'] = options.standalone
config['i3status_config_path'] = options.i3status_conf
# all done
return config
def get_user_modules(self):
"""
        Search the include paths for user-provided module files matching
        the py3status modules configured in i3status.conf.
User provided modules take precedence over py3status generic modules.
"""
user_modules = dict()
if not self.py3_modules:
return user_modules
for include_path in sorted(self.config['include_paths']):
include_path = os.path.abspath(include_path) + '/'
if not os.path.isdir(include_path):
continue
for f_name in sorted(os.listdir(include_path)):
if not f_name.endswith('.py'):
continue
module_name = f_name[:-3]
# i3status.conf based behaviour (using order += 'xx')
for module in self.py3_modules:
if module_name == module.split(' ')[0]:
user_modules[module_name] = (include_path, f_name)
return user_modules
def load_modules(self, modules_list, user_modules):
"""
Load the given modules from the list (contains instance name) with
respect to the user provided modules dict.
modules_list: ['weather_yahoo paris', 'net_rate']
user_modules: {
'weather_yahoo': ('/etc/py3status.d/', 'weather_yahoo.py')
}
"""
for module in modules_list:
# ignore already provided modules (prevents double inclusion)
if module in self.modules:
continue
try:
my_m = Module(
self.lock,
self.config,
module,
self.i3status_thread,
user_modules
)
# only start and handle modules with available methods
if my_m.methods:
my_m.start()
self.modules[module] = my_m
elif self.config['debug']:
syslog(
LOG_INFO,
'ignoring module "{}" (no methods found)'.format(
module
)
)
except Exception:
err = sys.exc_info()[1]
msg = 'loading module "{}" failed ({})'.format(module, err)
self.i3_nagbar(msg, level='warning')
def setup(self):
"""
Setup py3status and spawn i3status/events/modules threads.
"""
# set the Event lock
self.lock.set()
# setup configuration
self.config = self.get_config()
if self.config.get('cli_command'):
self.handle_cli_command(self.config['cli_command'])
sys.exit()
if self.config['debug']:
syslog(
LOG_INFO,
'py3status started with config {}'.format(self.config)
)
# setup i3status thread
self.i3status_thread = I3status(
self.lock,
self.config['i3status_config_path'],
self.config['standalone']
)
if self.config['standalone']:
self.i3status_thread.mock()
else:
self.i3status_thread.start()
while not self.i3status_thread.ready:
if not self.i3status_thread.is_alive():
err = self.i3status_thread.error
raise IOError(err)
sleep(0.1)
if self.config['debug']:
syslog(
LOG_INFO,
'i3status thread {} with config {}'.format(
'started' if not self.config['standalone'] else 'mocked',
self.i3status_thread.config
)
)
# setup input events thread
self.events_thread = Events(
self.lock,
self.config,
self.modules,
self.i3status_thread.config
)
self.events_thread.start()
if self.config['debug']:
syslog(LOG_INFO, 'events thread started')
        # suppress modules' output wrt issue #20
if not self.config['debug']:
sys.stdout = open('/dev/null', 'w')
sys.stderr = open('/dev/null', 'w')
# get the list of py3status configured modules
self.py3_modules = self.i3status_thread.config['py3_modules']
# get a dict of all user provided modules
user_modules = self.get_user_modules()
if self.config['debug']:
syslog(LOG_INFO, 'user_modules={}'.format(user_modules))
if self.py3_modules:
# load and spawn i3status.conf configured modules threads
self.load_modules(self.py3_modules, user_modules)
def i3_nagbar(self, msg, level='error'):
"""
Make use of i3-nagbar to display errors and warnings to the user.
        We also make sure to log everything to keep a trace of it.
"""
msg = 'py3status: {}. '.format(msg)
msg += 'please try to fix this and reload i3wm (Mod+Shift+R)'
try:
log_level = LOG_ERR if level == 'error' else LOG_WARNING
syslog(log_level, msg)
Popen(
['i3-nagbar', '-m', msg, '-t', level],
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w')
)
except:
pass
def stop(self):
"""
Clear the Event lock, this will break all threads' loops.
"""
try:
self.lock.clear()
if self.config['debug']:
syslog(LOG_INFO, 'lock cleared, exiting')
self.i3status_thread.cleanup_tmpfile()
except:
pass
def sig_handler(self, signum, frame):
"""
SIGUSR1 was received, the user asks for an immediate refresh of the bar
so we force i3status to refresh by sending it a SIGUSR1
and we clear all py3status modules' cache.
To prevent abuse, we rate limit this function to 100ms.
"""
if time() > (self.last_refresh_ts + 0.1):
syslog(LOG_INFO, 'received USR1, forcing refresh')
# send SIGUSR1 to i3status
call(['killall', '-s', 'USR1', 'i3status'])
# clear the cache of all modules
self.clear_modules_cache()
# reset the refresh timestamp
self.last_refresh_ts = time()
else:
syslog(
LOG_INFO,
'received USR1 but rate limit is in effect, calm down'
)
def clear_modules_cache(self):
"""
For every module, reset the 'cached_until' of all its methods.
"""
for module in self.modules.values():
module.clear_cache()
def terminate(self, signum, frame):
"""
Received request to terminate (SIGTERM), exit nicely.
"""
raise KeyboardInterrupt()
@profile
def run(self):
"""
Main py3status loop, continuously read from i3status and modules
and output it to i3bar for displaying.
"""
# SIGUSR1 forces a refresh of the bar both for py3status and i3status,
# this mimics the USR1 signal handling of i3status (see man i3status)
signal(SIGUSR1, self.sig_handler)
signal(SIGTERM, self.terminate)
# initialize usage variables
delta = 0
last_delta = -1
previous_json_list = []
# main loop
while True:
# check i3status thread
if not self.i3status_thread.is_alive():
err = self.i3status_thread.error
if not err:
err = 'i3status died horribly'
self.i3_nagbar(err)
break
# check events thread
if not self.events_thread.is_alive():
# don't spam the user with i3-nagbar warnings
if not hasattr(self.events_thread, 'i3_nagbar'):
self.events_thread.i3_nagbar = True
err = 'events thread died, click events are disabled'
self.i3_nagbar(err, level='warning')
# check that every module thread is alive
for module in self.modules.values():
if not module.is_alive():
# don't spam the user with i3-nagbar warnings
if not hasattr(module, 'i3_nagbar'):
module.i3_nagbar = True
msg = 'output frozen for dead module(s) {}'.format(
','.join(module.methods.keys())
)
self.i3_nagbar(msg, level='warning')
# get output from i3status
prefix = self.i3status_thread.last_prefix
json_list = deepcopy(self.i3status_thread.json_list)
# transform time and tztime outputs from i3status
# every configured interval seconds
if (
self.config['interval'] <= 1 or (
int(delta) % self.config['interval'] == 0
and int(last_delta) != int(delta)
)
):
delta = 0
last_delta = 0
json_list = self.i3status_thread.tick_time_modules(
json_list,
force=True
)
else:
json_list = self.i3status_thread.tick_time_modules(
json_list,
force=False
)
# construct the global output
if self.modules and self.py3_modules:
# new style i3status configured ordering
json_list = self.i3status_thread.get_modules_output(
json_list,
self.modules
)
# dump the line to stdout only on change
if json_list != previous_json_list:
print_line('{}{}'.format(prefix, dumps(json_list)))
# remember the last json list output
previous_json_list = deepcopy(json_list)
# reset i3status json_list and json_list_ts
self.i3status_thread.update_json_list()
# sleep a bit before doing this again to avoid killing the CPU
delta += 0.1
sleep(0.1)
@staticmethod
def print_module_description(details, mod_name, mod_path):
"""Print module description extracted from its docstring.
"""
if mod_name == '__init__':
return
path = os.path.join(*mod_path)
try:
with open(path) as f:
module = ast.parse(f.read())
docstring = ast.get_docstring(module, clean=True)
if docstring:
short_description = docstring.split('\n')[0].rstrip('.')
print_stderr(' %-22s %s.' % (mod_name, short_description))
if details:
for description in docstring.split('\n')[1:]:
print_stderr(' ' * 25 + '%s' % description)
print_stderr(' ' * 25 + '---')
else:
print_stderr(' %-22s No docstring in %s' % (mod_name, path))
except Exception:
print_stderr(' %-22s Unable to parse %s' % (mod_name, path))
def handle_cli_command(self, cmd):
"""Handle a command from the CLI.
"""
# aliases
if cmd[0] in ['mod', 'module', 'modules']:
cmd[0] = 'modules'
# allowed cli commands
if cmd[:2] in (['modules', 'list'], ['modules', 'details']):
try:
py3_modules_path = imp.find_module('py3status')[1]
py3_modules_path += '/modules/'
if os.path.isdir(py3_modules_path):
self.config['include_paths'].append(py3_modules_path)
except:
print_stderr('Unable to locate py3status modules !')
details = cmd[1] == 'details'
user_modules = self.get_user_modules()
print_stderr('Available modules:')
for mod_name, mod_path in sorted(user_modules.items()):
self.print_module_description(details, mod_name, mod_path)
elif cmd[:2] in (['modules', 'enable'], ['modules', 'disable']):
# TODO: to be implemented
pass
else:
print_stderr('Error: unknown command')
sys.exit(1)
def main():
try:
locale.setlocale(locale.LC_ALL, '')
py3 = Py3statusWrapper()
py3.setup()
except KeyboardInterrupt:
err = sys.exc_info()[1]
py3.i3_nagbar('setup interrupted (KeyboardInterrupt)')
sys.exit(0)
except Exception:
err = sys.exc_info()[1]
py3.i3_nagbar('setup error ({})'.format(err))
py3.stop()
sys.exit(2)
try:
py3.run()
except Exception:
err = sys.exc_info()[1]
py3.i3_nagbar('runtime error ({})'.format(err))
sys.exit(3)
except KeyboardInterrupt:
pass
finally:
py3.stop()
sys.exit(0)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import time
import socket
import errno
import struct
import logging
import traceback
import random
from shadowsocks import encrypt, eventloop, shell, common
from shadowsocks.common import parse_header
# we clear at most TIMEOUTS_CLEAN_SIZE timeouts each time
TIMEOUTS_CLEAN_SIZE = 512
MSG_FASTOPEN = 0x20000000
# SOCKS command definition
CMD_CONNECT = 1
CMD_BIND = 2
CMD_UDP_ASSOCIATE = 3
# for each opening port, we have a TCP Relay
# for each connection, we have a TCP Relay Handler to handle the connection
# for each handler, we have 2 sockets:
# local: connected to the client
# remote: connected to remote server
# for each handler, it could be at one of several stages:
# as sslocal:
# stage 0 SOCKS hello received from local, send hello to local
# stage 1 addr received from local, query DNS for remote
# stage 2 UDP assoc
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
# as ssserver:
# stage 0 just jump to stage 1
# stage 1 addr received from local, query DNS for remote
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
STAGE_INIT = 0
STAGE_ADDR = 1
STAGE_UDP_ASSOC = 2
STAGE_DNS = 3
STAGE_CONNECTING = 4
STAGE_STREAM = 5
STAGE_DESTROYED = -1
# for each handler, we have 2 stream directions:
# upstream: from client to server direction
# read local and write to remote
# downstream: from server to client direction
# read remote and write to local
STREAM_UP = 0
STREAM_DOWN = 1
# for each stream, it's waiting for reading, or writing, or both
WAIT_STATUS_INIT = 0
WAIT_STATUS_READING = 1
WAIT_STATUS_WRITING = 2
WAIT_STATUS_READWRITING = WAIT_STATUS_READING | WAIT_STATUS_WRITING
BUF_SIZE = 32 * 1024
# Each instance of this class handles the poll events of one active connection.
class TCPRelayHandler(object):
def __init__(self, server, fd_to_handlers, loop, local_sock, config,
dns_resolver):
        # 'server' here is a TCPRelay instance, never forget this
self._server = server
self._fd_to_handlers = fd_to_handlers
self._loop = loop
        self._local_sock = local_sock  # the sock of the current client connection
self._remote_sock = None
self._config = config
self._dns_resolver = dns_resolver
# TCP Relay works as either sslocal or ssserver
# if is_local, this is sslocal
# self._is_local = is_local
self._stage = STAGE_INIT
self._encryptor = encrypt.Encryptor(config['password'],
config['method'])
        self._from_encryptor = encrypt.Encryptor(config['local_pwd'],
                                                 config['local_method'])
        self._to_encryptor = encrypt.Encryptor(config['server_pwd'],
                                               config['server_method'])
self._fastopen_connected = False
self._data_to_write_to_local = []
self._data_to_write_to_remote = []
self._upstream_status = WAIT_STATUS_READING
self._downstream_status = WAIT_STATUS_INIT
self._client_address = local_sock.getpeername()[:2]
self._remote_address = None
        self._chosen_server = [self._config['server'], self._config['server_port']]
fd_to_handlers[local_sock.fileno()] = self
local_sock.setblocking(False)
local_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
        loop.add(local_sock, eventloop.POLL_IN | eventloop.POLL_ERR, self._server)
        self.last_activity = 0
        self._update_activity()  # register initial activity so the TCPRelay timeout tracking starts here
def __hash__(self):
# default __hash__ is id / 16
# we want to eliminate collisions
return id(self)
@property
def remote_address(self):
return self._remote_address
def _update_activity(self, data_len=0):
# tell the TCP Relay we have activities recently
# else it will think we are inactive and timed out
self._server.update_activity(self, data_len)
def _update_stream(self, stream, status):
# update a stream to a new waiting status
# check if status is changed
# only update if dirty
dirty = False
if stream == STREAM_DOWN:
if self._downstream_status != status:
self._downstream_status = status
dirty = True
elif stream == STREAM_UP:
if self._upstream_status != status:
self._upstream_status = status
dirty = True
if dirty:
if self._local_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
if self._upstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
self._loop.modify(self._local_sock, event)
if self._remote_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
if self._upstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
self._loop.modify(self._remote_sock, event)
def _write_to_sock(self, data, sock):
# write data to sock
# if only some of the data are written, put remaining in the buffer
# and update the stream to wait for writing
if not data or not sock:
return False
uncomplete = False
try:
l = len(data)
s = sock.send(data)
if s < l:
data = data[s:]
uncomplete = True
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
uncomplete = True
else:
shell.print_exception(e)
self.destroy()
return False
if uncomplete:
if sock == self._local_sock:
self._data_to_write_to_local.append(data)
self._update_stream(STREAM_DOWN, WAIT_STATUS_WRITING)
elif sock == self._remote_sock:
self._data_to_write_to_remote.append(data)
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
else:
logging.error('write_all_to_sock:unknown socket')
else:
if sock == self._local_sock:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
elif sock == self._remote_sock:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
else:
logging.error('write_all_to_sock:unknown socket')
return True
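    # Illustrative partial-write behaviour of _write_to_sock() (sketch): if
    # sock.send() accepts only 3 bytes of a 10 byte buffer, the remaining
    # 7 bytes are queued in _data_to_write_to_local or
    # _data_to_write_to_remote and the stream switches to
    # WAIT_STATUS_WRITING so the event loop wakes us up with POLL_OUT once
    # the socket is writable again.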
def _handle_stage_connecting(self, data):
if self._is_local:
data = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data)
if self._is_local and not self._fastopen_connected and \
self._config['fast_open']:
# for sslocal and fastopen, we basically wait for data and use
# sendto to connect
try:
# only connect once
self._fastopen_connected = True
remote_sock = \
self._create_remote_socket(self._chosen_server[0],
self._chosen_server[1])
self._loop.add(remote_sock, eventloop.POLL_ERR, self._server)
data = b''.join(self._data_to_write_to_remote)
l = len(data)
s = remote_sock.sendto(data, MSG_FASTOPEN, self._chosen_server)
if s < l:
data = data[s:]
self._data_to_write_to_remote = [data]
else:
self._data_to_write_to_remote = []
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == errno.EINPROGRESS:
# in this case data is not sent at all
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
elif eventloop.errno_from_exception(e) == errno.ENOTCONN:
logging.error('fast open not supported on this OS')
self._config['fast_open'] = False
self.destroy()
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _handle_stage_addr(self, data):
try:
if self._is_local:
cmd = common.ord(data[1])
if cmd == CMD_UDP_ASSOCIATE:
logging.debug('UDP associate')
if self._local_sock.family == socket.AF_INET6:
header = b'\x05\x00\x00\x04'
else:
header = b'\x05\x00\x00\x01'
addr, port = self._local_sock.getsockname()[:2]
addr_to_send = socket.inet_pton(self._local_sock.family,
addr)
port_to_send = struct.pack('>H', port)
self._write_to_sock(header + addr_to_send + port_to_send,
self._local_sock)
self._stage = STAGE_UDP_ASSOC
# just wait for the client to disconnect
return
elif cmd == CMD_CONNECT:
# just trim VER CMD RSV
data = data[3:]
else:
logging.error('unknown command %d', cmd)
self.destroy()
return
header_result = parse_header(data)
if header_result is None:
raise Exception('can not parse header')
addrtype, remote_addr, remote_port, header_length = header_result
logging.info('connecting %s:%d from %s:%d' %
(common.to_str(remote_addr), remote_port,
self._client_address[0], self._client_address[1]))
self._remote_address = (common.to_str(remote_addr), remote_port)
# pause reading
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
self._stage = STAGE_DNS
if self._is_local:
# forward address to remote
self._write_to_sock((b'\x05\x00\x00\x01'
b'\x00\x00\x00\x00\x10\x10'),
self._local_sock)
data_to_send = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data_to_send)
# notice here may go into _handle_dns_resolved directly
self._dns_resolver.resolve(self._chosen_server[0],
self._handle_dns_resolved)
else:
if len(data) > header_length:
self._data_to_write_to_remote.append(data[header_length:])
# notice here may go into _handle_dns_resolved directly
self._dns_resolver.resolve(remote_addr,
self._handle_dns_resolved)
except Exception as e:
self._log_error(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
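    # Illustrative header layout parsed above (sketch, IPv4 case): after the
    # optional SOCKS 'VER CMD RSV' bytes are trimmed, parse_header() sees
    #
    #   +------+----------+----------+
    #   | ATYP | DST.ADDR | DST.PORT |
    #   |  1   |    4     |    2     |
    #   +------+----------+----------+
    #
    # and returns (addrtype, remote_addr, remote_port, header_length).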
def _create_remote_socket(self, ip, port):
addrs = socket.getaddrinfo(ip, port, 0, socket.SOCK_STREAM,
socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("getaddrinfo failed for %s:%d" % (ip, port))
af, socktype, proto, canonname, sa = addrs[0]
remote_sock = socket.socket(af, socktype, proto)
self._remote_sock = remote_sock
self._fd_to_handlers[remote_sock.fileno()] = self
remote_sock.setblocking(False)
remote_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
return remote_sock
def _handle_dns_resolved(self, result, error):
if error:
self._log_error(error)
self.destroy()
return
if result:
ip = result[1]
if ip:
try:
self._stage = STAGE_CONNECTING
remote_addr = ip
if self._is_local:
remote_port = self._chosen_server[1]
else:
remote_port = self._remote_address[1]
if self._is_local and self._config['fast_open']:
                        # for fastopen:
                        # wait for more data to arrive and send it in one SYN
self._stage = STAGE_CONNECTING
# we don't have to wait for remote since it's not
# created
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
# TODO when there is already data in this packet
else:
# else do connect
remote_sock = self._create_remote_socket(remote_addr,
remote_port)
try:
remote_sock.connect((remote_addr, remote_port))
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == \
errno.EINPROGRESS:
pass
self._loop.add(remote_sock,
eventloop.POLL_ERR | eventloop.POLL_OUT,
self._server)
self._stage = STAGE_CONNECTING
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
return
except Exception as e:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _on_local_read(self):
# handle all local read events and dispatch them to methods for
# each stage
if not self._local_sock:
return
is_local = self._is_local
data = None
try:
data = self._local_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
self._update_activity(len(data))
if not is_local:
data = self._encryptor.decrypt(data)
if not data:
return
if self._stage == STAGE_STREAM:
if self._is_local:
data = self._encryptor.encrypt(data)
self._write_to_sock(data, self._remote_sock)
return
elif is_local and self._stage == STAGE_INIT:
# TODO check auth method
            self._write_to_sock(b'\x05\x00', self._local_sock)
self._stage = STAGE_ADDR
return
elif self._stage == STAGE_CONNECTING:
self._handle_stage_connecting(data)
elif (is_local and self._stage == STAGE_ADDR) or \
(not is_local and self._stage == STAGE_INIT):
self._handle_stage_addr(data)
def _on_remote_read(self):
# handle all remote read events
data = None
try:
data = self._remote_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
self._update_activity(len(data))
if self._is_local:
data = self._encryptor.decrypt(data)
else:
data = self._encryptor.encrypt(data)
try:
self._write_to_sock(data, self._local_sock)
except Exception as e:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
# TODO use logging when debug completed
self.destroy()
def _on_local_write(self):
# handle local writable event
if self._data_to_write_to_local:
data = b''.join(self._data_to_write_to_local)
self._data_to_write_to_local = []
self._write_to_sock(data, self._local_sock)
else:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
def _on_remote_write(self):
# handle remote writable event
self._stage = STAGE_STREAM
if self._data_to_write_to_remote:
data = b''.join(self._data_to_write_to_remote)
self._data_to_write_to_remote = []
self._write_to_sock(data, self._remote_sock)
else:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
def _on_local_error(self):
logging.debug('got local error')
if self._local_sock:
logging.error(eventloop.get_sock_error(self._local_sock))
self.destroy()
def _on_remote_error(self):
logging.debug('got remote error')
if self._remote_sock:
logging.error(eventloop.get_sock_error(self._remote_sock))
self.destroy()
    def handle_event(self, sock, event):
        # handle all events in this handler and dispatch them to methods
if self._stage == STAGE_DESTROYED:
logging.debug('ignore handle_event: destroyed')
return
# order is important
if sock == self._remote_sock:
if event & eventloop.POLL_ERR:
self._on_remote_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_remote_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_remote_write()
elif sock == self._local_sock:
if event & eventloop.POLL_ERR:
self._on_local_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_local_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_local_write()
else:
logging.warn('unknown socket')
def _log_error(self, e):
logging.error('%s when handling connection from %s:%d' %
(e, self._client_address[0], self._client_address[1]))
def destroy(self):
# destroy the handler and release any resources
# promises:
# 1. destroy won't make another destroy() call inside
# 2. destroy releases resources so it prevents future call to destroy
# 3. destroy won't raise any exceptions
        # if any of the promises is broken, it indicates a bug has been
        # introduced, most likely memory leaks, etc.
if self._stage == STAGE_DESTROYED:
            # this should not happen
logging.debug('already destroyed')
return
self._stage = STAGE_DESTROYED
if self._remote_address:
logging.debug('destroy: %s:%d' %
self._remote_address)
else:
logging.debug('destroy')
if self._remote_sock:
logging.debug('destroying remote')
self._loop.remove(self._remote_sock)
del self._fd_to_handlers[self._remote_sock.fileno()]
self._remote_sock.close()
self._remote_sock = None
if self._local_sock:
logging.debug('destroying local')
self._loop.remove(self._local_sock)
del self._fd_to_handlers[self._local_sock.fileno()]
self._local_sock.close()
self._local_sock = None
self._dns_resolver.remove_callback(self._handle_dns_resolved)
self._server.remove_handler(self)
class TCPRelay(object):
def __init__(self, config, dns_resolver, stat_callback=None):
self._config = config
# self._is_local = is_local
self._dns_resolver = dns_resolver
self._closed = False
self._eventloop = None
self._fd_to_handlers = {}
self._timeout = config['timeout']
self._timeouts = [] # a list for all the handlers
        # we trim the timeouts once in a while
self._timeout_offset = 0 # last checked position for timeout
self._handler_to_timeouts = {} # key: handler value: index in timeouts
# if is_local:
listen_addr = config['local_address']
listen_port = config['local_port']
# else:
# listen_addr = config['server']
# listen_port = config['server_port']
self._listen_port = listen_port
addrs = socket.getaddrinfo(listen_addr, listen_port, 0,
socket.SOCK_STREAM, socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(listen_addr, listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(sa)
server_socket.setblocking(False)
server_socket.listen(1024)
self._server_socket = server_socket
self._stat_callback = stat_callback
def add_to_loop(self, loop):
if self._eventloop:
raise Exception('already add to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
self._eventloop.add(self._server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
self._eventloop.add_periodic(self.handle_periodic)
def remove_handler(self, handler):
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
del self._handler_to_timeouts[hash(handler)]
    def update_activity(self, handler, data_len):
        # invoked from TCPRelayHandler to record activity on a connection
if data_len and self._stat_callback:
self._stat_callback(self._listen_port, data_len)
# set handler to active
now = int(time.time())
if now - handler.last_activity < eventloop.TIMEOUT_PRECISION:
# thus we can lower timeout modification frequency
return
handler.last_activity = now
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
length = len(self._timeouts)
self._timeouts.append(handler)
self._handler_to_timeouts[hash(handler)] = length
def _sweep_timeout(self):
# tornado's timeout memory management is more flexible than we need
# we just need a sorted last_activity queue and it's faster than heapq
# in fact we can do O(1) insertion/remove so we invent our own
if self._timeouts:
logging.log(shell.VERBOSE_LEVEL, 'sweeping timeouts')
now = time.time()
length = len(self._timeouts)
pos = self._timeout_offset
while pos < length:
handler = self._timeouts[pos]
if handler:
if now - handler.last_activity < self._timeout:
break
else:
if handler.remote_address:
logging.warn('timed out: %s:%d' %
handler.remote_address)
else:
logging.warn('timed out')
handler.destroy()
self._timeouts[pos] = None # free memory
pos += 1
else:
pos += 1
if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1:
                    # clean up the timeout queue when the swept prefix exceeds
                    # TIMEOUTS_CLEAN_SIZE and covers more than half of the queue
self._timeouts = self._timeouts[pos:]
for key in self._handler_to_timeouts:
self._handler_to_timeouts[key] -= pos
pos = 0
self._timeout_offset = pos
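    # Summary of the timeout bookkeeping (descriptive note only): _timeouts is
    # an append-only list ordered by last_activity and _handler_to_timeouts
    # maps each handler to its current slot; stale slots are overwritten with
    # None rather than removed, so insertion and removal stay O(1), and
    # _sweep_timeout() walks from _timeout_offset, destroys expired handlers
    # and compacts the list once the swept prefix grows large enough.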
def handle_event(self, sock, fd, event):
        # called by the event loop, which receives events from the system layer;
        # handle them here and dispatch them to the right handler
if sock:
logging.log(shell.VERBOSE_LEVEL, 'fd %d %s', fd,
eventloop.EVENT_NAMES.get(event, event))
        # e.g. a POLL_IN event on the listening socket means a new connection to accept
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
# TODO
raise Exception('server_socket error')
try:
logging.debug('accept')
                # accept() returns (conn, address); conn is a new socket for sending and receiving data
conn = self._server_socket.accept()
                # self._fd_to_handlers is passed to TCPRelayHandler, which
                # registers its own file descriptors in this mapping
TCPRelayHandler(self, self._fd_to_handlers,
self._eventloop, conn[0], self._config,
self._dns_resolver)
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
return
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
        else:
            # an event on another socket: look up its handler and dispatch the event to it
if sock:
handler = self._fd_to_handlers.get(fd, None)
if handler:
handler.handle_event(sock, event)
else:
logging.warn('poll removed fd')
def handle_periodic(self):
if self._closed:
if self._server_socket:
self._eventloop.remove(self._server_socket)
self._server_socket.close()
self._server_socket = None
logging.info('closed TCP port %d', self._listen_port)
if not self._fd_to_handlers:
logging.info('stopping')
self._eventloop.stop()
self._sweep_timeout()
def close(self, next_tick=False):
logging.debug('TCP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for handler in list(self._fd_to_handlers.values()):
handler.destroy()
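# A minimal usage sketch (illustrative only; assumes the companion eventloop
# and asyncdns modules from the same package and a shadowsocks-style config):
#
#   config = {'local_address': '127.0.0.1', 'local_port': 1080,
#             'timeout': 300, 'fast_open': False, 'verbose': False}
#   dns_resolver = asyncdns.DNSResolver()
#   relay = TCPRelay(config, dns_resolver)
#   loop = eventloop.EventLoop()
#   dns_resolver.add_to_loop(loop)
#   relay.add_to_loop(loop)
#   loop.run()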
|
|
from __future__ import print_function
import os
import sys
import wx
import numpy as np
import matplotlib
matplotlib.interactive(False)
#Use the WxAgg back end. The Wx one takes too long to render
matplotlib.use('WXAgg')
from sas.sasgui.guiframe.local_perspectives.plotting.SimplePlot import PlotFrame
#import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.colors as colors
from sas.sasgui.guiframe.events import StatusEvent
from sas.sasgui.perspectives.calculator.calculator_widgets import InputTextCtrl
from sas.sascalc.dataloader.data_info import Data2D
from sas.sascalc.dataloader.data_info import Detector
from sas.sascalc.dataloader.manipulations import reader2D_converter
from sas.sasgui.guiframe.documentation_window import DocumentationWindow
_BOX_WIDTH = 60
IS_WIN = True
if sys.platform.count("win32") > 0:
_DIALOG_WIDTH = 400
else:
_DIALOG_WIDTH = 480
IS_WIN = False
class ImageView:
"""
Open a file dialog to allow the user to select a given file.
Display the loaded data if available.
"""
def __init__(self, parent=None):
"""
Init
"""
self.parent = parent
def load(self):
"""
load image files
"""
parent = self.parent
if parent is None:
location = os.getcwd()
else:
location = parent._default_save_location
path_list = self.choose_data_file(location=location)
if path_list is None:
return
        if len(path_list) > 0 and path_list[0] is not None:
if parent is not None:
parent._default_save_location = os.path.dirname(path_list[0])
err_msg = ''
for file_path in path_list:
basename = os.path.basename(file_path)
_, extension = os.path.splitext(basename)
try:
img = mpimg.imread(file_path)
is_png = extension.lower() == '.png'
plot_frame = ImageFrame(parent, -1, basename, img)
plot_frame.Show(False)
ax = plot_frame.plotpanel
if not is_png:
ax.subplot.set_ylim(ax.subplot.get_ylim()[::-1])
ax.subplot.set_xlabel('x [pixel]')
ax.subplot.set_ylabel('y [pixel]')
ax.figure.subplots_adjust(left=0.15, bottom=0.1,
right=0.95, top=0.95)
plot_frame.SetTitle('Picture -- %s --' % basename)
plot_frame.Show(True)
if parent is not None:
parent.put_icon(plot_frame)
except:
err_msg += "Failed to load '%s'.\n" % basename
if err_msg:
if parent is not None:
wx.PostEvent(parent, StatusEvent(status=err_msg, info="error"))
else:
print(err_msg)
def choose_data_file(self, location=None):
"""
Open a file dialog to allow loading a file
"""
path = None
if location is None:
location = os.getcwd()
        dlg = wx.FileDialog(self.parent, "Image Viewer: Choose an image file",
location, "", "", style=wx.FD_OPEN | wx.FD_MULTIPLE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPaths()
else:
return None
dlg.Destroy()
return path
class ImageFrame(PlotFrame):
"""
Frame for simple plot
"""
def __init__(self, parent, id, title, image=None, scale='log_{10}',
size=wx.Size(550, 470)):
"""
        Frame for displaying an image loaded with matplotlib.
        :param image: image array obtained from matplotlib's imread() [ndarray]
        :param parent: parent panel/container
"""
# Initialize the Frame object
PlotFrame.__init__(self, parent, id, title, scale, size,
show_menu_icons=False)
self.parent = parent
self.data = image
self.file_name = title
menu = wx.Menu()
id = wx.NewId()
item = wx.MenuItem(menu, id, "&Convert to Data")
menu.AppendItem(item)
wx.EVT_MENU(self, id, self.on_set_data)
self.menu_bar.Append(menu, "&Image")
menu_help = wx.Menu()
id = wx.NewId()
item = wx.MenuItem(menu_help, id, "&HowTo")
menu_help.AppendItem(item)
wx.EVT_MENU(self, id, self.on_help)
self.menu_bar.Append(menu_help, "&Help")
self.SetMenuBar(self.menu_bar)
self.im_show(image)
def on_set_data(self, event):
"""
Rescale the x y range, make 2D data and send it to data explore
"""
title = self.file_name
self.panel = SetDialog(parent=self, title=title, image=self.data)
self.panel.ShowModal()
def on_help(self, event):
"""
Bring up Image Viewer Documentation from the image viewer window
whenever the help menu item "how to" is clicked. Calls
DocumentationWindow with the path of the location within the
        documentation tree (after /doc/).
        :param event: triggered by clicking "HowTo" in the help menu
"""
_TreeLocation = "user/sasgui/perspectives/calculator/"
_TreeLocation += "image_viewer_help.html"
_doc_viewer = DocumentationWindow(self, -1, _TreeLocation, "",
"Image Viewer Help")
class SetDialog(wx.Dialog):
"""
Dialog for Data Set
"""
def __init__(self, parent, id= -1, title="Convert to Data", image=None,
size=(_DIALOG_WIDTH, 270)):
wx.Dialog.__init__(self, parent, id, title, size)
# parent
self.parent = parent
self.base = parent.parent
self.title = title
self.image = np.array(image)
self.z_ctrl = None
self.xy_ctrls = []
self.is_png = self._get_is_png()
self._build_layout()
my_title = "Convert Image to Data - %s -" % self.title
self.SetTitle(my_title)
self.SetSize(size)
def _get_is_png(self):
"""
        Return True if the image file is a png
"""
_, extension = os.path.splitext(self.title)
return extension.lower() == '.png'
def _build_layout(self):
"""
Layout
"""
vbox = wx.BoxSizer(wx.VERTICAL)
zbox = wx.BoxSizer(wx.HORIZONTAL)
xbox = wx.BoxSizer(wx.HORIZONTAL)
ybox = wx.BoxSizer(wx.HORIZONTAL)
btnbox = wx.BoxSizer(wx.VERTICAL)
sb_title = wx.StaticBox(self, -1, 'Transform Axes')
boxsizer = wx.StaticBoxSizer(sb_title, wx.VERTICAL)
z_title = wx.StaticText(self, -1, 'z values (range: 0 - 255) to:')
ztime_title = wx.StaticText(self, -1, 'z *')
x_title = wx.StaticText(self, -1, 'x values from pixel # to:')
xmin_title = wx.StaticText(self, -1, 'xmin:')
xmax_title = wx.StaticText(self, -1, 'xmax:')
y_title = wx.StaticText(self, -1, 'y values from pixel # to:')
ymin_title = wx.StaticText(self, -1, 'ymin: ')
ymax_title = wx.StaticText(self, -1, 'ymax:')
z_ctl = InputTextCtrl(self, -1, size=(_BOX_WIDTH , 20),
style=wx.TE_PROCESS_ENTER)
xmin_ctl = InputTextCtrl(self, -1, size=(_BOX_WIDTH, 20),
style=wx.TE_PROCESS_ENTER)
xmax_ctl = InputTextCtrl(self, -1, size=(_BOX_WIDTH, 20),
style=wx.TE_PROCESS_ENTER)
ymin_ctl = InputTextCtrl(self, -1, size=(_BOX_WIDTH, 20),
style=wx.TE_PROCESS_ENTER)
ymax_ctl = InputTextCtrl(self, -1, size=(_BOX_WIDTH, 20),
style=wx.TE_PROCESS_ENTER)
z_ctl.SetValue('1.0')
xmin_ctl.SetValue('-0.3')
xmax_ctl.SetValue('0.3')
ymin_ctl.SetValue('-0.3')
ymax_ctl.SetValue('0.3')
z_ctl.Bind(wx.EVT_TEXT, self._on_z_enter)
xmin_ctl.Bind(wx.EVT_TEXT, self._onparam)
xmax_ctl.Bind(wx.EVT_TEXT, self._onparam)
ymin_ctl.Bind(wx.EVT_TEXT, self._onparam)
ymax_ctl.Bind(wx.EVT_TEXT, self._onparam)
xbox.AddMany([(x_title , 0, wx.LEFT, 0),
(xmin_title , 0, wx.LEFT, 10),
(xmin_ctl , 0, wx.LEFT, 10),
(xmax_title , 0, wx.LEFT, 10),
(xmax_ctl , 0, wx.LEFT, 10)])
ybox.AddMany([(y_title , 0, wx.LEFT, 0),
(ymin_title , 0, wx.LEFT, 10),
(ymin_ctl , 0, wx.LEFT, 10),
(ymax_title , 0, wx.LEFT, 10),
(ymax_ctl , 0, wx.LEFT, 10)])
zbox.AddMany([(z_title , 0, wx.LEFT, 0),
(ztime_title, 0, wx.LEFT, 10),
(z_ctl , 0, wx.LEFT, 7),
])
msg = "The data rescaled will show up in the Data Explorer. \n"
msg += "*Note: Recommend to use an image with 8 bit Grey \n"
msg += " scale (and with No. of pixels < 300 x 300).\n"
msg += " Otherwise, z = 0.299R + 0.587G + 0.114B."
note_txt = wx.StaticText(self, -1, msg)
note_txt.SetForegroundColour("black")
hbox = wx.BoxSizer(wx.HORIZONTAL)
okButton = wx.Button(self, -1, 'OK')
okButton.Bind(wx.EVT_BUTTON, self.on_set)
cancelButton = wx.Button(self, -1, 'Cancel')
cancelButton.Bind(wx.EVT_BUTTON, self.OnClose)
btnbox.Add(okButton, 0, wx.LEFT | wx.BOTTOM, 5)
btnbox.Add(cancelButton, 0, wx.LEFT | wx.TOP, 5)
hbox.Add(note_txt, 0, wx.LEFT, 5)
hbox.Add(btnbox, 0, wx.LEFT, 15)
vbox.Add((10, 15))
boxsizer.Add(xbox, 1, wx.LEFT | wx.BOTTOM, 5)
boxsizer.Add(ybox, 1, wx.LEFT | wx.BOTTOM, 5)
boxsizer.Add(zbox, 1, wx.LEFT | wx.BOTTOM, 5)
vbox.Add(boxsizer, 0, wx.LEFT, 20)
vbox.Add(hbox, 0, wx.LEFT | wx.TOP, 15)
okButton.SetFocus()
# set sizer
self.SetSizer(vbox)
#pos = self.parent.GetPosition()
#self.SetPosition(pos)
self.z_ctrl = z_ctl
self.xy_ctrls = [[xmin_ctl, xmax_ctl], [ymin_ctl, ymax_ctl]]
def _onparamEnter(self, event=None):
"""
        Bypass the original textctrl binding
"""
pass
def _onparam(self, event=None):
"""
Set to default
"""
item = event.GetEventObject()
self._check_ctrls(item)
def _check_ctrls(self, item, is_button=False):
"""
"""
flag = True
item.SetBackgroundColour("white")
try:
val = float(item.GetValue())
if val < -10.0 or val > 10.0:
item.SetBackgroundColour("pink")
item.Refresh()
flag = False
except:
item.SetBackgroundColour("pink")
item.Refresh()
flag = False
if not flag and is_button:
err_msg = "The allowed range of the min and max values are \n"
err_msg += "between -10 and 10."
if self.base is not None:
wx.PostEvent(self.base, StatusEvent(status=err_msg,
info="error"))
else:
print(err_msg)
return flag
def _on_z_enter(self, event=None):
"""
On z factor enter
"""
item = event.GetEventObject()
self._check_z_ctrl(item)
def _check_z_ctrl(self, item, is_button=False):
"""
"""
flag = True
item.SetBackgroundColour("white")
try:
val = float(item.GetValue())
if val <= 0:
item.SetBackgroundColour("pink")
item.Refresh()
flag = False
except:
item.SetBackgroundColour("pink")
item.Refresh()
flag = False
if not flag and is_button:
err_msg = "The z scale value should be larger than 0."
if self.base is not None:
wx.PostEvent(self.base, StatusEvent(status=err_msg,
info="error"))
else:
print(err_msg)
return flag
def on_set(self, event):
"""
Set image as data
"""
event.Skip()
# Check the textctrl values
for item_list in self.xy_ctrls:
for item in item_list:
if not self._check_ctrls(item, True):
return
if not self._check_z_ctrl(self.z_ctrl, True):
return
try:
image = self.image
xmin = float(self.xy_ctrls[0][0].GetValue())
xmax = float(self.xy_ctrls[0][1].GetValue())
ymin = float(self.xy_ctrls[1][0].GetValue())
ymax = float(self.xy_ctrls[1][1].GetValue())
zscale = float(self.z_ctrl.GetValue())
self.convert_image(image, xmin, xmax, ymin, ymax, zscale)
except:
err_msg = "Error occurred while converting Image to Data."
if self.base is not None:
wx.PostEvent(self.base, StatusEvent(status=err_msg,
info="error"))
else:
print(err_msg)
self.OnClose(event)
def convert_image(self, rgb, xmin, xmax, ymin, ymax, zscale):
"""
Convert image to data2D
"""
x_len = len(rgb[0])
y_len = len(rgb)
x_vals = np.linspace(xmin, xmax, num=x_len)
y_vals = np.linspace(ymin, ymax, num=y_len)
# Instantiate data object
output = Data2D()
output.filename = os.path.basename(self.title)
output.id = output.filename
detector = Detector()
detector.pixel_size.x = None
detector.pixel_size.y = None
# Store the sample to detector distance
detector.distance = None
output.detector.append(detector)
        # Initialize the output data object
output.data = zscale * self.rgb2gray(rgb)
output.err_data = np.zeros([x_len, y_len])
output.mask = np.ones([x_len, y_len], dtype=bool)
output.xbins = x_len
output.ybins = y_len
output.x_bins = x_vals
output.y_bins = y_vals
output.qx_data = np.array(x_vals)
output.qy_data = np.array(y_vals)
output.xmin = xmin
output.xmax = xmax
output.ymin = ymin
output.ymax = ymax
output.xaxis('\\rm{Q_{x}}', '\AA^{-1}')
output.yaxis('\\rm{Q_{y}}', '\AA^{-1}')
# Store loading process information
output.meta_data['loader'] = self.title.split('.')[-1] + "Reader"
output.is_data = True
output = reader2D_converter(output)
if self.base is not None:
data = self.base.create_gui_data(output, self.title)
self.base.add_data({data.id:data})
def rgb2gray(self, rgb):
"""
RGB to Grey
"""
if self.is_png:
# png image limits: 0 to 1, others 0 to 255
#factor = 255.0
rgb = rgb[::-1]
if rgb.ndim == 2:
grey = np.rollaxis(rgb, axis=0)
else:
red, green, blue = np.rollaxis(rgb[..., :3], axis= -1)
grey = 0.299 * red + 0.587 * green + 0.114 * blue
max_i = rgb.max()
factor = 255.0 / max_i
grey *= factor
return np.array(grey)
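    # A rough sketch of the conversion above (values are illustrative; assumes
    # a non-png 2x2 RGB array with 0-255 channels):
    #   rgb  = np.array([[[255, 0, 0], [0, 255, 0]],
    #                    [[0, 0, 255], [255, 255, 255]]], dtype=float)
    #   grey = 0.299*R + 0.587*G + 0.114*B, rescaled so the maximum maps to
    #   255, giving approximately [[76, 150], [29, 255]].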
def OnClose(self, event):
"""
Close event
"""
# clear event
event.Skip()
self.Destroy()
if __name__ == "__main__":
app = wx.App()
ImageView(None).load()
app.MainLoop()
|
|
__author__ = 'pierleonia'
# biopython
from Bio import Alphabet
from Bio.SeqUtils.CheckSum import crc64
from Bio import Entrez
from Bio.Seq import Seq, UnknownSeq
from Bio.SeqRecord import SeqRecord, _RestrictedDict
from Bio import SeqFeature
class BaseBioSQLAlter():
    '''Base class for handlers of BioSQL data'''
def __init__(self, handler,):
self.handler = handler
self.adaptor = handler.adaptor
def create(self,):
raise NotImplementedError('This method is not available')
def read(self,):
raise NotImplementedError('This method is not available')
def update(self,):
raise NotImplementedError('This method is not available')
def delete(self,):
raise NotImplementedError('This method is not available')
class BioSQLAlterFeature(BaseBioSQLAlter):
# def __init__(self, handler):
# '''handler is an initiated BioSQLHandler object
# bioentry_id required
# seqfeature_id if 0 means new, else is the id in the database
# feature pass to upload a new one
#
# '''
# BaseBioSQLAlter.__init__(self, handler = handler)
def create(self,feature, bioentry_id):
"""
Load a new biopython SeqFeature into the database (PRIVATE).
"""
ranks = [row.rank for row in self.adaptor((self.adaptor.seqfeature.bioentry_id == bioentry_id)
).select(self.adaptor.seqfeature.rank)]
if ranks:
rank = max(ranks)+1
else:
rank = 1
seqfeature_id = self._load_seqfeature_basic(feature.type, rank, bioentry_id)
self._load_seqfeature_locations(feature, seqfeature_id)
self._load_seqfeature_qualifiers(feature.qualifiers, seqfeature_id)
return seqfeature_id
def read(self, seqfeature_id):
'''get id and type'''
rows = self.adaptor((self.adaptor.seqfeature.seqfeature_id == seqfeature_id) &\
(self.adaptor.seqfeature.type_term_id == self.adaptor.term.term_id)
).select(self.adaptor.seqfeature.seqfeature_id,self.adaptor.term.name)
if rows:
feature_type = rows[0].term.name
else:
raise IndexError('No feature available for seqfeature_id %i'%(seqfeature_id))
'''get qualifiers'''
qvs=((qualifier.term.name,qualifier.seqfeature_qualifier_value.value,)\
for qualifier in self.adaptor((self.adaptor.seqfeature_qualifier_value.seqfeature_id == seqfeature_id) &\
(self.adaptor.seqfeature_qualifier_value.term_id == self.adaptor.term.term_id)\
).select(self.adaptor.term.name,self.adaptor.seqfeature_qualifier_value.value,orderby=self.adaptor.seqfeature_qualifier_value.rank))
qualifiers = {}
for qv_name, qv_value in qvs:
qualifiers.setdefault(qv_name, []).append(qv_value)
# Get db_xrefs [special case of qualifiers]
qvs = ((row.dbname,row.accession)\
for row in self.adaptor((self.adaptor.seqfeature_dbxref.seqfeature_id == seqfeature_id) &
(self.adaptor.seqfeature_dbxref.dbxref_id == self.adaptor.dbxref.dbxref_id)
).select(self.adaptor.dbxref.dbname,self.adaptor.dbxref.accession,orderby=self.adaptor.seqfeature_dbxref.rank))
for qv_name, qv_value in qvs:
value = "%s:%s" % (qv_name, qv_value)
qualifiers.setdefault("db_xref", []).append(value)
''' Get locations '''
results= ((row.location_id,
row.start_pos,
row.end_pos,
row.strand) for row in self.adaptor(self.adaptor.location.seqfeature_id == seqfeature_id).select(
self.adaptor.location.location_id,
self.adaptor.location.start_pos,
self.adaptor.location.end_pos,
self.adaptor.location.strand,
orderby=self.adaptor.location.rank))
locations = []
# convert to Python standard form
# Convert strand = 0 to strand = None
# re: comment in Loader.py:
# Biopython uses None when we don't know strand information but
# BioSQL requires something (non null) and sets this as zero
# So we'll use the strand or 0 if Biopython spits out None
for location_id, start, end, strand in results:
if start:
start -= 1
if strand == 0:
strand = None
if strand not in (+1, -1, None):
raise ValueError("Invalid strand %s found in database for "\
"seqfeature_id %s" % (strand, seqfeature_id))
if end < start:
import warnings
warnings.warn("Inverted location start/end (%i and %i) for "\
"seqfeature_id %s" % (start, end, seqfeature_id))
locations.append( (location_id, start, end, strand) )
''' Get possible remote reference information'''
remote_results = ((row.location.location_id,
row.dbxref.dbname,
row.dbxref.accession,
row.dbxref.accession,) for row in self.adaptor((self.adaptor.location.seqfeature_id == seqfeature_id) &
(self.adaptor.location.dbxref_id == self.adaptor.dbxref.dbxref_id)).select(self.adaptor.location.location_id,
self.adaptor.dbxref.dbname,
self.adaptor.dbxref.accession,
self.adaptor.dbxref.accession,))
ref_lookup = {}
for location_id, dbname, accession, version in remote_results:
if version and version != "0":
v = "%s.%s" % (accession, version)
else:
v = accession
            # subfeature remote location db_refs are stored as an empty string
            # when not present
if dbname == "":
dbname = None
ref_lookup[location_id] = (dbname, v)
feature = SeqFeature.SeqFeature(type =feature_type)
feature._seqfeature_id = seqfeature_id #Store the key as a private property
feature.qualifiers = qualifiers
if len(locations) == 0:
pass
elif len(locations) == 1:
location_id, start, end, strand = locations[0]
#See Bug 2677, we currently don't record the location_operator
            #For consistency with older versions of Biopython, default to "".
feature.location_operator =\
self.handler._retrieve_location_qualifier_value(location_id)
dbname, version = ref_lookup.get(location_id, (None, None))
feature.location = SeqFeature.FeatureLocation(start, end)
feature.strand = strand
feature.ref_db = dbname
feature.ref = version
else:
assert feature.sub_features == []
for location in locations:
location_id, start, end, strand = location
dbname, version = ref_lookup.get(location_id, (None, None))
subfeature = SeqFeature.SeqFeature()
subfeature.type = feature_type
subfeature.location_operator =\
self.handler._retrieve_location_qualifier_value(location_id)
#TODO - See Bug 2677 - we don't yet record location_operator,
#so for consistency with older versions of Biopython default
                #to assuming it's a join.
if not subfeature.location_operator:
subfeature.location_operator="join"
subfeature.location = SeqFeature.FeatureLocation(start, end)
subfeature.strand = strand
subfeature.ref_db = dbname
subfeature.ref = version
feature.sub_features.append(subfeature)
# Assuming that the feature loc.op is the same as the sub_feature
# loc.op:
feature.location_operator =\
feature.sub_features[0].location_operator
# Locations are in order, but because of remote locations for
# sub-features they are not necessarily in numerical order:
start = locations[0][1]
end = locations[-1][2]
feature.location = SeqFeature.FeatureLocation(start, end)
# To get the parent strand (as done when parsing GenBank files),
# need to consider evil mixed strand examples like this,
# join(complement(69611..69724),139856..140087,140625..140650)
strands = set(sf.strand for sf in feature.sub_features)
if len(strands)==1:
feature.strand = feature.sub_features[0].strand
else:
feature.strand = None # i.e. mixed strands
return feature
def update(self, feature, seqfeature_id):
        '''Do the same as create, but use _update_seqfeature_basic so the existing seqfeature_id is preserved'''
seqfeature_row = self.adaptor((self.adaptor.seqfeature.seqfeature_id == seqfeature_id)
                                      ).select(self.adaptor.seqfeature.rank, self.adaptor.seqfeature.bioentry_id).first()
self._update_seqfeature_basic(seqfeature_id,
feature.type,
seqfeature_row.rank,
seqfeature_row.bioentry_id)
'''remove dbxrefs '''
self.adaptor(self.adaptor.seqfeature_dbxref.seqfeature_id == seqfeature_id).delete()
'''remove qualifiers '''
self.adaptor(self.adaptor.seqfeature_qualifier_value.seqfeature_id == seqfeature_id).delete()
'''remove locations '''
self.adaptor(self.adaptor.location.seqfeature_id == seqfeature_id).delete()
'''Add updated location and qualifiers '''
self._load_seqfeature_locations(feature, seqfeature_id)
self._load_seqfeature_qualifiers(feature.qualifiers, seqfeature_id)
self.handler._update_bioentry_time_stamp(seqfeature_row.bioentry_id)
def delete(self, seqfeature_id):
'''remove all the data related to a given seqfeature_id
terms and dbxrefs will remain in the respective tables on the db,
since they can be used by other entities, but will no longer be linked to
the deleted seqfeature '''
bioentry_id = self.adaptor.seqfeature[seqfeature_id].bioentry_id
'''remove dbxrefs '''
self.adaptor(self.adaptor.seqfeature_dbxref.seqfeature_id == seqfeature_id).delete()
'''remove qualifiers '''
self.adaptor(self.adaptor.seqfeature_qualifier_value.seqfeature_id == seqfeature_id).delete()
'''remove locations '''
self.adaptor(self.adaptor.location.seqfeature_id == seqfeature_id).delete()
'''remove seqfeature '''
self.adaptor(self.adaptor.seqfeature.seqfeature_id == seqfeature_id).delete()
self.handler._update_bioentry_time_stamp(bioentry_id)
def update_seqfeature_type(self, seqfeature_id, new_value):
ontology_id = self.handler._get_ontology_id('SeqFeature Keys')
seqfeature_key_id = self.handler._get_term_id(new_value,
ontology_id = ontology_id)
self.adaptor.seqfeature[seqfeature_id] = dict (type_term_id = seqfeature_key_id)
def update_seqfeature_start(self, seqfeature_id, new_value):
raise NotImplementedError('TODO')
def update_seqfeature_end(self, seqfeature_id, new_value):
raise NotImplementedError('TODO')
def update_seqfeature_identifier(self, seqfeature_id, new_value):
self._update_qualifier(seqfeature_id, 'id', new_value)
def update_seqfeature_description(self, seqfeature_id, new_value):
self._update_qualifier(seqfeature_id, 'description', new_value)
def _update_seqfeature_basic(self, seqfeature_id, feature_type, feature_rank, bioentry_id):
"""Load the first tables of a seqfeature and returns the id (PRIVATE).
This loads the "key" of the seqfeature (ie. CDS, gene) and
the basic seqfeature table itself.
"""
ontology_id = self.handler._get_ontology_id('SeqFeature Keys')
seqfeature_key_id = self.handler._get_term_id(feature_type,
ontology_id = ontology_id)
# XXX source is always EMBL/GenBank/SwissProt here; it should depend on
# the record (how?)
source_cat_id = self.handler._get_ontology_id('SeqFeature Sources')
source_term_id = self.handler._get_term_id('EMBL/GenBank/SwissProt',
ontology_id = source_cat_id)
self.adaptor.seqfeature[seqfeature_id] = dict (bioentry_id = bioentry_id,
type_term_id = seqfeature_key_id,
source_term_id = source_term_id,
rank = feature_rank)
def _load_seqfeature_basic(self, feature_type, feature_rank, bioentry_id):
"""Load the first tables of a seqfeature and returns the id (PRIVATE).
This loads the "key" of the seqfeature (ie. CDS, gene) and
the basic seqfeature table itself.
"""
ontology_id = self.handler._get_ontology_id('SeqFeature Keys')
seqfeature_key_id = self.handler._get_term_id(feature_type,
ontology_id = ontology_id)
# XXX source is always EMBL/GenBank/SwissProt here; it should depend on
# the record (how?)
source_cat_id = self.handler._get_ontology_id('SeqFeature Sources')
source_term_id = self.handler._get_term_id('EMBL/GenBank/SwissProt',
ontology_id = source_cat_id)
seqfeature_id = self.adaptor.seqfeature.insert(bioentry_id = bioentry_id,
type_term_id = seqfeature_key_id,
source_term_id = source_term_id,
rank = feature_rank)
return seqfeature_id
def _load_seqfeature_locations(self, feature, seqfeature_id):
"""Load all of the locations for a SeqFeature into tables (PRIVATE).
This adds the locations related to the SeqFeature into the
seqfeature_location table. Fuzzies are not handled right now.
For a simple location, ie (1..2), we have a single table row
with seq_start = 1, seq_end = 2, location_rank = 1.
For split locations, ie (1..2, 3..4, 5..6) we would have three
        table rows with:
start = 1, end = 2, rank = 1
start = 3, end = 4, rank = 2
start = 5, end = 6, rank = 3
"""
# TODO - Record an ontology for the locations (using location.term_id)
# which for now as in BioPerl we leave defaulting to NULL.
if feature.location_operator and feature.location_operator != "join":
# e.g. order locations... we don't record "order" so it
# will become a "join" on reloading. What does BioPerl do?
import warnings
warnings.warn("%s location operators are not fully supported"\
% feature.location_operator)
# two cases, a simple location or a split location
if not feature.sub_features: # simple location
self._insert_seqfeature_location(feature, 1, seqfeature_id)
else: # split location
for rank, cur_feature in enumerate(feature.sub_features):
self._insert_seqfeature_location(cur_feature,
rank + 1,
seqfeature_id)
def _insert_seqfeature_location(self, feature, rank, seqfeature_id):
"""Add a location of a SeqFeature to the seqfeature_location table (PRIVATE).
TODO - Add location_operators to location_qualifier_value.
"""
# convert biopython locations to the 1-based location system
# used in bioSQL
# XXX This could also handle fuzzies
start = feature.location.nofuzzy_start + 1
end = feature.location.nofuzzy_end
# Biopython uses None when we don't know strand information but
# BioSQL requires something (non null) and sets this as zero
# So we'll use the strand or 0 if Biopython spits out None
strand = feature.strand or 0
# TODO - Record an ontology term for the location (location.term_id)
# which for now like BioPerl we'll leave as NULL.
# This might allow us to record "between" positions properly, but I
        # don't really see how it could work for before/after fuzzy positions
loc_term_id = None
if feature.ref:
# sub_feature remote locations when they are in the same db as the current
# record do not have a value for ref_db, which the SeqFeature object
# stores as None. BioSQL schema requires a varchar and is not NULL
dbxref_id = self.handler._get_dbxref_id(feature.ref_db or "", feature.ref)
else:
dbxref_id = None
oid = self.adaptor.location.insert(seqfeature_id = seqfeature_id,
dbxref_id = dbxref_id,
term_id = loc_term_id,
start_pos = start,
end_pos = end,
strand = strand,
rank = rank)
"""
# See Bug 2677
# TODO - Record the location_operator (e.g. "join" or "order")
# using the location_qualifier_value table (which we and BioPerl
# have historically left empty).
        # Note this will need an ontology term for the location qualifier
# (location_qualifier_value.term_id) for which oddly the schema
# does not allow NULL.
if feature.location_operator:
#e.g. "join" (common),
#or "order" (see Tests/GenBank/protein_refseq2.gb)
location_id = self.adaptor.last_id('location')
loc_qual_term_id = None # Not allowed in BioSQL v1.0.1
sql = r"INSERT INTO location_qualifier_value" \
r"(location_id, term_id, value)" \
r"VALUES (%s, %s, %s)"
self.adaptor.execute(sql, (location_id, loc_qual_term_id,
feature.location_operator))
"""
def _load_seqfeature_qualifiers(self, qualifiers, seqfeature_id):
"""Insert the (key, value) pair qualifiers relating to a feature (PRIVATE).
Qualifiers should be a dictionary of the form:
{key : [value1, value2]}
"""
tag_ontology_id = self.handler._get_ontology_id('Annotation Tags')
for qualifier_key in qualifiers.keys():
# Treat db_xref qualifiers differently to sequence annotation
# qualifiers by populating the seqfeature_dbxref and dbxref
# tables. Other qualifiers go into the seqfeature_qualifier_value
# and (if new) term tables.
if qualifier_key != 'db_xref':
qualifier_key_id = self.handler._get_term_id(qualifier_key,
ontology_id=tag_ontology_id)
# now add all of the values to their table
entries = qualifiers[qualifier_key]
if not isinstance(entries, list):
# Could be a plain string, or an int or a float.
                    # However, we expect a list of strings here.
entries = [entries]
for qual_value_rank in range(len(entries)):
qualifier_value = entries[qual_value_rank]
oid = self.adaptor.seqfeature_qualifier_value.insert(seqfeature_id = seqfeature_id,
term_id = qualifier_key_id,
rank = qual_value_rank + 1,
value = qualifier_value)
else:
# The dbxref_id qualifier/value sets go into the dbxref table
# as dbname, accession, version tuples, with dbxref.dbxref_id
# being automatically assigned, and into the seqfeature_dbxref
# table as seqfeature_id, dbxref_id, and rank tuples
self._load_seqfeature_dbxref(qualifiers[qualifier_key],
seqfeature_id)
def _update_qualifier(self, seqfeature_id, qualifier_key, qualifier_values):
tag_ontology_id = self.handler._get_ontology_id('Annotation Tags')
if qualifier_key != 'db_xref':
qualifier_key_id = self.handler._get_term_id(qualifier_key,
ontology_id=tag_ontology_id)
# now add all of the values to their table
entries = qualifier_values
if not isinstance(entries, list):
# Could be a plain string, or an int or a float.
                # However, we expect a list of strings here.
entries = [entries]
for qual_value_rank in range(len(entries)):
qualifier_value = entries[qual_value_rank]
stored_value = self.adaptor((self.adaptor.seqfeature_qualifier_value.seqfeature_id == seqfeature_id) &\
(self.adaptor.seqfeature_qualifier_value.term_id == qualifier_key_id) &\
(self.adaptor.seqfeature_qualifier_value.rank == qual_value_rank + 1))
if qualifier_value:
if stored_value.count():
stored_value.update(value = qualifier_value)
else:
self.adaptor.seqfeature_qualifier_value.insert(seqfeature_id = seqfeature_id,
term_id = qualifier_key_id,
rank = qual_value_rank + 1,
value = qualifier_value)
else:
stored_value.delete()
def _load_seqfeature_dbxref(self, dbxrefs, seqfeature_id):
"""Add database crossreferences of a SeqFeature to the database (PRIVATE).
o dbxrefs List, dbxref data from the source file in the
format <database>:<accession>
o seqfeature_id Int, the identifier for the seqfeature in the
seqfeature table
Insert dbxref qualifier data for a seqfeature into the
seqfeature_dbxref and, if required, dbxref tables.
The dbxref_id qualifier/value sets go into the dbxref table
as dbname, accession, version tuples, with dbxref.dbxref_id
being automatically assigned, and into the seqfeature_dbxref
table as seqfeature_id, dbxref_id, and rank tuples
"""
# NOTE - In older versions of Biopython, we would map the GenBank
# db_xref "name", for example "GI" to "GeneIndex", and give a warning
        # for any unknown terms. This was a long term maintenance problem,
# and differed from BioPerl and BioJava's implementation. See bug 2405
for rank, value in enumerate(dbxrefs):
# Split the DB:accession format string at colons. We have to
# account for multiple-line and multiple-accession entries
try:
dbxref_data = value.replace(' ','').replace('\n','').split(':')
db = dbxref_data[0]
accessions = dbxref_data[1:]
except:
raise ValueError("Parsing of db_xref failed: '%s'" % value)
# Loop over all the grabbed accessions, and attempt to fill the
# table
for accession in accessions:
# Get the dbxref_id value for the dbxref data
dbxref_id = self.handler._get_dbxref_id(db, accession)
# Insert the seqfeature_dbxref data
self._get_seqfeature_dbxref(seqfeature_id, dbxref_id, rank+1)
def _get_seqfeature_dbxref(self, seqfeature_id, dbxref_id, rank):
""" Check for a pre-existing seqfeature_dbxref entry with the passed
seqfeature_id and dbxref_id. If one does not exist, insert new
data
"""
# Check for an existing record
result = self.adaptor((self.adaptor.seqfeature_dbxref.seqfeature_id == seqfeature_id) &
(self.adaptor.seqfeature_dbxref.dbxref_id == dbxref_id)).select(self.adaptor.seqfeature_dbxref.seqfeature_id,
self.adaptor.seqfeature_dbxref.dbxref_id)
# If there was a record, return without executing anything, else create
# the record and return
if result:
return (result[0].seqfeature_id, result[0].dbxref_id)
return self._add_seqfeature_dbxref(seqfeature_id, dbxref_id, rank)
def _add_seqfeature_dbxref(self, seqfeature_id, dbxref_id, rank):
""" Insert a seqfeature_dbxref row and return the seqfeature_id and
dbxref_id
"""
self.adaptor.seqfeature_dbxref.insert(seqfeature_id = seqfeature_id,
dbxref_id = dbxref_id,
rank = rank)
return (seqfeature_id, dbxref_id)
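# A minimal usage sketch (illustrative only; `handler` is assumed to be an
# initialised BioSQL handler wrapping a DAL-style adaptor, as used above):
#
#   feature_handler = BioSQLAlterFeature(handler)
#   seqfeature_id = feature_handler.create(seq_feature, bioentry_id)
#   stored_feature = feature_handler.read(seqfeature_id)
#   feature_handler.update(stored_feature, seqfeature_id)
#   feature_handler.delete(seqfeature_id)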
class BioSQLAlterBioentryFeatures(BaseBioSQLAlter):
''' TO DO
'''
def __init__(self, handler, bioentry_id,):
BaseBioSQLAlter.__init__(self, handler = handler)
self.bioentry_id = bioentry_id
self.features = dict() #{rank:{seqfeature_id, feature}} dictionary
self._get()
def _get(self):
        '''get all available features '''
rows = self.adaptor((self.adaptor.seqfeature.bioentry_id == self.bioentry_id)).select()
featureHandler = BioSQLAlterFeature(handler = self.handler)
if rows:
for row in rows:
if row.rank in self.features:
raise ValueError('multiple seqfeatures present with the same rank')
else:
self.features[row.rank] = dict(seqfeature_id = row.seqfeature_id,
seqfeature = featureHandler.read(seqfeature_id = row.seqfeature_id))
        return self.features
def get_seqfeatures(self):
ordered = []
for key in sorted(self.features):
ordered.append((self.features[key]['seqfeature_id'], self.features[key]['seqfeature']))
return ordered
class BioSQLAlterBioentryDBXref(BaseBioSQLAlter):
def create(self):
raise NotImplementedError('This method is not available')
def read(self,):
raise NotImplementedError('This method is not available')
def update(self,dbxref_id, dbname, accession):
self.adaptor.dbxref[int(dbxref_id)] = dict (dbname = dbname, accession = accession)
def update_dbname(self,dbxref_id, dbname):
self.adaptor.dbxref[int(dbxref_id)] = dict (dbname = dbname)
def update_accession(self,dbxref_id, accession):
self.adaptor.dbxref[int(dbxref_id)] = dict ( accession = accession)
def delete(self,bioentry_id, dbxref_id):
self.adaptor((self.adaptor.bioentry_dbxref.bioentry_id == bioentry_id) &\
(self.adaptor.bioentry_dbxref.dbxref_id == dbxref_id)).delete()
def _get(self,):
dbxrefs=((dbxref.dbname, dbxref.accession, dbxref.version) for dbxref in self.adaptor((self.adaptor.bioentry.bioentry_id == self.bioentry_id) &\
(self.adaptor.bioentry.bioentry_id == self.adaptor.bioentry_dbxref.bioentry_id) &\
(self.adaptor.bioentry_dbxref.dbxref_id == self.adaptor.dbxref.dbxref_id)).select(
self.adaptor.dbxref.dbname,self.adaptor.dbxref.accession,self.adaptor.dbxref.version, orderby=self.adaptor.bioentry_dbxref.rank))
for dbname, accession, version in dbxrefs:
if version and version != "0":
v = "%s.%s" % (accession, version)
else:
v = accession
self.dbxrefs.append("%s:%s" % (dbname, v))
class BioSQLAlterBioentryDBXrefs(BaseBioSQLAlter):
def create(self):
raise NotImplementedError('This method is not available')
def read(self,bioentry_id):
dbxrefs=((dbxref.dbname, dbxref.accession, dbxref.version, dbxref.dbxref_id) for dbxref in self.adaptor((self.adaptor.bioentry.bioentry_id == bioentry_id) &\
(self.adaptor.bioentry.bioentry_id == self.adaptor.bioentry_dbxref.bioentry_id) &\
(self.adaptor.bioentry_dbxref.dbxref_id == self.adaptor.dbxref.dbxref_id)).select(
self.adaptor.dbxref.dbname,
self.adaptor.dbxref.dbxref_id,
self.adaptor.dbxref.accession,
self.adaptor.dbxref.version,
orderby=self.adaptor.bioentry_dbxref.rank))
returnlist = []
for dbname, accession, version, dbxref_id in dbxrefs:
if version and version != "0":
v = "%s.%s" % (accession, version)
else:
v = accession
returnlist.append(("%s:%s" % (dbname, v), dbxref_id))
return returnlist
def update(self,):
raise NotImplementedError('This method is not available')
def delete(self,):
raise NotImplementedError('This method is not available')
|
|
#!/usr/bin/env python3
"""Application and persistence management."""
# pylint: disable=no-member, import-error, no-init, too-few-public-methods
# pylint: disable=cyclic-import, no-name-in-module, invalid-name
from sqlalchemy.dialects.postgresql import UUID
from ersa_flask_util import db, id_column, boot, get_or_create, commit
from ersa_flask_util import add, request, require_auth, QueryResource
# Data Models
class Snapshot(db.Model):
"""Snapshot Data Model"""
id = id_column()
ts = db.Column(db.Integer, nullable=False)
message = db.Column(UUID, nullable=False, unique=True)
person_email = db.relationship("PersonEmail", backref="snapshot")
person_username = db.relationship("PersonUsername", backref="snapshot")
memberships = db.relationship("Membership", backref="snapshot")
def json(self):
"""Jsonify"""
return {"id": self.id, "ts": self.ts, "message": self.message}
class Organisation(db.Model):
"""Organisation Data Model"""
id = id_column()
insightly_id = db.Column(db.Integer, index=True, nullable=False)
name = db.Column(db.String(256), index=True)
membership = db.relationship("Membership", backref="organisation")
def json(self):
"""Jsonify"""
return {
"id": self.id,
"insightly_id": self.insightly_id,
"name": self.name
}
class Person(db.Model):
"""Person Data Model"""
id = id_column()
insightly_id = db.Column(db.Integer, index=True, nullable=False)
first_name = db.Column(db.String(128), index=True)
last_name = db.Column(db.String(128), index=True)
email = db.relationship("PersonEmail", backref="person")
username = db.relationship("PersonUsername", backref="person")
membership = db.relationship("Membership", backref="person")
def json(self):
"""Jsonify"""
return {
"id": self.id,
"insightly_id": self.insightly_id,
"first_name": self.first_name,
"last_name": self.last_name
}
class Email(db.Model):
"""Email Data Model"""
id = id_column()
address = db.Column(db.String(128), index=True, nullable=False)
person = db.relationship("PersonEmail", backref="email")
def json(self):
"""Jsonify"""
return {"id": self.id, "address": self.address}
class PersonEmail(db.Model):
"""Person-Email Mapping Data Model"""
id = id_column()
person_id = db.Column(None, db.ForeignKey("person.id"))
email_id = db.Column(None, db.ForeignKey("email.id"))
snapshot_id = db.Column(None, db.ForeignKey("snapshot.id"))
def json(self):
"""Jsonify"""
return {
"id": self.id,
"person": self.person_id,
"email": self.email_id,
"snapshot": self.snapshot_id
}
class Username(db.Model):
"""Username Data Model"""
id = id_column()
username = db.Column(db.String(64), index=True, nullable=False)
person = db.relationship("PersonUsername", backref="username")
def json(self):
"""Jsonify"""
return {"id": self.id, "username": self.username}
class PersonUsername(db.Model):
"""Person-Username Mapping Data Model"""
id = id_column()
person_id = db.Column(None, db.ForeignKey("person.id"))
username_id = db.Column(None, db.ForeignKey("username.id"))
snapshot_id = db.Column(None, db.ForeignKey("snapshot.id"))
def json(self):
"""Jsonify"""
return {
"id": self.id,
"person": self.person_id,
"username": self.username_id,
"snapshot": self.snapshot_id
}
class Membership(db.Model):
"""Organisation Membership Data Model"""
id = id_column()
person_id = db.Column(None, db.ForeignKey("person.id"))
organisation_id = db.Column(None, db.ForeignKey("organisation.id"))
snapshot_id = db.Column(None, db.ForeignKey("snapshot.id"))
def json(self):
"""Jsonify"""
return {
"id": self.id,
"person": self.person_id,
"organisation": self.organisation_id,
"snapshot": self.snapshot_id
}
# Endpoints
class SnapshotResource(QueryResource):
"""Snapshot Endpoint"""
query_class = Snapshot
@require_auth
def put(self):
"""Ingest snapshots."""
for message in request.json:
data = message["data"]
snapshot = Snapshot(ts=data["timestamp"], message=message["id"])
add(snapshot)
for entry in data["organisations"]:
organisation = get_or_create(
Organisation,
insightly_id=entry["id"])
organisation.name = entry["name"]
for entry in data["contacts"]:
person = get_or_create(Person, insightly_id=entry["id"])
person.first_name = entry["first_name"]
person.last_name = entry["last_name"]
if entry["username"]:
username = get_or_create(Username,
username=entry["username"])
get_or_create(PersonUsername,
snapshot=snapshot,
person=person,
username=username)
if entry["email"]:
for address in entry["email"]:
email = get_or_create(Email, address=address)
get_or_create(PersonEmail,
snapshot=snapshot,
person=person,
email=email)
if entry["organisations"]:
for insightly_id in entry["organisations"]:
organisation = get_or_create(
Organisation,
insightly_id=insightly_id)
get_or_create(Membership,
snapshot=snapshot,
organisation=organisation,
person=person)
commit()
return "", 204
class OrganisationResource(QueryResource):
"""Organisation Endpoint"""
query_class = Organisation
class PersonResource(QueryResource):
"""Person Endpoint"""
query_class = Person
class EmailResource(QueryResource):
"""Email Endpoint"""
query_class = Email
class PersonEmailResource(QueryResource):
"""Person/Email Endpoint"""
query_class = PersonEmail
class UsernameResource(QueryResource):
"""Username Endpoint"""
query_class = Username
class PersonUsernameResource(QueryResource):
"""Person/Username Endpoint"""
query_class = PersonUsername
class MembershipResource(QueryResource):
"""Membership Endpoint"""
query_class = Membership
def run():
"""Let's roll."""
resources = {
"/snapshot": SnapshotResource,
"/person": PersonResource,
"/email": EmailResource,
"/person-email": PersonEmailResource,
"/username": UsernameResource,
"/person-username": PersonUsernameResource,
"/organisation": OrganisationResource,
"/membership": MembershipResource
}
boot(resources)
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.logging.v2 LoggingServiceV2 API."""
import functools
import pkg_resources
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
from google.api import monitored_resource_pb2
from google.cloud.logging_v2.gapic import enums
from google.cloud.logging_v2.gapic import logging_service_v2_client_config
from google.cloud.logging_v2.proto import log_entry_pb2
from google.cloud.logging_v2.proto import logging_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-cloud-logging', ).version
class LoggingServiceV2Client(object):
"""Service for ingesting and querying logs."""
SERVICE_ADDRESS = 'logging.googleapis.com:443'
"""The default address of the service."""
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_DEFAULT_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/cloud-platform.read-only',
'https://www.googleapis.com/auth/logging.admin',
'https://www.googleapis.com/auth/logging.read',
'https://www.googleapis.com/auth/logging.write',
)
# The name of the interface for this client. This is the key used to find
# method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.logging.v2.LoggingServiceV2'
@classmethod
def project_path(cls, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
'projects/{project}',
project=project,
)
@classmethod
def log_path(cls, project, log):
"""Return a fully-qualified log string."""
return google.api_core.path_template.expand(
'projects/{project}/logs/{log}',
project=project,
log=log,
)
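    # For example (values are illustrative):
    #   LoggingServiceV2Client.log_path('my-project', 'syslog')
    #   returns 'projects/my-project/logs/syslog'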
def __init__(self,
channel=None,
credentials=None,
client_config=logging_service_v2_client_config.config,
client_info=None):
"""Constructor.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_config (dict): A dictionary of call options for each
method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
'The `channel` and `credentials` arguments to {} are mutually '
'exclusive.'.format(self.__class__.__name__), )
# Create the channel.
if channel is None:
channel = google.api_core.grpc_helpers.create_channel(
self.SERVICE_ADDRESS,
credentials=credentials,
scopes=self._DEFAULT_SCOPES,
)
# Create the gRPC stubs.
self.logging_service_v2_stub = (
logging_pb2.LoggingServiceV2Stub(channel))
if client_info is None:
client_info = (
google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO)
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME], )
# Write the "inner API call" methods to the class.
# These are wrapped versions of the gRPC stub methods, with retry and
# timeout configuration applied, called by the public methods on
# this class.
self._delete_log = google.api_core.gapic_v1.method.wrap_method(
self.logging_service_v2_stub.DeleteLog,
default_retry=method_configs['DeleteLog'].retry,
default_timeout=method_configs['DeleteLog'].timeout,
client_info=client_info,
)
self._write_log_entries = google.api_core.gapic_v1.method.wrap_method(
self.logging_service_v2_stub.WriteLogEntries,
default_retry=method_configs['WriteLogEntries'].retry,
default_timeout=method_configs['WriteLogEntries'].timeout,
client_info=client_info,
)
self._list_log_entries = google.api_core.gapic_v1.method.wrap_method(
self.logging_service_v2_stub.ListLogEntries,
default_retry=method_configs['ListLogEntries'].retry,
default_timeout=method_configs['ListLogEntries'].timeout,
client_info=client_info,
)
self._list_monitored_resource_descriptors = google.api_core.gapic_v1.method.wrap_method(
self.logging_service_v2_stub.ListMonitoredResourceDescriptors,
default_retry=method_configs[
'ListMonitoredResourceDescriptors'].retry,
default_timeout=method_configs['ListMonitoredResourceDescriptors']
.timeout,
client_info=client_info,
)
self._list_logs = google.api_core.gapic_v1.method.wrap_method(
self.logging_service_v2_stub.ListLogs,
default_retry=method_configs['ListLogs'].retry,
default_timeout=method_configs['ListLogs'].timeout,
client_info=client_info,
)
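        # Each self._<rpc> callable created above behaves like the raw stub
        # method but with the configured retry/timeout applied. The public
        # methods below simply build a request proto and delegate to them,
        # e.g. (illustrative):
        #   self._delete_log(request, retry=retry, timeout=timeout,
        #                    metadata=metadata)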
# Service calls
def delete_log(self,
log_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Deletes all the log entries in a log.
The log reappears if it receives new entries.
Log entries written shortly before the delete operation might not be
deleted.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.LoggingServiceV2Client()
>>>
>>> log_name = client.log_path('[PROJECT]', '[LOG]')
>>>
>>> client.delete_log(log_name)
Args:
log_name (str): Required. The resource name of the log to delete:
::
\"projects/[PROJECT_ID]/logs/[LOG_ID]\"
\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"
\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"
\"folders/[FOLDER_ID]/logs/[LOG_ID]\"
``[LOG_ID]`` must be URL-encoded. For example,
``\"projects/my-project-id/logs/syslog\"``,
``\"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\"``.
For more information about log names, see
``LogEntry``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = logging_pb2.DeleteLogRequest(log_name=log_name, )
self._delete_log(
request, retry=retry, timeout=timeout, metadata=metadata)
def write_log_entries(self,
entries,
log_name=None,
resource=None,
labels=None,
partial_success=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Writes log entries to Stackdriver Logging. This API method is the
only way to send log entries to Stackdriver Logging. This method
is used, directly or indirectly, by the Stackdriver Logging agent
(fluentd) and all logging libraries configured to use Stackdriver
Logging.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.LoggingServiceV2Client()
>>>
>>> entries = []
>>>
>>> response = client.write_log_entries(entries)
Args:
entries (list[Union[dict, ~google.cloud.logging_v2.types.LogEntry]]): Required. The log entries to send to Stackdriver Logging. The order of log
entries in this list does not matter. Values supplied in this method's
``log_name``, ``resource``, and ``labels`` fields are copied into those log
entries in this list that do not include values for their corresponding
fields. For more information, see the ``LogEntry`` type.
If the ``timestamp`` or ``insert_id`` fields are missing in log entries, then
this method supplies the current time or a unique identifier, respectively.
The supplied values are chosen so that, among the log entries that did not
supply their own values, the entries earlier in the list will sort before
the entries later in the list. See the ``entries.list`` method.
Log entries with timestamps that are more than the
`logs retention period <https://cloud.google.com/logging/quota-policy>`_ in the past or more than
24 hours in the future might be discarded. Discarding does not return
an error.
To improve throughput and to avoid exceeding the
`quota limit <https://cloud.google.com/logging/quota-policy>`_ for calls to ``entries.write``,
you should try to include several log entries in this list,
rather than calling this method for each individual log entry.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.logging_v2.types.LogEntry`
log_name (str): Optional. A default log resource name that is assigned to all log entries
in ``entries`` that do not specify a value for ``log_name``:
::
\"projects/[PROJECT_ID]/logs/[LOG_ID]\"
\"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]\"
\"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]\"
\"folders/[FOLDER_ID]/logs/[LOG_ID]\"
``[LOG_ID]`` must be URL-encoded. For example,
``\"projects/my-project-id/logs/syslog\"`` or
``\"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity\"``.
For more information about log names, see
``LogEntry``.
resource (Union[dict, ~google.cloud.logging_v2.types.MonitoredResource]): Optional. A default monitored resource object that is assigned to all log
entries in ``entries`` that do not specify a value for ``resource``. Example:
::
{ \"type\": \"gce_instance\",
\"labels\": {
\"zone\": \"us-central1-a\", \"instance_id\": \"00000000000000000000\" }}
See ``LogEntry``.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.logging_v2.types.MonitoredResource`
labels (dict[str -> str]): Optional. Default labels that are added to the ``labels`` field of all log
entries in ``entries``. If a log entry already has a label with the same key
as a label in this parameter, then the log entry's label is not changed.
See ``LogEntry``.
partial_success (bool): Optional. Whether valid entries should be written even if some other
entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any
entry is not written, then the response status is the error associated
with one of the failed entries and the response includes error details
keyed by the entries' zero-based index in the ``entries.write`` method.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.logging_v2.types.WriteLogEntriesResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = logging_pb2.WriteLogEntriesRequest(
entries=entries,
log_name=log_name,
resource=resource,
labels=labels,
partial_success=partial_success,
)
return self._write_log_entries(
request, retry=retry, timeout=timeout, metadata=metadata)
def list_log_entries(self,
resource_names,
project_ids=None,
filter_=None,
order_by=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists log entries. Use this method to retrieve log entries from
Stackdriver Logging. For ways to export log entries, see
`Exporting Logs <https://cloud.google.com/logging/docs/export>`_.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.LoggingServiceV2Client()
>>>
>>> resource_names = []
>>>
>>>
>>> # Iterate over all results
>>> for element in client.list_log_entries(resource_names):
... # process element
... pass
>>>
>>> # Or iterate over results one page at a time
            >>> for page in client.list_log_entries(resource_names).pages:
... for element in page:
... # process element
... pass
Args:
resource_names (list[str]): Required. Names of one or more parent resources from which to
retrieve log entries:
::
\"projects/[PROJECT_ID]\"
\"organizations/[ORGANIZATION_ID]\"
\"billingAccounts/[BILLING_ACCOUNT_ID]\"
\"folders/[FOLDER_ID]\"
Projects listed in the ``project_ids`` field are added to this list.
project_ids (list[str]): Deprecated. Use ``resource_names`` instead. One or more project identifiers
or project numbers from which to retrieve log entries. Example:
``\"my-project-1A\"``. If present, these project identifiers are converted to
resource name format and added to the list of resources in
``resource_names``.
filter_ (str): Optional. A filter that chooses which log entries to return. See [Advanced
Logs Filters](/logging/docs/view/advanced_filters). Only log entries that
match the filter are returned. An empty filter matches all log entries in
the resources listed in ``resource_names``. Referencing a parent resource
that is not listed in ``resource_names`` will cause the filter to return no
results.
The maximum length of the filter is 20000 characters.
order_by (str): Optional. How the results should be sorted. Presently, the only permitted
values are ``\"timestamp asc\"`` (default) and ``\"timestamp desc\"``. The first
option returns entries in order of increasing values of
``LogEntry.timestamp`` (oldest first), and the second option returns entries
in order of decreasing timestamps (newest first). Entries with equal
timestamps are returned in order of their ``insert_id`` values.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
            A :class:`~google.api_core.page_iterator.GRPCIterator` instance.
            By default, this is an iterable of
            :class:`~google.cloud.logging_v2.types.LogEntry` instances.
            This object can also be used to iterate over the pages
            of the response through its ``pages`` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = logging_pb2.ListLogEntriesRequest(
resource_names=resource_names,
project_ids=project_ids,
filter=filter_,
order_by=order_by,
page_size=page_size,
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._list_log_entries,
retry=retry,
timeout=timeout,
metadata=metadata),
request=request,
items_field='entries',
request_token_field='page_token',
response_token_field='next_page_token',
)
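        # Note that the GRPCIterator above is lazy: no RPC is issued until the
        # caller starts iterating, e.g. (illustrative):
        #   entries = client.list_log_entries(['projects/my-project'])
        #   first = next(iter(entries))  # the first ListLogEntries call
        #                                # happens here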
return iterator
def list_monitored_resource_descriptors(
self,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists the descriptors for monitored resource types used by Stackdriver
Logging.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.LoggingServiceV2Client()
>>>
>>>
>>> # Iterate over all results
>>> for element in client.list_monitored_resource_descriptors():
... # process element
... pass
>>>
>>> # Or iterate over results one page at a time
            >>> for page in client.list_monitored_resource_descriptors().pages:
... for element in page:
... # process element
... pass
Args:
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
            A :class:`~google.api_core.page_iterator.GRPCIterator` instance.
            By default, this is an iterable of
            :class:`~google.cloud.logging_v2.types.MonitoredResourceDescriptor` instances.
            This object can also be used to iterate over the pages
            of the response through its ``pages`` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = logging_pb2.ListMonitoredResourceDescriptorsRequest(
page_size=page_size, )
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._list_monitored_resource_descriptors,
retry=retry,
timeout=timeout,
metadata=metadata),
request=request,
items_field='resource_descriptors',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def list_logs(self,
parent,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists the logs in projects, organizations, folders, or billing accounts.
Only logs that have entries are listed.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.LoggingServiceV2Client()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>>
>>> # Iterate over all results
>>> for element in client.list_logs(parent):
... # process element
... pass
>>>
>>> # Or iterate over results one page at a time
            >>> for page in client.list_logs(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required. The resource name that owns the logs:
::
\"projects/[PROJECT_ID]\"
\"organizations/[ORGANIZATION_ID]\"
\"billingAccounts/[BILLING_ACCOUNT_ID]\"
\"folders/[FOLDER_ID]\"
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
            A :class:`~google.api_core.page_iterator.GRPCIterator` instance.
            By default, this is an iterable of :class:`str` instances.
            This object can also be used to iterate over the pages
            of the response through its ``pages`` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = logging_pb2.ListLogsRequest(
parent=parent,
page_size=page_size,
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._list_logs,
retry=retry,
timeout=timeout,
metadata=metadata),
request=request,
items_field='log_names',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
|
|
# Copyright 2015 Rackspace Hosting Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import inspect
import sys
import mock
from neutronclient.common import extension
from neutronclient.neutron.v2_0.contrib import _fox_sockets as fox_sockets
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
class CLITestV20ExtensionJSON(test_cli20.CLITestV20Base):
non_admin_status_resources = ['fox_socket']
def setUp(self):
# need to mock before super because extensions loaded on instantiation
self._mock_extension_loading()
super(CLITestV20ExtensionJSON, self).setUp(plurals={'tags': 'tag'})
def _mock_extension_loading(self):
ext_pkg = 'neutronclient.common.extension'
contrib = mock.patch(ext_pkg + '._discover_via_entry_points').start()
contrib.return_value = [("_fox_sockets", fox_sockets)]
return contrib
def test_ext_cmd_loaded(self):
shell.NeutronShell('2.0')
ext_cmd = {'fox-sockets-list': fox_sockets.FoxInSocketsList,
'fox-sockets-create': fox_sockets.FoxInSocketsCreate,
'fox-sockets-update': fox_sockets.FoxInSocketsUpdate,
'fox-sockets-delete': fox_sockets.FoxInSocketsDelete,
'fox-sockets-show': fox_sockets.FoxInSocketsShow}
self.assertDictContainsSubset(ext_cmd, shell.COMMANDS['2.0'])
def test_ext_cmd_help_doc_with_extension_name(self):
shell.NeutronShell('2.0')
ext_cmd = {'fox-sockets-list': fox_sockets.FoxInSocketsList,
'fox-sockets-create': fox_sockets.FoxInSocketsCreate,
'fox-sockets-update': fox_sockets.FoxInSocketsUpdate,
'fox-sockets-delete': fox_sockets.FoxInSocketsDelete,
'fox-sockets-show': fox_sockets.FoxInSocketsShow}
self.assertDictContainsSubset(ext_cmd, shell.COMMANDS['2.0'])
for item in ext_cmd:
cmdcls = shell.COMMANDS['2.0'].get(item)
self.assertTrue(cmdcls.__doc__.startswith("[_fox_sockets]"))
def test_delete_fox_socket(self):
# Delete fox socket: myid.
resource = 'fox_socket'
cmd = fox_sockets.FoxInSocketsDelete(test_cli20.MyApp(sys.stdout),
None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
def test_update_fox_socket(self):
# Update fox_socket: myid --name myname.
resource = 'fox_socket'
cmd = fox_sockets.FoxInSocketsUpdate(test_cli20.MyApp(sys.stdout),
None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname'],
{'name': 'myname'})
def test_create_fox_socket(self):
# Create fox_socket: myname.
resource = 'fox_socket'
cmd = fox_sockets.FoxInSocketsCreate(test_cli20.MyApp(sys.stdout),
None)
name = 'myname'
myid = 'myid'
args = [name, ]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_list_fox_sockets(self):
# List fox_sockets.
resources = 'fox_sockets'
cmd = fox_sockets.FoxInSocketsList(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_fox_pagination(self):
resources = 'fox_sockets'
cmd = fox_sockets.FoxInSocketsList(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_show_fox_socket(self):
# Show fox_socket: --fields id --fields name myid.
resource = 'fox_socket'
cmd = fox_sockets.FoxInSocketsShow(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
class CLITestV20ExtensionJSONAlternatePlurals(test_cli20.CLITestV20Base):
class IPAddress(extension.NeutronClientExtension):
resource = 'ip_address'
resource_plural = '%ses' % resource
object_path = '/%s' % resource_plural
resource_path = '/%s/%%s' % resource_plural
versions = ['2.0']
class IPAddressesList(extension.ClientExtensionList, IPAddress):
shell_command = 'ip-address-list'
def setUp(self):
# need to mock before super because extensions loaded on instantiation
self._mock_extension_loading()
super(CLITestV20ExtensionJSONAlternatePlurals, self).setUp()
def _mock_extension_loading(self):
ext_pkg = 'neutronclient.common.extension'
contrib = mock.patch(ext_pkg + '._discover_via_entry_points').start()
ip_address = mock.MagicMock()
ip_address.IPAddress = self.IPAddress
ip_address.IPAddressesList = self.IPAddressesList
contrib.return_value = [("ip_address", ip_address)]
return contrib
def test_ext_cmd_loaded(self):
shell.NeutronShell('2.0')
ext_cmd = {'ip-address-list': self.IPAddressesList}
self.assertDictContainsSubset(ext_cmd, shell.COMMANDS['2.0'])
def test_list_ip_addresses(self):
# List ip_addresses.
resources = 'ip_addresses'
cmd = self.IPAddressesList(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
class CLITestV20ExtensionJSONChildResource(test_cli20.CLITestV20Base):
class Child(extension.NeutronClientExtension):
parent_resource = 'parents'
child_resource = 'child'
resource = '%s_%s' % (parent_resource, child_resource)
resource_plural = '%sren' % resource
        child_resource_plural = '%sren' % child_resource
object_path = '/%s/%%s/%s' % (parent_resource, child_resource_plural)
resource_path = '/%s/%%s/%s/%%s' % (parent_resource,
child_resource_plural)
versions = ['2.0']
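        # For reference, the format strings above evaluate to (derived from
        # the values in this class):
        #   resource              -> 'parents_child'
        #   resource_plural       -> 'parents_children'
        #   child_resource_plural -> 'children'
        #   object_path           -> '/parents/%s/children'
        #   resource_path         -> '/parents/%s/children/%s'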
class ChildrenList(extension.ClientExtensionList, Child):
shell_command = 'parent-child-list'
class ChildShow(extension.ClientExtensionShow, Child):
shell_command = 'parent-child-show'
class ChildUpdate(extension.ClientExtensionUpdate, Child):
shell_command = 'parent-child-update'
class ChildDelete(extension.ClientExtensionDelete, Child):
shell_command = 'parent-child-delete'
class ChildCreate(extension.ClientExtensionCreate, Child):
shell_command = 'parent-child-create'
def setUp(self):
# need to mock before super because extensions loaded on instantiation
self._mock_extension_loading()
super(CLITestV20ExtensionJSONChildResource, self).setUp()
def _mock_extension_loading(self):
ext_pkg = 'neutronclient.common.extension'
contrib = mock.patch(ext_pkg + '._discover_via_entry_points').start()
child = mock.MagicMock()
child.Child = self.Child
child.ChildrenList = self.ChildrenList
child.ChildShow = self.ChildShow
child.ChildUpdate = self.ChildUpdate
child.ChildDelete = self.ChildDelete
child.ChildCreate = self.ChildCreate
contrib.return_value = [("child", child)]
return contrib
def test_ext_cmd_loaded(self):
shell.NeutronShell('2.0')
ext_cmd = {'parent-child-list': self.ChildrenList,
'parent-child-show': self.ChildShow,
'parent-child-update': self.ChildUpdate,
'parent-child-delete': self.ChildDelete,
'parent-child-create': self.ChildCreate}
self.assertDictContainsSubset(ext_cmd, shell.COMMANDS['2.0'])
def test_client_methods_have_parent_id_arg(self):
methods = (self.client.list_parents_children,
self.client.show_parents_child,
self.client.update_parents_child,
self.client.delete_parents_child,
self.client.create_parents_child)
for method in methods:
argspec = inspect.getargspec(method)
self.assertIn("parent_id", argspec.args)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_messaging.rpc import dispatcher
from heat.common import exception
from heat.engine import service
from heat.engine import stack as parser
from heat.engine import stack_lock
from heat.objects import stack as stack_object
from heat.objects import stack_lock as stack_lock_object
from heat.tests import common
from heat.tests.engine import tools
from heat.tests import utils
class StackDeleteTest(common.HeatTestCase):
def setUp(self):
super(StackDeleteTest, self).setUp()
self.ctx = utils.dummy_context()
self.man = service.EngineService('a-host', 'a-topic')
self.man.create_periodic_tasks()
@mock.patch.object(parser.Stack, 'load')
def test_stack_delete(self, mock_load):
stack_name = 'service_delete_test_stack'
stack = tools.get_stack(stack_name, self.ctx)
sid = stack.store()
mock_load.return_value = stack
s = stack_object.Stack.get_by_id(self.ctx, sid)
self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
self.man.thread_group_mgr.groups[sid].wait()
mock_load.assert_called_once_with(self.ctx, stack=s)
def test_stack_delete_nonexist(self):
stack_name = 'service_delete_nonexist_test_stack'
stack = tools.get_stack(stack_name, self.ctx)
ex = self.assertRaises(dispatcher.ExpectedException,
self.man.delete_stack,
self.ctx, stack.identifier())
self.assertEqual(exception.EntityNotFound, ex.exc_info[0])
@mock.patch.object(parser.Stack, 'load')
@mock.patch.object(stack_lock.StackLock, 'try_acquire')
def test_stack_delete_acquired_lock(self, mock_acquire, mock_load):
mock_acquire.return_value = self.man.engine_id
stack_name = 'service_delete_test_stack_acquired_lock'
stack = tools.get_stack(stack_name, self.ctx)
sid = stack.store()
mock_load.return_value = stack
st = stack_object.Stack.get_by_id(self.ctx, sid)
self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
self.man.thread_group_mgr.groups[sid].wait()
mock_acquire.assert_called_once_with()
mock_load.assert_called_once_with(self.ctx, stack=st)
@mock.patch.object(parser.Stack, 'load')
@mock.patch.object(stack_lock.StackLock, 'try_acquire')
def test_stack_delete_acquired_lock_stop_timers(self, mock_acquire,
mock_load):
mock_acquire.return_value = self.man.engine_id
stack_name = 'service_delete_test_stack_stop_timers'
stack = tools.get_stack(stack_name, self.ctx)
sid = stack.store()
mock_load.return_value = stack
st = stack_object.Stack.get_by_id(self.ctx, sid)
self.man.thread_group_mgr.add_timer(stack.id, 'test')
self.assertEqual(1, len(self.man.thread_group_mgr.groups[sid].timers))
self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
self.assertEqual(0, len(self.man.thread_group_mgr.groups[sid].timers))
self.man.thread_group_mgr.groups[sid].wait()
mock_acquire.assert_called_once_with()
mock_load.assert_called_once_with(self.ctx, stack=st)
@mock.patch.object(parser.Stack, 'load')
@mock.patch.object(stack_lock.StackLock, 'try_acquire')
@mock.patch.object(stack_lock.StackLock, 'acquire')
def test_stack_delete_current_engine_active_lock(self, mock_acquire,
mock_try, mock_load):
self.man.start()
stack_name = 'service_delete_test_stack_current_active_lock'
stack = tools.get_stack(stack_name, self.ctx)
sid = stack.store()
# Insert a fake lock into the db
stack_lock_object.StackLock.create(stack.id, self.man.engine_id)
# Create a fake ThreadGroup too
self.man.thread_group_mgr.groups[stack.id] = tools.DummyThreadGroup()
st = stack_object.Stack.get_by_id(self.ctx, sid)
mock_load.return_value = stack
mock_try.return_value = self.man.engine_id
mock_stop = self.patchobject(self.man.thread_group_mgr, 'stop')
self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
mock_load.assert_called_with(self.ctx, stack=st)
self.assertEqual(2, len(mock_load.mock_calls))
mock_try.assert_called_once_with()
mock_acquire.assert_called_once_with(True)
mock_stop.assert_called_once_with(stack.id)
@mock.patch.object(parser.Stack, 'load')
@mock.patch.object(stack_lock.StackLock, 'try_acquire')
@mock.patch.object(stack_lock.StackLock, 'engine_alive')
def test_stack_delete_other_engine_active_lock_failed(self, mock_alive,
mock_try, mock_load):
OTHER_ENGINE = "other-engine-fake-uuid"
self.man.start()
stack_name = 'service_delete_test_stack_other_engine_lock_fail'
stack = tools.get_stack(stack_name, self.ctx)
sid = stack.store()
# Insert a fake lock into the db
stack_lock_object.StackLock.create(stack.id, OTHER_ENGINE)
st = stack_object.Stack.get_by_id(self.ctx, sid)
mock_load.return_value = stack
mock_try.return_value = OTHER_ENGINE
mock_alive.return_value = True
mock_call = self.patchobject(self.man, '_remote_call',
return_value=False)
ex = self.assertRaises(dispatcher.ExpectedException,
self.man.delete_stack,
self.ctx, stack.identifier())
self.assertEqual(exception.StopActionFailed, ex.exc_info[0])
mock_load.assert_called_once_with(self.ctx, stack=st)
mock_try.assert_called_once_with()
mock_alive.assert_called_once_with(self.ctx, OTHER_ENGINE)
mock_call.assert_called_once_with(self.ctx, OTHER_ENGINE, "stop_stack",
stack_identity=mock.ANY)
@mock.patch.object(parser.Stack, 'load')
@mock.patch.object(stack_lock.StackLock, 'try_acquire')
@mock.patch.object(stack_lock.StackLock, 'engine_alive')
@mock.patch.object(stack_lock.StackLock, 'acquire')
def test_stack_delete_other_engine_active_lock_succeeded(
self, mock_acquire, mock_alive, mock_try, mock_load):
OTHER_ENGINE = "other-engine-fake-uuid"
self.man.start()
stack_name = 'service_delete_test_stack_other_engine_lock'
stack = tools.get_stack(stack_name, self.ctx)
sid = stack.store()
# Insert a fake lock into the db
stack_lock_object.StackLock.create(stack.id, OTHER_ENGINE)
st = stack_object.Stack.get_by_id(self.ctx, sid)
mock_load.return_value = stack
mock_try.return_value = OTHER_ENGINE
mock_alive.return_value = True
mock_call = self.patchobject(self.man, '_remote_call',
return_value=None)
self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
self.man.thread_group_mgr.groups[sid].wait()
self.assertEqual(2, len(mock_load.mock_calls))
mock_load.assert_called_with(self.ctx, stack=st)
mock_try.assert_called_once_with()
mock_alive.assert_called_once_with(self.ctx, OTHER_ENGINE)
mock_call.assert_called_once_with(self.ctx, OTHER_ENGINE, "stop_stack",
stack_identity=mock.ANY)
mock_acquire.assert_called_once_with(True)
@mock.patch.object(parser.Stack, 'load')
@mock.patch.object(stack_lock.StackLock, 'try_acquire')
@mock.patch.object(stack_lock.StackLock, 'engine_alive')
@mock.patch.object(stack_lock.StackLock, 'acquire')
def test_stack_delete_other_dead_engine_active_lock(
self, mock_acquire, mock_alive, mock_try, mock_load):
OTHER_ENGINE = "other-engine-fake-uuid"
stack_name = 'service_delete_test_stack_other_dead_engine'
stack = tools.get_stack(stack_name, self.ctx)
sid = stack.store()
# Insert a fake lock into the db
stack_lock_object.StackLock.create(stack.id, "other-engine-fake-uuid")
st = stack_object.Stack.get_by_id(self.ctx, sid)
mock_load.return_value = stack
mock_try.return_value = OTHER_ENGINE
mock_alive.return_value = False
self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
self.man.thread_group_mgr.groups[sid].wait()
mock_load.assert_called_with(self.ctx, stack=st)
mock_try.assert_called_once_with()
mock_acquire.assert_called_once_with(True)
mock_alive.assert_called_once_with(self.ctx, OTHER_ENGINE)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancersOperations(object):
"""LoadBalancersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
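    # Illustrative usage only (the client and resource names are assumptions,
    # not part of this module):
    #   poller = network_client.load_balancers.begin_delete('my-rg', 'my-lb')
    #   poller.result()  # blocks until the long-running delete completes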
def get(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.LoadBalancer"
"""Gets the specified load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LoadBalancer, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_06_01.models.LoadBalancer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
parameters, # type: "_models.LoadBalancer"
**kwargs # type: Any
):
# type: (...) -> "_models.LoadBalancer"
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'LoadBalancer')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
parameters, # type: "_models.LoadBalancer"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.LoadBalancer"]
"""Creates or updates a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param parameters: Parameters supplied to the create or update load balancer operation.
:type parameters: ~azure.mgmt.network.v2017_06_01.models.LoadBalancer
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either LoadBalancer or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_06_01.models.LoadBalancer]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancer"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.LoadBalancerListResult"]
"""Gets all the load balancers in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_06_01.models.LoadBalancerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers'} # type: ignore
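    # Illustrative usage only (the client variable name is an assumption):
    #   for lb in network_client.load_balancers.list_all():
    #       print(lb.name)
    # ItemPaged drives prepare_request/get_next/extract_data lazily, issuing
    # the next request only when the current page is exhausted.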
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.LoadBalancerListResult"]
"""Gets all the load balancers in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_06_01.models.LoadBalancerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers'} # type: ignore
|
|
import os
import re
import tempfile
import urlparse
from djblets.util.filesystem import is_exe_in_path
from reviewboard.scmtools import sshutils
from reviewboard.scmtools.core import SCMTool, HEAD, PRE_CREATION
from reviewboard.scmtools.errors import SCMError, FileNotFoundError, \
RepositoryNotFoundError
from reviewboard.diffviewer.parser import DiffParser, DiffParserError
sshutils.register_rbssh('CVS_RSH')
class CVSTool(SCMTool):
name = "CVS"
supports_authentication = True
dependencies = {
'executables': ['cvs'],
}
rev_re = re.compile(r'^.*?(\d+(\.\d+)+)\r?$')
repopath_re = re.compile(r'^(?P<hostname>.*):(?P<port>\d+)?(?P<path>.*)')
ext_cvsroot_re = re.compile(r':ext:([^@]+@)?(?P<hostname>[^:/]+)')
def __init__(self, repository):
super(CVSTool, self).__init__(repository)
self.cvsroot, self.repopath = \
self.build_cvsroot(self.repository.path,
self.repository.username,
self.repository.password)
local_site_name = None
if repository.local_site:
local_site_name = repository.local_site.name
self.client = CVSClient(self.cvsroot, self.repopath, local_site_name)
def get_file(self, path, revision=HEAD):
if not path:
raise FileNotFoundError(path, revision)
return self.client.cat_file(path, revision)
def parse_diff_revision(self, file_str, revision_str, *args, **kwargs):
if revision_str == "PRE-CREATION":
return file_str, PRE_CREATION
m = self.rev_re.match(revision_str)
if not m:
raise SCMError("Unable to parse diff revision header '%s'" %
revision_str)
return file_str, m.group(1)
def get_diffs_use_absolute_paths(self):
return True
def get_fields(self):
return ['diff_path']
def get_parser(self, data):
return CVSDiffParser(data, self.repopath)
@classmethod
def build_cvsroot(cls, path, username, password):
# NOTE: According to cvs, the following formats are valid.
#
# :(gserver|kserver|pserver):[[user][:password]@]host[:[port]]/path
# [:(ext|server):][[user]@]host[:]/path
# :local:e:\path
# :fork:/path
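        # Illustrative example (hypothetical values): a path such as
        #   "cvs.example.com:2401/usr/local/cvsroot"
        # with username "alice" and password "secret" is rewritten to
        #   ":pserver:alice:secret@cvs.example.com:2401/usr/local/cvsroot",
        # while anything already starting with ":" is returned unchanged.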
if not path.startswith(":"):
# The user has a path or something. We'll want to parse out the
# server name, port (if specified) and path and build a :pserver:
# CVSROOT.
m = cls.repopath_re.match(path)
if m:
path = m.group("path")
cvsroot = ":pserver:"
if username:
if password:
cvsroot += '%s:%s@' % (username,
password)
else:
cvsroot += '%s@' % (username)
cvsroot += "%s:%s%s" % (m.group("hostname"),
m.group("port") or "",
path)
return cvsroot, path
# We couldn't parse this as a hostname:port/path. Assume it's a local
# path or a full CVSROOT and let CVS handle it.
return path, path
@classmethod
def check_repository(cls, path, username=None, password=None,
local_site_name=None):
"""
Performs checks on a repository to test its validity.
This should check if a repository exists and can be connected to.
This will also check if the repository requires an HTTPS certificate.
The result is returned as an exception. The exception may contain
extra information, such as a human-readable description of the problem.
If the repository is valid and can be connected to, no exception
will be thrown.
"""
# CVS paths are a bit strange, so we can't actually use the
# SSH checking in SCMTool.check_repository. Do our own.
m = cls.ext_cvsroot_re.match(path)
if m:
sshutils.check_host(m.group('hostname'), username, password,
local_site_name)
cvsroot, repopath = cls.build_cvsroot(path, username, password)
client = CVSClient(cvsroot, repopath, local_site_name)
try:
client.cat_file('CVSROOT/modules', HEAD)
except (SCMError, FileNotFoundError):
raise RepositoryNotFoundError()
@classmethod
def parse_hostname(cls, path):
"""Parses a hostname from a repository path."""
return urlparse.urlparse(path)[1] # netloc
class CVSDiffParser(DiffParser):
"""This class is able to parse diffs created with CVS. """
regex_small = re.compile('^RCS file: (.+)$')
def __init__(self, data, repo):
DiffParser.__init__(self, data)
self.regex_full = re.compile('^RCS file: %s/(.*),v$' % re.escape(repo))
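        # Illustrative examples (repository paths are hypothetical): with
        # repo == "/cvsroot/module", a line such as
        #   "RCS file: /cvsroot/module/dir/foo.c,v"
        # matches regex_full and captures "dir/foo.c"; regex_small is the
        # fallback and captures everything after "RCS file: " verbatim.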
def parse_special_header(self, linenum, info):
linenum = super(CVSDiffParser, self).parse_special_header(linenum, info)
if 'index' not in info:
# We didn't find an index, so the rest is probably bogus too.
return linenum
m = self.regex_full.match(self.lines[linenum])
if not m:
m = self.regex_small.match(self.lines[linenum])
if m:
info['filename'] = m.group(1)
linenum += 1
else:
raise DiffParserError('Unable to find RCS line', linenum)
while self.lines[linenum].startswith('retrieving '):
linenum += 1
if self.lines[linenum].startswith('diff '):
linenum += 1
return linenum
def parse_diff_header(self, linenum, info):
linenum = super(CVSDiffParser, self).parse_diff_header(linenum, info)
if info.get('origFile') == '/dev/null':
info['origFile'] = info['newFile']
info['origInfo'] = 'PRE-CREATION'
elif 'filename' in info:
info['origFile'] = info['filename']
if info.get('newFile') == '/dev/null':
info['deleted'] = True
return linenum
class CVSClient(object):
def __init__(self, cvsroot, path, local_site_name):
self.tempdir = ""
self.currentdir = os.getcwd()
self.cvsroot = cvsroot
self.path = path
self.local_site_name = local_site_name
if not is_exe_in_path('cvs'):
# This is technically not the right kind of error, but it's the
# pattern we use with all the other tools.
raise ImportError
def cleanup(self):
if self.currentdir != os.getcwd():
# Restore current working directory
os.chdir(self.currentdir)
# Remove temporary directory
if self.tempdir != "":
os.rmdir(self.tempdir)
def cat_file(self, filename, revision):
# We strip the repo off of the fully qualified path as CVS does
# not like to be given absolute paths.
repos_path = self.path.split(":")[-1]
if '@' in repos_path:
repos_path = '/' + repos_path.split('@')[-1].split('/', 1)[-1]
if filename.startswith(repos_path + "/"):
filename = filename[len(repos_path) + 1:]
        # Strip off the ",v" we sometimes get for CVS paths. Note that using
        # str.rstrip(",v") here would strip a *set* of trailing characters
        # (so a name ending in "v" could lose extra characters); slicing off
        # the two-character suffix is the safe way to do this.
        if filename.endswith(",v"):
            filename = filename[:-2]
        # We want to try to fetch the files with different permutations of
        # "Attic" and no "Attic". Combined with Windows- and Unix-style path
        # separators, this gives four permutations that we may have to check.
filenameAttic = filename
if '/Attic/' in filename:
filename = '/'.join(filename.rsplit('/Attic/', 1))
elif '\\Attic\\' in filename:
filename = '\\'.join(filename.rsplit('\\Attic\\', 1))
elif '\\' in filename:
pos = filename.rfind('\\')
filenameAttic = filename[0:pos] + "\\Attic" + filename[pos:]
elif '/' in filename:
pos = filename.rfind('/')
filenameAttic = filename[0:pos] + "/Attic" + filename[pos:]
else:
# There isn't any path information, so we can't provide an
# Attic path that makes any kind of sense.
filenameAttic = None
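        # Illustrative example (hypothetical path): for 'src/Attic/foo.c' the
        # pair becomes filename='src/foo.c' and filenameAttic='src/Attic/foo.c',
        # so the plain path is tried first and the Attic path is the fallback.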
try:
return self._cat_specific_file(filename, revision)
except FileNotFoundError:
if filenameAttic:
return self._cat_specific_file(filenameAttic, revision)
else:
raise
def _cat_specific_file(self, filename, revision):
        # Somehow CVS sometimes seems to write .cvsignore files to the
        # current working directory even though we force stdout with -p.
self.tempdir = tempfile.mkdtemp()
os.chdir(self.tempdir)
p = SCMTool.popen(['cvs', '-f', '-d', self.cvsroot, 'checkout',
'-r', str(revision), '-p', filename],
self.local_site_name)
contents = p.stdout.read()
errmsg = p.stderr.read()
failure = p.wait()
# Unfortunately, CVS is not consistent about exiting non-zero on
# errors. If the file is not found at all, then CVS will print an
# error message on stderr, but it doesn't set an exit code with
# pservers. If the file is found but an invalid revision is requested,
# then cvs exits zero and nothing is printed at all. (!)
#
# But, when it is successful, cvs will print a header on stderr like
# so:
#
# ===================================================================
# Checking out foobar
# RCS: /path/to/repo/foobar,v
# VERS: 1.1
# ***************
# So, if nothing is in errmsg, or errmsg has a specific recognized
# message, call it FileNotFound.
if not errmsg or \
errmsg.startswith('cvs checkout: cannot find module') or \
errmsg.startswith('cvs checkout: could not read RCS file'):
self.cleanup()
raise FileNotFoundError(filename, revision)
        # Otherwise, if there's an exit code, or errmsg doesn't look like a
        # successful header, then call it a generic SCMError.
#
# If the .cvspass file doesn't exist, CVS will return an error message
# stating this. This is safe to ignore.
        if (failure and not errmsg.startswith('==========')) and \
                ".cvspass does not exist - creating new file" not in errmsg:
self.cleanup()
raise SCMError(errmsg)
self.cleanup()
return contents
|
|
import re
import socket
from threading import Lock, Timer
from contextlib import contextmanager
import sys
try:
# Python 3
from urllib.parse import urlparse
str_cls = str
except (ImportError):
# Python 2
from urlparse import urlparse
str_cls = unicode
from . import __version__
from .show_error import show_error
from .console_write import console_write
from .cache import set_cache, get_cache
from .unicode import unicode_from_os
from . import text
from .downloaders import DOWNLOADERS
from .downloaders.urllib_downloader import UrlLibDownloader
from .downloaders.binary_not_found_error import BinaryNotFoundError
from .downloaders.rate_limit_exception import RateLimitException
from .downloaders.downloader_exception import DownloaderException
from .downloaders.win_downloader_exception import WinDownloaderException
from .http_cache import HttpCache
# A dict of hostnames - each maps to a list of pooled DownloadManager objects
_managers = {}
# How many managers are currently checked out
_in_use = 0
# Make sure connection management doesn't run into threading issues
_lock = Lock()
# A timer used to disconnect all managers after a period of no usage
_timer = None
@contextmanager
def downloader(url, settings):
try:
manager = None
manager = _grab(url, settings)
yield manager
finally:
if manager:
_release(url, manager)
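# A minimal usage sketch (the function below is illustrative and not part of
# the original module): the context manager checks a DownloadManager out of
# the per-hostname pool and automatically returns it when the block exits.
def _example_fetch(url, settings):
    with downloader(url, settings) as manager:
        return manager.fetch(url, 'Error downloading %s.' % url)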
def _grab(url, settings):
global _managers, _lock, _in_use, _timer
_lock.acquire()
try:
if _timer:
_timer.cancel()
_timer = None
parsed = urlparse(url)
if not parsed or not parsed.hostname:
raise DownloaderException(u'The URL "%s" is malformed' % url)
hostname = parsed.hostname.lower()
if hostname not in _managers:
_managers[hostname] = []
if not _managers[hostname]:
_managers[hostname].append(DownloadManager(settings))
_in_use += 1
return _managers[hostname].pop()
finally:
_lock.release()
def _release(url, manager):
global _managers, _lock, _in_use, _timer
_lock.acquire()
try:
hostname = urlparse(url).hostname.lower()
# This means the package was reloaded between _grab and _release,
# so the downloader is using old code and we want to discard it
if hostname not in _managers:
return
_managers[hostname].insert(0, manager)
_in_use -= 1
if _timer:
_timer.cancel()
_timer = None
if _in_use == 0:
_timer = Timer(5.0, close_all_connections)
_timer.start()
finally:
_lock.release()
def close_all_connections():
global _managers, _lock, _in_use, _timer
_lock.acquire()
try:
if _timer:
_timer.cancel()
_timer = None
for domain, managers in _managers.items():
for manager in managers:
manager.close()
_managers = {}
finally:
_lock.release()
def update_url(url, debug):
"""
Takes an old, out-dated URL and updates it. Mostly used with GitHub URLs
since they tend to be constantly evolving their infrastructure.
:param url:
The URL to update
:param debug:
If debugging is enabled
:return:
The updated URL
"""
if not url:
return url
original_url = url
url = url.replace('://raw.github.com/', '://raw.githubusercontent.com/')
url = url.replace('://nodeload.github.com/', '://codeload.github.com/')
url = re.sub('^(https://codeload.github.com/[^/]+/[^/]+/)zipball(/.*)$', '\\1zip\\2', url)
# Fix URLs from old versions of Package Control since we are going to
# remove all packages but Package Control from them to force upgrades
if url == 'https://sublime.wbond.net/repositories.json' or url == 'https://sublime.wbond.net/channel.json':
url = 'https://packagecontrol.io/channel_v3.json'
if debug and url != original_url:
console_write(
u'''
Fixed URL from %s to %s
''',
(original_url, url)
)
return url
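# For example (illustrative URL), update_url() rewrites
# 'https://raw.github.com/user/repo/master/file' to
# 'https://raw.githubusercontent.com/user/repo/master/file' and leaves any
# already-current URL untouched.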
class DownloadManager(object):
def __init__(self, settings):
# Cache the downloader for re-use
self.downloader = None
user_agent = settings.get('user_agent')
if user_agent and user_agent.find('%s') != -1:
settings['user_agent'] = user_agent % __version__
self.settings = settings
if settings.get('http_cache'):
cache_length = settings.get('http_cache_length', 604800)
self.settings['cache'] = HttpCache(cache_length)
def close(self):
if self.downloader:
self.downloader.close()
self.downloader = None
def fetch(self, url, error_message, prefer_cached=False):
"""
Downloads a URL and returns the contents
:param url:
The string URL to download
:param error_message:
The error message to include if the download fails
:param prefer_cached:
            If a cached version of the URL content is preferred over a new request
:raises:
DownloaderException: if there was an error downloading the URL
:return:
The string contents of the URL
"""
        is_ssl = re.search('^https://', url) is not None
url = update_url(url, self.settings.get('debug'))
# We don't use sublime.platform() here since this is used for
# the crawler on packagecontrol.io also
if sys.platform == 'darwin':
platform = 'osx'
elif sys.platform == 'win32':
platform = 'windows'
else:
platform = 'linux'
downloader_precedence = self.settings.get(
'downloader_precedence',
{
"windows": ["wininet"],
"osx": ["urllib"],
"linux": ["urllib", "curl", "wget"]
}
)
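        # For example, a user setting of {"linux": ["curl", "urllib"]} would
        # make curl the preferred downloader on Linux, falling back to urllib
        # only if curl is unavailable or lacks SSL support when needed.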
downloader_list = downloader_precedence.get(platform, [])
if not isinstance(downloader_list, list) or len(downloader_list) == 0:
error_string = text.format(
u'''
No list of preferred downloaders specified in the
"downloader_precedence" setting for the platform "%s"
''',
platform
)
show_error(error_string)
raise DownloaderException(error_string)
# Make sure we have a downloader, and it supports SSL if we need it
if not self.downloader or (is_ssl and not self.downloader.supports_ssl()):
for downloader_name in downloader_list:
if downloader_name not in DOWNLOADERS:
error_string = text.format(
u'''
The downloader "%s" from the "downloader_precedence"
setting for the platform "%s" is invalid
''',
(downloader_name, platform)
)
show_error(error_string)
raise DownloaderException(error_string)
try:
downloader = DOWNLOADERS[downloader_name](self.settings)
if is_ssl and not downloader.supports_ssl():
continue
self.downloader = downloader
break
except (BinaryNotFoundError):
pass
if not self.downloader:
error_string = text.format(
u'''
None of the preferred downloaders can download %s.
This is usually either because the ssl module is unavailable
and/or the command line curl or wget executables could not be
found in the PATH.
If you customized the "downloader_precedence" setting, please
verify your customization.
''',
url
)
show_error(error_string)
raise DownloaderException(error_string.replace('\n\n', ' '))
url = url.replace(' ', '%20')
hostname = urlparse(url).hostname
if hostname:
hostname = hostname.lower()
timeout = self.settings.get('timeout', 3)
rate_limited_domains = get_cache('rate_limited_domains', [])
if self.settings.get('debug'):
try:
port = 443 if is_ssl else 80
ipv6_info = socket.getaddrinfo(hostname, port, socket.AF_INET6)
if ipv6_info:
ipv6 = ipv6_info[0][4][0]
else:
ipv6 = None
except (socket.gaierror) as e:
ipv6 = None
except (TypeError) as e:
ipv6 = None
try:
ip = socket.gethostbyname(hostname)
except (socket.gaierror) as e:
ip = unicode_from_os(e)
except (TypeError) as e:
ip = None
console_write(
u'''
Download Debug
URL: %s
Timeout: %s
Resolved IP: %s
''',
(url, str_cls(timeout), ip)
)
if ipv6:
console_write(
u' Resolved IPv6: %s',
ipv6,
prefix=False
)
if hostname in rate_limited_domains:
error_string = u'Skipping due to hitting rate limit for %s' % hostname
if self.settings.get('debug'):
console_write(
u' %s',
error_string,
prefix=False
)
raise DownloaderException(error_string)
try:
return self.downloader.download(url, error_message, timeout, 3, prefer_cached)
except (RateLimitException) as e:
rate_limited_domains.append(hostname)
set_cache('rate_limited_domains', rate_limited_domains, self.settings.get('cache_length'))
console_write(
u'''
                Hit rate limit of %s for %s. Skipping all further download
                requests for this domain.
''',
(e.limit, e.domain)
)
raise
except (WinDownloaderException) as e:
console_write(
u'''
Attempting to use Urllib downloader due to WinINet error: %s
''',
e
)
            # Here we grab the proxy info extracted from WinInet to fill in
            # the Package Control settings if those are not present. This
            # should hopefully provide a seamless fallback for users who run
            # into odd Windows errors related to network communication.
wininet_proxy = self.downloader.proxy or ''
wininet_proxy_username = self.downloader.proxy_username or ''
wininet_proxy_password = self.downloader.proxy_password or ''
http_proxy = self.settings.get('http_proxy', '')
https_proxy = self.settings.get('https_proxy', '')
proxy_username = self.settings.get('proxy_username', '')
proxy_password = self.settings.get('proxy_password', '')
settings = self.settings.copy()
if not http_proxy and wininet_proxy:
settings['http_proxy'] = wininet_proxy
if not https_proxy and wininet_proxy:
settings['https_proxy'] = wininet_proxy
has_proxy = settings.get('http_proxy') or settings.get('https_proxy')
if has_proxy and not proxy_username and wininet_proxy_username:
settings['proxy_username'] = wininet_proxy_username
if has_proxy and not proxy_password and wininet_proxy_password:
settings['proxy_password'] = wininet_proxy_password
self.downloader = UrlLibDownloader(settings)
# Try again with the new downloader!
return self.fetch(url, error_message, prefer_cached)
|
|
import mock
from nose import tools as nt
from django.test import RequestFactory
from django.db import transaction
from django.http import Http404
from tests.base import AdminTestCase
from osf_tests.factories import (
DraftRegistrationFactory,
AuthUserFactory,
ProjectFactory,
UserFactory
)
from osf.models.registrations import DraftRegistration
from website.files.models import StoredFileNode
from addons.osfstorage.models import OsfStorageFile, OsfStorageFileNode
from website.project.model import ensure_schemas
from website.prereg.utils import get_prereg_schema
from admin_tests.utilities import setup_view, setup_form_view, setup_user_view
from admin_tests.pre_reg import utils
from admin.pre_reg.views import (
DraftListView,
DraftDetailView,
DraftFormView,
CommentUpdateView,
get_metadata_files,
get_file_questions,
)
from admin.pre_reg.forms import DraftRegistrationForm
from osf.models.admin_log_entry import AdminLogEntry
class TestDraftListView(AdminTestCase):
def setUp(self):
super(TestDraftListView, self).setUp()
self.user = AuthUserFactory()
schema = utils.draft_reg_util()
self.dr1 = DraftRegistrationFactory(
initiator=self.user,
registration_schema=schema,
registration_metadata=utils.SCHEMA_DATA
)
self.dr1.submit_for_review(self.user, {}, save=True)
self.dr2 = DraftRegistrationFactory(
initiator=self.user,
registration_schema=schema,
registration_metadata=utils.SCHEMA_DATA
)
self.dr2.submit_for_review(self.user, {}, save=True)
self.request = RequestFactory().get('/fake_path')
self.view = DraftListView()
self.view = setup_view(self.view, self.request)
def test_get_queryset(self):
res = list(self.view.get_queryset())
nt.assert_equal(len(res), 2)
nt.assert_is_instance(res[0], DraftRegistration)
def test_get_context_data(self):
self.view.object_list = self.view.get_queryset()
res = self.view.get_context_data()
nt.assert_is_instance(res, dict)
nt.assert_is_instance(res['drafts'], list)
nt.assert_equal(len(res['drafts']), 2)
class TestDraftDetailView(AdminTestCase):
def setUp(self):
super(TestDraftDetailView, self).setUp()
self.user = AuthUserFactory()
schema = utils.draft_reg_util()
self.dr1 = DraftRegistrationFactory(
initiator=self.user,
registration_schema=schema,
registration_metadata=utils.SCHEMA_DATA
)
self.dr1.submit_for_review(self.user, {}, save=True)
self.request = RequestFactory().get('/fake_path')
self.view = DraftDetailView()
self.view = setup_view(self.view, self.request, draft_pk=self.dr1._id)
@mock.patch('admin.pre_reg.views.DraftDetailView.checkout_files')
def test_get_object(self, mock_files):
res = self.view.get_object()
nt.assert_is_instance(res, dict)
nt.assert_equal(res['pk'], self.dr1._id)
class TestDraftFormView(AdminTestCase):
def setUp(self):
super(TestDraftFormView, self).setUp()
self.user = AuthUserFactory()
self.dr1 = DraftRegistrationFactory(
initiator=self.user,
registration_schema=utils.draft_reg_util(),
registration_metadata=utils.SCHEMA_DATA
)
self.dr1.submit_for_review(self.user, {}, save=True)
self.dr1.flags # sets flags if there aren't any yet.
self.request = RequestFactory().get('/fake_path')
self.view = DraftFormView()
self.view = setup_view(self.view, self.request, draft_pk=self.dr1._id)
self.post = RequestFactory().post('/fake_path')
self.post.user = UserFactory()
self.post_view = DraftFormView()
self.form_data = {
'notes': 'Far between',
'proof_of_publication': 'approved',
}
def test_dispatch_raise_404(self):
view = setup_view(DraftFormView(), self.request, draft_pk='wrong')
with nt.assert_raises(Http404):
view.dispatch(self.request)
def test_get_initial(self):
self.view.draft = self.dr1
self.view.get_initial()
res = self.view.initial
nt.assert_is_instance(res, dict)
nt.assert_equal(res['notes'], self.dr1.notes)
nt.assert_equal(res['assignee'], self.dr1.flags['assignee'])
nt.assert_equal(res['payment_sent'], self.dr1.flags['payment_sent'])
nt.assert_equal(res['proof_of_publication'],
self.dr1.flags['proof_of_publication'])
def test_get_context_data(self):
self.view.draft = self.dr1
res = self.view.get_context_data()
nt.assert_is_instance(res, dict)
nt.assert_in('draft', res)
nt.assert_is_instance(res['draft'], dict)
nt.assert_in('IMMEDIATE', res)
def test_form_valid_notes(self):
form = DraftRegistrationForm(data=self.form_data)
nt.assert_true(form.is_valid())
view = setup_form_view(self.post_view, self.post, form,
draft_pk=self.dr1._id)
view.draft = self.dr1
count = AdminLogEntry.objects.count()
with transaction.atomic():
view.form_valid(form)
nt.assert_equal(count, AdminLogEntry.objects.count())
self.dr1.reload()
nt.assert_equal(self.dr1.notes, self.form_data['notes'])
@mock.patch('admin.pre_reg.views.DraftFormView.checkin_files')
@mock.patch('admin.pre_reg.views.DraftRegistration.approve')
def test_form_valid_approve(self, mock_approve, mock_files):
self.form_data.update(approve_reject='approve')
form = DraftRegistrationForm(data=self.form_data)
nt.assert_true(form.is_valid())
view = setup_form_view(self.post_view, self.post, form,
draft_pk=self.dr1._id)
view.draft = self.dr1
count = AdminLogEntry.objects.count()
with transaction.atomic():
view.form_valid(form)
nt.assert_true(mock_approve.called)
nt.assert_equal(count + 1, AdminLogEntry.objects.count())
@mock.patch('admin.pre_reg.views.DraftFormView.checkin_files')
@mock.patch('admin.pre_reg.views.DraftRegistration.reject')
def test_form_valid_reject(self, mock_reject, mock_files):
self.form_data.update(approve_reject='reject')
form = DraftRegistrationForm(data=self.form_data)
nt.assert_true(form.is_valid())
view = setup_form_view(self.post_view, self.post, form,
draft_pk=self.dr1._id)
view.draft = self.dr1
count = AdminLogEntry.objects.count()
with transaction.atomic():
view.form_valid(form)
nt.assert_true(mock_reject.called)
nt.assert_equal(count + 1, AdminLogEntry.objects.count())
class TestCommentUpdateView(AdminTestCase):
def setUp(self):
super(TestCommentUpdateView, self).setUp()
self.user = AuthUserFactory()
self.dr1 = DraftRegistrationFactory(
initiator=self.user,
registration_schema=utils.draft_reg_util(),
registration_metadata=utils.SCHEMA_DATA
)
self.dr1.submit_for_review(self.user, {}, save=True)
self.request = RequestFactory().post('/fake_path', data={'blah': 'arg'})
self.request.user = UserFactory()
self.view = CommentUpdateView()
self.view = setup_view(self.view, self.request, draft_pk=self.dr1._id)
@mock.patch('admin.pre_reg.views.json.loads')
@mock.patch('admin.pre_reg.views.DraftRegistration.update_metadata')
    def test_post_comments(self, mock_meta, mock_json):
count = AdminLogEntry.objects.count()
self.view.post(self.request)
nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
class TestPreregFiles(AdminTestCase):
def setUp(self):
super(TestPreregFiles, self).setUp()
self.prereg_user = AuthUserFactory()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user)
ensure_schemas()
prereg_schema = get_prereg_schema()
self.d_of_qs = {
'q7': OsfStorageFileNode(node=self.node, name='7'),
'q11': OsfStorageFileNode(node=self.node, name='11'),
'q16': OsfStorageFileNode(node=self.node, name='16'),
'q12': OsfStorageFileNode(node=self.node, name='12'),
'q13': OsfStorageFileNode(node=self.node, name='13'),
'q19': OsfStorageFileNode(node=self.node, name='19'),
'q26': OsfStorageFileNode(node=self.node, name='26')
}
data = {}
for q, f in self.d_of_qs.iteritems():
guid = f.get_guid(create=True)._id
f.save()
if q == 'q26':
data[q] = {
'comments': [],
'value': '26',
'extra': [
{
'data': {
'provider': 'osfstorage',
'path': f.path,
},
'fileId': guid,
'nodeId': self.node._id,
}
]
}
continue
data[q] = {
'value': {
'uploader': {
'extra': [
{
'data': {
'provider': 'osfstorage',
'path': f.path,
},
'fileId': guid,
'nodeId': self.node._id,
}
]
}
}
}
self.draft = DraftRegistrationFactory(
initiator=self.user,
registration_schema=prereg_schema,
registration_metadata=data
)
self.prereg_user.save()
self.admin_user = UserFactory()
def test_checkout_files(self):
self.draft.submit_for_review(self.user, {}, save=True)
request = RequestFactory().get('/fake_path')
view = DraftDetailView()
view = setup_user_view(view, request, self.admin_user,
draft_pk=self.draft._id)
view.checkout_files(self.draft)
for q, f in self.d_of_qs.iteritems():
f.refresh_from_db()
nt.assert_equal(self.admin_user, f.checkout)
def test_checkin_files(self):
self.draft.submit_for_review(self.user, {}, save=True)
request = RequestFactory().get('/fake_path')
view = DraftDetailView()
view = setup_user_view(view, request, self.admin_user,
draft_pk=self.draft._id)
view.checkout_files(self.draft)
view2 = DraftFormView()
view2 = setup_view(view2, request, draft_pk=self.draft._id)
view2.checkin_files(self.draft)
for q, f in self.d_of_qs.iteritems():
nt.assert_equal(None, f.checkout)
def test_get_meta_data_files(self):
for item in get_metadata_files(self.draft):
nt.assert_in(type(item), [OsfStorageFile, StoredFileNode])
def test_get_file_questions(self):
questions = get_file_questions('prereg-prize.json')
nt.assert_equal(7, len(questions))
nt.assert_list_equal(
[
(u'q7', u'Data collection procedures'),
(u'q11', u'Manipulated variables'),
(u'q12', u'Measured variables'),
(u'q13', u'Indices'),
(u'q16', u'Study design'),
(u'q19', u'Statistical models'),
(u'q26', u'Upload an analysis script with clear comments')
],
questions
)
def test_file_id_missing(self):
data = self.draft.registration_metadata
data['q7']['value']['uploader']['extra'][0].pop('fileId')
self.draft.update_metadata(data)
for item in get_metadata_files(self.draft):
nt.assert_in(type(item), [OsfStorageFile, StoredFileNode])
def test_file_id_missing_odd(self):
data = self.draft.registration_metadata
data['q26']['extra'][0].pop('fileId')
self.draft.update_metadata(data)
for item in get_metadata_files(self.draft):
nt.assert_in(type(item), [OsfStorageFile, StoredFileNode])
def test_wrong_provider(self):
data = self.draft.registration_metadata
data['q7']['value']['uploader']['extra'][0]['data']['provider'] = 'box'
self.draft.update_metadata(data)
with nt.assert_raises(Http404):
for item in get_metadata_files(self.draft):
pass
def test_wrong_provider_odd(self):
data = self.draft.registration_metadata
data['q26']['extra'][0]['data']['provider'] = 'box'
self.draft.update_metadata(data)
with nt.assert_raises(Http404):
for item in get_metadata_files(self.draft):
pass
|
|
from .. utils import TranspileTestCase, UnaryOperationTestCase, BinaryOperationTestCase, InplaceOperationTestCase
class ListTests(TranspileTestCase):
def test_setattr(self):
self.assertCodeExecution("""
x = [1, 2, 3]
try:
x.attr = 42
except AttributeError as err:
print(err)
""")
def test_getattr(self):
self.assertCodeExecution("""
x = [1, 2, 3]
try:
print(x.attr)
except AttributeError as err:
print(err)
""")
def test_creation(self):
# Empty list
self.assertCodeExecution("""
x = []
print(x)
""")
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
print(x)
""")
def test_getitem(self):
# Simple positive index
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
print(x[2])
""")
# Simple negative index
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
print(x[-2])
""")
# Positive index out of range
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
try:
print(x[10])
except IndexError as err:
print(err)
""")
# Negative index out of range
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
try:
print(x[-10])
except IndexError as err:
print(err)
""")
def test_setitem(self):
self.assertCodeExecution("""
x = [1]
x[0] = 5
print(x[0])
""")
self.assertCodeExecution("""
x = [1, 2, 3]
x[1] = "hello"
x[2] = "there"
print(x)
""")
# Out of bounds
self.assertCodeExecution("""
x = []
try:
x[0] = 5
except IndexError as err:
print(err)
""")
# Out of bounds (negative)
self.assertCodeExecution("""
x = [1]
try:
x[-2] = 5
except IndexError as err:
print(err)
""")
def test_append(self):
# New list
self.assertCodeExecution("""
x = []
x.append("hello")
x.append(5)
print(x[0], x[1])
""")
# Existing list
self.assertCodeExecution("""
x = [1, 2, 3, 4]
x.append(5)
x.append("hello")
print(x[4], x[5])
""")
def test_remove(self):
# Remove integer
self.assertCodeExecution("""
x = [1, 2, 3]
x.remove(1)
print(x)
""")
# Remove only first duplicate
self.assertCodeExecution("""
x = [1, 2, 2, 3, 2]
x.remove(2)
print(x)
""")
# Remove boolean
self.assertCodeExecution("""
x = [True, False, True, False]
x.remove(1)
print(x)
""")
# Not in list
self.assertCodeExecution("""
x = [1, 2]
try:
x.remove(3)
except ValueError as err:
print(err)
print(x)
""")
def test_slice(self):
# Full slice
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
print(x[:])
""")
# Left bound slice
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
print(x[1:])
""")
# Right bound slice
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
print(x[:4])
""")
# Slice bound in both directions
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
print(x[1:4])
""")
# Slice bound in both directions with end out of bounds
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
print(x[1:6])
""")
# Slice bound in both directions with start out of bounds
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
print(x[6:7])
""")
# when step is 0
def test_slice_with_zero_step(self):
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
try:
print(x[1:3:0])
except ValueError as err:
print(err)
""")
def test_slice_in_reverse(self):
# Full slice with a negative step
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
print (x[::-1])
""")
# left bound slice with a negative step
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
print (x[4::-2])
""")
# Right bound slice with a negative step
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
print (x[:4:-1])
""")
# Right bound and left bound slice with a negative step
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
print (x[1:4:-2])
""")
def test_count(self):
# Normal Count
self.assertCodeExecution("""
x = [1, 1, 1, 4, 5]
print(x.count(1))
""")
# Bool Count
self.assertCodeExecution("""
x = [1, 1, False, 1, 4, True, 5, True]
print(x.count(1))
""")
# Element doesn't exist count
self.assertCodeExecution("""
x = [1, False, 1, 1, True, 4, 5, True]
print(x.count(2))
""")
self.assertCodeExecution("""
x = [1, 1, 1, 4, 5, True]
print(x.count(1))
""")
def test_contains(self):
# Normal Contains
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
print(1 in x)
""")
# Element doesn't exist
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
print(0 in x)
""")
# Checking for boolean
self.assertCodeExecution("""
x = [True, False]
print(x.count(1))
""")
def test_sort(self):
self.assertCodeExecution("""
fixtures = [
[9, 4, 7],
['beta', 'theta', 'alpha'],
]
for x in fixtures:
x.sort()
print(x)
""")
self.assertCodeExecution("""
fixtures = [
[9, 4, 7],
['beta', 'theta', 'alpha'],
]
for x in fixtures:
x.sort(reverse=True)
print(x)
""")
self.assertCodeExecution("""
def second(s):
return s[1]
x = ['abc', 'bza', 'cda', 'daa']
x.sort(key=second)
print(x)
""")
self.assertCodeExecution("""
def second(s):
return s[1]
x = ['abc', 'bza', 'cda', 'daa']
x.sort(key=second, reverse=True)
print(x)
""")
def test_pop(self):
self.assertCodeExecution("""
x = [1, 2, 3]
print(x.pop())
print(x)
""")
self.assertCodeExecution("""
x = [1, 2, 3]
print(x.pop(0))
print(x)
""")
self.assertCodeExecution("""
x = [1, 2, 3]
print(x.pop(-2))
print(x)
""")
def test_pop_exceptions(self):
self.assertCodeExecution("""
x = []
try:
print(x.pop())
except IndexError as err:
print(err)
print(x)
""")
self.assertCodeExecution("""
x = [1, 2, 3]
try:
print(x.pop(3))
except IndexError as err:
print(err)
print(x)
""")
self.assertCodeExecution("""
x = [1, 2, 3]
try:
print(x.pop(-4))
except IndexError as err:
print(err)
print(x)
""")
def test_copy(self):
self.assertCodeExecution("""
x = [1, 2, 3]
y = x.copy()
print(y)
""")
self.assertCodeExecution("""
x = [1, 2, 3]
y = x.copy()
print(x == y)
""")
self.assertCodeExecution("""
x = [1, 2, 3]
y = x.copy()
print(x is not y)
""")
self.assertCodeExecution("""
x = [1, 2, 3]
y = x.copy()
y.append(4)
print(x == y)
""")
self.assertCodeExecution("""
x = [[1], 2, 3]
y = x.copy()
print(x[0] is y[0])
""")
def test_index(self):
self.assertCodeExecution("""
x = [1, 2, 3]
print(x.index(1))
""")
self.assertCodeExecution("""
x = [1, 2, 1]
print(x.index(1, 1))
""")
self.assertCodeExecution("""
x = [1, 2, 3, 4]
print(x.index(4, 0, len(x)))
""")
self.assertCodeExecution("""
x = [1, 2, 3, 4]
print(x.index(2, 1, 2))
""")
self.assertCodeExecution("""
x = [1, 2, 3, 4]
print(x.index(2, 0, 10))
""")
self.assertCodeExecution("""
x = [1, 2, 1]
print(x.index(1, 0, -2))
""")
self.assertCodeExecution("""
x = [1, 2, 1]
print(x.index(1, -3, -2))
""")
# cases for 'ValueError: not in list'
self.assertCodeExecution("""
x = [1, 2, 3]
try:
print(x.index(4))
except ValueError as err:
print(err)
x = [1, 2, 1]
try:
print(x.index(2, 0, 1))
except ValueError as err:
print(err)
x = [1, 2, 3, 4]
try:
print(x.index(4, 0, 3))
except ValueError as err:
print(err)
x = [1, 2, 1]
try:
print(x.index(3, 0, 10))
except ValueError as err:
print(err)
x = [1, 2, 3, 4]
try:
print(x.index(2, 10, 20))
except ValueError as err:
print(err)
x = [1, 2, 3, 4]
try:
print(x.index(2, 10, 0))
except ValueError as err:
print(err)
x = []
try:
print(x.index(1, 0, 10))
except ValueError as err:
print(err)
""")
def test_lt_reflected(self):
self.assertCodeExecution("""
class A:
def __gt__(self, other):
return True
x = A()
y = A()
# verify that A doesn't have __lt__()
print(x.__lt__(x))
# ensure rich comparison logic is used
print([x] < [x]) # False, x is x and same size
print([x] < [y]) # True, x is not y, reflected
            # when elements are non-identical, return that comparison, even if sizes differ
print([x, y] < [y]) # True, x is not y, reflected
# ensure tie breaker by size is still used when identical elements
print([x, y] < [x]) # False, larger size
print([x] < [x, y]) # True, smaller size
""")
def test_le_reflected(self):
self.assertCodeExecution("""
class A:
def __ge__(self, other):
return True
x = A()
y = A()
# verify that A doesn't have __le__()
print(x.__le__(x))
# ensure rich comparison logic is used
print([x] <= [x]) # False, x is x and same size
print([x] <= [y]) # True, x is not y, reflected
            # when elements are non-identical, return that comparison, even if sizes differ
print([x, y] <= [y]) # True, x is not y, reflected
# ensure tie breaker by size is still used when identical elements
print([x, y] <= [x]) # False, larger size
print([x] <= [x, y]) # True, smaller size
""")
def test_gt_reflected(self):
self.assertCodeExecution("""
class A:
def __lt__(self, other):
return True
x = A()
y = A()
# verify that A doesn't have __gt__()
print(x.__gt__(x))
# ensure rich comparison logic is used
print([x] > [x]) # False, x is x and same size
print([x] > [y]) # True, x is not y, reflected
            # when elements are non-identical, return that comparison, even if sizes differ
print([x, y] > [y]) # True, x is not y, reflected
# ensure tie breaker by size is still used when identical elements
print([x, y] > [x]) # False, larger size
print([x] > [x, y]) # True, smaller size
""")
def test_ge_reflected(self):
self.assertCodeExecution("""
class A:
def __le__(self, other):
return True
x = A()
y = A()
# verify that A doesn't have __ge__()
print(x.__ge__(x))
# ensure rich comparison logic is used
print([x] >= [x]) # False, x is x and same size
print([x] >= [y]) # True, x is not y, reflected
            # when elements are non-identical, return that comparison, even if sizes differ
print([x, y] >= [y]) # True, x is not y, reflected
# ensure tie breaker by size is still used when identical elements
print([x, y] >= [x]) # False, larger size
print([x] >= [x, y]) # True, smaller size
""")
def test_eq_reflected(self):
self.assertCodeExecution("""
class A:
def __eq__(self, other):
return True
class B:
def __eq__(self, other):
return False
x = A()
y = B()
print([x] == [x]) # True, identity implies equality
print([x, x] == [x]) # False, size not equal
print([x] == [y]) # True, x is not y, x.__eq__(y)
print([y] == [x]) # False, y is not x, y.__eq__(x)
""")
class UnaryListOperationTests(UnaryOperationTestCase, TranspileTestCase):
data_type = 'list'
class BinaryListOperationTests(BinaryOperationTestCase, TranspileTestCase):
data_type = 'list'
not_implemented = [
'test_add_class',
'test_add_frozenset',
'test_and_class',
'test_and_frozenset',
'test_direct_eq_bytes',
'test_direct_ge_bytes',
'test_direct_gt_bytes',
'test_direct_le_bytes',
'test_direct_lt_bytes',
'test_direct_ne_bytes',
'test_direct_ge_list',
'test_direct_gt_list',
'test_direct_le_list',
'test_direct_lt_list',
'test_direct_eq_frozenset',
'test_direct_ge_frozenset',
'test_direct_gt_frozenset',
'test_direct_le_frozenset',
'test_direct_lt_frozenset',
'test_direct_ne_frozenset',
'test_eq_class',
'test_eq_frozenset',
'test_ge_class',
'test_ge_frozenset',
'test_ge_list',
'test_gt_class',
'test_gt_frozenset',
'test_gt_list',
'test_le_class',
'test_le_frozenset',
'test_le_list',
'test_lshift_class',
'test_lshift_frozenset',
'test_lt_class',
'test_lt_frozenset',
'test_lt_list',
'test_modulo_class',
'test_modulo_complex',
'test_modulo_frozenset',
'test_multiply_class',
'test_multiply_frozenset',
'test_ne_class',
'test_ne_frozenset',
'test_or_class',
'test_or_frozenset',
'test_power_class',
'test_power_frozenset',
'test_rshift_class',
'test_rshift_frozenset',
'test_subscr_bool',
'test_subscr_class',
'test_subscr_frozenset',
'test_subscr_slice',
'test_subtract_class',
'test_subtract_frozenset',
'test_true_divide_class',
'test_true_divide_frozenset',
'test_xor_class',
'test_xor_frozenset',
]
class InplaceListOperationTests(InplaceOperationTestCase, TranspileTestCase):
data_type = 'list'
not_implemented = [
'test_add_bytearray',
'test_add_bytes',
'test_add_class',
'test_add_dict',
'test_add_frozenset',
'test_add_range',
'test_add_set',
'test_add_str',
'test_and_class',
'test_and_frozenset',
'test_floor_divide_class',
'test_floor_divide_complex',
'test_floor_divide_frozenset',
'test_lshift_class',
'test_lshift_frozenset',
'test_modulo_class',
'test_modulo_complex',
'test_modulo_frozenset',
'test_multiply_class',
'test_multiply_frozenset',
'test_or_class',
'test_or_frozenset',
'test_power_class',
'test_power_frozenset',
'test_rshift_class',
'test_rshift_frozenset',
'test_subtract_class',
'test_subtract_frozenset',
'test_true_divide_class',
'test_true_divide_frozenset',
'test_xor_class',
'test_xor_frozenset',
]
|
|
# dope wars world
import random
import math
import pickle
import common
import player
import events
config = {
"price_shift": 0.3,
"min_drugs": 10,
"loan_interest": 15,
"old_lady_chance": 20,
"rand_event_chance": 40
}
templates = {
"old_lady_bust": "<b style='color: purple'>I heard there's going to be a big %s bust tomorrow!</b><br>",
"new_game": "<h2>A new game begins!</h2> It's day <b>%i</b> of <b>%i</b> and <b style='color: red; font-size: large'>you've got a loan to pay off!</b>",
"new_day": "It's day <b>%i</b>"
}
def rand_percent(percent):
    if percent > 100:
        percent = 100
    return random.randint(1, 100) <= percent
class World():
def __init__(self):
self.day = [1, 30] # current/max
self.world_name = None
self.areas = None
self.current_area = None
self.events = []
self.last_prices = {}
self.log = []
self.highscores = {15: [], 30: [], 60: [], 90: []}
self.player = None
self.dealer = None
def old_lady(self):
name = events.random_drug()
self.add_event("bust", name, 1)
self.add_log(templates["old_lady_bust"] % common.drugs[name]["name"])
def add_log(self, msg):
self.log.append(msg)
def clear_log(self):
self.log = []
def new_world(self, player_name,
max_days=common.config["max_days"],
world=common.config["default_world"]):
"""Create a brand new game."""
self.day[1] = max_days
self.player = player.Player()
self.player.name = player_name
self.world_name = world
self.areas = common.worlds[self.world_name]
self.current_area = random.choice(self.areas)
self.new_dealer()
self.clear_log()
self.add_log(templates["new_game"] % (self.day[0], self.day[1]))
def next_day(self):
self.clear_log()
self.day[0] += 1
self.update_last_prices()
self.new_dealer()
self.update_loan()
self.add_log(templates["new_day"] % self.day[0])
if rand_percent(config["old_lady_chance"]):
self.old_lady()
if rand_percent(config["rand_event_chance"]):
self.add_event(events.pick_event(), None, 0)
ask = []
for event in self.process_events():
if event is not None:
ask.append(event)
return ask
def update_last_prices(self):
for drug in self.dealer.keys():
dealer_price = self.dealer[drug]
self.last_prices[drug] = dealer_price
def travel_to(self, index):
self.current_area = self.areas[index]
return self.next_day()
def dump_drug(self, name, count):
self.player.remove_drug(name, count)
def hospital_cost(self):
# TODO: move to config
return (self.player.health[1] - self.player.health[0]) * 10
    def can_afford_hospital(self):
        return self.player.cash >= self.hospital_cost()
def visit_hospital(self):
if self.can_afford_hospital():
self.player.spend_cash(self.hospital_cost())
self.player.heal_all()
return True
else:
return False
# weapons
def buy_weapon(self, name):
price = common.weapons[name]["weapon_price"]
if self.player.spend_cash(price):
self.player.set_weapon(name)
return True
else:
return False
def buy_ammo(self, name):
price = common.weapons[name]["ammo_price"]
if self.player.spend_cash(price):
self.player.add_ammo(10)
return True
else:
return False
# dealer
def make_drug_price(self, name):
base_price = common.drugs[name]["base_price"]
difference = math.floor(base_price * config["price_shift"])
return base_price + random.randint(0 - difference, difference)
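    # Illustrative example: with a base_price of 100 and the configured
    # price_shift of 0.3, difference is 30, so the quoted price is drawn
    # uniformly from the range 70..130.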
def new_dealer(self):
self.dealer = {}
drug_list = list(common.drugs.keys())
drug_count = random.randint(config["min_drugs"], len(drug_list) - 1)
random.shuffle(drug_list)
for drug in drug_list[0:drug_count]:
self.dealer[drug] = self.make_drug_price(drug)
def buy_from_dealer(self, name, count=1):
# check they actually have the drug
if name in self.dealer.keys():
price = self.dealer[name]
return self.player.buy_drug(name, price, count)
else:
return False
def sell_to_dealer(self, name, count=1):
dealer_drugs = self.dealer.keys()
player_drugs = self.player.trenchcoat["drugs"].keys()
if name in dealer_drugs and name in player_drugs:
price = self.dealer[name]
if self.player.remove_drug(name, count):
self.player.add_cash(count * price)
return True
else:
return False
else:
return False
# events
def add_event(self, name, args, wait=0):
"""Queue up a new event."""
self.events.append({"name": name, "args": args, "wait": wait})
def events_now(self):
"""Return queued events due now."""
return self.sort_events()[0]
def events_future(self):
"""Return queued events next turn or later."""
return self.sort_events()[1]
def do_event(self, event):
return events.do_event(self, event["name"], event["args"])
def process_events(self):
events = self.events
self.events = []
for event in events:
if event["wait"] > 0:
self.add_event(event["name"], event["args"], event["wait"] - 1)
else:
yield self.do_event(event)
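    # Illustrative example (hypothetical drug name): add_event("bust", "weed",
    # wait=1) is re-queued with wait 0 on the first process_events() pass and
    # only executed on the following day's pass.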
# bank
def deposit_bank(self, amount):
if self.player.spend_cash(amount):
self.player.add_bank(amount)
return True
else:
return False
def withdraw_bank(self, amount):
if self.player.remove_bank(amount):
self.player.add_cash(amount)
return True
else:
return False
# loan
def pay_loan(self):
if self.player.loan > 0 and self.player.cash >= self.player.loan:
self.player.spend_cash(self.player.loan)
self.player.loan = 0
return True
else:
return False
def take_loan(self, amount):
if self.player.loan == 0:
self.player.add_loan(amount)
self.player.add_cash(amount)
self.update_loan()
return True
else:
return False
def update_loan(self):
if self.player.loan > 0:
interest = math.floor((self.player.loan / 100) * config["loan_interest"])
self.player.loan += interest
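        # Illustrative example: with the configured loan_interest of 15, a
        # 2000 loan accrues floor(2000 / 100) * 15 == 300 interest per day,
        # growing to 2300 after one call.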
|
|
"""
Helper routines for generating gpu kernels for nvcc.
"""
try:
from pygpu import gpuarray
except ImportError:
pass
def nvcc_kernel(name, params, body):
"""
Return the c code of a kernel function.
Parameters
----------
params
The parameters to the function as one or more strings.
body
The [nested] list of statements for the body of the function.
These will be separated by ';' characters.
"""
paramstr = ', '.join(params)
def flatbody():
for b in body:
if isinstance(b, (list, tuple)):
for bb in b:
yield bb
else:
yield b
bodystr = ';\n'.join(flatbody())
return """KERNEL void %(name)s (%(paramstr)s)
{
%(bodystr)s;
}
""" % locals()
def code_version(version):
"""
Decorator to support version-based cache mechanism.
"""
if not isinstance(version, tuple):
raise TypeError('version must be tuple', version)
def deco(f):
f.code_version = version
return f
return deco
UNVERSIONED = ()
@code_version((1,))
def inline_reduce(N, buf, pos, count, manner_fn):
"""
Return C++ code for a function that reduces a contiguous buffer.
Parameters
----------
N
Length of the buffer.
buf
buffer pointer.
pos
Index of executing thread.
count
Number of executing threads.
manner_fn
        A function that accepts strings of arguments a and b, and
        returns C code for their reduction, e.g.
        ``return "%(a)s + %(b)s"``
        for a sum reduction.
Notes
-----
    `buf` should be in gpu shared memory, since we access it many times.
This function leaves the answer in position 0 of the buffer. The
rest of the buffer is trashed by this function.
"""
loop_line = manner_fn("%s[%s]" % (buf, pos), "%s[i]" % (buf))
r_16 = manner_fn("%s[%s]" % (buf, pos), "%s[%s+16]" % (buf, pos))
r_8 = manner_fn("%s[%s]" % (buf, pos), "%s[%s+8]" % (buf, pos))
r_4 = manner_fn("%s[%s]" % (buf, pos), "%s[%s+4]" % (buf, pos))
r_2 = manner_fn("%s[%s]" % (buf, pos), "%s[%s+2]" % (buf, pos))
r_1 = manner_fn("%s[%s]" % (buf, pos), "%s[%s+1]" % (buf, pos))
return """
{
// This function trashes buf[1..warpSize],
// leaving the reduction result in buf[0].
if (%(pos)s < warpSize)
{
for (int i = %(pos)s + warpSize; i < %(N)s; i += warpSize)
{
%(buf)s[%(pos)s] = %(loop_line)s;
}
if (%(pos)s < 16)
{
//reduce so that %(pos)s 0 has the sum of everything
if(%(pos)s + 16 < %(N)s)
%(buf)s[%(pos)s] = %(r_16)s;
if(%(pos)s + 8 < %(N)s)
%(buf)s[%(pos)s] = %(r_8)s;
if(%(pos)s + 4 < %(N)s)
%(buf)s[%(pos)s] = %(r_4)s;
if(%(pos)s + 2 < %(N)s)
%(buf)s[%(pos)s] = %(r_2)s;
if(%(pos)s + 1 < %(N)s)
%(buf)s[%(pos)s] = %(r_1)s;
}
}
}
""" % locals()
@code_version(inline_reduce.code_version)
def inline_reduce_max(N, buf, pos, count):
return inline_reduce(N, buf, pos, count,
lambda a, b: "max(%s, %s)" % (a, b))
@code_version(inline_reduce.code_version)
def inline_reduce_sum(N, buf, pos, count):
return inline_reduce(N, buf, pos, count,
lambda a, b: "%s + %s" % (a, b))
@code_version(inline_reduce.code_version)
def inline_reduce_min(N, buf, pos, count):
return inline_reduce(N, buf, pos, count,
lambda a, b: "min(%s, %s)" % (a, b))
@code_version(inline_reduce.code_version)
def inline_reduce_prod(N, buf, pos, count):
return inline_reduce(N, buf, pos, count,
lambda a, b: "%s * %s" % (a, b))
@code_version((2,) + inline_reduce_max.code_version +
inline_reduce_sum.code_version)
def inline_softmax(N, buf, buf2, threadPos, threadCount, dtype="float32"):
"""
Generate code for a softmax.
On entry, `buf` and `buf2` must contain two identical copies of
the input to softmax.
After the code returns `buf` contains the softmax, `buf2` contains
un-normalized softmax.
Parameters
----------
N
Length of the buffer.
threadPos
Index of executing thread.
threadCount
Number of executing threads.
dtype
Dtype of the softmax's output.
Notes
-----
    `buf` and `buf2` should be in gpu shared memory, since we access
    them many times.
We use __i as an int variable in a loop.
"""
ctype = gpuarray.dtype_to_ctype(dtype)
# get max of buf (trashing all but buf[0])
return [inline_reduce_max(N, buf, threadPos, threadCount),
'__syncthreads()',
('%s row_max = ' + buf + '[0]') % ctype,
'__syncthreads()',
'for(int __i=' + threadPos + '; __i<' + N +
'; __i+=' + threadCount + '){',
buf + '[__i] = exp(' + buf2 + '[__i] - row_max)',
buf2 + '[__i] = ' + buf + '[__i]',
'}',
'__syncthreads()',
inline_reduce_sum(N, buf, threadPos, threadCount),
'__syncthreads()',
('%s row_sum = ' + buf + '[0]') % ctype,
'__syncthreads()',
# divide each exp() result by the sum to complete the job.
'for(int __i=' + threadPos + '; __i<' + N +
'; __i+=' + threadCount + '){',
buf + '[__i] = ' + buf2 + '[__i] / row_sum',
'}',
'__syncthreads()',
]
@code_version((2,))
def inline_reduce_fixed_shared(N, buf, x, stride_x, load_x, pos, count,
manner_fn, manner_init,
b='', stride_b='', load_b='', dtype='float32'):
"""
Return C++ code for a function that reduces a contiguous buffer.
This function leaves the answer in position 0 of the buffer. The
rest of the buffer is trashed by this function.
Parameters
----------
N
Length of the buffer.
buf
Buffer pointer of size warpSize * sizeof(dtype).
x
Input data.
stride_x
Input data stride.
load_x
Wrapper to read from x.
pos
Index of executing thread.
count
Number of executing threads.
b
Optional, pointer to the bias.
stride_b
Optional, the stride of b if b is provided.
load_b
Optional, wrapper to read from b if b is provided.
dtype
Optional, the dtype of the output.
manner_fn
        A function that accepts strings of arguments a and b, and
        returns C code for their reduction, e.g.
        ``return "%(a)s + %(b)s"``
        for a sum reduction.
    manner_init
        A function that accepts a string argument a and returns C
        code for its initialization.
Notes
-----
    `buf` should be in gpu shared memory, since we access it many times.
"""
if b:
init = manner_init("%(load_x)s(%(x)s[%(pos)s * %(stride_x)s]) +"
" %(load_b)s(%(b)s[%(pos)s * %(stride_b)s])" % locals())
loop_line = manner_fn("red",
manner_init("%(load_x)s(%(x)s[i * %(stride_x)s]) + "
"%(load_b)s(%(b)s[i * %(stride_b)s])" %
locals()))
else:
init = manner_init("%(load_x)s(%(x)s[%(pos)s * %(stride_x)s])" % locals())
loop_line = manner_fn("red", manner_init("%(load_x)s(%(x)s[i * %(stride_x)s])" %
locals()))
loop_line2 = manner_fn("%s[%s]" % (buf, pos),
"%s[i]" % buf)
r_16 = manner_fn("%s[%s]" % (buf, pos), "%s[%s+16]" % (buf, pos))
r_8 = manner_fn("%s[%s]" % (buf, pos), "%s[%s+8]" % (buf, pos))
r_4 = manner_fn("%s[%s]" % (buf, pos), "%s[%s+4]" % (buf, pos))
r_2 = manner_fn("%s[%s]" % (buf, pos), "%s[%s+2]" % (buf, pos))
r_1 = manner_fn("%s[%s]" % (buf, pos), "%s[%s+1]" % (buf, pos))
ctype = gpuarray.dtype_to_ctype(dtype)
return """
{
// This function trashes buf[1..n_threads],
// leaving the reduction result in buf[0].
%(ctype)s red = %(init)s;
#pragma unroll 16
for (int i = %(pos)s + %(count)s; i<%(N)s; i += %(count)s){
red = %(loop_line)s;
}
        %(buf)s[%(pos)s] = red;
__syncthreads();
if (%(pos)s < warpSize)
{
for (int i = %(pos)s + warpSize; i < %(count)s; i += warpSize)
{
%(buf)s[%(pos)s] = %(loop_line2)s;
}
if (%(pos)s < 16)
{
//reduce so that %(pos)s 0 has the reduction of everything
if(%(pos)s + 16 < %(N)s)
%(buf)s[%(pos)s] = %(r_16)s;
if(%(pos)s + 8 < %(N)s)
%(buf)s[%(pos)s] = %(r_8)s;
if(%(pos)s + 4 < %(N)s)
%(buf)s[%(pos)s] = %(r_4)s;
if(%(pos)s + 2 < %(N)s)
%(buf)s[%(pos)s] = %(r_2)s;
if(%(pos)s + 1 < %(N)s)
%(buf)s[%(pos)s] = %(r_1)s;
}
}
}
""" % locals()
@code_version(inline_reduce_fixed_shared.code_version)
def inline_reduce_fixed_shared_max(N, buf, x, stride_x, load_x, pos, count,
b='', stride_b='', load_b='',
dtype='float32'):
return inline_reduce_fixed_shared(N, buf, x, stride_x, load_x, pos, count,
lambda a, b: "max(%s, %s)" % (a, b),
lambda a: a,
b, stride_b, load_b, dtype)
@code_version((2,) + inline_reduce_max.code_version +
inline_reduce_sum.code_version)
def inline_softmax_fixed_shared(N, buf, x, stride_x, load_x,
sm, sm_stride, write_sm,
threadPos, threadCount,
b='', stride_b='', load_b='',
dtype="float32"):
"""
Generate code to perform softmax with a fixed amount of shared
memory.
On entry, `buf` is assumed to be empty.
    On exit, the normalized softmax has been written to `sm`, and `buf[0]`
    holds the softmax denominator (the sum of exponentials).
Parameters
----------
N
        Length of the buffer, at least warpSize (32).
buf
A shared memory buffer of size warpSize * sizeof(dtype).
x
A ptr to the gpu memory where the row is stored.
stride_x
The stride between each element in x.
load_x
Wrapper to read from x.
sm
A ptr to the gpu memory to store the result.
sm_stride
The stride between each sm element.
write_sm
Wrapper before writing to sm.
threadPos
Index of executing thread.
threadCount
Number of executing threads.
b
Optional, pointer to the bias.
stride_b
Optional, the stride of b if b is provided.
load_b
Optional, wrapper to read from b if b is provided.
dtype
Optional, the dtype of the softmax's output if not float32.
Notes
-----
    `buf` should be in gpu shared memory, since we access it many times.
    We use tx as an int variable in a loop.
"""
ctype = gpuarray.dtype_to_ctype(dtype)
ret = [
# get max of buf (trashing all but buf[0])
inline_reduce_fixed_shared_max(N, buf, x, stride_x, load_x,
threadPos, threadCount,
b, stride_b, load_b,
dtype),
'__syncthreads()',
('%s row_max = ' + buf + '[0]') % ctype,
'__syncthreads()',
inline_reduce_fixed_shared(N, buf, x, stride_x, load_x,
threadPos, threadCount,
lambda a, b: "%s + %s" % (a, b),
lambda a: "exp(%s - row_max)" % a,
b, stride_b, load_b, dtype),
'__syncthreads()',
('%s row_sum = ' + buf + '[0]') % ctype,
'__syncthreads()',
"for (int tx = threadIdx.x; tx< N; tx += blockDim.x){",
]
    # This sets all the values correctly
if b:
ret += [
"%(sm)s[tx * %(sm_stride)s] = "
" %(write_sm)s(exp(%(load_x)s(%(x)s[tx * %(stride_x)s]) +"
" %(load_b)s(%(b)s[tx * %(stride_b)s]) - row_max)"
" / row_sum)" % locals()]
else:
ret += [
"%(sm)s[tx * %(sm_stride)s] = "
"%(write_sm)s(exp(%(load_x)s(%(x)s[tx * %(stride_x)s]) - row_max)"
" / row_sum)" % locals()]
ret += [
"}",
'__syncthreads()',
]
return ret
|
|
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import uuid
import fixtures
from lxml import etree
import mock
from nova.compute import arch
# Allow passing None to the various connect methods
# (i.e. allow the client to rely on default URLs)
allow_default_uri_connection = True
# string indicating the CPU arch
node_arch = arch.X86_64 # or 'i686' (or whatever else uname -m might return)
# memory size in kilobytes
node_kB_mem = 4096
# the number of active CPUs
node_cpus = 2
# expected CPU frequency
node_mhz = 800
# the number of NUMA cells, 1 for unusual NUMA topologies or uniform
# memory access; check capabilities XML for the actual NUMA topology
node_nodes = 1 # NUMA nodes
# number of CPU sockets per node if nodes > 1, total number of CPU
# sockets otherwise
node_sockets = 1
# number of cores per socket
node_cores = 2
# number of threads per core
node_threads = 1
# CPU model
node_cpu_model = "Penryn"
# CPU vendor
node_cpu_vendor = "Intel"
# Has libvirt connection been used at least once
connection_used = False
def _reset():
global allow_default_uri_connection
allow_default_uri_connection = True
# virDomainState
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_XML_SECURE = 1
VIR_DOMAIN_XML_INACTIVE = 2
VIR_DOMAIN_BLOCK_REBASE_SHALLOW = 1
VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT = 2
VIR_DOMAIN_BLOCK_REBASE_COPY = 8
VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT = 2
VIR_DOMAIN_EVENT_ID_LIFECYCLE = 0
VIR_DOMAIN_EVENT_DEFINED = 0
VIR_DOMAIN_EVENT_UNDEFINED = 1
VIR_DOMAIN_EVENT_STARTED = 2
VIR_DOMAIN_EVENT_SUSPENDED = 3
VIR_DOMAIN_EVENT_RESUMED = 4
VIR_DOMAIN_EVENT_STOPPED = 5
VIR_DOMAIN_EVENT_SHUTDOWN = 6
VIR_DOMAIN_EVENT_PMSUSPENDED = 7
VIR_DOMAIN_UNDEFINE_MANAGED_SAVE = 1
VIR_DOMAIN_AFFECT_CURRENT = 0
VIR_DOMAIN_AFFECT_LIVE = 1
VIR_DOMAIN_AFFECT_CONFIG = 2
VIR_CPU_COMPARE_ERROR = -1
VIR_CPU_COMPARE_INCOMPATIBLE = 0
VIR_CPU_COMPARE_IDENTICAL = 1
VIR_CPU_COMPARE_SUPERSET = 2
VIR_CRED_USERNAME = 1
VIR_CRED_AUTHNAME = 2
VIR_CRED_LANGUAGE = 3
VIR_CRED_CNONCE = 4
VIR_CRED_PASSPHRASE = 5
VIR_CRED_ECHOPROMPT = 6
VIR_CRED_NOECHOPROMPT = 7
VIR_CRED_REALM = 8
VIR_CRED_EXTERNAL = 9
VIR_MIGRATE_LIVE = 1
VIR_MIGRATE_PEER2PEER = 2
VIR_MIGRATE_TUNNELLED = 4
VIR_MIGRATE_UNDEFINE_SOURCE = 16
VIR_MIGRATE_NON_SHARED_INC = 128
VIR_NODE_CPU_STATS_ALL_CPUS = -1
VIR_DOMAIN_START_PAUSED = 1
# libvirtError enums
# (Intentionally different from what's in libvirt. We do this to check
# that consumers of the library are using the symbolic names rather than
# hardcoding the numerical values)
VIR_FROM_QEMU = 100
VIR_FROM_DOMAIN = 200
VIR_FROM_NWFILTER = 330
VIR_FROM_REMOTE = 340
VIR_FROM_RPC = 345
VIR_FROM_NODEDEV = 666
VIR_ERR_NO_SUPPORT = 3
VIR_ERR_XML_DETAIL = 350
VIR_ERR_NO_DOMAIN = 420
VIR_ERR_OPERATION_INVALID = 55
VIR_ERR_OPERATION_TIMEOUT = 68
VIR_ERR_NO_NWFILTER = 620
VIR_ERR_SYSTEM_ERROR = 900
VIR_ERR_INTERNAL_ERROR = 950
VIR_ERR_CONFIG_UNSUPPORTED = 951
VIR_ERR_NO_NODE_DEVICE = 667
# Readonly
VIR_CONNECT_RO = 1
# virConnectBaselineCPU flags
VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES = 1
# snapshotCreateXML flags
VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA = 4
VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY = 16
VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
# blockCommit flags
VIR_DOMAIN_BLOCK_COMMIT_RELATIVE = 4
VIR_CONNECT_LIST_DOMAINS_ACTIVE = 1
VIR_CONNECT_LIST_DOMAINS_INACTIVE = 2
def _parse_disk_info(element):
disk_info = {}
disk_info['type'] = element.get('type', 'file')
disk_info['device'] = element.get('device', 'disk')
driver = element.find('./driver')
if driver is not None:
disk_info['driver_name'] = driver.get('name')
disk_info['driver_type'] = driver.get('type')
source = element.find('./source')
if source is not None:
disk_info['source'] = source.get('file')
if not disk_info['source']:
disk_info['source'] = source.get('dev')
if not disk_info['source']:
disk_info['source'] = source.get('path')
target = element.find('./target')
if target is not None:
disk_info['target_dev'] = target.get('dev')
disk_info['target_bus'] = target.get('bus')
return disk_info
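# Illustrative example (hypothetical XML): for
#   <disk type='file' device='disk'>
#     <driver name='qemu' type='qcow2'/>
#     <source file='/var/lib/nova/instances/uuid/disk'/>
#     <target dev='vda' bus='virtio'/>
#   </disk>
# _parse_disk_info() returns {'type': 'file', 'device': 'disk',
# 'driver_name': 'qemu', 'driver_type': 'qcow2',
# 'source': '/var/lib/nova/instances/uuid/disk',
# 'target_dev': 'vda', 'target_bus': 'virtio'}.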
class libvirtError(Exception):
"""This class was copied and slightly modified from
`libvirt-python:libvirt-override.py`.
Since a test environment will use the real `libvirt-python` version of
`libvirtError` if it's installed and not this fake, we need to maintain
strict compatibility with the original class, including `__init__` args
and instance-attributes.
To create a libvirtError instance you should:
# Create an unsupported error exception
exc = libvirtError('my message')
exc.err = (libvirt.VIR_ERR_NO_SUPPORT,)
self.err is a tuple of form:
(error_code, error_domain, error_message, error_level, str1, str2,
str3, int1, int2)
Alternatively, you can use the `make_libvirtError` convenience function to
allow you to specify these attributes in one shot.
"""
def __init__(self, defmsg, conn=None, dom=None, net=None, pool=None,
vol=None):
Exception.__init__(self, defmsg)
self.err = None
def get_error_code(self):
if self.err is None:
return None
return self.err[0]
def get_error_domain(self):
if self.err is None:
return None
return self.err[1]
def get_error_message(self):
if self.err is None:
return None
return self.err[2]
def get_error_level(self):
if self.err is None:
return None
return self.err[3]
def get_str1(self):
if self.err is None:
return None
return self.err[4]
def get_str2(self):
if self.err is None:
return None
return self.err[5]
def get_str3(self):
if self.err is None:
return None
return self.err[6]
def get_int1(self):
if self.err is None:
return None
return self.err[7]
def get_int2(self):
if self.err is None:
return None
return self.err[8]
class NWFilter(object):
def __init__(self, connection, xml):
self._connection = connection
self._xml = xml
self._parse_xml(xml)
def _parse_xml(self, xml):
tree = etree.fromstring(xml)
root = tree.find('.')
self._name = root.get('name')
def undefine(self):
self._connection._remove_filter(self)
class NodeDevice(object):
def __init__(self, connection, xml=None):
self._connection = connection
self._xml = xml
if xml is not None:
self._parse_xml(xml)
def _parse_xml(self, xml):
tree = etree.fromstring(xml)
root = tree.find('.')
self._name = root.get('name')
def attach(self):
pass
def dettach(self):
pass
def reset(self):
pass
class Domain(object):
def __init__(self, connection, xml, running=False, transient=False):
self._connection = connection
if running:
connection._mark_running(self)
        self._state = VIR_DOMAIN_RUNNING if running else VIR_DOMAIN_SHUTOFF
self._transient = transient
self._def = self._parse_definition(xml)
self._has_saved_state = False
self._snapshots = {}
self._id = self._connection._id_counter
def _parse_definition(self, xml):
try:
tree = etree.fromstring(xml)
except etree.ParseError:
raise make_libvirtError(
libvirtError, "Invalid XML.",
error_code=VIR_ERR_XML_DETAIL,
error_domain=VIR_FROM_DOMAIN)
definition = {}
name = tree.find('./name')
if name is not None:
definition['name'] = name.text
uuid_elem = tree.find('./uuid')
if uuid_elem is not None:
definition['uuid'] = uuid_elem.text
else:
definition['uuid'] = str(uuid.uuid4())
vcpu = tree.find('./vcpu')
if vcpu is not None:
definition['vcpu'] = int(vcpu.text)
memory = tree.find('./memory')
if memory is not None:
definition['memory'] = int(memory.text)
os = {}
os_type = tree.find('./os/type')
if os_type is not None:
os['type'] = os_type.text
os['arch'] = os_type.get('arch', node_arch)
os_kernel = tree.find('./os/kernel')
if os_kernel is not None:
os['kernel'] = os_kernel.text
os_initrd = tree.find('./os/initrd')
if os_initrd is not None:
os['initrd'] = os_initrd.text
os_cmdline = tree.find('./os/cmdline')
if os_cmdline is not None:
os['cmdline'] = os_cmdline.text
os_boot = tree.find('./os/boot')
if os_boot is not None:
os['boot_dev'] = os_boot.get('dev')
definition['os'] = os
features = {}
acpi = tree.find('./features/acpi')
if acpi is not None:
features['acpi'] = True
definition['features'] = features
devices = {}
device_nodes = tree.find('./devices')
if device_nodes is not None:
disks_info = []
disks = device_nodes.findall('./disk')
for disk in disks:
disks_info += [_parse_disk_info(disk)]
devices['disks'] = disks_info
nics_info = []
nics = device_nodes.findall('./interface')
for nic in nics:
nic_info = {}
nic_info['type'] = nic.get('type')
mac = nic.find('./mac')
if mac is not None:
nic_info['mac'] = mac.get('address')
source = nic.find('./source')
if source is not None:
if nic_info['type'] == 'network':
nic_info['source'] = source.get('network')
elif nic_info['type'] == 'bridge':
nic_info['source'] = source.get('bridge')
nics_info += [nic_info]
devices['nics'] = nics_info
definition['devices'] = devices
return definition
def create(self):
self.createWithFlags(0)
def createWithFlags(self, flags):
# FIXME: Not handling flags at the moment
self._state = VIR_DOMAIN_RUNNING
self._connection._mark_running(self)
self._has_saved_state = False
def isActive(self):
return int(self._state == VIR_DOMAIN_RUNNING)
def undefine(self):
self._connection._undefine(self)
def undefineFlags(self, flags):
self.undefine()
if flags & VIR_DOMAIN_UNDEFINE_MANAGED_SAVE:
if self.hasManagedSaveImage(0):
self.managedSaveRemove()
def destroy(self):
self._state = VIR_DOMAIN_SHUTOFF
self._connection._mark_not_running(self)
def ID(self):
return self._id
def name(self):
return self._def['name']
def UUIDString(self):
return self._def['uuid']
def interfaceStats(self, device):
return [10000242400, 1234, 0, 2, 213412343233, 34214234, 23, 3]
def blockStats(self, device):
return [2, 10000242400, 234, 2343424234, 34]
def suspend(self):
self._state = VIR_DOMAIN_PAUSED
def shutdown(self):
self._state = VIR_DOMAIN_SHUTDOWN
self._connection._mark_not_running(self)
def reset(self, flags):
# FIXME: Not handling flags at the moment
self._state = VIR_DOMAIN_RUNNING
self._connection._mark_running(self)
def info(self):
return [self._state,
long(self._def['memory']),
long(self._def['memory']),
self._def['vcpu'],
123456789L]
def migrateToURI(self, desturi, flags, dname, bandwidth):
raise make_libvirtError(
libvirtError,
"Migration always fails for fake libvirt!",
error_code=VIR_ERR_INTERNAL_ERROR,
error_domain=VIR_FROM_QEMU)
def migrateToURI2(self, dconnuri, miguri, dxml, flags, dname, bandwidth):
raise make_libvirtError(
libvirtError,
"Migration always fails for fake libvirt!",
error_code=VIR_ERR_INTERNAL_ERROR,
error_domain=VIR_FROM_QEMU)
def attachDevice(self, xml):
disk_info = _parse_disk_info(etree.fromstring(xml))
disk_info['_attached'] = True
self._def['devices']['disks'] += [disk_info]
return True
def attachDeviceFlags(self, xml, flags):
if (flags & VIR_DOMAIN_AFFECT_LIVE and
self._state != VIR_DOMAIN_RUNNING):
raise make_libvirtError(
libvirtError,
"AFFECT_LIVE only allowed for running domains!",
error_code=VIR_ERR_INTERNAL_ERROR,
error_domain=VIR_FROM_QEMU)
self.attachDevice(xml)
def detachDevice(self, xml):
disk_info = _parse_disk_info(etree.fromstring(xml))
disk_info['_attached'] = True
return disk_info in self._def['devices']['disks']
def detachDeviceFlags(self, xml, _flags):
self.detachDevice(xml)
def XMLDesc(self, flags):
disks = ''
for disk in self._def['devices']['disks']:
disks += '''<disk type='%(type)s' device='%(device)s'>
<driver name='%(driver_name)s' type='%(driver_type)s'/>
<source file='%(source)s'/>
<target dev='%(target_dev)s' bus='%(target_bus)s'/>
<address type='drive' controller='0' bus='0' unit='0'/>
</disk>''' % disk
nics = ''
for nic in self._def['devices']['nics']:
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
<source %(type)s='%(source)s'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03'
function='0x0'/>
</interface>''' % nic
return '''<domain type='kvm'>
<name>%(name)s</name>
<uuid>%(uuid)s</uuid>
<memory>%(memory)s</memory>
<currentMemory>%(memory)s</currentMemory>
<vcpu>%(vcpu)s</vcpu>
<os>
<type arch='%(arch)s' machine='pc-0.12'>hvm</type>
<boot dev='hd'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<clock offset='localtime'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
<devices>
<emulator>/usr/bin/kvm</emulator>
%(disks)s
<controller type='ide' index='0'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01'
function='0x1'/>
</controller>
%(nics)s
<serial type='file'>
<source path='dummy.log'/>
<target port='0'/>
</serial>
<serial type='pty'>
<source pty='/dev/pts/27'/>
<target port='1'/>
</serial>
<serial type='tcp'>
<source host="-1" service="-1" mode="bind"/>
</serial>
<console type='file'>
<source path='dummy.log'/>
<target port='0'/>
</console>
<input type='tablet' bus='usb'/>
<input type='mouse' bus='ps2'/>
<graphics type='vnc' port='-1' autoport='yes'/>
<graphics type='spice' port='-1' autoport='yes'/>
<video>
<model type='cirrus' vram='9216' heads='1'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02'
function='0x0'/>
</video>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04'
function='0x0'/>
</memballoon>
</devices>
</domain>''' % {'name': self._def['name'],
'uuid': self._def['uuid'],
'memory': self._def['memory'],
'vcpu': self._def['vcpu'],
'arch': self._def['os']['arch'],
'disks': disks,
'nics': nics}
def managedSave(self, flags):
self._connection._mark_not_running(self)
self._has_saved_state = True
def managedSaveRemove(self, flags):
self._has_saved_state = False
def hasManagedSaveImage(self, flags):
return int(self._has_saved_state)
def resume(self):
self._state = VIR_DOMAIN_RUNNING
def snapshotCreateXML(self, xml, flags):
tree = etree.fromstring(xml)
name = tree.find('./name').text
snapshot = DomainSnapshot(name, self)
self._snapshots[name] = snapshot
return snapshot
def vcpus(self):
vcpus = ([], [])
for i in range(0, self._def['vcpu']):
vcpus[0].append((i, 1, 120405L, i))
vcpus[1].append((True, True, True, True))
return vcpus
def memoryStats(self):
return {}
def maxMemory(self):
return self._def['memory']
def blockJobInfo(self, disk, flags):
return {}
class DomainSnapshot(object):
def __init__(self, name, domain):
self._name = name
self._domain = domain
def delete(self, flags):
del self._domain._snapshots[self._name]
class Connection(object):
def __init__(self, uri=None, readonly=False, version=9011):
if not uri or uri == '':
if allow_default_uri_connection:
uri = 'qemu:///session'
else:
raise ValueError("URI was None, but fake libvirt is "
"configured to not accept this.")
uri_whitelist = ['qemu:///system',
'qemu:///session',
'lxc:///', # from LibvirtDriver.uri()
'xen:///', # from LibvirtDriver.uri()
'uml:///system',
'test:///default']
if uri not in uri_whitelist:
raise make_libvirtError(
libvirtError,
"libvirt error: no connection driver "
"available for No connection for URI %s" % uri,
error_code=5, error_domain=0)
self.readonly = readonly
self._uri = uri
self._vms = {}
self._running_vms = {}
self._id_counter = 1 # libvirt reserves 0 for the hypervisor.
self._nwfilters = {}
self._nodedevs = {}
self._event_callbacks = {}
self.fakeLibVersion = version
self.fakeVersion = version
def _add_filter(self, nwfilter):
self._nwfilters[nwfilter._name] = nwfilter
def _remove_filter(self, nwfilter):
del self._nwfilters[nwfilter._name]
def _add_nodedev(self, nodedev):
self._nodedevs[nodedev._name] = nodedev
def _remove_nodedev(self, nodedev):
del self._nodedevs[nodedev._name]
def _mark_running(self, dom):
self._running_vms[self._id_counter] = dom
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
self._id_counter += 1
def _mark_not_running(self, dom):
if dom._transient:
self._undefine(dom)
dom._id = -1
for (k, v) in self._running_vms.iteritems():
if v == dom:
del self._running_vms[k]
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STOPPED, 0)
return
def _undefine(self, dom):
del self._vms[dom.name()]
if not dom._transient:
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_UNDEFINED, 0)
def getInfo(self):
return [node_arch,
node_kB_mem,
node_cpus,
node_mhz,
node_nodes,
node_sockets,
node_cores,
node_threads]
def numOfDomains(self):
return len(self._running_vms)
def listDomainsID(self):
return self._running_vms.keys()
def lookupByID(self, id):
if id in self._running_vms:
return self._running_vms[id]
raise make_libvirtError(
libvirtError,
'Domain not found: no domain with matching id %d' % id,
error_code=VIR_ERR_NO_DOMAIN,
error_domain=VIR_FROM_QEMU)
def lookupByName(self, name):
if name in self._vms:
return self._vms[name]
raise make_libvirtError(
libvirtError,
'Domain not found: no domain with matching name "%s"' % name,
error_code=VIR_ERR_NO_DOMAIN,
error_domain=VIR_FROM_QEMU)
def listAllDomains(self, flags):
vms = []
for vm in self._vms.values():
    if flags & VIR_CONNECT_LIST_DOMAINS_ACTIVE:
        if vm._state != VIR_DOMAIN_SHUTOFF:
            vms.append(vm)
    if flags & VIR_CONNECT_LIST_DOMAINS_INACTIVE:
        if vm._state == VIR_DOMAIN_SHUTOFF:
            vms.append(vm)
return vms
def _emit_lifecycle(self, dom, event, detail):
if VIR_DOMAIN_EVENT_ID_LIFECYCLE not in self._event_callbacks:
return
cbinfo = self._event_callbacks[VIR_DOMAIN_EVENT_ID_LIFECYCLE]
callback = cbinfo[0]
opaque = cbinfo[1]
callback(self, dom, event, detail, opaque)
def defineXML(self, xml):
dom = Domain(connection=self, running=False, transient=False, xml=xml)
self._vms[dom.name()] = dom
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_DEFINED, 0)
return dom
def createXML(self, xml, flags):
dom = Domain(connection=self, running=True, transient=True, xml=xml)
self._vms[dom.name()] = dom
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
return dom
def getType(self):
if self._uri == 'qemu:///system':
return 'QEMU'
def getLibVersion(self):
return self.fakeLibVersion
def getVersion(self):
return self.fakeVersion
def getHostname(self):
return 'compute1'
def domainEventRegisterAny(self, dom, eventid, callback, opaque):
self._event_callbacks[eventid] = [callback, opaque]
def registerCloseCallback(self, cb, opaque):
pass
def getCapabilities(self):
"""Return spoofed capabilities."""
return '''<capabilities>
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>x86_64</arch>
<model>Penryn</model>
<vendor>Intel</vendor>
<topology sockets='1' cores='2' threads='1'/>
<feature name='xtpr'/>
<feature name='tm2'/>
<feature name='est'/>
<feature name='vmx'/>
<feature name='ds_cpl'/>
<feature name='monitor'/>
<feature name='pbe'/>
<feature name='tm'/>
<feature name='ht'/>
<feature name='ss'/>
<feature name='acpi'/>
<feature name='ds'/>
<feature name='vme'/>
</cpu>
<migration_features>
<live/>
<uri_transports>
<uri_transport>tcp</uri_transport>
</uri_transports>
</migration_features>
<secmodel>
<model>apparmor</model>
<doi>0</doi>
</secmodel>
</host>
<guest>
<os_type>hvm</os_type>
<arch name='i686'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu</emulator>
<machine>pc-0.14</machine>
<machine canonical='pc-0.14'>pc</machine>
<machine>pc-0.13</machine>
<machine>pc-0.12</machine>
<machine>pc-0.11</machine>
<machine>pc-0.10</machine>
<machine>isapc</machine>
<domain type='qemu'>
</domain>
<domain type='kvm'>
<emulator>/usr/bin/kvm</emulator>
<machine>pc-0.14</machine>
<machine canonical='pc-0.14'>pc</machine>
<machine>pc-0.13</machine>
<machine>pc-0.12</machine>
<machine>pc-0.11</machine>
<machine>pc-0.10</machine>
<machine>isapc</machine>
</domain>
</arch>
<features>
<cpuselection/>
<deviceboot/>
<pae/>
<nonpae/>
<acpi default='on' toggle='yes'/>
<apic default='on' toggle='no'/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='x86_64'>
<wordsize>64</wordsize>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<machine>pc-0.14</machine>
<machine canonical='pc-0.14'>pc</machine>
<machine>pc-0.13</machine>
<machine>pc-0.12</machine>
<machine>pc-0.11</machine>
<machine>pc-0.10</machine>
<machine>isapc</machine>
<domain type='qemu'>
</domain>
<domain type='kvm'>
<emulator>/usr/bin/kvm</emulator>
<machine>pc-0.14</machine>
<machine canonical='pc-0.14'>pc</machine>
<machine>pc-0.13</machine>
<machine>pc-0.12</machine>
<machine>pc-0.11</machine>
<machine>pc-0.10</machine>
<machine>isapc</machine>
</domain>
</arch>
<features>
<cpuselection/>
<deviceboot/>
<acpi default='on' toggle='yes'/>
<apic default='on' toggle='no'/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='armv7l'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-arm</emulator>
<machine>integratorcp</machine>
<machine>vexpress-a9</machine>
<machine>syborg</machine>
<machine>musicpal</machine>
<machine>mainstone</machine>
<machine>n800</machine>
<machine>n810</machine>
<machine>n900</machine>
<machine>cheetah</machine>
<machine>sx1</machine>
<machine>sx1-v1</machine>
<machine>beagle</machine>
<machine>beaglexm</machine>
<machine>tosa</machine>
<machine>akita</machine>
<machine>spitz</machine>
<machine>borzoi</machine>
<machine>terrier</machine>
<machine>connex</machine>
<machine>verdex</machine>
<machine>lm3s811evb</machine>
<machine>lm3s6965evb</machine>
<machine>realview-eb</machine>
<machine>realview-eb-mpcore</machine>
<machine>realview-pb-a8</machine>
<machine>realview-pbx-a9</machine>
<machine>versatilepb</machine>
<machine>versatileab</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='mips'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-mips</emulator>
<machine>malta</machine>
<machine>mipssim</machine>
<machine>magnum</machine>
<machine>pica61</machine>
<machine>mips</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='mipsel'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-mipsel</emulator>
<machine>malta</machine>
<machine>mipssim</machine>
<machine>magnum</machine>
<machine>pica61</machine>
<machine>mips</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='sparc'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-sparc</emulator>
<machine>SS-5</machine>
<machine>leon3_generic</machine>
<machine>SS-10</machine>
<machine>SS-600MP</machine>
<machine>SS-20</machine>
<machine>Voyager</machine>
<machine>LX</machine>
<machine>SS-4</machine>
<machine>SPARCClassic</machine>
<machine>SPARCbook</machine>
<machine>SS-1000</machine>
<machine>SS-2000</machine>
<machine>SS-2</machine>
<domain type='qemu'>
</domain>
</arch>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='ppc'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-ppc</emulator>
<machine>g3beige</machine>
<machine>virtex-ml507</machine>
<machine>mpc8544ds</machine>
<machine canonical='bamboo-0.13'>bamboo</machine>
<machine>bamboo-0.13</machine>
<machine>bamboo-0.12</machine>
<machine>ref405ep</machine>
<machine>taihu</machine>
<machine>mac99</machine>
<machine>prep</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>
</capabilities>'''
def compareCPU(self, xml, flags):
tree = etree.fromstring(xml)
arch_node = tree.find('./arch')
if arch_node is not None:
if arch_node.text not in [arch.X86_64,
arch.I686]:
return VIR_CPU_COMPARE_INCOMPATIBLE
model_node = tree.find('./model')
if model_node is not None:
if model_node.text != node_cpu_model:
return VIR_CPU_COMPARE_INCOMPATIBLE
vendor_node = tree.find('./vendor')
if vendor_node is not None:
if vendor_node.text != node_cpu_vendor:
return VIR_CPU_COMPARE_INCOMPATIBLE
# The rest of the stuff libvirt implements is rather complicated
# and I don't think it adds much value to replicate it here.
return VIR_CPU_COMPARE_IDENTICAL
def getCPUStats(self, cpuNum, flag):
if cpuNum < 2:
return {'kernel': 5664160000000L,
'idle': 1592705190000000L,
'user': 26728850000000L,
'iowait': 6121490000000L}
else:
raise make_libvirtError(
libvirtError,
"invalid argument: Invalid cpu number",
error_code=VIR_ERR_INTERNAL_ERROR,
error_domain=VIR_FROM_QEMU)
def nwfilterLookupByName(self, name):
try:
return self._nwfilters[name]
except KeyError:
raise make_libvirtError(
libvirtError,
"no nwfilter with matching name %s" % name,
error_code=VIR_ERR_NO_NWFILTER,
error_domain=VIR_FROM_NWFILTER)
def nwfilterDefineXML(self, xml):
nwfilter = NWFilter(self, xml)
self._add_filter(nwfilter)
def nodeDeviceLookupByName(self, name):
try:
return self._nodedevs[name]
except KeyError:
raise make_libvirtError(
libvirtError,
"no nodedev with matching name %s" % name,
error_code=VIR_ERR_NO_NODE_DEVICE,
error_domain=VIR_FROM_NODEDEV)
def listDefinedDomains(self):
return []
def listDevices(self, cap, flags):
return []
def baselineCPU(self, cpu, flag):
"""Add new libvirt API."""
return """<cpu mode='custom' match='exact'>
<model>Penryn</model>
<vendor>Intel</vendor>
<feature name='xtpr'/>
<feature name='tm2'/>
<feature name='est'/>
<feature name='vmx'/>
<feature name='ds_cpl'/>
<feature name='monitor'/>
<feature name='pbe'/>
<feature name='tm'/>
<feature name='ht'/>
<feature name='ss'/>
<feature name='acpi'/>
<feature name='ds'/>
<feature name='vme'/>
<feature policy='require' name='aes'/>
</cpu>"""
def openAuth(uri, auth, flags):
if type(auth) != list:
raise Exception("Expected a list for 'auth' parameter")
if type(auth[0]) != list:
    raise Exception("Expected a list in 'auth[0]' parameter")
if not callable(auth[1]):
raise Exception("Expected a function in 'auth[1]' parameter")
return Connection(uri, (flags == VIR_CONNECT_RO))
def virEventRunDefaultImpl():
time.sleep(1)
def virEventRegisterDefaultImpl():
if connection_used:
raise Exception("virEventRegisterDefaultImpl() must be "
"called before connection is used.")
def registerErrorHandler(handler, ctxt):
pass
def make_libvirtError(error_class, msg, error_code=None,
error_domain=None, error_message=None,
error_level=None, str1=None, str2=None, str3=None,
int1=None, int2=None):
"""Convenience function for creating `libvirtError` exceptions which
allow you to specify arguments in constructor without having to manipulate
the `err` tuple directly.
We need to pass in `error_class` to this function because it may be
`libvirt.libvirtError` or `fakelibvirt.libvirtError` depending on whether
`libvirt-python` is installed.
"""
exc = error_class(msg)
exc.err = (error_code, error_domain, error_message, error_level,
str1, str2, str3, int1, int2)
return exc
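# Illustrative sketch only (hypothetical helper, not part of the original fake
# module): shows how make_libvirtError builds an exception whose `err` tuple
# feeds the get_error_code()/get_error_domain() accessors defined on
# libvirtError above. The error constants are ones this fake module already
# defines and uses elsewhere.
def _example_make_libvirtError():
    exc = make_libvirtError(
        libvirtError,
        'Domain not found: no domain with matching name "instance-1"',
        error_code=VIR_ERR_NO_DOMAIN,
        error_domain=VIR_FROM_QEMU)
    assert exc.get_error_code() == VIR_ERR_NO_DOMAIN
    return exc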
virDomain = Domain
virNodeDevice = NodeDevice
virConnect = Connection
class FakeLibvirtFixture(fixtures.Fixture):
"""This fixture patches the libvirt.openAuth method so that it
always returns an instance of fakelibvirt.virConnect. This
ensures the tests don't mistakenly connect to a real libvirt
daemon instance which would lead to non-deterministic behaviour.
"""
def setUp(self):
super(FakeLibvirtFixture, self).setUp()
try:
import libvirt
patcher = mock.patch.object(
libvirt, "openAuth",
return_value=virConnect("qemu:///system"))
patcher.start()
self.addCleanup(patcher.stop)
except ImportError:
# If we can't import libvirt, the tests will use
# fakelibvirt regardless, so nothing to do here.
pass
|
|
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import json
import logging
from nose.tools import eq_
import os
import sys
import unittest
from ryu.lib import ofctl_v1_0
from ryu.lib import ofctl_v1_2
from ryu.lib import ofctl_v1_3
from ryu.ofproto import ofproto_parser
from ryu.ofproto.ofproto_protocol import ProtocolDesc
from ryu.tests import test_lib
LOG = logging.getLogger(__name__)
class DummyDatapath(ProtocolDesc):
def __init__(self, version):
super(DummyDatapath, self).__init__(version)
self.id = 1 # XXX
self.request_msg = None
self.reply_msg = None
self.waiters = None
@staticmethod
def set_xid(msg):
msg.set_xid(0)
return 0
def send_msg(self, msg):
msg.serialize()
self.request_msg = msg
if self.reply_msg:
lock, msgs = self.waiters[self.id][msg.xid]
msgs.append(self.reply_msg)
del self.waiters[self.id][msg.xid]
lock.set()
def set_reply(self, msg, waiters):
self.reply_msg = msg
self.waiters = waiters
class Test_ofctl(unittest.TestCase):
def _test(self, name, dp, method, args, request, reply, expected):
print('processing %s ...' % name)
waiters = {}
dp.set_reply(reply, waiters)
if reply:
output = method(dp=dp, waiters=waiters, **args)
else:
output = method(dp=dp, **args)
# expected message <--> sent message
request.serialize()
try:
eq_(request.buf, dp.request_msg.buf)
except AssertionError as e:
# For debugging
json.dump(dp.request_msg.to_jsondict(),
open('/tmp/' + name, 'w'), indent=3, sort_keys=True)
raise e
# expected output <--> return of ofctl
def _remove(d, names):
f = lambda x: _remove(x, names)
if isinstance(d, list):
return list(map(f, d))
if isinstance(d, dict):
d2 = {}
for k, v in d.items():
if k in names:
continue
d2[k] = f(v)
return d2
return d
try:
eq_(_remove(expected, ['len', 'length']),
_remove(output, ['len', 'length']))
except AssertionError as e:
# For debugging
json.dump(output, open('/tmp/' + name, 'w'), indent=4)
raise e
def _add_tests():
_ofp_vers = {
'of10': 0x01,
'of12': 0x03,
'of13': 0x04
}
_test_cases = {
'of10': [
{
'method': ofctl_v1_0.mod_flow_entry,
'request': '1-2-ofp_flow_mod.packet.json',
'reply': None
},
],
'of12': [
{
'method': ofctl_v1_2.get_desc_stats,
'request': '3-24-ofp_desc_stats_request.packet.json',
'reply': '3-0-ofp_desc_stats_reply.packet.json'
},
{
'method': ofctl_v1_2.get_queue_stats,
'request': '3-37-ofp_queue_stats_request.packet.json',
'reply': '3-38-ofp_queue_stats_reply.packet.json'
},
{
'method': ofctl_v1_2.get_queue_config,
'request': '3-35-ofp_queue_get_config_request.packet.json',
'reply': '3-36-ofp_queue_get_config_reply.packet.json'
},
{
'method': ofctl_v1_2.get_flow_stats,
'request': '3-11-ofp_flow_stats_request.packet.json',
'reply': '3-12-ofp_flow_stats_reply.packet.json'
},
{
'method': ofctl_v1_2.get_aggregate_flow_stats,
'request': '3-25-ofp_aggregate_stats_request.packet.json',
'reply': '3-26-ofp_aggregate_stats_reply.packet.json'
},
{
'method': ofctl_v1_2.get_table_stats,
'request': '3-27-ofp_table_stats_request.packet.json',
'reply': '3-28-ofp_table_stats_reply.packet.json'
},
{
'method': ofctl_v1_2.get_port_stats,
'request': '3-29-ofp_port_stats_request.packet.json',
'reply': '3-30-ofp_port_stats_reply.packet.json'
},
{
'method': ofctl_v1_2.get_group_stats,
'request': '3-61-ofp_group_stats_request.packet.json',
'reply': '3-62-ofp_group_stats_reply.packet.json'
},
{
'method': ofctl_v1_2.get_group_features,
'request': '3-31-ofp_group_features_stats_request.packet.json',
'reply': '3-32-ofp_group_features_stats_reply.packet.json'
},
{
'method': ofctl_v1_2.get_group_desc,
'request': '3-33-ofp_group_desc_stats_request.packet.json',
'reply': '3-34-ofp_group_desc_stats_reply.packet.json'
},
# In OpenFlow 1.2, ofp_port_desc is not defined.
# We use ofp_features_request to get ports description instead.
{
'method': ofctl_v1_2.get_port_desc,
'request': '3-5-ofp_features_request.packet.json',
'reply': '3-6-ofp_features_reply.packet.json'
},
{
'method': ofctl_v1_2.mod_flow_entry,
'request': '3-2-ofp_flow_mod.packet.json',
'reply': None
},
{
'method': ofctl_v1_2.mod_group_entry,
'request': '3-21-ofp_group_mod.packet.json',
'reply': None
},
{
'method': ofctl_v1_2.mod_port_behavior,
'request': '3-22-ofp_port_mod.packet.json',
'reply': None
},
{
'method': ofctl_v1_2.send_experimenter,
'request': '3-16-ofp_experimenter.packet.json',
'reply': None
},
],
'of13': [
{
'method': ofctl_v1_3.get_desc_stats,
'request': '4-24-ofp_desc_request.packet.json',
'reply': '4-0-ofp_desc_reply.packet.json'
},
{
'method': ofctl_v1_3.get_queue_stats,
'request': '4-37-ofp_queue_stats_request.packet.json',
'reply': '4-38-ofp_queue_stats_reply.packet.json'
},
{
'method': ofctl_v1_3.get_queue_config,
'request': '4-35-ofp_queue_get_config_request.packet.json',
'reply': '4-36-ofp_queue_get_config_reply.packet.json'
},
{
'method': ofctl_v1_3.get_flow_stats,
'request': '4-11-ofp_flow_stats_request.packet.json',
'reply': '4-12-ofp_flow_stats_reply.packet.json'
},
{
'method': ofctl_v1_3.get_aggregate_flow_stats,
'request': '4-25-ofp_aggregate_stats_request.packet.json',
'reply': '4-26-ofp_aggregate_stats_reply.packet.json'
},
{
'method': ofctl_v1_3.get_table_stats,
'request': '4-27-ofp_table_stats_request.packet.json',
'reply': '4-28-ofp_table_stats_reply.packet.json'
},
{
'method': ofctl_v1_3.get_table_features,
'request': 'lib-ofctl-ofp_table_features_request.packet.json',
'reply': '4-56-ofp_table_features_reply.packet.json'
},
{
'method': ofctl_v1_3.get_port_stats,
'request': '4-29-ofp_port_stats_request.packet.json',
'reply': '4-30-ofp_port_stats_reply.packet.json'
},
{
'method': ofctl_v1_3.get_meter_stats,
'request': '4-49-ofp_meter_stats_request.packet.json',
'reply': '4-50-ofp_meter_stats_reply.packet.json'
},
{
'method': ofctl_v1_3.get_meter_features,
'request': '4-51-ofp_meter_features_request.packet.json',
'reply': '4-52-ofp_meter_features_reply.packet.json'
},
{
'method': ofctl_v1_3.get_meter_config,
'request': '4-47-ofp_meter_config_request.packet.json',
'reply': '4-48-ofp_meter_config_reply.packet.json'
},
{
'method': ofctl_v1_3.get_group_stats,
'request': '4-57-ofp_group_stats_request.packet.json',
'reply': '4-58-ofp_group_stats_reply.packet.json'
},
{
'method': ofctl_v1_3.get_group_features,
'request': '4-31-ofp_group_features_request.packet.json',
'reply': '4-32-ofp_group_features_reply.packet.json'
},
{
'method': ofctl_v1_3.get_group_desc,
'request': '4-33-ofp_group_desc_request.packet.json',
'reply': '4-34-ofp_group_desc_reply.packet.json'
},
{
'method': ofctl_v1_3.get_port_desc,
'request': '4-53-ofp_port_desc_request.packet.json',
'reply': '4-54-ofp_port_desc_reply.packet.json'
},
{
'method': ofctl_v1_3.mod_flow_entry,
'request': '4-2-ofp_flow_mod.packet.json',
'reply': None
},
{
'method': ofctl_v1_3.mod_meter_entry,
'request': '4-45-ofp_meter_mod.packet.json',
'reply': None
},
{
'method': ofctl_v1_3.mod_group_entry,
'request': '4-21-ofp_group_mod.packet.json',
'reply': None
},
{
'method': ofctl_v1_3.mod_port_behavior,
'request': '4-22-ofp_port_mod.packet.json',
'reply': None
},
{
'method': ofctl_v1_3.send_experimenter,
'request': '4-16-ofp_experimenter.packet.json',
'reply': None
},
]
}
def _jsonfile_to_msg(datapath, jsonfile):
return ofproto_parser.ofp_msg_from_jsondict(
datapath, json.load(open(jsonfile)))
this_dir = os.path.dirname(sys.modules[__name__].__file__)
parser_json_root = os.path.join(this_dir, '../ofproto/json/')
ofctl_json_root = os.path.join(this_dir, 'ofctl_json/')
for ofp_ver, tests in _test_cases.items():
dp = DummyDatapath(_ofp_vers[ofp_ver])
parser_json_dir = os.path.join(parser_json_root, ofp_ver)
ofctl_json_dir = os.path.join(ofctl_json_root, ofp_ver)
for test in tests:
name = 'test_ofctl_' + test['request']
print('adding %s ...' % name)
args = {}
args_json_path = os.path.join(ofctl_json_dir, test['request'])
if os.path.exists(args_json_path):
args = json.load(open(args_json_path))
request = _jsonfile_to_msg(
dp, os.path.join(parser_json_dir, test['request']))
reply = None
expected = None
if test['reply']:
reply = _jsonfile_to_msg(
dp, os.path.join(parser_json_dir, test['reply']))
expected = json.load(
open(os.path.join(ofctl_json_dir, test['reply'])))
f = functools.partial(
Test_ofctl._test, name=name, dp=dp, method=test['method'],
args=args, request=request, reply=reply, expected=expected)
test_lib.add_method(Test_ofctl, name, f)
_add_tests()
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six.moves.urllib.parse as urlparse
import swiftclient
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.openstack.common import timeutils
LOG = logging.getLogger(__name__)
FOLDER_DELIMITER = "/"
# Swift ACL
GLOBAL_READ_ACL = ".r:*"
LIST_CONTENTS_ACL = ".rlistings"
class Container(base.APIDictWrapper):
pass
class StorageObject(base.APIDictWrapper):
def __init__(self, apidict, container_name, orig_name=None, data=None):
super(StorageObject, self).__init__(apidict)
self.container_name = container_name
self.orig_name = orig_name
self.data = data
@property
def id(self):
return self.name
class PseudoFolder(base.APIDictWrapper):
def __init__(self, apidict, container_name):
super(PseudoFolder, self).__init__(apidict)
self.container_name = container_name
@property
def id(self):
return '%s/%s' % (self.container_name, self.name)
@property
def name(self):
return self.subdir.rstrip(FOLDER_DELIMITER)
@property
def bytes(self):
return None
@property
def content_type(self):
return "application/pseudo-folder"
def _objectify(items, container_name):
"""Splits a listing of objects into their appropriate wrapper classes."""
objects = []
# Deal with objects and object pseudo-folders first, save subdirs for later
for item in items:
if item.get("subdir", None) is not None:
object_cls = PseudoFolder
else:
object_cls = StorageObject
objects.append(object_cls(item, container_name))
return objects
def _metadata_to_header(metadata):
headers = {}
public = metadata.get('is_public')
if public is True:
public_container_acls = [GLOBAL_READ_ACL, LIST_CONTENTS_ACL]
headers['x-container-read'] = ",".join(public_container_acls)
elif public is False:
headers['x-container-read'] = ""
return headers
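# Illustrative sketch only (hypothetical helper, not part of the original
# module): shows the container-read ACL header produced from the `is_public`
# metadata flag by _metadata_to_header above.
def _example_metadata_to_header():
    public = _metadata_to_header({'is_public': True})
    # -> {'x-container-read': '.r:*,.rlistings'}: world-readable plus listings
    private = _metadata_to_header({'is_public': False})
    # -> {'x-container-read': ''}: the read ACL is cleared
    return public, private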
@memoized
def swift_api(request):
endpoint = base.url_for(request, 'object-store')
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
LOG.debug('Swift connection created using token "%s" and url "%s"'
% (request.user.token.id, endpoint))
return swiftclient.client.Connection(None,
request.user.username,
None,
preauthtoken=request.user.token.id,
preauthurl=endpoint,
cacert=cacert,
insecure=insecure,
auth_version="2.0")
def swift_container_exists(request, container_name):
try:
swift_api(request).head_container(container_name)
return True
except swiftclient.client.ClientException:
return False
def swift_object_exists(request, container_name, object_name):
try:
swift_api(request).head_object(container_name, object_name)
return True
except swiftclient.client.ClientException:
return False
def swift_get_containers(request, marker=None):
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
headers, containers = swift_api(request).get_account(limit=limit + 1,
marker=marker,
full_listing=True)
container_objs = [Container(c) for c in containers]
if len(container_objs) > limit:
return (container_objs[0:-1], True)
else:
return (container_objs, False)
def swift_get_container(request, container_name, with_data=True):
if with_data:
headers, data = swift_api(request).get_object(container_name, "")
else:
data = None
headers = swift_api(request).head_container(container_name)
timestamp = None
is_public = False
public_url = None
try:
is_public = GLOBAL_READ_ACL in headers.get('x-container-read', '')
if is_public:
swift_endpoint = base.url_for(request,
'object-store',
endpoint_type='publicURL')
public_url = swift_endpoint + '/' + urlparse.quote(container_name)
ts_float = float(headers.get('x-timestamp'))
timestamp = timeutils.iso8601_from_timestamp(ts_float)
except Exception:
pass
container_info = {
'name': container_name,
'container_object_count': headers.get('x-container-object-count'),
'container_bytes_used': headers.get('x-container-bytes-used'),
'timestamp': timestamp,
'data': data,
'is_public': is_public,
'public_url': public_url,
}
return Container(container_info)
def swift_create_container(request, name, metadata=({})):
if swift_container_exists(request, name):
raise exceptions.AlreadyExists(name, 'container')
headers = _metadata_to_header(metadata)
swift_api(request).put_container(name, headers=headers)
return Container({'name': name})
def swift_update_container(request, name, metadata=({})):
headers = _metadata_to_header(metadata)
swift_api(request).post_container(name, headers=headers)
return Container({'name': name})
def swift_delete_container(request, name):
# A container cannot be deleted if it is not empty. The batch removal of
# objects should be done in swiftclient instead of Horizon.
objects, more = swift_get_objects(request, name)
if objects:
error_msg = unicode(_("The container cannot be deleted "
"since it's not empty."))
exc = exceptions.Conflict(error_msg)
exc._safe_message = error_msg
raise exc
swift_api(request).delete_container(name)
return True
def swift_get_objects(request, container_name, prefix=None, marker=None,
limit=None):
limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)
kwargs = dict(prefix=prefix,
marker=marker,
limit=limit + 1,
delimiter=FOLDER_DELIMITER,
full_listing=True)
headers, objects = swift_api(request).get_container(container_name,
**kwargs)
object_objs = _objectify(objects, container_name)
if len(object_objs) > limit:
return (object_objs[0:-1], True)
else:
return (object_objs, False)
def swift_filter_objects(request, filter_string, container_name, prefix=None,
marker=None):
# FIXME(kewu): Swift currently has no real filtering API, thus the marker
# parameter here won't actually help the pagination. For now I am just
# getting the largest number of objects from a container and filtering
# based on those objects.
limit = 9999
objects = swift_get_objects(request,
container_name,
prefix=prefix,
marker=marker,
limit=limit)
filter_string_list = filter_string.lower().strip().split(' ')
def matches_filter(obj):
for q in filter_string_list:
return wildcard_search(obj.name.lower(), q)
return filter(matches_filter, objects[0])
def wildcard_search(string, q):
q_list = q.split('*')
if all(map(lambda x: x == '', q_list)):
return True
elif q_list[0] not in string:
return False
else:
if q_list[0] == '':
tail = string
else:
head, delimiter, tail = string.partition(q_list[0])
return wildcard_search(tail, '*'.join(q_list[1:]))
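# Illustrative sketch only (hypothetical helper, not part of the original
# module): wildcard_search consumes the '*'-separated pieces of the query
# recursively, so 'foo*log' matches any name containing 'foo' followed
# somewhere later by 'log'.
def _example_wildcard_search():
    assert wildcard_search('foo-2015.log', 'foo*log')
    assert not wildcard_search('foo-2015.log', 'bar*log')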
def swift_copy_object(request, orig_container_name, orig_object_name,
new_container_name, new_object_name):
if swift_object_exists(request, new_container_name, new_object_name):
raise exceptions.AlreadyExists(new_object_name, 'object')
headers = {"X-Copy-From": FOLDER_DELIMITER.join([orig_container_name,
orig_object_name])}
return swift_api(request).put_object(new_container_name,
new_object_name,
None,
headers=headers)
def swift_upload_object(request, container_name, object_name,
object_file=None):
headers = {}
size = 0
if object_file:
headers['X-Object-Meta-Orig-Filename'] = object_file.name
size = object_file.size
etag = swift_api(request).put_object(container_name,
object_name,
object_file,
headers=headers)
obj_info = {'name': object_name, 'bytes': size, 'etag': etag}
return StorageObject(obj_info, container_name)
def swift_create_pseudo_folder(request, container_name, pseudo_folder_name):
headers = {}
etag = swift_api(request).put_object(container_name,
pseudo_folder_name,
None,
headers=headers)
obj_info = {
'name': pseudo_folder_name,
'etag': etag
}
return PseudoFolder(obj_info, container_name)
def swift_delete_object(request, container_name, object_name):
swift_api(request).delete_object(container_name, object_name)
return True
def swift_get_object(request, container_name, object_name, with_data=True):
if with_data:
headers, data = swift_api(request).get_object(container_name,
object_name)
else:
data = None
headers = swift_api(request).head_object(container_name,
object_name)
orig_name = headers.get("x-object-meta-orig-filename")
timestamp = None
try:
ts_float = float(headers.get('x-timestamp'))
timestamp = timeutils.iso8601_from_timestamp(ts_float)
except Exception:
pass
obj_info = {
'name': object_name,
'bytes': headers.get('content-length'),
'content_type': headers.get('content-type'),
'etag': headers.get('etag'),
'timestamp': timestamp,
}
return StorageObject(obj_info,
container_name,
orig_name=orig_name,
data=data)
|
|
"""
Metrics for scoring predictions and also some more specialized
math needed for skedm
"""
import numpy as np
from scipy import stats as stats
from numba import jit
from sklearn.metrics import cohen_kappa_score
def weighted_mean(X, distances ):
"""
Calculates the weighted mean given a set of values and their corresponding
distances. Only 1/distance is implemented. This essentially is just a
weighted mean down axis=1.
Parameters
----------
X : 2d array
Training values. shape(nsamples,number near neighbors)
distances : 2d array
Sorted distances to the near neighbors for the indices.
shape(nsamples,number near neighbors)
Returns
-------
w_mean : 2d array
Weighted predictions
"""
distances = distances + 0.00001  # ensures no zeros when dividing
W = 1. / distances
denom = np.sum(W, axis=1, keepdims=True)
W /= denom
w_mean = np.sum(X * W, axis=1)
return w_mean.ravel()
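# Illustrative sketch only (hypothetical helper, not part of the original
# module): with two neighbors per sample, the closer neighbor dominates the
# 1/distance weighted mean computed by weighted_mean above.
def _example_weighted_mean():
    X = np.array([[1.0, 3.0]])          # neighbor values for a single sample
    distances = np.array([[1.0, 2.0]])  # the first neighbor is twice as close
    # weights are roughly [2/3, 1/3], so the prediction is about 1.67
    return weighted_mean(X, distances)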
def mi_digitize(X):
"""
Digitize a time series for mutual information analysis
Parameters
----------
X : 1D array
array to be digitized of length m
Returns
-------
Y : 1D array
digitized array of length m
"""
minX = np.min(X) - 1e-5 #subtract for correct binning
maxX = np.max(X) + 1e-5 #add for correct binning
nbins = int(np.sqrt(len(X)/20))
nbins = max(4, nbins)  # make sure there are at least four bins
bins = np.linspace(minX, maxX, nbins+1) #add one for correct num bins
Y = np.digitize(X, bins)
return Y
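# Illustrative sketch only (hypothetical helper, not part of the original
# module): for a series of length 320, sqrt(320/20) = 4 bins are used, which
# is also the enforced minimum, so shorter series still get four bins.
def _example_mi_digitize():
    X = np.random.rand(320)
    Y = mi_digitize(X)  # integer bin labels in 1..4, one per sample
    return Y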
def corrcoef(preds,actual):
"""
Correlation Coefficient of between predicted values and actual values
Parameters
----------
preds : array shape (num samples,num targets)
test : array of shape (num samples, num targets)
actual values from the testing set
Returns
-------
cc : float
Returns the correlation coefficient
"""
cc = np.corrcoef(preds,actual)[1,0]
return cc
def classCompare(preds,actual):
"""
Percent correct between predicted values and actual values
Parameters
----------
preds : array shape (num samples,num targets)
test : array of shape (num samples, num targets)
actual values from the testing set
Returns
-------
cc : float
    Fraction of predictions that match the actual values
"""
cc = np.mean( preds == actual )
return cc
def classificationError(preds,actual):
"""
Percent correct between predicted values and actual values scaled
to the most common prediction of the space
Parameters
----------
preds : array shape (num samples,)
test : array of shape (num samples,)
actual values from the testing set
Returns
-------
cc : float
    Percent correct scaled by the frequency of the most common class
"""
most_common,_=stats.mode(actual,axis=None)
num = np.mean(preds == actual)
denom = np.mean(actual == most_common)
cc = num/denom.astype('float')
return cc
def kleckas_tau(preds,actual):
"""
Calculates kleckas tau
Parameters
----------
preds : array shape (num samples,)
test : array of shape (num samples,)
actual values from the testing set
Returns
-------
tau : float
    Kleckas tau (chance-corrected classification accuracy)
"""
ncorr = np.sum(preds == actual) #number correctly classified
cats_unique = np.unique(actual)
sum_t = 0
for cat in cats_unique:
ni = np.sum(cat==actual)
pi = float(ni)/len(preds)
sum_t += ni*pi
tau = (ncorr - sum_t) / (len(preds) - sum_t)
return tau
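# Illustrative sketch only (hypothetical helper, not part of the original
# module): a worked tau. With actual = [0, 0, 1, 1] and preds = [0, 0, 1, 0],
# 3 of 4 predictions are correct and the chance term is 2*0.5 + 2*0.5 = 2,
# so tau = (3 - 2) / (4 - 2) = 0.5.
def _example_kleckas_tau():
    preds = np.array([0, 0, 1, 0])
    actual = np.array([0, 0, 1, 1])
    return kleckas_tau(preds, actual)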
def cohens_kappa(preds,actual):
"""
Calculates cohens kappa
Parameters
----------
preds : array shape (num samples,)
test : array of shape (num samples,)
actual values from the testing set
Returns
-------
c : float
    Cohen's kappa score
"""
c = cohen_kappa_score(preds,actual)
return c
def klekas_tau_spatial(X,max_lag,percent_calc=.5):
"""
Similar to mutual_information_spatial, it calculates the kleckas tau value
between a shifted and unshifted slice of the space. It makes slices in both
the rows and the columns.
Parameters
----------
X : 2-D array
input two-dimensional image
max_lag : integer
maximum amount to shift the space
percent_calc : float
How many rows and columns to use average over. Using the whole space
is overkill.
Returns
-------
r_mut : 1-D array
    the kleckas tau averaged down the rows (vertical)
c_mut : 1-D array
    the kleckas tau averaged across the columns (horizontal)
r_mi : 2-D array
    the kleckas tau down each row (vertical)
c_mi : 2-D array
    the kleckas tau across each column (horizontal)
"""
rs, cs = np.shape(X)
rs_iters = int(rs*percent_calc)
cs_iters = int(cs*percent_calc)
r_picks = np.random.choice(np.arange(rs),size=rs_iters,replace=False)
c_picks = np.random.choice(np.arange(cs),size=cs_iters,replace=False)
# The r_picks are used to calculate the MI in the columns
# and the c_picks are used to calculate the MI in the rows
c_mi = np.zeros((max_lag,rs_iters))
r_mi = np.zeros((max_lag,cs_iters))
for ii in range(rs_iters):
m_slice = X[r_picks[ii],:]
for j in range(max_lag):
shift = j+1
new_m = m_slice[:-shift]
shifted = m_slice[shift:]
c_mi[j,ii] = kleckas_tau(new_m,shifted)
for ii in range(cs_iters):
m_slice = X[:,c_picks[ii]]
for j in range(max_lag):
shift = j+1
new_m = m_slice[:-shift]
shifted = m_slice[shift:]
r_mi[j,ii] = kleckas_tau(new_m,shifted)
r_mut = np.mean(r_mi,axis=1)
c_mut = np.mean(c_mi,axis=1)
return r_mut, c_mut, r_mi, c_mi
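# Illustrative sketch only (hypothetical helper, not part of the original
# module): on a small labelled image, klekas_tau_spatial returns the average
# tau at each of max_lag shifts, separately for rows and columns.
def _example_klekas_tau_spatial():
    space = np.random.randint(0, 3, size=(50, 50))
    r_mut, c_mut, r_mi, c_mi = klekas_tau_spatial(space, max_lag=5)
    return r_mut.shape, c_mut.shape  # both (5,)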
def varianceExplained(preds,actual):
"""
Fraction of the variance of the actual values that is left unexplained by
the predictions, i.e. var(preds - actual) / var(actual)
Parameters
----------
preds : array shape (num samples,num targets)
actual : array of shape (num samples, num targets)
actual values from the testing set
Returns
-------
cc : float
    Ratio of the residual variance to the variance of the actual values
"""
cc = np.var(preds - actual) / np.var(actual)
return cc
def score(preds,actual):
"""
The coefficient R^2 is defined as (1 - u/v), where u is the residual
sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
sum of squares ((y_true - y_true.mean()) ** 2).sum(). Best possible
score is 1.0, lower values are worse.
Parameters
----------
preds : array shape (num samples,num targets)
test : array of shape (num samples, num targets)
actual values from the testing set
Returns
-------
r2 : float
    The R^2 score of the predictions
"""
u = np.square(actual - preds ).sum()
v = np.square(actual - actual.mean()).sum()
if v == 0.:
    print('Targets are all the same. Returning 0.')
    return 0.
r2 = 1 - u / v
return r2
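# Illustrative sketch only (hypothetical helper, not part of the original
# module): a worked R^2. For actual = [1, 2, 3] and preds = [1, 2, 2],
# u = 1 and v = 2, so score returns 1 - 1/2 = 0.5.
def _example_score():
    preds = np.array([1.0, 2.0, 2.0])
    actual = np.array([1.0, 2.0, 3.0])
    return score(preds, actual)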
def weighted_mode(a, w, axis=0):
"""This function is borrowed from sci-kit learn's extmath.py
Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
    # broadcast the weights to the shape of a
    w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
@jit
def quick_mode_axis1(X):
"""
Takes the mode of an array across the columns. aka axis=1
X : np.array
"""
X = X.astype(int)
len_x = len(X)
mode = np.zeros(len_x)
for i in range(len_x):
mode[i] = np.bincount(X[i,:]).argmax()
return mode
@jit
def quick_mode_axis1_keep_nearest_neigh(X):
"""
The current implementation of the mode takes the lowest value instead of
the closest value. For example if the neighbors have values:
[7,7,2,3,4,1,1] the current implementation will keep 1 as the value. For
our purposes, the ordering is important, so we want to keep the first value.
"""
X = X.astype(int)
len_x = len(X)
mode = np.zeros(len_x)
for i in range(len_x):
loc = np.bincount(X[i,:])[X[i,:]].argmax() #reorder before argmax
mode[i] = X[i,:][loc]
return mode
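# Illustrative sketch only (hypothetical helper, not part of the original
# module): with the row [7, 7, 2, 3, 4, 1, 1] both 7 and 1 appear twice;
# this variant keeps 7 because it is the nearer (earlier) neighbor, whereas
# a plain bincount argmax would return 1.
def _example_keep_nearest_neigh_mode():
    X = np.array([[7, 7, 2, 3, 4, 1, 1]])
    return quick_mode_axis1_keep_nearest_neigh(X)  # -> array([ 7.])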
def keep_diversity(X,thresh=1.):
"""
Throws out rows of only one class.
X : 2d array of ints
Returns
keep : 1d boolean
ex:
[1 1 1 1]
[2 1 2 3]
[2 2 2 2]
[3 2 1 4]
returns:
[F]
[T]
[F]
[T]
"""
X = X.astype(int)
mode = quick_mode_axis1(X).reshape(-1,1)
compare = np.repeat(mode,X.shape[1],axis=1)
thresh = int(thresh * X.shape[1])
keep = np.sum(compare == X, axis=1) < thresh
return keep
|
|
# data_generator.py
# @author: Lisa Wang
# @created: Jan 30 2017
#
#===============================================================================
# DESCRIPTION:
# Ground truth student model for data generation.
# 1. n concepts (e.g. 10)
# 2. k exercises (e.g. 1000)
#
# ### Student Model
# At any time t, a student s can be represented by the concepts she knows.
# Hence, s is a n-dim vector, where each index i corresponds to concept i.
#===============================================================================
# CURRENT STATUS: Working
#===============================================================================
# USAGE: from data_generator import *
# generate_data(n_students=5, seqlen=50, policy='expert', filename="synthetic_data/toy_expert.pickle")
from __future__ import absolute_import, division, print_function
# Python libraries
import numpy as np
import random
import pickle
import time
import copy
import six
from collections import defaultdict, deque, Counter
# Custom Modules
from filepaths import *
from constants import *
import dataset_utils
import concept_dependency_graph as cdg
import student as st
def fulfilled_prereqs(concept_tree, knowledge, concepts):
'''
for each concept tested in the exercise, check if all prereqs are fulfilled.
if prereqs for at least one concept are not fulfilled, then function returns False.
:return: bool
'''
for i in six.moves.range(len(concepts)):
c = concepts[i]
if c == 1:
prereqs = concept_tree.get_prereqs(i)
if np.sum(np.multiply(knowledge, prereqs)) != np.sum(prereqs):
return False
return True
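# Illustrative sketch only (hypothetical helper, not part of the original
# module; the tree setup mirrors main_test below): with only the root concept
# known, an exercise on the root concept passes the prerequisite check.
def _example_fulfilled_prereqs():
    concept_tree = cdg.ConceptDependencyGraph()
    concept_tree.init_default_tree(n=11)
    knowledge = np.zeros((11,))
    knowledge[0] = 1                      # only the root concept is mastered
    root_exercise = np.zeros((11,), dtype=np.int)
    root_exercise[0] = 1                  # exercise testing the root concept
    return fulfilled_prereqs(concept_tree, knowledge, root_exercise)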
def sample_expert_action(concept_tree, knowledge):
'''
Samples an optimal action given the current knowledge and the concept tree.
Samples uniformly from all optimal actions.
Returns a StudentAction
'''
next_concepts = []
# find all possible concepts that have not been learned yet but whose prereq are fulfilled
for i in six.moves.range(concept_tree.n):
if not knowledge[i]:
cur_concept = np.zeros((concept_tree.n,),dtype=np.int)
cur_concept[i] = 1
if fulfilled_prereqs(concept_tree, knowledge, cur_concept):
next_concepts.append(i)
if not next_concepts:
# nothing new can be learned, then just be random
next_action = np.random.randint(0,concept_tree.n)
else:
# uniformly pick an optimal action
next_action = np.random.choice(next_concepts)
next_c = np.zeros((concept_tree.n,),dtype=np.int)
next_c[next_action] = 1
return st.StudentAction(next_action, next_c)
def egreedy_expert(concept_tree, knowledge, epsilon):
'''
egreedy over the expert policy
'''
if np.random.random() < epsilon:
# random action
next_action = np.random.randint(0,concept_tree.n)
next_c = np.zeros((concept_tree.n,),dtype=np.int)
next_c[next_action] = 1
next_act = st.StudentAction(next_action,next_c)
else:
next_act = sample_expert_action(concept_tree, knowledge)
return next_act
def generate_student_sample(concept_tree, seqlen=100, student=None, initial_knowledge=None, policy=None, epsilon=None, verbose=False):
'''
:param n: number of concepts; if None use N_CONCEPTS
:param concept_tree: Concept dependency graph
:param seqlen: number of exercises the student will do.
:param initial_knowledge: initial knowledge of student. If None, will be set to 0 for all concepts.
:param policy: if no exercise_seq provided, use the specified policy to generate exercise sequence.
:param epsilon: epsilon for egreedy policy
:param verbose: if True, print out debugging / progress statements
:return: array of tuples, where each tuple consists of
(exercise, 0 or 1 indicating success of student on that exercise, knowledge of student after doing exercise)
Note that this array will have length seqlen.
'''
n_concepts = concept_tree.n
if initial_knowledge is None:
initial_knowledge = np.zeros((n_concepts,))
initial_knowledge[0] = 1
if student is None:
s = st.Student()
else:
s = student
s.reset() # make sure to reset to intial conditions for this sample
s.knowledge = initial_knowledge
# if not exercise_seq and policy == 'expert':
# return _generate_student_sample_with_expert_policy(student=s, seqlen=seqlen, verbose=verbose)
if (policy == 'modulo' or policy == 'random'):
# for expert policy, we have to choose the next exercise online.
exercise_seq = []
for i in six.moves.range(seqlen):
concepts = np.zeros((n_concepts,),dtype=np.int)
if policy == 'modulo':
# choose exercise with modulo op. This imposes an ordering on exercises.
conceptix = i % n_concepts
concepts[conceptix] = 1
elif policy == 'random':
# choose one random concept for this exercise
conceptix = np.random.randint(n_concepts)
concepts[conceptix] = 1
ex = st.StudentAction(conceptix, concepts)
exercise_seq.append(ex)
# Go through sequence of exercises and record whether student solved each or not
student_performance = []
student_knowledge = []
student_state = []
n_exercises_to_mastery = -1
exercises = [] # so we can store sequence of exercises as numpy arrays (instead of arrays of exercise objects)
for i in six.moves.range(seqlen):
# print (s.knowledge)
# store current states
student_state.append(s.get_state())
if policy == 'expert':
ex = sample_expert_action(concept_tree, s.knowledge)
elif policy == 'egreedy':
ex = egreedy_expert(concept_tree, s.knowledge, epsilon)
else:
ex = exercise_seq[i]
result = s.do_exercise(concept_tree, ex)
exercises.append(ex.conceptvec)  # assumes an exercise is equivalent to the concepts it practices
student_performance.append(result)
student_knowledge.append(copy.deepcopy(s.knowledge))
if np.sum(s.knowledge) == n_concepts and n_exercises_to_mastery == -1:
# if verbose and n_exercises_to_mastery == -1:
n_exercises_to_mastery = i + 1
if verbose:
if n_exercises_to_mastery != -1:
print ("learned all concepts after {} exercises.".format(n_exercises_to_mastery))
else:
print ("Did not learn all concepts after doing {} exercises.".format(seqlen))
#six.print_(student_performance)
student_sample = tuple(six.moves.zip(exercises, student_performance, student_knowledge, student_state))
#six.print_(student_sample)
return student_sample
def generate_data(concept_tree, student=None, filter_mastery=False, n_students=100, seqlen=100, policy='modulo', epsilon=0.0, filename=None, verbose=False):
"""
This is the main data generation function.
:param concept_tree: Concept dependency graph
:param student: Student environment
:param filter_mastery: boolean indicating whether want to remove trajectories that end in full mastery
:param seqlen: max length of exercises for a student. if student learns all concepts, sequence can be shorter.
:param policy: which policy to use to generate data. can be 'expert', 'modulo', 'random', 'egreedy'
:param epsilon: epsilon for egreedy policy only; not used by other policies
:param filename: where to store the generated data. If None, will not save to file.
:param verbose: if True, prints debugging statements
:return:
"""
data = []
print ("Generating data for {} students with behavior policy {} and sequence length {}.".format(n_students, policy, seqlen))
for i in six.moves.range(n_students):
if verbose:
print ("Creating sample for {}th student".format(i))
student_sample = generate_student_sample(concept_tree, student=student, seqlen=seqlen, initial_knowledge=None,
policy=policy, epsilon=epsilon, verbose=verbose)
if filter_mastery:
final_knowledge = student_sample[-1][2]
if np.mean(final_knowledge) < 0.999:
data.append(student_sample)
else:
data.append(student_sample)
if filename:
pickle.dump(data, open(filename, 'wb+'))
return data
# def load_data(filename=None):
# data = pickle.load(open(filename, 'rb+'))
# return data
def get_data_stats(data):
average_n_exercises = 0
for i, sample in enumerate(data):
n_exercises = len(sample)
average_n_exercises += n_exercises
average_n_exercises /= float(len(data))
print ("Average number of exercises needed to get all concepts learned: {}".format(average_n_exercises))
def make_toy_data(concept_tree):
filename = "toy.pickle"
generate_data(concept_tree, n_students=5, seqlen=50, filename= "{}{}".format(SYN_DATA_DIR, filename))
def load_toy_data():
filename = "toy.pickle"
data = dataset_utils.load_data(filename= "{}{}".format(SYN_DATA_DIR, filename))
print ("Loaded data. # samples: {}".format(len(data)))
def main_test():
"""
Run this to test this module.
- Tests ConceptDependencyGraph
- Generates sample for a single student using three different policies
- Generates toy data set with 5 students
- Loads generated toy data set
"""
concept_tree = cdg.ConceptDependencyGraph()
concept_tree.init_default_tree(n=11)
print (concept_tree.children)
print (concept_tree.parents)
print (concept_tree.prereq_map)
print ("Generate one sample using expert policy. ")
generate_student_sample(concept_tree, policy='expert', verbose=True)
print ("Generate one sample using random policy. ")
generate_student_sample(concept_tree, policy='random', verbose=True)
print ("Generate one sample using modulo policy. ")
generate_student_sample(concept_tree, policy='modulo', verbose=True)
make_toy_data(concept_tree)
load_toy_data()
def init_synthetic_data():
"""
Run this to generate the default synthetic data sets.
:return:
"""
concept_tree = cdg.ConceptDependencyGraph()
concept_tree.init_default_tree(n=N_CONCEPTS)
print ("Initializing synthetic data sets...")
n_students = 10000
seqlen = 100
for policy in ['random', 'expert', 'modulo']:
filename = "{}stud_{}seq_{}.pickle".format(n_students, seqlen, policy)
generate_data(concept_tree, n_students=n_students, seqlen=seqlen, policy=policy, filename="{}{}".format(SYN_DATA_DIR, filename))
print ("Data generation completed. ")
if __name__ == "__main__":
# main_test()
init_synthetic_data()
|
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2017 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import datetime
from . import utils
from .colour import Colour
class _EmptyEmbed:
def __bool__(self):
return False
def __repr__(self):
return 'Embed.Empty'
EmptyEmbed = _EmptyEmbed()
class EmbedProxy:
def __init__(self, layer):
self.__dict__.update(layer)
def __repr__(self):
return 'EmbedProxy(%s)' % ', '.join(('%s=%r' % (k, v) for k, v in self.__dict__.items() if not k.startswith('_')))
def __getattr__(self, attr):
return EmptyEmbed
class Embed:
"""Represents a Discord embed.
The following attributes can be set during creation
of the object:
    Certain properties return an ``EmbedProxy``, a type that acts similar to a
    regular `dict` except that the attributes are accessed via dotted access,
    e.g. ``embed.author.icon_url``. If the attribute is invalid or empty, then
    a special sentinel value is returned, :attr:`Embed.Empty`.
    For ease of use, all parameters that expect a ``str`` are implicitly
    cast to ``str`` for you.
Attributes
-----------
title: str
The title of the embed.
type: str
The type of embed. Usually "rich".
description: str
The description of the embed.
url: str
The URL of the embed.
timestamp: `datetime.datetime`
The timestamp of the embed content.
colour: :class:`Colour` or int
The colour code of the embed. Aliased to ``color`` as well.
Empty
A special sentinel value used by ``EmbedProxy`` and this class
to denote that the value or attribute is empty.
"""
__slots__ = ('title', 'url', 'type', '_timestamp', '_colour', '_footer',
'_image', '_thumbnail', '_video', '_provider', '_author',
'_fields', 'description')
Empty = EmptyEmbed
def __init__(self, **kwargs):
# swap the colour/color aliases
try:
colour = kwargs['colour']
except KeyError:
colour = kwargs.get('color', EmptyEmbed)
self.colour = colour
self.title = kwargs.get('title', EmptyEmbed)
self.type = kwargs.get('type', 'rich')
self.url = kwargs.get('url', EmptyEmbed)
self.description = kwargs.get('description', EmptyEmbed)
try:
timestamp = kwargs['timestamp']
except KeyError:
pass
else:
self.timestamp = timestamp
@classmethod
def from_data(cls, data):
# we are bypassing __init__ here since it doesn't apply here
self = cls.__new__(cls)
# fill in the basic fields
self.title = data.get('title', EmptyEmbed)
self.type = data.get('type', EmptyEmbed)
self.description = data.get('description', EmptyEmbed)
self.url = data.get('url', EmptyEmbed)
# try to fill in the more rich fields
try:
self._colour = Colour(value=data['color'])
except KeyError:
pass
try:
self._timestamp = utils.parse_time(data['timestamp'])
except KeyError:
pass
for attr in ('thumbnail', 'video', 'provider', 'author', 'fields', 'image', 'footer'):
try:
value = data[attr]
except KeyError:
continue
else:
setattr(self, '_' + attr, value)
return self
@property
def colour(self):
return getattr(self, '_colour', EmptyEmbed)
@colour.setter
def colour(self, value):
if isinstance(value, (Colour, _EmptyEmbed)):
self._colour = value
elif isinstance(value, int):
self._colour = Colour(value=value)
else:
raise TypeError('Expected discord.Colour, int, or Embed.Empty but received %s instead.' % value.__class__.__name__)
color = colour
@property
def timestamp(self):
return getattr(self, '_timestamp', EmptyEmbed)
@timestamp.setter
def timestamp(self, value):
if isinstance(value, (datetime.datetime, _EmptyEmbed)):
self._timestamp = value
else:
raise TypeError("Expected datetime.datetime or Embed.Empty received %s instead" % value.__class__.__name__)
@property
def footer(self):
"""Returns a ``EmbedProxy`` denoting the footer contents.
See :meth:`set_footer` for possible values you can access.
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedProxy(getattr(self, '_footer', {}))
def set_footer(self, *, text=EmptyEmbed, icon_url=EmptyEmbed):
"""Sets the footer for the embed content.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
-----------
text: str
The footer text.
icon_url: str
The URL of the footer icon. Only HTTP(S) is supported.
"""
self._footer = {}
if text is not EmptyEmbed:
self._footer['text'] = str(text)
if icon_url is not EmptyEmbed:
self._footer['icon_url'] = str(icon_url)
return self
@property
def image(self):
"""Returns a ``EmbedProxy`` denoting the image contents.
Possible attributes you can access are:
- ``url``
- ``proxy_url``
- ``width``
- ``height``
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedProxy(getattr(self, '_image', {}))
def set_image(self, *, url):
"""Sets the image for the embed content.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
-----------
url: str
The source URL for the image. Only HTTP(S) is supported.
"""
self._image = {
'url': str(url)
}
return self
@property
def thumbnail(self):
"""Returns a ``EmbedProxy`` denoting the thumbnail contents.
Possible attributes you can access are:
- ``url``
- ``proxy_url``
- ``width``
- ``height``
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedProxy(getattr(self, '_thumbnail', {}))
def set_thumbnail(self, *, url):
"""Sets the thumbnail for the embed content.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
-----------
url: str
The source URL for the thumbnail. Only HTTP(S) is supported.
"""
self._thumbnail = {
'url': str(url)
}
return self
@property
def video(self):
"""Returns a ``EmbedProxy`` denoting the video contents.
Possible attributes include:
- ``url`` for the video URL.
- ``height`` for the video height.
- ``width`` for the video width.
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedProxy(getattr(self, '_video', {}))
@property
def provider(self):
"""Returns a ``EmbedProxy`` denoting the provider contents.
The only attributes that might be accessed are ``name`` and ``url``.
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedProxy(getattr(self, '_provider', {}))
@property
def author(self):
"""Returns a ``EmbedProxy`` denoting the author contents.
See :meth:`set_author` for possible values you can access.
If the attribute has no value then :attr:`Empty` is returned.
"""
return EmbedProxy(getattr(self, '_author', {}))
def set_author(self, *, name, url=EmptyEmbed, icon_url=EmptyEmbed):
"""Sets the author for the embed content.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
-----------
name: str
The name of the author.
url: str
The URL for the author.
icon_url: str
The URL of the author icon. Only HTTP(S) is supported.
"""
self._author = {
'name': str(name)
}
if url is not EmptyEmbed:
self._author['url'] = str(url)
if icon_url is not EmptyEmbed:
self._author['icon_url'] = str(icon_url)
return self
@property
def fields(self):
"""Returns a list of ``EmbedProxy`` denoting the field contents.
See :meth:`add_field` for possible values you can access.
If the attribute has no value then :attr:`Empty` is returned.
"""
return [EmbedProxy(d) for d in getattr(self, '_fields', [])]
def add_field(self, *, name, value, inline=True):
"""Adds a field to the embed object.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
-----------
name: str
The name of the field.
value: str
The value of the field.
inline: bool
Whether the field should be displayed inline.
"""
field = {
'inline': inline,
'name': str(name),
'value': str(value)
}
try:
self._fields.append(field)
except AttributeError:
self._fields = [field]
return self
def clear_fields(self):
"""Removes all fields from this embed."""
try:
self._fields.clear()
except AttributeError:
self._fields = []
def remove_field(self, index):
"""Removes a field at a specified index.
If the index is invalid or out of bounds then the error is
silently swallowed.
.. note::
            When deleting a field by index, the indices of the other fields
            shift to fill the gap, just like in a regular list.
Parameters
-----------
index: int
The index of the field to remove.
"""
try:
del self._fields[index]
except (AttributeError, IndexError):
pass
def set_field_at(self, index, *, name, value, inline=True):
"""Modifies a field to the embed object.
The index must point to a valid pre-existing field.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
-----------
index: int
The index of the field to modify.
name: str
The name of the field.
value: str
The value of the field.
inline: bool
Whether the field should be displayed inline.
Raises
-------
IndexError
An invalid index was provided.
"""
try:
field = self._fields[index]
except (TypeError, IndexError, AttributeError):
raise IndexError('field index out of range')
field['name'] = str(name)
field['value'] = str(value)
field['inline'] = inline
return self
def to_dict(self):
"""Converts this embed object into a dict."""
# add in the raw data into the dict
result = {
key[1:]: getattr(self, key)
for key in self.__slots__
if key[0] == '_' and hasattr(self, key)
}
# deal with basic convenience wrappers
try:
colour = result.pop('colour')
except KeyError:
pass
else:
if colour:
result['color'] = colour.value
try:
timestamp = result.pop('timestamp')
except KeyError:
pass
else:
if timestamp:
result['timestamp'] = timestamp.isoformat()
# add in the non raw attribute ones
if self.type:
result['type'] = self.type
if self.description:
result['description'] = self.description
if self.url:
result['url'] = self.url
if self.title:
result['title'] = self.title
return result
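# A minimal usage sketch (illustrative only; the titles, colour and field values
# below are assumptions, not part of the library):
def _example_embed():
    embed = Embed(title='Server status', description='All systems nominal',
                  colour=0x2ecc71, timestamp=datetime.datetime.utcnow())
    embed.set_author(name='status-bot')
    embed.add_field(name='Uptime', value='42 days', inline=True)
    embed.set_footer(text='Generated automatically')
    return embed.to_dict()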
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from parameterized import parameterized
from airflow import DAG
from airflow.api_connexion.exceptions import EXCEPTIONS_LINK_MAP
from airflow.models import Log, TaskInstance
from airflow.operators.dummy_operator import DummyOperator
from airflow.security import permissions
from airflow.utils import timezone
from airflow.utils.session import provide_session
from airflow.www import app
from tests.test_utils.api_connexion_utils import assert_401, create_user, delete_user
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_logs
class TestEventLogEndpoint(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
with conf_vars({("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}):
cls.app = app.create_app(testing=True) # type:ignore
create_user(
cls.app, # type:ignore
username="test",
role_name="Test",
permissions=[(permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG)], # type: ignore
)
create_user(cls.app, username="test_no_permissions", role_name="TestNoPermissions") # type: ignore
@classmethod
def tearDownClass(cls) -> None:
delete_user(cls.app, username="test") # type: ignore
delete_user(cls.app, username="test_no_permissions") # type: ignore
def setUp(self) -> None:
self.client = self.app.test_client() # type:ignore
clear_db_logs()
self.default_time = "2020-06-10T20:00:00+00:00"
self.default_time_2 = '2020-06-11T07:00:00+00:00'
def tearDown(self) -> None:
clear_db_logs()
def _create_task_instance(self):
dag = DAG(
'TEST_DAG_ID',
start_date=timezone.parse(self.default_time),
end_date=timezone.parse(self.default_time),
)
op1 = DummyOperator(
task_id="TEST_TASK_ID",
owner="airflow",
)
dag.add_task(op1)
ti = TaskInstance(task=op1, execution_date=timezone.parse(self.default_time))
return ti
class TestGetEventLog(TestEventLogEndpoint):
@provide_session
def test_should_respond_200(self, session):
log_model = Log(
event='TEST_EVENT',
task_instance=self._create_task_instance(),
)
log_model.dttm = timezone.parse(self.default_time)
session.add(log_model)
session.commit()
event_log_id = log_model.id
response = self.client.get(
f"/api/v1/eventLogs/{event_log_id}", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
self.assertEqual(
response.json,
{
"event_log_id": event_log_id,
"event": "TEST_EVENT",
"dag_id": "TEST_DAG_ID",
"task_id": "TEST_TASK_ID",
"execution_date": self.default_time,
"owner": 'airflow',
"when": self.default_time,
"extra": None,
},
)
def test_should_respond_404(self):
response = self.client.get("/api/v1/eventLogs/1", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 404
self.assertEqual(
{'detail': None, 'status': 404, 'title': 'Event Log not found', 'type': EXCEPTIONS_LINK_MAP[404]},
response.json,
)
@provide_session
def test_should_raises_401_unauthenticated(self, session):
log_model = Log(
event='TEST_EVENT',
task_instance=self._create_task_instance(),
)
log_model.dttm = timezone.parse(self.default_time)
session.add(log_model)
session.commit()
event_log_id = log_model.id
response = self.client.get(f"/api/v1/eventLogs/{event_log_id}")
assert_401(response)
def test_should_raise_403_forbidden(self):
response = self.client.get(
"/api/v1/eventLogs", environ_overrides={'REMOTE_USER': "test_no_permissions"}
)
assert response.status_code == 403
class TestGetEventLogs(TestEventLogEndpoint):
@provide_session
def test_should_respond_200(self, session):
log_model_1 = Log(
event='TEST_EVENT_1',
task_instance=self._create_task_instance(),
)
log_model_2 = Log(
event='TEST_EVENT_2',
task_instance=self._create_task_instance(),
)
log_model_3 = Log(event="cli_scheduler", owner='root', extra='{"host_name": "e24b454f002a"}')
log_model_1.dttm = timezone.parse(self.default_time)
log_model_2.dttm = timezone.parse(self.default_time_2)
log_model_3.dttm = timezone.parse(self.default_time_2)
session.add_all([log_model_1, log_model_2, log_model_3])
session.commit()
response = self.client.get("/api/v1/eventLogs", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
self.assertEqual(
response.json,
{
"event_logs": [
{
"event_log_id": log_model_1.id,
"event": "TEST_EVENT_1",
"dag_id": "TEST_DAG_ID",
"task_id": "TEST_TASK_ID",
"execution_date": self.default_time,
"owner": 'airflow',
"when": self.default_time,
"extra": None,
},
{
"event_log_id": log_model_2.id,
"event": "TEST_EVENT_2",
"dag_id": "TEST_DAG_ID",
"task_id": "TEST_TASK_ID",
"execution_date": self.default_time,
"owner": 'airflow',
"when": self.default_time_2,
"extra": None,
},
{
"event_log_id": log_model_3.id,
"event": "cli_scheduler",
"dag_id": None,
"task_id": None,
"execution_date": None,
"owner": 'root',
"when": self.default_time_2,
"extra": '{"host_name": "e24b454f002a"}',
},
],
"total_entries": 3,
},
)
@provide_session
def test_should_raises_401_unauthenticated(self, session):
log_model_1 = Log(
event='TEST_EVENT_1',
task_instance=self._create_task_instance(),
)
log_model_2 = Log(
event='TEST_EVENT_2',
task_instance=self._create_task_instance(),
)
log_model_1.dttm = timezone.parse(self.default_time)
log_model_2.dttm = timezone.parse(self.default_time_2)
session.add_all([log_model_1, log_model_2])
session.commit()
response = self.client.get("/api/v1/eventLogs")
assert_401(response)
class TestGetEventLogPagination(TestEventLogEndpoint):
@parameterized.expand(
[
("api/v1/eventLogs?limit=1", ["TEST_EVENT_1"]),
("api/v1/eventLogs?limit=2", ["TEST_EVENT_1", "TEST_EVENT_2"]),
(
"api/v1/eventLogs?offset=5",
[
"TEST_EVENT_6",
"TEST_EVENT_7",
"TEST_EVENT_8",
"TEST_EVENT_9",
"TEST_EVENT_10",
],
),
(
"api/v1/eventLogs?offset=0",
[
"TEST_EVENT_1",
"TEST_EVENT_2",
"TEST_EVENT_3",
"TEST_EVENT_4",
"TEST_EVENT_5",
"TEST_EVENT_6",
"TEST_EVENT_7",
"TEST_EVENT_8",
"TEST_EVENT_9",
"TEST_EVENT_10",
],
),
("api/v1/eventLogs?limit=1&offset=5", ["TEST_EVENT_6"]),
("api/v1/eventLogs?limit=1&offset=1", ["TEST_EVENT_2"]),
(
"api/v1/eventLogs?limit=2&offset=2",
["TEST_EVENT_3", "TEST_EVENT_4"],
),
]
)
@provide_session
def test_handle_limit_and_offset(self, url, expected_events, session):
log_models = self._create_event_logs(10)
session.add_all(log_models)
session.commit()
response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
self.assertEqual(response.json["total_entries"], 10)
events = [event_log["event"] for event_log in response.json["event_logs"]]
self.assertEqual(events, expected_events)
@provide_session
def test_should_respect_page_size_limit_default(self, session):
log_models = self._create_event_logs(200)
session.add_all(log_models)
session.commit()
response = self.client.get("/api/v1/eventLogs", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
self.assertEqual(response.json["total_entries"], 200)
self.assertEqual(len(response.json["event_logs"]), 100) # default 100
@provide_session
@conf_vars({("api", "maximum_page_limit"): "150"})
def test_should_return_conf_max_if_req_max_above_conf(self, session):
log_models = self._create_event_logs(200)
session.add_all(log_models)
session.commit()
response = self.client.get("/api/v1/eventLogs?limit=180", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
self.assertEqual(len(response.json['event_logs']), 150)
def _create_event_logs(self, count):
return [
Log(event="TEST_EVENT_" + str(i), task_instance=self._create_task_instance())
for i in range(1, count + 1)
]
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Spectral operations (e.g. Short-time Fourier Transform)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.ops.signal import reconstruction_ops
from tensorflow.python.ops.signal import shape_ops
from tensorflow.python.ops.signal import window_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('signal.stft')
def stft(signals, frame_length, frame_step, fft_length=None,
window_fn=window_ops.hann_window,
pad_end=False, name=None):
"""Computes the [Short-time Fourier Transform][stft] of `signals`.
Implemented with GPU-compatible ops and supports gradients.
Args:
signals: A `[..., samples]` `float32` `Tensor` of real-valued signals.
frame_length: An integer scalar `Tensor`. The window length in samples.
frame_step: An integer scalar `Tensor`. The number of samples to step.
fft_length: An integer scalar `Tensor`. The size of the FFT to apply.
If not provided, uses the smallest power of 2 enclosing `frame_length`.
window_fn: A callable that takes a window length and a `dtype` keyword
argument and returns a `[window_length]` `Tensor` of samples in the
provided datatype. If set to `None`, no windowing is used.
pad_end: Whether to pad the end of `signals` with zeros when the provided
frame length and step produces a frame that lies partially past its end.
name: An optional name for the operation.
Returns:
A `[..., frames, fft_unique_bins]` `Tensor` of `complex64` STFT values where
`fft_unique_bins` is `fft_length // 2 + 1` (the unique components of the
FFT).
Raises:
ValueError: If `signals` is not at least rank 1, `frame_length` is
not scalar, or `frame_step` is not scalar.
[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
"""
with ops.name_scope(name, 'stft', [signals, frame_length,
frame_step]):
signals = ops.convert_to_tensor(signals, name='signals')
signals.shape.with_rank_at_least(1)
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
if fft_length is None:
fft_length = _enclosing_power_of_two(frame_length)
else:
fft_length = ops.convert_to_tensor(fft_length, name='fft_length')
framed_signals = shape_ops.frame(
signals, frame_length, frame_step, pad_end=pad_end)
# Optionally window the framed signals.
if window_fn is not None:
window = window_fn(frame_length, dtype=framed_signals.dtype)
framed_signals *= window
# fft_ops.rfft produces the (fft_length/2 + 1) unique components of the
# FFT of the real windowed signals in framed_signals.
return fft_ops.rfft(framed_signals, [fft_length])
@tf_export('signal.inverse_stft_window_fn')
def inverse_stft_window_fn(frame_step,
forward_window_fn=window_ops.hann_window,
name=None):
"""Generates a window function that can be used in `inverse_stft`.
Constructs a window that is equal to the forward window with a further
pointwise amplitude correction. `inverse_stft_window_fn` is equivalent to
`forward_window_fn` in the case where it would produce an exact inverse.
See examples in `inverse_stft` documentation for usage.
Args:
frame_step: An integer scalar `Tensor`. The number of samples to step.
forward_window_fn: window_fn used in the forward transform, `stft`.
name: An optional name for the operation.
Returns:
A callable that takes a window length and a `dtype` keyword argument and
returns a `[window_length]` `Tensor` of samples in the provided datatype.
The returned window is suitable for reconstructing original waveform in
inverse_stft.
"""
with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
def inverse_stft_window_fn_inner(frame_length, dtype):
"""Computes a window that can be used in `inverse_stft`.
Args:
frame_length: An integer scalar `Tensor`. The window length in samples.
dtype: Data type of waveform passed to `stft`.
Returns:
A window suitable for reconstructing original waveform in `inverse_stft`.
Raises:
      ValueError: If `frame_length` is not scalar, `forward_window_fn` is not a
        callable that takes a window length and a `dtype` keyword argument and
        returns a `[window_length]` `Tensor` of samples in the provided
        datatype, or `frame_step` is not scalar.
"""
with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
# Use equation 7 from Griffin + Lim.
forward_window = forward_window_fn(frame_length, dtype=dtype)
denom = math_ops.square(forward_window)
overlaps = -(-frame_length // frame_step) # Ceiling division.
denom = array_ops.pad(denom, [(0, overlaps * frame_step - frame_length)])
denom = array_ops.reshape(denom, [overlaps, frame_step])
denom = math_ops.reduce_sum(denom, 0, keepdims=True)
denom = array_ops.tile(denom, [overlaps, 1])
denom = array_ops.reshape(denom, [overlaps * frame_step])
return forward_window / denom[:frame_length]
return inverse_stft_window_fn_inner
@tf_export('signal.inverse_stft')
def inverse_stft(stfts,
frame_length,
frame_step,
fft_length=None,
window_fn=window_ops.hann_window,
name=None):
"""Computes the inverse [Short-time Fourier Transform][stft] of `stfts`.
  To reconstruct an original waveform, a complementary window function should
be used in inverse_stft. Such a window function can be constructed with
tf.signal.inverse_stft_window_fn.
Example:
```python
frame_length = 400
frame_step = 160
waveform = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1000])
stft = tf.signal.stft(waveform, frame_length, frame_step)
inverse_stft = tf.signal.inverse_stft(
stft, frame_length, frame_step,
window_fn=tf.signal.inverse_stft_window_fn(frame_step))
```
  If a custom `window_fn` is used in `stft`, it must be passed to
inverse_stft_window_fn:
```python
frame_length = 400
frame_step = 160
  window_fn = functools.partial(tf.signal.hamming_window, periodic=True)
waveform = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1000])
stft = tf.signal.stft(
waveform, frame_length, frame_step, window_fn=window_fn)
inverse_stft = tf.signal.inverse_stft(
stft, frame_length, frame_step,
window_fn=tf.signal.inverse_stft_window_fn(
frame_step, forward_window_fn=window_fn))
```
Implemented with GPU-compatible ops and supports gradients.
Args:
stfts: A `complex64` `[..., frames, fft_unique_bins]` `Tensor` of STFT bins
representing a batch of `fft_length`-point STFTs where `fft_unique_bins`
is `fft_length // 2 + 1`
frame_length: An integer scalar `Tensor`. The window length in samples.
frame_step: An integer scalar `Tensor`. The number of samples to step.
fft_length: An integer scalar `Tensor`. The size of the FFT that produced
`stfts`. If not provided, uses the smallest power of 2 enclosing
`frame_length`.
window_fn: A callable that takes a window length and a `dtype` keyword
argument and returns a `[window_length]` `Tensor` of samples in the
provided datatype. If set to `None`, no windowing is used.
name: An optional name for the operation.
Returns:
A `[..., samples]` `Tensor` of `float32` signals representing the inverse
STFT for each input STFT in `stfts`.
Raises:
ValueError: If `stfts` is not at least rank 2, `frame_length` is not scalar,
`frame_step` is not scalar, or `fft_length` is not scalar.
[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
"""
with ops.name_scope(name, 'inverse_stft', [stfts]):
stfts = ops.convert_to_tensor(stfts, name='stfts')
stfts.shape.with_rank_at_least(2)
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
if fft_length is None:
fft_length = _enclosing_power_of_two(frame_length)
else:
fft_length = ops.convert_to_tensor(fft_length, name='fft_length')
fft_length.shape.assert_has_rank(0)
real_frames = fft_ops.irfft(stfts, [fft_length])
# frame_length may be larger or smaller than fft_length, so we pad or
# truncate real_frames to frame_length.
frame_length_static = tensor_util.constant_value(frame_length)
# If we don't know the shape of real_frames's inner dimension, pad and
# truncate to frame_length.
if (frame_length_static is None or real_frames.shape.ndims is None or
real_frames.shape.as_list()[-1] is None):
real_frames = real_frames[..., :frame_length]
real_frames_rank = array_ops.rank(real_frames)
real_frames_shape = array_ops.shape(real_frames)
paddings = array_ops.concat(
[array_ops.zeros([real_frames_rank - 1, 2],
dtype=frame_length.dtype),
[[0, math_ops.maximum(0, frame_length - real_frames_shape[-1])]]], 0)
real_frames = array_ops.pad(real_frames, paddings)
# We know real_frames's last dimension and frame_length statically. If they
# are different, then pad or truncate real_frames to frame_length.
elif real_frames.shape.as_list()[-1] > frame_length_static:
real_frames = real_frames[..., :frame_length_static]
elif real_frames.shape.as_list()[-1] < frame_length_static:
pad_amount = frame_length_static - real_frames.shape.as_list()[-1]
real_frames = array_ops.pad(real_frames,
[[0, 0]] * (real_frames.shape.ndims - 1) +
[[0, pad_amount]])
# The above code pads the inner dimension of real_frames to frame_length,
# but it does so in a way that may not be shape-inference friendly.
# Restore shape information if we are able to.
if frame_length_static is not None and real_frames.shape.ndims is not None:
real_frames.set_shape([None] * (real_frames.shape.ndims - 1) +
[frame_length_static])
# Optionally window and overlap-add the inner 2 dimensions of real_frames
# into a single [samples] dimension.
if window_fn is not None:
window = window_fn(frame_length, dtype=stfts.dtype.real_dtype)
real_frames *= window
return reconstruction_ops.overlap_and_add(real_frames, frame_step)
def _enclosing_power_of_two(value):
"""Return 2**N for integer N such that 2**N >= value."""
value_static = tensor_util.constant_value(value)
if value_static is not None:
return constant_op.constant(
int(2**np.ceil(np.log(value_static) / np.log(2.0))), value.dtype)
return math_ops.cast(
math_ops.pow(
2.0,
math_ops.ceil(
math_ops.log(math_ops.cast(value, dtypes.float32)) /
math_ops.log(2.0))), value.dtype)
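# A minimal round-trip sketch (illustrative only; assumes an eager TensorFlow
# environment where the public tf.signal API is available, and uses arbitrary
# frame parameters):
def _example_stft_round_trip():
    import tensorflow as tf
    frame_length, frame_step = 400, 160
    waveform = tf.random.normal([8000])
    stfts = tf.signal.stft(waveform, frame_length, frame_step)
    return tf.signal.inverse_stft(
        stfts, frame_length, frame_step,
        window_fn=tf.signal.inverse_stft_window_fn(frame_step))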
|
|
import numpy as np
from sklearn.grid_search import GridSearchCV
import sklearn.metrics as metrics
from sklearn import preprocessing as prep
from tr_utils import merge_two_dicts, isEmpty
class SKSupervisedLearning (object):
"""
Thin wrapper around some learning methods
"""
def __init__(self, classifier, X_train, Y_train, X_test, Y_test):
"""
X_train, Y_train - training data: examples + corresponding class labels
X_test, Y_test - validation data: examples + corresponding class labels
"""
self.X_train = X_train
self.X_test = X_test
self.Y_train = Y_train
self.Y_test = Y_test
self.X_train_scaled = np.array([])
self.X_test_scaled = np.array([])
self._classifier = classifier
self._clf = None
self._proba_train = None
self._proba_test = None
self._train_params = None
self._estimation_params = None
self._scaler = None
# parameters for sklearn grid search
self._jobs = -1
self._cv = 10
self._verbose = 0
self._scoring = "log_loss"
@property
def scaler(self):
return self._scaler
@property
def clf(self):
        if self._clf is None:
            self._clf = self._classifier(**self.train_params) if self.train_params is not None else self._classifier()
return self._clf
@property
def proba_train(self):
return self._proba_train
@property
def proba_test(self):
return self._proba_test
@property
def train_params(self):
"""
Training parameter dictionary specific to each learner
"""
return self._train_params
@train_params.setter
def train_params(self, val):
self._train_params = val
@property
def estimation_params(self):
"""
        Dictionary of parameters to estimate, specific to each learner:
e.g.:
{'gamma': [0.001, 0.1, 1], 'C': [1, 10, 100]}
"""
return self._estimation_params
@estimation_params.setter
def estimation_params(self, val):
self._estimation_params = val
@property
def jobs(self):
return self._jobs
@jobs.setter
def jobs(self, val):
self._jobs = val
@property
def cv(self):
return self._cv
@cv.setter
def cv(self, val):
self._cv = val
@property
def scoring(self):
return self._scoring
@scoring.setter
def scoring(self, val):
self._scoring = val
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, val):
self._verbose = val
@property
def proba_train(self):
return self._proba_train
@property
def proba_test(self):
return self._proba_test
def _pick_examples(self):
'''
If we have scaled examples - pick them, else pick X_train, X_test
'''
return (self.X_train, self.X_test) \
if isEmpty(self.X_train_scaled) or isEmpty(self.X_test_scaled) \
else (self.X_train_scaled, self.X_test_scaled)
def remove_scaling(self):
self.X_test_scaled = None
self.X_train_scaled = None
def grid_search_classifier(self) :
"""
Grid search for the best classifier, given parameters.
Returns best score
Sets the classifier to the best classifier given training and estimation parameters
See sklearn GridSearchCV for details
"""
gs = False
        if self.train_params is None and self.estimation_params is None:
raise AttributeError("Cannot have train_params and estimation_params both absent")
# first - grid-search for the best parameters
if self.estimation_params:
X_train, X_test = self._pick_examples()
Y_train = self.Y_train
            clf = self._classifier(**self.train_params) if self.train_params is not None else self._classifier()
gs = GridSearchCV(clf, self.estimation_params, scoring = self.scoring, cv = self.cv, n_jobs=self.jobs, verbose = self.verbose)
gs.fit(X_train, Y_train)
            print(gs.best_params_)
            print(gs.best_score_)
# if we have specified parameters of our own - we need to add those
if gs:
            self.train_params = merge_two_dicts(gs.best_params_, self.train_params) if self.train_params is not None else gs.best_params_
self._clf = self._classifier(**self.train_params)
return gs.best_score_
def _fit_scaler(self, scaler_class, X):
return scaler_class().fit(X)
# TODO: other scalers?
def fit_standard_scaler(self):
"""
        Standard scaler scales samples 'vertically' (by feature) by removing the mean and scaling to unit standard deviation.
Computes a scaler and transforms both train and validation sets based upon it
"""
self._scaler = self._fit_scaler(prep.StandardScaler, self.X_train)
self.X_train_scaled = self._scaler.transform(self.X_train)
self.X_test_scaled = self._scaler.transform(self.X_test)
def fit_and_validate(self):
'''
Returns training & testing log loss
'''
X_train, X_test = self._pick_examples()
# shorthand
Y_train = self.Y_train
Y_test = self.Y_test
self.clf.fit(X_train, Y_train)
# get probabilities
self._proba_train = self.clf.predict_proba(X_train)
self._proba_test = self.clf.predict_proba(X_test)
return metrics.log_loss(Y_train, self.proba_train), np.array([]) if isEmpty(Y_test) else metrics.log_loss(Y_test, self.proba_test)
def predict_actual(self, X_actual_test):
'''
Return actual prediction on a set where we don't have labels
'''
return self.clf.predict_proba(X_actual_test)
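# A minimal usage sketch (illustrative only; SVC and the parameter grids below
# are assumptions, not requirements of this wrapper):
def example_svc_run(X_train, Y_train, X_test, Y_test):
    from sklearn.svm import SVC
    sl = SKSupervisedLearning(SVC, X_train, Y_train, X_test, Y_test)
    sl.train_params = {'probability': True}
    sl.estimation_params = {'gamma': [0.001, 0.1, 1], 'C': [1, 10, 100]}
    sl.fit_standard_scaler()
    sl.grid_search_classifier()
    # returns (train log loss, test log loss)
    return sl.fit_and_validate()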
|
|
import contextlib
import io
import os
import socketserver
import string
import sys
import threading
import time
import queue
import win32all
# int(md5.md5('ibb').hexdigest()[-4:], 16)
IBB_PORT = 26830
class StopServer(Exception):
pass
class CommandHandler:
def __init__(self):
self.systems = {}
def handle(self, cwd, args, wfile):
if '--stop' in args:
raise StopServer
else:
self.build(cwd, args[1:], wfile)
def build(self, cwd, targets, wfile):
try:
buildSystem = self.systems[cwd]
except KeyError:
print('loading build')
buildSystem = self.systems[cwd] = BuildSystem(cwd)
else:
print('reusing build')
buildSystem.build(targets, wfile)
class BuildServer:
def __init__(self):
self.commandHandler = CommandHandler()
def handle(self, rfile, wfile):
start = time.time()
try:
self.__handle(rfile, wfile)
finally:
elapsed = time.time() - start
print('Build took', elapsed, 'seconds')
def __handle(self, rfile, wfile):
rfile = io.TextIOWrapper(rfile, encoding='UTF-16LE')
wfile = io.TextIOWrapper(wfile, encoding='UTF-16LE')
exitCode = -1
cwd = None
args = []
while True:
command = rfile.readline()[:-1]
if command == 'build':
try:
self.commandHandler.handle(cwd, args, wfile)
except SystemExit as e:
exitCode = e.code
except StopServer:
# nasty way to shut down without deadlock
self.server._BaseServer__serving = False
return
else:
exitCode = 0
break
elif command.startswith('version: '):
pass
elif command.startswith('cwd: '):
cwd = command[5:]
elif command.startswith('arg: '):
args.append(command[5:])
elif not command:
print('no build command? ignoring')
break
wfile.write('exit code: %s\n' % (exitCode,))
def main(self):
class Handler(socketserver.StreamRequestHandler):
def handle(handler):
print('got connection')
return self.handle(handler.rfile, handler.wfile)
self.server = socketserver.TCPServer(("127.0.0.1", IBB_PORT), Handler)
self.server.serve_forever()
class TrayIcon:
pass
class FileSystem:
def __init__(self, directory):
self.directory = directory
self.drives = {}
def getNode(self, path):
path = os.path.normcase(os.path.normpath(self.abspath(path)))
drive, path = os.path.splitdrive(path)
path_list = self.splitall(path)
current = self.drives
abs = ''
for elt in (drive,) + path_list:
abs = os.path.join(abs, elt)
current.setdefault(elt, File(self, abs))
lastNode = current[elt]
current = current[elt].childNodes
return lastNode
# This is a better abspath
def abspath(self, path):
drive, path = os.path.splitdrive(path)
if drive:
if not path.startswith(os.sep):
path = os.sep + path
return os.path.join(drive, path)
else:
return os.path.abspath(os.path.join(self.directory, path))
def splitall(self, path):
ls = os.path.split(path)
while ls[0] != os.sep:
ls = os.path.split(ls[0]) + ls[1:]
return ls
class BuildConfig:
def __init__(self, nodeFactory):
self.nodeFactory = nodeFactory
self.nodes = []
self.subcommands = {}
def File(self, *args, **kw):
node = self.nodeFactory.getNode(*args, **kw)
self.nodes.append(node)
return node
def Command(self, *args, **kw):
node = Command(*args, **kw)
#self.nodes.append(node)
return node
def subcommand(self, commandFunction):
self.subcommands[commandFunction.__name__] = commandFunction
class DirectoryWatcher:
DIE = 'DIE'
def __init__(self, directory, onFileChange, onResetAll):
self.BUFFER_SIZE = 1 << 22 # 4 MiB
# if directory is SMB:
# self.BUFFER_SIZE = 1 << 16 # the maximum value allowed over SMB
# else:
self.directory = directory
self.onFileChange = onFileChange
self.onResetAll = onResetAll
self.directoryHandle = win32all.CreateFileW(
self.directory,
win32all.GENERIC_READ,
win32all.FILE_SHARE_READ | win32all.FILE_SHARE_WRITE | win32all.FILE_SHARE_DELETE,
None,
win32all.OPEN_EXISTING,
win32all.FILE_FLAG_BACKUP_SEMANTICS | win32all.FILE_FLAG_OVERLAPPED,
None)
self.bufferQueue = queue.Queue()
self.overlapped = win32all.OVERLAPPED()
self.overlapped.hEvent = win32all.CreateEvent(None, False, False, None)
self.started = threading.Event()
self.stopped = win32all.CreateEvent(None, False, False, None)
# Why two threads? If the internal ReadDirectoryChangesW
# change buffer fills, we lose change notifications. In that
# case, we have to reset the build system and rescan
# everything for dependencies, which we'd like to avoid. One
# thread is responsible for calling ReadDirectoryChangesW as
# fast as it can, queuing work for the thread to consume. If
# we temporarily queue 500 MB of change events, no
# problem...
self.changeThread = threading.Thread(target=self.watchForChanges)
self.changeThread.setDaemon(True)
self.processThread = threading.Thread(target=self.processChangeEvents)
self.processThread.setDaemon(True)
self.changeThread.start()
self.processThread.start()
# Once we know the thread has called ReadDirectoryChangesW
# once, we will not miss change notifications. The change
# queue is created on the first call to ReadDirectoryChangesW.
self.started.wait()
def dispose(self):
win32all.SetEvent(self.stopped)
self.bufferQueue.put(self.DIE)
self.changeThread.join()
self.processThread.join()
win32all.CloseHandle(self.directoryHandle)
win32all.CloseHandle(self.overlapped.hEvent)
def watchForChanges(self):
FILE_NOTIFY_CHANGE_ALL = win32all.FILE_NOTIFY_CHANGE_FILE_NAME | \
win32all.FILE_NOTIFY_CHANGE_DIR_NAME | \
win32all.FILE_NOTIFY_CHANGE_ATTRIBUTES | \
win32all.FILE_NOTIFY_CHANGE_SIZE | \
win32all.FILE_NOTIFY_CHANGE_LAST_WRITE | \
win32all.FILE_NOTIFY_CHANGE_LAST_ACCESS | \
win32all.FILE_NOTIFY_CHANGE_CREATION | \
win32all.FILE_NOTIFY_CHANGE_SECURITY
lastReadSize = 0
while True:
buffer = win32all.AllocateReadBuffer(self.BUFFER_SIZE)
win32all.ReadDirectoryChangesW(
self.directoryHandle,
buffer,
True, # watch subdirectories
FILE_NOTIFY_CHANGE_ALL,
self.overlapped)
self.started.set()
waited = win32all.WaitForMultipleObjects(
[self.stopped, self.overlapped.hEvent],
False,
win32all.INFINITE)
if waited == win32all.WAIT_OBJECT_0:
win32all.CancelIo(self.directoryHandle)
return
lastReadSize = win32all.GetOverlappedResult(self.directoryHandle, self.overlapped, True)
if lastReadSize == 0:
# This is easy to induce: add a sleep to the
# ReadDirectoryChangesW loop or make the buffer size
# tiny.
self.onResetAll()
#print('numBytes', lastReadSize)
self.bufferQueue.put(buffer[:lastReadSize].tobytes())
def processChangeEvents(self):
mapping = {
win32all.FILE_ACTION_ADDED: 'Create',
win32all.FILE_ACTION_REMOVED: 'Delete',
win32all.FILE_ACTION_MODIFIED: 'Change',
win32all.FILE_ACTION_RENAMED_OLD_NAME: 'RenameOld',
win32all.FILE_ACTION_RENAMED_NEW_NAME: 'RenameNew',
}
while True:
next = self.bufferQueue.get()
if next is self.DIE:
return
for action, fileName in win32all.FILE_NOTIFY_INFORMATION(next, len(next)):
#print(action, fileName)
self.onFileChange(mapping[action], os.path.join(self.directory, fileName))
class BuildSystem:
def __init__(self, directory):
self.directory = directory
self.directoryWatcher = DirectoryWatcher(directory, self.onFileChange, self.onResetAll)
self.fileSystem = FileSystem(directory)
self.__buildNode = self.fileSystem.getNode('main.ibb')
self.buildConfig = BuildConfig(self.fileSystem)
def readBuildScript(self):
self.fileSystem = FileSystem(self.directory)
self.__buildNode = self.fileSystem.getNode('main.ibb')
self.buildConfig = BuildConfig(self.fileSystem)
globals = {'build': self.buildConfig}
fn = 'main.ibb'
with open(os.path.join(self.directory, fn)) as f:
exec(compile(f.read(), fn, 'exec'), globals, globals)
def build(self, targets, wfile):
with self.__overrideOutput(wfile, wfile):
self.__build(targets)
@contextlib.contextmanager
def __overrideOutput(self, new_stdout, new_stderr):
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = new_stdout
sys.stderr = new_stderr
try:
yield
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
def __build(self, targets):
if self.__buildNode.dirty:
self.readBuildScript()
self.__buildNode.build()
if targets:
subcommandName = targets[0]
try:
sc = self.buildConfig.subcommands[subcommandName]
except KeyError:
pass
else:
sc(targets[1:])
return
for node in self.buildConfig.nodes:
node.build()
def onFileChange(self, change_type, absolute_path):
self.fileSystem.getNode(absolute_path).invalidate()
def onResetAll(self):
#self.fileSystem.dirtyAll()
pass
class Node:
def __init__(self):
self.dependencies = []
self.dependents = []
def addDependency(self, node):
self.dependencies.append(node)
def addDependent(self, node):
self.dependents.append(node)
def invalidate(self):
for dep in self.dependents:
dep.invalidate()
NoData = object()
class File(Node):
def __init__(self, fileSystem, path):
Node.__init__(self)
self.__fileSystem = fileSystem
self.path = path
self.dirty = True
self.childNodes = {}
self.__exists = NoData
self.__data = NoData
self.__children = NoData
self.__lock = threading.Lock() # hack: need to think about real safety
def __lt__(self, other):
return self.path < other.path
def __repr__(self):
return '<ibb.File %s>' % (self.path,)
def build(self):
if self.dirty:
for dep in self.dependencies:
dep.build()
self.dirty = False
def invalidate(self):
with self.__lock:
self.dirty = True
self.__exists = NoData
self.__data = NoData
self.__children = NoData
Node.invalidate(self)
@property
def abspath(self):
return self.path
@property
def exists(self):
if NoData is self.__exists:
self.__exists = os.path.exists(self.path)
return self.__exists
@property
def data(self):
with self.__lock:
if NoData is self.__data:
if os.path.exists(self.path):
self.__data = open(self.path, 'rb').read()
else:
self.__data = None
return self.__data
@property
def children(self):
with self.__lock:
if NoData is self.__children:
#print('getting children of', self.path)
self.__children = set(self.childNodes.values())
if os.path.isdir(self.path):
for path in os.listdir(self.path):
self.__children.add(self.__fileSystem.getNode(os.path.join(self.path, path)))
return self.__children
def walk(self):
stack = [self]
while stack:
node = stack.pop()
yield node
stack.extend(reversed(sorted(node.children)))
def flatten(ls):
out = []
for l in ls:
if isinstance(l, list):
out.extend(flatten(l))
else:
out.append(l)
return out
class IBBFormatter(string.Formatter):
def vformat(self, format_string, args, kwargs, recursion_depth=2):
if recursion_depth < 0:
raise ValueError('Max string recursion exceeded')
result = []
for literal_text, field_name, format_spec, conversion in \
self.parse(format_string):
# output the literal text
if literal_text:
result.append(literal_text)
# if there's a field, output it
if field_name is not None:
# this is some markup, find the object and do
# the formatting
# given the field_name, find the object it references
# and the argument it came from
obj, arg_used = self.get_field(field_name, args, kwargs)
# do any conversion on the resulting object
obj = self.convert_field(obj, conversion)
# expand the format spec, if needed
format_spec = self.vformat(format_spec, args, kwargs,
recursion_depth-1)
# format the object and append to the result
result.append(self.format_field(obj, format_spec))
if any(isinstance(r, list) for r in result):
return flatten(result)
else:
return ''.join(result)
def format_field(self, value, format_spec):
if isinstance(value, list):
return value
else:
return format(value, format_spec)
def subst(ls, args):
return flatten(
IBBFormatter().format(elt, **args)
for elt in ls)
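# A small demonstration sketch (illustrative only) of how subst expands
# list-valued fields: {targets} and {sources} are flattened into the final
# argument list, e.g.
#   subst(['gcc', '-o', '{targets}', '{sources}'],
#         dict(targets=['a.out'], sources=['main.c', 'util.c']))
#   == ['gcc', '-o', 'a.out', 'main.c', 'util.c']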
class BuildFailed(SystemExit):
pass
class Command(Node):
def __init__(self, targets, sources, command, cwd=None, env=None):
Node.__init__(self)
self.__dirty = True
for node in targets:
node.addDependency(self)
self.addDependent(node)
for node in sources:
self.addDependency(node)
node.addDependent(self)
def fmt(c):
if isinstance(c, Node):
return c.path
else:
return c
self.command = subst(command, dict(
targets=list(map(fmt, targets)),
sources=list(map(fmt, sources))))
def invalidate(self):
self.__dirty = True
Node.invalidate(self)
def build(self):
if self.__dirty:
# todo: use subprocess
# opportunity for tools to hook output (for dependency scanning)
print('executing:', ' '.join(self.command))
rv = os.system(' '.join(self.command))
if rv:
print('build failure:', rv)
raise BuildFailed(rv)
self.__dirty = False
if __name__ == '__main__':
BuildServer().main()
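# A hypothetical main.ibb sketch (illustrative only; the file names, compiler
# and flags are assumptions, not part of ibb itself). The build script is
# executed by readBuildScript with a `build` BuildConfig in its globals:
#
#   exe = build.File('out/hello.exe')
#   src = build.File('src/hello.c')
#   build.Command(
#       targets=[exe],
#       sources=[src],
#       command=['gcc', '-o', '{targets}', '{sources}'])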
|
|
from __future__ import with_statement
import os
import sys
from ConfigParser import ConfigParser
from itertools import chain
from bpython.keys import cli_key_dispatch as key_dispatch
from bpython.autocomplete import SIMPLE as default_completion
MAGIC_METHODS = ", ".join("__%s__" % s for s in [
"init", "repr", "str", "lt", "le", "eq", "ne", "gt", "ge", "cmp", "hash",
"nonzero", "unicode", "getattr", "setattr", "get", "set","call", "len",
"getitem", "setitem", "iter", "reversed", "contains", "add", "sub", "mul",
"floordiv", "mod", "divmod", "pow", "lshift", "rshift", "and", "xor", "or",
"div", "truediv", "neg", "pos", "abs", "invert", "complex", "int", "float",
"oct", "hex", "index", "coerce", "enter", "exit"]
)
class Struct(object):
"""Simple class for instantiating objects we can add arbitrary attributes
to and use for various arbitrary things."""
def get_config_home():
"""Returns the base directory for bpython's configuration files."""
xdg_config_home = os.environ.get('XDG_CONFIG_HOME', '~/.config')
return os.path.join(xdg_config_home, 'bpython')
def default_config_path():
"""Returns bpython's default configuration file path."""
return os.path.join(get_config_home(), 'config')
def fill_config_with_default_values(config, default_values):
for section in default_values.iterkeys():
if not config.has_section(section):
config.add_section(section)
for (opt, val) in default_values[section].iteritems():
if not config.has_option(section, opt):
config.set(section, opt, str(val))
def loadini(struct, configfile):
"""Loads .ini configuration file and stores its values in struct"""
config_path = os.path.expanduser(configfile)
if not os.path.isfile(config_path) and configfile == default_config_path():
# We decided that '~/.bpython/config' still was a crappy
# place, use XDG Base Directory Specification instead. Fall
# back to old config, though.
config_path = os.path.expanduser('~/.bpython/config')
config = ConfigParser()
fill_config_with_default_values(config, {
'general': {
'arg_spec': True,
'auto_display_list': True,
'color_scheme': 'default',
'complete_magic_methods' : True,
'magic_methods' : MAGIC_METHODS,
'autocomplete_mode': default_completion,
'dedent_after': 1,
'flush_output': True,
'highlight_show_source': True,
'hist_file': '~/.pythonhist',
'hist_length': 100,
'hist_duplicates': True,
'paste_time': 0.02,
'syntax': True,
'tab_length': 4,
'pastebin_confirm': True,
            'pastebin_private': True,
            'pastebin_url': 'http://bpaste.net/xmlrpc/',
'pastebin_show_url': 'http://bpaste.net/show/$paste_id/',
'pastebin_helper': '',
},
'keyboard': {
'clear_line': 'C-u',
'clear_screen': 'C-l',
'clear_word': 'C-w',
'cut_to_buffer': 'C-k',
'delete': 'C-d',
'down_one_line': 'C-n',
'exit': '',
'last_output': 'F9',
'pastebin': 'F8',
'save': 'C-s',
'show_source': 'F2',
'suspend': 'C-z',
'undo': 'C-r',
'search': 'C-o',
'up_one_line': 'C-p',
'yank_from_buffer': 'C-y'},
'cli': {
'suggestion_width': 0.8,
'trim_prompts': False,
},
'gtk': {
'font': 'monospace 10',
'color_scheme': 'default'}})
if not config.read(config_path):
# No config file. If the user has it in the old place then complain
if os.path.isfile(os.path.expanduser('~/.bpython.ini')):
sys.stderr.write("Error: It seems that you have a config file at "
"~/.bpython.ini. Please move your config file to "
"%s\n" % default_config_path())
sys.exit(1)
struct.dedent_after = config.getint('general', 'dedent_after')
struct.tab_length = config.getint('general', 'tab_length')
struct.auto_display_list = config.getboolean('general',
'auto_display_list')
struct.syntax = config.getboolean('general', 'syntax')
struct.arg_spec = config.getboolean('general', 'arg_spec')
struct.paste_time = config.getfloat('general', 'paste_time')
struct.highlight_show_source = config.getboolean('general',
'highlight_show_source')
struct.hist_file = config.get('general', 'hist_file')
struct.hist_length = config.getint('general', 'hist_length')
struct.hist_duplicates = config.getboolean('general', 'hist_duplicates')
struct.flush_output = config.getboolean('general', 'flush_output')
struct.pastebin_key = config.get('keyboard', 'pastebin')
struct.save_key = config.get('keyboard', 'save')
struct.search_key = config.get('keyboard', 'search')
struct.show_source_key = config.get('keyboard', 'show_source')
struct.suspend_key = config.get('keyboard', 'suspend')
struct.undo_key = config.get('keyboard', 'undo')
struct.up_one_line_key = config.get('keyboard', 'up_one_line')
struct.down_one_line_key = config.get('keyboard', 'down_one_line')
struct.cut_to_buffer_key = config.get('keyboard', 'cut_to_buffer')
struct.yank_from_buffer_key = config.get('keyboard', 'yank_from_buffer')
struct.clear_word_key = config.get('keyboard', 'clear_word')
struct.clear_line_key = config.get('keyboard', 'clear_line')
struct.clear_screen_key = config.get('keyboard', 'clear_screen')
struct.delete_key = config.get('keyboard', 'delete')
struct.exit_key = config.get('keyboard', 'exit')
struct.last_output_key = config.get('keyboard', 'last_output')
struct.pastebin_confirm = config.getboolean('general', 'pastebin_confirm')
struct.pastebin_private = config.getboolean('general', 'pastebin_private')
    struct.pastebin_url = config.get('general', 'pastebin_url')
struct.pastebin_show_url = config.get('general', 'pastebin_show_url')
struct.pastebin_helper = config.get('general', 'pastebin_helper')
struct.cli_suggestion_width = config.getfloat('cli',
'suggestion_width')
struct.cli_trim_prompts = config.getboolean('cli',
'trim_prompts')
struct.complete_magic_methods = config.getboolean('general',
'complete_magic_methods')
methods = config.get('general', 'magic_methods')
struct.magic_methods = [meth.strip() for meth in methods.split(",")]
struct.autocomplete_mode = config.get('general', 'autocomplete_mode')
struct.gtk_font = config.get('gtk', 'font')
color_scheme_name = config.get('general', 'color_scheme')
color_gtk_scheme_name = config.get('gtk', 'color_scheme')
default_colors = {
'keyword': 'y',
'name': 'c',
'comment': 'b',
'string': 'm',
'error': 'r',
'number': 'G',
'operator': 'Y',
'punctuation': 'y',
'token': 'C',
'background': 'd',
'output': 'w',
'main': 'c',
'paren': 'R',
'prompt': 'c',
'prompt_more': 'g',
}
default_gtk_colors = {
'keyword': 'b',
'name': 'k',
'comment': 'b',
'string': 'm',
'error': 'r',
'number': 'G',
'operator': 'B',
'punctuation': 'g',
'token': 'C',
'background': 'w',
'output': 'k',
'main': 'c',
'paren': 'R',
'prompt': 'b',
'prompt_more': 'g',
}
if color_scheme_name == 'default':
struct.color_scheme = default_colors
else:
struct.color_scheme = dict()
theme_filename = color_scheme_name + '.theme'
path = os.path.expanduser(os.path.join(get_config_home(),
theme_filename))
old_path = os.path.expanduser(os.path.join('~/.bpython',
theme_filename))
for path in [path, old_path]:
try:
load_theme(struct, path, struct.color_scheme, default_colors)
except EnvironmentError:
continue
else:
break
else:
sys.stderr.write("Could not load theme '%s'.\n" %
(color_scheme_name, ))
sys.exit(1)
if color_gtk_scheme_name == 'default':
struct.color_gtk_scheme = default_gtk_colors
else:
struct.color_gtk_scheme = dict()
# Note: This is a new config option, hence we don't have a
# fallback directory.
path = os.path.expanduser(os.path.join(get_config_home(),
color_gtk_scheme_name + '.theme'))
try:
load_theme(struct, path, struct.color_gtk_scheme, default_colors)
except EnvironmentError:
sys.stderr.write("Could not load gtk theme '%s'.\n" %
(color_gtk_scheme_name, ))
sys.exit(1)
    # Check for a valid key configuration (this part could still use improvement)
for key in (struct.pastebin_key, struct.save_key):
key_dispatch[key]
def load_theme(struct, path, colors, default_colors):
theme = ConfigParser()
with open(path, 'r') as f:
theme.readfp(f)
for k, v in chain(theme.items('syntax'), theme.items('interface')):
if theme.has_option('syntax', k):
colors[k] = theme.get('syntax', k)
else:
colors[k] = theme.get('interface', k)
# Check against default theme to see if all values are defined
for k, v in default_colors.iteritems():
if k not in colors:
colors[k] = v
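# A minimal usage sketch (illustrative only): populate a Struct from the
# default config path and read one of the resulting attributes.
def _example_load_config():
    struct = Struct()
    loadini(struct, default_config_path())
    return struct.tab_length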
|
|
from __future__ import unicode_literals
from django import forms
from django.conf import settings
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured
from django.db import transaction
from django.forms import widgets
from django.http import (
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseNotAllowed,
JsonResponse,
)
from django.urls import reverse
from django.utils.html import mark_safe
from django.utils.translation import ugettext_lazy as _
from admin_sort.utils.model_sorting import position_object, set_position_for_new_obj
POSITION_CHOICES = (
('', _('Current Position')),
('first-child', _('At the top')),
('last-child', _('At the Bottom')),
)
MOVE_CHOICES = (
('left', _('On top of the target')),
('right', _('At the bottom of the target')),
)
class SortableAdminMixin(object):
"""
_field<required>: which is the positioning field
_insert_position<default:'last'>: defines where a new object is inserted
last: at the end of the list
first: at the start
"""
_field = None
_insert_position = None
change_list_template = 'admin/admin_sort/change_list.html'
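    # A minimal usage sketch (illustrative only; the model and admin below are
    # assumptions, not part of this package):
    #
    #   class Book(models.Model):
    #       position = models.PositiveIntegerField(default=0)
    #
    #       class Meta:
    #           ordering = ['position']
    #
    #   @admin.register(Book)
    #   class BookAdmin(SortableAdminMixin, admin.ModelAdmin):
    #       position_field = 'position'
    #       insert_position = 'last'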
@property
def media(self):
css = {
'all': [
'admin_sort/css/sortable.css',
]
}
if 'djangocms_admin_style' in settings.INSTALLED_APPS:
css['all'].append('admin_sort/css/sortable.cms.css')
js = [
'admin_sort/js/sortable.js',
'admin_sort/js/sortable.list.js',
]
original_media = super(SortableAdminMixin, self).media
return original_media + widgets.Media(css=css, js=js)
def __init__(self, model, admin_site):
self._field = getattr(self, 'position_field', None)
self._insert_position = getattr(self, 'insert_position', 'last')
if not self._field:
msg = _('You have to define a position_field on your {} for SortableAdminMixin to work.').format(
self.__class__.__name__
)
raise ImproperlyConfigured(msg)
if '-{}'.format(self._field) in model._meta.ordering:
msg = _(
                '{0} cannot be in reverse order (-{0}). '
                'Use {1}.insert_position = first instead.'
).format(self._field, self.__class__.__name__)
raise ImproperlyConfigured(msg)
if self._field not in model._meta.ordering:
msg = _(
                '{} has to be in the Meta.ordering of your Model'
).format(self._field)
raise ImproperlyConfigured(msg)
# Force ordering by position, for this admin!
self.ordering = [self._field]
super(SortableAdminMixin, self).__init__(model, admin_site)
def get_exclude(self, request, obj=None):
exclude = self.exclude or []
if self._field not in exclude:
exclude.append(self._field)
return exclude
def get_list_display(self, request):
list_display = ['_col_move_node'] + [
d for d in super(SortableAdminMixin, self).get_list_display(
request
)
]
# Make sure the position field itself does not show up in list_display
if self._field in list_display:
list_display.remove(self._field)
return list_display
def get_list_display_links(self, request, list_display):
if (self.list_display_links or self.list_display_links is None or not list_display):
return self.list_display_links
else:
# Use only the second item in list_display as link
# second because the first is our drag handle
return list(list_display)[1:2]
def get_queryset(self, request):
qs = super(SortableAdminMixin, self).get_queryset(request)
return qs
def get_urls(self):
info = [self.model._meta.app_label, self.model._meta.model_name]
urls = [
url(
r'^update/$',
self.admin_site.admin_view(self.update_view),
name='{}_{}_update'.format(*info)
),
url(
r'^reorder/$',
self.admin_site.admin_view(self.reorder_view),
name='{}_{}_reorder'.format(*info)
),
]
urls += super(SortableAdminMixin, self).get_urls()
return urls
def changelist_view(self, request, node_id=None, extra_context=None):
extra_context = extra_context or {}
extra_context.update({
'update_url': self.get_update_url(),
'reorder_url': self.get_reorder_url(),
})
return super(SortableAdminMixin, self).changelist_view(
request,
extra_context,
)
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
return form.save(commit=False)
def save_model(self, request, obj, form, change):
if not getattr(obj, 'pk', None):
set_position_for_new_obj(obj, self._field, self._insert_position)
obj.save()
def reorder_view(self, request):
error_response = self.check_ajax_request(request)
if error_response:
return error_response
data = self._reorder_all()
return JsonResponse(data)
def update_view(self, request):
error_response = self.check_ajax_request(request)
if error_response:
return error_response
data = {}
Form = self.get_update_form_class()
form = Form(request.POST)
if form.is_valid():
data = self._move_obj(
form.cleaned_data.get('obj'),
form.cleaned_data.get('target'),
form.cleaned_data.get('position'),
)
else:
# TODO admin message
data = {
'message': 'error',
'error': _('There seems to be a problem with your list')
}
if data.get('message') == 'error':
# TODO cleanup or provide the user a cleanup choice
self._reorder_all()
return JsonResponse(data)
def check_ajax_request(self, request):
if not request.is_ajax():
return HttpResponseBadRequest(
'Not an XMLHttpRequest'
)
if request.method != 'POST':
return HttpResponseNotAllowed(
'Must be a POST request'
)
if not self.has_change_permission(request):
return HttpResponseForbidden(
'Missing permissions to perform this request'
)
return None
def get_update_form_class(self):
class UpdateForm(forms.Form):
position = forms.ChoiceField(
choices=MOVE_CHOICES
)
obj = forms.ModelChoiceField(
queryset=self.model._default_manager.get_queryset()
)
target = forms.ModelChoiceField(
queryset=self.model._default_manager.get_queryset()
)
return UpdateForm
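# Hedged sketch (illustrative, not part of the package): the XMLHttpRequest POST
# that update_view() expects, matching the UpdateForm fields above. The primary
# keys are made up; the URL is the 'update/' route registered in get_urls().
#
#   POST /admin/<app_label>/<model_name>/update/
#   position=left&obj=12&target=34
#
# 'position' must be one of MOVE_CHOICES ('left' or 'right'); 'obj' and 'target'
# are primary keys of the model being sorted.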
def get_reorder_url(self):
info = [self.model._meta.app_label, self.model._meta.model_name]
return reverse(
'admin:{}_{}_reorder'.format(*info),
current_app=self.admin_site.name
)
def get_update_url(self):
info = [self.model._meta.app_label, self.model._meta.model_name]
return reverse(
'admin:{}_{}_update'.format(*info),
current_app=self.admin_site.name
)
def _move_obj(self, obj, target, position):
base_qs = self.model._default_manager.get_queryset()
obj_start = getattr(obj, self._field, None)
target_start = getattr(target, self._field, None)
# EDGE Cases
if obj_start == target_start:
# TODO: this is an ugly hack; try to find a better way
self._reorder_all()
obj = base_qs.get(pk=obj.pk)
target = base_qs.get(pk=target.pk)
obj_start = getattr(obj, self._field, None)
target_start = getattr(target, self._field, None)
direction = 'down'
start, end = obj_start, target_start
# set Direction
if obj_start < target_start:
direction = 'down'
start, end = obj_start, target_start
if obj_start > target_start:
direction = 'up'
start, end = target_start, obj_start
# the affected objects
kwargs = {'%s__gte' % self._field: start, '%s__lte' % self._field: end}
# build queryset kwargs
if direction == 'down':
if position == 'left':
# Nasty exception: this should not happen, but it does because
# js/sortable does not always behave consistently.
# Keep the obj at its current position.
new_position = getattr(obj, self._field)
elif position == 'right':
# set the obj position to the target's position
new_position = getattr(target, self._field)
elif direction == 'up':
if position == 'right':
# set the obj position to 1 greater than the target's position
new_position = getattr(target, self._field) + 1
elif position == 'left':
# set the obj position to the target's position
new_position = getattr(target, self._field)
# do it, with helper method
position_object(obj, self._field, new_position)
# return info
return_data = {
'message': 'ok',
'dir': direction,
'pos': position,
'start': start,
'end': end,
'object_list': [
o for o in base_qs.filter(**kwargs).values('pk', self._field)
]
}
return return_data
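# Worked example of the logic above (illustrative values): dragging an object at
# position 5 onto a target at position 2 yields direction 'up'; with position
# 'left' the object takes the target's position (2), with 'right' it lands just
# below the target (3). Dragging an object at position 2 onto a target at
# position 5 yields direction 'down'; with 'right' it takes the target's
# position (5), with 'left' it keeps its own position (the edge case noted
# above).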
def _reorder_all(self):
# Renumber every object sequentially (TODO: optimize; this saves each object individually)
object_list = []
with transaction.atomic():
pos = 1
for o in self.model._default_manager.get_queryset():
setattr(o, self._field, pos)
o.save()
object_list.append([pos, o.pk, '{}'.format(o)])
pos += 1
return {
'message': 'ok',
'objects': object_list,
}
# CHANGE LIST AUXILIARY COLUMNS
def _col_move_node(self, obj):
data_attrs = [
'data-pk="{}"'.format(obj.pk),
'data-name="{}"'.format(obj),
]
html = '<span class="admin-sort-drag" {}></span>'.format(
' '.join(data_attrs)
)
return mark_safe(html)
_col_move_node.short_description = ''
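# --- Hedged usage sketch (not part of the package) -----------------------------
# How a project might plug the mixin into a ModelAdmin. "MyModel" and its
# "position" field are assumptions for illustration; the model's Meta.ordering
# must contain the position field, as enforced in __init__ above.
#
# from django.contrib import admin
# from myapp.models import MyModel
#
# @admin.register(MyModel)
# class MyModelAdmin(SortableAdminMixin, admin.ModelAdmin):
#     position_field = 'position'      # required
#     insert_position = 'last'         # optional: 'first' or 'last'
#     list_display = ('name',)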
|
|
"""
Test distributed simulation.
"""
import cPickle
import hashlib
import logging
import os
import shutil
import socket
import sys
import traceback
import unittest
from math import pi
from multiprocessing import AuthenticationError
from multiprocessing.managers import RemoteError
from Crypto.Random import get_random_bytes
from openmdao.lib.casehandlers.api import ListCaseRecorder
from openmdao.main.api import Assembly, Component, Container, Driver, set_as_top
from openmdao.main.container import get_closest_proxy
from openmdao.main.datatypes.api import Float, Int, FileRef
from openmdao.main.hasobjective import HasObjectives
from openmdao.main.hasparameters import HasParameters
from openmdao.main.interfaces import IComponent
from openmdao.main.mp_support import has_interface, is_instance
from openmdao.main.mp_util import read_server_config
from openmdao.main.objserverfactory import connect, start_server, RemoteFile
from openmdao.main.rbac import Credentials, get_credentials, set_credentials, \
AccessController, RoleError, rbac
from openmdao.test.execcomp import ExecComp
from openmdao.util.decorators import add_delegate
from openmdao.util.fileutil import onerror
from openmdao.util.publickey import get_key_pair
from openmdao.util.testutil import assert_raises, assert_rel_error
from traits.api import CTrait
# Used for naming classes we want to create instances of.
_MODULE = 'openmdao.main.test.test_distsim'
# Used for naming server directories.
_SERVER_ID = 0
class Box(ExecComp):
""" Simple component for testing. """
pid = Int(iotype='out')
def __init__(self):
super(Box, self).__init__([
'surface_area = (width*(height+depth) + depth*height)*2',
'volume = width*height*depth'])
self.pid = os.getpid()
# For get_closest_proxy().
sub = self.add('subcontainer', Container())
sub.add('subvar', Int())
def execute(self):
print 'Box.execute(), %f %f %f on %s:%d' \
% (self.width, self.height, self.depth,
socket.gethostname(), self.pid)
sys.stdout.flush()
super(Box, self).execute()
def no_rbac(self):
pass
@rbac('owner', proxy_types=[RemoteFile])
def open_in_parent(self, path, mode):
try:
return self.parent.open(path, mode)
except Exception as exc:
self._logger.debug('open_in_parent() caught %s:', exc)
self._logger.debug(traceback.format_exc())
@rbac('owner')
def cause_parent_error1(self):
return self.parent.no_such_variable
@rbac('owner')
def cause_parent_error2(self):
return self.parent.get_trait('no-such-trait')
@rbac('owner')
def cause_parent_error3(self):
return self.parent.xyzzy()
class HollowSphere(Component):
""" Simple component for testing. """
radius = Float(1.0, low=0., exclude_low=True, iotype='in', units='cm')
thickness = Float(0.05, iotype='in', units='cm')
inner_volume = Float(iotype='out', units='cm**3')
volume = Float(iotype='out', units='cm**3')
solid_volume = Float(iotype='out', units='cm**3')
surface_area = Float(iotype='out', units='cm**2')
pid = Int(iotype='out')
def __init__(self):
super(HollowSphere, self).__init__()
self.pid = os.getpid()
def execute(self):
self.surface_area = 4.0 * pi * self.radius * self.radius
self.inner_volume = 4.0 / 3.0 * pi * self.radius ** 3
self.volume = 4.0 / 3.0 * pi * (self.radius + self.thickness) ** 3
self.solid_volume = self.volume - self.inner_volume
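# For reference, the numbers these formulas produce for radius=2.0 cm and
# thickness=0.05 cm (the values exercised in TestCase.test_1_client below):
#   surface_area = 4*pi*2**2             ~= 50.2655 cm**2
#   inner_volume = 4/3*pi*2**3           ~= 33.5103 cm**3
#   volume       = 4/3*pi*(2 + 0.05)**3  ~= 36.0870 cm**3
#   solid_volume = volume - inner_volume ~=  2.5766 cm**3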
@add_delegate(HasParameters)
@add_delegate(HasObjectives)
class BoxDriver(Driver):
""" Just drives :class:`Box` inputs and records results. """
def execute(self):
""" Runs with various box parameter values. """
for width in range(1, 2):
for height in range(1, 3):
for depth in range(1, 4):
self._logger.debug('w,h,d %s, %s, %s', width, height, depth)
self.set_parameters((width, height, depth))
self.workflow.run()
volume, area = self.eval_objectives()
self._logger.debug(' v,a %s, %s', volume, area)
class BoxSource(ExecComp):
""" Just a pass-through for :class:`BoxDriver` input values. """
def __init__(self):
super(BoxSource, self).__init__(['width_out = width_in',
'height_out = height_in',
'depth_out = depth_in'])
# For get_closest_proxy().
sub = self.add('subcontainer', Container())
sub.add('subvar', Int())
class BoxSink(ExecComp):
""" Just a pass-through for :class:`BoxDriver` result values. """
def __init__(self):
super(BoxSink, self).__init__(['volume_out = volume_in',
'area_out = area_in'])
class Model(Assembly):
""" Drive a remote :class:`Box` via connections to local components. """
def __init__(self, box):
super(Model, self).__init__()
self.recorders = [ListCaseRecorder()]
self.add('driver', BoxDriver())
self.driver.workflow.add(self.add('source', BoxSource()).name)
self.driver.workflow.add(self.add('box', box).name)
self.driver.workflow.add(self.add('sink', BoxSink()).name)
self.driver.add_parameter('source.width_in', low=1e-99, high=1e99)
self.driver.add_parameter('source.height_in', low=1e-99, high=1e99)
self.driver.add_parameter('source.depth_in', low=1e-99, high=1e99)
self.connect('source.width_out', 'box.width')
self.connect('source.height_out', 'box.height')
self.connect('source.depth_out', 'box.depth')
self.connect('box.volume', 'sink.volume_in')
self.connect('box.surface_area', 'sink.area_in')
self.driver.add_objective('sink.volume_out', 'volume')
self.driver.add_objective('sink.area_out', 'area')
@rbac('owner', proxy_types=[RemoteFile])
def open(self, path, mode):
""" Return opened file. """
return RemoteFile(open(path, mode))
@rbac('xyzzy')
def xyzzy(self):
""" No access by 'owner', etc. """
return None
class Protector(AccessController):
""" Special :class:`AccessController` to protect secrets. """
def check_access(self, role, methodname, obj, attr):
if not role:
raise RoleError('No access by null role')
if role == 'owner':
return
if methodname != '__delattr__' and self.user_attribute(obj, attr):
return
raise RoleError("No %s access to '%s' by role '%s'"
% (methodname, attr, role))
@staticmethod
def user_attribute(obj, attr):
if attr in obj.list_inputs() or \
attr in obj.list_outputs() or \
attr in ('parent', 'name'):
return True
return False
class ProtectedBox(Box):
""" Box which can be used but the innards are hidden. """
secret = Int()
def __init__(self):
super(ProtectedBox, self).__init__()
# Protector will use current credentials as 'owner'.
self.protector = Protector()
@rbac('owner')
def proprietary_method(self):
pass
def get_access_controller(self):
return self.protector
@rbac(('owner', 'user'), proxy_types=[FileRef])
def get(self, path, index=None):
if self.protector.user_attribute(self, path):
return super(ProtectedBox, self).get(path, index)
raise RoleError('No get access to %r' % path)
@rbac(('owner', 'user'), proxy_types=[CTrait])
def get_dyn_trait(self, name, iotype=None, trait=None):
if self.protector.user_attribute(self, name):
return super(ProtectedBox, self).get_dyn_trait(name, iotype, trait)
raise RoleError('No get_dyn_trait access to %r' % name)
@rbac(('owner', 'user'))
def get_attr(self, name, index=None):
if self.protector.user_attribute(self, name):
return super(ProtectedBox, self).get_attr(name)
raise RoleError('No get_attr access to %r' % name)
@rbac(('owner', 'user'))
def set(self, path, value, index=None, force=False):
if self.protector.user_attribute(self, path):
return super(ProtectedBox, self).set(path, value, index, force)
raise RoleError('No set access to %r' % path)
class TestCase(unittest.TestCase):
""" Test distributed simulation. """
def run(self, result=None):
"""
Record the :class:`TestResult` used so we can conditionally cleanup
directories in :meth:`tearDown`.
"""
self.test_result = result or unittest.TestResult()
return super(TestCase, self).run(self.test_result)
def setUp(self):
""" Called before each test. """
self.n_errors = len(self.test_result.errors)
self.n_failures = len(self.test_result.failures)
self.factories = []
self.servers = []
self.server_dirs = []
# Ensure we control directory cleanup.
self.keepdirs = os.environ.get('OPENMDAO_KEEPDIRS', '0')
os.environ['OPENMDAO_KEEPDIRS'] = '1'
def start_factory(self, port=None, allowed_users=None):
""" Start each factory process in a unique directory. """
global _SERVER_ID
_SERVER_ID += 1
server_dir = 'Factory_%d' % _SERVER_ID
if os.path.exists(server_dir):
shutil.rmtree(server_dir, onerror=onerror)
os.mkdir(server_dir)
os.chdir(server_dir)
self.server_dirs.append(server_dir)
try:
logging.debug('')
logging.debug('tester pid: %s', os.getpid())
logging.debug('starting server...')
if port is None:
# Exercise both AF_INET and AF_UNIX/AF_PIPE.
port = -1 if _SERVER_ID & 1 else 0
if allowed_users is None:
credentials = get_credentials()
allowed_users = {credentials.user: credentials.public_key}
allowed_types = ['openmdao.main.test.test_distsim.HollowSphere',
'openmdao.main.test.test_distsim.Box',
'openmdao.main.test.test_distsim.ProtectedBox']
server, server_cfg = start_server(port=port,
allowed_users=allowed_users,
allowed_types=allowed_types,
log_prefix=server_dir)
self.servers.append(server)
cfg = read_server_config(server_cfg)
self.address = cfg['address']
self.port = cfg['port']
self.tunnel = cfg['tunnel']
self.key = cfg['key']
logging.debug('server pid: %s', server.pid)
logging.debug('server address: %s', self.address)
logging.debug('server port: %s', self.port)
logging.debug('server key: %s', self.key)
finally:
os.chdir('..')
factory = connect(self.address, self.port, self.tunnel, pubkey=self.key)
self.factories.append(factory)
logging.debug('factory: %r', factory)
return factory
def tearDown(self):
""" Shut down server process. """
try:
for factory in self.factories:
factory.cleanup()
for server in self.servers:
logging.debug('terminating server pid %s', server.pid)
server.terminate(timeout=10)
# Cleanup only if there weren't any new errors or failures.
if len(self.test_result.errors) == self.n_errors and \
len(self.test_result.failures) == self.n_failures and \
not int(self.keepdirs):
for server_dir in self.server_dirs:
shutil.rmtree(server_dir, onerror=onerror)
finally:
os.environ['OPENMDAO_KEEPDIRS'] = self.keepdirs
def test_1_client(self):
logging.debug('')
logging.debug('test_client')
factory = self.start_factory()
# List available types.
types = factory.get_available_types()
logging.debug('Available types:')
for typname, version in types:
logging.debug(' %s %s', typname, version)
# First a HollowSphere, accessed via get()/set().
obj = factory.create(_MODULE + '.HollowSphere')
sphere_pid = obj.get('pid')
self.assertNotEqual(sphere_pid, os.getpid())
radius = obj.get('radius')
self.assertEqual(radius, 1.)
radius += 1
obj.set('radius', radius)
new_radius = obj.get('radius')
self.assertEqual(new_radius, 2.)
self.assertEqual(obj.get('inner_volume'), 0.)
self.assertEqual(obj.get('volume'), 0.)
self.assertEqual(obj.get('solid_volume'), 0.)
self.assertEqual(obj.get('surface_area'), 0.)
obj.run()
assert_rel_error(self, obj.get('inner_volume'), 33.510321638, 0.000001)
assert_rel_error(self, obj.get('volume'), 36.086951213, 0.000001)
assert_rel_error(self, obj.get('solid_volume'), 2.5766295747, 0.000001)
assert_rel_error(self, obj.get('surface_area'), 50.265482457, 0.000001)
try:
obj.set('radius', -1)
except RemoteError as exc:
fragment = ": Variable 'radius' must be a float in the range (0.0, "
if fragment not in str(exc):
self.fail('%s not found in %s' % (fragment, exc))
else:
self.fail('Expected RemoteError')
# Now a Box, accessed via attribute methods.
obj = factory.create(_MODULE + '.Box')
box_pid = obj.get('pid')
self.assertNotEqual(box_pid, os.getpid())
self.assertNotEqual(box_pid, sphere_pid)
obj.width += 2
obj.height += 2
obj.depth += 2
self.assertEqual(obj.width, 2.)
self.assertEqual(obj.height, 2.)
self.assertEqual(obj.depth, 2.)
self.assertEqual(obj.volume, 0.)
self.assertEqual(obj.surface_area, 0.)
obj.run()
self.assertEqual(obj.volume, 8.0)
self.assertEqual(obj.surface_area, 24.0)
try:
obj.no_rbac()
except RemoteError as exc:
msg = "AttributeError: method 'no_rbac' of"
logging.debug('msg: %s', msg)
logging.debug('exc: %s', exc)
self.assertTrue(msg in str(exc))
else:
self.fail('Expected RemoteError')
def test_2_model(self):
logging.debug('')
logging.debug('test_model')
factory = self.start_factory()
# Create model and run it.
box = factory.create(_MODULE + '.Box')
model = set_as_top(Model(box))
model.run()
# Check results.
for width in range(1, 2):
for height in range(1, 3):
for depth in range(1, 4):
case = model.recorders[0].cases.pop(0)
self.assertEqual(case.get_output('_pseudo_0'),
width * height * depth)
self.assertTrue(is_instance(model.box.parent, Assembly))
self.assertTrue(has_interface(model.box.parent, IComponent))
# Upcall to use parent to resolve sibling.
# At one time this caused proxy problems.
source = model.box.parent.source
self.assertEqual(source.width_in, 1.)
# Proxy resolution.
obj, path = get_closest_proxy(model, 'box.subcontainer.subvar')
self.assertEqual(obj, model.box)
self.assertEqual(path, 'subcontainer.subvar')
obj, path = get_closest_proxy(model, 'source.subcontainer.subvar')
self.assertEqual(obj, model.source.subcontainer)
self.assertEqual(path, 'subvar')
obj, path = get_closest_proxy(model.source.subcontainer, 'subvar')
self.assertEqual(obj, model.source.subcontainer)
self.assertEqual(path, 'subvar')
# Observable proxied type.
tmp = model.box.open_in_parent('tmp', 'w')
tmp.close()
os.remove('tmp')
# Cause server-side errors we can see.
try:
box.cause_parent_error1()
except RemoteError as exc:
msg = "AttributeError: 'Model' object has no attribute 'no_such_variable'"
logging.debug('msg: %s', msg)
logging.debug('exc: %s', exc)
self.assertTrue(msg in str(exc))
else:
self.fail('Expected RemoteError')
try:
box.cause_parent_error2()
except RemoteError as exc:
msg = "AttributeError: method 'get_trait' of"
logging.debug('msg: %s', msg)
logging.debug('exc: %s', exc)
self.assertTrue(msg in str(exc))
else:
self.fail('Expected RemoteError')
try:
box.cause_parent_error3()
except RemoteError as exc:
msg = "RoleError: xyzzy(): No access for role 'owner'"
logging.debug('msg: %s', msg)
logging.debug('exc: %s', exc)
self.assertTrue(msg in str(exc))
else:
self.fail('Expected RemoteError')
def test_3_access(self):
logging.debug('')
logging.debug('test_access')
# This 'spook' creation is only for testing.
# Normally the protector would run with regular credentials
# in effect at the proprietary site.
user = 'i am a spy@' + socket.gethostname()
key_pair = get_key_pair(user)
data = '\n'.join([user, '0', key_pair.publickey().exportKey()])
hash = hashlib.sha256(data).digest()
signature = key_pair.sign(hash, get_random_bytes)
spook = Credentials((data, signature, None))
credentials = get_credentials()
allowed_users = {credentials.user: credentials.public_key,
spook.user: spook.public_key}
factory = self.start_factory(allowed_users=allowed_users)
# Create model and run it.
saved = get_credentials()
set_credentials(spook)
box = factory.create(_MODULE + '.ProtectedBox',
allowed_users=allowed_users)
set_credentials(saved)
model = set_as_top(Model(box))
model.run()
# Check results.
for width in range(1, 2):
for height in range(1, 3):
for depth in range(1, 4):
case = model.recorders[0].cases.pop(0)
self.assertEqual(case.get_output('_pseudo_0'),
width * height * depth)
# Check access protections.
try:
i = model.box.secret
except RemoteError as exc:
msg = "RoleError: No __getattribute__ access to 'secret' by role 'user'"
logging.debug('msg: %s', msg)
logging.debug('exc: %s', exc)
self.assertTrue(msg in str(exc))
else:
self.fail('Expected RemoteError')
try:
model.box.proprietary_method()
except RemoteError as exc:
msg = "RoleError: proprietary_method(): No access for role 'user'"
logging.debug('msg: %s', msg)
logging.debug('exc: %s', exc)
self.assertTrue(msg in str(exc))
else:
self.fail('Expected RemoteError')
saved = get_credentials()
set_credentials(spook)
try:
i = model.box.secret
model.box.proprietary_method()
finally:
# Reset credentials to allow factory shutdown.
set_credentials(saved)
def test_4_authkey(self):
logging.debug('')
logging.debug('test_authkey')
factory = self.start_factory()
# Start server in non-public-key mode.
# Connections must have matching authkey,
# but data is sent in the clear!?
# This is standard multiprocessing behaviour.
authkey = 'password'
server_dir = 'Factory_authkey'
if os.path.exists(server_dir):
shutil.rmtree(server_dir, onerror=onerror)
os.mkdir(server_dir)
os.chdir(server_dir)
self.server_dirs.append(server_dir)
try:
logging.debug('starting server (authkey %s)...', authkey)
allowed_types = ['openmdao.main.test.test_distsim.Box']
server, server_cfg = start_server(authkey=authkey,
allowed_types=allowed_types,
timeout=30)
cfg = read_server_config(server_cfg)
address = cfg['address']
port = cfg['port']
key = cfg['key']
logging.debug('server address: %s', address)
logging.debug('server port: %s', port)
logging.debug('server tunnel: %s', cfg['tunnel'])
logging.debug('server key: %s', key)
finally:
os.chdir('..')
factory = None
try:
assert_raises(self, 'connect(address, port, pubkey=key)',
globals(), locals(), AuthenticationError,
'digest sent was rejected')
factory = connect(address, port, authkey=authkey)
logging.debug('factory: %r', factory)
# Create model and run it.
box = factory.create(_MODULE + '.Box')
model = set_as_top(Model(box))
model.run()
# Check results.
for width in range(1, 2):
for height in range(1, 3):
for depth in range(1, 4):
case = model.recorders[0].cases.pop(0)
self.assertEqual(case.get_output('_pseudo_0'),
width * height * depth)
finally:
if factory is not None:
factory.cleanup()
logging.debug('terminating server (authkey %s) pid %s',
authkey, server.pid)
server.terminate(timeout=10)
server = None
def test_5_misc(self):
logging.debug('')
logging.debug('test_misc')
factory = self.start_factory()
# Try using a server after being released, server never used before.
# This usually results in a "Can't connect" error, but sometimes gets a
# "Can't send" error, based on timing/proxying.
server = factory.create('')
factory.release(server)
msg1 = "Can't connect to server at"
msg2 = "Can't send to server at"
try:
reply = server.echo('hello')
except RuntimeError as exc:
if str(exc)[:len(msg1)] != msg1 and str(exc)[:len(msg2)] != msg2:
self.fail('Expected connect/send error, got %r' % exc)
else:
self.fail('Expected RuntimeError')
# Try using a server after being released, server has been used before.
# This usually results in a "Can't send" error, but sometimes gets a
# "Can't connect" error, based on timing/proxying.
server = factory.create('')
reply = server.echo('hello')
factory.release(server)
msg1 = "Can't send to server at"
msg2 = "Can't connect to server at"
try:
reply = server.echo('hello')
except RuntimeError as exc:
if str(exc)[:len(msg1)] != msg1 and str(exc)[:len(msg2)] != msg2:
self.fail('Expected send/connect error, got %r' % exc)
else:
self.fail('Expected RuntimeError')
# Try releasing a server twice. Depending on timing, this could
# result in a ValueError trying to identify the server to release or
# a RemoteError where the request can't be unpacked. The timing seems
# to be sensitive to AF_INET/AF_UNIX connection type.
server = factory.create('')
factory.release(server)
msg1 = "can't identify server "
msg2 = "RuntimeError: Can't decrypt/unpack request." \
" This could be the result of referring to a dead server."
try:
factory.release(server)
except ValueError as exc:
self.assertEqual(str(exc)[:len(msg1)], msg1)
except RemoteError as exc:
self.assertTrue(msg2 in str(exc))
else:
self.fail('Expected ValueError or RemoteError')
# Check false return of has_interface().
self.assertFalse(has_interface(factory, HasObjectives))
# Try to connect to wrong port (assuming junk_port isn't being used!)
address = socket.gethostname()
junk_port = 12345
assert_raises(self, 'connect(address, junk_port, pubkey=self.key)',
globals(), locals(), RuntimeError, "Can't connect to ")
# Unpickleable argument.
code = compile('3 + 4', '<string>', 'eval')
assert_raises(self, 'factory.echo(code)', globals(), locals(),
cPickle.PicklingError, "Can't pickle <type 'code'>")
# Server startup failure.
assert_raises(self, 'self.start_factory(port=0, allowed_users={})',
globals(), locals(), RuntimeError,
'Server startup failed')
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
# unittest.main()
# sys.argv.append('--cover-package=openmdao.main')
# sys.argv.append('--cover-erase')
import nose
nose.runmodule()
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from base import Updater
from elasticsearch import Elasticsearch, ConnectionError
class ElasticUpdater(Updater):
"""
Stores measurements in an Elasticsearch database
"""
def use_store(self):
"""
Opens a database to save data
"""
logging.info('Using Elasticsearch database')
self.db = Elasticsearch(
[self.settings.get('host', 'localhost:9200')],
)
try:
self.db.indices.create(index='mcp-watch', ignore=400) # may exist
except ConnectionError as feedback:
logging.error('- unable to connect')
raise
return self.db
def reset_store(self):
"""
Re-opens the database, ensuring the index exists
"""
logging.info('Resetting Elasticsearch database')
self.db = Elasticsearch(
[self.settings.get('host', 'localhost:9200')],
)
try:
self.db.indices.create(index='mcp-watch', ignore=400) # may exist
except ConnectionError as feedback:
logging.error('- unable to connect')
raise
return self.db
def update_summary_usage(self, items=[], region='dd-eu'):
"""
Updates summary usage records
:param items: new items to push to the database
:type items: ``list`` of ``list``
:param region: source of the information, e.g., 'dd-eu' or other region
:type region: ``str``
"""
if len(items) > 0:
headers = items.pop(0)
logging.debug("- headers: {}".format(headers))
updated = 0
for item in items:
if len(item[1]) < 1:
continue
measurement = {
"measurement": 'Summary usage',
"region": region,
"location": item[1],
"stamp": item[0],
"CPU Hours": int(item[2]),
"High Performance CPU Hours": int(item[3]),
"RAM Hours": int(item[4]),
"Storage Hours": int(item[5]),
"High Performance Storage Hours": int(item[6]),
"Economy Storage Hours": int(item[7]),
"Bandwidth In": int(item[8]),
"Bandwidth Out": int(item[9]),
"Sub-Admin Hours": float(item[10]),
"Network Hours": float(item[11]),
"Essentials Network Domain Hours": int(item[12]),
"Advanced Network Domain Hours": int(item[13]),
"VLAN Hours": int(item[14]),
"Public IP Hours": int(item[15]),
"Cloud Files Account Hours": float(item[16]),
"Cloud Files (GB Days)": int(item[17]),
"Software Units": int(item[18]),
"Essentials Client Days": int(item[19]),
"Advanced Client Days": int(item[20]),
"Enterprise Client Days": int(item[21]),
"Essentials Backups (GB)": int(item[22]),
"Advanced Backups (GB)": int(item[23]),
"Enterprise Backups (GB)": int(item[24]),
"Essentials Monitoring Hours": int(item[25]),
"Advanced Monitoring Hours": int(item[26]),
}
try:
result = self.db.index(index="mcp-watch",
doc_type='summary',
body=measurement)
updated += 1
except Exception as feedback:
logging.error('- unable to update elasticsearch')
logging.debug(feedback)
return
if updated:
logging.info(
"- stored {} measurements for {} in elasticsearch".format(
updated, region))
def update_detailed_usage(self, items=[], region='dd-eu'):
"""
Updates detailed usage records
:param items: new items to push to the database
:type items: ``list`` of ``list``
:param region: source of the information, e.g., 'dd-eu' or other region
:type region: ``str``
Note that headers can change dynamically, so it is important to map
them appropriately.
"""
if len(items) > 0:
headers = items.pop(0)
logging.debug("- headers: {}".format(headers))
updated = 0
for item in items:
if len(item[2]) < 1: # no type (e.g., total line)
continue
if item[ headers.index('CPU Count') ] > '0': # with CPU
measurement = {
"measurement": item[2],
"name": item[0],
"UUID": item[1],
"region": region,
"location": item[3],
"private_ip": item[4],
"status": item[5],
"stamp": item[ headers.index('End Time') ],
"duration": float(item[ headers.index('Duration (Hours)') ]),
"CPU": int(item[ headers.index('CPU Count') ]),
"RAM": int(item[ headers.index('RAM (GB)') ]),
"Storage": int(item[ headers.index('Storage (GB)') ]),
"HP Storage": int(item[ headers.index('High Performance Storage (GB)') ]),
"Eco Storage": int(item[ headers.index('Economy Storage (GB)') ]),
}
doc_type = 'detailed'
elif len(item[3]) > 0: # at some location
measurement = {
"measurement": item[2],
"name": item[0],
"UUID": item[1],
"region": region,
"location": item[3],
"stamp": item[ headers.index('End Time') ],
"duration": float(item[ headers.index('Duration (Hours)') ]),
}
doc_type = 'detailed-location'
else: # global
measurement = {
"measurement": item[2],
"name": item[0],
"UUID": item[1],
"stamp": item[ headers.index('End Time') ],
"duration": float(item[ headers.index('Duration (Hours)') ]),
}
doc_type = 'detailed-global'
try:
result = self.db.index(index="mcp-watch",
doc_type=doc_type,
body=measurement)
updated += 1
except Exception as feedback:
logging.error('- unable to update elasticsearch')
logging.debug(feedback)
return
if updated:
logging.info(
"- stored {} measurements for {} in elasticsearch".format(
updated, region))
def update_audit_log(self, items=[], region='dd-eu'):
"""
Updates audit log records
:param items: new items to push to the database
:type items: ``list`` of ``list``
:param region: source of the information, e.g., 'dd-eu' or other region
:type region: ``str``
"""
if len(items) > 0:
headers = items.pop(0)
logging.debug("- headers: {}".format(headers))
updated = 0
for item in items:
measurement = {
"measurement": 'Audit log',
"region": region,
"caller": item[2].lower().replace('.', ' '),
"department": item[3],
"custom-1": item[4],
"custom-2": item[5],
"type": item[6],
"name": item[7],
"action": item[8],
"details": item[9],
"status": item[10],
"stamp": item[1],
}
try:
result = self.db.index(index="mcp-watch",
doc_type='audit',
body=measurement)
updated += 1
except Exception as feedback:
logging.error('- unable to update elasticsearch')
logging.debug(feedback)
return
if updated:
logging.info(
"- stored {} measurements for {} in elasticsearch".format(
updated, region))
def on_servers(self, updates=[], region='dd-eu'):
"""
Signals the deployment, start or reboot of cloud servers
:param updates: description of new servers
:type updates: ``list`` of ``dict``
:param region: source of the information, e.g., 'dd-eu' or other region
:type region: ``str``
"""
updated = 0
for item in updates:
updated += 1
try:
result = self.db.index(index="mcp-watch",
doc_type='server',
body=item)
except Exception as feedback:
logging.error('- unable to update elasticsearch')
logging.debug(feedback)
return
if updated:
logging.info(
"- triggered {} updated to elasticsearch for {}".format(
updated, region))
else:
logging.info("- nothing to report to elasticsearch for {}".format(
region))
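# --- Hedged usage sketch (illustrative only, not part of the original module) --
# Each update_* method above ultimately issues one Elasticsearch index() call per
# record against the 'mcp-watch' index. The snippet below shows that raw call in
# isolation; the host and the document body are made-up values, and a reachable
# Elasticsearch instance is required to actually run it.
if __name__ == '__main__':
    db = Elasticsearch(['localhost:9200'])
    db.indices.create(index='mcp-watch', ignore=400)  # 400: index already exists
    db.index(index='mcp-watch',
             doc_type='audit',
             body={'measurement': 'Audit log',
                   'region': 'dd-eu',
                   'caller': 'jane doe',
                   'action': 'Deploy Server',
                   'stamp': '2016-07-14T08:15:00'})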
|
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import glob
import json
import os
import shutil
import tempfile
import time
import unittest
import uuid
from pyflink.common import ExecutionConfig, RestartStrategies
from pyflink.common.serialization import JsonRowDeserializationSchema
from pyflink.common.typeinfo import Types
from pyflink.datastream import (StreamExecutionEnvironment, CheckpointConfig,
CheckpointingMode, MemoryStateBackend, TimeCharacteristic)
from pyflink.datastream.connectors import FlinkKafkaConsumer
from pyflink.datastream.functions import SourceFunction
from pyflink.datastream.tests.test_util import DataStreamTestSinkFunction
from pyflink.find_flink_home import _find_flink_source_root
from pyflink.java_gateway import get_gateway
from pyflink.pyflink_gateway_server import on_windows
from pyflink.table import DataTypes, CsvTableSource, CsvTableSink, StreamTableEnvironment
from pyflink.testing.test_case_utils import PyFlinkTestCase, exec_insert_table
class StreamExecutionEnvironmentTests(PyFlinkTestCase):
def setUp(self):
self.env = StreamExecutionEnvironment.get_execution_environment()
self.test_sink = DataStreamTestSinkFunction()
def test_get_config(self):
execution_config = self.env.get_config()
self.assertIsInstance(execution_config, ExecutionConfig)
def test_get_set_parallelism(self):
self.env.set_parallelism(10)
parallelism = self.env.get_parallelism()
self.assertEqual(parallelism, 10)
def test_get_set_buffer_timeout(self):
self.env.set_buffer_timeout(12000)
timeout = self.env.get_buffer_timeout()
self.assertEqual(timeout, 12000)
def test_get_set_default_local_parallelism(self):
self.env.set_default_local_parallelism(8)
parallelism = self.env.get_default_local_parallelism()
self.assertEqual(parallelism, 8)
def test_set_get_restart_strategy(self):
self.env.set_restart_strategy(RestartStrategies.no_restart())
restart_strategy = self.env.get_restart_strategy()
self.assertEqual(restart_strategy, RestartStrategies.no_restart())
def test_add_default_kryo_serializer(self):
self.env.add_default_kryo_serializer(
"org.apache.flink.runtime.state.StateBackendTestBase$TestPojo",
"org.apache.flink.runtime.state.StateBackendTestBase$CustomKryoTestSerializer")
class_dict = self.env.get_config().get_default_kryo_serializer_classes()
self.assertEqual(class_dict,
{'org.apache.flink.runtime.state.StateBackendTestBase$TestPojo':
'org.apache.flink.runtime.state'
'.StateBackendTestBase$CustomKryoTestSerializer'})
def test_register_type_with_kryo_serializer(self):
self.env.register_type_with_kryo_serializer(
"org.apache.flink.runtime.state.StateBackendTestBase$TestPojo",
"org.apache.flink.runtime.state.StateBackendTestBase$CustomKryoTestSerializer")
class_dict = self.env.get_config().get_registered_types_with_kryo_serializer_classes()
self.assertEqual(class_dict,
{'org.apache.flink.runtime.state.StateBackendTestBase$TestPojo':
'org.apache.flink.runtime.state'
'.StateBackendTestBase$CustomKryoTestSerializer'})
def test_register_type(self):
self.env.register_type("org.apache.flink.runtime.state.StateBackendTestBase$TestPojo")
type_list = self.env.get_config().get_registered_pojo_types()
self.assertEqual(type_list,
['org.apache.flink.runtime.state.StateBackendTestBase$TestPojo'])
def test_get_set_max_parallelism(self):
self.env.set_max_parallelism(12)
parallelism = self.env.get_max_parallelism()
self.assertEqual(parallelism, 12)
def test_operation_chaining(self):
self.assertTrue(self.env.is_chaining_enabled())
self.env.disable_operator_chaining()
self.assertFalse(self.env.is_chaining_enabled())
def test_get_checkpoint_config(self):
checkpoint_config = self.env.get_checkpoint_config()
self.assertIsInstance(checkpoint_config, CheckpointConfig)
def test_get_set_checkpoint_interval(self):
self.env.enable_checkpointing(30000)
interval = self.env.get_checkpoint_interval()
self.assertEqual(interval, 30000)
def test_get_set_checkpointing_mode(self):
mode = self.env.get_checkpointing_mode()
self.assertEqual(mode, CheckpointingMode.EXACTLY_ONCE)
self.env.enable_checkpointing(30000, CheckpointingMode.AT_LEAST_ONCE)
mode = self.env.get_checkpointing_mode()
self.assertEqual(mode, CheckpointingMode.AT_LEAST_ONCE)
def test_get_state_backend(self):
state_backend = self.env.get_state_backend()
self.assertIsNone(state_backend)
def test_set_state_backend(self):
input_backend = MemoryStateBackend()
self.env.set_state_backend(input_backend)
output_backend = self.env.get_state_backend()
self.assertEqual(output_backend._j_memory_state_backend,
input_backend._j_memory_state_backend)
def test_get_set_stream_time_characteristic(self):
default_time_characteristic = self.env.get_stream_time_characteristic()
self.assertEqual(default_time_characteristic, TimeCharacteristic.ProcessingTime)
self.env.set_stream_time_characteristic(TimeCharacteristic.EventTime)
time_characteristic = self.env.get_stream_time_characteristic()
self.assertEqual(time_characteristic, TimeCharacteristic.EventTime)
@unittest.skip("Python API does not support DataStream now. refactor this test later")
def test_get_execution_plan(self):
tmp_dir = tempfile.gettempdir()
source_path = os.path.join(tmp_dir + '/streaming.csv')
tmp_csv = os.path.join(tmp_dir + '/streaming2.csv')
field_names = ["a", "b", "c"]
field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
t_env = StreamTableEnvironment.create(self.env)
csv_source = CsvTableSource(source_path, field_names, field_types)
t_env.register_table_source("Orders", csv_source)
t_env.register_table_sink(
"Results",
CsvTableSink(field_names, field_types, tmp_csv))
t_env.from_path("Orders").execute_insert("Results").wait()
plan = self.env.get_execution_plan()
json.loads(plan)
def test_execute(self):
tmp_dir = tempfile.gettempdir()
field_names = ['a', 'b', 'c']
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env = StreamTableEnvironment.create(self.env)
t_env.register_table_sink(
'Results',
CsvTableSink(field_names, field_types,
os.path.join('{}/{}.csv'.format(tmp_dir, round(time.time())))))
execution_result = exec_insert_table(
t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c']),
'Results')
self.assertIsNotNone(execution_result.get_job_id())
self.assertIsNotNone(execution_result.get_net_runtime())
self.assertEqual(len(execution_result.get_all_accumulator_results()), 0)
self.assertIsNone(execution_result.get_accumulator_result('accumulator'))
self.assertIsNotNone(str(execution_result))
def test_from_collection_without_data_types(self):
ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')])
ds.add_sink(self.test_sink)
self.env.execute("test from collection")
results = self.test_sink.get_results(True)
# The user did not specify data types for the input data, so the collected
# results should be in the same tuple format as the inputs.
expected = ["(1, 'Hi', 'Hello')", "(2, 'Hello', 'Hi')"]
results.sort()
expected.sort()
self.assertEqual(expected, results)
def test_from_collection_with_data_types(self):
ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')],
type_info=Types.ROW([Types.INT(),
Types.STRING(),
Types.STRING()]))
ds.add_sink(self.test_sink)
self.env.execute("test from collection")
results = self.test_sink.get_results(False)
# If the user specifies data types for the input data, the collected results should be in row format.
expected = ['1,Hi,Hello', '2,Hello,Hi']
results.sort()
expected.sort()
self.assertEqual(expected, results)
def test_add_custom_source(self):
custom_source = SourceFunction("org.apache.flink.python.util.MyCustomSourceFunction")
ds = self.env.add_source(custom_source, type_info=Types.ROW([Types.INT(), Types.STRING()]))
ds.add_sink(self.test_sink)
self.env.execute("test add custom source")
results = self.test_sink.get_results(False)
expected = ['3,Mike', '1,Marry', '4,Ted', '5,Jack', '0,Bob', '2,Henry']
results.sort()
expected.sort()
self.assertEqual(expected, results)
def test_read_text_file(self):
texts = ["Mike", "Marry", "Ted", "Jack", "Bob", "Henry"]
text_file_path = self.tempdir + '/text_file'
with open(text_file_path, 'a') as f:
for text in texts:
f.write(text)
f.write('\n')
ds = self.env.read_text_file(text_file_path)
ds.add_sink(self.test_sink)
self.env.execute("test read text file")
results = self.test_sink.get_results()
results.sort()
texts.sort()
self.assertEqual(texts, results)
def test_execute_async(self):
ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')],
type_info=Types.ROW(
[Types.INT(), Types.STRING(), Types.STRING()]))
ds.add_sink(self.test_sink)
job_client = self.env.execute_async("test execute async")
job_id = job_client.get_job_id()
self.assertIsNotNone(job_id)
execution_result = job_client.get_job_execution_result().result()
self.assertEqual(str(job_id), str(execution_result.get_job_id()))
def test_add_python_file(self):
import uuid
python_file_dir = os.path.join(self.tempdir, "python_file_dir_" + str(uuid.uuid4()))
os.mkdir(python_file_dir)
python_file_path = os.path.join(python_file_dir, "test_stream_dependency_manage_lib.py")
with open(python_file_path, 'w') as f:
f.write("def add_two(a):\n return a + 2")
def plus_two_map(value):
from test_stream_dependency_manage_lib import add_two
return add_two(value)
self.env.add_python_file(python_file_path)
ds = self.env.from_collection([1, 2, 3, 4, 5])
ds.map(plus_two_map).add_sink(self.test_sink)
self.env.execute("test add python file")
result = self.test_sink.get_results(True)
expected = ['3', '4', '5', '6', '7']
result.sort()
expected.sort()
self.assertEqual(expected, result)
def test_set_requirements_without_cached_directory(self):
import uuid
requirements_txt_path = os.path.join(self.tempdir, str(uuid.uuid4()))
with open(requirements_txt_path, 'w') as f:
f.write("cloudpickle==1.2.2")
self.env.set_python_requirements(requirements_txt_path)
def check_requirements(i):
import cloudpickle
assert os.path.abspath(cloudpickle.__file__).startswith(
os.environ['_PYTHON_REQUIREMENTS_INSTALL_DIR'])
return i
ds = self.env.from_collection([1, 2, 3, 4, 5])
ds.map(check_requirements).add_sink(self.test_sink)
self.env.execute("test set requirements without cache dir")
result = self.test_sink.get_results(True)
expected = ['1', '2', '3', '4', '5']
result.sort()
expected.sort()
self.assertEqual(expected, result)
def test_set_requirements_with_cached_directory(self):
import uuid
tmp_dir = self.tempdir
requirements_txt_path = os.path.join(tmp_dir, "requirements_txt_" + str(uuid.uuid4()))
with open(requirements_txt_path, 'w') as f:
f.write("python-package1==0.0.0")
requirements_dir_path = os.path.join(tmp_dir, "requirements_dir_" + str(uuid.uuid4()))
os.mkdir(requirements_dir_path)
package_file_name = "python-package1-0.0.0.tar.gz"
with open(os.path.join(requirements_dir_path, package_file_name), 'wb') as f:
import base64
# This base64 data is encoded from a python package file which includes a
# "python_package1" module. The module contains a "plus(a, b)" function.
# The base64 can be recomputed by the following code:
# base64.b64encode(open("python-package1-0.0.0.tar.gz", "rb").read()).decode("utf-8")
f.write(base64.b64decode(
"H4sICNefrV0C/2Rpc3QvcHl0aG9uLXBhY2thZ2UxLTAuMC4wLnRhcgDtmVtv2jAYhnPtX2H1CrRCY+ckI"
"XEx7axuUA11u5imyICTRc1JiVnHfv1MKKWjYxwKEdPehws7xkmUfH5f+3PyqfqWpa1cjG5EKFnLbOvfhX"
"FQTI3nOPPSdavS5Pa8nGMwy3Esi3ke9wyTObbnGNQxamBSKlFQavzUryG8ldG6frpbEGx4yNmDLMp/hPy"
"P8b+6fNN613vdP1z8XdteG3+ug/17/F3Hcw1qIv5H54NUYiyUaH2SRRllaYeytkl6IpEdujI2yH2XapCQ"
"wSRJRDHt0OveZa//uUfeZonUvUO5bHo+0ZcoVo9bMhFRvGx9H41kWj447aUsR0WUq+pui8arWKggK5Jli"
"wGOo/95q79ovXi6/nfyf246Dof/n078fT9KI+X77Xx6BP83bX4Xf5NxT7dz7toO/L8OxjKgeTwpG+KcDp"
"sdQjWFVJMipYI+o0MCk4X/t2UYtqI0yPabCHb3f861XcD/Ty/+Y5nLdCzT0dSPo/SmbKsf6un+b7KV+Ls"
"W4/D/OoC9w/930P9eGwM75//csrD+Q/6P/P/k9D/oX3988Wqw1bS/tf6tR+s/m3EG/ddBqXO9XKf15C8p"
"P9k4HZBtBgzZaVW5vrfKcj+W32W82ygEB9D/Xu9+4/qfP9L/rBv0X1v87yONKRX61/qfzwqjIDzIPTbv/"
"7or3/88i0H/tfBFW7s/s/avRInQH06ieEy7tDrQeYHUdRN7wP+n/vf62LOH/pld7f9xz7a5Pfufedy0oP"
"86iJI8KxStAq6yLC4JWdbbVbWRikR2z1ZGytk5vauW3QdnBFE6XqwmykazCesAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAOBw/AJw5CHBAFAAAA=="))
self.env.set_python_requirements(requirements_txt_path, requirements_dir_path)
def add_one(i):
from python_package1 import plus
return plus(i, 1)
ds = self.env.from_collection([1, 2, 3, 4, 5])
ds.map(add_one).add_sink(self.test_sink)
self.env.execute("test set requirements with cachd dir")
result = self.test_sink.get_results(True)
expected = ['2', '3', '4', '5', '6']
result.sort()
expected.sort()
self.assertEqual(expected, result)
def test_add_python_archive(self):
import uuid
import shutil
tmp_dir = self.tempdir
archive_dir_path = os.path.join(tmp_dir, "archive_" + str(uuid.uuid4()))
os.mkdir(archive_dir_path)
with open(os.path.join(archive_dir_path, "data.txt"), 'w') as f:
f.write("2")
archive_file_path = \
shutil.make_archive(os.path.dirname(archive_dir_path), 'zip', archive_dir_path)
self.env.add_python_archive(archive_file_path, "data")
def add_from_file(i):
with open("data/data.txt", 'r') as f:
return i + int(f.read())
ds = self.env.from_collection([1, 2, 3, 4, 5])
ds.map(add_from_file).add_sink(self.test_sink)
self.env.execute("test set python archive")
result = self.test_sink.get_results(True)
expected = ['3', '4', '5', '6', '7']
result.sort()
expected.sort()
self.assertEqual(expected, result)
@unittest.skipIf(on_windows(), "Symbolic link is not supported on Windows, skipping.")
def test_set_stream_env(self):
import sys
python_exec = sys.executable
tmp_dir = self.tempdir
python_exec_link_path = os.path.join(tmp_dir, "py_exec")
os.symlink(python_exec, python_exec_link_path)
self.env.set_python_executable(python_exec_link_path)
def check_python_exec(i):
import os
assert os.environ["python"] == python_exec_link_path
return i
ds = self.env.from_collection([1, 2, 3, 4, 5])
ds.map(check_python_exec).add_sink(self.test_sink)
self.env.execute("test set python executable")
result = self.test_sink.get_results(True)
expected = ['1', '2', '3', '4', '5']
result.sort()
expected.sort()
self.assertEqual(expected, result)
def test_add_jars(self):
# find kafka connector jars
flink_source_root = _find_flink_source_root()
jars_abs_path = flink_source_root + '/flink-connectors/flink-sql-connector-kafka'
specific_jars = glob.glob(jars_abs_path + '/target/flink*.jar')
specific_jars = ['file://' + specific_jar for specific_jar in specific_jars]
self.env.add_jars(*specific_jars)
source_topic = 'test_source_topic'
props = {'bootstrap.servers': 'localhost:9092', 'group.id': 'test_group'}
type_info = Types.ROW([Types.INT(), Types.STRING()])
# Test for kafka consumer
deserialization_schema = JsonRowDeserializationSchema.builder() \
.type_info(type_info=type_info).build()
# A ClassNotFoundException will be raised if the kafka connector is not added to the pipeline jars.
kafka_consumer = FlinkKafkaConsumer(source_topic, deserialization_schema, props)
self.env.add_source(kafka_consumer).print()
self.env.get_execution_plan()
def test_add_classpaths(self):
# find kafka connector jars
flink_source_root = _find_flink_source_root()
jars_abs_path = flink_source_root + '/flink-connectors/flink-sql-connector-kafka'
specific_jars = glob.glob(jars_abs_path + '/target/flink*.jar')
specific_jars = ['file://' + specific_jar for specific_jar in specific_jars]
self.env.add_classpaths(*specific_jars)
source_topic = 'test_source_topic'
props = {'bootstrap.servers': 'localhost:9092', 'group.id': 'test_group'}
type_info = Types.ROW([Types.INT(), Types.STRING()])
# Test for kafka consumer
deserialization_schema = JsonRowDeserializationSchema.builder() \
.type_info(type_info=type_info).build()
# It will raise a ClassNotFoundException if the kafka connector is not added to the
# pipeline classpaths.
kafka_consumer = FlinkKafkaConsumer(source_topic, deserialization_schema, props)
self.env.add_source(kafka_consumer).print()
self.env.get_execution_plan()
def test_generate_stream_graph_with_dependencies(self):
python_file_dir = os.path.join(self.tempdir, "python_file_dir_" + str(uuid.uuid4()))
os.mkdir(python_file_dir)
python_file_path = os.path.join(python_file_dir, "test_stream_dependency_manage_lib.py")
with open(python_file_path, 'w') as f:
f.write("def add_two(a):\n return a + 2")
self.env.add_python_file(python_file_path)
def plus_two_map(value):
from test_stream_dependency_manage_lib import add_two
return value[0], add_two(value[1])
def add_from_file(i):
with open("data/data.txt", 'r') as f:
return i[0], i[1] + int(f.read())
from_collection_source = self.env.from_collection([('a', 0), ('b', 0), ('c', 1), ('d', 1),
('e', 2)],
type_info=Types.ROW([Types.STRING(),
Types.INT()]))
from_collection_source.name("From Collection")
keyed_stream = from_collection_source.key_by(lambda x: x[1], key_type_info=Types.INT())
plus_two_map_stream = keyed_stream.map(plus_two_map).name("Plus Two Map").set_parallelism(3)
add_from_file_map = plus_two_map_stream.map(add_from_file).name("Add From File Map")
test_stream_sink = add_from_file_map.add_sink(self.test_sink).name("Test Sink")
test_stream_sink.set_parallelism(4)
archive_dir_path = os.path.join(self.tempdir, "archive_" + str(uuid.uuid4()))
os.mkdir(archive_dir_path)
with open(os.path.join(archive_dir_path, "data.txt"), 'w') as f:
f.write("3")
archive_file_path = \
shutil.make_archive(os.path.dirname(archive_dir_path), 'zip', archive_dir_path)
self.env.add_python_archive(archive_file_path, "data")
nodes = eval(self.env.get_execution_plan())['nodes']
# The StreamGraph should be as below:
# Source: From Collection -> _stream_key_by_map_operator -> _keyed_stream_values_operator ->
# Plus Two Map -> Add From File Map -> Sink: Test Sink.
# Source: From Collection and _stream_key_by_map_operator should have same parallelism.
self.assertEqual(nodes[0]['parallelism'], nodes[1]['parallelism'])
# _keyed_stream_values_operator and Plus Two Map should have the same parallelism.
self.assertEqual(nodes[3]['parallelism'], 3)
self.assertEqual(nodes[2]['parallelism'], nodes[3]['parallelism'])
# The ship_strategy for Source: From Collection and _stream_key_by_map_operator should be
# FORWARD
self.assertEqual(nodes[1]['predecessors'][0]['ship_strategy'], "FORWARD")
# The ship_strategy for _keyed_stream_values_operator and Plus Two Map should be
# FORWARD
self.assertEqual(nodes[3]['predecessors'][0]['ship_strategy'], "FORWARD")
# The parallelism of Sink: Test Sink should be 4
self.assertEqual(nodes[5]['parallelism'], 4)
env_config_with_dependencies = dict(get_gateway().jvm.org.apache.flink.python.util
.PythonConfigUtil.getEnvConfigWithDependencies(
self.env._j_stream_execution_environment).toMap())
# Make sure that user specified files and archives are correctly added.
self.assertIsNotNone(env_config_with_dependencies['python.files'])
self.assertIsNotNone(env_config_with_dependencies['python.archives'])
def tearDown(self) -> None:
self.test_sink.clear()
|
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' main.py '''
from __future__ import print_function
import argparse
import os
import signal
import sys
import tornado.httpserver
import tornado.ioloop
import tornado.web
from tornado.options import define
from tornado.httpclient import AsyncHTTPClient
import heron.tools.common.src.python.utils.config as common_config
import heron.common.src.python.utils.log as log
from heron.tools.tracker.src.python import constants
from heron.tools.tracker.src.python import handlers
from heron.tools.tracker.src.python import utils
from heron.tools.tracker.src.python.config import Config, STATEMGRS_KEY
from heron.tools.tracker.src.python.tracker import Tracker
Log = log.Log
class Application(tornado.web.Application):
""" Tornado server application """
def __init__(self, config):
AsyncHTTPClient.configure(None, defaults=dict(request_timeout=120.0))
self.tracker = Tracker(config)
self.tracker.synch_topologies()
tornadoHandlers = [
(r"/", handlers.MainHandler),
(r"/clusters", handlers.ClustersHandler, {"tracker":self.tracker}),
(r"/topologies", handlers.TopologiesHandler, {"tracker":self.tracker}),
(r"/topologies/states", handlers.StatesHandler, {"tracker":self.tracker}),
(r"/topologies/info", handlers.TopologyHandler, {"tracker":self.tracker}),
(r"/topologies/logicalplan", handlers.LogicalPlanHandler, {"tracker":self.tracker}),
(r"/topologies/config", handlers.TopologyConfigHandler, {"tracker":self.tracker}),
(r"/topologies/containerfiledata", handlers.ContainerFileDataHandler,
{"tracker":self.tracker}),
(r"/topologies/containerfiledownload", handlers.ContainerFileDownloadHandler,
{"tracker":self.tracker}),
(r"/topologies/containerfilestats",
handlers.ContainerFileStatsHandler, {"tracker":self.tracker}),
(r"/topologies/physicalplan", handlers.PhysicalPlanHandler, {"tracker":self.tracker}),
(r"/topologies/packingplan", handlers.PackingPlanHandler, {"tracker":self.tracker}),
# Deprecated. See https://github.com/apache/incubator-heron/issues/1754
(r"/topologies/executionstate", handlers.ExecutionStateHandler, {"tracker":self.tracker}),
(r"/topologies/schedulerlocation", handlers.SchedulerLocationHandler,
{"tracker":self.tracker}),
(r"/topologies/metadata", handlers.MetaDataHandler, {"tracker":self.tracker}),
(r"/topologies/runtimestate", handlers.RuntimeStateHandler, {"tracker":self.tracker}),
(r"/topologies/metrics", handlers.MetricsHandler, {"tracker":self.tracker}),
(r"/topologies/metricstimeline", handlers.MetricsTimelineHandler, {"tracker":self.tracker}),
(r"/topologies/metricsquery", handlers.MetricsQueryHandler, {"tracker":self.tracker}),
(r"/topologies/exceptions", handlers.ExceptionHandler, {"tracker":self.tracker}),
(r"/topologies/exceptionsummary", handlers.ExceptionSummaryHandler,
{"tracker":self.tracker}),
(r"/machines", handlers.MachinesHandler, {"tracker":self.tracker}),
(r"/topologies/pid", handlers.PidHandler, {"tracker":self.tracker}),
(r"/topologies/jstack", handlers.JstackHandler, {"tracker":self.tracker}),
(r"/topologies/jmap", handlers.JmapHandler, {"tracker":self.tracker}),
(r"/topologies/histo", handlers.MemoryHistogramHandler, {"tracker":self.tracker}),
(r"(.*)", handlers.DefaultHandler),
]
settings = dict(
debug=True,
serve_traceback=True,
static_path=os.path.dirname(__file__)
)
tornado.web.Application.__init__(self, tornadoHandlers, **settings)
Log.info("Tracker has started")
def stop(self):
self.tracker.stop_sync()
# pylint: disable=protected-access
class _HelpAction(argparse._HelpAction):
""" HelpAction """
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
# retrieve subparsers from parser
subparsers_actions = [
action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
# there will probably only be one subparser_action,
    # but better safe than sorry
for subparsers_action in subparsers_actions:
# get all subparsers and print help
for choice, subparser in list(subparsers_action.choices.items()):
print("Subparser '{}'".format(choice))
print(subparser.format_help())
parser.exit()
# pylint: disable=bad-super-call
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
""" Subcommand help formatter """
def _format_action(self, action):
parts = super(argparse.RawDescriptionHelpFormatter, self)._format_action(action)
if action.nargs == argparse.PARSER:
parts = "\n".join(parts.split("\n")[1:])
return parts
def add_titles(parser):
""" add titles """
parser._positionals.title = "Required arguments"
parser._optionals.title = "Optional arguments"
return parser
def add_arguments(parser):
""" add arguments """
default_config_file = os.path.join(
utils.get_heron_tracker_conf_dir(), constants.DEFAULT_CONFIG_FILE)
parser.add_argument(
'--config-file',
metavar='(a string; path to config file; default: "' + default_config_file + '")',
default=default_config_file)
parser.add_argument(
'--type',
      metavar='(a string; type of state manager (zookeeper or file, etc.); example: ' \
+ str(constants.DEFAULT_STATE_MANAGER_TYPE) + ')',
choices=["file", "zookeeper"])
parser.add_argument(
'--name',
      metavar='(a string; name to be used for the state manager; example: ' \
+ str(constants.DEFAULT_STATE_MANAGER_NAME) + ')')
parser.add_argument(
'--rootpath',
      metavar='(a string; where all the states are stored; example: ' \
+ str(constants.DEFAULT_STATE_MANAGER_ROOTPATH) + ')')
parser.add_argument(
'--tunnelhost',
      metavar='(a string; if ssh tunneling needs to be established to connect to it; example: ' \
+ str(constants.DEFAULT_STATE_MANAGER_TUNNELHOST) + ')')
parser.add_argument(
'--hostport',
      metavar='(a string; only used to connect to zk, must be of the form \'host:port\';'\
' example: ' + str(constants.DEFAULT_STATE_MANAGER_HOSTPORT) + ')')
parser.add_argument(
'--port',
      metavar='(an integer; port to listen on; default: ' + str(constants.DEFAULT_PORT) + ')',
type=int,
default=constants.DEFAULT_PORT)
parser.add_argument(
'--verbose',
action='store_true')
return parser
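# An illustrative invocation of the flags defined above (values are made up,
# shown only to document how the options combine):
#   heron-tracker --type zookeeper --hostport localhost:2181 --port 8888
# Any flag that is omitted falls back to the config file / built-in defaults.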
def create_parsers():
""" create argument parser """
parser = argparse.ArgumentParser(
epilog='For detailed documentation, go to http://github.com/apache/incubator-heron',
usage="%(prog)s [options] [help]",
add_help=False)
parser = add_titles(parser)
parser = add_arguments(parser)
ya_parser = argparse.ArgumentParser(
parents=[parser],
formatter_class=SubcommandHelpFormatter,
add_help=False)
subparsers = ya_parser.add_subparsers(
title="Available commands")
help_parser = subparsers.add_parser(
'help',
help='Prints help',
add_help=False)
help_parser.set_defaults(help=True)
subparsers.add_parser(
'version',
help='Prints version',
add_help=True)
return parser, ya_parser
def define_options(port, config_file):
""" define Tornado global variables """
define("port", default=port)
define("config_file", default=config_file)
def create_tracker_config(namespace):
# try to parse the config file if we find one
config_file = namespace["config_file"]
config = utils.parse_config_file(config_file)
if config is None:
    Log.debug("Config file does not exist: %s" % config_file)
config = {STATEMGRS_KEY:[{}]}
# update the config if we have any flags
config_flags = ["type", "name", "rootpath", "tunnelhost", "hostport"]
config_to_update = config[STATEMGRS_KEY][0]
for flag in config_flags:
value = namespace.get(flag, None)
if value is not None:
config_to_update[flag] = value
return config
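# Sketch of the resulting dict (purely illustrative values): running with
# `--type zookeeper --hostport zk:2181` and no config file would yield
#   {STATEMGRS_KEY: [{"type": "zookeeper", "hostport": "zk:2181"}]}
# i.e. the command-line flags are layered onto the first statemgr entry.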
def main():
""" main """
# create the parser and parse the arguments
(parser, _) = create_parsers()
(args, remaining) = parser.parse_known_args()
if remaining == ['help']:
parser.print_help()
parser.exit()
elif remaining == ['version']:
common_config.print_build_info()
parser.exit()
elif remaining != []:
Log.error('Invalid subcommand')
sys.exit(1)
namespace = vars(args)
log.set_logging_level(namespace)
# set Tornado global option
define_options(namespace['port'], namespace['config_file'])
config = Config(create_tracker_config(namespace))
# create Tornado application
application = Application(config)
# pylint: disable=unused-argument
# SIGINT handler:
# 1. stop all the running zkstatemanager and filestatemanagers
# 2. stop the Tornado IO loop
def signal_handler(signum, frame):
# start a new line after ^C character because this looks nice
print('\n', end='')
application.stop()
tornado.ioloop.IOLoop.instance().stop()
# associate SIGINT and SIGTERM with a handler
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
Log.info("Running on port: %d", namespace['port'])
if namespace["config_file"]:
Log.info("Using config file: %s", namespace['config_file'])
Log.info("Using state manager:\n" + str(config))
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(namespace['port'])
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
|
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
import mock
import requests
from six.moves import http_client
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
def _make_response(status=http_client.OK, content=b'', headers={}):
response = requests.Response()
response.status_code = status
response._content = content
response.headers = headers
response.request = requests.Request()
return response
def _make_json_response(data, status=http_client.OK, headers=None):
headers = headers or {}
headers['Content-Type'] = 'application/json'
return _make_response(
status=status,
content=json.dumps(data).encode('utf-8'),
headers=headers)
def _make_requests_session(responses):
session = mock.create_autospec(requests.Session, instance=True)
session.request.side_effect = responses
return session
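# Illustrative sketch of how the helpers above compose (mirrors
# test_get_bucket_miss below): build a session whose single canned reply is a
# JSON 404 and install it on the client under test.
#
#   http = _make_requests_session(
#       [_make_json_response({}, status=http_client.NOT_FOUND)])
#   client._http_internal = http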
class TestClient(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage.client import Client
return Client
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor_connection_type(self):
from google.cloud.storage._http import Connection
PROJECT = 'PROJECT'
CREDENTIALS = _make_credentials()
client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
self.assertEqual(client.project, PROJECT)
self.assertIsInstance(client._connection, Connection)
self.assertIs(client._connection.credentials, CREDENTIALS)
self.assertIsNone(client.current_batch)
self.assertEqual(list(client._batch_stack), [])
def test__push_batch_and__pop_batch(self):
from google.cloud.storage.batch import Batch
PROJECT = 'PROJECT'
CREDENTIALS = _make_credentials()
client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
batch1 = Batch(client)
batch2 = Batch(client)
client._push_batch(batch1)
self.assertEqual(list(client._batch_stack), [batch1])
self.assertIs(client.current_batch, batch1)
client._push_batch(batch2)
self.assertIs(client.current_batch, batch2)
# list(_LocalStack) returns in reverse order.
self.assertEqual(list(client._batch_stack), [batch2, batch1])
self.assertIs(client._pop_batch(), batch2)
self.assertEqual(list(client._batch_stack), [batch1])
self.assertIs(client._pop_batch(), batch1)
self.assertEqual(list(client._batch_stack), [])
def test__connection_setter(self):
PROJECT = 'PROJECT'
CREDENTIALS = _make_credentials()
client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
client._base_connection = None # Unset the value from the constructor
client._connection = connection = object()
self.assertIs(client._base_connection, connection)
def test__connection_setter_when_set(self):
PROJECT = 'PROJECT'
CREDENTIALS = _make_credentials()
client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
self.assertRaises(ValueError, setattr, client, '_connection', None)
def test__connection_getter_no_batch(self):
PROJECT = 'PROJECT'
CREDENTIALS = _make_credentials()
client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
self.assertIs(client._connection, client._base_connection)
self.assertIsNone(client.current_batch)
def test__connection_getter_with_batch(self):
from google.cloud.storage.batch import Batch
PROJECT = 'PROJECT'
CREDENTIALS = _make_credentials()
client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
batch = Batch(client)
client._push_batch(batch)
self.assertIsNot(client._connection, client._base_connection)
self.assertIs(client._connection, batch)
self.assertIs(client.current_batch, batch)
def test_bucket(self):
from google.cloud.storage.bucket import Bucket
PROJECT = 'PROJECT'
CREDENTIALS = _make_credentials()
BUCKET_NAME = 'BUCKET_NAME'
client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
bucket = client.bucket(BUCKET_NAME)
self.assertIsInstance(bucket, Bucket)
self.assertIs(bucket.client, client)
self.assertEqual(bucket.name, BUCKET_NAME)
def test_batch(self):
from google.cloud.storage.batch import Batch
PROJECT = 'PROJECT'
CREDENTIALS = _make_credentials()
client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
batch = client.batch()
self.assertIsInstance(batch, Batch)
self.assertIs(batch._client, client)
def test_get_bucket_miss(self):
from google.cloud.exceptions import NotFound
PROJECT = 'PROJECT'
CREDENTIALS = _make_credentials()
client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
NONESUCH = 'nonesuch'
URI = '/'.join([
client._connection.API_BASE_URL,
'storage',
client._connection.API_VERSION,
'b',
'nonesuch?projection=noAcl',
])
http = _make_requests_session([
_make_json_response({}, status=http_client.NOT_FOUND)])
client._http_internal = http
with self.assertRaises(NotFound):
client.get_bucket(NONESUCH)
http.request.assert_called_once_with(
method='GET', url=URI, data=mock.ANY, headers=mock.ANY)
def test_get_bucket_hit(self):
from google.cloud.storage.bucket import Bucket
PROJECT = 'PROJECT'
CREDENTIALS = _make_credentials()
client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
BLOB_NAME = 'blob-name'
URI = '/'.join([
client._connection.API_BASE_URL,
'storage',
client._connection.API_VERSION,
'b',
'%s?projection=noAcl' % (BLOB_NAME,),
])
data = {'name': BLOB_NAME}
http = _make_requests_session([_make_json_response(data)])
client._http_internal = http
bucket = client.get_bucket(BLOB_NAME)
self.assertIsInstance(bucket, Bucket)
self.assertEqual(bucket.name, BLOB_NAME)
http.request.assert_called_once_with(
method='GET', url=URI, data=mock.ANY, headers=mock.ANY)
def test_lookup_bucket_miss(self):
PROJECT = 'PROJECT'
CREDENTIALS = _make_credentials()
client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
NONESUCH = 'nonesuch'
URI = '/'.join([
client._connection.API_BASE_URL,
'storage',
client._connection.API_VERSION,
'b',
'nonesuch?projection=noAcl',
])
http = _make_requests_session([
_make_json_response({}, status=http_client.NOT_FOUND)])
client._http_internal = http
bucket = client.lookup_bucket(NONESUCH)
self.assertIsNone(bucket)
http.request.assert_called_once_with(
method='GET', url=URI, data=mock.ANY, headers=mock.ANY)
def test_lookup_bucket_hit(self):
from google.cloud.storage.bucket import Bucket
PROJECT = 'PROJECT'
CREDENTIALS = _make_credentials()
client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
BLOB_NAME = 'blob-name'
URI = '/'.join([
client._connection.API_BASE_URL,
'storage',
client._connection.API_VERSION,
'b',
'%s?projection=noAcl' % (BLOB_NAME,),
])
data = {'name': BLOB_NAME}
http = _make_requests_session([_make_json_response(data)])
client._http_internal = http
bucket = client.lookup_bucket(BLOB_NAME)
self.assertIsInstance(bucket, Bucket)
self.assertEqual(bucket.name, BLOB_NAME)
http.request.assert_called_once_with(
method='GET', url=URI, data=mock.ANY, headers=mock.ANY)
def test_create_bucket_conflict(self):
from google.cloud.exceptions import Conflict
PROJECT = 'PROJECT'
CREDENTIALS = _make_credentials()
client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
BLOB_NAME = 'blob-name'
URI = '/'.join([
client._connection.API_BASE_URL,
'storage',
client._connection.API_VERSION,
'b?project=%s' % (PROJECT,),
])
data = {'error': {'message': 'Conflict'}}
http = _make_requests_session([
_make_json_response(data, status=http_client.CONFLICT)])
client._http_internal = http
self.assertRaises(Conflict, client.create_bucket, BLOB_NAME)
http.request.assert_called_once_with(
method='POST', url=URI, data=mock.ANY, headers=mock.ANY)
def test_create_bucket_success(self):
from google.cloud.storage.bucket import Bucket
PROJECT = 'PROJECT'
CREDENTIALS = _make_credentials()
client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
BLOB_NAME = 'blob-name'
URI = '/'.join([
client._connection.API_BASE_URL,
'storage',
client._connection.API_VERSION,
'b?project=%s' % (PROJECT,),
])
data = {'name': BLOB_NAME}
http = _make_requests_session([_make_json_response(data)])
client._http_internal = http
bucket = client.create_bucket(BLOB_NAME)
self.assertIsInstance(bucket, Bucket)
self.assertEqual(bucket.name, BLOB_NAME)
http.request.assert_called_once_with(
method='POST', url=URI, data=mock.ANY, headers=mock.ANY)
def test_list_buckets_empty(self):
from six.moves.urllib.parse import parse_qs
from six.moves.urllib.parse import urlparse
PROJECT = 'PROJECT'
CREDENTIALS = _make_credentials()
client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
http = _make_requests_session([_make_json_response({})])
client._http_internal = http
buckets = list(client.list_buckets())
self.assertEqual(len(buckets), 0)
http.request.assert_called_once_with(
method='GET', url=mock.ANY, data=mock.ANY, headers=mock.ANY)
requested_url = http.request.mock_calls[0][2]['url']
expected_base_url = '/'.join([
client._connection.API_BASE_URL,
'storage',
client._connection.API_VERSION,
'b',
])
self.assertTrue(requested_url.startswith(expected_base_url))
expected_query = {
'project': [PROJECT],
'projection': ['noAcl'],
}
uri_parts = urlparse(requested_url)
self.assertEqual(parse_qs(uri_parts.query), expected_query)
def test_list_buckets_non_empty(self):
PROJECT = 'PROJECT'
CREDENTIALS = _make_credentials()
client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
BUCKET_NAME = 'bucket-name'
data = {'items': [{'name': BUCKET_NAME}]}
http = _make_requests_session([_make_json_response(data)])
client._http_internal = http
buckets = list(client.list_buckets())
self.assertEqual(len(buckets), 1)
self.assertEqual(buckets[0].name, BUCKET_NAME)
http.request.assert_called_once_with(
method='GET', url=mock.ANY, data=mock.ANY, headers=mock.ANY)
def test_list_buckets_all_arguments(self):
from six.moves.urllib.parse import parse_qs
from six.moves.urllib.parse import urlparse
PROJECT = 'foo-bar'
CREDENTIALS = _make_credentials()
client = self._make_one(project=PROJECT, credentials=CREDENTIALS)
MAX_RESULTS = 10
PAGE_TOKEN = 'ABCD'
PREFIX = 'subfolder'
PROJECTION = 'full'
FIELDS = 'items/id,nextPageToken'
data = {'items': []}
http = _make_requests_session([_make_json_response(data)])
client._http_internal = http
iterator = client.list_buckets(
max_results=MAX_RESULTS,
page_token=PAGE_TOKEN,
prefix=PREFIX,
projection=PROJECTION,
fields=FIELDS,
)
buckets = list(iterator)
self.assertEqual(buckets, [])
http.request.assert_called_once_with(
method='GET', url=mock.ANY, data=mock.ANY, headers=mock.ANY)
requested_url = http.request.mock_calls[0][2]['url']
expected_base_url = '/'.join([
client._connection.API_BASE_URL,
'storage',
client._connection.API_VERSION,
'b',
])
self.assertTrue(requested_url.startswith(expected_base_url))
expected_query = {
'project': [PROJECT],
'maxResults': [str(MAX_RESULTS)],
'pageToken': [PAGE_TOKEN],
'prefix': [PREFIX],
'projection': [PROJECTION],
'fields': [FIELDS],
}
uri_parts = urlparse(requested_url)
self.assertEqual(parse_qs(uri_parts.query), expected_query)
def test_page_empty_response(self):
from google.api.core import page_iterator
project = 'PROJECT'
credentials = _make_credentials()
client = self._make_one(project=project, credentials=credentials)
iterator = client.list_buckets()
page = page_iterator.Page(iterator, (), None)
iterator._page = page
self.assertEqual(list(page), [])
def test_page_non_empty_response(self):
import six
from google.cloud.storage.bucket import Bucket
project = 'PROJECT'
credentials = _make_credentials()
client = self._make_one(project=project, credentials=credentials)
blob_name = 'blob-name'
response = {'items': [{'name': blob_name}]}
def dummy_response():
return response
iterator = client.list_buckets()
iterator._get_next_page_response = dummy_response
page = six.next(iterator.pages)
self.assertEqual(page.num_items, 1)
bucket = six.next(page)
self.assertEqual(page.remaining, 0)
self.assertIsInstance(bucket, Bucket)
self.assertEqual(bucket.name, blob_name)
|
|
"""Logic expressions handling
NOTE
----
at present this is mainly needed for facts.py; feel free, however, to improve
it for general use.
"""
from sympy.core.compatibility import iterable, cmp
def fuzzy_bool(x):
"""Return True, False or None according to x.
Whereas bool(x) returns True or False, fuzzy_bool allows
for the None value.
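    A small illustrative example (import path as in the ``name_not`` doctest
    below):
    >>> from sympy.core.logic import fuzzy_bool
    >>> fuzzy_bool(True), fuzzy_bool(0), fuzzy_bool(None)
    (True, False, None)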
"""
if x is None:
return None
return bool(x)
def fuzzy_and(*args):
"""Return True (all True), False (any False) or None.
    If a single iterable argument is passed, it must have 2 or more elements."""
if (len(args) == 1 and iterable(args[0]) or
len(args) > 2):
if len(args) == 1:
args = args[0]
rv = True
i = 0
for ai in args:
ai = fuzzy_bool(ai)
if ai is False:
return False
if rv: # this will stop updating if a None is ever trapped
rv = ai
i += 1
if i < 2:
raise ValueError('iterables must have 2 or more elements')
return rv
a, b = [fuzzy_bool(i) for i in args]
if a is True and b is True:
return True
elif a is False or b is False:
return False
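# Illustrative behaviour (follows directly from the code above):
#   fuzzy_and(True, True)          -> True
#   fuzzy_and(True, None)          -> None   (falls through without a return)
#   fuzzy_and([True, None, False]) -> False  (any False wins, even with a None)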
def fuzzy_not(v):
"""'not' in fuzzy logic"""
if v is None:
return v
else:
return not v
def name_not(k):
"""negate a name
>>> from sympy.core.logic import name_not
>>> name_not('zero')
'!zero'
>>> name_not('!zero')
'zero'
"""
if k[:1] != '!':
return '!'+k
else:
return k[1:]
class Logic(object):
"""Logical expression"""
__slots__ = ['args']
# {} 'op' -> LogicClass
op_2class = {}
def __new__(cls, args):
obj = object.__new__(cls)
obj.args = tuple(args)
# XXX do we need this:
#print 'L: %s' % (obj.args,)
assert not isinstance(obj.args[0], tuple)
return obj
def __hash__(self):
return hash( (type(self).__name__, self.args) )
def __eq__(a, b):
if not isinstance(b, type(a)):
return False
else:
return a.args == b.args
def __ne__(a, b):
if not isinstance(b, type(a)):
return True
else:
return a.args != b.args
def __lt__(cls, other):
if cls.__cmp__(other) == -1:
return True
return False
def __cmp__(a, b):
if type(a) is not type(b):
return cmp( str(type(a)), str(type(b)) )
else:
return cmp(a.args, b.args)
def __str__(self):
return '%s(%s)' % (self.op, ', '.join(str(a) for a in self.args))
__repr__ = __str__
@staticmethod
def fromstring(text):
"""Logic from string
e.g.
!a & !b | c
"""
# XXX this is not general, but good enough
terms = text.split()
lexpr = None # current logical expression
schedop = None # scheduled operation
while True:
# pop next term and exit loop if there is no terms left
try:
term = terms.pop(0)
except IndexError:
break
# operation symbol
if term in '&|':
if schedop is not None:
raise ValueError('double op forbidden: "%s %s"' % (term, schedop))
if lexpr is None:
raise ValueError('%s cannot be in the beginning of expression' % term)
schedop = term
continue
# already scheduled operation, e.g. '&'
if schedop:
lexpr = Logic.op_2class[schedop] ( *(lexpr, term) )
schedop = None
continue
# this should be atom
if lexpr is not None:
raise ValueError('missing op between "%s" and "%s"' % (lexpr, term))
lexpr = term
# let's check that we ended up in correct state
if schedop is not None:
raise ValueError('premature end-of-expression in "%s"' % text)
if lexpr is None:
raise ValueError('"%s" is empty' % text)
# everything looks good now
return lexpr
# XXX better name?
class AndOr_Base(Logic):
__slots__ = []
def __new__(cls, *args):
if len(args) == 0:
raise TypeError('%s requires at least one argument' % cls.__name__)
# process bool args early
bargs = []
for a in args:
# &(F, ...) -> F
# |(T, ...) -> T
if a == cls.op_x_notx:
return a
# &(T, ...) -> &(...)
# |(F, ...) -> |(...)
elif a == (not cls.op_x_notx):
continue # skip this argument
bargs.append(a)
args = bargs
# &(a, !a) -> F
# |(a, !a) -> T
        # XXX suboptimal
for a in args:
if Not(a) in args:
return cls.op_x_notx
args = cls.flatten(args)
# canonicalize arguments
# XXX do we always need this?
# NB: this is needed to reduce number of &-nodes in beta-network
args = sorted(args, key=hash)
# now let's kill duplicate arguments, e.g. &(a,a,b) -> &(a,b)
prev = None
        uargs = []
for a in args:
if a != prev:
uargs.append(a)
prev = a
args = uargs
# &(a) -> a
# |(a) -> a
if len(args) == 1:
return args[0]
# when we are at this stage, it means that _all_ arguments were T/F and
# all arguments were accepted as "let's see what follows next", so at
# _this_ point the rule is:
# |() -> F (*not* T)
# &() -> T (*not* F)
elif len(args) == 0:
return not cls.op_x_notx
return Logic.__new__(cls, args)
@classmethod
def flatten(cls, args):
# quick-n-dirty flattening for And and Or
args_queue = list(args)
res = []
while True:
try:
arg = args_queue.pop(0)
except IndexError:
break
if isinstance(arg, Logic):
if arg.op == cls.op:
#print 'flattening...', fargs, i, arg.args
args_queue.extend( arg.args )
continue
# another op -- leave it as is
res.append( arg )
args = tuple(res)
return args
expand_lvl = 0
class And(AndOr_Base):
op = '&'
op_x_notx = False
__slots__ = []
def _eval_propagate_not(self):
# !(a&b&c ...) == !a | !b | !c ...
return Or( *[Not(a) for a in self.args] )
# (a|b|...) & c == (a&c) | (b&c) | ...
def expand(self):
# first locate Or
for i in range(len(self.args)):
arg = self.args[i]
if isinstance(arg, Or):
arest = self.args[:i] + self.args[i+1:]
orterms = [And( *(arest + (a,)) ) for a in arg.args]
for j in range(len(orterms)):
if isinstance(orterms[j], Logic):
orterms[j] = orterms[j].expand()
res = Or(*orterms)
return res
else:
return self
def dbg_expand(self):
global expand_lvl
        print('%sexpand %s' % (' '*expand_lvl, self))
expand_lvl += 1
try:
return self.old_expand()
finally:
expand_lvl -= 1
#old_expand = expand
#expand = dbg_expand
class Or(AndOr_Base):
op = '|'
op_x_notx = True
__slots__ = []
def _eval_propagate_not(self):
# !(a|b|c ...) == !a & !b & !c ...
return And( *[Not(a) for a in self.args] )
class Not(Logic):
op = '!'
__slots__ = []
def __new__(cls, arg):
if isinstance(arg, str):
return name_not(arg)
elif isinstance(arg, bool):
return not arg
elif isinstance(arg, Logic):
# XXX this is a hack to expand right from the beginning
arg = arg._eval_propagate_not()
return arg
obj = Logic.__new__(cls, (arg,))
return obj
else:
raise ValueError('Not: unknown argument %r' % (arg,))
Logic.op_2class['&'] = And
Logic.op_2class['|'] = Or
Logic.op_2class['!'] = Not
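# Minimal usage sketch (names purely illustrative): Logic.fromstring parses a
# whitespace-separated expression into the nodes above via op_2class, e.g.
#   expr = Logic.fromstring('!a & b')   # an And node over '!a' and 'b'
#   Not('!a')                           # -> 'a', negated textually by name_not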
|
|
'''Screen Manager
==============
.. versionadded:: 1.4.0
The screen manager is a widget dedicated to managing multiple screens for your
application. The default :class:`ScreenManager` displays only one
:class:`Screen` at a time and uses a :class:`TransitionBase` to switch from one
Screen to another.
Multiple transitions are supported based on changing the screen coordinates /
scale or even performing fancy animation using custom shaders.
Basic Usage
-----------
Let's construct a Screen Manager with 4 named screens. When you are creating
a screen, **you absolutely need to give a name to it**::
from kivy.uix.screenmanager import ScreenManager, Screen
# Create the manager
sm = ScreenManager()
# Add few screens
for i in range(4):
screen = Screen(name='Title %d' % i)
sm.add_widget(screen)
# By default, the first screen added into the ScreenManager will be
# displayed. You can then change to another screen.
# Let's display the screen named 'Title 2'
# A transition will automatically be used.
sm.current = 'Title 2'
The default :attr:`ScreenManager.transition` is a :class:`SlideTransition` with
options :attr:`~SlideTransition.direction` and
:attr:`~TransitionBase.duration`.
Please note that by default, a :class:`Screen` displays nothing: it's just a
:class:`~kivy.uix.relativelayout.RelativeLayout`. You need to use that class as
a root widget for your own screen, the best way being to subclass.
.. warning::
As :class:`Screen` is a :class:`~kivy.uix.relativelayout.RelativeLayout`,
it is important to understand the
:ref:`kivy-uix-relativelayout-common-pitfalls`.
Here is an example with a 'Menu Screen' and a 'Settings Screen'::
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
# Create both screens. Please note the root.manager.current: this is how
# you can control the ScreenManager from kv. Each screen has by default a
# property manager that gives you the instance of the ScreenManager used.
Builder.load_string("""
<MenuScreen>:
BoxLayout:
Button:
text: 'Goto settings'
on_press: root.manager.current = 'settings'
Button:
text: 'Quit'
<SettingsScreen>:
BoxLayout:
Button:
text: 'My settings button'
Button:
text: 'Back to menu'
on_press: root.manager.current = 'menu'
""")
# Declare both screens
class MenuScreen(Screen):
pass
class SettingsScreen(Screen):
pass
# Create the screen manager
sm = ScreenManager()
sm.add_widget(MenuScreen(name='menu'))
sm.add_widget(SettingsScreen(name='settings'))
class TestApp(App):
def build(self):
return sm
if __name__ == '__main__':
TestApp().run()
Changing Direction
------------------
A common use case for :class:`ScreenManager` involves using a
:class:`SlideTransition` which slides right to the next screen
and slides left to the previous screen. Building on the previous
example, this can be accomplished like so::
Builder.load_string("""
<MenuScreen>:
BoxLayout:
Button:
text: 'Goto settings'
on_press:
root.manager.transition.direction = 'left'
root.manager.current = 'settings'
Button:
text: 'Quit'
    <SettingsScreen>:
BoxLayout:
Button:
text: 'My settings button'
Button:
text: 'Back to menu'
on_press:
root.manager.transition.direction = 'right'
root.manager.current = 'menu'
""")
Advanced Usage
--------------
From 1.8.0, you can now switch dynamically to a new screen, change the
transition options and remove the previous one by using
:meth:`~ScreenManager.switch_to`::
sm = ScreenManager()
screens = [Screen(name='Title {}'.format(i)) for i in range(4)]
sm.switch_to(screens[0])
# later
sm.switch_to(screens[1], direction='right')
Note that this method adds the screen to the :class:`ScreenManager` instance
and should not be used if your screens have already been added to this
instance. To switch to a screen which is already added, you should use the
:attr:`~ScreenManager.current` property.
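For example (a small sketch, reusing the ``screens`` list created above)::
    # the screen is already managed, so just set `current`
    sm.current = screens[2].name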
Changing transitions
--------------------
You have multiple transitions available by default, such as:
- :class:`NoTransition` - switches screens instantly with no animation
- :class:`SlideTransition` - slide the screen in/out, from any direction
- :class:`SwapTransition` - implementation of the iOS swap transition
- :class:`FadeTransition` - shader to fade the screen in/out
- :class:`WipeTransition` - shader to wipe the screens from right to left
- :class:`FallOutTransition` - shader where the old screen 'falls' and
becomes transparent, revealing the new one behind it.
- :class:`RiseInTransition` - shader where the new screen rises from the
screen centre while fading from transparent to opaque.
You can easily switch transitions by changing the
:attr:`ScreenManager.transition` property::
sm = ScreenManager(transition=FadeTransition())
.. note::
    Currently, none of the Shader-based Transitions use
anti-aliasing. This is because they use the FBO which doesn't have
any logic to handle supersampling. This is a known issue and we
are working on a transparent implementation that will give the
same results as if it had been rendered on screen.
To be more concrete, if you see sharp edged text during the animation, it's
normal.
'''
__all__ = ('Screen', 'ScreenManager', 'ScreenManagerException',
'TransitionBase', 'ShaderTransition', 'SlideTransition',
'SwapTransition', 'FadeTransition', 'WipeTransition',
'FallOutTransition', 'RiseInTransition', 'NoTransition')
from kivy.compat import iteritems
from kivy.logger import Logger
from kivy.event import EventDispatcher
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import (StringProperty, ObjectProperty, AliasProperty,
NumericProperty, ListProperty, OptionProperty,
BooleanProperty)
from kivy.animation import Animation, AnimationTransition
from kivy.uix.relativelayout import RelativeLayout
from kivy.lang import Builder
from kivy.graphics import (RenderContext, Rectangle, Fbo,
ClearColor, ClearBuffers, BindTexture, PushMatrix,
PopMatrix, Translate)
class ScreenManagerException(Exception):
'''Exception for the :class:`ScreenManager`.
'''
pass
class Screen(RelativeLayout):
'''Screen is an element intended to be used with a :class:`ScreenManager`.
Check module documentation for more information.
:Events:
`on_pre_enter`: ()
Event fired when the screen is about to be used: the entering
animation is started.
`on_enter`: ()
Event fired when the screen is displayed: the entering animation is
complete.
`on_pre_leave`: ()
Event fired when the screen is about to be removed: the leaving
animation is started.
`on_leave`: ()
Event fired when the screen is removed: the leaving animation is
finished.
.. versionchanged:: 1.6.0
Events `on_pre_enter`, `on_enter`, `on_pre_leave` and `on_leave` were
added.
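    A typical pattern (illustrative sketch) is to override these events in a
    subclass::
        class MyScreen(Screen):
            def on_enter(self, *args):
                print('entered screen', self.name)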
'''
name = StringProperty('')
'''
Name of the screen which must be unique within a :class:`ScreenManager`.
This is the name used for :attr:`ScreenManager.current`.
:attr:`name` is a :class:`~kivy.properties.StringProperty` and defaults to
''.
'''
manager = ObjectProperty(None, allownone=True)
''':class:`ScreenManager` object, set when the screen is added to a
manager.
:attr:`manager` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None, read-only.
'''
transition_progress = NumericProperty(0.)
'''Value that represents the completion of the current transition, if any
    is occurring.
If a transition is in progress, whatever the mode, the value will change
from 0 to 1. If you want to know if it's an entering or leaving animation,
check the :attr:`transition_state`.
:attr:`transition_progress` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.
'''
transition_state = OptionProperty('out', options=('in', 'out'))
'''Value that represents the state of the transition:
- 'in' if the transition is going to show your screen
- 'out' if the transition is going to hide your screen
    After the transition is complete, the state will retain its last value (in
or out).
:attr:`transition_state` is an :class:`~kivy.properties.OptionProperty` and
defaults to 'out'.
'''
__events__ = ('on_pre_enter', 'on_enter', 'on_pre_leave', 'on_leave')
def on_pre_enter(self, *args):
pass
def on_enter(self, *args):
pass
def on_pre_leave(self, *args):
pass
def on_leave(self, *args):
pass
def __repr__(self):
return '<Screen name=%r>' % self.name
class TransitionBase(EventDispatcher):
'''TransitionBase is used to animate 2 screens within the
:class:`ScreenManager`. This class acts as a base for other
implementations like the :class:`SlideTransition` and
:class:`SwapTransition`.
:Events:
`on_progress`: Transition object, progression float
Fired during the animation of the transition.
`on_complete`: Transition object
            Fired when the transition is finished.
'''
screen_out = ObjectProperty()
'''Property that contains the screen to hide.
Automatically set by the :class:`ScreenManager`.
    :attr:`screen_out` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
screen_in = ObjectProperty()
'''Property that contains the screen to show.
Automatically set by the :class:`ScreenManager`.
    :attr:`screen_in` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
duration = NumericProperty(.4)
'''Duration in seconds of the transition.
    :attr:`duration` is a :class:`~kivy.properties.NumericProperty` and
defaults to .4 (= 400ms).
.. versionchanged:: 1.8.0
Default duration has been changed from 700ms to 400ms.
'''
manager = ObjectProperty()
''':class:`ScreenManager` object, set when the screen is added to a
manager.
:attr:`manager` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None, read-only.
'''
is_active = BooleanProperty(False)
'''Indicate whether the transition is currently active or not.
:attr:`is_active` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False, read-only.
'''
# privates
_anim = ObjectProperty(allownone=True)
__events__ = ('on_progress', 'on_complete')
def start(self, manager):
'''(internal) Starts the transition. This is automatically
called by the :class:`ScreenManager`.
'''
if self.is_active:
raise ScreenManagerException('start() is called twice!')
self.manager = manager
self._anim = Animation(d=self.duration, s=0)
self._anim.bind(on_progress=self._on_progress,
on_complete=self._on_complete)
self.add_screen(self.screen_in)
self.screen_in.transition_progress = 0.
self.screen_in.transition_state = 'in'
self.screen_out.transition_progress = 0.
self.screen_out.transition_state = 'out'
self.screen_in.dispatch('on_pre_enter')
self.screen_out.dispatch('on_pre_leave')
self.is_active = True
self._anim.start(self)
self.dispatch('on_progress', 0)
def stop(self):
'''(internal) Stops the transition. This is automatically called by the
:class:`ScreenManager`.
'''
if self._anim:
self._anim.cancel(self)
self.dispatch('on_complete')
self._anim = None
self.is_active = False
def add_screen(self, screen):
'''(internal) Used to add a screen to the :class:`ScreenManager`.
'''
self.manager.real_add_widget(screen)
def remove_screen(self, screen):
'''(internal) Used to remove a screen from the :class:`ScreenManager`.
'''
self.manager.real_remove_widget(screen)
def on_complete(self):
self.remove_screen(self.screen_out)
def on_progress(self, progression):
pass
def _on_progress(self, *l):
progress = l[-1]
self.screen_in.transition_progress = progress
self.screen_out.transition_progress = 1. - progress
self.dispatch('on_progress', progress)
def _on_complete(self, *l):
self.is_active = False
self.dispatch('on_complete')
self.screen_in.dispatch('on_enter')
self.screen_out.dispatch('on_leave')
self._anim = None
class ShaderTransition(TransitionBase):
'''Transition class that uses a Shader for animating the transition between
2 screens. By default, this class doesn't assign any fragment/vertex
shader. If you want to create your own fragment shader for the transition,
you need to declare the header yourself and include the "t", "tex_in" and
"tex_out" uniform::
# Create your own transition. This shader implements a "fading"
# transition.
        fs = """$HEADER$
uniform float t;
uniform sampler2D tex_in;
uniform sampler2D tex_out;
void main(void) {
vec4 cin = texture2D(tex_in, tex_coord0);
vec4 cout = texture2D(tex_out, tex_coord0);
gl_FragColor = mix(cout, cin, t);
}
"""
# And create your transition
tr = ShaderTransition(fs=fs)
sm = ScreenManager(transition=tr)
'''
fs = StringProperty(None)
'''Fragment shader to use.
:attr:`fs` is a :class:`~kivy.properties.StringProperty` and defaults to
None.'''
vs = StringProperty(None)
'''Vertex shader to use.
:attr:`vs` is a :class:`~kivy.properties.StringProperty` and defaults to
None.'''
clearcolor = ListProperty([0, 0, 0, 1])
'''Sets the color of Fbo ClearColor.
.. versionadded:: 1.9.0
:attr:`clearcolor` is a :class:`~kivy.properties.ListProperty`
and defaults to [0, 0, 0, 1].'''
def make_screen_fbo(self, screen):
fbo = Fbo(size=screen.size)
with fbo:
ClearColor(*self.clearcolor)
ClearBuffers()
fbo.add(screen.canvas)
with fbo.before:
PushMatrix()
Translate(-screen.x, -screen.y, 0)
with fbo.after:
PopMatrix()
return fbo
def on_progress(self, progress):
self.render_ctx['t'] = progress
def on_complete(self):
self.render_ctx['t'] = 1.
super(ShaderTransition, self).on_complete()
def add_screen(self, screen):
self.screen_in.pos = self.screen_out.pos
self.screen_in.size = self.screen_out.size
self.manager.real_remove_widget(self.screen_out)
self.fbo_in = self.make_screen_fbo(self.screen_in)
self.fbo_out = self.make_screen_fbo(self.screen_out)
self.manager.canvas.add(self.fbo_in)
self.manager.canvas.add(self.fbo_out)
self.render_ctx = RenderContext(fs=self.fs, vs=self.vs,
use_parent_modelview=True,
use_parent_projection=True)
with self.render_ctx:
BindTexture(texture=self.fbo_out.texture, index=1)
BindTexture(texture=self.fbo_in.texture, index=2)
x, y = self.screen_in.pos
w, h = self.fbo_in.texture.size
Rectangle(size=(w, h), pos=(x, y),
tex_coords=self.fbo_in.texture.tex_coords)
self.render_ctx['tex_out'] = 1
self.render_ctx['tex_in'] = 2
self.manager.canvas.add(self.render_ctx)
def remove_screen(self, screen):
self.manager.canvas.remove(self.fbo_in)
self.manager.canvas.remove(self.fbo_out)
self.manager.canvas.remove(self.render_ctx)
self.manager.real_add_widget(self.screen_in)
class NoTransition(TransitionBase):
'''No transition, instantly switches to the next screen with no delay or
animation.
.. versionadded:: 1.8.0
'''
duration = NumericProperty(0.0)
def on_complete(self):
self.screen_in.pos = self.manager.pos
self.screen_out.pos = self.manager.pos
super(NoTransition, self).on_complete()
class SlideTransition(TransitionBase):
'''Slide Transition, can be used to show a new screen from any direction:
left, right, up or down.
'''
direction = OptionProperty('left', options=('left', 'right', 'up', 'down'))
'''Direction of the transition.
:attr:`direction` is an :class:`~kivy.properties.OptionProperty` and
defaults to 'left'. Can be one of 'left', 'right', 'up' or 'down'.
'''
def on_progress(self, progression):
a = self.screen_in
b = self.screen_out
manager = self.manager
x, y = manager.pos
width, height = manager.size
direction = self.direction
al = AnimationTransition.out_quad
progression = al(progression)
if direction == 'left':
a.y = b.y = y
a.x = x + width * (1 - progression)
b.x = x - width * progression
elif direction == 'right':
a.y = b.y = y
b.x = x + width * progression
a.x = x - width * (1 - progression)
elif direction == 'up':
a.x = b.x = x
a.y = y + height * (1 - progression)
b.y = y - height * progression
elif direction == 'down':
a.x = b.x = manager.x
b.y = y + height * progression
a.y = y - height * (1 - progression)
def on_complete(self):
self.screen_in.pos = self.manager.pos
self.screen_out.pos = self.manager.pos
super(SlideTransition, self).on_complete()
class SwapTransition(TransitionBase):
    '''Swap transition that looks like the iOS transition when a new window
appears on the screen.
'''
def add_screen(self, screen):
self.manager.real_add_widget(screen, 1)
def on_complete(self):
self.screen_in.scale = 1.
self.screen_out.scale = 1.
self.screen_in.pos = self.manager.pos
self.screen_out.pos = self.manager.pos
super(SwapTransition, self).on_complete()
def on_progress(self, progression):
a = self.screen_in
b = self.screen_out
manager = self.manager
b.scale = 1. - progression * 0.7
a.scale = 0.5 + progression * 0.5
a.center_y = b.center_y = manager.center_y
al = AnimationTransition.in_out_sine
if progression < 0.5:
p2 = al(progression * 2)
width = manager.width * 0.7
widthb = manager.width * 0.2
a.x = manager.center_x + p2 * width / 2.
b.center_x = manager.center_x - p2 * widthb / 2.
else:
if self.screen_in is self.manager.children[-1]:
self.manager.real_remove_widget(self.screen_in)
self.manager.real_add_widget(self.screen_in)
p2 = al((progression - 0.5) * 2)
width = manager.width * 0.85
widthb = manager.width * 0.2
a.x = manager.x + width * (1 - p2)
b.center_x = manager.center_x - (1 - p2) * widthb / 2.
class WipeTransition(ShaderTransition):
'''Wipe transition, based on a fragment Shader.
'''
WIPE_TRANSITION_FS = '''$HEADER$
uniform float t;
uniform sampler2D tex_in;
uniform sampler2D tex_out;
void main(void) {
vec4 cin = texture2D(tex_in, tex_coord0);
vec4 cout = texture2D(tex_out, tex_coord0);
gl_FragColor = mix(cout, cin, clamp((-1.5 + 1.5*tex_coord0.x + 2.5*t),
0.0, 1.0));
}
'''
fs = StringProperty(WIPE_TRANSITION_FS)
class FadeTransition(ShaderTransition):
'''Fade transition, based on a fragment Shader.
'''
FADE_TRANSITION_FS = '''$HEADER$
uniform float t;
uniform sampler2D tex_in;
uniform sampler2D tex_out;
void main(void) {
vec4 cin = vec4(texture2D(tex_in, tex_coord0.st));
vec4 cout = vec4(texture2D(tex_out, tex_coord0.st));
vec4 frag_col = vec4(t * cin) + vec4((1.0 - t) * cout);
gl_FragColor = frag_col;
}
'''
fs = StringProperty(FADE_TRANSITION_FS)
class FallOutTransition(ShaderTransition):
    '''Transition where the old screen 'falls' from the screen centre,
    becoming smaller and more transparent until it disappears, revealing
    the new screen behind it. Mimics the popular/standard
Android transition.
.. versionadded:: 1.8.0
'''
duration = NumericProperty(0.15)
'''Duration in seconds of the transition, replacing the default of
:class:`TransitionBase`.
    :attr:`duration` is a :class:`~kivy.properties.NumericProperty` and
defaults to .15 (= 150ms).
'''
FALLOUT_TRANSITION_FS = '''$HEADER$
uniform float t;
uniform sampler2D tex_in;
uniform sampler2D tex_out;
void main(void) {
/* quantities for position and opacity calculation */
float tr = 0.5*sin(t); /* 'real' time */
vec2 diff = (tex_coord0.st - 0.5) * (1.0/(1.0-tr));
vec2 dist = diff + 0.5;
float max_dist = 1.0 - tr;
/* in and out colors */
vec4 cin = vec4(texture2D(tex_in, tex_coord0.st));
vec4 cout = vec4(texture2D(tex_out, dist));
/* opacities for in and out textures */
float oin = clamp(1.0-cos(t), 0.0, 1.0);
float oout = clamp(cos(t), 0.0, 1.0);
bvec2 outside_bounds = bvec2(abs(tex_coord0.s - 0.5) > 0.5*max_dist,
abs(tex_coord0.t - 0.5) > 0.5*max_dist);
vec4 frag_col;
if (any(outside_bounds) ){
frag_col = vec4(cin.x, cin.y, cin.z, 1.0);
}
else {
frag_col = vec4(oout*cout.x + oin*cin.x, oout*cout.y + oin*cin.y,
oout*cout.z + oin*cin.z, 1.0);
}
gl_FragColor = frag_col;
}
'''
fs = StringProperty(FALLOUT_TRANSITION_FS)
class RiseInTransition(ShaderTransition):
'''Transition where the new screen rises from the screen centre,
becoming larger and changing from transparent to opaque until it
fills the screen. Mimics the popular/standard Android transition.
.. versionadded:: 1.8.0
'''
duration = NumericProperty(0.2)
'''Duration in seconds of the transition, replacing the default of
:class:`TransitionBase`.
    :attr:`duration` is a :class:`~kivy.properties.NumericProperty` and
defaults to .2 (= 200ms).
'''
RISEIN_TRANSITION_FS = '''$HEADER$
uniform float t;
uniform sampler2D tex_in;
uniform sampler2D tex_out;
void main(void) {
/* quantities for position and opacity calculation */
float tr = 0.5 - 0.5*sqrt(sin(t)); /* 'real' time */
vec2 diff = (tex_coord0.st - 0.5) * (1.0/(1.0-tr));
vec2 dist = diff + 0.5;
float max_dist = 1.0 - tr;
/* in and out colors */
vec4 cin = vec4(texture2D(tex_in, dist));
vec4 cout = vec4(texture2D(tex_out, tex_coord0.st));
/* opacities for in and out textures */
float oin = clamp(sin(2.0*t), 0.0, 1.0);
float oout = clamp(1.0 - sin(2.0*t), 0.0, 1.0);
bvec2 outside_bounds = bvec2(abs(tex_coord0.s - 0.5) > 0.5*max_dist,
abs(tex_coord0.t - 0.5) > 0.5*max_dist);
vec4 frag_col;
if (any(outside_bounds) ){
frag_col = vec4(cout.x, cout.y, cout.z, 1.0);
}
else {
frag_col = vec4(oout*cout.x + oin*cin.x, oout*cout.y + oin*cin.y,
oout*cout.z + oin*cin.z, 1.0);
}
gl_FragColor = frag_col;
}
'''
fs = StringProperty(RISEIN_TRANSITION_FS)
class ScreenManager(FloatLayout):
'''Screen manager. This is the main class that will control your
:class:`Screen` stack and memory.
By default, the manager will show only one screen at a time.
'''
current = StringProperty(None)
'''Name of the screen currently shown, or the screen to show.
::
from kivy.uix.screenmanager import ScreenManager, Screen
sm = ScreenManager()
sm.add_widget(Screen(name='first'))
sm.add_widget(Screen(name='second'))
# By default, the first added screen will be shown. If you want to
# show another one, just set the 'current' property.
sm.current = 'second'
'''
transition = ObjectProperty(SlideTransition(), baseclass=TransitionBase)
'''Transition object to use for animating the screen that will be hidden
and the screen that will be shown. By default, an instance of
:class:`SlideTransition` will be given.
For example, if you want to change to a :class:`WipeTransition`::
from kivy.uix.screenmanager import ScreenManager, Screen,
WipeTransition
sm = ScreenManager(transition=WipeTransition())
sm.add_widget(Screen(name='first'))
sm.add_widget(Screen(name='second'))
# by default, the first added screen will be shown. If you want to
# show another one, just set the 'current' property.
sm.current = 'second'
.. versionchanged:: 1.8.0
Default transition has been changed from :class:`SwapTransition` to
:class:`SlideTransition`.
'''
screens = ListProperty()
'''List of all the :class:`Screen` widgets added. You must not change the
    list manually. Use :meth:`ScreenManager.add_widget` instead.
:attr:`screens` is a :class:`~kivy.properties.ListProperty` and defaults to
[], read-only.
'''
current_screen = ObjectProperty(None)
'''Contains the currently displayed screen. You must not change this
property manually, use :attr:`current` instead.
:attr:`current_screen` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None, read-only.
'''
def _get_screen_names(self):
return [s.name for s in self.screens]
screen_names = AliasProperty(_get_screen_names,
None, bind=('screens', ))
'''List of the names of all the :class:`Screen` widgets added. The list
is read only.
    :attr:`screen_names` is an :class:`~kivy.properties.AliasProperty` and
is read-only. It is updated if the screen list changes or the name
of a screen changes.
'''
def __init__(self, **kwargs):
super(ScreenManager, self).__init__(**kwargs)
self.bind(pos=self._update_pos)
def _screen_name_changed(self, screen, name):
self.property('screen_names').dispatch(self)
if screen == self.current_screen:
self.current = name
def add_widget(self, screen):
if not isinstance(screen, Screen):
raise ScreenManagerException(
'ScreenManager accepts only Screen widget.')
if screen.manager:
if screen.manager is self:
raise ScreenManagerException(
'Screen already managed by this ScreenManager (are you '
'calling `switch_to` when you should be setting '
'`current`?)')
raise ScreenManagerException(
'Screen already managed by another ScreenManager.')
screen.manager = self
screen.bind(name=self._screen_name_changed)
self.screens.append(screen)
if self.current is None:
self.current = screen.name
def remove_widget(self, *l):
screen = l[0]
if not isinstance(screen, Screen):
raise ScreenManagerException(
                'ScreenManager uses remove_widget only to remove ' +
                'screens added via add_widget! Use real_remove_widget.')
        if screen not in self.screens:
return
if self.current_screen == screen:
other = next(self)
if other:
self.current = other
screen.manager = None
screen.unbind(name=self._screen_name_changed)
self.screens.remove(screen)
def real_add_widget(self, *l):
        # ensure screen is removed from its previous parent before adding
if l[0].parent:
l[0].parent.remove_widget(l[0])
super(ScreenManager, self).add_widget(*l)
def real_remove_widget(self, *l):
super(ScreenManager, self).remove_widget(*l)
def on_current(self, instance, value):
screen = self.get_screen(value)
if not screen:
return
if screen == self.current_screen:
return
self.transition.stop()
previous_screen = self.current_screen
self.current_screen = screen
if previous_screen:
self.transition.screen_in = screen
self.transition.screen_out = previous_screen
self.transition.start(self)
else:
screen.pos = self.pos
self.real_add_widget(screen)
screen.dispatch('on_pre_enter')
screen.dispatch('on_enter')
def get_screen(self, name):
'''Return the screen widget associated with the name or raise a
:class:`ScreenManagerException` if not found.
'''
matches = [s for s in self.screens if s.name == name]
num_matches = len(matches)
if num_matches == 0:
raise ScreenManagerException('No Screen with name "%s".' % name)
if num_matches > 1:
Logger.warn('Multiple screens named "%s": %s' % (name, matches))
return matches[0]
def has_screen(self, name):
'''Return True if a screen with the `name` has been found.
.. versionadded:: 1.6.0
'''
return bool([s for s in self.screens if s.name == name])
def __next__(self):
        '''Py2K backwards compatibility without six or other lib.
'''
screens = self.screens
if not screens:
return
try:
index = screens.index(self.current_screen)
index = (index + 1) % len(screens)
return screens[index].name
except ValueError:
return
def next(self):
'''Return the name of the next screen from the screen list.'''
return self.__next__()
def previous(self):
'''Return the name of the previous screen from the screen list.
'''
screens = self.screens
if not screens:
return
try:
index = screens.index(self.current_screen)
index = (index - 1) % len(screens)
return screens[index].name
except ValueError:
return
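    # Typical cycling pattern (sketch): `sm.current = sm.next()` advances to
    # the next screen name and `sm.current = sm.previous()` steps back.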
def switch_to(self, screen, **options):
'''Add a new screen to the ScreenManager and switch to it. The previous
screen will be removed from the children. `options` are the
:attr:`transition` options that will be changed before the animation
happens.
If no previous screens are available, the screen will be used as the
main one::
sm = ScreenManager()
sm.switch_to(screen1)
# later
sm.switch_to(screen2, direction='left')
# later
sm.switch_to(screen3, direction='right', duration=1.)
If any animation is in progress, it will be stopped and replaced by
this one: you should avoid this because the animation will just look
weird. Use either :meth:`switch_to` or :attr:`current` but not both.
The `screen` name will be changed if there is any conflict with the
current screen.
        .. versionadded:: 1.8.0
'''
assert(screen is not None)
if not isinstance(screen, Screen):
raise ScreenManagerException(
'ScreenManager accepts only Screen widget.')
# stop any transition that might be happening already
self.transition.stop()
# ensure the screen name will be unique
if screen not in self.children:
if self.has_screen(screen.name):
screen.name = self._generate_screen_name()
# change the transition if given explicitly
old_transition = self.transition
specified_transition = options.pop("transition", None)
if specified_transition:
self.transition = specified_transition
# change the transition options
for key, value in iteritems(options):
setattr(self.transition, key, value)
# add and leave if we are set as the current screen
self.add_widget(screen)
if self.current_screen is screen:
return
old_current = self.current_screen
def remove_old_screen(transition):
if old_current in self.children:
self.remove_widget(old_current)
self.transition = old_transition
transition.unbind(on_complete=remove_old_screen)
self.transition.bind(on_complete=remove_old_screen)
self.current = screen.name
def _generate_screen_name(self):
i = 0
while True:
name = '_screen{}'.format(i)
if not self.has_screen(name):
return name
i += 1
def _update_pos(self, instance, value):
for child in self.children:
if self.transition.is_active and \
(child == self.transition.screen_in or
child == self.transition.screen_out):
continue
child.pos = value
def on_touch_down(self, touch):
if self.transition.is_active:
return False
return super(ScreenManager, self).on_touch_down(touch)
def on_touch_move(self, touch):
if self.transition.is_active:
return False
return super(ScreenManager, self).on_touch_move(touch)
def on_touch_up(self, touch):
if self.transition.is_active:
return False
return super(ScreenManager, self).on_touch_up(touch)
if __name__ == '__main__':
from kivy.app import App
from kivy.uix.button import Button
Builder.load_string('''
<Screen>:
canvas:
Color:
rgb: .2, .2, .2
Rectangle:
size: self.size
GridLayout:
cols: 2
Button:
text: 'Hello world'
Button:
text: 'Hello world'
Button:
text: 'Hello world'
Button:
text: 'Hello world'
''')
class TestApp(App):
def change_view(self, *l):
#d = ('left', 'up', 'down', 'right')
#di = d.index(self.sm.transition.direction)
#self.sm.transition.direction = d[(di + 1) % len(d)]
self.sm.current = next(self.sm)
def remove_screen(self, *l):
self.sm.remove_widget(self.sm.get_screen('test1'))
def build(self):
root = FloatLayout()
self.sm = sm = ScreenManager(transition=SwapTransition())
sm.add_widget(Screen(name='test1'))
sm.add_widget(Screen(name='test2'))
btn = Button(size_hint=(None, None))
btn.bind(on_release=self.change_view)
btn2 = Button(size_hint=(None, None), x=100)
btn2.bind(on_release=self.remove_screen)
root.add_widget(sm)
root.add_widget(btn)
root.add_widget(btn2)
return root
TestApp().run()
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# vsgen documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 4 23:23:51 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import datetime
import pkg_resources
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
docroot = os.path.dirname(__file__)
pkgroot = os.path.abspath(os.path.join(docroot, '..', '..'))
sys.path.insert(0, pkgroot)
# -- Preprocessing --------------------------------------------------------
# Special flag if we're building on a read-the-docs server
rtd = os.environ.get('READTHEDOCS', None) == 'True'
# Our make file calls sphinx-apidoc, but read-the-docs uses our config instead
# (so it skips that step). We therefore call apidoc here ourselves when we are
# being built there.
if rtd:
os.system("sphinx-apidoc --no-toc --separate --private -o {} {}".format(os.path.join(docroot, 'apidoc'), os.path.join(pkgroot, 'vsgen')))
# -- Mock -----------------------------------------------------------------
# Read The Docs requires modules relying on Windows-only DLLs etc. be "mocked".
# https://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
if rtd:
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
class _Mock(Mock):
@classmethod
def __getattr__(cls, name):
return _Mock()
MOCK_MODULES = ['_winreg']
sys.modules.update((mod_name, _Mock()) for mod_name in MOCK_MODULES)
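    # If more Windows-only imports surface later they would be mocked the same
    # way (module names below are purely hypothetical):
    #   MOCK_MODULES = ['_winreg', 'win32api', 'win32com']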
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinxarg.ext'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'vsgen'
author = 'dbarsam'
copyright = '{}, {}'.format(datetime.date.today().year, author)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pkg_resources.get_distribution(project).version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# only import and set the theme if we're building docs locally
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
if not rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'vsgendoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'vsgen.tex', 'vsgen Documentation',
'dbarsam', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'vsgen', 'vsgen Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'vsgen', 'vsgen Documentation',
author, 'vsgen', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
|
#!/usr/bin/env python
# basic_alarm_a.py
"""
Copyright (c) 2015 ContinuumBridge Limited
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
import time
import json
import os  # used by onClientMessage for os.path.isfile
from cbutils import nicetime
from cbcommslib import CbApp, CbClient
from cbconfig import *
from twisted.internet import reactor
# Default values:
config = {
"ignore_time": 120
}
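# The app also reads the local config file (CONFIG_FILE, defined below), a JSON
# object with the same keys, and merges it over these defaults in
# readLocalConfig(). An illustrative file content:
#     {"ignore_time": 300}
# would keep already-triggered sensors on the ignore list for 300 seconds.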
ENFILE = CB_CONFIG_DIR + "basic_alarm.state"
CONFIG_FILE = CB_CONFIG_DIR + "basic_alarm.config"
CID = "CID164" # Client ID
class EnableState():
def __init__(self):
self.switch_ids = []
def setSwitch(self, deviceID):
if deviceID not in self.switch_ids:
self.switch_ids.append(deviceID)
def isEnabled(self):
try:
with open(ENFILE, 'r') as f:
self.val = int(f.read())
if self.val == 1:
return True
else:
return False
except Exception as ex:
self.cbLog("warning", "Could not read enable state from file")
self.cbLog("warning", "Exception: " + str(type(ex)) + str(ex.args))
return False
def enable(self, en):
if en:
val = 1
else:
val = 0
try:
with open(ENFILE, 'w') as f:
f.write(str(val))
except Exception as ex:
self.cbLog("warning", "Could not write enable state to file")
self.cbLog("warning", "Exception: " + str(type(ex)) + str(ex.args))
for s in self.switch_ids:
command = {"id": self.id,
"request": "command"}
if en:
command["data"] = "on"
else:
command["data"] = "off"
self.sendMessage(command, s)
class App(CbApp):
def __init__(self, argv):
self.appClass = "control"
self.state = "stopped"
self.sensorsID = []
self.onSensors = []
self.devices = []
self.idToName = {}
self.lastTrigger = 0
reactor.callLater(10, self.resetSensors)
# Super-class init must be called
CbApp.__init__(self, argv)
def setState(self, action):
self.state = action
msg = {"id": self.id,
"status": "state",
"state": self.state}
self.sendManagerMessage(msg)
def resetSensors(self):
if time.time() - self.lastTrigger > config["ignore_time"]:
self.onSensors = []
reactor.callLater(10, self.resetSensors)
def readLocalConfig(self):
global config
try:
with open(CONFIG_FILE, 'r') as f:
newConfig = json.load(f)
self.cbLog("debug", "Read local config")
config.update(newConfig)
except Exception as ex:
self.cbLog("warning", "Local config does not exist or file is corrupt. Exception: " + str(type(ex)) + str(ex.args))
self.cbLog("debug", "Config: " + str(json.dumps(config, indent=4)))
def onConcMessage(self, message):
#self.cbLog("debug", "onConcMessage, message: " + str(json.dumps(message, indent=4)))
if "status" in message:
if message["status"] == "ready":
# Do this after we have established communications with the concentrator
msg = {
"m": "req_config",
"d": self.id
}
self.client.send(msg)
self.client.receive(message)
def onClientMessage(self, message):
#self.cbLog("debug", "onClientMessage, message: " + str(json.dumps(message, indent=4)))
global config
if "config" in message:
if "warning" in message["config"]:
self.cbLog("warning", "onClientMessage: " + str(json.dumps(message["config"], indent=4)))
else:
try:
newConfig = message["config"]
copyConfig = config.copy()
copyConfig.update(newConfig)
if copyConfig != config or not os.path.isfile(CONFIG_FILE):
self.cbLog("debug", "onClientMessage. Updating config from client message")
config = copyConfig.copy()
with open(CONFIG_FILE, 'w') as f:
json.dump(config, f)
#self.cbLog("info", "Config updated")
self.readLocalConfig()
# With a new config, send init message to all connected adaptors
for i in self.adtInstances:
init = {
"id": self.id,
"appClass": self.appClass,
"request": "init"
}
self.sendMessage(init, i)
except Exception as ex:
self.cbLog("warning", "onClientMessage, could not write to file. Type: " + str(type(ex)) + ", exception: " + str(ex.args))
def onAdaptorService(self, message):
#self.cbLog("debug", "onAdaptorService. message: " + str(message))
sensor = False
switch = False
buttons = False
binary_sensor = False
number_buttons = False
for p in message["service"]:
if p["characteristic"] == "buttons":
buttons = True
if p["characteristic"] == "number_buttons":
number_buttons = True
if p["characteristic"] == "switch":
switch = True
if p["characteristic"] == "binary_sensor":
binary_sensor = True
if switch and binary_sensor:
binary_sensor = False # Don't trigger on an indicator device
if buttons:
self.sensorsID.append(message["id"])
req = {"id": self.id,
"request": "service",
"service": [
{"characteristic": "buttons",
"interval": 0
}
]
}
self.sendMessage(req, message["id"])
if number_buttons:
self.sensorsID.append(message["id"])
req = {"id": self.id,
"request": "service",
"service": [
{"characteristic": "number_buttons",
"interval": 0
}
]
}
self.sendMessage(req, message["id"])
if binary_sensor:
self.sensorsID.append(message["id"])
req = {"id": self.id,
"request": "service",
"service": [
{"characteristic": "binary_sensor",
"interval": 0
}
]
}
self.sendMessage(req, message["id"])
if switch:
self.enableState.setSwitch(message["id"])
self.setState("running")
def onAdaptorData(self, message):
#self.cbLog("debug", "onAdaptorData. message: " + str(message))
if message["id"] in self.sensorsID:
if message["characteristic"] == "buttons":
if message["data"]["rightButton"] == 1:
self.enableState.enable(True)
elif message["data"]["leftButton"] == 1:
self.enableState.enable(False)
self.cbLog("debug", "onAdaptorData. alarm: " + str(self.enableState.isEnabled()))
elif message["characteristic"] == "number_buttons":
for m in message["data"].keys():
if m == "1":
self.enableState.enable(True)
elif m == "3":
self.enableState.enable(False)
self.cbLog("debug", "onAdaptorData. alarm: " + str(self.enableState.isEnabled()))
elif message["characteristic"] == "binary_sensor":
if self.enableState.isEnabled() and message["data"] == "on":
if not message["id"] in self.onSensors:
now = time.time()
self.lastTrigger = now
self.onSensors.append(message["id"])
active = []
for a in self.onSensors:
active.append(self.idToName[a])
msg = {"m": "alert",
"a": "Intruder detected by " + str(", ".join(active)) + " at " + nicetime(now),
"t": now
}
self.client.send(msg)
#self.cbLog("debug", "onSensors: " + str(self.onSensors))
def onConfigureMessage(self, managerConfig):
for adaptor in managerConfig["adaptors"]:
adtID = adaptor["id"]
if adtID not in self.devices:
# Because managerConfigure may be re-called if devices are added
name = adaptor["name"]
friendly_name = adaptor["friendly_name"]
#self.cbLog("debug", "managerConfigure app. Adaptor id: " + adtID + " name: " + name + " friendly_name: " + friendly_name)
self.idToName[adtID] = friendly_name.replace(" ", "_")
self.devices.append(adtID)
self.readLocalConfig()
self.enableState = EnableState()
self.enableState.cbLog = self.cbLog
self.enableState.id = self.id
self.enableState.sendMessage = self.sendMessage
self.client = CbClient(self.id, CID, 5)
self.client.onClientMessage = self.onClientMessage
self.client.sendMessage = self.sendMessage
self.client.cbLog = self.cbLog
self.client.loadSaved()
self.setState("starting")
if __name__ == '__main__':
app = App(sys.argv)
|
|
#Interphase - Copyright (C) 2009 James Garnon <http://gatc.ca/>
#Released under the MIT License <http://opensource.org/licenses/MIT>
from __future__ import division
from control import Control, FunctionControl, Label, Textbox
from util import Text, load_image
import os
from env import engine
__docformat__ = 'restructuredtext'
"""
:undocumented:EVENT
"""
EVENT = {'controlselect':30, 'controlinteract':31}
class Interface(engine.sprite.Sprite):
"""
**Interface Object**
    To design an interface panel, subclass interphase.Interface and call interphase.Interface.__init__() from the subclass __init__() method. The Interface object provides several methods to design and use an interface panel. Use add() to add controls to the panel, preferably from a method named add_controls(); if controls are added any other way, call activate() afterwards to activate the panel. The program maintains the panel update, including changes made through the methods, but panel_update() can be used to force an update if required. If the update() method is overridden in a subclass, call interphase.Interface.update() from it.

    Interface interaction can be followed through the InterfaceState object returned by update() or get_state(), or through the Pygame event queue by checking for event.type interphase.EVENT['controlselect'] and interphase.EVENT['controlinteract'], whose event.state attribute references the InterfaceState object. To turn the panel off, use deactivate(), which sets state.active to False. The panel can be drawn to the display with the draw() method.

    The module includes demo.py, which demonstrates some of Interphase's functionality.
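
    A minimal usage sketch (illustrative only: the panel name, the control
    identity, and the control_list parameter are assumptions rather than part
    of the API, and the event loop is abbreviated; see demo.py for a complete
    example)::

        import interphase
        import pygame

        class MyPanel(interphase.Interface):
            def __init__(self):
                interphase.Interface.__init__(self, position=(250, 450),
                                              color=(43, 50, 58), size=(350, 100))

            def add_controls(self):
                self.add(identity='Action', control_type='function_select',
                         position=(175, 50), control_list=['Start', 'Stop'])

        pygame.init()
        screen = pygame.display.set_mode((500, 500))
        panel = MyPanel()
        while True:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    raise SystemExit
            state = panel.update()   # returns the InterfaceState object
            panel.draw(screen)
            pygame.display.flip()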
"""
_image_default = None
_image_source = None
_clipboard = None
_clipboard_type = None
_event_queue = []
def __init__(self,
identity='Interface_Panel',
position=None,
image=None,
color=(0,0,0),
size=(350,100),
screen=(500,500),
moveable=False,
position_offset=(0,0),
move_rate=(200,200),
fixed=False,
button_image=None,
control_image=None,
color_key=None,
control_minsize=None,
control_size='min',
button_size=(12,12),
function_button='left',
control_button='right',
scroll_button=None,
font_color=(125,130,135),
font_type=None,
font_size=10,
label_display=False,
info_display=False,
info_fontsize=10,
info_fontcolor=(125,130,135),
info_position=(2,0),
tips_display=False,
tips_fontsize=8,
tips_fontcolor=(125,130,135),
tips_position=(0,-15),
control_response=125,
pointer_interact=False,
data_folder='data',
data_zip=None,
text_paste=False,
event=False):
"""
**Interface Object: Define panel.**
Optional Parameters <default>:
identity: 'id' panel name <'Interface_Panel'>.
position: (x,y) panel placement on screen <None>.
values < 1 are %screen.
None centers on screen.
image: 'image' panel image <None>.
None use default image, 'none' suppress default image.
Image in data folder.
color: (r,g,b) panel color <(0,0,0)>.
size: (w,h) dimension of panel <(350,100)>.
screen: (w,h) dimension of screen <(500,500)>.
moveable: bool panel can move <False>.
position_offset: (x,y) panel move offset <(0,0)>.
move_rate: (x,y) panel move rate pix/s <(200,200)>.
values < 1 are %position_offset/s.
fixed: bool panel fixed in place <False>.
button_image: ['U','D'] or 'composite' control button image <None>.
None use default image, 'none' suppress default image.
Image in data folder.
control_image: 'image' control background image <None>.
None use default image, 'none' suppress default image.
Image in data folder.
color_key: (r,g,b) image color key transparency <None>.
transparency set by image alpha value or color_key.
value -1 color_key from pixel at (0,0).
control_minsize: (x,y) minimum control size <None>.
control_size: '' global control size if control_minsize set <'min'>.
'auto', 'auto_width': fit items.
'min', 'min_width': fit using control_minsize.
'panel': use exact control_minsize.
button_size: (x,y) button size <(12,12)>.
function_button: placement of buttons of function_select <'left'>.
control_button: placement of buttons of control_select <'right'>.
scroll_button: activate scroll wheel <None>.
- None,'vertical','horizontal','both'
font_color: (r,g,b) font color of control text <(125,130,135)>.
font_type: [] font type list <None>.
None: default system font; []: use first font available.
<control>.set_display_text(info='system') gets system fonts.
'file:<font_name>' path to font file.
font_size: int font size of control text <10>.
label_display: bool label displayed <False>.
info_display: bool info text displayed <False>.
info_fontsize: int font size used for info text <10>.
info_fontcolor: (r,g,b) font color used for info text <(125,130,135)>.
info_position: (x,y) position of info text <(2,0)>.
tips_display: bool tip text displayed <False>.
tips_fontsize: int font size used for tip text <8>.
tips_fontcolor: (r,g,b) font color used for tip text <(125,130,135)>.
tips_position: (x,y) position offset of tip text <(0,-15)>.
control_response: int control click response (ms) <125>.
pointer_interact: bool pointer interact monitored <False>.
data_folder: '' image data folder <'data'>.
data_zip: '' image data zip <None>.
text_paste: bool clipboard support <False>.
event: bool interaction generates events <False>.
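
        Example (values are illustrative; a fractional position is resolved
        against the screen size as described above)::

            panel = Interface(identity='main_panel',
                              position=(0.5, 0.9),
                              size=(300, 80),
                              screen=(640, 480),
                              moveable=True,
                              position_offset=(0, 60),
                              label_display=True)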
"""
engine.sprite.Sprite.__init__(self)
self._panel = engine.sprite.RenderUpdates(self)
self._text = Text
self._load_image = load_image
self._data = data_folder
self._data_zip = data_zip
if self._data_zip and self._data:
self._data_zip = os.path.join(self._data, self._data_zip)
self._zipfile = None
self._id = identity
self._width, self._height = screen
self._size = size
if position:
pos_x, pos_y = position
if pos_x < 1:
pos_x = pos_x * self._width
if pos_y < 1:
pos_y = pos_y * self._height
self._x, self._y = int(pos_x), int(pos_y)
else:
self._x, self._y = self._width//2, self._height//2
self._moveable = moveable #panel moveable
self._positionx, self._positiony = self._x, self._y #panel original placement
self._offsetx, self._offsety = position_offset
directionx, directiony = move_rate #panel move speed
if directionx < 1:
directionx = directionx * abs(self._offsetx)
if directiony < 1:
directiony = directiony * abs(self._offsety)
self._directionx, self._directiony = int(directionx), int(directiony)
self._move_ratex, self._move_ratey = int(self._directionx/40), int(self._directiony/40)
self._move_initiate = False
self._color = color
self._initialized = False
self._controls = {} #panel controls
self._control_values = {}
self._color_key = color_key
self.image = None
self.rect = None
self._control_image = {}
self._button_image = {}
self._button_size = button_size
self._set_image(image, control_image, button_image) #load panel images
if control_minsize:
self._control_minsize = {}
padding = ( min(control_minsize[0],control_minsize[1]) // 10 ) + 4
if padding % 2:
padding -= 1
self._control_minsize['size'] = control_minsize
self._control_minsize['min'] = control_minsize[0]-padding, control_minsize[1]-padding
self._control_minsize['pad'] = padding
else:
self._control_minsize = None
self._control_size = control_size
self._button_placement = { 'function_select':function_button, 'control_select':control_button, 'textbox':'right' }
if scroll_button:
try:
if scroll_button == 'vertical':
self._scroll_button = set([4,5])
elif scroll_button == 'horizontal':
self._scroll_button = set([6,7])
else:
self._scroll_button = set([4,5,6,7])
except NameError: #set module not available
from java.util import HashSet
if scroll_button == 'vertical':
self._scroll_button = HashSet([4,5])
elif scroll_button == 'horizontal':
self._scroll_button = HashSet([6,7])
else:
self._scroll_button = HashSet([4,5,6,7])
else:
self._scroll_button = None
self._scroll_button_selected = {4:'_top', 5:'_bottom', 6:'_top', 7:'_bottom'}
self._scroll_button_selected_alt = {4:'_top', 5:'_bottom', 6:'_bottom', 7:'_top'}
self._control_events = 1
if moveable:
self._displayed = False
else:
self._displayed = True
self._panel_disabled = False #disabled on move
self._display_fixed = fixed #moveable panel fixed in place
self._active = True
self._panel_active = False
self._panel_display = True #panel controls display on or toggled with pointer interact
self._panel_rect = engine.Rect(self.rect)
self._font_color = font_color
self._font_type = font_type
self._font_size = font_size
self._text(self.image, self._font_type, self._font_size) #initialize Text defaults
self._info_display = info_display #info display toggle
self._info_displaying = False #info currently displaying
self._info = self._text(self.image) #info displayed on panel
self._info.set_font_size(info_fontsize)
self._info.set_font_color(info_fontcolor)
self._info.set_font_bgcolor(None)
self._info.set_position(info_position)
self._tips_display = tips_display
self._tips = self._text(self.image)
self._tips.set_font_size(tips_fontsize)
self._tips.set_font_color(tips_fontcolor)
self._tips.set_font_bgcolor(None)
self._tips_position = tips_position #default tips over pointer
self._control_hover = None #interact during tips_display
self._controls_disabled = {}
self._active_color = (255,0,0)
self._update_display = True #update panel display
self._control_response = control_response #response speed of control
self._control_press = {'control':None, 'button':None, 'response':0, 'hold':0, 'rtime':0, 'htime':0}
self._label_display = label_display #show control labels
self._panel_interact = False
self._control_moveable = False #control moveable
self._control_move = None #control selected to move
self._pointer_position = (0,0)
self._pointer_interact = pointer_interact #detect control hover
self._clock = engine.time.Clock()
self._update_panel = True #set for panel update
self._initial_update = 10 #panel updates for short duration
self._panel_function = [] #list of panel functions to run on panel update
self._sustain_update = False #panel register sustained update status
self._control_event = [] #controls recently pressed
self._interface = {'state':None, 'update':False} #interface control state
self._event = event
self._events = {}
self._events['controlselect'] = engine.event.Event(EVENT['controlselect'], self._interface)
self._events['controlinteract'] = engine.event.Event(EVENT['controlinteract'], self._interface)
self.add_controls()
self.activate()
def add_controls(self):
"""Method to overide in subclass for adding controls."""
pass
def add(self, identity, control_type, position, **parameters):
"""Add control to panel."""
panel = self
if control_type in ('control_select', 'control_toggle'):
interface_control = Control(panel, identity, control_type, position, **parameters)
elif control_type in ('function_select', 'function_toggle'):
interface_control = FunctionControl(panel, identity, control_type, position, **parameters)
elif control_type == 'label':
interface_control = Label(panel, identity, control_type, position, **parameters)
elif control_type == 'textbox':
interface_control = Textbox(panel, identity, control_type, position, **parameters)
self._controls[identity] = interface_control
return interface_control
def activate(self, activate_panel=True):
"""Panel activation."""
if activate_panel:
self._active = True
self._panel_active = True
self._panel_disabled = False
self._activate_controls()
if not self._initialized:
if self._moveable and not self._display_fixed:
self._x, self._y = self._positionx+self._offsetx, self._positiony+self._offsety
self._initialized = True
if self._zipfile:
self._zip_file(close=True)
self._panel_function.append(self._force_update)
self.panel_update()
else:
self.deactivate()
def deactivate(self):
"""Panel deactivation."""
self._active = False
self._control_press['control'] = None
self._panel_disabled = True
self.panel_update()
def _force_update(self):
if self._initial_update:
self._update_panel = True
self._initial_update -= 1
else:
self._initial_update = 10
self._panel_function.pop()
def _activate_controls(self):
"""Panel controls activation."""
for ctrl in self._controls:
self._controls[ctrl]._activate()
def add_control(self, identity, control_type, position, **parameters):
"""Add control to panel."""
interface_control = self.add(identity, control_type, position, **parameters)
return interface_control
def get_control(self, *control):
"""Retrieve control object. Multiple controls return a dictionary of control objects, and if no parameter given return dictionary of all control objects."""
if not control:
return self._controls.copy()
elif len(control) == 1:
return self._controls[control[0]]
ctr = {}
for ctrl in control:
ctr[ctrl] = self._controls[ctrl]
return ctr
def remove_control(self, *control):
"""Remove control from panel."""
if control:
for ctrl in control:
del self._controls[ctrl]
del self._control_values[ctrl]
for item in self._controls:
if self._controls[item].control_type in ('function_select', 'function_toggle'):
for function in self._controls[item].link:
if ctrl in self._controls[item].link[function]:
self._controls[item].link[function].remove(ctrl)
else:
self._controls.clear()
self._control_values.clear()
def enable_control(self, *control):
"""Set control enabled."""
if not control:
control = self._controls.keys()
control_unchanged = []
for ctrl in control:
if ctrl in self._controls_disabled:
self._controls[ctrl].rects = self._controls_disabled[ctrl].copy()
#panel move - change rect pos or define controls
del self._controls_disabled[ctrl]
self._controls[ctrl].enabled = True
else:
control_unchanged.append(ctrl)
return control_unchanged
def disable_control(self, *control):
"""Set control disabled."""
if not control:
control = self._controls.keys()
for ctrl in control:
if not self._controls[ctrl].enabled:
continue
self._controls_disabled[ctrl] = self._controls[ctrl].rects.copy()
self._controls[ctrl].rects = {}
if self._controls[ctrl] is self._control_press['control']:
self._control_press['control'] = None
self._controls[ctrl].enabled = False
def get_value(self, *control):
"""Retrieve current value of control. Multiple controls return a dictionary of values, and if no parameter given return dictionary of all values."""
if not control:
return self._control_values
elif len(control) == 1:
return self._control_values[control[0]]
value = {}
for ctrl in control:
value[ctrl] = self._control_values[ctrl]
return value
def get_position(self):
"""Retrieve panel position."""
return self._x, self._y
def get_size(self):
"""Retrieve panel size."""
return self._size
def _zip_file(self, zip_file=None, close=False):
"""Retrieve zipfile object."""
import zipfile
if not close:
if not zip_file:
zip_file = self._data_zip
if not self._zipfile:
self._zipfile = zipfile.ZipFile(zip_file)
else:
self._zipfile.close()
self._zipfile = None
return self._zipfile
def _data_source(self, data_folder=None, data_zip=None, color_key=None, file_obj=None):
"""Retrieve default data source."""
if not file_obj:
if data_folder is None:
data_folder = self._data
if data_zip is None and data_folder == self._data:
if self._data_zip:
data_zip = self._zip_file()
else:
data_folder = data_zip = None
if color_key is None:
color_key = self._color_key
return data_folder, data_zip, color_key
def get_default_image(self, mode=None, path=None):
"""Get or save default images."""
image = {}
try:
img_obj = self._default_image()
for img in img_obj:
if not img_obj[img]:
return image
if mode == 'save':
try:
for img in img_obj:
filename = '_'+img_obj[img][0]
if path:
filename = os.path.join(path, filename)
                        image_file = open(filename, 'wb')  # binary mode so the image bytes are written intact
image_file.write(img_obj[img][1].read())
img_obj[img][1].seek(0)
image_file.close()
except IOError:
pass
image['panel_image'] = self.set_panel_image(img_obj['panel'][0], file_obj=img_obj['panel'][1])
image['control_image'] = self.set_control_image(img_obj['control'][0], file_obj=img_obj['control'][1])
image['button_image'] = self.set_button_image(img_obj['button'][0], file_obj=img_obj['button'][1])
except IOError:
pass
return image
def _default_image(self):
"""Set default images."""
try:
from image import _image_decode
image_obj = _image_decode()
except:
image_obj = {}
for img in ('panel', 'control', 'button'):
image_obj[img] = ('none', None)
return image_obj
def _set_image(self, image, control_image, button_image):
"""Set panel, control and button images."""
if Interface._image_default:
if image == Interface._image_source['panel_image']:
image = None
if control_image == Interface._image_source['control_image']:
control_image = None
if button_image == Interface._image_source['button_image']:
button_image = None
default_image = {}
if not Interface._image_default and (not image or not control_image or not button_image):
img_obj = self._default_image()
default_image = {}
default_image['panel_image'] = self.set_panel_image(img_obj['panel'][0], file_obj=img_obj['panel'][1])
default_image['control_image'] = self.set_control_image(img_obj['control'][0], file_obj=img_obj['control'][1])
default_image['button_image'] = self.set_button_image(img_obj['button'][0], file_obj=img_obj['button'][1])
if image:
self._panel_image = self.set_panel_image(image)
if control_image:
self._control_image = self.set_control_image(control_image)
if button_image:
self._button_image = self.set_button_image(button_image)
if not Interface._image_default:
Interface._image_default = {}
Interface._image_source = {}
if image:
default_image['panel_image'] = self._panel_image.copy()
if control_image:
default_image['control_image'] = self._control_image.copy()
if button_image:
default_image['button_image'] = self._button_image.copy()
Interface._image_default = default_image
Interface._image_source['panel_image'] = image
Interface._image_source['control_image'] = control_image
Interface._image_source['button_image'] = button_image
if not image:
self._panel_image = Interface._image_default['panel_image'].copy()
if self._panel_image.get_size() != self._size:
self._panel_image = engine.transform.smoothscale(self._panel_image, self._size)
self.image = self._panel_image.copy()
self.rect = self.image.get_rect(center=(self._x,self._y))
if not control_image:
self._control_image = Interface._image_default['control_image'].copy()
if not button_image:
self._button_image = Interface._image_default['button_image'].copy()
def get_panel_image(self, change=False):
"""Get panel image to modify. Parameter Change - True: mod permanent, False: mod transitory."""
if change:
return self._panel_image
else:
return self.image
def set_panel_image(self, image=None, data_folder=None, data_zip=None, file_obj=None, color_key=None, surface=None):
"""Set image used for panel."""
if image:
if isinstance(image, str):
image = [image]
if image[0] != 'none':
data_folder, data_zip, color_key = self._data_source(data_folder, data_zip, color_key, file_obj)
self._panel_image = self._load_image(image[0], path=data_folder, zipobj=data_zip, fileobj=file_obj, colorkey=color_key)
if self._panel_image.get_size() != self._size:
self._panel_image = engine.transform.smoothscale(self._panel_image, self._size)
else:
self._panel_image = engine.Surface(self._size)
self._panel_image.fill(self._color)
elif surface:
self._panel_image = surface.copy()
if color_key:
                if color_key == -1:
color_key = self._panel_image.get_at((0,0))
self._panel_image.set_colorkey(color_key, engine.RLEACCEL)
if self._panel_image.get_size() != self._size:
self._panel_image = engine.transform.smoothscale(self._panel_image, self._size)
else:
self._panel_image = Interface._image_default['panel_image'].copy()
self.image = self._panel_image.copy()
self.rect = self.image.get_rect(center=(self._x,self._y))
if self._initialized:
self._display_controls()
return self._panel_image
def set_control_image(self, control_image=None, data_folder=None, data_zip=None, file_obj=None, color_key=None, surface=None):
"""Set image used for control."""
if control_image:
if isinstance(control_image, str):
control_image = [control_image]
if control_image[0] != 'none':
data_folder, data_zip, color_key = self._data_source(data_folder, data_zip, color_key, file_obj)
self._control_image['bg'] = self._load_image(control_image[0], path=data_folder, zipobj=data_zip, fileobj=file_obj, colorkey=color_key)
else:
if 'bg' in self._control_image:
del self._control_image['bg']
elif surface:
self._control_image['bg'] = surface.copy()
if color_key:
                if color_key == -1:
color_key = self._control_image['bg'].get_at((0,0))
self._control_image['bg'].set_colorkey(color_key, engine.RLEACCEL)
else:
try:
self._control_image['bg'] = Interface._image_default['control_image']['bg'].copy()
except:
if 'bg' in self._control_image:
del self._control_image['bg']
for ctrl in self._controls:
if not self._control_image:
self._controls[ctrl].control_outline = True
else:
self._controls[ctrl].control_outline = self._controls[ctrl].outline
self._controls[ctrl]._set_buttonlist()
if self._initialized:
for ctrl in self._controls:
self._controls[ctrl]._define_buttons(self._controls[ctrl].control_type, self._controls[ctrl].size, self._controls[ctrl].color['normal'], self._controls[ctrl].color['fill'], initialize=False)
self._display_controls()
return self._control_image
def set_button_image(self, button_image=None, data_folder=None, data_zip=None, file_obj=None, color_key=None, surface=None):
"""Set image used for buttons."""
if button_image:
if isinstance(button_image, str):
button_image = [button_image]
if button_image[0] != 'none':
data_folder, data_zip, color_key = self._data_source(data_folder, data_zip, color_key, file_obj)
self._button_image = {}
button_frames = 2
if len(button_image) == 1:
images = self._load_image(button_image[0], button_frames, path=data_folder, zipobj=data_zip, fileobj=file_obj, colorkey=color_key)
self._button_image['t'] = engine.transform.smoothscale(images[0], self._button_size)
self._button_image['b'] = engine.transform.smoothscale(images[1], self._button_size)
else:
for num, frame in enumerate(['t','b']):
img = self._load_image(button_image[num], path=data_folder, zipobj=data_zip, fileobj=file_obj, colorkey=color_key)
self._button_image[frame] = engine.transform.smoothscale(img, self._button_size)
else:
self._button_image = {}
elif surface:
for num, frame in enumerate(['t','b']):
img = surface[num].copy()
if color_key:
                    if color_key == -1:
color_key = img.get_at((0,0))
img.set_colorkey(color_key, engine.RLEACCEL)
self._button_image[frame] = engine.transform.smoothscale(img, self._button_size)
else:
self._button_image = Interface._image_default['button_image'].copy()
if self._initialized:
for ctrl in self._controls:
self._controls[ctrl]._define_buttons(self._controls[ctrl].control_type, self._controls[ctrl].size, self._controls[ctrl].color['normal'], self._controls[ctrl].color['fill'], initialize=False)
self._display_controls()
return self._button_image
def get_clipboard(self):
"""Retrieve text from clipboard."""
        raise AttributeError("clipboard unavailable")
def set_clipboard(self, text):
"""Save text to clipboard."""
        raise AttributeError("clipboard unavailable")
def _clipboard_init(self):
if not Interface._clipboard:
try:
from gtk import Clipboard
Interface._clipboard = Clipboard()
Interface._clipboard_type = 'gtk'
except ImportError:
try:
from Tkinter import Tk
Interface._clipboard = Tk()
Interface._clipboard.withdraw()
Interface._clipboard_type = 'tk'
except ImportError:
try:
global StringSelection, DataFlavor, UnsupportedFlavorException, IOException, IllegalStateException
from java.awt.datatransfer import StringSelection, DataFlavor
from java.awt.datatransfer import UnsupportedFlavorException
from java.io import IOException
from java.lang import IllegalStateException
from java.awt import Toolkit
Interface._clipboard = Toolkit.getDefaultToolkit().getSystemClipboard()
Interface._clipboard_type = 'jtk'
except ImportError:
try:
engine.display.textbox_init()
Interface._clipboard = engine.display.textarea
Interface._clipboard_type = 'js'
except AttributeError:
Interface._clipboard = None
Interface._clipboard_type = None
if Interface._clipboard_type == 'gtk':
self.get_clipboard = self._get_clipboard_gtk
self.set_clipboard = self._set_clipboard_gtk
elif Interface._clipboard_type == 'tk':
self.get_clipboard = self._get_clipboard_tk
self.set_clipboard = self._set_clipboard_tk
elif Interface._clipboard_type == 'jtk':
self.get_clipboard = self._get_clipboard_jtk
self.set_clipboard = self._set_clipboard_jtk
elif Interface._clipboard_type == 'js':
self.get_clipboard = self._get_clipboard_js
self.set_clipboard = self._set_clipboard_js
def _get_clipboard_gtk(self):
text = Interface._clipboard.wait_for_text()
return text
def _set_clipboard_gtk(self, text):
Interface._clipboard.set_text(text)
Interface._clipboard.store()
return
def _get_clipboard_tk(self):
text = Interface._clipboard.clipboard_get()
return text
def _set_clipboard_tk(self, text):
Interface._clipboard.clipboard_clear()
Interface._clipboard.clipboard_append(text)
def _get_clipboard_jtk(self):
contents = Interface._clipboard.getContents(None)
if contents != None:
try:
text = contents.getTransferData(DataFlavor.stringFlavor)
except (UnsupportedFlavorException, IOException):
text = None
else:
text = None
return text
def _set_clipboard_jtk(self, text):
try:
Interface._clipboard.setContents(StringSelection(text), None)
except IllegalStateException:
pass
return
def _get_clipboard_js(self):
text = Interface._clipboard.getText()
return text
def _set_clipboard_js(self, text):
Interface._clipboard.setText(text)
return
def is_active(self):
"""Check whether panel is active."""
return self._active
def is_moveable(self, setting=None):
"""Check whether panel is moveable."""
if not setting:
return self._moveable
elif setting == 'Fixed':
return self._display_fixed
def set_moveable(self, setting='Toggle', position_offset=None, move_rate=None):
"""Set panel moveable setting."""
if position_offset:
self._offsetx, self._offsety = position_offset
if move_rate:
self._directionx, self._directiony = move_rate
if setting == 'Toggle':
self._moveable = not self._moveable
return self._moveable
elif setting in (True, False):
self._moveable = setting
return self._moveable
elif setting == 'Fixed':
self._display_fixed = not self._display_fixed
return self._display_fixed
else:
return None
def move(self, x, y):
"""Move panel to new position x,y."""
self._x, self._y = x, y
self._panel_rect.x = self._x-(self.width//2)
self._panel_rect.y = self._y-(self.height//2)
for ctrl in self._controls:
control_type = self._controls[ctrl].control_type
size = self._controls[ctrl].size
color = self._controls[ctrl].color['normal']
fill = self._controls[ctrl].color['fill']
self._controls[ctrl].button, self._controls[ctrl].rects = self._controls[ctrl]._define_buttons(control_type, size, color, fill)
self._update_panel = True
def set_panel_display(self, setting='Toggle'):
"""Set whether panel display toggled with pointer interaction."""
if setting == 'Toggle':
self._panel_display = not self._panel_display
return self._panel_display
elif setting in (True, False):
self._panel_display = setting
return self._panel_display
else:
return None
def is_info_display(self):
"""Check whether info is displayed."""
return self._info_display
def set_info_display(self, setting='Toggle'):
"""Set info display setting."""
if setting == 'Toggle':
self._info_display = not self._info_display
return self._info_display
elif setting in (True, False):
self._info_display = setting
return self._info_display
else:
return None
def is_label_display(self):
"""Check whether label is displayed."""
return self._label_display
def set_label_display(self, setting='Toggle'):
"""Set label display setting."""
if setting == 'Toggle':
self._label_display = not self._label_display
self._update_panel = True
return self._label_display
elif setting in (True, False):
self._label_display = setting
self._update_panel = True
return self._label_display
else:
return None
def is_tips_display(self):
"""Check whether tips are displayed."""
return self._tips_display
def set_tips_display(self, setting='Toggle'):
"""Set tips display setting."""
if setting == 'Toggle':
self._tips_display = not self._tips_display
return self._tips_display
elif setting in (True, False):
self._tips_display = setting
return self._tips_display
else:
return None
def add_info(self, *message_append):
"""Add text to info."""
self._info.add(*message_append)
self._info_displaying = True
def clear_info(self):
"""Clear text from info."""
self._info.clear_text()
def get_id(self):
"""Get panel identity"""
return self._id
def get_state(self):
"""Get the state object of the panel."""
return self._interface['state']
def set_update(self, sustain=False):
"""Set panel update. Parameter: sustain bool for continuous update, default False."""
if not sustain:
self._sustain_update = False
if self._force_update not in self._panel_function:
self._panel_function.append(self._force_update)
self._update_panel = True
else:
self._sustain_update = True
self._update_panel = True
def is_update(self):
"""Check if panel was updated or set to sustained update."""
if not self._sustain_update:
return self._interface['update']
else:
return self._sustain_update
def draw(self, surface):
"""Draw panel on surface. Return Rect of surface area changed."""
rect = self._panel.draw(surface)
return rect
def clear(self, surface, background):
"""Clear panel from surface of previous draw, using background."""
self._panel.clear(surface, background)
return None
def get_pointer_position(self):
"""Get position of pointer determined at latest update."""
return self._pointer_position
def set_pointer_interact(self, setting='Toggle'):
"""Set pointer interact monitoring."""
if setting == 'Toggle':
self._pointer_interact = not self._pointer_interact
return self._pointer_interact
elif setting in (True, False):
self._pointer_interact = setting
return self._pointer_interact
else:
return None
def process_event(self, clear=False):
"""
Internally process event handlers.
Required if no call to framework event method such as event.get or event.pump.
Optional clear argument to remove events from framework event queue.
With scroll_button active, call panel update prior to framework event method calls.
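
        A typical per-frame ordering when the application does not otherwise call
        event.get or event.pump (sketch; the surrounding loop is assumed)::

            panel.panel_update()    # update panel first when scroll_button is active
            panel.process_event()   # pump the event handlers in place of event.get/pump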
"""
if not clear:
engine.event.pump()
else:
engine.event.clear()
def get_event_queue(self):
"""
Return interface event queue.
Event queue has mouse press events sequestered upon panel interaction with scroll_button active.
"""
return self._event_queue
def move_control(self, control=None, position=None, offset=None):
"""Move selected control. If no position supplied, use position of mouse pointer."""
if not control:
if self._control_move:
control = self._control_move
else:
return
control = self.get_control(control)
x, y = self.get_position()
size = self.get_size()
if not position:
if not offset:
mouse_x, mouse_y = self._pointer_position
pos = mouse_x - x + (size[0]//2), mouse_y - y + (size[1]//2)
pos = [pos[0]-(control.size[0]//2), pos[1]-(control.size[1]//2)]
else:
pos = [control.position[0]+offset[0], control.position[1]+offset[1]]
else:
pos = position[0], position[1]
pos = [pos[0]-(control.size[0]//2), pos[1]-(control.size[1]//2)]
ladj = 0
radj = 0
if control.control_type in self._button_placement:
if self._button_placement[control.control_type] == 'left':
ladj = 16
elif self._button_placement[control.control_type] == 'right':
radj = 16
if pos[0] - ladj < 0:
pos[0] = ladj
elif pos[0]+control.size[0]+radj > size[0]:
pos[0] = size[0]-(control.size[0]+radj)
if pos[1] < 0:
pos[1] = 0
elif pos[1]+control.size[1] > size[1]:
pos[1] = size[1]-control.size[1]
pos = (pos[0], pos[1])
dx, dy = pos[0]-control.position[0], pos[1]-control.position[1]
control.position = pos
control_type = control.control_type
size = control.size
color = control.color['normal']
fill = control.color['fill']
control._define_buttons(control_type, size, color, fill, initialize=False)
for rect in control.rects:
control.rects[rect].move_ip(dx,dy)
width, height = control.display.check_size('x')
pos = ( control.position[0]+(size[0]//2), control.position[1]+(size[1]//2)-(height//2) )
control.display.set_position((pos),center=True)
control.text_image = {}
pos = ( control.position[0]+(size[0]//2), control.position[1]-(control.font_size+3) )
control.label.set_position((pos),center=True)
self._update_panel = True
def set_control_move(self, control=None, mouse_visible=True):
"""Select control to move."""
if control:
self._control_move = control
if not mouse_visible:
engine.mouse.set_visible(False)
else:
self._control_move = None
engine.mouse.set_visible(True)
def get_control_move(self):
"""Return selected control to move."""
return self._control_move
def is_control_moveable(self):
"""Check whether control is moveable."""
return self._control_moveable
def set_control_moveable(self, setting='Toggle'):
"""Set control moveable."""
if setting == 'Toggle':
self._control_moveable = not self._control_moveable
return self._control_moveable
elif setting in (True, False):
self._control_moveable = setting
return self._control_moveable
def _display_controls(self):
"""Draws controls on panel."""
if self._panel_active:
if not hasattr(self.image, 'clear'):
self.image = self._panel_image.copy()
else:
self.image.clear()
self.image.blit(self._panel_image, (0,0))
for ctrl in self._controls:
if self._controls[ctrl].active:
self.image = self._controls[ctrl]._display(self.image)
if self._label_display and self._controls[ctrl].label_display:
if not self._controls[ctrl].label_text.startswith('__'):
self._controls[ctrl].label.add(self._controls[ctrl].label_text)
self.image = self._controls[ctrl].label.render(self.image)
if self._tips_display:
if self._panel_interact:
mouse_x, mouse_y = self._pointer_position
if self._control_hover:
if not self._controls[self._control_hover].active or not self._controls[self._control_hover].rects[self._control_hover].collidepoint(mouse_x,mouse_y):
self._control_hover = None
if not self._control_hover:
for ctrl in self._controls:
if self._controls[ctrl].active:
if self._controls[ctrl].tips:
try:
if self._controls[ctrl].rects[ctrl].collidepoint(mouse_x,mouse_y):
self._control_hover = ctrl
break
except:
if not ctrl in self._controls_disabled:
self._controls[ctrl].tips = None
try:
if self._control_hover:
if len(self._controls[self._control_hover].tips) == 1:
tip = self._controls[self._control_hover].tips[self._controls[self._control_hover].tips.keys()[0]]
else:
tip = self._controls[self._control_hover].tips[self._controls[self._control_hover].value]
pos = mouse_x-(self._x-(self._size[0]//2)), mouse_y-(self._y-(self._size[1]//2))
pos = pos[0]+self._tips_position[0], pos[1]+self._tips_position[1]
self._tips.set_position(pos, center=True)
self._tips.add(tip)
self.image = self._tips.render(self.image)
except:
pass
else:
self._control_hover = None
if self._info_display:
if self._info_displaying:
if self._info.has_text():
self.image = self._info.render(self.image)
else:
self._info_displaying = False
self.rect = self.image.get_rect(center=(self._x,self._y))
def _display_update(self):
"""Update control panel on display."""
if self._moveable:
self._moveable_panel()
if self._info_displaying:
self._update_panel = True
if self._control_event:
for ctrl in self._control_event:
if ctrl._check():
self._update_panel = True
self._control_event[:] = []
update, self._pointer_position = self._panel_interaction()
self._panel_action()
return update
def _panel_action(self):
for function in self._panel_function:
try:
function()
except TypeError: #pyjs -O function>unbound method
function(self)
def set_panel_function(self, function=None):
"""Add function to panel update list, call without function to delete list."""
if function:
self._panel_function.append(function)
else:
self._panel_function = []
def _moveable_panel(self):
"""Update moveable panel."""
def move_panel(pos_i, pos_f, z, z_dir, rate_x=0, rate_y=0):
if not self._move_initiate:
fps = self._clock.get_fps()
self._move_ratex = int(self._directionx/fps)
if not self._move_ratex:
self._move_ratex = 1
self._move_ratey = int(self._directiony/fps)
if not self._move_ratey:
self._move_ratey = 1
self._move_initiate = True
if rate_x:
rate_x = rate_x*z_dir * z
rate = rate_x
else:
rate_y = rate_y*z_dir * z
rate = rate_y
if abs(pos_i-pos_f) > abs(rate):
self.rect.move_ip((rate_x, rate_y))
pos_i += rate
self._panel_disabled = True
else:
adj = abs(pos_i-pos_f)
if rate_x:
rate_x = adj*z_dir * z
else:
rate_y = adj*z_dir * z
self.rect.move_ip((rate_x, rate_y))
pos_i = pos_f
self._panel_disabled = False
self._move_initiate = False
return pos_i
if self._displayed or self._display_fixed:
if self._offsetx:
if self._x != self._positionx:
z = self._offsetx//abs(self._offsetx)
z_dir = -1
self._x = move_panel(self._x, self._positionx, z, z_dir, rate_x=self._move_ratex)
self._update_panel = True
if self._offsety:
if self._y != self._positiony:
z = self._offsety//abs(self._offsety)
z_dir = -1
self._y = move_panel(self._y, self._positiony, z, z_dir, rate_y=self._move_ratey)
self._update_panel = True
else:
if self._offsetx:
if self._x != self._positionx+self._offsetx:
z = self._offsetx//abs(self._offsetx)
z_dir = 1
self._x = move_panel(self._x, self._positionx+self._offsetx, z, z_dir, rate_x=self._move_ratex)
self._update_panel = True
if self._offsety:
if self._y != self._positiony+self._offsety:
z = self._offsety//abs(self._offsety)
z_dir = 1
self._y = move_panel(self._y, self._positiony+self._offsety, z, z_dir, rate_y=self._move_ratey)
self._update_panel = True
return self._panel_disabled
def _panel_interaction(self):
"""Check for mouse interaction with panel."""
self._pointer_position = engine.mouse.get_pos()
if self._displayed:
if not self.rect.collidepoint(self._pointer_position):
self._panel_interact = False
self._displayed = False
if not self._panel_display:
if self._panel_active:
self._panel_active = False
self.image = self._panel_image.copy()
else:
self._panel_interact = True
else:
if self.rect.collidepoint(self._pointer_position):
self._panel_interact = True
self._panel_active = True
self._displayed = True
return self._panel_interact, self._pointer_position
def _control_interact(self, pos):
"""Check control interaction."""
if not self._displayed or not self._panel_active or self._panel_disabled:
return None, None
control_interact = None
button_interact = None
if self._tips_display:
self._update_panel = True
if self._control_hover: #control interaction in tips display
control_interact, button_interact = self._control_hover, self._control_hover
else:
if self._pointer_interact: #detect pointer move interact
self._update_panel = True
for ctrl in self._controls:
if self._controls[ctrl].active:
for rect in self._controls[ctrl].rects:
if self._controls[ctrl].rects[rect].collidepoint(pos):
control_interact, button_interact = ctrl, rect
break
return control_interact, button_interact
def _control_scroll(self, pos, btn):
for control in self._controls:
if not self._controls[control].active or self._controls[control].control_type not in ['function_select', 'control_select', 'textbox']:
continue
for button in self._controls[control].rects:
if button.endswith('_bg'):
continue
if self._controls[control].rects[button].collidepoint(pos):
if not self._controls[control].listing[0][:-2] == '__numeric': #TODO: encapsulate in control
return control, control+self._scroll_button_selected[btn]
else:
return control, control+self._scroll_button_selected_alt[btn]
return None, None
def _control_select(self, pos):
"""Check control selected."""
if not self._displayed or not self._panel_active or self._panel_disabled:
return None, None
if not engine.mouse.get_pressed()[0]:
if self._control_press['control']:
self._control_press['control'] = None
if self._scroll_button is None:
return None, None
else:
self._event_queue[:] = engine.event.get(engine.MOUSEBUTTONDOWN)
scroll_event = None
for event in self._event_queue:
if event.button in self._scroll_button:
if not scroll_event:
scroll_event = event.button
else:
self._control_events += 1
if scroll_event:
return self._control_scroll(pos, scroll_event)
else:
return None, None
control_select = None
button_select = None
if not self._control_press['control']:
for control in self._controls:
if not self._controls[control].active:
continue
for button in self._controls[control].rects:
if button.endswith('_bg'):
continue
if self._controls[control].rects[button].collidepoint(pos):
self._control_press['control'] = self._controls[control]
self._control_press['button'] = button
self._control_press['hold'] = self._control_press['control'].hold_response
if self._control_press['control'].delay_response:
self._control_press['response'] = self._control_press['control'].delay_response
if self._control_press['hold']:
self._control_press['hold'] += self._control_press['control'].delay_response
else:
self._control_press['response'] = self._control_press['control'].control_response
control_select, button_select = control, button
self._control_press['rtime'] = self._control_press['htime'] = engine.time.get_ticks()
return control_select, button_select
else:
if self._control_press['control'].active:
time = engine.time.get_ticks()
if (time-self._control_press['rtime']) > self._control_press['response']:
self._control_press['rtime'] = engine.time.get_ticks()
control_select = self._control_press['control'].id
button_select = self._control_press['button']
if not self._control_press['hold'] or (time-self._control_press['htime']) < self._control_press['hold']:
self._control_press['response'] = self._control_press['control'].control_response
else:
self._control_press['response'] = self._control_press['control'].control_response_hold
else:
self._control_press['control'] = None
return control_select, button_select
def _interact(self):
"""Check for mouse interaction with controls."""
control_interact, button_interact = self._control_interact(self._pointer_position)
control_select, button_select = self._control_select(self._pointer_position)
return control_interact, button_interact, control_select, button_select
def _control_action(self, control, button):
"""Does control action, returns button pressed and current control value."""
if button:
if self._control_events == 1:
button, value = self._controls[control]._action(button)
return button, value
else:
for evt in range(self._control_events):
button, value = self._controls[control]._action(button)
self._control_events = 1
return button, value
else:
return None, None
def panel_update(self, force_update=True):
"""Update control panel, determines interaction, does control action."""
update = self._display_update()
if update:
control_interact, button_interact, control_select, button_select = self._interact()
if control_select:
button_select, value = self._control_action(control_select, button_select)
self._update_panel = True
else:
button_select, value = None, None
self._clock.tick()
if force_update:
self._update_panel = True
if self._update_panel:
self._display_controls()
panel = self
if update:
self._interface['state'] = InterfaceState(panel, control_interact, button_interact, control_select, button_select, value)
if control_select:
if self._controls[control_select].event:
engine.event.post(self._events['controlselect'])
if control_interact:
if self._controls[control_interact].event:
engine.event.post(self._events['controlinteract'])
else:
self._interface['state'] = InterfaceState(panel)
self._interface['update'] = True
self._update_panel = False
else:
if self._interface['update']:
panel = self
self._interface['state'] = InterfaceState(panel)
self._interface['update'] = False
self._interface['state'].panel_interact = self._panel_interact
return self._interface['state']
def update(self):
"""Update control panel. If overriding in interface subclass, call Interface.update(self)."""
interface_state = self.panel_update(0)
return interface_state
class InterfaceState(object):
"""
**State Object**
* Attributes:
* panel: Interface panel
* controls: Interface controls
        * panel_active: Panel active
        * panel_update: Panel update
* panel_interact: Pointer interface interact
* control_interact: Pointer control interact
* button_interact: Pointer button interact
* control: Control selected
* button: Button selected
* value: Control value
* values: Panel control values
    The State Object shows the current state of the control panel. The control_interact and button_interact attributes register only when pointer_interact or tips_display is enabled. When a control's event is active, pygame events of type interphase.EVENT['controlselect'] and interphase.EVENT['controlinteract'] are generated, each carrying the attribute event.state.
"""
__slots__ = [
'panel',
'controls',
'panel_active',
'panel_update',
'panel_interact',
'control_interact',
'button_interact',
'control',
'button',
'value',
'values']
def __init__(self,
panel,
control_interact=None,
button_interact=None,
control_select=None,
button_select=None,
value=None):
self.panel = panel
self.controls = panel._controls
self.panel_active = panel._active
self.panel_update = panel._update_panel
self.panel_interact = panel._panel_interact
self.control_interact = control_interact
self.button_interact = button_interact
self.control = control_select
self.button = button_select
self.value = value
self.values = panel._control_values
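# Illustrative consumer sketch for the event mechanism described in the
# InterfaceState docstring above. Assumptions: this module is importable as
# 'interphase' and exposes the EVENT mapping referenced there; the loop and
# handler below are hypothetical and not part of the module.
#
#     for event in engine.event.get():
#         if event.type == interphase.EVENT['controlselect']:
#             state = event.state
#             handle_selection(state.control, state.button, state.value)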
|
|
from __future__ import division
from libtbx.test_utils import approx_equal
from libtbx.utils import Usage
from libtbx import easy_run
import libtbx.load_env
import platform
import time
import sys, os
op = os.path
__this_script__ = "cctbx_project/fable/test/sf_times.py"
# based on cctbx_project/compcomm/newsletter09/sf_times.py
setup_dir = "/net/cci/setup/Linux"
ifort_versions = ["intel121.sh", "intel111.sh", "ifort91.sh"]
icc_versions = [
"intel121.sh",
"intel111.sh",
"icc101.sh",
"icc91.sh"]
gcc_versions = [
"gcc-4.6.1_fc8.sh",
"gcc-4.5.3_fc8.sh",
"gcc-4.4.6_fc8.sh",
"gcc-4.3.6_fc8.sh",
"gcc-4.2.4_fc8.sh"]
fortran_template = r"""C %(this_script)s
subroutine cos_wrapper(result, arg)
REAL result
REAL arg
result = COS(arg)
return
end
subroutine exp_wrapper(result, arg)
REAL result
REAL arg
result = EXP(arg)
return
end
subroutine sf(abcss, n_scatt, xyz, b_iso, n_refl, hkl, f_calc)
implicit none
REAL abcss(3)
integer n_scatt
REAL xyz(3, *)
REAL b_iso(*)
integer n_refl
integer hkl(3, *)
REAL f_calc(2, *)
integer i_refl, i_scatt, j, h
REAL phi, cphi, sphi, dss, ldw, dw, a, b
DO i_refl=1,n_refl
a = 0
b = 0
DO i_scatt=1,n_scatt
phi = 0
DO j=1,3
phi = phi + hkl(j,i_refl) * xyz(j,i_scatt)
enddo
phi = phi * 2 * 3.1415926535897931
call cos_wrapper(cphi, phi)
call cos_wrapper(sphi, phi - 3.1415926535897931*0.5)
dss = 0
DO j=1,3
h = hkl(j,i_refl)
dss = dss + h*h * abcss(j)
enddo
ldw = -0.25 * dss * b_iso(i_scatt)
call exp_wrapper(dw, ldw)
a = a + dw * cphi
b = b + dw * sphi
enddo
f_calc(1, i_refl) = a
f_calc(2, i_refl) = b
enddo
return
end
program run
implicit none
REAL abcss(3)
integer n_scatt
parameter(n_scatt=%(n_scatt)s)
REAL xyz(3, n_scatt)
REAL b_iso(n_scatt)
integer n_refl
parameter(n_refl=%(n_refl)s)
integer hkl(3, n_refl)
REAL f_calc(2, n_refl)
integer i, j, jr
REAL a, b, max_a, max_b
abcss(1) = 1/(11.0*11.0)
abcss(2) = 1/(12.0*12.0)
abcss(3) = 1/(13.0*13.0)
jr = 0
DO i=1,n_scatt
DO j=1,3
jr = mod(jr*1366+150889, 714025)
xyz(j,i) = (mod(jr, 20000) - 10000) / 10000.0
enddo
enddo
DO i=1,n_scatt
jr = mod(jr*1366+150889, 714025)
b_iso(i) = mod(jr, 10000) / 100.0
enddo
if (n_scatt .le. 10) then
DO i=1,n_scatt
write(6, '(4(1x,f9.6))')
& xyz(1,i), xyz(2,i), xyz(3, i), b_iso(i)
enddo
endif
DO i=1,n_refl
DO j=1,3
jr = mod(jr*1366+150889, 714025)
hkl(j,i) = mod(jr, 10) - 5
enddo
enddo
call sf(abcss, n_scatt, xyz, b_iso, n_refl, hkl, f_calc)
if (n_refl .le. 100) then
DO i=1,n_refl
write(6, '(3(1x,i3),1x,f12.6,1x,f12.6)')
& hkl(1,i), hkl(2,i), hkl(3,i),
& f_calc(1,i), f_calc(2,i)
enddo
else
max_a = 0
max_b = 0
DO i=1,n_refl
a = f_calc(1,i)
b = f_calc(2,i)
if (max_a .lt. a) max_a = a
if (max_b .lt. b) max_b = b
enddo
write(6, '(2(1x,f12.6))') max_a, max_b
endif
end
"""
def compare_with_cctbx_structure_factors(n_scatt, n_refl, output_lines):
from cctbx import xray
from cctbx import miller
from cctbx import crystal
from cctbx.array_family import flex
crystal_symmetry = crystal.symmetry(
unit_cell=(11,12,13,90,90,90),
space_group_symbol="P1")
scatterers = flex.xray_scatterer()
miller_indices = flex.miller_index()
f_calc = flex.complex_double()
for line in output_lines:
flds = line.split()
assert len(flds) in [4,5]
if (len(flds) == 4):
x,y,z,b_iso = [float(s) for s in flds]
scatterers.append(
xray.scatterer(site=(x,y,z), b=b_iso, scattering_type="const"))
else:
miller_indices.append([int(s) for s in flds[:3]])
f_calc.append(complex(float(flds[3]), float(flds[4])))
assert scatterers.size() == n_scatt
assert miller_indices.size() == n_refl
xs = xray.structure(
crystal_symmetry=crystal_symmetry,
scatterers=scatterers)
fc = miller_array = miller.set(
crystal_symmetry=crystal_symmetry,
indices=miller_indices,
anomalous_flag=False).array(data=f_calc)
fc2 = fc.structure_factors_from_scatterers(
xray_structure=xs,
algorithm="direct",
cos_sin_table=False).f_calc()
for f1,f2 in zip(fc.data(), fc2.data()):
assert approx_equal(f1, f2, eps=1e-5)
def build_run(
setup_cmd, ld_preload_flag, n_scatt, n_refl, build_cmd, check_max_a_b):
if (op.isfile("a.out")):
os.remove("a.out")
assert not op.isfile("a.out")
print build_cmd
buffers = easy_run.fully_buffered(command=build_cmd)
msg = buffers.format_errors_if_any()
if (msg is not None):
if (0):
print build_cmd
print
print msg
print
STOP()
return None
assert op.isfile("a.out")
run_cmd = setup_cmd
if (ld_preload_flag):
run_cmd += 'env LD_PRELOAD='\
'"/net/marbles/raid1/rwgk/dist/opt_resources/linux64/libimf.so:"'\
'"/net/marbles/raid1/rwgk/dist/opt_resources/linux64/libirc.so" '
utimes = []
run_cmd += '/usr/bin/time -p ./a.out'
def run_once():
buffers = easy_run.fully_buffered(command=run_cmd)
if (len(buffers.stderr_lines) != 3):
print "v"*79
print "\n".join(buffers.stderr_lines)
print "^"*79
raise RuntimeError(
"Unexpected number of output lines"
" (3 expected; acutal output see above).")
if (n_scatt == 0):
pass
elif (n_scatt <= 10 and n_refl <= 100):
assert len(buffers.stdout_lines) == n_scatt + n_refl
else:
assert len(buffers.stdout_lines) == 1
max_a, max_b = [float(s) for s in buffers.stdout_lines[0].split()]
if (check_max_a_b):
if (n_scatt == 2000 and n_refl == 20000):
assert approx_equal(max_a, 35.047157, eps=1e-4)
assert approx_equal(max_b, 25.212738, eps=1e-4)
elif (n_scatt == 100 and n_refl == 1000):
assert approx_equal(max_a, 4.493645, eps=1e-4)
assert approx_equal(max_b, 10.515532, eps=1e-4)
elif (n_scatt <= 10 and n_refl <= 100):
if (libtbx.env.has_module(name="cctbx")):
compare_with_cctbx_structure_factors(
n_scatt=n_scatt,
n_refl=n_refl,
output_lines=buffers.stdout_lines)
else:
        raise RuntimeError(max_a, max_b)
utime = float(buffers.stderr_lines[1].split()[1])
utimes.append(utime)
print "sample utime: %.2f" % utime
sys.stdout.flush()
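  # Run the benchmark several times and report the fastest utime to damp scheduling noise.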
for _ in xrange(8):
run_once()
return min(utimes)
def finalize_cpp_build_cmd(source_cpp):
from fable import simple_compilation
comp_env = simple_compilation.environment()
return comp_env.assemble_include_search_paths(no_quotes=False) \
+ " " + source_cpp
def write_build_run(
setup_cmd, ld_preload_flag, n_scatt, n_refl, real, lang, build_cmd,
replace_cos, replace_exp):
this_script = __this_script__
for_txt = fortran_template % vars()
if (replace_cos):
for_txt = for_txt.replace(
"COS(arg)",
"arg / (abs(arg)+1.0)")
if (replace_exp):
for_txt = for_txt.replace(
"EXP(arg)",
"max(0.0, 1.0 - arg*arg)")
for_txt = for_txt.replace("REAL", real)
open("tmp.f", "w").write(for_txt)
from fable import cout
cpp_txt = cout.process(
file_names=["tmp.f"],
namespace="sf_test",
fem_do_safe=False,
inline_all=True)
open("tmp.cpp", "w").write("\n".join(cpp_txt)+"\n")
if (lang.lower() == "f"):
build_cmd += " tmp.f"
elif (lang.lower() == "c"):
build_cmd += finalize_cpp_build_cmd("tmp.cpp")
else:
raise RuntimeError('Unknown lang: "%s"' % lang)
return build_run(
setup_cmd=setup_cmd,
ld_preload_flag=ld_preload_flag,
n_scatt=n_scatt,
n_refl=n_refl,
build_cmd=build_cmd,
check_max_a_b=(not (replace_cos or replace_exp)))
def run_combinations(
compiler_versions,
all_utimes,
n_scatt,
n_refl,
compiler_build_opts_list,
real_list):
for lang,setup_sh_list,compiler,build_opts in compiler_build_opts_list:
for setup_sh in setup_sh_list:
if (setup_sh is None):
setup_cmd = ""
else:
setup_cmd = ". %s/%s; " % (setup_dir, setup_sh)
compiler_version = easy_run.fully_buffered(
command=setup_cmd+compiler+" --version",
join_stdout_stderr=True).stdout_lines[0]
if (lang in ["f", "c"]):
ld_preload_flags = [False, True]
else:
ld_preload_flags = [False]
for ld_preload_flag in ld_preload_flags:
iml = ["", " Intel Math Lib"][int(ld_preload_flag)]
compiler_versions.append(compiler_version + iml)
build_cmd = " ".join([setup_cmd+compiler, build_opts])
print build_cmd
utimes = []
if (n_scatt != 0):
for real in real_list:
print " %s" % real
for replace_cos in [False, True]:
print " replace_cos", replace_cos
for replace_exp in [False, True]:
print " replace_exp", replace_exp
sys.stdout.flush()
if (compiler_version != "n/a"):
utime = write_build_run(
setup_cmd=setup_cmd,
ld_preload_flag=ld_preload_flag,
n_scatt=n_scatt,
n_refl=n_refl,
real=real,
lang=lang,
build_cmd=build_cmd,
replace_cos=replace_cos,
replace_exp=replace_exp)
if (utime is not None):
print " %4.2f" % utime
else:
utime = -1.0
print " err"
else:
utime = -1.0
print " n/a"
utimes.append(utime)
sys.stdout.flush()
else:
if (lang.lower() == "f"):
f_source = libtbx.env.find_in_repositories(
relative_path="lapack_fem/dsyev_test.f",
test=op.isfile,
optional=False)
build_cmd_compl = build_cmd + " " + f_source
else:
cpp_source = libtbx.env.find_in_repositories(
relative_path="lapack_fem/dsyev_test.cpp",
test=op.isfile,
optional=False)
build_cmd_compl = build_cmd + finalize_cpp_build_cmd(cpp_source)
utime = build_run(
setup_cmd=setup_cmd,
ld_preload_flag=ld_preload_flag,
n_scatt=n_scatt,
n_refl=n_refl,
build_cmd=build_cmd_compl,
check_max_a_b=False)
if (utime is None):
print "err"
utime = -1.0
else:
print "min utime: %.2f" % utime
sys.stdout.flush()
utimes.append(utime)
all_utimes.append((utimes, build_cmd + iml))
def usage():
raise Usage("fable.python sf_times.py unit_test|quick|production")
def run(args):
if (len(args) != 1): usage()
t_start = time.time()
build_platform = platform.platform()
build_node = platform.node()
compiler_versions = []
if (args[0] == "unit_test"):
n_scatt, n_refl = 10, 100
elif (args[0] == "quick"):
n_scatt, n_refl = 100, 1000
elif (args[0] == "production"):
n_scatt, n_refl = 2000, 20000
elif (args[0] == "dsyev"):
n_scatt, n_refl = 0, 0
else:
usage()
gcc_sh = gcc_versions + [None]
icc_sh = icc_versions
if (args[0] == "quick"):
gcc_sh = gcc_sh[:2]
icc_sh = icc_sh[:1]
all_utimes = []
run_combinations(
compiler_versions,
all_utimes,
n_scatt=n_scatt,
n_refl=n_refl,
compiler_build_opts_list=[
("F", ifort_versions, "ifort", "-O"),
("f", gcc_sh, "gfortran", "-O3 -ffast-math"),
("f", gcc_sh, "gfortran", "-O3 -ffast-math -march=native"),
("C", icc_sh, "icpc", "-O"),
("c", gcc_sh, "g++", "-O3 -ffast-math"),
("c", gcc_sh, "g++", "-O3 -ffast-math -march=native"),
("c", [None], "clang++",
"-O3 -U__GXX_WEAK__ -Wno-logical-op-parentheses -ffast-math"),
("c", [None], "clang++",
"-O3 -U__GXX_WEAK__ -Wno-logical-op-parentheses -ffast-math"
" -march=native")],
real_list=["real*4", "real*8"])
print
print "current_platform:", platform.platform()
print "current_node:", platform.node()
print "build_platform:", build_platform
print "build_node:", build_node
for compiler_version in compiler_versions:
print "compiler:", compiler_version
if (n_scatt != 0):
print "n_scatt * n_refl: %d * %d" % (n_scatt, n_refl)
print '''\
"s" or "d": single-precision or double-precision floating-point variables
"E" or "e": using the library exp(arg) function or "max(0.0, 1.0 - arg*arg)"
"C" or "c": using the library cos(arg) function or "arg / (abs(arg)+1.0)"'''
print " sEC seC sEc sec dEC deC dEc dec"
else:
print "dsyev times:"
useful_utimes = []
for utimes,build_cmd in all_utimes:
if (max(utimes) != -1.0):
print " ".join(["%6.2f" % u for u in utimes]), build_cmd
useful_utimes.append((utimes,build_cmd))
if (len(useful_utimes) > 1):
print "Relative to first:"
for utimes,build_cmd in useful_utimes:
print " ".join(["%6.2f" % (u/max(u0,0.01))
for u,u0 in zip(utimes,useful_utimes[0][0])]), build_cmd
print "Wall clock time: %.2f s" % (time.time()-t_start)
if (__name__ == "__main__"):
run(args=sys.argv[1:])
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for fused batch norm operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import test_utils
from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
DATA_FORMATS = (
("_data_format_NHWC", "NHWC"),
("_data_format_NCHW", "NCHW"),
)
class FusedBatchNormTest(xla_test.XLATestCase, parameterized.TestCase):
def _reference_training(self, x, scale, offset, epsilon, data_format):
if data_format != "NHWC":
raise ValueError("data_format must be NHWC, got %s." % data_format)
x_square = x * x
x_square_sum = np.sum(x_square, (0, 1, 2))
x_sum = np.sum(x, axis=(0, 1, 2))
element_count = np.size(x) / int(np.shape(x)[-1])
mean = x_sum / element_count
var = x_square_sum / element_count - mean * mean
factor = element_count / max(element_count - 1, 1)
corrected_var = var * factor
normalized = (x - mean) / np.sqrt(var + epsilon)
return (normalized * scale + offset), mean, var, corrected_var
def _reference_grad(self, x, grad_y, scale, mean, var, epsilon, data_format):
# Use the following formulas to calculate gradients:
# grad_scale =
# sum(grad_y * (x - mean)) * rsqrt(var + epsilon)
#
    # grad_offset = sum(grad_y)
#
# grad_x =
# 1/N * scale * rsqrt(var + epsilon) * (N * grad_y - sum(grad_y) -
# (x - mean) * sum(grad_y * (x - mean)) / (var + epsilon))
if data_format != "NHWC":
raise ValueError("data_format must be NHWC, got %s." % data_format)
grad_x = scale * (grad_y - np.mean(grad_y, axis=(0, 1, 2)) -
(x - mean) * np.mean(grad_y *
(x - mean), axis=(0, 1, 2)) /
(var + epsilon)) / np.sqrt(var + epsilon)
grad_scale = np.sum(
grad_y * (x - mean) / np.sqrt(var + epsilon), axis=(0, 1, 2))
grad_offset = np.sum(grad_y, axis=(0, 1, 2))
return grad_x, grad_scale, grad_offset
@parameterized.named_parameters(*DATA_FORMATS)
def testInference(self, data_format):
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
data_format_src = "NHWC"
y_ref, mean_ref, var_ref, _ = self._reference_training(
x_val, scale_val, offset_val, epsilon, data_format_src)
with self.cached_session() as sess, self.test_scope():
# To avoid constant folding
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
y_ref_converted = test_utils.ConvertBetweenDataFormats(
y_ref, data_format_src, data_format)
t_val = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
offset = array_ops.placeholder(
np.float32, shape=scale_shape, name="offset")
y, mean, variance = nn.fused_batch_norm(
t_val,
scale,
offset,
mean=mean_ref,
variance=var_ref,
epsilon=epsilon,
data_format=data_format,
is_training=False)
y_val, _, _ = sess.run([y, mean, variance], {
t_val: x_val_converted,
scale: scale_val,
offset: offset_val
})
self.assertAllClose(y_val, y_ref_converted, atol=1e-3)
def _testLearning(self, use_gradient_checker, data_format):
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
data_format_src = "NHWC"
# When in training mode, fused_batchnorm applies an implicit Bessel's
# correction. So we have to use the corrected variance here, as well.
y_ref, mean_ref, _, var_ref_corr = self._reference_training(
x_val, scale_val, offset_val, epsilon, data_format_src)
with self.cached_session() as sess, self.test_scope():
# To avoid constant folding
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
y_ref_converted = test_utils.ConvertBetweenDataFormats(
y_ref, data_format_src, data_format)
t_val = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
offset = array_ops.placeholder(
np.float32, shape=scale_shape, name="offset")
y, mean, var = nn.fused_batch_norm(
t_val,
scale,
offset,
mean=None,
variance=None,
epsilon=epsilon,
data_format=data_format,
is_training=True)
# Check gradient.
if use_gradient_checker:
err = gradient_checker.compute_gradient_error(
t_val,
x_val_converted.shape,
y,
x_val_converted.shape,
extra_feed_dict={
t_val: x_val_converted,
scale: scale_val,
offset: offset_val
})
self.assertLess(err, 1e-3)
y_val, mean_val, var_val = sess.run([y, mean, var], {
t_val: x_val_converted,
scale: scale_val,
offset: offset_val
})
self.assertAllClose(mean_val, mean_ref, atol=1e-3)
self.assertAllClose(y_val, y_ref_converted, atol=1e-3)
self.assertAllClose(var_val, var_ref_corr, atol=1e-3)
@parameterized.named_parameters(*DATA_FORMATS)
def testLearning(self, data_format):
self._testLearning(False, data_format)
@parameterized.named_parameters(*DATA_FORMATS)
def testLearningWithGradientChecker(self, data_format):
self._testLearning(True, data_format)
@parameterized.named_parameters(*DATA_FORMATS)
def testGradientTraining(self, data_format):
# TODO(b/64270657): Use gradient_checker here in addition to comparing with
# this reference implementation.
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
grad_val = np.random.random_sample(x_shape).astype(np.float32)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
epsilon = 0.001
# The TensorFlow FusedBatchNormGrad training operation takes two inputs with
    # implementation-defined values. In theory, the only correct values for these
    # inputs are the corresponding reserve_space_{1|2} outputs from the
# FusedBatchNorm training operation. However, in practice, we rely on the
# first one being mean on {C|G}PU, and the second one being variance on CPU
# and inverse(sqrt(variance + epsilon)) on GPU (we test this assumption
# separately).
reserve_space_1_val = mean_val
if self.device == "XLA_GPU":
reserve_space_2_val = np.reciprocal(np.sqrt(var_val + epsilon))
else:
reserve_space_2_val = var_val
data_format_src = "NHWC"
grad_x_ref, grad_scale_ref, grad_offset_ref = self._reference_grad(
x_val, grad_val, scale_val, mean_val, var_val, epsilon, data_format_src)
with self.cached_session() as sess, self.test_scope():
grad_val_converted = test_utils.ConvertBetweenDataFormats(
grad_val, data_format_src, data_format)
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
grad_x_ref_converted = test_utils.ConvertBetweenDataFormats(
grad_x_ref, data_format_src, data_format)
grad = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="grad")
x = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
reserve_space_1 = array_ops.placeholder(
np.float32, shape=scale_shape, name="reserve_space_1")
reserve_space_2 = array_ops.placeholder(
np.float32, shape=scale_shape, name="reserve_space_2")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
grad_x, grad_scale, grad_offset, _, _ = gen_nn_ops.fused_batch_norm_grad(
grad,
x,
scale,
reserve_space_1,
reserve_space_2,
data_format=data_format,
is_training=True)
grad_x_val, grad_scale_val, grad_offset_val = sess.run(
[grad_x, grad_scale, grad_offset], {
grad: grad_val_converted,
x: x_val_converted,
reserve_space_1: reserve_space_1_val,
reserve_space_2: reserve_space_2_val,
scale: scale_val
})
self.assertAllClose(grad_x_val, grad_x_ref_converted, atol=1e-2)
self.assertAllClose(grad_scale_val, grad_scale_ref, atol=1e-2)
self.assertAllClose(grad_offset_val, grad_offset_ref, atol=1e-3)
@parameterized.named_parameters(*DATA_FORMATS)
def testGradientInference(self, data_format):
# TODO(b/64270657): Use gradient_checker here in addition to comparing with
# this reference implementation.
channel = 3
x_shape = [2, 2, 6, channel]
scale_shape = [channel]
grad_val = np.random.random_sample(x_shape).astype(np.float32)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
data_format_src = "NHWC"
with self.cached_session() as sess, self.test_scope():
grad_val_converted = test_utils.ConvertBetweenDataFormats(
grad_val, data_format_src, data_format)
x_val_converted = test_utils.ConvertBetweenDataFormats(
x_val, data_format_src, data_format)
grad = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="grad")
x = array_ops.placeholder(
np.float32, shape=x_val_converted.shape, name="x")
mean = array_ops.placeholder(np.float32, shape=scale_shape, name="mean")
var = array_ops.placeholder(np.float32, shape=scale_shape, name="var")
scale = array_ops.placeholder(np.float32, shape=scale_shape, name="scale")
with self.test_scope():
out = gen_nn_ops.fused_batch_norm_grad(
grad,
x,
scale,
mean,
var,
data_format=data_format,
is_training=False)
grad_x, grad_scale, grad_offset, _, _ = out
ref_x, ref_scale, ref_offset, _, _ = gen_nn_ops.fused_batch_norm_grad(
grad, x, scale, mean, var, data_format=data_format, is_training=False)
grad_x_val, grad_scale_val, grad_offset_val, = sess.run(
[grad_x, grad_scale, grad_offset], {
grad: grad_val_converted,
x: x_val_converted,
mean: mean_val,
var: var_val,
scale: scale_val
})
grad_x_ref, grad_scale_ref, grad_offset_ref, = sess.run(
[ref_x, ref_scale, ref_offset], {
grad: grad_val_converted,
x: x_val_converted,
mean: mean_val,
var: var_val,
scale: scale_val
})
self.assertAllClose(grad_x_val, grad_x_ref, atol=1e-2)
self.assertAllClose(grad_scale_val, grad_scale_ref, atol=1e-2)
self.assertAllClose(grad_offset_val, grad_offset_ref, atol=1e-3)
if __name__ == "__main__":
test.main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.iot.v1",
manifest={
"MqttState",
"HttpState",
"LogLevel",
"GatewayType",
"GatewayAuthMethod",
"PublicKeyCertificateFormat",
"PublicKeyFormat",
"Device",
"GatewayConfig",
"DeviceRegistry",
"MqttConfig",
"HttpConfig",
"EventNotificationConfig",
"StateNotificationConfig",
"RegistryCredential",
"X509CertificateDetails",
"PublicKeyCertificate",
"DeviceCredential",
"PublicKeyCredential",
"DeviceConfig",
"DeviceState",
},
)
class MqttState(proto.Enum):
r"""Indicates whether an MQTT connection is enabled or disabled.
See the field description for details.
"""
MQTT_STATE_UNSPECIFIED = 0
MQTT_ENABLED = 1
MQTT_DISABLED = 2
class HttpState(proto.Enum):
r"""Indicates whether DeviceService (HTTP) is enabled or disabled
for the registry. See the field description for details.
"""
HTTP_STATE_UNSPECIFIED = 0
HTTP_ENABLED = 1
HTTP_DISABLED = 2
class LogLevel(proto.Enum):
r"""**Beta Feature**
The logging verbosity for device activity. Specifies which events
should be written to logs. For example, if the LogLevel is ERROR,
only events that terminate in errors will be logged. LogLevel is
inclusive; enabling INFO logging will also enable ERROR logging.
"""
LOG_LEVEL_UNSPECIFIED = 0
NONE = 10
ERROR = 20
INFO = 30
DEBUG = 40
class GatewayType(proto.Enum):
r"""Gateway type."""
GATEWAY_TYPE_UNSPECIFIED = 0
GATEWAY = 1
NON_GATEWAY = 2
class GatewayAuthMethod(proto.Enum):
r"""The gateway authorization/authentication method. This setting
    determines how Cloud IoT Core authorizes/authenticates devices to
access the gateway.
"""
GATEWAY_AUTH_METHOD_UNSPECIFIED = 0
ASSOCIATION_ONLY = 1
DEVICE_AUTH_TOKEN_ONLY = 2
ASSOCIATION_AND_DEVICE_AUTH_TOKEN = 3
class PublicKeyCertificateFormat(proto.Enum):
r"""The supported formats for the public key."""
UNSPECIFIED_PUBLIC_KEY_CERTIFICATE_FORMAT = 0
X509_CERTIFICATE_PEM = 1
class PublicKeyFormat(proto.Enum):
r"""The supported formats for the public key."""
UNSPECIFIED_PUBLIC_KEY_FORMAT = 0
RSA_PEM = 3
RSA_X509_PEM = 1
ES256_PEM = 2
ES256_X509_PEM = 4
class Device(proto.Message):
r"""The device resource.
Attributes:
id (str):
The user-defined device identifier. The
device ID must be unique within a device
registry.
name (str):
The resource path name. For example,
``projects/p1/locations/us-central1/registries/registry0/devices/dev0``
or
``projects/p1/locations/us-central1/registries/registry0/devices/{num_id}``.
When ``name`` is populated as a response from the service,
it always ends in the device numeric ID.
num_id (int):
[Output only] A server-defined unique numeric ID for the
device. This is a more compact way to identify devices, and
it is globally unique.
credentials (Sequence[google.cloud.iot_v1.types.DeviceCredential]):
The credentials used to authenticate this device. To allow
credential rotation without interruption, multiple device
credentials can be bound to this device. No more than 3
credentials can be bound to a single device at a time. When
new credentials are added to a device, they are verified
against the registry credentials. For details, see the
description of the ``DeviceRegistry.credentials`` field.
last_heartbeat_time (google.protobuf.timestamp_pb2.Timestamp):
[Output only] The last time an MQTT ``PINGREQ`` was
received. This field applies only to devices connecting
through MQTT. MQTT clients usually only send ``PINGREQ``
messages if the connection is idle, and no other messages
have been sent. Timestamps are periodically collected and
written to storage; they may be stale by a few minutes.
last_event_time (google.protobuf.timestamp_pb2.Timestamp):
[Output only] The last time a telemetry event was received.
Timestamps are periodically collected and written to
storage; they may be stale by a few minutes.
last_state_time (google.protobuf.timestamp_pb2.Timestamp):
[Output only] The last time a state event was received.
Timestamps are periodically collected and written to
storage; they may be stale by a few minutes.
last_config_ack_time (google.protobuf.timestamp_pb2.Timestamp):
[Output only] The last time a cloud-to-device config version
acknowledgment was received from the device. This field is
only for configurations sent through MQTT.
last_config_send_time (google.protobuf.timestamp_pb2.Timestamp):
[Output only] The last time a cloud-to-device config version
was sent to the device.
blocked (bool):
If a device is blocked, connections or
requests from this device will fail. Can be used
to temporarily prevent the device from
connecting if, for example, the sensor is
generating bad data and needs maintenance.
last_error_time (google.protobuf.timestamp_pb2.Timestamp):
[Output only] The time the most recent error occurred, such
as a failure to publish to Cloud Pub/Sub. This field is the
timestamp of 'last_error_status'.
last_error_status (google.rpc.status_pb2.Status):
[Output only] The error message of the most recent error,
such as a failure to publish to Cloud Pub/Sub.
'last_error_time' is the timestamp of this field. If no
errors have occurred, this field has an empty message and
the status code 0 == OK. Otherwise, this field is expected
to have a status code other than OK.
config (google.cloud.iot_v1.types.DeviceConfig):
The most recent device configuration, which is eventually
sent from Cloud IoT Core to the device. If not present on
creation, the configuration will be initialized with an
empty payload and version value of ``1``. To update this
field after creation, use the
``DeviceManager.ModifyCloudToDeviceConfig`` method.
state (google.cloud.iot_v1.types.DeviceState):
[Output only] The state most recently received from the
device. If no state has been reported, this field is not
present.
log_level (google.cloud.iot_v1.types.LogLevel):
**Beta Feature**
The logging verbosity for device activity. If unspecified,
DeviceRegistry.log_level will be used.
metadata (Sequence[google.cloud.iot_v1.types.Device.MetadataEntry]):
The metadata key-value pairs assigned to the device. This
metadata is not interpreted or indexed by Cloud IoT Core. It
can be used to add contextual information for the device.
Keys must conform to the regular expression
[a-zA-Z][a-zA-Z0-9-_.+~%]+ and be less than 128 bytes in
length.
Values are free-form strings. Each value must be less than
or equal to 32 KB in size.
The total size of all keys and values must be less than 256
KB, and the maximum number of key-value pairs is 500.
gateway_config (google.cloud.iot_v1.types.GatewayConfig):
Gateway-related configuration and state.
"""
id = proto.Field(proto.STRING, number=1,)
name = proto.Field(proto.STRING, number=2,)
num_id = proto.Field(proto.UINT64, number=3,)
credentials = proto.RepeatedField(
proto.MESSAGE, number=12, message="DeviceCredential",
)
last_heartbeat_time = proto.Field(
proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,
)
last_event_time = proto.Field(
proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,
)
last_state_time = proto.Field(
proto.MESSAGE, number=20, message=timestamp_pb2.Timestamp,
)
last_config_ack_time = proto.Field(
proto.MESSAGE, number=14, message=timestamp_pb2.Timestamp,
)
last_config_send_time = proto.Field(
proto.MESSAGE, number=18, message=timestamp_pb2.Timestamp,
)
blocked = proto.Field(proto.BOOL, number=19,)
last_error_time = proto.Field(
proto.MESSAGE, number=10, message=timestamp_pb2.Timestamp,
)
last_error_status = proto.Field(
proto.MESSAGE, number=11, message=status_pb2.Status,
)
config = proto.Field(proto.MESSAGE, number=13, message="DeviceConfig",)
state = proto.Field(proto.MESSAGE, number=16, message="DeviceState",)
log_level = proto.Field(proto.ENUM, number=21, enum="LogLevel",)
metadata = proto.MapField(proto.STRING, proto.STRING, number=17,)
gateway_config = proto.Field(proto.MESSAGE, number=24, message="GatewayConfig",)
class GatewayConfig(proto.Message):
r"""Gateway-related configuration and state.
Attributes:
gateway_type (google.cloud.iot_v1.types.GatewayType):
Indicates whether the device is a gateway.
gateway_auth_method (google.cloud.iot_v1.types.GatewayAuthMethod):
Indicates how to authorize and/or
authenticate devices to access the gateway.
last_accessed_gateway_id (str):
[Output only] The ID of the gateway the device accessed most
recently.
last_accessed_gateway_time (google.protobuf.timestamp_pb2.Timestamp):
[Output only] The most recent time at which the device
accessed the gateway specified in ``last_accessed_gateway``.
"""
gateway_type = proto.Field(proto.ENUM, number=1, enum="GatewayType",)
gateway_auth_method = proto.Field(proto.ENUM, number=2, enum="GatewayAuthMethod",)
last_accessed_gateway_id = proto.Field(proto.STRING, number=3,)
last_accessed_gateway_time = proto.Field(
proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,
)
class DeviceRegistry(proto.Message):
r"""A container for a group of devices.
Attributes:
id (str):
The identifier of this device registry. For example,
``myRegistry``.
name (str):
The resource path name. For example,
``projects/example-project/locations/us-central1/registries/my-registry``.
event_notification_configs (Sequence[google.cloud.iot_v1.types.EventNotificationConfig]):
The configuration for notification of
telemetry events received from the device. All
telemetry events that were successfully
published by the device and acknowledged by
Cloud IoT Core are guaranteed to be delivered to
Cloud Pub/Sub. If multiple configurations match
a message, only the first matching configuration
is used. If you try to publish a device
telemetry event using MQTT without specifying a
Cloud Pub/Sub topic for the device's registry,
the connection closes automatically. If you try
to do so using an HTTP connection, an error is
returned. Up to 10 configurations may be
provided.
state_notification_config (google.cloud.iot_v1.types.StateNotificationConfig):
The configuration for notification of new
states received from the device. State updates
are guaranteed to be stored in the state
history, but notifications to Cloud Pub/Sub are
not guaranteed. For example, if permissions are
misconfigured or the specified topic doesn't
exist, no notification will be published but the
state will still be stored in Cloud IoT Core.
mqtt_config (google.cloud.iot_v1.types.MqttConfig):
The MQTT configuration for this device
registry.
http_config (google.cloud.iot_v1.types.HttpConfig):
The DeviceService (HTTP) configuration for
this device registry.
log_level (google.cloud.iot_v1.types.LogLevel):
**Beta Feature**
The default logging verbosity for activity from devices in
this registry. The verbosity level can be overridden by
Device.log_level.
credentials (Sequence[google.cloud.iot_v1.types.RegistryCredential]):
The credentials used to verify the device
credentials. No more than 10 credentials can be
bound to a single registry at a time. The
verification process occurs at the time of
device creation or update. If this field is
empty, no verification is performed. Otherwise,
the credentials of a newly created device or
added credentials of an updated device should be
signed with one of these registry credentials.
Note, however, that existing devices will never
be affected by modifications to this list of
credentials: after a device has been
successfully created in a registry, it should be
able to connect even if its registry credentials
are revoked, deleted, or modified.
"""
id = proto.Field(proto.STRING, number=1,)
name = proto.Field(proto.STRING, number=2,)
event_notification_configs = proto.RepeatedField(
proto.MESSAGE, number=10, message="EventNotificationConfig",
)
state_notification_config = proto.Field(
proto.MESSAGE, number=7, message="StateNotificationConfig",
)
mqtt_config = proto.Field(proto.MESSAGE, number=4, message="MqttConfig",)
http_config = proto.Field(proto.MESSAGE, number=9, message="HttpConfig",)
log_level = proto.Field(proto.ENUM, number=11, enum="LogLevel",)
credentials = proto.RepeatedField(
proto.MESSAGE, number=8, message="RegistryCredential",
)
class MqttConfig(proto.Message):
r"""The configuration of MQTT for a device registry.
Attributes:
mqtt_enabled_state (google.cloud.iot_v1.types.MqttState):
If enabled, allows connections using the MQTT
protocol. Otherwise, MQTT connections to this
registry will fail.
"""
mqtt_enabled_state = proto.Field(proto.ENUM, number=1, enum="MqttState",)
class HttpConfig(proto.Message):
r"""The configuration of the HTTP bridge for a device registry.
Attributes:
http_enabled_state (google.cloud.iot_v1.types.HttpState):
If enabled, allows devices to use
DeviceService via the HTTP protocol. Otherwise,
any requests to DeviceService will fail for this
registry.
"""
http_enabled_state = proto.Field(proto.ENUM, number=1, enum="HttpState",)
class EventNotificationConfig(proto.Message):
r"""The configuration for forwarding telemetry events.
Attributes:
subfolder_matches (str):
If the subfolder name matches this string
exactly, this configuration will be used. The
string must not include the leading '/'
character. If empty, all strings are matched.
This field is used only for telemetry events;
subfolders are not supported for state changes.
pubsub_topic_name (str):
A Cloud Pub/Sub topic name. For example,
``projects/myProject/topics/deviceEvents``.
"""
subfolder_matches = proto.Field(proto.STRING, number=2,)
pubsub_topic_name = proto.Field(proto.STRING, number=1,)
class StateNotificationConfig(proto.Message):
r"""The configuration for notification of new states received
from the device.
Attributes:
pubsub_topic_name (str):
A Cloud Pub/Sub topic name. For example,
``projects/myProject/topics/deviceEvents``.
"""
pubsub_topic_name = proto.Field(proto.STRING, number=1,)
class RegistryCredential(proto.Message):
r"""A server-stored registry credential used to validate device
credentials.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
public_key_certificate (google.cloud.iot_v1.types.PublicKeyCertificate):
A public key certificate used to verify the
device credentials.
This field is a member of `oneof`_ ``credential``.
"""
public_key_certificate = proto.Field(
proto.MESSAGE, number=1, oneof="credential", message="PublicKeyCertificate",
)
class X509CertificateDetails(proto.Message):
r"""Details of an X.509 certificate. For informational purposes
only.
Attributes:
issuer (str):
The entity that signed the certificate.
subject (str):
The entity the certificate and public key
belong to.
start_time (google.protobuf.timestamp_pb2.Timestamp):
The time the certificate becomes valid.
expiry_time (google.protobuf.timestamp_pb2.Timestamp):
The time the certificate becomes invalid.
signature_algorithm (str):
The algorithm used to sign the certificate.
public_key_type (str):
The type of public key in the certificate.
"""
issuer = proto.Field(proto.STRING, number=1,)
subject = proto.Field(proto.STRING, number=2,)
start_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
expiry_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
signature_algorithm = proto.Field(proto.STRING, number=5,)
public_key_type = proto.Field(proto.STRING, number=6,)
class PublicKeyCertificate(proto.Message):
r"""A public key certificate format and data.
Attributes:
format (google.cloud.iot_v1.types.PublicKeyCertificateFormat):
The certificate format.
certificate (str):
The certificate data.
x509_details (google.cloud.iot_v1.types.X509CertificateDetails):
[Output only] The certificate details. Used only for X.509
certificates.
"""
format = proto.Field(proto.ENUM, number=1, enum="PublicKeyCertificateFormat",)
certificate = proto.Field(proto.STRING, number=2,)
x509_details = proto.Field(
proto.MESSAGE, number=3, message="X509CertificateDetails",
)
class DeviceCredential(proto.Message):
r"""A server-stored device credential used for authentication.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
public_key (google.cloud.iot_v1.types.PublicKeyCredential):
A public key used to verify the signature of
JSON Web Tokens (JWTs). When adding a new device
credential, either via device creation or via
modifications, this public key credential may be
required to be signed by one of the registry
level certificates. More specifically, if the
registry contains at least one certificate, any
new device credential must be signed by one of
the registry certificates. As a result, when the
registry contains certificates, only X.509
certificates are accepted as device credentials.
However, if the registry does not contain a
certificate, self-signed certificates and public
keys will be accepted. New device credentials
must be different from every registry-level
certificate.
This field is a member of `oneof`_ ``credential``.
expiration_time (google.protobuf.timestamp_pb2.Timestamp):
[Optional] The time at which this credential becomes
invalid. This credential will be ignored for new client
authentication requests after this timestamp; however, it
will not be automatically deleted.
"""
public_key = proto.Field(
proto.MESSAGE, number=2, oneof="credential", message="PublicKeyCredential",
)
expiration_time = proto.Field(
proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp,
)
class PublicKeyCredential(proto.Message):
r"""A public key format and data.
Attributes:
format (google.cloud.iot_v1.types.PublicKeyFormat):
The format of the key.
key (str):
The key data.
"""
format = proto.Field(proto.ENUM, number=1, enum="PublicKeyFormat",)
key = proto.Field(proto.STRING, number=2,)
class DeviceConfig(proto.Message):
r"""The device configuration. Eventually delivered to devices.
Attributes:
version (int):
[Output only] The version of this update. The version number
is assigned by the server, and is always greater than 0
after device creation. The version must be 0 on the
``CreateDevice`` request if a ``config`` is specified; the
response of ``CreateDevice`` will always have a value of 1.
cloud_update_time (google.protobuf.timestamp_pb2.Timestamp):
[Output only] The time at which this configuration version
was updated in Cloud IoT Core. This timestamp is set by the
server.
device_ack_time (google.protobuf.timestamp_pb2.Timestamp):
[Output only] The time at which Cloud IoT Core received the
acknowledgment from the device, indicating that the device
has received this configuration version. If this field is
not present, the device has not yet acknowledged that it
received this version. Note that when the config was sent to
the device, many config versions may have been available in
Cloud IoT Core while the device was disconnected, and on
connection, only the latest version is sent to the device.
Some versions may never be sent to the device, and therefore
are never acknowledged. This timestamp is set by Cloud IoT
Core.
binary_data (bytes):
The device configuration data.
"""
version = proto.Field(proto.INT64, number=1,)
cloud_update_time = proto.Field(
proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,
)
device_ack_time = proto.Field(
proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,
)
binary_data = proto.Field(proto.BYTES, number=4,)
class DeviceState(proto.Message):
r"""The device state, as reported by the device.
Attributes:
update_time (google.protobuf.timestamp_pb2.Timestamp):
[Output only] The time at which this state version was
updated in Cloud IoT Core.
binary_data (bytes):
The device state data.
"""
update_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
binary_data = proto.Field(proto.BYTES, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
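# Illustrative construction sketch (an assumption of standard proto-plus keyword
# initialization; the values below are hypothetical examples and this block is
# not part of the generated API surface).
if __name__ == "__main__":
    _example_device = Device(
        id="dev0",
        blocked=False,
        gateway_config=GatewayConfig(gateway_type=GatewayType.NON_GATEWAY),
    )
    print(_example_device.id)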
|