repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values)
---|---|---|---|---|---
lanselin/pysal | pysal/contrib/handler/tests/test_error_sp.py | 6 | 7995 | import unittest
import scipy
import pysal
import numpy as np
from pysal.spreg import error_sp as SP
from pysal.contrib.handler import Model
from functools import partial
GM_Error = partial(Model, mtype='GM_Error')
GM_Endog_Error = partial(Model, mtype='GM_Endog_Error')
GM_Combo = partial(Model, mtype='GM_Combo')
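# Model(mtype=...) is the contrib handler's generic dispatcher; these partials are
# assumed to stand in for the corresponding spreg estimators (GM_Error,
# GM_Endog_Error, GM_Combo), and the tests below check them against the same
# expected values used for the spreg classes.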
class TestGMError(unittest.TestCase):
def setUp(self):
db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
self.X = np.array(X).T
self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = GM_Error(self.y, self.X, self.w)
betas = np.array([[ 47.94371455], [ 0.70598088], [ -0.55571746], [ 0.37230161]])
np.testing.assert_allclose(reg.betas,betas,4)
u = np.array([ 27.4739775])
np.testing.assert_allclose(reg.u[0],u,4)
predy = np.array([ 52.9930255])
np.testing.assert_allclose(reg.predy[0],predy,4)
n = 49
np.testing.assert_allclose(reg.n,n,4)
k = 3
np.testing.assert_allclose(reg.k,k,4)
y = np.array([ 80.467003])
np.testing.assert_allclose(reg.y[0],y,4)
x = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.x[0],x,4)
e = np.array([ 31.89620319])
np.testing.assert_allclose(reg.e_filtered[0],e,4)
predy = np.array([ 52.9930255])
np.testing.assert_allclose(reg.predy[0],predy,4)
my = 38.43622446938776
np.testing.assert_allclose(reg.mean_y,my)
sy = 18.466069465206047
np.testing.assert_allclose(reg.std_y,sy)
vm = np.array([[ 1.51884943e+02, -5.37622793e+00, -1.86970286e+00], [ -5.37622793e+00, 2.48972661e-01, 5.26564244e-02], [ -1.86970286e+00, 5.26564244e-02, 3.18930650e-02]])
np.testing.assert_allclose(reg.vm,vm,4)
sig2 = 191.73716465732355
np.testing.assert_allclose(reg.sig2,sig2,4)
pr2 = 0.3495097406012179
np.testing.assert_allclose(reg.pr2,pr2)
std_err = np.array([ 12.32416094, 0.4989716 , 0.1785863 ])
np.testing.assert_allclose(reg.std_err,std_err,4)
z_stat = np.array([[ 3.89022140e+00, 1.00152805e-04], [ 1.41487186e+00, 1.57106070e-01], [ -3.11175868e+00, 1.85976455e-03]])
np.testing.assert_allclose(reg.z_stat,z_stat,4)
@unittest.skipIf(int(scipy.__version__.split(".")[1]) < 11,
"Maximum Likelihood requires SciPy version 11 or newer.")
class TestGMEndogError(unittest.TestCase):
def setUp(self):
db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
self.X = np.array(X).T
yd = []
yd.append(db.by_col("CRIME"))
self.yd = np.array(yd).T
q = []
q.append(db.by_col("DISCBD"))
self.q = np.array(q).T
self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = GM_Endog_Error(self.y, self.X, self.yd, self.q, self.w)
betas = np.array([[ 55.36095292], [ 0.46411479], [ -0.66883535], [ 0.38989939]])
np.testing.assert_allclose(reg.betas,betas,4)
u = np.array([ 26.55951566])
np.testing.assert_allclose(reg.u[0],u,4)
e = np.array([ 31.23925425])
np.testing.assert_allclose(reg.e_filtered[0],e,4)
predy = np.array([ 53.9074875])
np.testing.assert_allclose(reg.predy[0],predy,4)
n = 49
np.testing.assert_allclose(reg.n,n)
k = 3
np.testing.assert_allclose(reg.k,k)
y = np.array([ 80.467003])
np.testing.assert_allclose(reg.y[0],y,4)
x = np.array([ 1. , 19.531])
np.testing.assert_allclose(reg.x[0],x,4)
yend = np.array([ 15.72598])
np.testing.assert_allclose(reg.yend[0],yend,4)
z = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.z[0],z,4)
my = 38.43622446938776
np.testing.assert_allclose(reg.mean_y,my)
sy = 18.466069465206047
np.testing.assert_allclose(reg.std_y,sy)
vm = np.array([[ 5.29158422e+02, -1.57833675e+01, -8.38021080e+00],
[ -1.57833675e+01, 5.40235041e-01, 2.31120327e-01],
[ -8.38021080e+00, 2.31120327e-01, 1.44977385e-01]])
np.testing.assert_allclose(reg.vm,vm,4)
pr2 = 0.346472557570858
np.testing.assert_allclose(reg.pr2,pr2)
sig2 = 192.50022721929574
np.testing.assert_allclose(reg.sig2,sig2,4)
std_err = np.array([ 23.003401 , 0.73500657, 0.38075777])
np.testing.assert_allclose(reg.std_err,std_err,4)
z_stat = np.array([[ 2.40664208, 0.01609994], [ 0.63144305, 0.52775088], [-1.75659016, 0.07898769]])
np.testing.assert_allclose(reg.z_stat,z_stat,4)
@unittest.skipIf(int(scipy.__version__.split(".")[1]) < 11,
"Maximum Likelihood requires SciPy version 11 or newer.")
class TestGMCombo(unittest.TestCase):
def setUp(self):
db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
self.X = np.array(X).T
self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
# Only spatial lag
reg = GM_Combo(self.y, self.X, w=self.w)
e_reduced = np.array([ 28.18617481])
np.testing.assert_allclose(reg.e_pred[0],e_reduced,4)
predy_e = np.array([ 52.28082782])
np.testing.assert_allclose(reg.predy_e[0],predy_e,4)
betas = np.array([[ 57.61123515],[ 0.73441313], [ -0.59459416], [ -0.21762921], [ 0.54732051]])
np.testing.assert_allclose(reg.betas,betas,4)
u = np.array([ 25.57932637])
np.testing.assert_allclose(reg.u[0],u,4)
e_filtered = np.array([ 31.65374945])
np.testing.assert_allclose(reg.e_filtered[0],e_filtered,4)
predy = np.array([ 54.88767685])
np.testing.assert_allclose(reg.predy[0],predy,4)
n = 49
np.testing.assert_allclose(reg.n,n)
k = 4
np.testing.assert_allclose(reg.k,k)
y = np.array([ 80.467003])
np.testing.assert_allclose(reg.y[0],y,4)
x = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.x[0],x,4)
yend = np.array([ 35.4585005])
np.testing.assert_allclose(reg.yend[0],yend,4)
z = np.array([ 1. , 19.531 , 15.72598 , 35.4585005])
np.testing.assert_allclose(reg.z[0],z,4)
my = 38.43622446938776
np.testing.assert_allclose(reg.mean_y,my)
sy = 18.466069465206047
np.testing.assert_allclose(reg.std_y,sy)
vm = np.array([ 5.22438333e+02, 2.38012875e-01, 3.20924173e-02,
2.15753579e-01])
np.testing.assert_allclose(np.diag(reg.vm),vm,4)
sig2 = 181.78650186468832
np.testing.assert_allclose(reg.sig2,sig2,4)
pr2 = 0.3018280166937799
np.testing.assert_allclose(reg.pr2,pr2,4)
pr2_e = 0.3561355586759414
np.testing.assert_allclose(reg.pr2_e,pr2_e,4)
std_err = np.array([ 22.85692222, 0.48786559, 0.17914356, 0.46449318])
np.testing.assert_allclose(reg.std_err,std_err,4)
z_stat = np.array([[ 2.52051597e+00, 1.17182922e-02], [ 1.50535954e+00, 1.32231664e-01], [ -3.31909311e+00, 9.03103123e-04], [ -4.68530506e-01, 6.39405261e-01]])
np.testing.assert_allclose(reg.z_stat,z_stat,4)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
40223145c2g18/40223145 | static/Brython3.1.0-20150301-090019/Lib/pydoc.py | 637 | 102017 | #!/usr/bin/env python3
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on the given port on the
local machine. Port number 0 can be used to get an arbitrary unused port.
Run "pydoc -b" to start an HTTP server on an arbitrary unused port and
open a Web browser to interactively browse documentation. The -p option
can be used with the -b option to explicitly specify the server port.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/X.Y/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__all__ = ['help']
__author__ = "Ka-Ping Yee <[email protected]>"
__date__ = "26 February 2001"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import builtins
import imp
import importlib.machinery
#brython fix me
import inspect
import io
import os
#brython fix me
#import pkgutil
import platform
import re
import sys
import time
import tokenize
import warnings
from collections import deque
from reprlib import Repr
#fix me brython
#from traceback import extract_tb, format_exception_only
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', result.rstrip()) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = doc.strip().split('\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not lines[1].rstrip():
return lines[0], '\n'.join(lines[2:])
return '', '\n'.join(lines)
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = pairs[1].join(text.split(pairs[0]))
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
#fix me brython
#return _re_stripid.sub(r'\1', text)
return text
def _is_some_method(obj):
return (inspect.isfunction(obj) or
inspect.ismethod(obj) or
inspect.isbuiltin(obj) or
inspect.ismethoddescriptor(obj))
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant or internal.
if name in {'__author__', '__builtins__', '__cached__', '__credits__',
'__date__', '__doc__', '__file__', '__initializing__',
'__loader__', '__module__', '__name__', '__package__',
'__path__', '__qualname__', '__slots__', '__version__'}:
return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return True
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
results = []
for (name, kind, cls, value) in inspect.classify_class_attrs(object):
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
results.append((name, kind, cls, value))
return results
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not line.strip():
line = file.readline()
if not line: break
line = line.strip()
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not line.strip():
line = file.readline()
if not line: break
result = line.split('"""')[0].strip()
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (None, None))
if lastupdate is None or lastupdate < mtime:
try:
file = tokenize.open(filename)
except IOError:
# module can't be opened, so skip it
return None
binary_suffixes = importlib.machinery.BYTECODE_SUFFIXES[:]
binary_suffixes += importlib.machinery.EXTENSION_SUFFIXES[:]
if any(filename.endswith(x) for x in binary_suffixes):
# binary modules have to be imported
file.close()
if any(filename.endswith(x) for x in
importlib.machinery.BYTECODE_SUFFIXES):
loader = importlib.machinery.SourcelessFileLoader('__temp__',
filename)
else:
loader = importlib.machinery.ExtensionFileLoader('__temp__',
filename)
try:
module = loader.load_module('__temp__')
except:
return None
result = (module.__doc__ or '').splitlines()[0]
del sys.modules['__temp__']
else:
# text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
self.filename = filename
self.exc, self.value, self.tb = exc_info
def __str__(self):
exc = self.exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
with open(path, 'rb') as file:
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.seek(0)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Remove the module from sys.modules and re-import to try
# and avoid problems with partially loaded modules.
# Also remove any submodules because they won't appear
# in the newly loaded module's namespace if they're already
# in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
#fix me brython
#elif exc is ImportError and value.name == path:
elif exc is ImportError and str(value) == str(path):
# No such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in path.split('.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
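# For example, safeimport('xml.dom.minidom') is expected to return the minidom
# submodule itself (not the xml package), while a name that cannot be imported
# at all simply yields None.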
# ---------------------------------------------------- formatter base class
class Doc:
PYTHONDOCS = os.environ.get("PYTHONDOCS",
"http://docs.python.org/%d.%d/library"
% sys.version_info[:2])
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError(message)
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS", self.PYTHONDOCS)
basedir = os.path.join(sys.base_exec_prefix, "lib",
"python%d.%d" % sys.version_info[:2])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'_thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
else:
docloc = os.path.join(docloc, object.__name__ + ".html")
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
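# Sketch of the intended behaviour: escape('a < b & c') -> 'a &lt; b &amp; c',
# since '&' is replaced first and the inserted entities are left untouched.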
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + '_'.join(type(x).__name__.split())
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
def section(self, title, fgcol, bgcol, contents, width=6,
prelude='', marginalia=None, gap='&nbsp;'):
"""Format a section with a heading."""
if marginalia is None:
marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(text.expandtabs())
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
' ', '&nbsp;', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, modpkginfo):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = modpkginfo
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def filelink(self, url, path):
"""Make a link to source file."""
return '<a href="file:%s">%s</a>' % (url, path)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + ', '.join(parents) + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = name.split('.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
('.'.join(parts[:i+1]), parts[i]))
linkedname = '.'.join(links + parts[-1:])
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = self.filelink(url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % ', '.join(info)
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Reference</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda t: self.modulelink(t[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = [value for (key, value) in classes]
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', ' '.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', ' '.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', '<br>\n'.join(contents))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
print('docclass')
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value, name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if callable(value) or inspect.isdatadescriptor(value):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name, obj=object)]
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
attrs.sort(key=lambda t: t[0])
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % ', '.join(parents)
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % self.classlink(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.__func__
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
args, varargs, kwonlyargs, kwdefaults, varkw, defaults, ann = \
inspect.getfullargspec(object)
argspec = inspect.formatargspec(
args, varargs, kwonlyargs, kwdefaults, varkw, defaults, ann,
formatvalue=self.formatvalue,
formatannotation=inspect.formatannotationrelativeto(object))
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
# XXX lambda's won't usually have func_annotations['return']
# since the syntax doesn't support it, but it is possible.
# So removing parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name):
# ignore a module if its name contains a surrogate character
continue
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
#def repr1(self, x, level):
# if hasattr(type(x), '__name__'):
# methodname = 'repr_' + '_'.join(type(x).__name__.split())
# if hasattr(self, methodname):
# return getattr(self, methodname)(x, level)
# return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return ''.join(ch + '\b' + ch for ch in text)
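# Overstriking ("X\bX" for each character) is the classic pager convention:
# programs such as less and more render it as bold, and plain() below strips
# it again for plain-text output.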
def indent(self, text, prefix='    '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = [prefix + line for line in text.split('\n')]
if lines: lines[-1] = lines[-1].rstrip()
return '\n'.join(lines)
def section(self, title, contents):
"""Format a section with a given heading."""
clean_contents = self.indent(contents).rstrip()
return self.bold(title) + '\n' + clean_contents + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = (classname(c, modname) for c in bases)
result = result + '(%s)' % ', '.join(parents)
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + '    ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
all = getattr(object, '__all__', None)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE REFERENCE', docloc + """
The following documentation is automatically generated from the Python
source files. It may be incomplete, incorrect or include features that
are considered implementation detail and may vary between Python
implementations. When in doubt, consult the module reference at the
location listed above.
""")
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', '\n'.join(modpkgs))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', '\n'.join(submodules))
if classes:
classlist = [value for key, value in classes]
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', '\n'.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', '\n'.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', '\n'.join(contents))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % ', '.join(parents)
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push('    ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value,
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if callable(value) or inspect.isdatadescriptor(value):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name, obj=object)]
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(contents.rstrip(), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % classname(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.__func__
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann = \
inspect.getfullargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, ann,
formatvalue=self.formatvalue,
formatannotation=inspect.formatannotationrelativeto(object))
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
# XXX lambda's won't usually have func_annotations['return']
# since the syntax doesn't support it, but it is possible.
# So removing parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and self.indent(doc).rstrip() + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
class _PlainTextDoc(TextDoc):
"""Subclass of TextDoc which overrides string styling"""
def bold(self, text):
return text
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
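# pager() intentionally replaces itself on first use: getpager() probes the
# environment once, and subsequent calls go directly to the chosen pager.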
def getpager():
"""Decide what method to use for paging through text."""
if not hasattr(sys.stdout, "isatty"):
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
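# For example, plain('N\bNA\bAM\bME\bE') -> 'NAME', undoing the overstrike
# emphasis produced by TextDoc.bold().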
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(text)
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(text)
file.close()
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = plain(text).split('\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
r = inc = os.environ.get('LINES', 25) - 1
sys.stdout.write('\n'.join(lines[:inc]) + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
sys.stdout.write('\r          \r')
break
elif c in ('\r', '\n'):
sys.stdout.write('\r          \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + '\n'.join(lines[r:r+inc]) + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(text))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
return type(thing).__name__
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in path.split('.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport('.'.join(parts[:n+1]), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
else:
object = builtins
for part in parts[n:]:
try:
object = getattr(object, part)
except AttributeError:
return None
return object
# --------------------------------------- interactive interpreter interface
text = TextDoc()
plaintext = _PlainTextDoc()
html = HTMLDoc()
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError('no Python documentation found for %r' % thing)
return object, thing
else:
name = getattr(thing, '__name__', None)
return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0,
renderer=None):
"""Render text documentation, given an object or a path to an object."""
if renderer is None:
renderer = text
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + renderer.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0,
output=None):
"""Display text documentation, given an object or a path to an object."""
try:
if output is None:
pager(render_doc(thing, title, forceload))
else:
output.write(render_doc(thing, title, forceload, plaintext))
except (ImportError, ErrorDuringImport) as value:
print(value)
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w', encoding='utf-8')
file.write(page)
file.close()
print('wrote', name + '.html')
except (ImportError, ErrorDuringImport) as value:
print(value)
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/sphinxext/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'False': '',
'None': '',
'True': '',
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'nonlocal NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'nonlocal': ('nonlocal', 'global NAMESPACES'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "b'", '"""', '"', 'r"', 'b"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.items():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS '
'NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'CONVERSIONS': ('conversions', ''),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS '
'LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
#fix me brython
self.input = self._input or sys.stdin
self.output = self._output or sys.stdout
#fix me brython
#input = property(lambda self: self._input or sys.stdin)
#output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = replace(request, '"', '', "'", '').strip()
if request.lower() in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using input() when appropriate."""
if self.input is sys.stdin:
return input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(request.split()[1])
elif request in self.symbols: self.showsymbol(request)
elif request in ['True', 'False', 'None']:
# special case these keywords since they are objects too
doc(eval(request), 'Help on %s:')
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:', output=self._output)
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:', output=self._output)
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the interactive help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*2))
def list(self, items, columns=4, width=80):
items = list(sorted(items))
colw = width // columns
rows = (len(items) + columns - 1) // columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw - 1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(doc.strip() + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import formatter
buffer = io.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + ', '.join(xrefs.split()) + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def _gettopic(self, topic, more_xrefs=''):
"""Return unbuffered tuple of (topic, xrefs).
If an error occurs here, the exception is caught and displayed by
the url handler.
This function duplicates the showtopic method but returns its
result directly so it can be formatted for display in an html page.
"""
try:
import pydoc_data.topics
except ImportError:
return('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''' , '')
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
raise ValueError('could not find topic')
if isinstance(target, str):
return self._gettopic(target, more_xrefs)
label, xrefs = target
doc = pydoc_data.topics.topics[label]
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
return doc, xrefs
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if modname.find('.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()
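# Illustrative usage sketch (added for exposition; not part of the original
# pydoc module). The module-level Helper instance can be called with a request
# directly, or with no argument to enter the interactive help> prompt. Topic
# pages assume pydoc_data.topics is importable; the requests are examples.
def _helper_example():
    help('keywords')   # list every Python keyword
    help('LOOPING')    # page through the LOOPING topic and its cross references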
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = key.lower()
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
name = __import__(modname).__doc__ or ''
desc = name.split('\n')[0]
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
try:
loader = importer.find_module(modname)
except SyntaxError:
# raised by tests for bad coding cookies or BOM
continue
if hasattr(loader, 'get_source'):
try:
source = loader.get_source(modname)
except Exception:
if onerror:
onerror(modname)
continue
desc = source_synopsis(io.StringIO(source)) or ''
if hasattr(loader, 'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
try:
module = loader.load_module(modname)
except ImportError:
if onerror:
onerror(modname)
continue
desc = (module.__doc__ or '').splitlines()[0]
path = getattr(module,'__file__',None)
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print(modname, desc and '- ' + desc)
def onerror(modname):
pass
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
# --------------------------------------- enhanced Web browser interface
def _start_server(urlhandler, port):
"""Start an HTTP server thread on a specific port.
Start an HTML/text server thread, so HTML or text documents can be
browsed dynamically and interactively with a Web browser. Example use:
>>> import time
>>> import pydoc
Define a URL handler. To determine what the client is asking
for, check the URL and content_type.
Then get or generate some text or HTML code and return it.
>>> def my_url_handler(url, content_type):
... text = 'the URL sent was: (%s, %s)' % (url, content_type)
... return text
Start server thread on port 0.
If you use port 0, the server will pick a random port number.
You can then use serverthread.port to get the port number.
>>> port = 0
>>> serverthread = pydoc._start_server(my_url_handler, port)
Check that the server is really started. If it is, open browser
and get first page. Use serverthread.url as the starting page.
>>> if serverthread.serving:
... import webbrowser
The next two lines are commented out so a browser doesn't open if
doctest is run on this module.
#... webbrowser.open(serverthread.url)
#True
Let the server do its thing. We just need to monitor its status.
Use time.sleep so the loop doesn't hog the CPU.
>>> starttime = time.time()
>>> timeout = 1 #seconds
This is a short timeout for testing purposes.
>>> while serverthread.serving:
... time.sleep(.01)
... if serverthread.serving and time.time() - starttime > timeout:
... serverthread.stop()
... break
Print any errors that may have occurred.
>>> print(serverthread.error)
None
"""
import http.server
import email.message
import select
import threading
class DocHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
"""Process a request from an HTML browser.
The URL received is in self.path.
Get an HTML page from self.urlhandler and send it.
"""
if self.path.endswith('.css'):
content_type = 'text/css'
else:
content_type = 'text/html'
self.send_response(200)
self.send_header('Content-Type', '%s; charset=UTF-8' % content_type)
self.end_headers()
self.wfile.write(self.urlhandler(
self.path, content_type).encode('utf-8'))
def log_message(self, *args):
# Don't log messages.
pass
class DocServer(http.server.HTTPServer):
def __init__(self, port, callback):
self.host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost'
self.address = ('', port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
self.quit = False
def serve_until_quit(self):
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd:
self.handle_request()
self.server_close()
def server_activate(self):
self.base.server_activate(self)
if self.callback:
self.callback(self)
class ServerThread(threading.Thread):
def __init__(self, urlhandler, port):
self.urlhandler = urlhandler
self.port = int(port)
threading.Thread.__init__(self)
self.serving = False
self.error = None
def run(self):
"""Start the server."""
try:
DocServer.base = http.server.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = email.message.Message
DocHandler.urlhandler = staticmethod(self.urlhandler)
docsvr = DocServer(self.port, self.ready)
self.docserver = docsvr
docsvr.serve_until_quit()
except Exception as e:
self.error = e
def ready(self, server):
self.serving = True
self.host = server.host
self.port = server.server_port
self.url = 'http://%s:%d/' % (self.host, self.port)
def stop(self):
"""Stop the server and this thread nicely"""
self.docserver.quit = True
self.serving = False
self.url = None
thread = ServerThread(urlhandler, port)
thread.start()
# Wait until thread.serving is True to make sure we are
# really up before returning.
while not thread.error and not thread.serving:
time.sleep(.01)
return thread
def _url_handler(url, content_type="text/html"):
"""The pydoc url handler for use with the pydoc server.
If the content_type is 'text/css', the _pydoc.css style
    sheet is read and returned if it exists.
If the content_type is 'text/html', then the result of
get_html_page(url) is returned.
"""
class _HTMLDoc(HTMLDoc):
def page(self, title, contents):
"""Format an HTML page."""
css_path = "pydoc_data/_pydoc.css"
css_link = (
'<link rel="stylesheet" type="text/css" href="%s">' %
css_path)
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Pydoc: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
%s</head><body bgcolor="#f0f0f8">%s<div style="clear:both;padding-top:.5em;">%s</div>
</body></html>''' % (title, css_link, html_navbar(), contents)
def filelink(self, url, path):
return '<a href="getfile?key=%s">%s</a>' % (url, path)
html = _HTMLDoc()
def html_navbar():
version = html.escape("%s [%s, %s]" % (platform.python_version(),
platform.python_build()[0],
platform.python_compiler()))
return """
<div style='float:left'>
Python %s<br>%s
</div>
<div style='float:right'>
<div style='text-align:center'>
<a href="index.html">Module Index</a>
: <a href="topics.html">Topics</a>
: <a href="keywords.html">Keywords</a>
</div>
<div>
<form action="get" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Get">
</form>
<form action="search" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Search">
</form>
</div>
</div>
""" % (version, html.escape(platform.platform(terse=True)))
def html_index():
"""Module Index page."""
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
names = [name for name in sys.builtin_module_names
if name != '__main__']
contents = html.multicolumn(names, bltinlink)
contents = [heading, '<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
contents.append(html.index(dir, seen))
contents.append(
'<p align=right><font color="#909090" face="helvetica,'
'arial"><strong>pydoc</strong> by Ka-Ping Yee'
'<[email protected]></font>')
return 'Index of Modules', ''.join(contents)
def html_search(key):
"""Search results page."""
# scan for modules
search_result = []
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
search_result.append((modname, desc and '- ' + desc))
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key)
# format page
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
results = []
heading = html.heading(
'<big><big><strong>Search Results</strong></big></big>',
'#ffffff', '#7799ee')
for name, desc in search_result:
results.append(bltinlink(name) + desc)
contents = heading + html.bigsection(
'key = %s' % key, '#ffffff', '#ee77aa', '<br>'.join(results))
return 'Search Results', contents
def html_getfile(path):
"""Get and display a source file listing safely."""
path = path.replace('%20', ' ')
with tokenize.open(path) as fp:
lines = html.escape(fp.read())
body = '<pre>%s</pre>' % lines
heading = html.heading(
'<big><big><strong>File Listing</strong></big></big>',
'#ffffff', '#7799ee')
contents = heading + html.bigsection(
'File: %s' % path, '#ffffff', '#ee77aa', body)
return 'getfile %s' % path, contents
def html_topics():
"""Index of topic texts available."""
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.topics.keys())
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Topics', '#ffffff', '#ee77aa', contents)
return 'Topics', contents
def html_keywords():
"""Index of keywords."""
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.keywords.keys())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Keywords', '#ffffff', '#ee77aa', contents)
return 'Keywords', contents
def html_topicpage(topic):
"""Topic or keyword help page."""
buf = io.StringIO()
htmlhelp = Helper(buf, buf)
contents, xrefs = htmlhelp._gettopic(topic)
if topic in htmlhelp.keywords:
title = 'KEYWORD'
else:
title = 'TOPIC'
heading = html.heading(
'<big><big><strong>%s</strong></big></big>' % title,
'#ffffff', '#7799ee')
contents = '<pre>%s</pre>' % html.markup(contents)
contents = html.bigsection(topic , '#ffffff','#ee77aa', contents)
if xrefs:
xrefs = sorted(xrefs.split())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
xrefs = html.multicolumn(xrefs, bltinlink)
xrefs = html.section('Related help topics: ',
'#ffffff', '#ee77aa', xrefs)
return ('%s %s' % (title, topic),
''.join((heading, contents, xrefs)))
def html_getobj(url):
obj = locate(url, forceload=1)
if obj is None and url != 'None':
raise ValueError('could not find object')
title = describe(obj)
content = html.document(obj, url)
return title, content
def html_error(url, exc):
heading = html.heading(
'<big><big><strong>Error</strong></big></big>',
'#ffffff', '#7799ee')
contents = '<br>'.join(html.escape(line) for line in
format_exception_only(type(exc), exc))
contents = heading + html.bigsection(url, '#ffffff', '#bb0000',
contents)
return "Error - %s" % url, contents
def get_html_page(url):
"""Generate an HTML page for url."""
complete_url = url
if url.endswith('.html'):
url = url[:-5]
try:
if url in ("", "index"):
title, content = html_index()
elif url == "topics":
title, content = html_topics()
elif url == "keywords":
title, content = html_keywords()
elif '=' in url:
op, _, url = url.partition('=')
if op == "search?key":
title, content = html_search(url)
elif op == "getfile?key":
title, content = html_getfile(url)
elif op == "topic?key":
# try topics first, then objects.
try:
title, content = html_topicpage(url)
except ValueError:
title, content = html_getobj(url)
elif op == "get?key":
# try objects first, then topics.
if url in ("", "index"):
title, content = html_index()
else:
try:
title, content = html_getobj(url)
except ValueError:
title, content = html_topicpage(url)
else:
raise ValueError('bad pydoc url')
else:
title, content = html_getobj(url)
except Exception as exc:
# Catch any errors and display them in an error page.
title, content = html_error(complete_url, exc)
return html.page(title, content)
if url.startswith('/'):
url = url[1:]
if content_type == 'text/css':
path_here = os.path.dirname(os.path.realpath(__file__))
css_path = os.path.join(path_here, url)
with open(css_path) as fp:
return ''.join(fp.readlines())
elif content_type == 'text/html':
return get_html_page(url)
# Errors outside the url handler are caught by the server.
raise TypeError('unknown content type %r for url %s' % (content_type, url))
def browse(port=0, *, open_browser=True):
"""Start the enhanced pydoc Web server and open a Web browser.
Use port '0' to start the server on an arbitrary port.
Set open_browser to False to suppress opening a browser.
"""
import webbrowser
serverthread = _start_server(_url_handler, port)
if serverthread.error:
print(serverthread.error)
return
if serverthread.serving:
server_help_msg = 'Server commands: [b]rowser, [q]uit'
if open_browser:
webbrowser.open(serverthread.url)
try:
print('Server ready at', serverthread.url)
print(server_help_msg)
while serverthread.serving:
cmd = input('server> ')
cmd = cmd.lower()
if cmd == 'q':
break
elif cmd == 'b':
webbrowser.open(serverthread.url)
else:
print(server_help_msg)
except (KeyboardInterrupt, EOFError):
print()
finally:
if serverthread.serving:
serverthread.stop()
print('Server stopped')
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and x.find(os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage(Exception): pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'bk:p:w')
writing = False
start_server = False
open_browser = False
port = None
for opt, val in opts:
if opt == '-b':
start_server = True
open_browser = True
if opt == '-k':
apropos(val)
return
if opt == '-p':
start_server = True
port = val
if opt == '-w':
writing = True
if start_server:
if port is None:
port = 0
browse(port, open_browser=open_browser)
return
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print('file %r does not exist' % arg)
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport as value:
print(value)
except (getopt.error, BadUsage):
cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print("""pydoc - the Python documentation tool
{cmd} <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '{sep}', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
{cmd} -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
{cmd} -p <port>
Start an HTTP server on the given port on the local machine. Port
number 0 can be used to get an arbitrary unused port.
{cmd} -b
Start an HTTP server on an arbitrary unused port and open a Web browser
to interactively browse documentation. The -p option can be used with
the -b option to explicitly specify the server port.
{cmd} -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '{sep}', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""".format(cmd=cmd, sep=os.sep))
if __name__ == '__main__':
cli()
| gpl-3.0 |
kobejean/tensorflow | tensorflow/contrib/metrics/python/ops/metric_ops.py | 5 | 178391 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains metric-computing operations on streamed tensors.
Module documentation, including "@@" callouts, should be put in
third_party/tensorflow/contrib/metrics/__init__.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import metrics_impl
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.distributions.normal import Normal
from tensorflow.python.util.deprecation import deprecated
# Epsilon constant used to represent extremely small quantity.
_EPSILON = 1e-7
def _safe_div(numerator, denominator, name):
"""Divides two values, returning 0 if the denominator is <= 0.
Args:
numerator: A real `Tensor`.
denominator: A real `Tensor`, with dtype matching `numerator`.
name: Name for the returned op.
Returns:
0 if `denominator` <= 0, else `numerator` / `denominator`
"""
return array_ops.where(
math_ops.greater(denominator, 0),
math_ops.truediv(numerator, denominator),
0,
name=name)
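# Illustrative sketch (added for exposition; not part of the original file).
# _safe_div is applied to scalar counts in this module; a non-positive
# denominator selects the guarded 0 branch instead of producing inf or nan.
def _safe_div_example():
  from tensorflow.python.client import session as session_lib
  with ops.Graph().as_default():
    four = ops.convert_to_tensor(4.0)
    ok = _safe_div(four, ops.convert_to_tensor(2.0), 'ok_div')        # 4 / 2
    guarded = _safe_div(four, ops.convert_to_tensor(0.0), 'guarded')  # 4 / 0
    with session_lib.Session() as sess:
      return sess.run([ok, guarded])  # [2.0, 0.0]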
@deprecated(None, 'Please switch to tf.metrics.true_positives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_true_positives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.true_positives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
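# Illustrative sketch (added for exposition; not part of the original file).
# Every streaming metric in this file returns a (value, update_op) pair: run
# update_op once per batch to accumulate state, then read the value tensor.
# The constants below are arbitrary example data.
def _streaming_true_positives_example():
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.ops import variables
  with ops.Graph().as_default():
    predictions = ops.convert_to_tensor([True, True, False, False])
    labels = ops.convert_to_tensor([True, True, True, False])
    tp, update_op = streaming_true_positives(predictions, labels)
    with session_lib.Session() as sess:
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      return sess.run(tp)  # 2.0: two positions are True in both tensors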
@deprecated(None, 'Please switch to tf.metrics.true_negatives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_true_negatives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.true_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.false_positives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_false_positives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of false positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.false_positives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.false_negatives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_false_negatives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the total number of false negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.false_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.mean')
def streaming_mean(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the (weighted) mean of the given values.
The `streaming_mean` function creates two local variables, `total` and `count`
that are used to compute the average of `values`. This average is ultimately
returned as `mean` which is an idempotent operation that simply divides
`total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
must be broadcastable to `values` (i.e., all dimensions must be either
`1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A `Tensor` representing the current mean, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.mean(
values=values,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
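# Illustrative sketch (added for exposition; not part of the original file).
# It shows the running-average behaviour described above: `total` and `count`
# accumulate across update_op calls, and `mean` is total / count. The feed
# values are arbitrary example data.
def _streaming_mean_example():
  from tensorflow.python.client import session as session_lib
  from tensorflow.python.ops import variables
  with ops.Graph().as_default():
    values = array_ops.placeholder(dtypes.float32, shape=[None])
    mean, update_op = streaming_mean(values)
    with session_lib.Session() as sess:
      sess.run(variables.local_variables_initializer())
      sess.run(update_op, feed_dict={values: [1.0, 2.0, 3.0]})
      sess.run(update_op, feed_dict={values: [4.0]})
      return sess.run(mean)  # (1 + 2 + 3 + 4) / 4 = 2.5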
@deprecated(None, 'Please switch to tf.metrics.mean_tensor')
def streaming_mean_tensor(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the element-wise (weighted) mean of the given tensors.
In contrast to the `streaming_mean` function which returns a scalar with the
mean, this function returns an average tensor with the same shape as the
input tensors.
The `streaming_mean_tensor` function creates two local variables,
`total_tensor` and `count_tensor` that are used to compute the average of
`values`. This average is ultimately returned as `mean` which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
must be broadcastable to `values` (i.e., all dimensions must be either
`1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A float `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.mean_tensor(
values=values,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.accuracy. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_accuracy(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates how often `predictions` matches `labels`.
The `streaming_accuracy` function creates two local variables, `total` and
`count` that are used to compute the frequency with which `predictions`
matches `labels`. This frequency is ultimately returned as `accuracy`: an
idempotent operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `accuracy`.
Internally, an `is_correct` operation computes a `Tensor` with elements 1.0
where the corresponding elements of `predictions` and `labels` match and 0.0
otherwise. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `is_correct`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of any shape.
labels: The ground truth values, a `Tensor` whose shape matches
`predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `accuracy` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
accuracy: A `Tensor` representing the accuracy, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `accuracy`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.accuracy(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.precision. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_precision(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the precision of the predictions with respect to the labels.
The `streaming_precision` function creates two local variables,
`true_positives` and `false_positives`, that are used to compute the
precision. This value is ultimately returned as `precision`, an idempotent
operation that simply divides `true_positives` by the sum of `true_positives`
and `false_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`. `update_op` weights each prediction by the corresponding value in
`weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: Scalar float `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately and whose value matches
`precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.precision(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.recall. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_recall(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall of the predictions with respect to the labels.
The `streaming_recall` function creates two local variables, `true_positives`
and `false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` that updates these variables and returns the `recall`. `update_op`
weights each prediction by the corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: Scalar float `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately and whose value matches
`recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.recall(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_positive_rate(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the false positive rate of predictions with respect to labels.
The `false_positive_rate` function creates two local variables,
`false_positives` and `true_negatives`, that are used to compute the
false positive rate. This value is ultimately returned as
`false_positive_rate`, an idempotent operation that simply divides
`false_positives` by the sum of `false_positives` and `true_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_positive_rate`. `update_op` weights each prediction by the
corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_positive_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positive_rate: Scalar float `Tensor` with the value of
`false_positives` divided by the sum of `false_positives` and
`true_negatives`.
update_op: `Operation` that increments `false_positives` and
`true_negatives` variables appropriately and whose value matches
`false_positive_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_positive_rate',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
false_p, false_positives_update_op = metrics.false_positives(
labels=labels,
predictions=predictions,
weights=weights,
metrics_collections=None,
updates_collections=None,
name=None)
true_n, true_negatives_update_op = metrics.true_negatives(
labels=labels,
predictions=predictions,
weights=weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_fpr(fp, tn, name):
return array_ops.where(
math_ops.greater(fp + tn, 0), math_ops.div(fp, fp + tn), 0, name)
fpr = compute_fpr(false_p, true_n, 'value')
update_op = compute_fpr(false_positives_update_op, true_negatives_update_op,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fpr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fpr, update_op
def streaming_false_negative_rate(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the false negative rate of predictions with respect to labels.
The `false_negative_rate` function creates two local variables,
`false_negatives` and `true_positives`, that are used to compute the
  false negative rate. This value is ultimately returned as
`false_negative_rate`, an idempotent operation that simply divides
`false_negatives` by the sum of `false_negatives` and `true_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_negative_rate`. `update_op` weights each prediction by the
corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_negative_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negative_rate: Scalar float `Tensor` with the value of
`false_negatives` divided by the sum of `false_negatives` and
`true_positives`.
update_op: `Operation` that increments `false_negatives` and
`true_positives` variables appropriately and whose value matches
`false_negative_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_negative_rate',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
false_n, false_negatives_update_op = metrics.false_negatives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
true_p, true_positives_update_op = metrics.true_positives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_fnr(fn, tp, name):
return array_ops.where(
math_ops.greater(fn + tp, 0), math_ops.div(fn, fn + tp), 0, name)
fnr = compute_fnr(false_n, true_p, 'value')
update_op = compute_fnr(false_negatives_update_op, true_positives_update_op,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fnr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fnr, update_op
def _streaming_confusion_matrix_at_thresholds(predictions,
labels,
thresholds,
weights=None,
includes=None):
"""Computes true_positives, false_negatives, true_negatives, false_positives.
This function creates up to four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives`.
`true_positive[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `True`.
`false_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
`true_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
`false_positives[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `False`.
For estimation of these metrics over a stream of data, for each metric the
function respectively creates an `update_op` operation that updates the
variable and returns its value.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `Tensor` whose shape matches `predictions`. `labels` will be cast
to `bool`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
    includes: Tuple of keys to return, from 'tp', 'fn', 'tn', 'fp'. If `None`,
default to all four.
Returns:
values: Dict of variables of shape `[len(thresholds)]`. Keys are from
`includes`.
update_ops: Dict of operations that increments the `values`. Keys are from
`includes`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
all_includes = ('tp', 'fn', 'tn', 'fp')
if includes is None:
includes = all_includes
else:
for include in includes:
if include not in all_includes:
raise ValueError('Invalid key: %s.' % include)
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
num_thresholds = len(thresholds)
# Reshape predictions and labels.
predictions_2d = array_ops.reshape(predictions, [-1, 1])
labels_2d = array_ops.reshape(
math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
# Use static shape if known.
num_predictions = predictions_2d.get_shape().as_list()[0]
# Otherwise use dynamic shape.
if num_predictions is None:
num_predictions = array_ops.shape(predictions_2d)[0]
thresh_tiled = array_ops.tile(
array_ops.expand_dims(array_ops.constant(thresholds), [1]),
array_ops.stack([1, num_predictions]))
# Tile the predictions after thresholding them across different thresholds.
pred_is_pos = math_ops.greater(
array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
thresh_tiled)
if ('fn' in includes) or ('tn' in includes):
pred_is_neg = math_ops.logical_not(pred_is_pos)
# Tile labels by number of thresholds
label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
if ('fp' in includes) or ('tn' in includes):
label_is_neg = math_ops.logical_not(label_is_pos)
if weights is not None:
broadcast_weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), predictions)
weights_tiled = array_ops.tile(
array_ops.reshape(broadcast_weights, [1, -1]), [num_thresholds, 1])
thresh_tiled.get_shape().assert_is_compatible_with(
weights_tiled.get_shape())
else:
weights_tiled = None
values = {}
update_ops = {}
if 'tp' in includes:
true_positives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='true_positives')
is_true_positive = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_pos))
if weights_tiled is not None:
is_true_positive *= weights_tiled
update_ops['tp'] = state_ops.assign_add(true_positives,
math_ops.reduce_sum(
is_true_positive, 1))
values['tp'] = true_positives
if 'fn' in includes:
false_negatives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='false_negatives')
is_false_negative = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_neg))
if weights_tiled is not None:
is_false_negative *= weights_tiled
update_ops['fn'] = state_ops.assign_add(false_negatives,
math_ops.reduce_sum(
is_false_negative, 1))
values['fn'] = false_negatives
if 'tn' in includes:
true_negatives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='true_negatives')
is_true_negative = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_neg))
if weights_tiled is not None:
is_true_negative *= weights_tiled
update_ops['tn'] = state_ops.assign_add(true_negatives,
math_ops.reduce_sum(
is_true_negative, 1))
values['tn'] = true_negatives
if 'fp' in includes:
false_positives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='false_positives')
is_false_positive = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_pos))
if weights_tiled is not None:
is_false_positive *= weights_tiled
update_ops['fp'] = state_ops.assign_add(false_positives,
math_ops.reduce_sum(
is_false_positive, 1))
values['fp'] = false_positives
return values, update_ops
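# Illustrative sketch, not part of this module's API: building per-threshold
# confusion counts for a small, made-up batch.
def _example_confusion_matrix_at_thresholds():
  predictions = array_ops.constant([0.1, 0.4, 0.8, 0.9])
  labels = array_ops.constant([False, True, True, True])
  values, update_ops = _streaming_confusion_matrix_at_thresholds(
      predictions, labels, thresholds=[0.25, 0.5, 0.75])
  # `values['tp']` has shape [3]; after running each update op once on this
  # batch, the true-positive counts should be [3., 2., 2.] and the
  # false-positive counts [0., 0., 0.].
  return values, update_ops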
def streaming_true_positives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('tp',))
return values['tp'], update_ops['tp']
def streaming_false_negatives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('fn',))
return values['fn'], update_ops['fn']
def streaming_false_positives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('fp',))
return values['fp'], update_ops['fp']
def streaming_true_negatives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('tn',))
return values['tn'], update_ops['tn']
def streaming_curve_points(labels=None,
predictions=None,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None):
"""Computes curve (ROC or PR) values for a prespecified number of points.
The `streaming_curve_points` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
  that are used to compute the curve values. To discretize the curve, a
  linearly spaced set of thresholds is used to compute pairs of curve
  coordinates: (false positive rate, recall) for ROC, or (recall, precision)
  for PR.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
Returns:
points: A `Tensor` with shape [num_thresholds, 2] that contains points of
the curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
TODO(chizeng): Consider rewriting this method to make use of logic within the
precision_recall_at_equal_thresholds method (to improve run time).
"""
with variable_scope.variable_scope(name, 'curve_points',
(labels, predictions, weights)):
if curve != 'ROC' and curve != 'PR':
raise ValueError('curve must be either ROC or PR, %s unknown' % (curve))
kepsilon = _EPSILON # to account for floating point imprecisions
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
# Add epsilons to avoid dividing by 0.
epsilon = 1.0e-6
def compute_points(tp, fn, tn, fp):
"""Computes the roc-auc or pr-auc based on confusion counts."""
rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
if curve == 'ROC':
fp_rate = math_ops.div(fp, fp + tn + epsilon)
return fp_rate, rec
else: # curve == 'PR'.
prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
return rec, prec
xs, ys = compute_points(values['tp'], values['fn'], values['tn'],
values['fp'])
points = array_ops.stack([xs, ys], axis=1)
update_op = control_flow_ops.group(*update_ops.values())
if metrics_collections:
ops.add_to_collections(metrics_collections, points)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return points, update_op
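# Illustrative sketch, not part of this module's API: collecting a coarsely
# discretized precision-recall curve for a made-up batch.
def _example_streaming_curve_points():
  labels = array_ops.constant([True, False, True, True])
  predictions = array_ops.constant([0.9, 0.3, 0.6, 0.8])
  points, update_op = streaming_curve_points(
      labels=labels, predictions=predictions, num_thresholds=5, curve='PR')
  # `points` has shape [5, 2]; for the PR curve each row is a
  # (recall, precision) pair, valid once `update_op` has been run.
  return points, update_op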
@deprecated(None, 'Please switch to tf.metrics.auc. Note that the order of '
'the labels and predictions arguments has been switched.')
def streaming_auc(predictions,
labels,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None):
"""Computes the approximate AUC via a Riemann sum.
The `streaming_auc` function creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the AUC. To discretize the AUC curve, a linearly spaced set of
thresholds is used to compute pairs of recall and precision values. The area
under the ROC-curve is therefore computed using the height of the recall
  values by the false positive rate, while the area under the PR-curve is
  computed using the height of the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall values
(computed using the aforementioned variables). The `num_thresholds` variable
controls the degree of discretization with larger numbers of thresholds more
closely approximating the true AUC. The quality of the approximation may vary
dramatically depending on `num_thresholds`.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
approximation may be poor if this is not the case.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `auc`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
Returns:
auc: A scalar `Tensor` representing the current area-under-curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `auc`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.auc(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
num_thresholds=num_thresholds,
curve=curve,
updates_collections=updates_collections,
name=name)
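# Illustrative sketch, not part of this module's API: the deprecated wrapper
# above simply forwards to `metrics.auc`, so usage mirrors the core metric.
# The inputs below are made up.
def _example_streaming_auc():
  predictions = array_ops.constant([0.2, 0.7, 0.9, 0.4])
  labels = array_ops.constant([False, True, True, False])
  auc_value, auc_update_op = streaming_auc(
      predictions, labels, num_thresholds=10)
  # Run `auc_update_op` once per batch, then read `auc_value`.
  return auc_value, auc_update_op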
def _compute_dynamic_auc(labels, predictions, curve='ROC', weights=None):
"""Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
Computes the area under the ROC or PR curve using each prediction as a
threshold. This could be slow for large batches, but has the advantage of not
having its results degrade depending on the distribution of predictions.
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` with values of 0 or 1 and type `int64`.
predictions: A 1-D `Tensor` of predictions whose values are `float64`.
    curve: The name of the curve to be computed, 'ROC' for the Receiver
      Operating Characteristic or 'PR' for the Precision-Recall curve.
weights: A 1-D `Tensor` of weights whose values are `float64`.
Returns:
A scalar `Tensor` containing the area-under-curve value for the input.
"""
# Compute the total weight and the total positive weight.
size = array_ops.size(predictions)
if weights is None:
weights = array_ops.ones_like(labels, dtype=dtypes.float64)
labels, predictions, weights = metrics_impl._remove_squeezable_dimensions(
labels, predictions, weights)
total_weight = math_ops.reduce_sum(weights)
total_positive = math_ops.reduce_sum(
array_ops.where(
math_ops.greater(labels, 0), weights,
array_ops.zeros_like(labels, dtype=dtypes.float64)))
def continue_computing_dynamic_auc():
"""Continues dynamic auc computation, entered if labels are not all equal.
Returns:
A scalar `Tensor` containing the area-under-curve value.
"""
# Sort the predictions descending, keeping the same order for the
# corresponding labels and weights.
ordered_predictions, indices = nn.top_k(predictions, k=size)
ordered_labels = array_ops.gather(labels, indices)
ordered_weights = array_ops.gather(weights, indices)
# Get the counts of the unique ordered predictions.
_, _, counts = array_ops.unique_with_counts(ordered_predictions)
# Compute the indices of the split points between different predictions.
splits = math_ops.cast(
array_ops.pad(math_ops.cumsum(counts), paddings=[[1, 0]]), dtypes.int32)
# Count the positives to the left of the split indices.
true_positives = array_ops.gather(
array_ops.pad(
math_ops.cumsum(
array_ops.where(
math_ops.greater(ordered_labels, 0), ordered_weights,
array_ops.zeros_like(ordered_labels,
dtype=dtypes.float64))),
paddings=[[1, 0]]), splits)
if curve == 'ROC':
      # Compute the weight of the negatives to the left of every split point
      # and the total weight of all negatives, for computing the FPR.
false_positives = array_ops.gather(
array_ops.pad(
math_ops.cumsum(
array_ops.where(
math_ops.less(ordered_labels, 1), ordered_weights,
array_ops.zeros_like(
ordered_labels, dtype=dtypes.float64))),
paddings=[[1, 0]]), splits)
total_negative = total_weight - total_positive
x_axis_values = math_ops.truediv(false_positives, total_negative)
y_axis_values = math_ops.truediv(true_positives, total_positive)
elif curve == 'PR':
x_axis_values = math_ops.truediv(true_positives, total_positive)
# For conformance, set precision to 1 when the number of positive
# classifications is 0.
positives = array_ops.gather(
array_ops.pad(math_ops.cumsum(ordered_weights), paddings=[[1, 0]]),
splits)
y_axis_values = array_ops.where(
math_ops.greater(splits, 0),
math_ops.truediv(true_positives, positives),
array_ops.ones_like(true_positives, dtype=dtypes.float64))
# Calculate trapezoid areas.
heights = math_ops.add(y_axis_values[1:], y_axis_values[:-1]) / 2.0
widths = math_ops.abs(
math_ops.subtract(x_axis_values[1:], x_axis_values[:-1]))
return math_ops.reduce_sum(math_ops.multiply(heights, widths))
# If all the labels are the same, AUC isn't well-defined (but raising an
# exception seems excessive) so we return 0, otherwise we finish computing.
return control_flow_ops.cond(
math_ops.logical_or(
math_ops.equal(total_positive, 0), math_ops.equal(
total_positive, total_weight)),
true_fn=lambda: array_ops.constant(0, dtypes.float64),
false_fn=continue_computing_dynamic_auc)
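# Illustrative sketch, not part of this module's API: the helper above expects
# flattened labels and predictions, as prepared by `streaming_dynamic_auc`
# below. The toy batch here is perfectly separable, so the ROC Riemann sum
# should evaluate to 1.0.
def _example_compute_dynamic_auc():
  labels = math_ops.cast(array_ops.constant([0, 1, 1, 0]), dtypes.int64)
  predictions = math_ops.cast(
      array_ops.constant([0.1, 0.9, 0.8, 0.3]), dtypes.float64)
  return _compute_dynamic_auc(labels, predictions, curve='ROC')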
def streaming_dynamic_auc(labels,
predictions,
curve='ROC',
metrics_collections=(),
updates_collections=(),
name=None,
weights=None):
"""Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
USAGE NOTE: this approach requires storing all of the predictions and labels
for a single evaluation in memory, so it may not be usable when the evaluation
batch size and/or the number of evaluation steps is very large.
Computes the area under the ROC or PR curve using each prediction as a
threshold. This has the advantage of being resilient to the distribution of
predictions by aggregating across batches, accumulating labels and predictions
and performing the final calculation using all of the concatenated values.
Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions` and with values of 0 or 1 whose values are castable to
      `int64`.
predictions: A `Tensor` of predictions whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
curve: The name of the curve for which to compute AUC, 'ROC' for the
      Receiver Operating Characteristic or 'PR' for the Precision-Recall curve.
metrics_collections: An optional iterable of collections that `auc` should
be added to.
updates_collections: An optional iterable of collections that `update_op`
should be added to.
name: An optional name for the variable_scope that contains the metric
variables.
weights: A 'Tensor' of non-negative weights whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
Returns:
auc: A scalar `Tensor` containing the current area-under-curve value.
update_op: An operation that concatenates the input labels and predictions
to the accumulated values.
Raises:
ValueError: If `labels` and `predictions` have mismatched shapes or if
`curve` isn't a recognized curve type.
"""
if curve not in ['PR', 'ROC']:
raise ValueError('curve must be either ROC or PR, %s unknown' % curve)
with variable_scope.variable_scope(name, default_name='dynamic_auc'):
labels.get_shape().assert_is_compatible_with(predictions.get_shape())
predictions = array_ops.reshape(
math_ops.cast(predictions, dtypes.float64), [-1])
labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
with ops.control_dependencies([
check_ops.assert_greater_equal(
labels,
array_ops.zeros_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is <0'),
check_ops.assert_less_equal(
labels,
array_ops.ones_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is >1'),
]):
preds_accum, update_preds = streaming_concat(
predictions, name='concat_preds')
labels_accum, update_labels = streaming_concat(
labels, name='concat_labels')
if weights is not None:
weights = array_ops.reshape(
math_ops.cast(weights, dtypes.float64), [-1])
weights_accum, update_weights = streaming_concat(
weights, name='concat_weights')
update_op = control_flow_ops.group(update_labels, update_preds,
update_weights)
else:
weights_accum = None
update_op = control_flow_ops.group(update_labels, update_preds)
auc = _compute_dynamic_auc(
labels_accum, preds_accum, curve=curve, weights=weights_accum)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, auc)
return auc, update_op
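# Illustrative sketch, not part of this module's API: accumulating labels and
# predictions for the data-derived-threshold AUC. Variable initialization and
# session runs are omitted; only graph construction is shown.
def _example_streaming_dynamic_auc():
  labels = array_ops.constant([0, 1, 1, 0])
  predictions = array_ops.constant([0.1, 0.9, 0.8, 0.3])
  auc_value, update_op = streaming_dynamic_auc(labels, predictions,
                                               curve='ROC')
  return auc_value, update_op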
def _compute_placement_auc(labels, predictions, weights, alpha,
logit_transformation, is_valid):
"""Computes the AUC and asymptotic normally distributed confidence interval.
The calculations are achieved using the fact that AUC = P(Y_1>Y_0) and the
  concept of placement values for each labeled group, as presented by DeLong and
  DeLong (1988). The actual algorithm used is a more computationally efficient
approach presented by Sun and Xu (2014). This could be slow for large batches,
but has the advantage of not having its results degrade depending on the
distribution of predictions.
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` with values of 0 or 1 and type `int64`.
predictions: A 1-D `Tensor` of predictions whose values are `float64`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`.
alpha: Confidence interval level desired.
logit_transformation: A boolean value indicating whether the estimate should
be logit transformed prior to calculating the confidence interval. Doing
so enforces the restriction that the AUC should never be outside the
interval [0,1].
is_valid: A bool tensor describing whether the input is valid.
Returns:
A 1-D `Tensor` containing the area-under-curve, lower, and upper confidence
interval values.
"""
# Disable the invalid-name checker so that we can capitalize the name.
# pylint: disable=invalid-name
AucData = collections_lib.namedtuple('AucData', ['auc', 'lower', 'upper'])
# pylint: enable=invalid-name
  # If all the labels are the same or the number of observations is too small,
  # AUC isn't well-defined.
size = array_ops.size(predictions, out_type=dtypes.int32)
# Count the total number of positive and negative labels in the input.
total_0 = math_ops.reduce_sum(
math_ops.cast(1 - labels, weights.dtype) * weights)
total_1 = math_ops.reduce_sum(
math_ops.cast(labels, weights.dtype) * weights)
# Sort the predictions ascending, as well as
# (i) the corresponding labels and
# (ii) the corresponding weights.
ordered_predictions, indices = nn.top_k(predictions, k=size, sorted=True)
ordered_predictions = array_ops.reverse(
ordered_predictions, axis=array_ops.zeros(1, dtypes.int32))
indices = array_ops.reverse(indices, axis=array_ops.zeros(1, dtypes.int32))
ordered_labels = array_ops.gather(labels, indices)
ordered_weights = array_ops.gather(weights, indices)
# We now compute values required for computing placement values.
# We generate a list of indices (segmented_indices) of increasing order. An
# index is assigned for each unique prediction float value. Prediction
# values that are the same share the same index.
_, segmented_indices = array_ops.unique(ordered_predictions)
# We create 2 tensors of weights. weights_for_true is non-zero for true
# labels. weights_for_false is non-zero for false labels.
float_labels_for_true = math_ops.cast(ordered_labels, dtypes.float32)
float_labels_for_false = 1.0 - float_labels_for_true
weights_for_true = ordered_weights * float_labels_for_true
weights_for_false = ordered_weights * float_labels_for_false
# For each set of weights with the same segmented indices, we add up the
# weight values. Note that for each label, we deliberately rely on weights
# for the opposite label.
weight_totals_for_true = math_ops.segment_sum(weights_for_false,
segmented_indices)
weight_totals_for_false = math_ops.segment_sum(weights_for_true,
segmented_indices)
# These cumulative sums of weights importantly exclude the current weight
# sums.
cum_weight_totals_for_true = math_ops.cumsum(weight_totals_for_true,
exclusive=True)
cum_weight_totals_for_false = math_ops.cumsum(weight_totals_for_false,
exclusive=True)
# Compute placement values using the formula. Values with the same segmented
# indices and labels share the same placement values.
placements_for_true = (
(cum_weight_totals_for_true + weight_totals_for_true / 2.0) /
(math_ops.reduce_sum(weight_totals_for_true) + _EPSILON))
placements_for_false = (
(cum_weight_totals_for_false + weight_totals_for_false / 2.0) /
(math_ops.reduce_sum(weight_totals_for_false) + _EPSILON))
# We expand the tensors of placement values (for each label) so that their
# shapes match that of predictions.
placements_for_true = array_ops.gather(placements_for_true, segmented_indices)
placements_for_false = array_ops.gather(placements_for_false,
segmented_indices)
# Select placement values based on the label for each index.
placement_values = (
placements_for_true * float_labels_for_true +
placements_for_false * float_labels_for_false)
# Split placement values by labeled groups.
placement_values_0 = placement_values * math_ops.cast(
1 - ordered_labels, weights.dtype)
weights_0 = ordered_weights * math_ops.cast(
1 - ordered_labels, weights.dtype)
placement_values_1 = placement_values * math_ops.cast(
ordered_labels, weights.dtype)
weights_1 = ordered_weights * math_ops.cast(
ordered_labels, weights.dtype)
# Calculate AUC using placement values
auc_0 = (math_ops.reduce_sum(weights_0 * (1. - placement_values_0)) /
(total_0 + _EPSILON))
auc_1 = (math_ops.reduce_sum(weights_1 * (placement_values_1)) /
(total_1 + _EPSILON))
auc = array_ops.where(math_ops.less(total_0, total_1), auc_1, auc_0)
# Calculate variance and standard error using the placement values.
var_0 = (
math_ops.reduce_sum(
weights_0 * math_ops.square(1. - placement_values_0 - auc_0)) /
(total_0 - 1. + _EPSILON))
var_1 = (
math_ops.reduce_sum(
weights_1 * math_ops.square(placement_values_1 - auc_1)) /
(total_1 - 1. + _EPSILON))
auc_std_err = math_ops.sqrt(
(var_0 / (total_0 + _EPSILON)) + (var_1 / (total_1 + _EPSILON)))
# Calculate asymptotic normal confidence intervals
std_norm_dist = Normal(loc=0., scale=1.)
z_value = std_norm_dist.quantile((1.0 - alpha) / 2.0)
if logit_transformation:
estimate = math_ops.log(auc / (1. - auc + _EPSILON))
std_err = auc_std_err / (auc * (1. - auc + _EPSILON))
transformed_auc_lower = estimate + (z_value * std_err)
transformed_auc_upper = estimate - (z_value * std_err)
def inverse_logit_transformation(x):
exp_negative = math_ops.exp(math_ops.negative(x))
return 1. / (1. + exp_negative + _EPSILON)
auc_lower = inverse_logit_transformation(transformed_auc_lower)
auc_upper = inverse_logit_transformation(transformed_auc_upper)
else:
estimate = auc
std_err = auc_std_err
auc_lower = estimate + (z_value * std_err)
auc_upper = estimate - (z_value * std_err)
  ## If the estimate is exactly 1 or 0, no variance is present, so the CI
  ## collapses to the point estimate.
  ## N.B. this can be misleading, since the number of observations may simply
  ## be too low.
lower = array_ops.where(
math_ops.logical_or(
math_ops.equal(auc, array_ops.ones_like(auc)),
math_ops.equal(auc, array_ops.zeros_like(auc))),
auc, auc_lower)
upper = array_ops.where(
math_ops.logical_or(
math_ops.equal(auc, array_ops.ones_like(auc)),
math_ops.equal(auc, array_ops.zeros_like(auc))),
auc, auc_upper)
# If all the labels are the same, AUC isn't well-defined (but raising an
# exception seems excessive) so we return 0, otherwise we finish computing.
trivial_value = array_ops.constant(0.0)
return AucData(*control_flow_ops.cond(
is_valid, lambda: [auc, lower, upper], lambda: [trivial_value]*3))
def auc_with_confidence_intervals(labels,
predictions,
weights=None,
alpha=0.95,
logit_transformation=True,
metrics_collections=(),
updates_collections=(),
name=None):
"""Computes the AUC and asymptotic normally distributed confidence interval.
USAGE NOTE: this approach requires storing all of the predictions and labels
for a single evaluation in memory, so it may not be usable when the evaluation
batch size and/or the number of evaluation steps is very large.
Computes the area under the ROC curve and its confidence interval using
placement values. This has the advantage of being resilient to the
distribution of predictions by aggregating across batches, accumulating labels
and predictions and performing the final calculation using all of the
concatenated values.
Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions` and with values of 0 or 1 whose values are castable to
      `int64`.
predictions: A `Tensor` of predictions whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`.
alpha: Confidence interval level desired.
logit_transformation: A boolean value indicating whether the estimate should
be logit transformed prior to calculating the confidence interval. Doing
so enforces the restriction that the AUC should never be outside the
interval [0,1].
metrics_collections: An optional iterable of collections that `auc` should
be added to.
updates_collections: An optional iterable of collections that `update_op`
should be added to.
name: An optional name for the variable_scope that contains the metric
variables.
Returns:
auc: A 1-D `Tensor` containing the current area-under-curve, lower, and
upper confidence interval values.
update_op: An operation that concatenates the input labels and predictions
to the accumulated values.
Raises:
ValueError: If `labels`, `predictions`, and `weights` have mismatched shapes
or if `alpha` isn't in the range (0,1).
"""
if not (alpha > 0 and alpha < 1):
raise ValueError('alpha must be between 0 and 1; currently %.02f' % alpha)
if weights is None:
weights = array_ops.ones_like(predictions)
with variable_scope.variable_scope(
name,
default_name='auc_with_confidence_intervals',
values=[labels, predictions, weights]):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions,
labels=labels,
weights=weights)
total_weight = math_ops.reduce_sum(weights)
weights = array_ops.reshape(weights, [-1])
predictions = array_ops.reshape(
math_ops.cast(predictions, dtypes.float64), [-1])
labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
with ops.control_dependencies([
check_ops.assert_greater_equal(
labels,
array_ops.zeros_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is <0'),
check_ops.assert_less_equal(
labels,
array_ops.ones_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is >1'),
]):
preds_accum, update_preds = streaming_concat(
predictions, name='concat_preds')
labels_accum, update_labels = streaming_concat(labels,
name='concat_labels')
weights_accum, update_weights = streaming_concat(
weights, name='concat_weights')
update_op_for_valid_case = control_flow_ops.group(
update_labels, update_preds, update_weights)
# Only perform updates if this case is valid.
all_labels_positive_or_0 = math_ops.logical_and(
math_ops.equal(math_ops.reduce_min(labels), 0),
math_ops.equal(math_ops.reduce_max(labels), 1))
sums_of_weights_at_least_1 = math_ops.greater_equal(total_weight, 1.0)
is_valid = math_ops.logical_and(all_labels_positive_or_0,
sums_of_weights_at_least_1)
update_op = control_flow_ops.cond(
sums_of_weights_at_least_1,
lambda: update_op_for_valid_case, control_flow_ops.no_op)
auc = _compute_placement_auc(
labels_accum,
preds_accum,
weights_accum,
alpha=alpha,
logit_transformation=logit_transformation,
is_valid=is_valid)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, auc)
return auc, update_op
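# Illustrative sketch, not part of this module's API: requesting a 95%
# confidence interval around the streaming AUC estimate for a made-up batch.
def _example_auc_with_confidence_intervals():
  labels = array_ops.constant([0, 1, 1, 0, 1])
  predictions = array_ops.constant([0.1, 0.7, 0.8, 0.4, 0.6])
  auc_data, update_op = auc_with_confidence_intervals(
      labels, predictions, alpha=0.95)
  # `auc_data` is an `AucData` namedtuple exposing `auc`, `lower` and `upper`.
  return auc_data, update_op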
def precision_recall_at_equal_thresholds(labels,
predictions,
weights=None,
num_thresholds=None,
use_locking=None,
name=None):
"""A helper method for creating metrics related to precision-recall curves.
These values are true positives, false negatives, true negatives, false
positives, precision, and recall. This function returns a data structure that
contains ops within it.
Unlike _streaming_confusion_matrix_at_thresholds (which exhibits O(T * N)
space and run time), this op exhibits O(T + N) space and run time, where T is
the number of thresholds and N is the size of the predictions tensor. Hence,
it may be advantageous to use this function when `predictions` is big.
For instance, prefer this method for per-pixel classification tasks, for which
the predictions tensor may be very large.
Each number in `predictions`, a float in `[0, 1]`, is compared with its
corresponding label in `labels`, and counts as a single tp/fp/tn/fn value at
each threshold. This is then multiplied with `weights` which can be used to
reweight certain values, or more commonly used for masking values.
Args:
labels: A bool `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional; If provided, a `Tensor` that has the same dtype as,
and broadcastable to, `predictions`. This tensor is multiplied by counts.
num_thresholds: Optional; Number of thresholds, evenly distributed in
`[0, 1]`. Should be `>= 2`. Defaults to 201. Note that the number of bins
is 1 less than `num_thresholds`. Using an even `num_thresholds` value
instead of an odd one may yield unfriendly edges for bins.
use_locking: Optional; If True, the op will be protected by a lock.
Otherwise, the behavior is undefined, but may exhibit less contention.
Defaults to True.
name: Optional; variable_scope name. If not provided, the string
      'precision_recall_at_equal_thresholds' is used.
Returns:
result: A named tuple (See PrecisionRecallData within the implementation of
this function) with properties that are variables of shape
`[num_thresholds]`. The names of the properties are tp, fp, tn, fn,
precision, recall, thresholds. Types are same as that of predictions.
update_op: An op that accumulates values.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
# Disable the invalid-name checker so that we can capitalize the name.
# pylint: disable=invalid-name
PrecisionRecallData = collections_lib.namedtuple(
'PrecisionRecallData',
['tp', 'fp', 'tn', 'fn', 'precision', 'recall', 'thresholds'])
# pylint: enable=invalid-name
if num_thresholds is None:
num_thresholds = 201
if weights is None:
weights = 1.0
if use_locking is None:
use_locking = True
check_ops.assert_type(labels, dtypes.bool)
with variable_scope.variable_scope(name,
'precision_recall_at_equal_thresholds',
(labels, predictions, weights)):
# Make sure that predictions are within [0.0, 1.0].
with ops.control_dependencies([
check_ops.assert_greater_equal(
predictions,
math_ops.cast(0.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]'),
check_ops.assert_less_equal(
predictions,
math_ops.cast(1.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]')
]):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions,
labels=labels,
weights=weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# It's important we aggregate using float64 since we're accumulating a lot
# of 1.0's for the true/false labels, and accumulating to float32 will
# be quite inaccurate even with just a modest amount of values (~20M).
    # We use float64 instead of an integer type primarily because the GPU
    # scatter kernel only supports floats.
agg_dtype = dtypes.float64
f_labels = math_ops.cast(labels, agg_dtype)
weights = math_ops.cast(weights, agg_dtype)
true_labels = f_labels * weights
false_labels = (1.0 - f_labels) * weights
# Flatten predictions and labels.
predictions = array_ops.reshape(predictions, [-1])
true_labels = array_ops.reshape(true_labels, [-1])
false_labels = array_ops.reshape(false_labels, [-1])
# To compute TP/FP/TN/FN, we are measuring a binary classifier
# C(t) = (predictions >= t)
# at each threshold 't'. So we have
# TP(t) = sum( C(t) * true_labels )
# FP(t) = sum( C(t) * false_labels )
#
# But, computing C(t) requires computation for each t. To make it fast,
# observe that C(t) is a cumulative integral, and so if we have
# thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}
# where n = num_thresholds, and if we can compute the bucket function
    #     B(i) = Sum( (predictions == t), t_i <= t < t_{i+1} )
# then we get
# C(t_i) = sum( B(j), j >= i )
# which is the reversed cumulative sum in tf.cumsum().
#
# We can compute B(i) efficiently by taking advantage of the fact that
# our thresholds are evenly distributed, in that
# width = 1.0 / (num_thresholds - 1)
# thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
# Given a prediction value p, we can map it to its bucket by
# bucket_index(p) = floor( p * (num_thresholds - 1) )
# so we can use tf.scatter_add() to update the buckets in one pass.
#
# This implementation exhibits a run time and space complexity of O(T + N),
# where T is the number of thresholds and N is the size of predictions.
# Metrics that rely on _streaming_confusion_matrix_at_thresholds instead
# exhibit a complexity of O(T * N).
# Compute the bucket indices for each prediction value.
bucket_indices = math_ops.cast(
math_ops.floor(predictions * (num_thresholds - 1)), dtypes.int32)
with ops.name_scope('variables'):
tp_buckets_v = metrics_impl.metric_variable(
[num_thresholds], agg_dtype, name='tp_buckets')
fp_buckets_v = metrics_impl.metric_variable(
[num_thresholds], agg_dtype, name='fp_buckets')
with ops.name_scope('update_op'):
update_tp = state_ops.scatter_add(
tp_buckets_v, bucket_indices, true_labels, use_locking=use_locking)
update_fp = state_ops.scatter_add(
fp_buckets_v, bucket_indices, false_labels, use_locking=use_locking)
# Set up the cumulative sums to compute the actual metrics.
tp = math_ops.cumsum(tp_buckets_v, reverse=True, name='tp')
fp = math_ops.cumsum(fp_buckets_v, reverse=True, name='fp')
# fn = sum(true_labels) - tp
# = sum(tp_buckets) - tp
# = tp[0] - tp
# Similarly,
# tn = fp[0] - fp
tn = fp[0] - fp
fn = tp[0] - tp
# We use a minimum to prevent division by 0.
epsilon = ops.convert_to_tensor(1e-7, dtype=agg_dtype)
precision = tp / math_ops.maximum(epsilon, tp + fp)
recall = tp / math_ops.maximum(epsilon, tp + fn)
# Convert all tensors back to predictions' dtype (as per function contract).
out_dtype = predictions.dtype
_convert = lambda tensor: math_ops.cast(tensor, out_dtype)
result = PrecisionRecallData(
tp=_convert(tp),
fp=_convert(fp),
tn=_convert(tn),
fn=_convert(fn),
precision=_convert(precision),
recall=_convert(recall),
thresholds=_convert(math_ops.lin_space(0.0, 1.0, num_thresholds)))
update_op = control_flow_ops.group(update_tp, update_fp)
return result, update_op
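# Illustrative sketch, not part of this module's API: `labels` must already be
# boolean, and the returned namedtuple exposes per-threshold curves of length
# `num_thresholds`. The inputs are made up.
def _example_precision_recall_at_equal_thresholds():
  labels = array_ops.constant([True, False, True, True])
  predictions = array_ops.constant([0.9, 0.2, 0.8, 0.4])
  result, update_op = precision_recall_at_equal_thresholds(
      labels, predictions, num_thresholds=11)
  # `result.precision` and `result.recall` each have shape [11].
  return result, update_op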
def streaming_specificity_at_sensitivity(predictions,
labels,
sensitivity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the specificity at a given sensitivity.
The `streaming_specificity_at_sensitivity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the specificity at the given
sensitivity value. The threshold for the given sensitivity value is computed
and used to evaluate the corresponding specificity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`specificity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
sensitivity: A scalar value in range `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
sensitivity.
metrics_collections: An optional list of collections that `specificity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
specificity: A scalar `Tensor` representing the specificity at the given
      `sensitivity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `specificity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`sensitivity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
return metrics.specificity_at_sensitivity(
sensitivity=sensitivity,
num_thresholds=num_thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
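# Illustrative sketch, not part of this module's API: querying specificity at
# 90% sensitivity on a made-up batch.
def _example_specificity_at_sensitivity():
  predictions = array_ops.constant([0.1, 0.6, 0.8, 0.3, 0.9])
  labels = array_ops.constant([False, True, True, False, True])
  specificity, update_op = streaming_specificity_at_sensitivity(
      predictions, labels, sensitivity=0.9, num_thresholds=100)
  return specificity, update_op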
def streaming_sensitivity_at_specificity(predictions,
labels,
specificity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the sensitivity at a given specificity.
The `streaming_sensitivity_at_specificity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the sensitivity at the given
specificity value. The threshold for the given specificity value is computed
and used to evaluate the corresponding sensitivity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
specificity: A scalar value in range `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
specificity.
metrics_collections: An optional list of collections that `sensitivity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
sensitivity: A scalar `Tensor` representing the sensitivity at the given
`specificity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `sensitivity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`specificity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
return metrics.sensitivity_at_specificity(
specificity=specificity,
num_thresholds=num_thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.precision_at_thresholds. Note that '
'the order of the labels and predictions arguments are switched.')
def streaming_precision_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision values for different `thresholds` on `predictions`.
The `streaming_precision_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `precision[i]` is defined as the total
weight of values in `predictions` above `thresholds[i]` whose corresponding
entry in `labels` is `True`, divided by the total weight of values in
`predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] +
false_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.precision_at_thresholds(
thresholds=thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
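# Illustrative sketch, not part of this module's API: per-threshold precision
# for a small, made-up batch.
def _example_precision_at_thresholds():
  predictions = array_ops.constant([0.2, 0.7, 0.9, 0.4])
  labels = array_ops.constant([False, True, True, True])
  precision, update_op = streaming_precision_at_thresholds(
      predictions, labels, thresholds=[0.25, 0.5, 0.75])
  # `precision` has shape [3], one value per threshold.
  return precision, update_op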
@deprecated(None,
'Please switch to tf.metrics.recall_at_thresholds. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_recall_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various recall values for different `thresholds` on `predictions`.
The `streaming_recall_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `recall[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `True`, divided by the total weight of `True` values in `labels`
(`true_positives[i] / (true_positives[i] + false_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `recall`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.recall_at_thresholds(
thresholds=thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_positive_rate_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various fpr values for different `thresholds` on `predictions`.
The `streaming_false_positive_rate_at_thresholds` function creates two
  local variables, `false_positives` and `true_negatives`, for various values
  of thresholds. `false_positive_rate[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `False`, divided by the total weight of `False` values in `labels`
(`false_positives[i] / (false_positives[i] + true_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_positive_rate`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_positive_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positive_rate: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `false_positives` and
`true_negatives` variables that are used in the computation of
`false_positive_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_positive_rate_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights, includes=('fp', 'tn'))
# Avoid division by zero.
epsilon = _EPSILON
def compute_fpr(fp, tn, name):
return math_ops.div(fp, epsilon + fp + tn, name='fpr_' + name)
fpr = compute_fpr(values['fp'], values['tn'], 'value')
update_op = compute_fpr(update_ops['fp'], update_ops['tn'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fpr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fpr, update_op
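# Illustrative sketch, not part of this module's API: the per-threshold FPR is
# fp / (fp + tn), with a small epsilon guarding against division by zero. The
# inputs are made up.
def _example_false_positive_rate_at_thresholds():
  predictions = array_ops.constant([0.2, 0.7, 0.9, 0.4])
  labels = array_ops.constant([False, True, True, False])
  fpr, update_op = streaming_false_positive_rate_at_thresholds(
      predictions, labels, thresholds=[0.5])
  # Neither negative example scores above 0.5, so the single FPR value is 0.0
  # once the update op has been run on this batch.
  return fpr, update_op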
def streaming_false_negative_rate_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various fnr values for different `thresholds` on `predictions`.
  The `streaming_false_negative_rate_at_thresholds` function creates two
  local variables, `false_negatives` and `true_positives`, for various values
  of thresholds. `false_negative_rate[i]` is defined as the total weight of
  values in `predictions` at most `thresholds[i]` whose corresponding entry in
  `labels` is `True`, divided by the total weight of `True` values in `labels`
  (`false_negatives[i] / (false_negatives[i] + true_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
  `false_negative_rate`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_negative_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negative_rate: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `false_negatives` and
`true_positives` variables that are used in the computation of
`false_negative_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_negative_rate_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights, includes=('fn', 'tp'))
# Avoid division by zero.
epsilon = _EPSILON
def compute_fnr(fn, tp, name):
return math_ops.div(fn, epsilon + fn + tp, name='fnr_' + name)
fnr = compute_fnr(values['fn'], values['tp'], 'value')
update_op = compute_fnr(update_ops['fn'], update_ops['tp'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fnr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fnr, update_op
def _at_k_name(name, k=None, class_id=None):
if k is not None:
name = '%s_at_%d' % (name, k)
else:
name = '%s_at_k' % (name)
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
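# For reference, the helper above produces names such as
#   _at_k_name('recall', 5)                 -> 'recall_at_5'
#   _at_k_name('precision', 5, class_id=3)  -> 'precision_at_5_class3'
#   _at_k_name('recall')                    -> 'recall_at_k'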
@deprecated('2016-11-08', 'Please use `streaming_sparse_recall_at_k`, '
'and reshape labels from [batch_size] to [batch_size, 1].')
def streaming_recall_at_k(predictions,
labels,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall@k of the predictions with respect to dense labels.
The `streaming_recall_at_k` function creates two local variables, `total` and
`count`, that are used to compute the recall@k frequency. This frequency is
ultimately returned as `recall_at_<k>`: an idempotent operation that simply
divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, an `in_top_k` operation computes a `Tensor` with
shape [batch_size] whose elements indicate whether or not the corresponding
label is in the top `k` `predictions`. Then `update_op` increments `total`
with the reduced sum of `weights` where `in_top_k` is `True`, and it
increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A float `Tensor` of dimension [batch_size, num_classes].
labels: A `Tensor` of dimension [batch_size] whose type is in `int32`,
`int64`.
k: The number of top elements to look at for computing recall.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall_at_k`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
recall_at_k: A `Tensor` representing the recall@k, the fraction of labels
which fall into the top `k` predictions.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `recall_at_k`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
in_top_k = math_ops.to_float(nn.in_top_k(predictions, labels, k))
return streaming_mean(in_top_k, weights, metrics_collections,
updates_collections, name or _at_k_name('recall', k))
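# Illustrative sketch, not part of this module's API: recall@2 over dense
# integer labels, the format the deprecated metric above expects. The logits
# are made up.
def _example_streaming_recall_at_k():
  predictions = array_ops.constant([[0.1, 0.6, 0.3],
                                    [0.8, 0.1, 0.1]])
  labels = array_ops.constant([1, 2])
  recall_at_2, update_op = streaming_recall_at_k(predictions, labels, k=2)
  return recall_at_2, update_op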
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_recall_at_k(predictions,
labels,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of the predictions with respect to sparse labels.
If `class_id` is not specified, we'll calculate recall as the ratio of true
positives (i.e., correct predictions, items in the top `k` highest
`predictions` that are found in the corresponding row in `labels`) to
actual positives (the full `labels` row).
  If `class_id` is specified, we calculate recall by considering only the rows
  in the batch for which `class_id` is in `labels`, and computing the fraction
  of them for which `class_id` is in the top `k` highest `predictions`.
`streaming_sparse_recall_at_k` creates two local variables,
`true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute
the recall_at_k frequency. This frequency is ultimately returned as
`recall_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false negatives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_negative_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`.
Values should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range always count
towards `false_negative_at_<k>`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
return metrics.recall_at_k(
k=k,
class_id=class_id,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_k(predictions,
labels,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of the predictions with respect to sparse labels.
If `class_id` is not specified, we calculate precision as the ratio of true
positives (i.e., correct predictions, items in the top `k` highest
`predictions` that are found in the corresponding row in `labels`) to
positives (all top `k` `predictions`).
If `class_id` is specified, we calculate precision by considering only the
rows in the batch for which `class_id` is in the top `k` highest
`predictions`, and computing the fraction of them for which `class_id` is
in the corresponding row in `labels`.
We expect precision to decrease as `k` increases.
`streaming_sparse_precision_at_k` creates two local variables,
`true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_positive_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
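For example, a minimal sketch with illustrative values (with k=2 there are 4
top-k predictions in total, of which only class 1 in the first row appears in
`labels`):
```python
import tensorflow as tf
predictions = tf.constant([[0.1, 0.4, 0.3, 0.2],
                           [0.6, 0.2, 0.15, 0.05]])
labels = tf.constant([[1], [3]], dtype=tf.int64)
precision, update_op = tf.contrib.metrics.streaming_sparse_precision_at_k(
    predictions, labels, k=2)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(precision))  # ~0.25
```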
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
return metrics.precision_at_k(
k=k,
class_id=class_id,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_top_k(top_k_predictions,
labels,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of top-k predictions with respect to sparse labels.
If `class_id` is not specified, we calculate precision as the ratio of
true positives (i.e., correct predictions, items in `top_k_predictions`
that are found in the corresponding row in `labels`) to positives (all
`top_k_predictions`).
If `class_id` is specified, we calculate precision by considering only the
rows in the batch for which `class_id` is in the top `k` highest
`predictions`, and computing the fraction of them for which `class_id` is
in the corresponding row in `labels`.
We expect precision to decrease as `k` increases.
`streaming_sparse_precision_at_top_k` creates two local variables,
`true_positive_at_k` and `false_positive_at_k`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_k`: an idempotent operation that simply divides
`true_positive_at_k` by total (`true_positive_at_k` + `false_positive_at_k`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_k`. Internally, set operations applied to `top_k_predictions`
and `labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_k` and
`false_positive_at_k` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
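A minimal illustrative sketch, assuming the top-k class indices have already
been computed and that this function is exported as
`tf.contrib.metrics.streaming_sparse_precision_at_top_k`:
```python
import tensorflow as tf
top_k_predictions = tf.constant([[1, 2], [0, 1]], dtype=tf.int64)
labels = tf.constant([[1], [3]], dtype=tf.int64)
precision, update_op = (
    tf.contrib.metrics.streaming_sparse_precision_at_top_k(
        top_k_predictions, labels))
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  # Only 1 of the 4 predicted indices (class 1 in row 0) is in `labels`.
  sess.run(update_op)
  print(sess.run(precision))  # ~0.25
```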
Args:
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].
The final dimension contains the indices of top-k labels. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`top_k_predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
ValueError: If `top_k_predictions` has rank < 2.
"""
default_name = _at_k_name('precision', class_id=class_id)
with ops.name_scope(name, default_name,
(top_k_predictions, labels, weights)) as name_scope:
return metrics_impl.precision_at_top_k(
labels=labels,
predictions_idx=top_k_predictions,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name_scope)
def sparse_recall_at_top_k(labels,
top_k_predictions,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of top-k predictions with respect to sparse labels.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing
the fraction of them for which `class_id` is in the top-k `predictions`.
If `class_id` is not specified, we'll calculate recall as how often on
average a class among the labels of a batch entry is in the top-k
`predictions`.
`sparse_recall_at_top_k` creates two local variables, `true_positive_at_<k>`
and `false_negative_at_<k>`, that are used to compute the recall_at_k
frequency. This frequency is ultimately returned as `recall_at_<k>`: an
idempotent operation that simply divides `true_positive_at_<k>` by total
(`true_positive_at_<k>` + `false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Set operations applied to `top_k` and `labels` calculate the
true positives and false negatives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_negative_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
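A minimal illustrative sketch, assuming the function is exported as
`tf.contrib.metrics.sparse_recall_at_top_k` (note that `labels` comes first):
```python
import tensorflow as tf
labels = tf.constant([[1], [3]], dtype=tf.int64)
top_k_predictions = tf.constant([[1, 2], [0, 1]], dtype=tf.int64)
recall, update_op = tf.contrib.metrics.sparse_recall_at_top_k(
    labels, top_k_predictions)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  # One of the two relevant labels (class 1) is among the predicted indices.
  sess.run(update_op)
  print(sess.run(recall))  # ~0.5
```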
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`top_k_predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range always count towards `false_negative_at_<k>`.
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].
The final dimension contains the indices of top-k labels. [D1, ... DN]
must match `labels`.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
default_name = _at_k_name('recall', class_id=class_id)
with ops.name_scope(name, default_name,
(top_k_predictions, labels, weights)) as name_scope:
return metrics_impl.recall_at_top_k(
labels=labels,
predictions_idx=top_k_predictions,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name_scope)
def _compute_recall_at_precision(tp, fp, fn, precision, name,
strict_mode=False):
"""Helper function to compute recall at a given `precision`.
Args:
tp: The number of true positives.
fp: The number of false positives.
fn: The number of false negatives.
precision: The precision for which the recall will be calculated.
name: An optional variable_scope name.
strict_mode: If true and there exists a threshold where the precision is
no smaller than the target precision, return the corresponding recall at
the threshold. Otherwise, return 0. If false, find the threshold where the
precision is closest to the target precision and return the recall at the
threshold.
Returns:
The recall at a given `precision`.
"""
precisions = math_ops.div(tp, tp + fp + _EPSILON)
if not strict_mode:
tf_index = math_ops.argmin(
math_ops.abs(precisions - precision), 0, output_type=dtypes.int32)
# Now, we have the implicit threshold, so compute the recall:
return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,
name)
else:
# We aim to find the threshold where the precision is minimum but no smaller
# than the target precision.
# The rationale:
# 1. Compute the difference between precisions (by different thresholds) and
# the target precision.
# 2. Take the reciprocal of the values by the above step. The intention is
# to make the positive values rank before negative values and also the
# smaller positives rank before larger positives.
tf_index = math_ops.argmax(
math_ops.div(1.0, precisions - precision + _EPSILON),
0,
output_type=dtypes.int32)
def _return_good_recall():
return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,
name)
return control_flow_ops.cond(precisions[tf_index] >= precision,
_return_good_recall, lambda: .0)
def recall_at_precision(labels,
predictions,
precision,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None,
strict_mode=False):
"""Computes `recall` at `precision`.
The `recall_at_precision` function creates three local variables,
`tp` (true positives), `fp` (false positives) and `fn` (false negatives)
that are used to compute the `recall` at the given `precision` value. The
threshold for the given `precision` value is computed and used to evaluate the
corresponding `recall`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall`. `update_op` increments the `tp`, `fp` and `fn` counts with the
weight of each case found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
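For example, a small illustrative sketch, assuming the function is exported as
`tf.contrib.metrics.recall_at_precision` (scores and labels below are made up):
```python
import tensorflow as tf
labels = tf.constant([True, False, True, True])
predictions = tf.constant([0.9, 0.8, 0.6, 0.2])
recall, update_op = tf.contrib.metrics.recall_at_precision(
    labels, predictions, precision=0.75)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  # Predicting every example positive already yields precision 0.75,
  # so the recall at that threshold is ~1.0.
  print(sess.run(recall))
```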
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
precision: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
`precision`.
metrics_collections: An optional list of collections that `recall`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
strict_mode: If true and there exists a threshold where the precision is
no smaller than the target precision, return the corresponding recall at the
threshold. Otherwise, return 0. If false, find the threshold where the
precision is closest to the target precision and return the recall at the
threshold.
Returns:
recall: A scalar `Tensor` representing the recall at the given
`precision` value.
update_op: An operation that increments the `tp`, `fp` and `fn`
variables appropriately and whose value matches `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`precision` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
if not 0 <= precision <= 1:
raise ValueError('`precision` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'recall_at_precision',
(predictions, labels, weights)):
thresholds = [
i * 1.0 / (num_thresholds - 1) for i in range(1, num_thresholds - 1)
]
thresholds = [0.0 - _EPSILON] + thresholds + [1.0 + _EPSILON]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights)
recall = _compute_recall_at_precision(values['tp'], values['fp'],
values['fn'], precision, 'value',
strict_mode)
update_op = _compute_recall_at_precision(update_ops['tp'], update_ops['fp'],
update_ops['fn'], precision,
'update_op', strict_mode)
if metrics_collections:
ops.add_to_collections(metrics_collections, recall)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return recall, update_op
def precision_at_recall(labels,
predictions,
target_recall,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the precision at a given recall.
This function creates variables to track the true positives, false positives,
true negatives, and false negatives at a set of thresholds. Among those
thresholds where recall is at least `target_recall`, precision is computed
at the threshold where recall is closest to `target_recall`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
precision at `target_recall`. `update_op` increments the counts of true
positives, false positives, true negatives, and false negatives with the
weight of each case found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about precision and recall, see
http://en.wikipedia.org/wiki/Precision_and_recall
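For example, an illustrative sketch, assuming the function is exported as
`tf.contrib.metrics.precision_at_recall`:
```python
import tensorflow as tf
labels = tf.constant([True, False, True, True])
predictions = tf.constant([0.9, 0.8, 0.6, 0.2])
precision, update_op = tf.contrib.metrics.precision_at_recall(
    labels, predictions, target_recall=0.6)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  # At the largest threshold still reaching recall >= 0.6, tp=2 and fp=1.
  print(sess.run(precision))  # ~0.67
```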
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
target_recall: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
recall.
metrics_collections: An optional list of collections to which `precision`
should be added.
updates_collections: An optional list of collections to which `update_op`
should be added.
name: An optional variable_scope name.
Returns:
precision: A scalar `Tensor` representing the precision at the given
`target_recall` value.
update_op: An operation that increments the variables for tracking the
true positives, false positives, true negatives, and false negatives and
whose value matches `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`target_recall` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.precision_at_recall is not '
'supported when eager execution is enabled.')
if target_recall < 0 or target_recall > 1:
raise ValueError('`target_recall` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'precision_at_recall',
(predictions, labels, weights)):
kepsilon = 1e-7 # Used to avoid division by zero.
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights)
def compute_precision_at_recall(tp, fp, fn, name):
"""Computes the precision at a given recall.
Args:
tp: True positives.
fp: False positives.
fn: False negatives.
name: A name for the operation.
Returns:
The precision at the desired recall.
"""
recalls = math_ops.div(tp, tp + fn + kepsilon)
# Because recall is monotone decreasing as a function of the threshold,
# the smallest recall exceeding target_recall occurs at the largest
# threshold where recall >= target_recall.
admissible_recalls = math_ops.cast(
math_ops.greater_equal(recalls, target_recall), dtypes.int64)
tf_index = math_ops.reduce_sum(admissible_recalls) - 1
# Now we have the threshold at which to compute precision:
return math_ops.div(tp[tf_index] + kepsilon,
tp[tf_index] + fp[tf_index] + kepsilon,
name)
precision_value = compute_precision_at_recall(
values['tp'], values['fp'], values['fn'], 'value')
update_op = compute_precision_at_recall(
update_ops['tp'], update_ops['fp'], update_ops['fn'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, precision_value)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return precision_value, update_op
def streaming_sparse_average_precision_at_k(predictions,
labels,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
See `sparse_average_precision_at_k` for details on formula. `weights` are
applied to the result of `sparse_average_precision_at_k`.
`streaming_sparse_average_precision_at_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
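A minimal sketch with illustrative logits and labels (one relevant class per
example; a hit at rank 1 contributes 1.0 and a hit at rank 2 contributes 0.5):
```python
import tensorflow as tf
predictions = tf.constant([[0.6, 0.3, 0.1],
                           [0.5, 0.1, 0.4]])
labels = tf.constant([[0], [2]], dtype=tf.int64)
mean_ap, update_op = (
    tf.contrib.metrics.streaming_sparse_average_precision_at_k(
        predictions, labels, k=2))
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(mean_ap))  # ~0.75
```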
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and `predictions` has shape
[batch size, num_classes]. The final dimension contains the logit values
for each class. [D1, ... DN] must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
k: Integer, k for @k metric. This will calculate an average precision for
range `[1,k]`, as documented above.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
"""
return metrics.average_precision_at_k(
k=k,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_sparse_average_precision_at_top_k(top_k_predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
`streaming_sparse_average_precision_at_top_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Set operations applied to `top_k` and `labels` calculate
the true positives and false positives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_positive_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
Commonly, N=1 and `top_k_predictions` has shape [batch size, k]. The final
dimension must be set and contains the top `k` predicted class indices.
[D1, ... DN] must match `labels`. Values should be in range
[0, num_classes).
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `top_k_predictions`.
Values should be in range [0, num_classes).
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
Raises:
ValueError: if the last dimension of top_k_predictions is not set.
"""
return metrics_impl._streaming_sparse_average_precision_at_top_k( # pylint: disable=protected-access
predictions_idx=top_k_predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.mean_absolute_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_mean_absolute_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean absolute error between the labels and predictions.
The `streaming_mean_absolute_error` function creates two local variables,
`total` and `count` that are used to compute the mean absolute error. This
average is weighted by `weights`, and it is ultimately returned as
`mean_absolute_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_absolute_error`. Internally, an `absolute_errors` operation computes the
absolute value of the differences between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `absolute_errors`, and it increments `count` with the reduced
sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
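For example, a minimal sketch with illustrative values:
```python
import tensorflow as tf
predictions = tf.constant([2.0, 5.0])
labels = tf.constant([1.0, 7.0])
mae, update_op = tf.contrib.metrics.streaming_mean_absolute_error(
    predictions, labels)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)  # |2 - 1| and |5 - 7| average to 1.5.
  print(sess.run(mae))  # 1.5
```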
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_absolute_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_absolute_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_absolute_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_absolute_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_mean_relative_error(predictions,
labels,
normalizer,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean relative error by normalizing with the given values.
The `streaming_mean_relative_error` function creates two local variables,
`total` and `count` that are used to compute the mean relative absolute error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_relative_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_relative_error`. Internally, a `relative_errors` operation divides the
absolute value of the differences between `predictions` and `labels` by the
`normalizer`. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `relative_errors`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
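For example, normalizing by the labels themselves (illustrative values):
```python
import tensorflow as tf
predictions = tf.constant([2.0, 5.0])
labels = tf.constant([1.0, 10.0])
error, update_op = tf.contrib.metrics.streaming_mean_relative_error(
    predictions, labels, normalizer=labels)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)  # |2 - 1| / 1 = 1.0 and |5 - 10| / 10 = 0.5.
  print(sess.run(error))  # 0.75
```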
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
normalizer: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_relative_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_relative_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_relative_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_relative_error(
normalizer=normalizer,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.mean_squared_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_mean_squared_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean squared error between the labels and predictions.
The `streaming_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_squared_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_squared_error`. Internally, a `squared_error` operation computes the
element-wise square of the difference between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
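For example, a minimal sketch with illustrative values:
```python
import tensorflow as tf
predictions = tf.constant([2.0, 5.0])
labels = tf.constant([1.0, 7.0])
mse, update_op = tf.contrib.metrics.streaming_mean_squared_error(
    predictions, labels)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)  # Squared errors 1 and 4 average to 2.5.
  print(sess.run(mse))  # 2.5
```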
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_squared_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_squared_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(
None,
'Please switch to tf.metrics.root_mean_squared_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_root_mean_squared_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the root mean squared error between the labels and predictions.
The `streaming_root_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the root mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`root_mean_squared_error`: an idempotent operation that takes the square root
of the division of `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`root_mean_squared_error`. Internally, a `squared_error` operation computes
the element-wise square of the difference between `predictions` and `labels`.
Then `update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
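For example, streaming the metric over two illustrative batches; repeated runs
of `update_op` accumulate `total` and `count` across the whole stream:
```python
import tensorflow as tf
predictions = tf.placeholder(tf.float32, shape=[None])
labels = tf.placeholder(tf.float32, shape=[None])
rmse, update_op = tf.contrib.metrics.streaming_root_mean_squared_error(
    predictions, labels)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op, {predictions: [2.0], labels: [1.0]})  # error 1
  sess.run(update_op, {predictions: [5.0], labels: [7.0]})  # error -2
  print(sess.run(rmse))  # sqrt((1 + 4) / 2) ~= 1.58
```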
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`root_mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
root_mean_squared_error: A `Tensor` representing the current mean, the value
of `total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `root_mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.root_mean_squared_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_covariance(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the unbiased sample covariance between `predictions` and `labels`.
The `streaming_covariance` function creates four local variables,
`comoment`, `mean_prediction`, `mean_label`, and `count`, which are used to
compute the sample covariance between predictions and labels across multiple
batches of data. The covariance is ultimately returned as an idempotent
operation that simply divides `comoment` by `count` - 1. We use `count` - 1
in order to get an unbiased estimate.
The algorithm used for this online computation is described in
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance.
Specifically, the formula used to combine two sample comoments is
`C_AB = C_A + C_B + (E[x_A] - E[x_B]) * (E[y_A] - E[y_B]) * n_A * n_B / n_AB`
The comoment for a single batch of data is simply
`sum((x - E[x]) * (y - E[y]))`, optionally weighted.
If `weights` is not None, then it is used to compute weighted comoments,
means, and count. NOTE: these weights are treated as "frequency weights", as
opposed to "reliability weights". See discussion of the difference on
https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
To facilitate the computation of covariance across multiple batches of data,
the function creates an `update_op` operation, which updates underlying
variables and returns the updated covariance.
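For example, a minimal single-batch sketch with illustrative values:
```python
import tensorflow as tf
predictions = tf.constant([1.0, 2.0, 3.0, 4.0])
labels = tf.constant([2.0, 4.0, 6.0, 8.0])
cov, update_op = tf.contrib.metrics.streaming_covariance(predictions, labels)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  # sum((x - 2.5) * (y - 5)) / (4 - 1) = 10 / 3.
  print(sess.run(cov))  # ~3.33
```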
Args:
predictions: A `Tensor` of arbitrary size.
labels: A `Tensor` of the same size as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
covariance: A `Tensor` representing the current unbiased sample covariance,
`comoment` / (`count` - 1).
update_op: An operation that updates the local variables appropriately.
Raises:
ValueError: If labels and predictions are of different sizes or if either
`metrics_collections` or `updates_collections` are not a list or tuple.
"""
with variable_scope.variable_scope(name, 'covariance',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
mean_prediction = metrics_impl.metric_variable(
[], dtypes.float32, name='mean_prediction')
mean_label = metrics_impl.metric_variable(
[], dtypes.float32, name='mean_label')
comoment = metrics_impl.metric_variable( # C_A in update equation
[], dtypes.float32, name='comoment')
if weights is None:
batch_count = math_ops.to_float(array_ops.size(labels)) # n_B in eqn
weighted_predictions = predictions
weighted_labels = labels
else:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
batch_count = math_ops.reduce_sum(weights) # n_B in eqn
weighted_predictions = math_ops.multiply(predictions, weights)
weighted_labels = math_ops.multiply(labels, weights)
update_count = state_ops.assign_add(count_, batch_count) # n_AB in eqn
prev_count = update_count - batch_count # n_A in update equation
# We update the means by Delta=Error*BatchCount/(BatchCount+PrevCount)
# batch_mean_prediction is E[x_B] in the update equation
batch_mean_prediction = _safe_div(
math_ops.reduce_sum(weighted_predictions), batch_count,
'batch_mean_prediction')
delta_mean_prediction = _safe_div(
(batch_mean_prediction - mean_prediction) * batch_count, update_count,
'delta_mean_prediction')
update_mean_prediction = state_ops.assign_add(mean_prediction,
delta_mean_prediction)
# prev_mean_prediction is E[x_A] in the update equation
prev_mean_prediction = update_mean_prediction - delta_mean_prediction
# batch_mean_label is E[y_B] in the update equation
batch_mean_label = _safe_div(
math_ops.reduce_sum(weighted_labels), batch_count, 'batch_mean_label')
delta_mean_label = _safe_div((batch_mean_label - mean_label) * batch_count,
update_count, 'delta_mean_label')
update_mean_label = state_ops.assign_add(mean_label, delta_mean_label)
# prev_mean_label is E[y_A] in the update equation
prev_mean_label = update_mean_label - delta_mean_label
unweighted_batch_coresiduals = ((predictions - batch_mean_prediction) *
(labels - batch_mean_label))
# batch_comoment is C_B in the update equation
if weights is None:
batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals)
else:
batch_comoment = math_ops.reduce_sum(
unweighted_batch_coresiduals * weights)
# View delta_comoment as = C_AB - C_A in the update equation above.
# Since C_A is stored in a var, by how much do we need to increment that var
# to make the var = C_AB?
delta_comoment = (
batch_comoment + (prev_mean_prediction - batch_mean_prediction) *
(prev_mean_label - batch_mean_label) *
(prev_count * batch_count / update_count))
update_comoment = state_ops.assign_add(comoment, delta_comoment)
covariance = array_ops.where(
math_ops.less_equal(count_, 1.),
float('nan'),
math_ops.truediv(comoment, count_ - 1),
name='covariance')
with ops.control_dependencies([update_comoment]):
update_op = array_ops.where(
math_ops.less_equal(count_, 1.),
float('nan'),
math_ops.truediv(comoment, count_ - 1),
name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, covariance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return covariance, update_op
def streaming_pearson_correlation(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes Pearson correlation coefficient between `predictions`, `labels`.
The `streaming_pearson_correlation` function delegates to
`streaming_covariance` the tracking of three [co]variances:
- `streaming_covariance(predictions, labels)`, i.e. covariance
- `streaming_covariance(predictions, predictions)`, i.e. variance
- `streaming_covariance(labels, labels)`, i.e. variance
The product-moment correlation ultimately returned is an idempotent operation
`cov(predictions, labels) / sqrt(var(predictions) * var(labels))`. To
facilitate correlation computation across multiple batches, the function
groups the `update_op`s of the underlying streaming_covariance and returns an
`update_op`.
If `weights` is not None, then it is used to compute a weighted correlation.
NOTE: these weights are treated as "frequency weights", as opposed to
"reliability weights". See discussion of the difference on
https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
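For example, an illustrative sketch where `labels` is a perfect linear
function of `predictions`, so the streamed correlation is ~1.0:
```python
import tensorflow as tf
predictions = tf.constant([1.0, 2.0, 3.0, 4.0])
labels = tf.constant([2.0, 4.0, 6.0, 8.0])
pearson_r, update_op = tf.contrib.metrics.streaming_pearson_correlation(
    predictions, labels)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(pearson_r))  # ~1.0
```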
Args:
predictions: A `Tensor` of arbitrary size.
labels: A `Tensor` of the same size as predictions.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
pearson_r: A `Tensor` representing the current Pearson product-moment
correlation coefficient, the value of
`cov(predictions, labels) / sqrt(var(predictions) * var(labels))`.
update_op: An operation that updates the underlying variables appropriately.
Raises:
ValueError: If `labels` and `predictions` are of different sizes, or if
`weights` is the wrong size, or if either `metrics_collections` or
`updates_collections` are not a `list` or `tuple`.
"""
with variable_scope.variable_scope(name, 'pearson_r',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# Broadcast weights here to avoid duplicate broadcasting in each call to
# `streaming_covariance`.
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
cov, update_cov = streaming_covariance(
predictions, labels, weights=weights, name='covariance')
var_predictions, update_var_predictions = streaming_covariance(
predictions, predictions, weights=weights, name='variance_predictions')
var_labels, update_var_labels = streaming_covariance(
labels, labels, weights=weights, name='variance_labels')
pearson_r = math_ops.truediv(
cov,
math_ops.multiply(
math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)),
name='pearson_r')
update_op = math_ops.truediv(
update_cov,
math_ops.multiply(
math_ops.sqrt(update_var_predictions),
math_ops.sqrt(update_var_labels)),
name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, pearson_r)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return pearson_r, update_op
# TODO(nsilberman): add a 'normalized' flag so that the user can request
# normalization if the inputs are not normalized.
def streaming_mean_cosine_distance(predictions,
labels,
dim,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the cosine distance between the labels and predictions.
The `streaming_mean_cosine_distance` function creates two local variables,
`total` and `count` that are used to compute the average cosine distance
between `predictions` and `labels`. This average is weighted by `weights`,
and it is ultimately returned as `mean_distance`, which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_distance`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
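For example, with unit-normalized illustrative vectors (the first pair is
identical, distance 0; the second is orthogonal, distance 1):
```python
import tensorflow as tf
predictions = tf.constant([[1.0, 0.0], [0.0, 1.0]])
labels = tf.constant([[1.0, 0.0], [1.0, 0.0]])
distance, update_op = tf.contrib.metrics.streaming_mean_cosine_distance(
    predictions, labels, dim=1)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(distance))  # ~0.5
```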
Args:
predictions: A `Tensor` of the same shape as `labels`.
labels: A `Tensor` of arbitrary shape.
dim: The dimension along which the cosine distance is computed.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`,
and whose dimension `dim` is 1.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
mean_distance: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.multiply(predictions, labels)
radial_diffs = math_ops.reduce_sum(
radial_diffs, reduction_indices=[
dim,
], keepdims=True)
mean_distance, update_op = streaming_mean(radial_diffs, weights, None, None,
name or 'mean_cosine_distance')
mean_distance = math_ops.subtract(1.0, mean_distance)
update_op = math_ops.subtract(1.0, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_distance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_distance, update_op
def streaming_percentage_less(values,
threshold,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the percentage of values less than the given threshold.
The `streaming_percentage_less` function creates two local variables,
`total` and `count` that are used to compute the percentage of `values` that
fall below `threshold`. This rate is weighted by `weights`, and it is
ultimately returned as `percentage` which is an idempotent operation that
simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`percentage`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
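For example (illustrative values; two of the four fall below the threshold):
```python
import tensorflow as tf
values = tf.constant([1.0, 3.0, 5.0, 9.0])
percentage, update_op = tf.contrib.metrics.streaming_percentage_less(
    values, threshold=4.0)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(percentage))  # 0.5
```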
Args:
values: A numeric `Tensor` of arbitrary size.
threshold: A scalar threshold.
weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
percentage: A `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.percentage_below(
values=values,
threshold=threshold,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_mean_iou(predictions,
labels,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculate per-step mean Intersection-Over-Union (mIOU).
Mean Intersection-Over-Union is a common evaluation metric for
semantic image segmentation, which first computes the IOU for each
semantic class and then computes the average over classes.
IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by `weights`,
and mIOU is then calculated from it.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean_iou`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
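For example, a minimal binary-segmentation sketch with illustrative values:
```python
import tensorflow as tf
predictions = tf.constant([0, 1, 1, 0])
labels = tf.constant([0, 1, 0, 0])
miou, update_op = tf.contrib.metrics.streaming_mean_iou(
    predictions, labels, num_classes=2)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)  # Accumulates the confusion matrix.
  # IOU is 2/3 for class 0 and 1/2 for class 1.
  print(sess.run(miou))  # ~0.58
```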
Args:
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened if its rank > 1.
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `mean_iou`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
mean_iou: A `Tensor` representing the mean intersection-over-union.
update_op: An operation that increments the confusion matrix.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_iou(
num_classes=num_classes,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def _next_array_size(required_size, growth_factor=1.5):
"""Calculate the next size for reallocating a dynamic array.
Args:
required_size: number or tf.Tensor specifying required array capacity.
growth_factor: optional number or tf.Tensor specifying the growth factor
between subsequent allocations.
Returns:
tf.Tensor with dtype=int32 giving the next array size.
"""
exponent = math_ops.ceil(
math_ops.log(math_ops.cast(required_size, dtypes.float32)) / math_ops.log(
math_ops.cast(growth_factor, dtypes.float32)))
return math_ops.cast(math_ops.ceil(growth_factor**exponent), dtypes.int32)
def streaming_concat(values,
axis=0,
max_size=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Concatenate values along an axis across batches.
The function `streaming_concat` creates two local variables, `array` and
`size`, that are used to store concatenated values. Internally, `array` is
used as storage for a dynamic array (if `max_size` is `None`), which ensures
that updates can be run in amortized constant time.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that appends the values of a tensor and returns the
length of the concatenated axis.
This op allows for evaluating metrics that cannot be updated incrementally
using the same framework as other streaming metrics.
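For example, concatenating two illustrative batches; each run of `update_op`
appends a batch and `value` returns everything seen so far:
```python
import tensorflow as tf
values = tf.placeholder(tf.int32, shape=[None])
concatenated, update_op = tf.contrib.metrics.streaming_concat(values)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op, {values: [1, 2]})
  sess.run(update_op, {values: [3, 4, 5]})
  print(sess.run(concatenated))  # [1 2 3 4 5]
```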
Args:
values: `Tensor` to concatenate. Rank and the shape along all axes other
than the axis to concatenate along must be statically known.
axis: optional integer axis to concatenate along.
max_size: optional integer maximum size of `value` along the given axis.
Once the maximum size is reached, further updates are no-ops. By default,
there is no maximum size: the array is resized as necessary.
metrics_collections: An optional list of collections that `value`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
value: A `Tensor` representing the concatenated values.
update_op: An operation that concatenates the next values.
Raises:
ValueError: if `values` does not have a statically known rank, `axis` is
not in the valid range or the size of `values` is not statically known
along any axis other than `axis`.
"""
with variable_scope.variable_scope(name, 'streaming_concat', (values,)):
# pylint: disable=invalid-slice-index
values_shape = values.get_shape()
if values_shape.dims is None:
raise ValueError('`values` must have statically known rank')
ndim = len(values_shape)
if axis < 0:
axis += ndim
if not 0 <= axis < ndim:
raise ValueError('axis = %r not in [0, %r)' % (axis, ndim))
fixed_shape = [dim.value for n, dim in enumerate(values_shape) if n != axis]
if any(value is None for value in fixed_shape):
raise ValueError('all dimensions of `values` other than the dimension to '
'concatenate along must have statically known size')
# We move `axis` to the front of the internal array so assign ops can be
# applied to contiguous slices
init_size = 0 if max_size is None else max_size
init_shape = [init_size] + fixed_shape
array = metrics_impl.metric_variable(
init_shape, values.dtype, validate_shape=False, name='array')
size = metrics_impl.metric_variable([], dtypes.int32, name='size')
perm = [0 if n == axis else n + 1 if n < axis else n for n in range(ndim)]
valid_array = array[:size]
valid_array.set_shape([None] + fixed_shape)
value = array_ops.transpose(valid_array, perm, name='concat')
values_size = array_ops.shape(values)[axis]
if max_size is None:
batch_size = values_size
else:
batch_size = math_ops.minimum(values_size, max_size - size)
perm = [axis] + [n for n in range(ndim) if n != axis]
batch_values = array_ops.transpose(values, perm)[:batch_size]
def reallocate():
next_size = _next_array_size(new_size)
next_shape = array_ops.stack([next_size] + fixed_shape)
new_value = array_ops.zeros(next_shape, dtype=values.dtype)
old_value = array.value()
assign_op = state_ops.assign(array, new_value, validate_shape=False)
with ops.control_dependencies([assign_op]):
copy_op = array[:size].assign(old_value[:size])
# return value needs to be the same dtype as no_op() for cond
with ops.control_dependencies([copy_op]):
return control_flow_ops.no_op()
new_size = size + batch_size
array_size = array_ops.shape_internal(array, optimize=False)[0]
maybe_reallocate_op = control_flow_ops.cond(
new_size > array_size, reallocate, control_flow_ops.no_op)
with ops.control_dependencies([maybe_reallocate_op]):
append_values_op = array[size:new_size].assign(batch_values)
with ops.control_dependencies([append_values_op]):
update_op = size.assign(new_size)
if metrics_collections:
ops.add_to_collections(metrics_collections, value)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return value, update_op
# pylint: enable=invalid-slice-index
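# Illustrative usage sketch (not part of the original module); `sess`,
# `batch_values` and `num_batches` are placeholder names and `tf` is assumed
# to be the imported TensorFlow package:
#
#   concatenated, update_op = streaming_concat(batch_values)
#   sess.run(tf.local_variables_initializer())
#   for _ in range(num_batches):
#     sess.run(update_op)              # appends the current batch
#   all_values = sess.run(concatenated)  # everything accumulated so far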
def aggregate_metrics(*value_update_tuples):
"""Aggregates the metric value tensors and update ops into two lists.
Args:
*value_update_tuples: a variable number of tuples, each of which contain the
pair of (value_tensor, update_op) from a streaming metric.
Returns:
A list of value `Tensor` objects and a list of update ops.
Raises:
ValueError: if `value_update_tuples` is empty.
"""
if not value_update_tuples:
raise ValueError('Expected at least one value_tensor/update_op pair')
value_ops, update_ops = zip(*value_update_tuples)
return list(value_ops), list(update_ops)
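# Illustrative sketch (not part of the original module): each argument is a
# (value_tensor, update_op) pair, e.g.
#
#   value_ops, update_ops = aggregate_metrics(
#       tf.metrics.mean(losses),
#       tf.metrics.accuracy(labels, predictions))
#
# `losses`, `labels` and `predictions` are placeholder names.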
def aggregate_metric_map(names_to_tuples):
"""Aggregates the metric names to tuple dictionary.
This function is useful for pairing metric names with their associated value
and update ops when the list of metrics is long. For example:
```python
metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({
'Mean Absolute Error': new_slim.metrics.streaming_mean_absolute_error(
predictions, labels, weights),
'Mean Relative Error': new_slim.metrics.streaming_mean_relative_error(
predictions, labels, labels, weights),
'RMSE Linear': new_slim.metrics.streaming_root_mean_squared_error(
predictions, labels, weights),
'RMSE Log': new_slim.metrics.streaming_root_mean_squared_error(
predictions, labels, weights),
})
```
Args:
names_to_tuples: a map of metric names to tuples, each of which contain the
pair of (value_tensor, update_op) from a streaming metric.
Returns:
A dictionary from metric names to value ops and a dictionary from metric
names to update ops.
"""
metric_names = names_to_tuples.keys()
value_ops, update_ops = zip(*names_to_tuples.values())
return dict(zip(metric_names, value_ops)), dict(zip(metric_names, update_ops))
def count(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the number of examples, or sum of `weights`.
This metric keeps track of the denominator in `tf.metrics.mean`.
When evaluating some metric (e.g. mean) on one or more subsets of the data,
this auxiliary metric is useful for keeping track of how many examples there
are in each subset.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
    values: A `Tensor` of arbitrary dimensions. Only its shape is used.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
count: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the metric from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.contrib.metrics.count is not supported when eager '
'execution is enabled.')
with variable_scope.variable_scope(name, 'count', (values, weights)):
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
if weights is None:
num_values = math_ops.to_float(array_ops.size(values))
else:
values = math_ops.to_float(values)
values, _, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=values,
labels=None,
weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
num_values = math_ops.reduce_sum(weights)
with ops.control_dependencies([values]):
update_count_op = state_ops.assign_add(count_, num_values)
count_ = metrics_impl._aggregate_variable(count_, metrics_collections) # pylint: disable=protected-access
if updates_collections:
ops.add_to_collections(updates_collections, update_count_op)
return count_, update_count_op
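# Illustrative sketch (not part of the original module): to track how many
# weighted examples of a subset contributed to another metric,
#
#   num_in_subset, update_op = count(values, weights=subset_mask)
#
# where `subset_mask` holds 1.0 for examples in the subset and 0.0 elsewhere;
# `values` and `subset_mask` are placeholder names.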
def cohen_kappa(labels,
predictions_idx,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates Cohen's kappa.
[Cohen's kappa](https://en.wikipedia.org/wiki/Cohen's_kappa) is a statistic
that measures inter-annotator agreement.
The `cohen_kappa` function calculates the confusion matrix, and creates three
local variables to compute the Cohen's kappa: `po`, `pe_row`, and `pe_col`,
which refer to the diagonal part, rows and columns totals of the confusion
matrix, respectively. This value is ultimately returned as `kappa`, an
idempotent operation that is calculated by
pe = (pe_row * pe_col) / N
k = (sum(po) - sum(pe)) / (N - sum(pe))
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`kappa`. `update_op` weights each prediction by the corresponding value in
`weights`.
Class labels are expected to start at 0. E.g., if `num_classes`
was three, then the possible labels would be [0, 1, 2].
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  NOTE: Equivalent to `sklearn.metrics.cohen_kappa_score`, but this method
  doesn't support a weighted matrix yet.
Args:
labels: 1-D `Tensor` of real labels for the classification task. Must be
one of the following types: int16, int32, int64.
predictions_idx: 1-D `Tensor` of predicted class indices for a given
classification. Must have the same type as `labels`.
num_classes: The possible number of labels.
weights: Optional `Tensor` whose shape matches `predictions`.
metrics_collections: An optional list of collections that `kappa` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
kappa: Scalar float `Tensor` representing the current Cohen's kappa.
update_op: `Operation` that increments `po`, `pe_row` and `pe_col`
variables appropriately and whose value matches `kappa`.
Raises:
ValueError: If `num_classes` is less than 2, or `predictions` and `labels`
have mismatched shapes, or if `weights` is not `None` and its shape
doesn't match `predictions`, or if either `metrics_collections` or
`updates_collections` are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.contrib.metrics.cohen_kappa is not supported '
'when eager execution is enabled.')
if num_classes < 2:
    raise ValueError('`num_classes` must be >= 2. '
'Found: {}'.format(num_classes))
with variable_scope.variable_scope(name, 'cohen_kappa',
(labels, predictions_idx, weights)):
# Convert 2-dim (num, 1) to 1-dim (num,)
labels.get_shape().with_rank_at_most(2)
if labels.get_shape().ndims == 2:
labels = array_ops.squeeze(labels, axis=[-1])
predictions_idx, labels, weights = (
metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions_idx,
labels=labels,
weights=weights))
predictions_idx.get_shape().assert_is_compatible_with(labels.get_shape())
stat_dtype = (
dtypes.int64
if weights is None or weights.dtype.is_integer else dtypes.float32)
po = metrics_impl.metric_variable((num_classes,), stat_dtype, name='po')
pe_row = metrics_impl.metric_variable(
(num_classes,), stat_dtype, name='pe_row')
pe_col = metrics_impl.metric_variable(
(num_classes,), stat_dtype, name='pe_col')
# Table of the counts of agreement:
counts_in_table = confusion_matrix.confusion_matrix(
labels,
predictions_idx,
num_classes=num_classes,
weights=weights,
dtype=stat_dtype,
name='counts_in_table')
po_t = array_ops.diag_part(counts_in_table)
pe_row_t = math_ops.reduce_sum(counts_in_table, axis=0)
pe_col_t = math_ops.reduce_sum(counts_in_table, axis=1)
update_po = state_ops.assign_add(po, po_t)
update_pe_row = state_ops.assign_add(pe_row, pe_row_t)
update_pe_col = state_ops.assign_add(pe_col, pe_col_t)
def _calculate_k(po, pe_row, pe_col, name):
po_sum = math_ops.reduce_sum(po)
total = math_ops.reduce_sum(pe_row)
pe_sum = math_ops.reduce_sum(
metrics_impl._safe_div( # pylint: disable=protected-access
pe_row * pe_col, total, None))
po_sum, pe_sum, total = (math_ops.to_double(po_sum),
math_ops.to_double(pe_sum),
math_ops.to_double(total))
# kappa = (po - pe) / (N - pe)
k = metrics_impl._safe_scalar_div( # pylint: disable=protected-access
po_sum - pe_sum,
total - pe_sum,
name=name)
return k
kappa = _calculate_k(po, pe_row, pe_col, name='value')
update_op = _calculate_k(
update_po, update_pe_row, update_pe_col, name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, kappa)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return kappa, update_op
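# Worked numeric example (for illustration only, counts are made up): for the
# 2x2 confusion matrix [[20, 5], [10, 15]] with N = 50, po = [20, 15]
# (sum 35), pe_row = [30, 20] (column totals), pe_col = [25, 25] (row totals),
# sum(pe) = (30 * 25 + 20 * 25) / 50 = 25, and
# kappa = (35 - 25) / (50 - 25) = 0.4.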
__all__ = [
'auc_with_confidence_intervals',
'aggregate_metric_map',
'aggregate_metrics',
'cohen_kappa',
'count',
'precision_recall_at_equal_thresholds',
'recall_at_precision',
'sparse_recall_at_top_k',
'streaming_accuracy',
'streaming_auc',
'streaming_curve_points',
'streaming_dynamic_auc',
'streaming_false_negative_rate',
'streaming_false_negative_rate_at_thresholds',
'streaming_false_negatives',
'streaming_false_negatives_at_thresholds',
'streaming_false_positive_rate',
'streaming_false_positive_rate_at_thresholds',
'streaming_false_positives',
'streaming_false_positives_at_thresholds',
'streaming_mean',
'streaming_mean_absolute_error',
'streaming_mean_cosine_distance',
'streaming_mean_iou',
'streaming_mean_relative_error',
'streaming_mean_squared_error',
'streaming_mean_tensor',
'streaming_percentage_less',
'streaming_precision',
'streaming_precision_at_thresholds',
'streaming_recall',
'streaming_recall_at_k',
'streaming_recall_at_thresholds',
'streaming_root_mean_squared_error',
'streaming_sensitivity_at_specificity',
'streaming_sparse_average_precision_at_k',
'streaming_sparse_average_precision_at_top_k',
'streaming_sparse_precision_at_k',
'streaming_sparse_precision_at_top_k',
'streaming_sparse_recall_at_k',
'streaming_specificity_at_sensitivity',
'streaming_true_negatives',
'streaming_true_negatives_at_thresholds',
'streaming_true_positives',
'streaming_true_positives_at_thresholds',
]
| apache-2.0 |
k3nnyfr/s2a_fr-nsis | s2a/Python/Lib/distutils/tests/test_unixccompiler.py | 8 | 5057 | """Tests for distutils.unixccompiler."""
import os
import sys
import unittest
from test.test_support import EnvironmentVarGuard, run_unittest
from distutils import sysconfig
from distutils.unixccompiler import UnixCCompiler
class UnixCCompilerTestCase(unittest.TestCase):
def setUp(self):
self._backup_platform = sys.platform
self._backup_get_config_var = sysconfig.get_config_var
class CompilerWrapper(UnixCCompiler):
def rpath_foo(self):
return self.runtime_library_dir_option('/foo')
self.cc = CompilerWrapper()
def tearDown(self):
sys.platform = self._backup_platform
sysconfig.get_config_var = self._backup_get_config_var
def test_runtime_libdir_option(self):
# not tested under windows
if sys.platform == 'win32':
return
# Issue#5900
#
# Ensure RUNPATH is added to extension modules with RPATH if
# GNU ld is used
# darwin
sys.platform = 'darwin'
self.assertEqual(self.cc.rpath_foo(), '-L/foo')
# hp-ux
sys.platform = 'hp-ux'
old_gcv = sysconfig.get_config_var
def gcv(v):
return 'xxx'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), ['+s', '-L/foo'])
def gcv(v):
return 'gcc'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), ['-Wl,+s', '-L/foo'])
def gcv(v):
return 'g++'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), ['-Wl,+s', '-L/foo'])
sysconfig.get_config_var = old_gcv
# irix646
sys.platform = 'irix646'
self.assertEqual(self.cc.rpath_foo(), ['-rpath', '/foo'])
# osf1V5
sys.platform = 'osf1V5'
self.assertEqual(self.cc.rpath_foo(), ['-rpath', '/foo'])
# GCC GNULD
sys.platform = 'bar'
def gcv(v):
if v == 'CC':
return 'gcc'
elif v == 'GNULD':
return 'yes'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), '-Wl,-R/foo')
# GCC non-GNULD
sys.platform = 'bar'
def gcv(v):
if v == 'CC':
return 'gcc'
elif v == 'GNULD':
return 'no'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), '-Wl,-R/foo')
# GCC GNULD with fully qualified configuration prefix
# see #7617
sys.platform = 'bar'
def gcv(v):
if v == 'CC':
return 'x86_64-pc-linux-gnu-gcc-4.4.2'
elif v == 'GNULD':
return 'yes'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), '-Wl,-R/foo')
# non-GCC GNULD
sys.platform = 'bar'
def gcv(v):
if v == 'CC':
return 'cc'
elif v == 'GNULD':
return 'yes'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), '-R/foo')
# non-GCC non-GNULD
sys.platform = 'bar'
def gcv(v):
if v == 'CC':
return 'cc'
elif v == 'GNULD':
return 'no'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), '-R/foo')
# AIX C/C++ linker
sys.platform = 'aix'
def gcv(v):
return 'xxx'
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), '-R/foo')
@unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for OS X')
def test_osx_cc_overrides_ldshared(self):
# Issue #18080:
# ensure that setting CC env variable also changes default linker
def gcv(v):
if v == 'LDSHARED':
return 'gcc-4.2 -bundle -undefined dynamic_lookup '
return 'gcc-4.2'
sysconfig.get_config_var = gcv
with EnvironmentVarGuard() as env:
env['CC'] = 'my_cc'
del env['LDSHARED']
sysconfig.customize_compiler(self.cc)
self.assertEqual(self.cc.linker_so[0], 'my_cc')
@unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for OS X')
def test_osx_explict_ldshared(self):
# Issue #18080:
# ensure that setting CC env variable does not change
# explicit LDSHARED setting for linker
def gcv(v):
if v == 'LDSHARED':
return 'gcc-4.2 -bundle -undefined dynamic_lookup '
return 'gcc-4.2'
sysconfig.get_config_var = gcv
with EnvironmentVarGuard() as env:
env['CC'] = 'my_cc'
env['LDSHARED'] = 'my_ld -bundle -dynamic'
sysconfig.customize_compiler(self.cc)
self.assertEqual(self.cc.linker_so[0], 'my_ld')
def test_suite():
return unittest.makeSuite(UnixCCompilerTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| gpl-3.0 |
jonboiser/kolibri | kolibri/logger/serializers.py | 1 | 5206 | from django.db.models import Sum
from django.utils.timezone import now
from kolibri.auth.models import FacilityUser
from kolibri.core.serializers import KolibriModelSerializer
from kolibri.logger.models import AttemptLog, ContentSessionLog, ContentSummaryLog, ExamAttemptLog, ExamLog, MasteryLog, UserSessionLog
from rest_framework import serializers
class ContentSessionLogSerializer(KolibriModelSerializer):
extra_fields = serializers.JSONField(default='{}')
class Meta:
model = ContentSessionLog
fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp',
'end_timestamp', 'time_spent', 'kind', 'extra_fields', 'progress')
class ExamLogSerializer(KolibriModelSerializer):
progress = serializers.SerializerMethodField()
score = serializers.SerializerMethodField()
def get_progress(self, obj):
return obj.attemptlogs.values_list('item').distinct().count()
def get_score(self, obj):
return obj.attemptlogs.values_list('item').order_by('completion_timestamp').distinct().aggregate(Sum('correct')).get('correct__sum')
class Meta:
model = ExamLog
fields = ('id', 'exam', 'user', 'closed', 'progress', 'score', 'completion_timestamp')
read_only_fields = ('completion_timestamp', )
def update(self, instance, validated_data):
# This has changed, set the completion timestamp
if validated_data.get('closed') and not instance.closed:
instance.completion_timestamp = now()
return super(ExamLogSerializer, self).update(instance, validated_data)
class MasteryLogSerializer(KolibriModelSerializer):
pastattempts = serializers.SerializerMethodField()
totalattempts = serializers.SerializerMethodField()
mastery_criterion = serializers.JSONField(default='{}')
class Meta:
model = MasteryLog
fields = ('id', 'summarylog', 'start_timestamp', 'pastattempts', 'totalattempts', 'user',
'end_timestamp', 'completion_timestamp', 'mastery_criterion', 'mastery_level', 'complete')
def get_pastattempts(self, obj):
# will return a list of the latest 10 correct and hint_taken fields for each attempt.
return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).values('correct', 'hinted', 'error').order_by('-start_timestamp')[:10]
def get_totalattempts(self, obj):
return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).count()
class AttemptLogSerializer(KolibriModelSerializer):
answer = serializers.JSONField(default='{}')
interaction_history = serializers.JSONField(default='[]')
class Meta:
model = AttemptLog
fields = ('id', 'masterylog', 'start_timestamp', 'sessionlog',
'end_timestamp', 'completion_timestamp', 'item', 'time_spent', 'user',
'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history', 'error')
class ExamAttemptLogSerializer(KolibriModelSerializer):
answer = serializers.JSONField(default='{}', allow_null=True)
interaction_history = serializers.JSONField(default='[]')
class Meta:
model = ExamAttemptLog
fields = ('id', 'examlog', 'start_timestamp', 'channel_id', 'content_id',
'end_timestamp', 'completion_timestamp', 'item', 'time_spent', 'user',
'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history')
def validate(self, data):
# Only do this validation when both are being set
# not necessary on PATCH, for example
if data.get('examlog') and data.get('user'):
try:
if data['examlog'].user != data['user']:
raise serializers.ValidationError('User field and user for related exam log are not the same')
except ExamLog.DoesNotExist:
raise serializers.ValidationError('Invalid exam log')
return data
class ContentSummaryLogSerializer(KolibriModelSerializer):
currentmasterylog = serializers.SerializerMethodField()
extra_fields = serializers.JSONField(default='{}')
class Meta:
model = ContentSummaryLog
fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp', 'currentmasterylog',
'end_timestamp', 'completion_timestamp', 'time_spent', 'progress', 'kind', 'extra_fields')
def get_currentmasterylog(self, obj):
try:
current_log = obj.masterylogs.latest('end_timestamp')
return MasteryLogSerializer(current_log).data
except MasteryLog.DoesNotExist:
return None
class UserSessionLogSerializer(KolibriModelSerializer):
class Meta:
model = UserSessionLog
fields = ('pk', 'user', 'channels', 'start_timestamp', 'last_interaction_timestamp', 'pages')
class TotalContentProgressSerializer(serializers.ModelSerializer):
progress = serializers.SerializerMethodField()
class Meta:
model = FacilityUser
fields = ('progress', 'id')
def get_progress(self, obj):
return obj.contentsummarylog_set.filter(progress=1).aggregate(Sum('progress')).get('progress__sum')
| mit |
wimnat/ansible-modules-core | web_infrastructure/django_manage.py | 10 | 11548 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Scott Anderson <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: django_manage
short_description: Manages a Django application.
description:
- Manages a Django application using the I(manage.py) application frontend to I(django-admin). With the I(virtualenv) parameter, all management commands will be executed by the given I(virtualenv) installation.
version_added: "1.1"
options:
command:
choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ]
description:
      - The name of the Django management command to run. Built-in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate.
- Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run with the I(--noinput) flag.
required: true
app_path:
description:
- The path to the root of the Django application where B(manage.py) lives.
required: true
settings:
description:
- The Python path to the application's settings module, such as 'myapp.settings'.
required: false
pythonpath:
description:
- A directory to add to the Python path. Typically used to include the settings module if it is located external to the application directory.
required: false
virtualenv:
description:
- An optional path to a I(virtualenv) installation to use while running the manage application.
required: false
apps:
description:
- A list of space-delimited apps to target. Used by the 'test' command.
required: false
cache_table:
description:
- The name of the table used for database-backed caching. Used by the 'createcachetable' command.
required: false
database:
description:
- The database to target. Used by the 'createcachetable', 'flush', 'loaddata', and 'syncdb' commands.
required: false
failfast:
description:
- Fail the command immediately if a test fails. Used by the 'test' command.
required: false
default: "no"
choices: [ "yes", "no" ]
fixtures:
description:
- A space-delimited list of fixture file names to load in the database. B(Required) by the 'loaddata' command.
required: false
skip:
description:
- Will skip over out-of-order missing migrations, you can only use this parameter with I(migrate)
required: false
version_added: "1.3"
merge:
description:
- Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with 'migrate' command
required: false
version_added: "1.3"
link:
description:
- Will create links to the files instead of copying them, you can only use this parameter with 'collectstatic' command
required: false
version_added: "1.3"
notes:
- I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified.
- This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location.
- This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately.
- To be able to use the migrate command with django versions < 1.7, you must have south installed and added as an app in your settings.
- To be able to use the collectstatic command, you must have enabled staticfiles in your settings.
- As of ansible 2.x, your I(manage.py) application must be executable (rwxr-xr-x), and must have a valid I(shebang), i.e. "#!/usr/bin/env python", for invoking the appropriate Python interpreter.
requirements: [ "virtualenv", "django" ]
author: "Scott Anderson (@tastychutney)"
'''
EXAMPLES = """
# Run cleanup on the application installed in 'django_dir'.
- django_manage:
command: cleanup
app_path: "{{ django_dir }}"
# Load the initial_data fixture into the application
- django_manage:
command: loaddata
app_path: "{{ django_dir }}"
fixtures: "{{ initial_data }}"
# Run syncdb on the application
- django_manage:
command: syncdb
app_path: "{{ django_dir }}"
settings: "{{ settings_app_name }}"
pythonpath: "{{ settings_dir }}"
virtualenv: "{{ virtualenv_dir }}"
# Run the SmokeTest test case from the main app. Useful for testing deploys.
- django_manage:
command: test
app_path: "{{ django_dir }}"
apps: main.SmokeTest
# Create an initial superuser.
- django_manage:
command: "createsuperuser --noinput --username=admin [email protected]"
app_path: "{{ django_dir }}"
"""
import os
def _fail(module, cmd, out, err, **kwargs):
msg = ''
if out:
msg += "stdout: %s" % (out, )
if err:
msg += "\n:stderr: %s" % (err, )
module.fail_json(cmd=cmd, msg=msg, **kwargs)
def _ensure_virtualenv(module):
venv_param = module.params['virtualenv']
if venv_param is None:
return
vbin = os.path.join(os.path.expanduser(venv_param), 'bin')
activate = os.path.join(vbin, 'activate')
if not os.path.exists(activate):
virtualenv = module.get_bin_path('virtualenv', True)
vcmd = '%s %s' % (virtualenv, venv_param)
vcmd = [virtualenv, venv_param]
rc, out_venv, err_venv = module.run_command(vcmd)
if rc != 0:
_fail(module, vcmd, out_venv, err_venv)
os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
os.environ["VIRTUAL_ENV"] = venv_param
def createcachetable_filter_output(line):
return "Already exists" not in line
def flush_filter_output(line):
return "Installed" in line and "Installed 0 object" not in line
def loaddata_filter_output(line):
return "Installed" in line and "Installed 0 object" not in line
def syncdb_filter_output(line):
return ("Creating table " in line) or ("Installed" in line and "Installed 0 object" not in line)
def migrate_filter_output(line):
return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) or ("Applying" in line)
def collectstatic_filter_output(line):
return line and "0 static files" not in line
def main():
command_allowed_param_map = dict(
cleanup=(),
createcachetable=('cache_table', 'database', ),
flush=('database', ),
loaddata=('database', 'fixtures', ),
syncdb=('database', ),
test=('failfast', 'testrunner', 'liveserver', 'apps', ),
validate=(),
migrate=('apps', 'skip', 'merge', 'database',),
collectstatic=('clear', 'link', ),
)
command_required_param_map = dict(
loaddata=('fixtures', ),
)
# forces --noinput on every command that needs it
noinput_commands = (
'flush',
'syncdb',
'migrate',
'test',
'collectstatic',
)
# These params are allowed for certain commands only
specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner')
# These params are automatically added to the command if present
general_params = ('settings', 'pythonpath', 'database',)
specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
end_of_command_params = ('apps', 'cache_table', 'fixtures')
module = AnsibleModule(
argument_spec=dict(
command = dict(default=None, required=True),
app_path = dict(default=None, required=True),
settings = dict(default=None, required=False),
pythonpath = dict(default=None, required=False, aliases=['python_path']),
virtualenv = dict(default=None, required=False, aliases=['virtual_env']),
apps = dict(default=None, required=False),
cache_table = dict(default=None, required=False),
clear = dict(default=None, required=False, type='bool'),
database = dict(default=None, required=False),
failfast = dict(default='no', required=False, type='bool', aliases=['fail_fast']),
fixtures = dict(default=None, required=False),
liveserver = dict(default=None, required=False, aliases=['live_server']),
testrunner = dict(default=None, required=False, aliases=['test_runner']),
skip = dict(default=None, required=False, type='bool'),
merge = dict(default=None, required=False, type='bool'),
link = dict(default=None, required=False, type='bool'),
),
)
command = module.params['command']
app_path = os.path.expanduser(module.params['app_path'])
virtualenv = module.params['virtualenv']
for param in specific_params:
value = module.params[param]
if param in specific_boolean_params:
value = module.boolean(value)
if value and param not in command_allowed_param_map[command]:
module.fail_json(msg='%s param is incompatible with command=%s' % (param, command))
for param in command_required_param_map.get(command, ()):
if not module.params[param]:
module.fail_json(msg='%s param is required for command=%s' % (param, command))
_ensure_virtualenv(module)
cmd = "./manage.py %s" % (command, )
if command in noinput_commands:
cmd = '%s --noinput' % cmd
for param in general_params:
if module.params[param]:
cmd = '%s --%s=%s' % (cmd, param, module.params[param])
for param in specific_boolean_params:
if module.boolean(module.params[param]):
cmd = '%s --%s' % (cmd, param)
# these params always get tacked on the end of the command
for param in end_of_command_params:
if module.params[param]:
cmd = '%s %s' % (cmd, module.params[param])
rc, out, err = module.run_command(cmd, cwd=os.path.expanduser(app_path))
if rc != 0:
if command == 'createcachetable' and 'table' in err and 'already exists' in err:
out = 'Already exists.'
else:
if "Unknown command:" in err:
_fail(module, cmd, err, "Unknown django command: %s" % command)
_fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path)
changed = False
lines = out.split('\n')
filt = globals().get(command + "_filter_output", None)
if filt:
filtered_output = filter(filt, lines)
if len(filtered_output):
changed = filtered_output
module.exit_json(changed=changed, out=out, cmd=cmd, app_path=app_path, virtualenv=virtualenv,
settings=module.params['settings'], pythonpath=module.params['pythonpath'])
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
sdague/home-assistant | homeassistant/components/dynalite/switch.py | 14 | 1153 | """Support for the Dynalite channels and presets as switches."""
from typing import Callable
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .dynalitebase import DynaliteBase, async_setup_entry_base
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: Callable
) -> None:
"""Record the async_add_entities function to add them later when received from Dynalite."""
async_setup_entry_base(
hass, config_entry, async_add_entities, "switch", DynaliteSwitch
)
class DynaliteSwitch(DynaliteBase, SwitchEntity):
"""Representation of a Dynalite Channel as a Home Assistant Switch."""
@property
def is_on(self) -> bool:
"""Return true if switch is on."""
return self._device.is_on
async def async_turn_on(self, **kwargs) -> None:
"""Turn the switch on."""
await self._device.async_turn_on()
async def async_turn_off(self, **kwargs) -> None:
"""Turn the switch off."""
await self._device.async_turn_off()
| apache-2.0 |
SerpentCS/odoo | addons/account/res_currency.py | 340 | 2267 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010 OpenERP s.a. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
"""Inherit res.currency to handle accounting date values when converting currencies"""
class res_currency_account(osv.osv):
_inherit = "res.currency"
def _get_conversion_rate(self, cr, uid, from_currency, to_currency, context=None):
if context is None:
context = {}
rate = super(res_currency_account, self)._get_conversion_rate(cr, uid, from_currency, to_currency, context=context)
#process the case where the account doesn't work with an outgoing currency rate method 'at date' but 'average'
account = context.get('res.currency.compute.account')
account_invert = context.get('res.currency.compute.account_invert')
if account and account.currency_mode == 'average' and account.currency_id:
query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)
cr.execute('select sum(debit-credit),sum(amount_currency) from account_move_line l ' \
'where l.currency_id=%s and l.account_id=%s and '+query, (account.currency_id.id,account.id,))
tot1,tot2 = cr.fetchone()
if tot2 and not account_invert:
rate = float(tot1)/float(tot2)
elif tot1 and account_invert:
rate = float(tot2)/float(tot1)
return rate
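    # Illustrative sketch (not part of the original file, figures are made
    # up): with currency_mode 'average', if the account's move lines sum to
    # debit - credit = 500.0 in the company currency and
    # amount_currency = 600.0 in the account currency, the returned rate is
    # 500.0 / 600.0 ~= 0.833, or 600.0 / 500.0 = 1.2 when account_invert is
    # set.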
| agpl-3.0 |
damirda/ansible-modules-core | cloud/amazon/ec2_vol.py | 42 | 19238 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vol
short_description: create and attach a volume, return volume id and device map
description:
    - creates an EBS volume and optionally attaches it to an instance. If both an instance ID and a device name are given and the instance has a device at the device name, then no volume is created and no attachment is made. This module has a dependency on python-boto.
version_added: "1.1"
options:
instance:
description:
- instance ID if you wish to attach the volume. Since 1.9 you can set to None to detach.
required: false
default: null
name:
description:
- volume Name tag if you wish to attach an existing volume (requires instance)
required: false
default: null
version_added: "1.6"
id:
description:
- volume id if you wish to attach an existing volume (requires instance) or remove an existing volume
required: false
default: null
version_added: "1.6"
volume_size:
description:
- size of volume (in GB) to create.
required: false
default: null
volume_type:
description:
- Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default
        and remains the Ansible default for backwards compatibility.
required: false
default: standard
version_added: "1.9"
iops:
description:
- the provisioned IOPs you want to associate with this volume (integer).
required: false
default: 100
version_added: "1.3"
encrypted:
description:
- Enable encryption at rest for this volume.
default: false
version_added: "1.8"
device_name:
description:
- device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows.
required: false
default: null
delete_on_termination:
description:
- When set to "yes", the volume will be deleted upon instance termination.
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.1"
zone:
description:
- zone in which to create the volume, if unset uses the zone the instance is in (if set)
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
snapshot:
description:
- snapshot ID on which to base the volume
required: false
default: null
version_added: "1.5"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
version_added: "1.5"
state:
description:
- whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8).
required: false
default: present
choices: ['absent', 'present', 'list']
version_added: "1.6"
author: "Lester Wade (@lwade)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple attachment action
- ec2_vol:
instance: XXXXXX
volume_size: 5
device_name: sdd
# Example using custom iops params
- ec2_vol:
instance: XXXXXX
volume_size: 5
iops: 100
device_name: sdd
# Example using snapshot id
- ec2_vol:
instance: XXXXXX
snapshot: "{{ snapshot }}"
# Playbook example combined with instance launch
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
wait: yes
count: 3
register: ec2
- ec2_vol:
instance: "{{ item.id }} "
volume_size: 5
with_items: ec2.instances
register: ec2_vol
# Example: Launch an instance and then add a volume if not already attached
# * Volume will be created with the given name if not already created.
# * Nothing will happen if the volume is already attached.
# * Requires Ansible 2.0
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
zone: YYYYYY
id: my_instance
wait: yes
count: 1
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
name: my_existing_volume_Name_tag
device_name: /dev/xvdf
with_items: ec2.instances
register: ec2_vol
# Remove a volume
- ec2_vol:
id: vol-XXXXXXXX
state: absent
# Detach a volume (since 1.9)
- ec2_vol:
id: vol-XXXXXXXX
instance: None
# List volumes for an instance
- ec2_vol:
instance: i-XXXXXX
state: list
# Create new volume using SSD storage
- ec2_vol:
instance: XXXXXX
volume_size: 50
volume_type: gp2
device_name: /dev/xvdf
# Attach an existing volume to instance. The volume will be deleted upon instance termination.
- ec2_vol:
instance: XXXXXX
id: XXXXXX
device_name: /dev/sdf
delete_on_termination: yes
'''
RETURN = '''
device:
description: device name of attached volume
returned: when success
type: string
sample: "/def/sdf"
volume_id:
description: the id of volume
returned: when success
type: string
sample: "vol-35b333d9"
volume_type:
description: the volume type
returned: when success
type: string
sample: "standard"
volume:
description: a dictionary containing detailed attributes of the volume
returned: when success
type: string
sample: {
"attachment_set": {
"attach_time": "2015-10-23T00:22:29.000Z",
"deleteOnTermination": "false",
"device": "/dev/sdf",
"instance_id": "i-8356263c",
"status": "attached"
},
"create_time": "2015-10-21T14:36:08.870Z",
"encrypted": false,
"id": "vol-35b333d9",
"iops": null,
"size": 1,
"snapshot_id": "",
"status": "in-use",
"tags": {
"env": "dev"
},
"type": "standard",
"zone": "us-east-1b"
}
'''
import time
from distutils.version import LooseVersion
try:
import boto.ec2
from boto.exception import BotoServerError
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_volume(module, ec2):
name = module.params.get('name')
id = module.params.get('id')
zone = module.params.get('zone')
filters = {}
volume_ids = None
# If no name or id supplied, just try volume creation based on module parameters
if id is None and name is None:
return None
if zone:
filters['availability_zone'] = zone
if name:
filters = {'tag:Name': name}
if id:
volume_ids = [id]
try:
vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if not vols:
if id:
msg = "Could not find the volume with id: %s" % id
if name:
msg += (" and name: %s" % name)
module.fail_json(msg=msg)
else:
return None
if len(vols) > 1:
module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
return vols[0]
def get_volumes(module, ec2):
instance = module.params.get('instance')
try:
if not instance:
vols = ec2.get_all_volumes()
else:
vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance})
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return vols
def delete_volume(module, ec2):
volume_id = module.params['id']
try:
ec2.delete_volume(volume_id)
module.exit_json(changed=True)
except boto.exception.EC2ResponseError as ec2_error:
if ec2_error.code == 'InvalidVolume.NotFound':
module.exit_json(changed=False)
module.fail_json(msg=ec2_error.message)
def boto_supports_volume_encryption():
"""
Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
Returns:
True if boto library has the named param as an argument on the request_spot_instances method, else False
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
def create_volume(module, ec2, zone):
changed = False
name = module.params.get('name')
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
volume_size = module.params.get('volume_size')
volume_type = module.params.get('volume_type')
snapshot = module.params.get('snapshot')
# If custom iops is defined we use volume_type "io1" rather than the default of "standard"
if iops:
volume_type = 'io1'
volume = get_volume(module, ec2)
if volume is None:
try:
if boto_supports_volume_encryption():
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted)
changed = True
else:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
changed = True
while volume.status != 'available':
time.sleep(3)
volume.update()
if name:
ec2.create_tags([volume.id], {"Name": name})
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return volume, changed
def attach_volume(module, ec2, volume, instance):
device_name = module.params.get('device_name')
delete_on_termination = module.params.get('delete_on_termination')
changed = False
# If device_name isn't set, make a choice based on best practices here:
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
# In future this needs to be more dynamic but combining block device mapping best practices
# (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)
# Use password data attribute to tell whether the instance is Windows or Linux
if device_name is None:
try:
if not ec2.get_password_data(instance.id):
device_name = '/dev/sdf'
else:
device_name = '/dev/xvdf'
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if volume.attachment_state() is not None:
adata = volume.attach_data
if adata.instance_id != instance.id:
module.fail_json(msg = "Volume %s is already attached to another instance: %s"
% (volume.id, adata.instance_id))
else:
# Volume is already attached to right instance
changed = modify_dot_attribute(module, ec2, instance, device_name)
else:
try:
volume.attach(instance.id, device_name)
while volume.attachment_state() != 'attached':
time.sleep(3)
volume.update()
changed = True
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
modify_dot_attribute(module, ec2, instance, device_name)
return volume, changed
def modify_dot_attribute(module, ec2, instance, device_name):
""" Modify delete_on_termination attribute """
delete_on_termination = module.params.get('delete_on_termination')
changed = False
try:
instance.update()
dot = instance.block_device_mapping[device_name].delete_on_termination
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if delete_on_termination != dot:
try:
bdt = BlockDeviceType(delete_on_termination=delete_on_termination)
bdm = BlockDeviceMapping()
bdm[device_name] = bdt
ec2.modify_instance_attribute(instance_id=instance.id, attribute='blockDeviceMapping', value=bdm)
while instance.block_device_mapping[device_name].delete_on_termination != delete_on_termination:
time.sleep(3)
instance.update()
changed = True
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return changed
def detach_volume(module, ec2, volume):
changed = False
if volume.attachment_state() is not None:
adata = volume.attach_data
volume.detach()
while volume.attachment_state() is not None:
time.sleep(3)
volume.update()
changed = True
return volume, changed
def get_volume_info(volume, state):
# If we're just listing volumes then do nothing, else get the latest update for the volume
if state != 'list':
volume.update()
volume_info = {}
attachment = volume.attach_data
volume_info = {
'create_time': volume.create_time,
'encrypted': volume.encrypted,
'id': volume.id,
'iops': volume.iops,
'size': volume.size,
'snapshot_id': volume.snapshot_id,
'status': volume.status,
'type': volume.type,
'zone': volume.zone,
'attachment_set': {
'attach_time': attachment.attach_time,
'device': attachment.device,
'instance_id': attachment.instance_id,
'status': attachment.status
},
'tags': volume.tags
}
if hasattr(attachment, 'deleteOnTermination'):
volume_info['attachment_set']['deleteOnTermination'] = attachment.deleteOnTermination
return volume_info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance = dict(),
id = dict(),
name = dict(),
volume_size = dict(),
volume_type = dict(choices=['standard', 'gp2', 'io1'], default='standard'),
iops = dict(),
encrypted = dict(type='bool', default=False),
device_name = dict(),
delete_on_termination = dict(type='bool', default=False),
zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
snapshot = dict(),
state = dict(choices=['absent', 'present', 'list'], default='present')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
id = module.params.get('id')
name = module.params.get('name')
instance = module.params.get('instance')
volume_size = module.params.get('volume_size')
encrypted = module.params.get('encrypted')
device_name = module.params.get('device_name')
zone = module.params.get('zone')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
# Ensure we have the zone or can get the zone
if instance is None and zone is None and state == 'present':
module.fail_json(msg="You must specify either instance or zone")
# Set volume detach flag
if instance == 'None' or instance == '':
instance = None
detach_vol_flag = True
else:
detach_vol_flag = False
# Set changed flag
changed = False
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if state == 'list':
returned_volumes = []
vols = get_volumes(module, ec2)
for v in vols:
attachment = v.attach_data
returned_volumes.append(get_volume_info(v, state))
module.exit_json(changed=False, volumes=returned_volumes)
if encrypted and not boto_supports_volume_encryption():
module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes")
# Here we need to get the zone info for the instance. This covers situation where
# instance is specified but zone isn't.
# Useful for playbooks chaining instance launch with volume create + attach and where the
# zone doesn't matter to the user.
inst = None
if instance:
try:
reservation = ec2.get_all_instances(instance_ids=instance)
except BotoServerError as e:
module.fail_json(msg=e.message)
inst = reservation[0].instances[0]
zone = inst.placement
# Check if there is a volume already mounted there.
if device_name:
if device_name in inst.block_device_mapping:
module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
volume_id=inst.block_device_mapping[device_name].volume_id,
device=device_name,
changed=False)
# Delaying the checks until after the instance check allows us to get volume ids for existing volumes
# without needing to pass an unused volume_size
if not volume_size and not (id or name or snapshot):
module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")
if volume_size and (id or snapshot):
module.fail_json(msg="Cannot specify volume_size together with id or snapshot")
if state == 'present':
volume, changed = create_volume(module, ec2, zone)
if detach_vol_flag:
volume, changed = detach_volume(module, ec2, volume)
elif inst is not None:
volume, changed = attach_volume(module, ec2, volume, inst)
# Add device, volume_id and volume_type parameters separately to maintain backward compatability
volume_info = get_volume_info(volume, state)
module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'], volume_id=volume_info['id'], volume_type=volume_info['type'])
elif state == 'absent':
delete_volume(module, ec2)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
libvirt/autotest | frontend/afe/rpc_handler.py | 4 | 4666 | """\
RPC request handler Django. Exposed RPC interface functions should be
defined in rpc_interface.py.
"""
__author__ = '[email protected] (Steve Howard)'
import traceback, pydoc, re, urllib, logging, logging.handlers, inspect
from autotest_lib.frontend.afe.json_rpc import serviceHandler
from autotest_lib.frontend.afe import models, rpc_utils
from autotest_lib.client.common_lib import global_config
from autotest_lib.frontend.afe import rpcserver_logging
LOGGING_REGEXPS = [r'.*add_.*',
r'delete_.*',
r'.*_remove_.*',
r'modify_.*',
r'create.*']
FULL_REGEXP = '(' + '|'.join(LOGGING_REGEXPS) + ')'
COMPILED_REGEXP = re.compile(FULL_REGEXP)
def should_log_message(name):
return COMPILED_REGEXP.match(name)
class RpcMethodHolder(object):
'Dummy class to hold RPC interface methods as attributes.'
class RpcHandler(object):
def __init__(self, rpc_interface_modules, document_module=None):
self._rpc_methods = RpcMethodHolder()
self._dispatcher = serviceHandler.ServiceHandler(self._rpc_methods)
# store all methods from interface modules
for module in rpc_interface_modules:
self._grab_methods_from(module)
# get documentation for rpc_interface we can send back to the
# user
if document_module is None:
document_module = rpc_interface_modules[0]
self.html_doc = pydoc.html.document(document_module)
def get_rpc_documentation(self):
return rpc_utils.raw_http_response(self.html_doc)
def raw_request_data(self, request):
if request.method == 'POST':
return request.raw_post_data
return urllib.unquote(request.META['QUERY_STRING'])
def execute_request(self, json_request):
return self._dispatcher.handleRequest(json_request)
def decode_request(self, json_request):
return self._dispatcher.translateRequest(json_request)
def dispatch_request(self, decoded_request):
return self._dispatcher.dispatchRequest(decoded_request)
def log_request(self, user, decoded_request, decoded_result,
log_all=False):
if log_all or should_log_message(decoded_request['method']):
msg = '%s:%s %s' % (decoded_request['method'], user,
decoded_request['params'])
if decoded_result['err']:
msg += '\n' + decoded_result['err_traceback']
rpcserver_logging.rpc_logger.error(msg)
else:
rpcserver_logging.rpc_logger.info(msg)
def encode_result(self, results):
return self._dispatcher.translateResult(results)
def handle_rpc_request(self, request):
user = models.User.current_user()
json_request = self.raw_request_data(request)
decoded_request = self.decode_request(json_request)
decoded_result = self.dispatch_request(decoded_request)
result = self.encode_result(decoded_result)
if rpcserver_logging.LOGGING_ENABLED:
self.log_request(user, decoded_request, decoded_result)
return rpc_utils.raw_http_response(result)
def handle_jsonp_rpc_request(self, request):
request_data = request.GET['request']
callback_name = request.GET['callback']
# callback_name must be a simple identifier
assert re.search(r'^\w+$', callback_name)
result = self.execute_request(request_data)
padded_result = '%s(%s)' % (callback_name, result)
return rpc_utils.raw_http_response(padded_result,
content_type='text/javascript')
@staticmethod
def _allow_keyword_args(f):
"""\
Decorator to allow a function to take keyword args even though
the RPC layer doesn't support that. The decorated function
assumes its last argument is a dictionary of keyword args and
passes them to the original function as keyword args.
"""
def new_fn(*args):
assert args
keyword_args = args[-1]
args = args[:-1]
return f(*args, **keyword_args)
new_fn.func_name = f.func_name
return new_fn
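    # Illustrative sketch (not from the original source): a decorated
    #
    #   def add(x, y, z=0): ...
    #
    # is invoked over the RPC layer as add(1, 2, {'z': 3}); the trailing dict
    # is unpacked into keyword arguments before calling the wrapped function.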
def _grab_methods_from(self, module):
for name in dir(module):
if name.startswith('_'):
continue
attribute = getattr(module, name)
if not inspect.isfunction(attribute):
continue
decorated_function = RpcHandler._allow_keyword_args(attribute)
setattr(self._rpc_methods, name, decorated_function)
| gpl-2.0 |
zahanm/foodpedia | django/contrib/localflavor/se/utils.py | 202 | 2398 | import re
import datetime
def id_number_checksum(gd):
"""
Calculates a Swedish ID number checksum, using the
"Luhn"-algoritm
"""
n = s = 0
for c in (gd['year'] + gd['month'] + gd['day'] + gd['serial']):
tmp = ((n % 2) and 1 or 2) * int(c)
if tmp > 9:
tmp = sum([int(i) for i in str(tmp)])
s += tmp
n += 1
if (s % 10) == 0:
return 0
return (((s / 10) + 1) * 10) - s
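# Worked example (for illustration, using the commonly cited sample number
# 811218-9876): the digits 8,1,1,2,1,8,9,8,7 are multiplied by 2,1,2,1,...
# giving 16,1,2,2,2,8,18,8,14; digit-summing the two-digit products yields
# 7,1,2,2,2,8,9,8,5, which sum to 44, so the checksum is 50 - 44 = 6.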
def validate_id_birthday(gd, fix_coordination_number_day=True):
"""
Validates the birth_day and returns the datetime.date object for
the birth_day.
If the date is an invalid birth day, a ValueError will be raised.
"""
today = datetime.date.today()
day = int(gd['day'])
if fix_coordination_number_day and day > 60:
day -= 60
if gd['century'] is None:
        # The century was not specified and needs to be calculated from today's date
current_year = today.year
year = int(today.strftime('%Y')) - int(today.strftime('%y')) + int(gd['year'])
if ('%s%s%02d' % (gd['year'], gd['month'], day)) > today.strftime('%y%m%d'):
year -= 100
# If the person is older than 100 years
if gd['sign'] == '+':
year -= 100
else:
year = int(gd['century'] + gd['year'])
# Make sure the year is valid
# There are no swedish personal identity numbers where year < 1800
if year < 1800:
raise ValueError
# ValueError will be raise for invalid dates
birth_day = datetime.date(year, int(gd['month']), day)
# birth_day must not be in the future
if birth_day > today:
raise ValueError
return birth_day
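# Illustrative example (not from the original source, values are made up):
# for a coordination number where gd has century='19', year='81', month='12'
# and day='78', the day 78 is reduced by 60 to 18 and the function returns
# datetime.date(1981, 12, 18).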
def format_personal_id_number(birth_day, gd):
# birth_day.strftime cannot be used, since it does not support dates < 1900
return unicode(str(birth_day.year) + gd['month'] + gd['day'] + gd['serial'] + gd['checksum'])
def format_organisation_number(gd):
if gd['century'] is None:
century = ''
else:
century = gd['century']
return unicode(century + gd['year'] + gd['month'] + gd['day'] + gd['serial'] + gd['checksum'])
def valid_organisation(gd):
return gd['century'] in (None, 16) and \
int(gd['month']) >= 20 and \
gd['sign'] in (None, '-') and \
gd['year'][0] in ('2', '5', '7', '8', '9') # group identifier
| bsd-3-clause |
jazzband/silk | silk/auth.py | 1 | 1060 | from functools import wraps, WRAPPER_ASSIGNMENTS
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from silk.config import SilkyConfig
def login_possibly_required(function=None, **kwargs):
if SilkyConfig().SILKY_AUTHENTICATION:
return login_required(function, **kwargs)
return function
def permissions_possibly_required(function=None):
if SilkyConfig().SILKY_AUTHORISATION:
actual_decorator = user_passes_test(
SilkyConfig().SILKY_PERMISSIONS
)
if function:
return actual_decorator(function)
return actual_decorator
return function
def user_passes_test(test_func):
def decorator(view_func):
@wraps(view_func, assigned=WRAPPER_ASSIGNMENTS)
def _wrapped_view(request, *args, **kwargs):
if test_func(request.user):
return view_func(request, *args, **kwargs)
else:
raise PermissionDenied
return _wrapped_view
return decorator
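# Usage sketch (not part of the original module): the decorators are no-ops
# unless the corresponding Silky settings are enabled.
#
#   @login_possibly_required
#   @permissions_possibly_required
#   def summary_view(request):          # hypothetical Silk view
#       ...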
| mit |
hellofreedom/ansible-modules-core | files/replace.py | 103 | 5341 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Evan Kaufman <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import re
import os
import tempfile
DOCUMENTATION = """
---
module: replace
author: "Evan Kaufman (@EvanK)"
extends_documentation_fragment:
- files
- validate
short_description: Replace all instances of a particular string in a
file using a back-referenced regular expression.
description:
- This module will replace all instances of a pattern within a file.
- It is up to the user to maintain idempotence by ensuring that the
same pattern would never match any replacements made.
version_added: "1.6"
options:
dest:
required: true
aliases: [ name, destfile ]
description:
- The file to modify.
regexp:
required: true
description:
- The regular expression to look for in the contents of the file.
Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
Uses multiline mode, which means C(^) and C($) match the beginning
and end respectively of I(each line) of the file.
replace:
required: false
description:
- The string to replace regexp matches. May contain backreferences
that will get expanded with the regexp capture groups if the regexp
matches. If not set, matches are removed entirely.
backup:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
others:
description:
- All arguments accepted by the M(file) module also work here.
required: false
"""
EXAMPLES = r"""
- replace: dest=/etc/hosts regexp='(\s+)old\.host\.name(\s+.*)?$' replace='\1new.host.name\2' backup=yes
- replace: dest=/home/jdoe/.ssh/known_hosts regexp='^old\.host\.name[^\n]*\n' owner=jdoe group=jdoe mode=644
- replace: dest=/etc/apache/ports regexp='^(NameVirtualHost|Listen)\s+80\s*$' replace='\1 127.0.0.1:8080' validate='/usr/sbin/apache2ctl -f %s -t'
"""
def write_changes(module,contents,dest):
tmpfd, tmpfile = tempfile.mkstemp()
f = os.fdopen(tmpfd,'wb')
f.write(contents)
f.close()
validate = module.params.get('validate', None)
valid = not validate
if validate:
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(validate % tmpfile)
valid = rc == 0
if rc != 0:
module.fail_json(msg='failed to validate: '
'rc:%s error:%s' % (rc,err))
if valid:
module.atomic_move(tmpfile, dest)
def check_file_attrs(module, changed, message):
file_args = module.load_file_common_arguments(module.params)
if module.set_file_attributes_if_different(file_args, False):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def main():
module = AnsibleModule(
argument_spec=dict(
dest=dict(required=True, aliases=['name', 'destfile']),
regexp=dict(required=True),
replace=dict(default='', type='str'),
backup=dict(default=False, type='bool'),
validate=dict(default=None, type='str'),
),
add_file_common_args=True,
supports_check_mode=True
)
params = module.params
dest = os.path.expanduser(params['dest'])
if os.path.isdir(dest):
module.fail_json(rc=256, msg='Destination %s is a directory !' % dest)
if not os.path.exists(dest):
module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
else:
f = open(dest, 'rb')
contents = f.read()
f.close()
mre = re.compile(params['regexp'], re.MULTILINE)
result = re.subn(mre, params['replace'], contents, 0)
if result[1] > 0 and contents != result[0]:
msg = '%s replacements made' % result[1]
changed = True
else:
msg = ''
changed = False
if changed and not module.check_mode:
if params['backup'] and os.path.exists(dest):
module.backup_local(dest)
if params['follow'] and os.path.islink(dest):
dest = os.path.realpath(dest)
write_changes(module, result[0], dest)
msg, changed = check_file_attrs(module, changed, msg)
module.exit_json(changed=changed, msg=msg)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
mattnenterprise/servo | tests/wpt/web-platform-tests/xhr/resources/access-control-preflight-denied.py | 20 | 1609 | def main(request, response):
def fail(message):
response.content = "FAIL: " + str(message)
response.status = 400
def getState(token):
server_state = request.server.stash.take(token)
if not server_state:
return "Uninitialized"
return server_state
def setState(token, state):
request.server.stash.put(token, state)
def resetState(token):
setState(token, "")
response.headers.set("Cache-Control", "no-store")
response.headers.set("Access-Control-Allow-Origin", request.headers.get("origin"))
response.headers.set("Access-Control-Max-Age", 1)
token = request.GET.first("token", None)
state = getState(token)
command = request.GET.first("command", None)
if command == "reset":
if request.method == "GET":
resetState(token)
response.content = "Server state reset"
else:
fail("Invalid Method.")
elif state == "Uninitialized":
if request.method == "OPTIONS":
response.content = "This request should not be displayed."
setState(token, "Denied")
else:
fail(state)
elif state == "Denied":
if request.method == "GET" and command == "complete":
resetState(token)
response.content = "Request successfully blocked."
else:
setState("Deny Ignored")
fail("The request was not denied.")
elif state == "Deny Ignored":
resetState(token)
fail(state)
else:
resetState(token)
fail("Unknown Error.")
| mpl-2.0 |
team-xue/xue | xue/classes/migrations/0001_initial.py | 1 | 2157 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Major'
db.create_table('classes_major', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=16)),
('shortname', self.gf('django.db.models.fields.CharField')(max_length=4)),
))
db.send_create_signal('classes', ['Major'])
# Adding model 'LogicalClass'
db.create_table('classes_logicalclass', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date', self.gf('django.db.models.fields.DateField')()),
('seq', self.gf('django.db.models.fields.IntegerField')()),
('major', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['classes.Major'])),
))
db.send_create_signal('classes', ['LogicalClass'])
def backwards(self, orm):
# Deleting model 'Major'
db.delete_table('classes_major')
# Deleting model 'LogicalClass'
db.delete_table('classes_logicalclass')
models = {
'classes.logicalclass': {
'Meta': {'object_name': 'LogicalClass'},
'date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'major': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['classes.Major']"}),
'seq': ('django.db.models.fields.IntegerField', [], {})
},
'classes.major': {
'Meta': {'object_name': 'Major'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'shortname': ('django.db.models.fields.CharField', [], {'max_length': '4'})
}
}
complete_apps = ['classes']
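# Usage note (sketch, assuming South is installed and configured):
#
#   python manage.py migrate classes 0001_initial   # apply this migration
#   python manage.py migrate classes zero           # roll it back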
| bsd-3-clause |
DayGitH/Python-Challenges | DailyProgrammer/DP20140709B.py | 1 | 2282 | """
[7/9/2014] Challenge #170 [Intermediate] Rummy Checker
https://www.reddit.com/r/dailyprogrammer/comments/2a9u0a/792014_challenge_170_intermediate_rummy_checker/
# [](#IntermediateIcon) _(Intermediate)_: Rummy Checker
[Rummy](http://en.wikipedia.org/wiki/Rummy) is another very common card game. This time, the aim of the game is to
match cards together into groups (**melds**) in your hand. You continually swap cards until you have such melds, at
which point if you have a valid hand you have won. Your hand contains 7 cards, and your hand will contain 2 melds - one
that is 3 long and one that is 4 long. A meld is either:
* 3 or 4 cards of the same rank and different suit (eg. 3 jacks or 4 nines) called a **set**
* 3 or 4 cards in the same suit but increasing rank - eg. Ace, Two, Three, Four of Hearts, called a **run**
Ace is played low - ie. before 2 rather than after king.
Your challenge today is as follows. You will be given a Rummy hand of 7 cards. You will then be given another card,
that you have the choice to pick up. The challenge is to tell whether picking up the card will win you the game or not
- ie. whether picking it up will give you a winning hand. You will also need to state which card it is being replaced
with.
## Input Description
First you will be given a comma separated list of 7 cards on one line, as so:
Two of Diamonds, Three of Diamonds, Four of Diamonds, Seven of Diamonds, Seven of Clubs, Seven of Hearts, Jack of
Hearts
Next, you will be given another (**new**) card on a new line, like so:
Five of Diamonds
## Output Description
If replacing a card in your hand with the new card will give you a winning hand, print which card in your hand is being
replaced to win, for example:
Swap the new card for the Jack of Hearts to win!
Because in that case, that would give you a run (Two, Three, Four, Five of Diamonds) and a set (Seven of Diamonds,
Clubs and Hearts). In the event that picking up the new card will do nothing, print:
No possible winning hand.
# Notes
You may want to re-use some code for your card and deck structure from your solution to [this
challenge](http://www.reddit.com/r/dailyprogrammer/comments/29zut0) where appropriate.
"""
def main():
pass
if __name__ == "__main__":
main()
| mit |
Distrotech/intellij-community | python/lib/Lib/site-packages/django/contrib/sessions/models.py | 231 | 2034 | import base64
import cPickle as pickle
from django.db import models
from django.utils.translation import ugettext_lazy as _
class SessionManager(models.Manager):
def encode(self, session_dict):
"""
Returns the given session dictionary pickled and encoded as a string.
"""
return SessionStore().encode(session_dict)
def save(self, session_key, session_dict, expire_date):
s = self.model(session_key, self.encode(session_dict), expire_date)
if session_dict:
s.save()
else:
s.delete() # Clear sessions with no data.
return s
class Session(models.Model):
"""
Django provides full support for anonymous sessions. The session
framework lets you store and retrieve arbitrary data on a
per-site-visitor basis. It stores data on the server side and
abstracts the sending and receiving of cookies. Cookies contain a
session ID -- not the data itself.
The Django sessions framework is entirely cookie-based. It does
not fall back to putting session IDs in URLs. This is an intentional
design decision. Not only does that behavior make URLs ugly, it makes
your site vulnerable to session-ID theft via the "Referer" header.
For complete documentation on using Sessions in your code, consult
the sessions documentation that is shipped with Django (also available
on the Django Web site).
"""
session_key = models.CharField(_('session key'), max_length=40,
primary_key=True)
session_data = models.TextField(_('session data'))
expire_date = models.DateTimeField(_('expire date'), db_index=True)
objects = SessionManager()
class Meta:
db_table = 'django_session'
verbose_name = _('session')
verbose_name_plural = _('sessions')
def get_decoded(self):
return SessionStore().decode(self.session_data)
# At bottom to avoid circular import
from django.contrib.sessions.backends.db import SessionStore
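# Usage sketch (assumes a configured Django project; not part of the original
# module):
#
#   import datetime
#   s = Session.objects.save('2b1189a188b44ad18c35e113ac6ceead',
#                            {'user_id': 42},
#                            datetime.datetime.now() + datetime.timedelta(days=1))
#   s.get_decoded()   # -> {'user_id': 42}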
| apache-2.0 |
zero-rp/miniblink49 | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/win_unittest.py | 27 | 6482 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import unittest
from webkitpy.common.system import outputcapture
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.layout_tests.port import port_testcase
from webkitpy.layout_tests.port import win
from webkitpy.tool.mocktool import MockOptions
class WinPortTest(port_testcase.PortTestCase):
port_name = 'win'
full_port_name = 'win-xp'
port_maker = win.WinPort
os_name = 'win'
os_version = 'xp'
def test_setup_environ_for_server(self):
port = self.make_port()
port._executive = MockExecutive(should_log=True)
output = outputcapture.OutputCapture()
# FIXME: This test should not use the real os.environ
orig_environ = os.environ.copy()
env = output.assert_outputs(self, port.setup_environ_for_server)
self.assertEqual(orig_environ["PATH"], os.environ["PATH"])
self.assertNotEqual(env["PATH"], os.environ["PATH"])
def test_setup_environ_for_server_cygpath(self):
port = self.make_port()
env = port.setup_environ_for_server(port.driver_name())
self.assertEqual(env['CYGWIN_PATH'], '/mock-checkout/third_party/cygwin/bin')
def test_setup_environ_for_server_register_cygwin(self):
port = self.make_port(options=MockOptions(register_cygwin=True, results_directory='/'))
port._executive = MockExecutive(should_log=True)
expected_logs = "MOCK run_command: ['/mock-checkout/third_party/cygwin/setup_mount.bat'], cwd=None\n"
output = outputcapture.OutputCapture()
output.assert_outputs(self, port.setup_environ_for_server, expected_logs=expected_logs)
def assert_name(self, port_name, os_version_string, expected):
port = self.make_port(port_name=port_name, os_version=os_version_string)
self.assertEqual(expected, port.name())
def test_versions(self):
port = self.make_port()
self.assertIn(port.name(), ('win-xp', 'win-win7'))
self.assert_name(None, 'xp', 'win-xp')
self.assert_name('win', 'xp', 'win-xp')
self.assert_name('win-xp', 'xp', 'win-xp')
self.assert_name('win-xp', '7sp0', 'win-xp')
self.assert_name(None, '7sp0', 'win-win7')
self.assert_name(None, 'vista', 'win-win7')
self.assert_name('win', '7sp0', 'win-win7')
self.assert_name('win-win7', 'xp', 'win-win7')
self.assert_name('win-win7', '7sp0', 'win-win7')
self.assert_name('win-win7', 'vista', 'win-win7')
self.assertRaises(AssertionError, self.assert_name, None, 'w2k', 'win-xp')
def test_baseline_path(self):
port = self.make_port(port_name='win-xp')
self.assertEqual(port.baseline_path(), port._webkit_baseline_path('win-xp'))
port = self.make_port(port_name='win-win7')
self.assertEqual(port.baseline_path(), port._webkit_baseline_path('win'))
def test_build_path(self):
# Test that optional paths are used regardless of whether they exist.
options = MockOptions(configuration='Release', build_directory='/foo')
self.assert_build_path(options, ['/mock-checkout/out/Release'], '/foo/Release')
# Test that optional relative paths are returned unmodified.
options = MockOptions(configuration='Release', build_directory='foo')
self.assert_build_path(options, ['/mock-checkout/out/Release'], 'foo/Release')
# Test that we prefer the legacy dir over the new dir.
options = MockOptions(configuration='Release', build_directory=None)
self.assert_build_path(options, ['/mock-checkout/build/Release', '/mock-checkout/out'], '/mock-checkout/build/Release')
def test_build_path_timestamps(self):
options = MockOptions(configuration='Release', build_directory=None)
port = self.make_port(options=options)
port.host.filesystem.maybe_make_directory('/mock-checkout/out/Release')
port.host.filesystem.maybe_make_directory('/mock-checkout/build/Release')
# Check with 'out' being newer.
port.host.filesystem.mtime = lambda f: 5 if '/out/' in f else 4
self.assertEqual(port._build_path(), '/mock-checkout/out/Release')
# Check with 'build' being newer.
port.host.filesystem.mtime = lambda f: 5 if '/build/' in f else 4
self.assertEqual(port._build_path(), '/mock-checkout/build/Release')
def test_operating_system(self):
self.assertEqual('win', self.make_port().operating_system())
def test_driver_name_option(self):
self.assertTrue(self.make_port()._path_to_driver().endswith('content_shell.exe'))
self.assertTrue(self.make_port(options=MockOptions(driver_name='OtherDriver'))._path_to_driver().endswith('OtherDriver.exe'))
def test_path_to_image_diff(self):
self.assertEqual(self.make_port()._path_to_image_diff(), '/mock-checkout/out/Release/image_diff.exe')
| apache-2.0 |
olitheolix/qtmacs | qtmacs/auxiliary.py | 1 | 49418 | # Copyright 2012, Oliver Nagy <[email protected]>
#
# This file is part of Qtmacs.
#
# Qtmacs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Qtmacs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Qtmacs. If not, see <http://www.gnu.org/licenses/>.
"""
Qtmacs internal support classes.
The classes and functions in this module are used by various internal
modules and serve mostly administrative purposes that do not require
state information of the objects that use them.
While all classes in this file can safely be used in any applet/macro,
only ``QtmacsKeysequence`` is likely be of any practical value.
It is safe to use::
from auxiliary import something
"""
import re
import inspect
import qtmacs.type_check
import qtmacs.qte_global as qte_global
from PyQt4 import QtCore, QtGui
from qtmacs.exceptions import *
# Shorthands
type_check = qtmacs.type_check.type_check
# ----------------------------------------------------------------------
# Classes
# ----------------------------------------------------------------------
class QtmacsMessage(object):
"""
Data container that is passed along with every signal or hook.
The ``data`` field is an arbitrary Python object and ``senderObj``
specifies the object that triggered the delivery of the message.
The message recipient can query both fields directly via the
``data`` and ``senderObj`` attributes. Furthermore, the ``isHook``
flag indicates if the message was delivered via a hook (**True**)
or a signal (**False**). Finally, the ``messengerName`` attribute,
specifies the name of the signal or hook that delivered the
object.
|Args|
    * ``data`` (**object**): arbitrary Python object. The recipient must
      know what to do with it.
* ``senderObj`` (**QObject**): reference to calling object.
|Raises|
* **None**
"""
@type_check
def __init__(self, data=None, senderObj: QtCore.QObject=None):
super().__init__()
self.data = data
self.senderObj = senderObj
# Indicate whether this message was sent by a signal or a hook.
self.isHook = None
# Name of signal (without the `qtesig` prefix) or hook.
self.messengerName = None
@type_check
def setHookName(self, name: str):
"""
Specify that the message will be delivered with the hook ``name``.
"""
self.isHook = True
self.messengerName = name
@type_check
def setSignalName(self, name: str):
"""
Specify that the message will be delivered with the signal ``name``.
"""
self.isHook = False
self.messengerName = name
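# Usage sketch (illustrative only; the hook name below is hypothetical):
#
#   msg = QtmacsMessage(data={'line': 3}, senderObj=someQObject)
#   msg.setHookName('cursor-moved')
#   msg.isHook           # -> True
#   msg.messengerName    # -> 'cursor-moved'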
class QtmacsVersionStructure(object):
"""
Container object to maintain version information.
|Args|
* **None**
|Raises|
* **None**
"""
def __init__(self):
self.version = None
self.created = None
self.last_changed = None
class QtmacsAdminStructure(object):
"""
Container object carried by every applet and widget in the
instance variable ``_qteAdmin``.
This class holds all the information needed by Qtmacs to
administrate its applets and widgets to avoids name space
pollution of the Qt classes.
As a rule of thumb, do not set any values in this object
manually. Instead, use the dedicated access methods. If there is
no such method, then the variable is like not meant to be tempered
with.
|Args|
    * ``qteApplet`` (**QtmacsApplet**): handle to the applet holding
      either this structure directly, or the widget which holds it.
* ``appletID`` (**str**): applet ID.
* ``isFocusable`` (**bool**): whether a widget can have the focus
(ignored for``QtmacsApplets``).
* ``isQtmacsWindow`` (**bool**): whether or not the caller is
``QtmacsMain``. This flag only exists to avoid problems with
assigning this object to ``QtmacsMain`` at start up.
|Raises|
* **None**
"""
def __init__(self, qteApplet, appletID=None,
isFocusable=True, isQtmacsWindow=False):
# Keep a reference to the main Qtmacs class.
self.qteMain = qte_global.qteMain
# Save a handle to the parent applet.
self.qteApplet = qteApplet
# Save the applet name (a string).
self.appletID = appletID
# Unfocusable widgets are skipped when cycling the focus.
self.isFocusable = isFocusable
# If true, call the qteKeyPressEventBefore method of the
# applet (not the widget!) before it is processed by Qtmacs.
self.receiveBeforeQtmacsParser = False
# If true, call the qteKeyPressEventAfter method of the applet
# (not the widget!) after it was processed by Qtmacs.
self.receiveAfterQtmacsParser = False
# If True, Qtmacs will intercept the key events for this widget.
self.filterKeyEvents = True
if not isQtmacsWindow:
# Initially, the local key map mirrors the global one.
self.keyMap = self.qteMain.qteCopyGlobalKeyMap()
# Applet signature. This information determines which macros
# are compatible.
self.appletSignature = None
# Widget Signature. This variable is automatically set for
# every widget added via ``qteAddWidget``. If the object is
# not a widget but a reference then it defaults to the string
# "QWidget".
self.widgetSignature = "QWidget"
# List of widgets held by this applet. The ordering of this
# list determines the focus sequence.
self.widgetList = []
# Specify whether the widget is a QtmacsApplet. The default
# value is true because the qteAddWidget routine will
# overwrite this flag for widgets.
self.isQtmacsApplet = True
# Specify if the applet is a mini applet.
self.isMiniApplet = False
# Handle to parent window. This is always **None** if the
# widget is invisible. This flag is updated automatically by
# the show() and hide() methods.
self.parentWindow = None
# Visibility flag. This is usually the same as Qt's native
# ``isVisible`` but whereas Qt does not actually update this
# flag until the event loop had a chance to paint the applet,
# the isVisible flag will update as soon as the show/hide
# methods are called. This extra information is necessary
# because several methods in QtmacsMain make applets visible
# and invisible without calling the event loop in between,
# which makes it impossible to track the visibility states.
self.isVisible = False
# This is general purpose dictionary that macros can use to
# store applet specific information.
self.macroData = {}
        # If True, then the applet can be killed without losing
# data. This is mostly a convenience flag to facilitate a
# reasonably generic kill-applet macro, but the applet
# programmer is free to provide his own kill-applet macro for
# his applet. That macro may use applet specific variables to
# determine whether or not the applet can be safely killed and
# if not, how to achieve it.
self.readyToKill = True
@type_check
def qteSetKeyFilterPolicy(self, receiveBefore: bool=False,
useQtmacs: bool=None,
receiveAfter: bool=False):
"""
Set the policy on how Qtmacs filters keyboard events for a
particular widgets.
The options can be arbitrarily combined, eg. ::
widget.qteSetKeyFilterPolicy(True, True, False)
will first pass the event to the applet's ``keyPressEvent``
method and afterwards pass the same event to Qtmacs' keyboard
filter.
For all text-processing widgets (eg. ``QLineEdit``,
``QTextEdit``, ``QWebView``, etc.) it is almost always a good
        idea to use the default, ie. (False, True, False),
which lets Qtmacs process everything. In this case the only
way to interact with the widget is via macros (and the mouse).
If ``receiveBefore`` and/or ``receiveAfter`` is set then
``qteKeyPressEventBefore`` and/or ``qteKeyPressEventAfter`` of
the QtmacsApplet (not widget) is called to inspect the event.
.. note:: The default behaviour is to let Qtmacs handle all
keyboard events and interact with the applet only via
macros. It may be more convenient for a programmer to
handle keyboard events directly in the keyPressEvent
routine, as is customary with Qt applications, but this
compromises the customisation ability of Qtmacs. As a rule
of thumb, applet classes should not implement keyPressEvent
at all. However, since there is an exception to every rule
Qtmacs allows it.
.. note:: This method must be part of the qteAdmin object
because which is attached to every object under the
control of Qtmacs.
|Args|
* ``receiveBefore`` (**bool**): pass the keyEvent to the applet
before Qtmacs processes it.
* ``useQtmacs`` (**bool**): let Qtmacs parse the key.
* ``receiveAfter`` (**bool**): pass the keyEvent to the applet
after Qtmacs processed it.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Store key filter policy flags.
self.filterKeyEvents = useQtmacs
self.receiveBeforeQtmacsParser = receiveBefore
self.receiveAfterQtmacsParser = receiveAfter
def keyFilterPolicy(self):
"""
Return the key filter policy for the current applet.
.. note:: This method must be part of the qteAdmin object
because which is attached to every object under the
control of Qtmacs.
|Args|
* **None**
|Returns|
``tuple``: (receiveBefore, useQtmacs, receiveAfter)
|Raises|
* **None**
"""
return (self.receiveBeforeQtmacsParser, self.filterKeyEvents,
self.receiveAfterQtmacsParser)
class QtmacsKeysequence(object):
"""
Parse and represent a Qtmacs keyboard sequence.
Without any argument, it represents an empty sequence. If the
argument is a string or a list/tuple, then a parser attempts to
convert it into a sequence of valid ``QKeyEvent`` objects. If the
argument is another QtmacsKeysequence, then a copy of the object
is returned.
Examples for instantiating this object with human readable
keyboard sequences::
QtmacsKeysequence('<ctrl>+f h <alt>+K <ctrl>+k')
QtmacsKeysequence('<ctrl>+f <ctrl>+<alt>++ <ctrl>+<alt>+<space>')
QtmacsKeysequence('<ctrl>+f <ctrl>+F <ctrl>++ <ctrl>+<space>'
'<ctrl>+< <alt>+> < > <space>')
The syntax of the string should be self explanatory. In addition,
    everything in angular brackets is case insensitive, eg. '<ctrl>+f'
    and '<CtRL>+f' are the same, and so are '<space>' and
    '<SPAce>'. However, non-bracketed keys are case sensitive,
    eg. '<ctrl>+f' is not the same as '<ctrl>+F'. Note that it is not
necessary (in fact impossible) to specify a <shift> modifier.
Keyboard combination are separated by (an arbitrary number of)
white spaces. Non-printable characters have a bracketed mnemonic,
eg. <space>, <backspace>, <tab>, <F1>. The exact list of available
characters, as well as the necessity for <shift> modifiers,
depends on the used OS and keyboard. The used layout is specified
    in the ``Qt_key_map`` variable from the global name space, which
``QtmacsMain`` sets at startup, although it utilises the
``platform_setup.py`` module to do the actual work. That module is
also the point of entry for adding new key maps, and/or extending
existing ones.
Instead of specifying a human readable string it is also possible
to instantiate ``QtmacsKeyboardsequence`` with sequence of Qt
constants from the ``QtCore.Qt`` name space, for instance::
QtmacsKeysequence([(QtCore.Qt.ControlModifier, QtCore.Qt.Key_H),
(QtCore.Qt.NoModifier, QtCore.Qt.Key_K)])
is the same as::
QtmacsKeysequence('<ctrl>+h k').
The macro/applet programmer is unlikely to encounter this class at
all as the methods of these classes that require keyboard
sequences (eg. ``qteBindKeyWidget``) are usually called
with human readable strings anyway because they are convenient.
However, Qtmacs internally, the only accepted way to deal with
keyboard shortcuts is via this class.
|Args|
* ``keysequence`` (**str** or **tuple** or **list** or
**QtmacsKeysequence**)
|Raises|
* **QtmacsKeysequenceError** if ``keysequence`` could not be parsed.
"""
def __init__(self, keysequence=None):
# Only used when called as an iterator to yield the individual
# QKeyEvents that make up the key sequence represented by this
# class.
self._iterCnt = 0
# Get a reference to the key map for this machine. This
# reference is usually set by the constructor of the
# QtmacsMain class early on and should therefore be
# available. If not, then something is seriously wrong.
if hasattr(qte_global, 'Qt_key_map'):
# Dictionary that maps human readable keys to Qt
# constants.
self.keyDict = qte_global.Qt_key_map
else:
msg = '"Qt_key_map" variable does not exist in global name space'
raise QtmacsKeysequenceError(msg)
# Get a reference to the modifier map for this machine (set at
# the same time as Qt_key_map above).
if hasattr(qte_global, 'Qt_modifier_map'):
# Dictionary that maps modifier keys to Qt constants.
self.modDict = qte_global.Qt_modifier_map
else:
msg = '"Qt_modifier_map" variable does not exist '
msg += 'in global name space.'
raise QtmacsKeysequenceError(msg)
# Make a copy of keyDict but with keys as values and vice
# versa. This dictionary will be used to map the binary (Qt
# internal) representation of keys to human readable values.
self.keyDictReverse = {}
for key, value in self.keyDict.items():
self.keyDictReverse[value] = key
# A list of QKeyEvent events and numerical constants from the
# Qt library. Both lists represent the same key sequence and
# the reset() method clears both.
self.keylistQtConstants = None
self.keylistKeyEvent = None
self.reset()
# Act on the argument passed to the constructor.
if isinstance(keysequence, str):
# We were passed a string --> parse it to extract the key sequence.
self.str2key(keysequence)
elif isinstance(keysequence, list) or isinstance(keysequence, tuple):
# We were passed a list --> parse it to extract the key sequence.
self.list2key(keysequence)
elif isinstance(keysequence, QtmacsKeysequence):
# We were passed another QtmacsKeysequence object --> copy
# all its attributes.
self.keylistQtConstants = keysequence.keylistQtConstants
self.keylistKeyEvent = keysequence.keylistKeyEvent
elif keysequence is None:
# We were passed nothing --> do nothing.
pass
else:
msg = 'Argument must be either None, a string, a list, '
msg += 'or a QtmacsKeySequence.'
raise QtmacsKeysequenceError(msg)
def __repr__(self):
"""
Print a human readable version of the key sequence represented
by this object.
"""
return self.toString()
def reset(self):
"""
Flush the key sequences.
|Args|
* **None**
|Returns|
**None**
|Raises|
* **None**
"""
self.keylistQtConstants = []
self.keylistKeyEvent = []
def list2key(self, keyList):
"""
Convert a list of (``QtModifier``, ``QtCore.Qt.Key_*``) tuples
into a key sequence.
If no error is raised, then the list was accepted.
|Args|
* ``keyList`` (**list**): eg. (QtCore.Qt.ControlModifier,
QtCore.Qt.Key_F).
|Returns|
**None**
|Raises|
* **QtmacsKeysequenceError** if the provided ``keysequence``
could not be parsed.
"""
for keyCombo in keyList:
if not (isinstance(keyCombo, list) or isinstance(keyCombo, tuple)):
msg = ('Format of native key list is invalid.'
' Must be a list/tuple of list/tuples.')
raise QtmacsKeysequenceError(msg)
if len(keyCombo) != 2:
msg = 'Format of native key list is invalid.'
msg += 'Each element must have exactly 2 entries.'
raise QtmacsKeysequenceError(msg)
# Construct a new QKeyEvent. Note that the general
# modifier (ie. <ctrl> and <alt>) still need to be
# combined with shift modifier (which is never a general
# modifier) if the key demands it. This combination is a
# simple "or" on the QFlags structure. Also note that the
# "text" argument is omitted because Qt is smart enough to
# fill it internally. Furthermore, the QKeyEvent method
# will raise an error if the provided key sequence makes
# no sense, but to avoid raising an exception inside an
# exception the QtmacsKeysequenceError is not raised
# inside the exception block.
try:
key_event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress,
keyCombo[1], keyCombo[0])
err = False
except TypeError:
err = True
if err:
msg = ('Format of native key list is invalid. '
'Must be a list/tuple of list/tuples.')
raise QtmacsKeysequenceError(msg)
else:
self.appendQKeyEvent(key_event)
def str2key(self, keyString):
"""
Parse a human readable key sequence.
If no error is raised, then ``keyString`` could be
successfully converted into a valid key sequence and is
henceforth represented by this object.
|Args|
* ``keyString`` (**QtmacsKeysequence**): eg. "<ctrl>+f"
|Returns|
**None**
|Raises|
* **QtmacsKeysequenceError** if ``keyString`` could not be parsed.
"""
# Ensure the string is non-empty.
if keyString == '':
raise QtmacsKeysequenceError('Cannot parse empty string')
tmp = str(keyString)
        tmp = tmp.replace('<', '&lt;')
        tmp = tmp.replace('>', '&gt;')
keyStringHtml = '<b>{}</b>.'.format(tmp)
del tmp
# Remove leading and trailing white spaces, and reduce
# sequences of white spaces to a single white space. If this
        # results in an empty string (typically the case when the user
# tries to register a white space with ' ' instead of with
# '<space>') then raise an error.
rawKeyStr = keyString.strip()
if len(rawKeyStr) == 0:
msg = 'Cannot parse the key combination {}.'.format(keyStringHtml)
raise QtmacsKeysequenceError(msg)
# Split the string at these white spaces and convert eg.
# " <ctrl>+x <ctrl>+f " first into
# "<ctrl>+x <ctrl>+f" and from there into the list of
# individual key combinations ["<ctrl>+x", "<ctrl>+f"].
rawKeyStr = re.sub(' +', ' ', rawKeyStr)
rawKeyStr = rawKeyStr.split(' ')
        # Now process the key combinations one by one.
for key in rawKeyStr:
# Find all bracketed keys in the key combination
# (eg. <ctrl>, <space>).
desc_keys = re.findall('<.*?>', key)
# There are four possibilities:
# * no bracketed key (eg. "x" or "X")
# * one bracketed key (eg. "<ctrl>+x", or just "<space>")
# * two bracketed keys (eg. "<ctrl>+<space>" or "<ctrl>+<alt>+f")
# * three bracketed keys (eg. <ctrl>+<alt>+<space>).
if len(desc_keys) == 0:
# No bracketed key means no modifier, so the key must
# stand by itself.
modStr = ['<NONE>']
keyStr = key
elif len(desc_keys) == 1:
if '+' not in key:
# If no '+' sign is present then it must be
# bracketed key without any modifier
# (eg. "<space>").
modStr = ['<NONE>']
keyStr = key
else:
# Since a '+' sign and exactly one bracketed key
# is available, it must be a modifier plus a
# normal key (eg. "<ctrl>+f", "<alt>++").
idx = key.find('+')
modStr = [key[:idx]]
keyStr = key[idx + 1:]
elif len(desc_keys) == 2:
# There are either two modifiers and a normal key
# (eg. "<ctrl>+<alt>+x") or one modifier and one
# bracketed key (eg. "<ctrl>+<space>").
if (key.count('+') == 0) or (key.count('+') > 3):
# A valid key combination must feature at least
# one- and at most three "+" symbols.
msg = 'Cannot parse the key combination {}.'
msg = msg.format(keyStringHtml)
raise QtmacsKeysequenceError(msg)
elif key.count('+') == 1:
# One modifier and one bracketed key
# (eg. "<ctrl>+<space>").
idx = key.find('+')
modStr = [key[:idx]]
keyStr = key[idx + 1:]
elif (key.count('+') == 2) or (key.count('+') == 3):
# Two modifiers and one normal key
# (eg. "<ctrl>+<alt>+f", "<ctrl>+<alt>++").
idx1 = key.find('+')
idx2 = key.find('+', idx1 + 1)
modStr = [key[:idx1], key[idx1 + 1:idx2]]
keyStr = key[idx2 + 1:]
elif len(desc_keys) == 3:
if key.count('+') == 2:
# There are two modifiers and one bracketed key
# (eg. "<ctrl>+<alt>+<space>").
idx1 = key.find('+')
idx2 = key.find('+', idx1 + 1)
modStr = [key[:idx1], key[idx1 + 1:idx2]]
keyStr = key[idx2 + 1:]
else:
# A key combination with three bracketed entries
# must have exactly two '+' symbols. It cannot be
# valid otherwise.
msg = 'Cannot parse the key combination {}.'
msg = msg.format(keyStringHtml)
raise QtmacsKeysequenceError(msg)
else:
msg = 'Cannot parse the key combination {}.'
msg = msg.format(keyStringHtml)
raise QtmacsKeysequenceError(msg)
# The dictionary keys that map the modifiers and bracketed
# keys to Qt constants are all upper case by
# convention. Therefore, convert all modifier keys and
# bracketed normal keys.
modStr = [_.upper() for _ in modStr]
if (keyStr[0] == '<') and (keyStr[-1] == '>'):
keyStr = keyStr.upper()
# Convert the text version of the modifier key into the
# QFlags structure used by Qt by "or"ing them
# together. The loop is necessary because more than one
# modifier may be active (eg. <ctrl>+<alt>).
modQt = QtCore.Qt.NoModifier
for mod in modStr:
# Ensure that the modifier actually exists (eg. the
                # user might have made a typo like "<ctlr>" instead of
# "<ctrl>"). Also, the keys in the dictionary consist
# of only upper case letter for the modifier keys.
if mod not in self.modDict:
msg = 'Cannot parse the key combination {}.'
msg = msg.format(keyStringHtml)
raise QtmacsKeysequenceError(msg)
# Since the modifier exists in the dictionary, "or"
# them with the other flags.
modQt = modQt | self.modDict[mod]
# Repeat the modifier procedure for the key. However,
# unlike for the modifiers, no loop is necessary here
# because only one key can be pressed at the same time.
if keyStr in self.keyDict:
modQt_shift, keyQt = self.keyDict[keyStr]
else:
msg = 'Cannot parse the key combination {}.'
msg = msg.format(keyStringHtml)
raise QtmacsKeysequenceError(msg)
# Construct a new QKeyEvent. Note that the general
# modifier (ie. <ctrl> and <alt>) still need to be
# combined with shift modifier if the key demands it. This
# combination is a simple "or" on the QFlags structure.
# Also note that the "text" argument is omitted because Qt
# is smart enough to determine it internally.
key_event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress, keyQt,
modQt | modQt_shift)
# Finally, append this key to the key sequence represented
# by this object.
self.appendQKeyEvent(key_event)
@type_check
def appendQKeyEvent(self, keyEvent: QtGui.QKeyEvent):
"""
Append another key to the key sequence represented by this object.
|Args|
* ``keyEvent`` (**QKeyEvent**): the key to add.
|Returns|
**None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Store the QKeyEvent.
self.keylistKeyEvent.append(keyEvent)
# Convenience shortcuts.
mod = keyEvent.modifiers()
key = keyEvent.key()
# Add the modifier and key to the list. The modifier is a
# QFlag structure and must by typecast to an integer to avoid
# difficulties with the hashing in the ``match`` routine of
# the ``QtmacsKeymap`` object.
self.keylistQtConstants.append((int(mod), key))
def toQtKeylist(self):
"""
Return the key sequence represented by this object as a tuple
of Qt constants.
The tuple contains as many elements as there are individual
key combination, each represented by a (QtModifier,
QtCore.Qt.Key_xxx) tuple itself. For instance, if the object
        was created as QtmacsKeysequence('<Ctrl>+h k') then this function would
return the tuple ((67108864, 72), (0, 75)). Note that this
list is suitable as an argument to QtmacsKeysequence, which
would create another object representing the same key
sequence.
Note that the numerical constants may be machine dependent.
|Args|
* **None**
|Returns|
**list**: list of (QtModifer, Qt.Key_xxx) tuples.
|Raises|
* **None**
"""
return tuple(self.keylistQtConstants)
def toQKeyEventList(self):
"""
        Return the key sequence represented by this object as a tuple
        of ``QKeyEvent`` instances.
        The tuple contains as many elements as there are individual
        key combinations, each represented by a ``QKeyEvent`` instance.
        For instance, if the object was created as
        QtmacsKeysequence('<Ctrl>+h k') then this function would return
        a tuple of two ``QKeyEvent`` objects, one per key combination.
|Args|
**None**
|Returns|
**list**: list of QKeyEvents.
|Raises|
* **None**
"""
return tuple(self.keylistKeyEvent)
def toString(self):
"""
Return the key sequence as a human readable string, eg. "<ctrl>+x".
Note that this list is suitable as an argument to
QtmacsKeysequence, which would create another object
representing the same key sequence. If a key could not be
converted then it will be displayed as '<Unknown>'. If this
happens, then the key map in ``qte_global.default_qt_keymap``
is incomplete and should be amended accordingly.
|Args|
* **None**
|Returns|
**str**: the key sequence, eg. '<ctrl>+f', or '<F1>', or
'<Unknown>'.
|Raises|
* **None**
"""
# Initialise the final output string.
retVal = ''
for mod, key in self.keylistQtConstants:
out = ''
# Check for any modifiers except <shift> and add the
# corresponding string.
if (mod & QtCore.Qt.ControlModifier):
out += '<Ctrl>+'
if (mod & QtCore.Qt.AltModifier):
out += '<Alt>+'
if (mod & QtCore.Qt.MetaModifier):
out += '<Meta>+'
if (mod & QtCore.Qt.KeypadModifier):
out += '<Keypad>+'
if (mod & QtCore.Qt.GroupSwitchModifier):
out += '<GroupSwitch>+'
# Format the string representation depending on whether or
# not <Shift> is active.
if (mod & QtCore.Qt.ShiftModifier):
# If the key with the shift modifier exists in the
# reverse dictionary then use that string, otherwise
# construct it manually be printing the modifier and
# the key name. The first case is typically
# encountered for upper case characters, where eg. 'F'
# is preferable over '<Shift>+f'.
if (QtCore.Qt.ShiftModifier, key) in self.keyDictReverse:
# The shift-combined key exists in the dictionary,
# so use it.
out += self.keyDictReverse[(QtCore.Qt.ShiftModifier, key)]
elif (QtCore.Qt.NoModifier, key) in self.keyDictReverse:
# The shift-combined key does not exists in the
# dictionary, so assemble the modifier and key by
# hand.
out += ('<Shift>+' +
self.keyDictReverse[(QtCore.Qt.NoModifier, key)])
else:
out += '<Unknown>'
else:
if (QtCore.Qt.NoModifier, key) in self.keyDictReverse:
out += self.keyDictReverse[(QtCore.Qt.NoModifier, key)]
else:
out += '<Unknown>'
# Add a spacer.
retVal += out + ' '
# Return the final string (minus the last spacer).
return retVal[:-1]
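# Round-trip sketch (illustrative; requires a running Qtmacs session so that
# the global key map in ``qte_global`` is populated):
#
#   seq = QtmacsKeysequence('<ctrl>+x <ctrl>+f')
#   seq.toString()                       # -> '<Ctrl>+x <Ctrl>+f'
#   keys = seq.toQtKeylist()             # -> ((<mod>, <key>), (<mod>, <key>))
#   QtmacsKeysequence(keys).toString()   # same sequence again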
class QtmacsKeymap(dict):
"""
Implement the required functionality for a Qtmacs key map.
This class is effectively a dictionary.
|Args|
** None **
|Raises|
* **None**
"""
@type_check
def qteInsertKey(self, keysequence: QtmacsKeysequence, macroName: str):
"""
Insert a new key into the key map and associate it with a
macro.
If the key sequence is already associated with a macro then it
will be overwritten.
|Args|
* ``keysequence`` (**QtmacsKeysequence**): associate a macro with
a key sequence in this key map.
* ``macroName`` (**str**): macro name.
|Returns|
**None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Get a dedicated reference to self to facilitate traversing
# through the key map.
keyMap = self
# Get the key sequence as a list of tuples, where each tuple
        # contains the control modifier and the key code, and both
# are specified as Qt constants.
keysequence = keysequence.toQtKeylist()
# Traverse the shortcut sequence and generate new keys as
# necessary.
for key in keysequence[:-1]:
# If the key does not yet exist add an empty dictionary
# (it will be filled later).
if key not in keyMap:
keyMap[key] = {}
# Similarly, if the key does exist but references anything
# other than a dictionary (eg. a previously installed
# ``QtmacdMacro`` instance), then delete it.
if not isinstance(keyMap[key], dict):
keyMap[key] = {}
# Go one level down in the key-map tree.
keyMap = keyMap[key]
# Assign the new macro object associated with this key.
keyMap[keysequence[-1]] = macroName
@type_check
def qteRemoveKey(self, keysequence: QtmacsKeysequence):
"""
Remove ``keysequence`` from this key map.
|Args|
* ``keysequence`` (**QtmacsKeysequence**): key sequence to
remove from this key map.
|Returns|
**None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Get a dedicated reference to self to facilitate traversing
# through the key map.
keyMap = self
# Keep a reference to the root element in the key map.
keyMapRef = keyMap
# Get the key sequence as a list of tuples, where each tuple
        # contains the control modifier and the key code, and both
# are specified as Qt constants.
keysequence = keysequence.toQtKeylist()
# ------------------------------------------------------------
# Remove the leaf element from the tree.
# ------------------------------------------------------------
for key in keysequence[:-1]:
# Quit if the key does not exist. This can happen if the
# user tries to remove a key that has never been
# registered.
if key not in keyMap:
return
# Go one level down in the key-map tree.
keyMap = keyMap[key]
# The specified key sequence does not exist if the leaf
# element (ie. last entry in the key sequence) is missing.
if keysequence[-1] not in keyMap:
return
else:
# Remove the leaf.
keyMap.pop(keysequence[-1])
# ------------------------------------------------------------
# Prune the prefix path defined by ``keysequence`` and remove
# all empty dictionaries. Start at the leaf level.
# ------------------------------------------------------------
# Drop the last element in the key sequence, because it was
# removed in the above code fragment already.
keysequence = keysequence[:-1]
# Now successively remove the key sequence in reverse order.
while(len(keysequence)):
# Start at the root and move to the last branch level
# before the leaf level.
keyMap = keyMapRef
for key in keysequence[:-1]:
keyMap = keyMap[key]
# If the leaf is a non-empty dictionary then another key
# with the same prefix still exists. In this case do
# nothing. However, if the leaf is now empty it must be
# removed.
if len(keyMap[key]):
return
else:
keyMap.pop(key)
@type_check
def match(self, keysequence: QtmacsKeysequence):
"""
Look up the key sequence in key map.
If ``keysequence`` leads to a macro in the key map represented
by this object then the method returns ``(macroName,
True)``. If it does not lead to a macro but is nonetheless
valid (ie. the sequence is still incomplete), then it returns
``(None, True)``. Finally, if the sequence cannot lead to a
macro because it is invalid then the return value is ``(None,
False)``.
|Args|
        * ``keysequence`` (**QtmacsKeysequence**): the key sequence to
          look up in this key map.
|Returns|
(**str**: macro name, **bool**: partial match)
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
try:
# Look up the ``keysequence`` in the current key map (ie.
# this very object which inherits from ``dict``). If
# ``keysequence`` does not lead to a valid macro then
# return **None**.
macroName = self
for _ in keysequence.toQtKeylist():
macroName = macroName[_]
except KeyError:
# This error occurs if the keyboard sequence does not lead
# to any macro and is therefore invalid.
return (None, False)
# At this point we know that the key sequence entered so far
# exists. Two possibilities from here on forward: 1) the key
# sequence now points to a macro or 2) the key sequence is
# still incomplete.
if isinstance(macroName, dict):
# Another dictionary --> key sequence is still incomplete.
return (None, True)
else:
# Macro object --> return it.
return (macroName, True)
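# Usage sketch (illustrative; assumes a running Qtmacs session and a
# hypothetical macro name 'find-file'):
#
#   km = QtmacsKeymap()
#   km.qteInsertKey(QtmacsKeysequence('<ctrl>+x <ctrl>+f'), 'find-file')
#   km.match(QtmacsKeysequence('<ctrl>+x'))            # -> (None, True)
#   km.match(QtmacsKeysequence('<ctrl>+x <ctrl>+f'))   # -> ('find-file', True)
#   km.match(QtmacsKeysequence('<ctrl>+g'))            # -> (None, False)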
# ----------------------------------------------------------------------
# Functions
# ----------------------------------------------------------------------
def qteIsQtmacsWidget(widgetObj):
"""
Determine if a widget is part of Qtmacs widget hierarchy.
A widget belongs to the Qtmacs hierarchy if it, or one of its
parents, has a "_qteAdmin" attribute (added via ``qteAddWidget``).
    Since every applet has this attribute, it is guaranteed that the
    function returns **True** if the widget is embedded somewhere
    inside an applet.
|Args|
* ``widgetObj`` (**QWidget**): the widget to test.
|Returns|
* **bool**: **True** if the widget, or one of its ancestors
in the Qt hierarchy have a '_qteAdmin' attribute.
|Raises|
* **None**
"""
if widgetObj is None:
return False
if hasattr(widgetObj, '_qteAdmin'):
return True
# Keep track of the already visited objects to avoid infinite loops.
visited = [widgetObj]
# Traverse the hierarchy until a parent features the '_qteAdmin'
# attribute, the parent is None, or the parent is an already
# visited widget.
wid = widgetObj.parent()
while wid not in visited:
if hasattr(wid, '_qteAdmin'):
return True
elif wid is None:
return False
else:
visited.append(wid)
wid = wid.parent()
return False
def qteGetAppletFromWidget(widgetObj):
"""
Return the parent applet of ``widgetObj``.
|Args|
* ``widgetObj`` (**QWidget**): widget (if any) for which the
containing applet is requested.
|Returns|
* **QtmacsApplet**: the applet containing ``widgetObj`` or **None**.
|Raises|
* **None**
"""
if widgetObj is None:
return None
if hasattr(widgetObj, '_qteAdmin'):
return widgetObj._qteAdmin.qteApplet
# Keep track of the already visited objects to avoid infinite loops.
visited = [widgetObj]
# Traverse the hierarchy until a parent features the '_qteAdmin'
# attribute, the parent is None, or the parent is an already
# visited widget.
wid = widgetObj.parent()
while wid not in visited:
if hasattr(wid, '_qteAdmin'):
return wid._qteAdmin.qteApplet
elif wid is None:
return None
else:
visited.append(wid)
wid = wid.parent()
return None
class QtmacsModeBar(QtGui.QWidget):
"""
Represent a list of modes, each represented by a ``QLabel``.
The purpose of this class is to facilitate a flexible mechanims
to display various modes or status flags. It consists of a list
of modes, each with an associated value and a ``QLabel`` instance
that are lined up horizontally.
It is typically displayed beneath another widget eg. ``SciEditor``.
The class takes care that all but the rightmost label are only as
long and high as necessary.
A typical use case inside an applet with a ``QtmacsScintilla`` widget
could be as follows::
# Create a mode bar instance and add some modes.
self.qteScintilla = QtmacsScintilla(self)
self._qteModeBar = QtmacsModeBar()
self._qteModeBar.qteAddMode('EOL', 'U')
self._qteModeBar.qteAddMode('READONLY', 'R')
self._qteModeBar.qteAddMode('MODIFIED', '-')
# Arrange the layout so that the mode bar is at the bottom.
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.qteScintilla)
vbox.addWidget(self._qteModeBar)
self.setLayout(vbox)
|Args|
* **None**
|Raises|
* **None**
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setLayout(QtGui.QHBoxLayout())
self._qteModeList = []
def _qteGetLabelInstance(self):
"""
Return an instance of a ``QLabel`` with the correct color scheme.
|Args|
* **None**
|Returns|
* **QLabel**
|Raises|
* **None**
"""
# Create a label with the proper colour appearance.
layout = self.layout()
label = QtGui.QLabel(self)
style = 'QLabel { background-color : white; color : blue; }'
label.setStyleSheet(style)
return label
def _qteUpdateLabelWidths(self):
"""
Ensure all but the last ``QLabel`` are only as wide as necessary.
The width of the last label is manually set to a large value to
ensure that it stretches as much as possible. The height of all
widgets is also set appropriately. The method also takes care
        of rearranging the widgets in the correct order, ie. in the
order specified by ``self._qteModeList``.
|Args|
* **None**
|Returns|
* **None**
|Raises|
* **None**
"""
layout = self.layout()
# Remove all labels from the list and add them again in the
# new order.
for ii in range(layout.count()):
label = layout.itemAt(ii)
layout.removeItem(label)
# Add all labels and ensure they have appropriate width.
for item in self._qteModeList:
label = item[2]
width = label.fontMetrics().size(0, str(item[1])).width()
label.setMaximumWidth(width)
label.setMinimumWidth(width)
layout.addWidget(label)
# Remove the width constraint from the last label so that
# it can expand to the right.
_, _, label = self._qteModeList[-1]
label.setMaximumWidth(1600000)
@type_check
def qteGetMode(self, mode: str):
"""
Return a tuple containing the ``mode``, its value, and
its associated ``QLabel`` instance.
|Args|
        * ``mode`` (**str**): name of the mode to look up.
|Returns|
* (**str**, **object**, **QLabel**: (mode, value, label).
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
for item in self._qteModeList:
if item[0] == mode:
return item
return None
@type_check
def qteAddMode(self, mode: str, value):
"""
Append label for ``mode`` and display ``value`` on it.
|Args|
        * ``mode`` (**str**): name of the mode.
* ``value`` (**object**): value of mode.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Add the label to the layout and the local mode list.
label = self._qteGetLabelInstance()
label.setText(value)
self._qteModeList.append((mode, value, label))
self._qteUpdateLabelWidths()
@type_check
def qteChangeModeValue(self, mode: str, value):
"""
Change the value of ``mode`` to ``value``.
If ``mode`` does not exist then nothing happens and the method
returns **False**, otherwise **True**.
|Args|
        * ``mode`` (**str**): name of the mode.
* ``value`` (**object**): value of mode.
|Returns|
* **bool**: **True** if the item was removed and **False** if there
was an error (most likely ``mode`` does not exist).
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Search through the list for ``mode``.
for idx, item in enumerate(self._qteModeList):
if item[0] == mode:
# Update the displayed value in the label.
label = item[2]
label.setText(value)
# Overwrite the old data record with the updated one
# and adjust the widths of the modes.
self._qteModeList[idx] = (mode, value, label)
self._qteUpdateLabelWidths()
return True
return False
@type_check
def qteInsertMode(self, pos: int, mode: str, value):
"""
Insert ``mode`` at position ``pos``.
If ``pos`` is negative then this is equivalent to ``pos=0``. If it
is larger than the number of modes in the list then it is appended
as the last element.
|Args|
* ``pos`` (**int**): insertion point.
* ``mode`` (**str**): name of mode.
* ``value`` (**object**) value associated with ``mode``.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Add the label to the list.
label = self._qteGetLabelInstance()
        label.setText(str(value))
self._qteModeList.insert(pos, (mode, value, label))
self._qteUpdateLabelWidths()
@type_check
def qteRemoveMode(self, mode: str):
"""
Remove ``mode`` and associated label.
If ``mode`` does not exist then nothing happens and the method
returns **False**, otherwise **True**.
|Args|
        * ``mode`` (**str**): name of the mode to remove.
|Returns|
* **bool**: **True** if the item was removed and **False** if there
was an error (most likely ``mode`` does not exist).
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Search through the list for ``mode``.
for idx, item in enumerate(self._qteModeList):
if item[0] == mode:
# Remove the record and delete the label.
self._qteModeList.remove(item)
item[2].hide()
item[2].deleteLater()
self._qteUpdateLabelWidths()
return True
return False
def qteAllModes(self):
"""
        Return the names of all modes currently in the list.
        |Args|
        * **None**
        |Returns|
        * **list**: a list of all mode names.
        |Raises|
        * **None**
"""
return [_[0] for _ in self._qteModeList]
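# --- Hedged usage sketch (added for illustration; not part of the original
# file). Assuming ``bar`` is an instance of the mode-bar widget defined
# above, its mode API would typically be driven like this (mode names and
# values are made up):
#
#     bar.qteAddMode('eol', 'unix')            # append a label showing 'unix'
#     bar.qteInsertMode(0, 'major', 'python')  # insert a label at the front
#     bar.qteChangeModeValue('eol', 'dos')     # -> True; label now shows 'dos'
#     bar.qteGetMode('major')                  # -> ('major', 'python', <QLabel>)
#     bar.qteAllModes()                        # -> ['major', 'eol']
#     bar.qteRemoveMode('eol')                 # -> True; the label is deleted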
| gpl-3.0 |
wziard/autokey | src/lib/qtui/folderpage.py | 48 | 2524 | #!/usr/bin/env python
# coding=UTF-8
#
# Generated by pykdeuic4 from folderpage.ui on Sun Mar 4 11:39:39 2012
#
# WARNING! All changes to this file will be lost.
from PyKDE4 import kdecore
from PyKDE4 import kdeui
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_FolderPage(object):
def setupUi(self, FolderPage):
FolderPage.setObjectName(_fromUtf8("FolderPage"))
FolderPage.resize(568, 530)
self.verticalLayout_2 = QtGui.QVBoxLayout(FolderPage)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.urlLabel = KUrlLabel(FolderPage)
self.urlLabel.setAlignment(QtCore.Qt.AlignCenter)
self.urlLabel.setObjectName(_fromUtf8("urlLabel"))
self.verticalLayout_2.addWidget(self.urlLabel)
self.settingsGroupBox = QtGui.QGroupBox(FolderPage)
self.settingsGroupBox.setObjectName(_fromUtf8("settingsGroupBox"))
self.verticalLayout = QtGui.QVBoxLayout(self.settingsGroupBox)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.showInTrayCheckbox = QtGui.QCheckBox(self.settingsGroupBox)
self.showInTrayCheckbox.setObjectName(_fromUtf8("showInTrayCheckbox"))
self.verticalLayout.addWidget(self.showInTrayCheckbox)
self.kseparator = KSeparator(self.settingsGroupBox)
self.kseparator.setObjectName(_fromUtf8("kseparator"))
self.verticalLayout.addWidget(self.kseparator)
self.settingsWidget = SettingsWidget(self.settingsGroupBox)
self.settingsWidget.setObjectName(_fromUtf8("settingsWidget"))
self.verticalLayout.addWidget(self.settingsWidget)
self.verticalLayout_2.addWidget(self.settingsGroupBox)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem)
self.retranslateUi(FolderPage)
QtCore.QMetaObject.connectSlotsByName(FolderPage)
def retranslateUi(self, FolderPage):
FolderPage.setWindowTitle(kdecore.i18n(_fromUtf8("Form")))
self.urlLabel.setTipText(kdecore.i18n(_fromUtf8("Open the folder in the default file manager")))
self.settingsGroupBox.setTitle(kdecore.i18n(_fromUtf8("Folder Settings")))
self.showInTrayCheckbox.setText(kdecore.i18n(_fromUtf8("Show in notification icon menu")))
from PyKDE4.kdeui import KUrlLabel, KSeparator
from configwindow import SettingsWidget
| gpl-3.0 |
SUSE-Cloud/nova | nova/api/openstack/compute/contrib/cloudpipe_update.py | 12 | 2603 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import db
from nova.openstack.common.gettextutils import _
authorize = extensions.extension_authorizer('compute', 'cloudpipe_update')
class CloudpipeUpdateController(wsgi.Controller):
"""Handle updating the vpn ip/port for cloudpipe instances."""
def __init__(self):
super(CloudpipeUpdateController, self).__init__()
@wsgi.action("update")
def update(self, req, id, body):
"""Configure cloudpipe parameters for the project."""
context = req.environ['nova.context']
authorize(context)
if id != "configure-project":
msg = _("Unknown action %s") % id
raise webob.exc.HTTPBadRequest(explanation=msg)
project_id = context.project_id
try:
params = body['configure_project']
vpn_ip = params['vpn_ip']
vpn_port = params['vpn_port']
except (TypeError, KeyError):
raise webob.exc.HTTPUnprocessableEntity()
networks = db.project_get_networks(context, project_id)
for network in networks:
db.network_update(context, network['id'],
{'vpn_public_address': vpn_ip,
'vpn_public_port': int(vpn_port)})
return webob.exc.HTTPAccepted()
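# Hedged request sketch (added for illustration; not part of the original
# file). The controller above expects a body of the shape below -- the
# values are made-up examples, and the exact URL routing is supplied by the
# ``os-cloudpipe`` resource this extension attaches to:
#
#     {
#         "configure_project": {
#             "vpn_ip": "192.168.1.254",
#             "vpn_port": "1000"
#         }
#     }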
class Cloudpipe_update(extensions.ExtensionDescriptor):
"""Adds the ability to set the vpn ip/port for cloudpipe instances."""
name = "CloudpipeUpdate"
alias = "os-cloudpipe-update"
namespace = "http://docs.openstack.org/compute/ext/cloudpipe-update/api/v2"
updated = "2012-11-14T00:00:00+00:00"
def get_controller_extensions(self):
controller = CloudpipeUpdateController()
extension = extensions.ControllerExtension(self, 'os-cloudpipe',
controller)
return [extension]
| apache-2.0 |
ddd332/presto | presto-docs/target/sphinx/sphinx/versioning.py | 5 | 4264 | # -*- coding: utf-8 -*-
"""
sphinx.versioning
~~~~~~~~~~~~~~~~~
Implements the low-level algorithms Sphinx uses for the versioning of
doctrees.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from uuid import uuid4
from operator import itemgetter
from sphinx.util.pycompat import product, zip_longest, all
# anything below that ratio is considered equal/changed
VERSIONING_RATIO = 65
def add_uids(doctree, condition):
"""Add a unique id to every node in the `doctree` which matches the
condition and yield the nodes.
:param doctree:
A :class:`docutils.nodes.document` instance.
:param condition:
A callable which returns either ``True`` or ``False`` for a given node.
"""
for node in doctree.traverse(condition):
node.uid = uuid4().hex
yield node
def merge_doctrees(old, new, condition):
"""Merge the `old` doctree with the `new` one while looking at nodes
matching the `condition`.
Each node which replaces another one or has been added to the `new` doctree
will be yielded.
:param condition:
A callable which returns either ``True`` or ``False`` for a given node.
"""
old_iter = old.traverse(condition)
new_iter = new.traverse(condition)
old_nodes = []
new_nodes = []
ratios = {}
seen = set()
    # compare the nodes of each doctree in order
for old_node, new_node in zip_longest(old_iter, new_iter):
if old_node is None:
new_nodes.append(new_node)
continue
if new_node is None:
old_nodes.append(old_node)
continue
ratio = get_ratio(old_node.rawsource, new_node.rawsource)
if ratio == 0:
new_node.uid = old_node.uid
seen.add(new_node)
else:
ratios[old_node, new_node] = ratio
old_nodes.append(old_node)
new_nodes.append(new_node)
# calculate the ratios for each unequal pair of nodes, should we stumble
# on a pair which is equal we set the uid and add it to the seen ones
for old_node, new_node in product(old_nodes, new_nodes):
if new_node in seen or (old_node, new_node) in ratios:
continue
ratio = get_ratio(old_node.rawsource, new_node.rawsource)
if ratio == 0:
new_node.uid = old_node.uid
seen.add(new_node)
else:
ratios[old_node, new_node] = ratio
    # choose the old node with the best ratio for each new node and reuse its
    # uid as long as the ratio is under a certain value; above that threshold
    # the nodes are considered different rather than merely changed
ratios = sorted(ratios.iteritems(), key=itemgetter(1))
for (old_node, new_node), ratio in ratios:
if new_node in seen:
continue
else:
seen.add(new_node)
if ratio < VERSIONING_RATIO:
new_node.uid = old_node.uid
else:
new_node.uid = uuid4().hex
yield new_node
# create new uuids for any new node we left out earlier, this happens
# if one or more nodes are simply added.
for new_node in set(new_nodes) - seen:
new_node.uid = uuid4().hex
yield new_node
def get_ratio(old, new):
"""Return a "similiarity ratio" (in percent) representing the similarity
between the two strings where 0 is equal and anything above less than equal.
"""
if not all([old, new]):
return VERSIONING_RATIO
return levenshtein_distance(old, new) / (len(old) / 100.0)
def levenshtein_distance(a, b):
"""Return the Levenshtein edit distance between two strings *a* and *b*."""
if a == b:
return 0
if len(a) < len(b):
a, b = b, a
if not a:
return len(b)
previous_row = xrange(len(b) + 1)
for i, column1 in enumerate(a):
current_row = [i + 1]
for j, column2 in enumerate(b):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (column1 != column2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
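if __name__ == '__main__':
    # Hedged sanity check added for illustration; it is not part of the
    # original Sphinx module. The classic "kitten" -> "sitting" pair needs
    # three edits, so the ratio is 3 / (len("kitten") / 100.0), roughly 50,
    # which is below VERSIONING_RATIO (65) and would therefore be treated by
    # merge_doctrees() as the same (possibly changed) node.
    assert levenshtein_distance('kitten', 'sitting') == 3
    assert get_ratio('kitten', 'sitting') < VERSIONING_RATIO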
| apache-2.0 |
Magicking/pycoin | pycoin/key/bip32.py | 14 | 5602 | # -*- coding: utf-8 -*-
"""
A BIP0032-style hierarchical wallet.
Implement a BIP0032-style hierarchical wallet which can create public
or private wallet keys. Each key can create many child nodes. Each node
has a wallet key and a corresponding private & public key, which can
be used to generate Bitcoin addresses or WIF private keys.
At any stage, the private information can be stripped away, after which
descendants can only produce public keys.
Private keys can also generate "hardened" children, which cannot be
generated by the corresponding public keys. This is useful for generating
"change" addresses, for example, which there is no need to share with people
you give public keys to.
The MIT License (MIT)
Copyright (c) 2013 by Richard Kiss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import hashlib
import hmac
import logging
import struct
from .. import ecdsa
from ..encoding import public_pair_to_sec, from_bytes_32, to_bytes_32
from ..ecdsa.ellipticcurve import INFINITY
logger = logging.getLogger(__name__)
ORDER = ecdsa.generator_secp256k1.order()
_SUBKEY_VALIDATION_LOG_ERR_FMT = """
BUY A LOTTO TICKET RIGHT NOW! (And consider giving up your wallet to
science!)
You have stumbled across an astronomically unlikely scenario. Your HD
wallet contains an invalid subkey. Having access to this information would
be incredibly valuable to the Bitcoin development community.
If you are inclined to help, please make sure to back up this wallet (or
any outputted information) onto a USB drive and e-mail "Richard Kiss"
<[email protected]> or "Matt Bogosian" <[email protected]> for
instructions on how best to donate it without losing your bitcoins.
WARNING: DO NOT SEND ANY WALLET INFORMATION UNLESS YOU WANT TO LOSE ALL
THE BITCOINS IT CONTAINS.
""".strip()
class DerivationError(ValueError): pass
def subkey_secret_exponent_chain_code_pair(
secret_exponent, chain_code_bytes, i, is_hardened, public_pair=None):
"""
    Compute info for a child node of this node.
secret_exponent:
base secret exponent
chain_code:
base chain code
i:
the index for this node.
is_hardened:
use "hardened key derivation". The public version of this node cannot calculate this child.
public_pair:
the public_pair for the given secret exponent. If you leave it None, it's calculated for you
(but then it's slower)
Returns a pair (new_secret_exponent, new_chain_code)
"""
i_as_bytes = struct.pack(">L", i)
if is_hardened:
data = b'\0' + to_bytes_32(secret_exponent) + i_as_bytes
else:
if public_pair is None:
public_pair = ecdsa.public_pair_for_secret_exponent(ecdsa.generator_secp256k1, secret_exponent)
sec = public_pair_to_sec(public_pair, compressed=True)
data = sec + i_as_bytes
I64 = hmac.HMAC(key=chain_code_bytes, msg=data, digestmod=hashlib.sha512).digest()
I_left_as_exponent = from_bytes_32(I64[:32])
if I_left_as_exponent >= ORDER:
logger.critical(_SUBKEY_VALIDATION_LOG_ERR_FMT)
raise DerivationError('I_L >= {}'.format(ORDER))
new_secret_exponent = (I_left_as_exponent + secret_exponent) % ORDER
if new_secret_exponent == 0:
logger.critical(_SUBKEY_VALIDATION_LOG_ERR_FMT)
raise DerivationError('k_{} == 0'.format(i))
new_chain_code = I64[32:]
return new_secret_exponent, new_chain_code
def subkey_public_pair_chain_code_pair(public_pair, chain_code_bytes, i):
"""
    Compute info for a child node of this node.
public_pair:
base public pair
chain_code:
base chain code
i:
the index for this node.
Returns a pair (new_public_pair, new_chain_code)
"""
i_as_bytes = struct.pack(">l", i)
sec = public_pair_to_sec(public_pair, compressed=True)
data = sec + i_as_bytes
I64 = hmac.HMAC(key=chain_code_bytes, msg=data, digestmod=hashlib.sha512).digest()
I_left_as_exponent = from_bytes_32(I64[:32])
x, y = public_pair
the_point = I_left_as_exponent * ecdsa.generator_secp256k1 + \
ecdsa.Point(ecdsa.generator_secp256k1.curve(), x, y, ORDER)
if the_point == INFINITY:
logger.critical(_SUBKEY_VALIDATION_LOG_ERR_FMT)
raise DerivationError('K_{} == {}'.format(i, the_point))
I_left_as_exponent = from_bytes_32(I64[:32])
if I_left_as_exponent >= ORDER:
logger.critical(_SUBKEY_VALIDATION_LOG_ERR_FMT)
raise DerivationError('I_L >= {}'.format(ORDER))
new_public_pair = the_point.pair()
new_chain_code = I64[32:]
return new_public_pair, new_chain_code
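if __name__ == '__main__':
    # Hedged usage sketch added for illustration; it is not part of the
    # original module, and the seed below is a made-up test value, not real
    # wallet material.
    I64 = hmac.HMAC(key=b'Bitcoin seed', msg=b'example seed material',
                    digestmod=hashlib.sha512).digest()
    master_secret = from_bytes_32(I64[:32]) % ORDER
    master_chain = I64[32:]
    # Non-hardened child 0: the same child could also be derived from the
    # public side via subkey_public_pair_chain_code_pair().
    k0, c0 = subkey_secret_exponent_chain_code_pair(
        master_secret, master_chain, 0, is_hardened=False)
    # Hardened child (index 0 | 0x80000000 in the usual BIP32 numbering; the
    # index convention is the caller's responsibility): only derivable when
    # the private key is known.
    k0h, c0h = subkey_secret_exponent_chain_code_pair(
        master_secret, master_chain, 0x80000000, is_hardened=True)
    assert k0 != k0h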
| mit |
alaski/nova | nova/tests/unit/test_metadata.py | 3 | 63631 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for metadata service."""
import base64
import copy
import hashlib
import hmac
import os
import re
import requests
try:
import cPickle as pickle
except ImportError:
import pickle
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
import six
import webob
from nova.api.metadata import base
from nova.api.metadata import handler
from nova.api.metadata import password
from nova.api.metadata import vendordata
from nova.api.metadata import vendordata_dynamic
from nova import block_device
from nova.compute import flavors
from nova.conductor import api as conductor_api
from nova import context
from nova import exception
from nova.network import api as network_api
from nova.network import model as network_model
from nova.network.neutronv2 import api as neutronapi
from nova.network.security_group import openstack_driver
from nova import objects
from nova.objects import virt_device_metadata as metadata_obj
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_network
from nova.tests import uuidsentinel as uuids
from nova import utils
from nova.virt import netutils
CONF = cfg.CONF
USER_DATA_STRING = (b"This is an encoded string")
ENCODE_USER_DATA_STRING = base64.b64encode(USER_DATA_STRING)
FAKE_SEED = '7qtD24mpMR2'
def fake_inst_obj(context):
inst = objects.Instance(
context=context,
id=1,
user_id='fake_user',
uuid='b65cee2f-8c69-4aeb-be2f-f79742548fc2',
project_id='test',
key_name="key",
key_data="ssh-rsa AAAAB3Nzai....N3NtHw== someuser@somehost",
host='test',
launch_index=1,
reservation_id='r-xxxxxxxx',
user_data=ENCODE_USER_DATA_STRING,
image_ref=uuids.image_ref,
kernel_id=None,
ramdisk_id=None,
vcpus=1,
fixed_ips=[],
root_device_name='/dev/sda1',
hostname='test.novadomain',
display_name='my_displayname',
metadata={},
device_metadata=fake_metadata_objects(),
default_ephemeral_device=None,
default_swap_device=None,
system_metadata={},
security_groups=objects.SecurityGroupList(),
availability_zone=None)
inst.keypairs = objects.KeyPairList(objects=[
fake_keypair_obj(inst.key_name, inst.key_data)])
nwinfo = network_model.NetworkInfo([])
inst.info_cache = objects.InstanceInfoCache(context=context,
instance_uuid=inst.uuid,
network_info=nwinfo)
inst.flavor = flavors.get_default_flavor()
return inst
def fake_keypair_obj(name, data):
return objects.KeyPair(name=name,
type='fake_type',
public_key=data)
def return_non_existing_address(*args, **kwarg):
raise exception.NotFound()
def fake_InstanceMetadata(testcase, inst_data, address=None,
sgroups=None, content=None, extra_md=None,
vd_driver=None, network_info=None,
network_metadata=None):
content = content or []
extra_md = extra_md or {}
if sgroups is None:
sgroups = [{'name': 'default'}]
def sg_get(*args, **kwargs):
return sgroups
secgroup_api = openstack_driver.get_openstack_security_group_driver()
testcase.stub_out('%(module)s.%(class)s.get_instance_security_groups' %
{'module': secgroup_api.__module__,
'class': secgroup_api.__class__.__name__}, sg_get)
return base.InstanceMetadata(inst_data, address=address,
content=content, extra_md=extra_md,
vd_driver=vd_driver, network_info=network_info,
network_metadata=network_metadata)
def fake_request(testcase, mdinst, relpath, address="127.0.0.1",
fake_get_metadata=None, headers=None,
fake_get_metadata_by_instance_id=None, app=None):
def get_metadata_by_remote_address(self, address):
return mdinst
if app is None:
app = handler.MetadataRequestHandler()
if fake_get_metadata is None:
fake_get_metadata = get_metadata_by_remote_address
if testcase:
testcase.stub_out(
'%(module)s.%(class)s.get_metadata_by_remote_address' %
{'module': app.__module__,
'class': app.__class__.__name__},
fake_get_metadata)
if fake_get_metadata_by_instance_id:
testcase.stub_out(
'%(module)s.%(class)s.get_metadata_by_instance_id' %
{'module': app.__module__,
'class': app.__class__.__name__},
fake_get_metadata_by_instance_id)
request = webob.Request.blank(relpath)
request.remote_addr = address
if headers is not None:
request.headers.update(headers)
response = request.get_response(app)
return response
class FakeDeviceMetadata(metadata_obj.DeviceMetadata):
pass
class FakeDeviceBus(metadata_obj.DeviceBus):
pass
def fake_metadata_objects():
nic_obj = metadata_obj.NetworkInterfaceMetadata(
bus=metadata_obj.PCIDeviceBus(address='0000:00:01.0'),
mac='00:00:00:00:00:00',
tags=['foo']
)
ide_disk_obj = metadata_obj.DiskMetadata(
bus=metadata_obj.IDEDeviceBus(address='0:0'),
serial='disk-vol-2352423',
path='/dev/sda',
tags=['baz'],
)
scsi_disk_obj = metadata_obj.DiskMetadata(
bus=metadata_obj.SCSIDeviceBus(address='05c8:021e:04a7:011b'),
serial='disk-vol-2352423',
path='/dev/sda',
tags=['baz'],
)
usb_disk_obj = metadata_obj.DiskMetadata(
bus=metadata_obj.USBDeviceBus(address='05c8:021e'),
serial='disk-vol-2352423',
path='/dev/sda',
tags=['baz'],
)
fake_device_obj = FakeDeviceMetadata()
device_with_fake_bus_obj = metadata_obj.NetworkInterfaceMetadata(
bus=FakeDeviceBus(),
mac='00:00:00:00:00:00',
tags=['foo']
)
mdlist = metadata_obj.InstanceDeviceMetadata(
instance_uuid='b65cee2f-8c69-4aeb-be2f-f79742548fc2',
devices=[nic_obj, ide_disk_obj, scsi_disk_obj, usb_disk_obj,
fake_device_obj, device_with_fake_bus_obj])
return mdlist
def fake_metadata_dicts():
nic_meta = {
'type': 'nic',
'bus': 'pci',
'address': '0000:00:01.0',
'mac': '00:00:00:00:00:00',
'tags': ['foo'],
}
ide_disk_meta = {
'type': 'disk',
'bus': 'ide',
'address': '0:0',
'serial': 'disk-vol-2352423',
'path': '/dev/sda',
'tags': ['baz'],
}
scsi_disk_meta = copy.copy(ide_disk_meta)
scsi_disk_meta['bus'] = 'scsi'
scsi_disk_meta['address'] = '05c8:021e:04a7:011b'
usb_disk_meta = copy.copy(ide_disk_meta)
usb_disk_meta['bus'] = 'usb'
usb_disk_meta['address'] = '05c8:021e'
return [nic_meta, ide_disk_meta, scsi_disk_meta, usb_disk_meta]
class MetadataTestCase(test.TestCase):
def setUp(self):
super(MetadataTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.instance = fake_inst_obj(self.context)
self.flags(use_local=True, group='conductor')
self.keypair = fake_keypair_obj(self.instance.key_name,
self.instance.key_data)
fake_network.stub_out_nw_api_get_instance_nw_info(self)
def test_can_pickle_metadata(self):
        # Make sure that InstanceMetadata can be pickled. This is
        # required for the memcache backend to work correctly.
md = fake_InstanceMetadata(self, self.instance.obj_clone())
pickle.dumps(md, protocol=0)
def test_user_data(self):
inst = self.instance.obj_clone()
inst['user_data'] = base64.b64encode("happy")
md = fake_InstanceMetadata(self, inst)
self.assertEqual(
md.get_ec2_metadata(version='2009-04-04')['user-data'], "happy")
def test_no_user_data(self):
inst = self.instance.obj_clone()
inst.user_data = None
md = fake_InstanceMetadata(self, inst)
obj = object()
self.assertEqual(
md.get_ec2_metadata(version='2009-04-04').get('user-data', obj),
obj)
def _test_security_groups(self):
inst = self.instance.obj_clone()
sgroups = [{'name': name} for name in ('default', 'other')]
expected = ['default', 'other']
md = fake_InstanceMetadata(self, inst, sgroups=sgroups)
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(data['meta-data']['security-groups'], expected)
def test_security_groups(self):
self._test_security_groups()
def test_neutron_security_groups(self):
self.flags(use_neutron=True)
self._test_security_groups()
def test_local_hostname_fqdn(self):
md = fake_InstanceMetadata(self, self.instance.obj_clone())
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(data['meta-data']['local-hostname'],
"%s.%s" % (self.instance['hostname'], CONF.dhcp_domain))
def test_format_instance_mapping(self):
        # Make sure that _format_instance_mapping works.
instance_ref0 = objects.Instance(**{'id': 0,
'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
'root_device_name': None,
'default_ephemeral_device': None,
'default_swap_device': None})
instance_ref1 = objects.Instance(**{'id': 0,
'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
'root_device_name': '/dev/sda1',
'default_ephemeral_device': None,
'default_swap_device': None})
def fake_bdm_get(ctxt, uuid):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 87654321,
'snapshot_id': None,
'no_device': None,
'source_type': 'volume',
'destination_type': 'volume',
'delete_on_termination': True,
'device_name': '/dev/sdh'}),
fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': 'swap',
'delete_on_termination': None,
'device_name': '/dev/sdc'}),
fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': None,
'delete_on_termination': None,
'device_name': '/dev/sdb'})]
self.stub_out('nova.db.block_device_mapping_get_all_by_instance',
fake_bdm_get)
expected = {'ami': 'sda1',
'root': '/dev/sda1',
'ephemeral0': '/dev/sdb',
'swap': '/dev/sdc',
'ebs0': '/dev/sdh'}
conductor_api.LocalAPI()
self.assertEqual(base._format_instance_mapping(self.context,
instance_ref0), block_device._DEFAULT_MAPPINGS)
self.assertEqual(base._format_instance_mapping(self.context,
instance_ref1), expected)
def test_pubkey(self):
md = fake_InstanceMetadata(self, self.instance.obj_clone())
pubkey_ent = md.lookup("/2009-04-04/meta-data/public-keys")
self.assertEqual(base.ec2_md_print(pubkey_ent),
"0=%s" % self.instance['key_name'])
self.assertEqual(base.ec2_md_print(pubkey_ent['0']['openssh-key']),
self.instance['key_data'])
def test_image_type_ramdisk(self):
inst = self.instance.obj_clone()
inst['ramdisk_id'] = uuids.ramdisk_id
md = fake_InstanceMetadata(self, inst)
data = md.lookup("/latest/meta-data/ramdisk-id")
self.assertIsNotNone(data)
self.assertTrue(re.match('ari-[0-9a-f]{8}', data))
def test_image_type_kernel(self):
inst = self.instance.obj_clone()
inst['kernel_id'] = uuids.kernel_id
md = fake_InstanceMetadata(self, inst)
data = md.lookup("/2009-04-04/meta-data/kernel-id")
self.assertTrue(re.match('aki-[0-9a-f]{8}', data))
self.assertEqual(
md.lookup("/ec2/2009-04-04/meta-data/kernel-id"), data)
def test_image_type_no_kernel_raises(self):
inst = self.instance.obj_clone()
md = fake_InstanceMetadata(self, inst)
self.assertRaises(base.InvalidMetadataPath,
md.lookup, "/2009-04-04/meta-data/kernel-id")
def test_check_version(self):
inst = self.instance.obj_clone()
md = fake_InstanceMetadata(self, inst)
self.assertTrue(md._check_version('1.0', '2009-04-04'))
self.assertFalse(md._check_version('2009-04-04', '1.0'))
self.assertFalse(md._check_version('2009-04-04', '2008-09-01'))
self.assertTrue(md._check_version('2008-09-01', '2009-04-04'))
self.assertTrue(md._check_version('2009-04-04', '2009-04-04'))
@mock.patch('nova.virt.netutils.get_injected_network_template')
def test_InstanceMetadata_uses_passed_network_info(self, mock_get):
network_info = []
mock_get.return_value = False
base.InstanceMetadata(fake_inst_obj(self.context),
network_info=network_info)
mock_get.assert_called_once_with(network_info)
@mock.patch.object(netutils, "get_network_metadata", autospec=True)
def test_InstanceMetadata_gets_network_metadata(self, mock_netutils):
network_data = {'links': [], 'networks': [], 'services': []}
mock_netutils.return_value = network_data
md = base.InstanceMetadata(fake_inst_obj(self.context))
self.assertEqual(network_data, md.network_metadata)
def test_InstanceMetadata_invoke_metadata_for_config_drive(self):
fakes.stub_out_key_pair_funcs(self)
inst = self.instance.obj_clone()
inst_md = base.InstanceMetadata(inst)
expected_paths = [
'ec2/2009-04-04/user-data',
'ec2/2009-04-04/meta-data.json',
'ec2/latest/user-data',
'ec2/latest/meta-data.json',
'openstack/2012-08-10/meta_data.json',
'openstack/2012-08-10/user_data',
'openstack/2013-04-04/meta_data.json',
'openstack/2013-04-04/user_data',
'openstack/2013-10-17/meta_data.json',
'openstack/2013-10-17/user_data',
'openstack/2013-10-17/vendor_data.json',
'openstack/2015-10-15/meta_data.json',
'openstack/2015-10-15/user_data',
'openstack/2015-10-15/vendor_data.json',
'openstack/2015-10-15/network_data.json',
'openstack/2016-06-30/meta_data.json',
'openstack/2016-06-30/user_data',
'openstack/2016-06-30/vendor_data.json',
'openstack/2016-06-30/network_data.json',
'openstack/2016-10-06/meta_data.json',
'openstack/2016-10-06/user_data',
'openstack/2016-10-06/vendor_data.json',
'openstack/2016-10-06/network_data.json',
'openstack/2016-10-06/vendor_data2.json',
'openstack/latest/meta_data.json',
'openstack/latest/user_data',
'openstack/latest/vendor_data.json',
'openstack/latest/network_data.json',
'openstack/latest/vendor_data2.json',
]
actual_paths = []
for (path, value) in inst_md.metadata_for_config_drive():
actual_paths.append(path)
self.assertIsNotNone(path)
self.assertEqual(expected_paths, actual_paths)
@mock.patch('nova.virt.netutils.get_injected_network_template')
def test_InstanceMetadata_queries_network_API_when_needed(self, mock_get):
network_info_from_api = []
mock_get.return_value = False
base.InstanceMetadata(fake_inst_obj(self.context))
mock_get.assert_called_once_with(network_info_from_api)
def test_local_ipv4(self):
nw_info = fake_network.fake_get_instance_nw_info(self,
num_networks=2)
expected_local = "192.168.1.100"
md = fake_InstanceMetadata(self, self.instance,
network_info=nw_info, address="fake")
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(expected_local, data['meta-data']['local-ipv4'])
def test_local_ipv4_from_nw_info(self):
nw_info = fake_network.fake_get_instance_nw_info(self,
num_networks=2)
expected_local = "192.168.1.100"
md = fake_InstanceMetadata(self, self.instance,
network_info=nw_info)
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(data['meta-data']['local-ipv4'], expected_local)
def test_local_ipv4_from_address(self):
expected_local = "fake"
md = fake_InstanceMetadata(self, self.instance,
network_info=[], address="fake")
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(data['meta-data']['local-ipv4'], expected_local)
@mock.patch.object(base64, 'b64encode', lambda data: FAKE_SEED)
@mock.patch('nova.cells.rpcapi.CellsAPI.get_keypair_at_top')
@mock.patch.object(jsonutils, 'dump_as_bytes')
def _test_as_json_with_options(self, mock_json_dump_as_bytes,
mock_cells_keypair,
is_cells=False, os_version=base.GRIZZLY):
if is_cells:
self.flags(enable=True, group='cells')
self.flags(cell_type='compute', group='cells')
instance = self.instance
keypair = self.keypair
md = fake_InstanceMetadata(self, instance)
expected_metadata = {
'uuid': md.uuid,
'hostname': md._get_hostname(),
'name': instance.display_name,
'launch_index': instance.launch_index,
'availability_zone': md.availability_zone,
}
if md.launch_metadata:
expected_metadata['meta'] = md.launch_metadata
if md.files:
expected_metadata['files'] = md.files
if md.extra_md:
expected_metadata['extra_md'] = md.extra_md
if md.network_config:
expected_metadata['network_config'] = md.network_config
if instance.key_name:
expected_metadata['public_keys'] = {
keypair.name: keypair.public_key
}
expected_metadata['keys'] = [{'type': keypair.type,
'data': keypair.public_key,
'name': keypair.name}]
if md._check_os_version(base.GRIZZLY, os_version):
expected_metadata['random_seed'] = FAKE_SEED
if md._check_os_version(base.LIBERTY, os_version):
expected_metadata['project_id'] = instance.project_id
if md._check_os_version(base.NEWTON_ONE, os_version):
expected_metadata['devices'] = fake_metadata_dicts()
mock_cells_keypair.return_value = keypair
md._metadata_as_json(os_version, 'non useless path parameter')
if instance.key_name:
if is_cells:
mock_cells_keypair.assert_called_once_with(mock.ANY,
instance.user_id,
instance.key_name)
self.assertIsInstance(mock_cells_keypair.call_args[0][0],
context.RequestContext)
self.assertEqual(md.md_mimetype, base.MIME_TYPE_APPLICATION_JSON)
mock_json_dump_as_bytes.assert_called_once_with(expected_metadata)
def test_as_json(self):
for os_version in base.OPENSTACK_VERSIONS:
self._test_as_json_with_options(os_version=os_version)
def test_as_json_with_cells_mode(self):
for os_version in base.OPENSTACK_VERSIONS:
self._test_as_json_with_options(is_cells=True,
os_version=os_version)
@mock.patch.object(objects.Instance, 'get_by_uuid')
def test_metadata_as_json_deleted_keypair(self, mock_inst_get_by_uuid):
"""Tests that we handle missing instance keypairs.
"""
instance = self.instance.obj_clone()
# we want to make sure that key_name is set but not keypairs so it has
# to be lazy-loaded from the database
delattr(instance, 'keypairs')
mock_inst_get_by_uuid.return_value = instance
md = fake_InstanceMetadata(self, instance)
meta = md._metadata_as_json(base.OPENSTACK_VERSIONS[-1], path=None)
meta = jsonutils.loads(meta)
self.assertNotIn('keys', meta)
self.assertNotIn('public_keys', meta)
class OpenStackMetadataTestCase(test.TestCase):
def setUp(self):
super(OpenStackMetadataTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.instance = fake_inst_obj(self.context)
self.flags(use_local=True, group='conductor')
fake_network.stub_out_nw_api_get_instance_nw_info(self)
def test_empty_device_metadata(self):
fakes.stub_out_key_pair_funcs(self)
inst = self.instance.obj_clone()
inst.device_metadata = None
mdinst = fake_InstanceMetadata(self, inst)
mdjson = mdinst.lookup("/openstack/latest/meta_data.json")
mddict = jsonutils.loads(mdjson)
self.assertEqual([], mddict['devices'])
def test_device_metadata(self):
        # Because we handle a list of devices, we have only one test; it
        # includes the various device types that we have to test, as well
        # as a couple of fake device types and bus types that should be
        # silently ignored
fakes.stub_out_key_pair_funcs(self)
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self, inst)
mdjson = mdinst.lookup("/openstack/latest/meta_data.json")
mddict = jsonutils.loads(mdjson)
self.assertEqual(fake_metadata_dicts(), mddict['devices'])
def test_top_level_listing(self):
        # request for /openstack should list the available versions
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self, inst)
result = mdinst.lookup("/openstack")
# trailing / should not affect anything
self.assertEqual(result, mdinst.lookup("/openstack/"))
# the 'content' should not show up in directory listing
self.assertNotIn(base.CONTENT_DIR, result)
self.assertIn('2012-08-10', result)
self.assertIn('latest', result)
def test_version_content_listing(self):
# request for /openstack/<version>/ should show metadata.json
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self, inst)
listing = mdinst.lookup("/openstack/2012-08-10")
self.assertIn("meta_data.json", listing)
def test_returns_apis_supported_in_liberty_version(self):
mdinst = fake_InstanceMetadata(self, self.instance)
liberty_supported_apis = mdinst.lookup("/openstack/2015-10-15")
self.assertEqual([base.MD_JSON_NAME, base.UD_NAME, base.PASS_NAME,
base.VD_JSON_NAME, base.NW_JSON_NAME],
liberty_supported_apis)
def test_returns_apis_supported_in_havana_version(self):
mdinst = fake_InstanceMetadata(self, self.instance)
havana_supported_apis = mdinst.lookup("/openstack/2013-10-17")
self.assertEqual([base.MD_JSON_NAME, base.UD_NAME, base.PASS_NAME,
base.VD_JSON_NAME], havana_supported_apis)
def test_returns_apis_supported_in_folsom_version(self):
mdinst = fake_InstanceMetadata(self, self.instance)
folsom_supported_apis = mdinst.lookup("/openstack/2012-08-10")
self.assertEqual([base.MD_JSON_NAME, base.UD_NAME],
folsom_supported_apis)
def test_returns_apis_supported_in_grizzly_version(self):
mdinst = fake_InstanceMetadata(self, self.instance)
grizzly_supported_apis = mdinst.lookup("/openstack/2013-04-04")
self.assertEqual([base.MD_JSON_NAME, base.UD_NAME, base.PASS_NAME],
grizzly_supported_apis)
def test_metadata_json(self):
fakes.stub_out_key_pair_funcs(self)
inst = self.instance.obj_clone()
content = [
('/etc/my.conf', "content of my.conf"),
('/root/hello', "content of /root/hello"),
]
mdinst = fake_InstanceMetadata(self, inst,
content=content)
mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
mdjson = mdinst.lookup("/openstack/latest/meta_data.json")
mddict = jsonutils.loads(mdjson)
self.assertEqual(mddict['uuid'], self.instance['uuid'])
self.assertIn('files', mddict)
self.assertIn('public_keys', mddict)
self.assertEqual(mddict['public_keys'][self.instance['key_name']],
self.instance['key_data'])
self.assertIn('launch_index', mddict)
self.assertEqual(mddict['launch_index'], self.instance['launch_index'])
# verify that each of the things we put in content
# resulted in an entry in 'files', that their content
# there is as expected, and that /content lists them.
for (path, content) in content:
fent = [f for f in mddict['files'] if f['path'] == path]
self.assertEqual(1, len(fent))
fent = fent[0]
found = mdinst.lookup("/openstack%s" % fent['content_path'])
self.assertEqual(found, content)
def test_x509_keypair(self):
inst = self.instance.obj_clone()
expected = {'name': self.instance['key_name'],
'type': 'x509',
'data': 'public_key'}
inst.keypairs[0].name = expected['name']
inst.keypairs[0].type = expected['type']
inst.keypairs[0].public_key = expected['data']
mdinst = fake_InstanceMetadata(self, inst)
mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
mddict = jsonutils.loads(mdjson)
self.assertEqual([expected], mddict['keys'])
def test_extra_md(self):
# make sure extra_md makes it through to metadata
fakes.stub_out_key_pair_funcs(self)
inst = self.instance.obj_clone()
extra = {'foo': 'bar', 'mylist': [1, 2, 3],
'mydict': {"one": 1, "two": 2}}
mdinst = fake_InstanceMetadata(self, inst, extra_md=extra)
mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
mddict = jsonutils.loads(mdjson)
for key, val in six.iteritems(extra):
self.assertEqual(mddict[key], val)
def test_password(self):
        # make sure the password path resolves to the password handler
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self, inst)
result = mdinst.lookup("/openstack/latest/password")
self.assertEqual(result, password.handle_password)
def test_userdata(self):
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self, inst)
userdata_found = mdinst.lookup("/openstack/2012-08-10/user_data")
self.assertEqual(USER_DATA_STRING, userdata_found)
# since we had user-data in this instance, it should be in listing
self.assertIn('user_data', mdinst.lookup("/openstack/2012-08-10"))
inst.user_data = None
mdinst = fake_InstanceMetadata(self, inst)
# since this instance had no user-data it should not be there.
self.assertNotIn('user_data', mdinst.lookup("/openstack/2012-08-10"))
self.assertRaises(base.InvalidMetadataPath,
mdinst.lookup, "/openstack/2012-08-10/user_data")
def test_random_seed(self):
fakes.stub_out_key_pair_funcs(self)
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self, inst)
        # verify that 2013-04-04 has the 'random_seed' field
mdjson = mdinst.lookup("/openstack/2013-04-04/meta_data.json")
mddict = jsonutils.loads(mdjson)
self.assertIn("random_seed", mddict)
self.assertEqual(len(base64.b64decode(mddict["random_seed"])), 512)
        # verify that older versions do not have it
mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
self.assertNotIn("random_seed", jsonutils.loads(mdjson))
def test_project_id(self):
fakes.stub_out_key_pair_funcs(self)
mdinst = fake_InstanceMetadata(self, self.instance)
# verify that 2015-10-15 has the 'project_id' field
mdjson = mdinst.lookup("/openstack/2015-10-15/meta_data.json")
mddict = jsonutils.loads(mdjson)
self.assertIn("project_id", mddict)
self.assertEqual(mddict["project_id"], self.instance.project_id)
        # verify that older versions do not have it
mdjson = mdinst.lookup("/openstack/2013-10-17/meta_data.json")
self.assertNotIn("project_id", jsonutils.loads(mdjson))
def test_no_dashes_in_metadata(self):
# top level entries in meta_data should not contain '-' in their name
fakes.stub_out_key_pair_funcs(self)
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self, inst)
mdjson = jsonutils.loads(
mdinst.lookup("/openstack/latest/meta_data.json"))
self.assertEqual([], [k for k in mdjson.keys() if k.find("-") != -1])
def test_vendor_data_presence(self):
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self, inst)
# verify that 2013-10-17 has the vendor_data.json file
result = mdinst.lookup("/openstack/2013-10-17")
self.assertIn('vendor_data.json', result)
        # verify that older versions do not have it
result = mdinst.lookup("/openstack/2013-04-04")
self.assertNotIn('vendor_data.json', result)
# verify that 2016-10-06 has the vendor_data2.json file
result = mdinst.lookup("/openstack/2016-10-06")
self.assertIn('vendor_data2.json', result)
def test_vendor_data_response(self):
inst = self.instance.obj_clone()
mydata = {'mykey1': 'value1', 'mykey2': 'value2'}
class myVdriver(vendordata.VendorDataDriver):
def __init__(self, *args, **kwargs):
super(myVdriver, self).__init__(*args, **kwargs)
data = mydata.copy()
uuid = kwargs['instance']['uuid']
data.update({'inst_uuid': uuid})
self.data = data
def get(self):
return self.data
mdinst = fake_InstanceMetadata(self, inst, vd_driver=myVdriver)
# verify that 2013-10-17 has the vendor_data.json file
vdpath = "/openstack/2013-10-17/vendor_data.json"
vd = jsonutils.loads(mdinst.lookup(vdpath))
# the instance should be passed through, and our class copies the
# uuid through to 'inst_uuid'.
self.assertEqual(vd['inst_uuid'], inst['uuid'])
# check the other expected values
for k, v in mydata.items():
self.assertEqual(vd[k], v)
def _test_vendordata2_response_inner(self, request_mock, response_code,
include_rest_result=True):
request_mock.return_value.status_code = response_code
request_mock.return_value.text = '{"color": "blue"}'
with utils.tempdir() as tmpdir:
jsonfile = os.path.join(tmpdir, 'test.json')
with open(jsonfile, 'w') as f:
f.write(jsonutils.dumps({'ldap': '10.0.0.1',
'ad': '10.0.0.2'}))
self.flags(vendordata_providers=['StaticJSON', 'DynamicJSON'],
vendordata_jsonfile_path=jsonfile,
vendordata_dynamic_targets=[
'web@http://fake.com/foobar']
)
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self, inst)
# verify that 2013-10-17 has the vendor_data.json file
vdpath = "/openstack/2013-10-17/vendor_data.json"
vd = jsonutils.loads(mdinst.lookup(vdpath))
self.assertEqual('10.0.0.1', vd.get('ldap'))
self.assertEqual('10.0.0.2', vd.get('ad'))
# verify that 2016-10-06 works as well
vdpath = "/openstack/2016-10-06/vendor_data.json"
vd = jsonutils.loads(mdinst.lookup(vdpath))
self.assertEqual('10.0.0.1', vd.get('ldap'))
self.assertEqual('10.0.0.2', vd.get('ad'))
# verify the new format as well
vdpath = "/openstack/2016-10-06/vendor_data2.json"
vd = jsonutils.loads(mdinst.lookup(vdpath))
self.assertEqual('10.0.0.1', vd['static'].get('ldap'))
self.assertEqual('10.0.0.2', vd['static'].get('ad'))
if include_rest_result:
self.assertEqual('blue', vd['web'].get('color'))
else:
self.assertEqual({}, vd['web'])
@mock.patch.object(requests, 'request')
def test_vendor_data_response_vendordata2_ok(self, request_mock):
self._test_vendordata2_response_inner(request_mock,
requests.codes.OK)
@mock.patch.object(requests, 'request')
def test_vendor_data_response_vendordata2_created(self, request_mock):
self._test_vendordata2_response_inner(request_mock,
requests.codes.CREATED)
@mock.patch.object(requests, 'request')
def test_vendor_data_response_vendordata2_accepted(self, request_mock):
self._test_vendordata2_response_inner(request_mock,
requests.codes.ACCEPTED)
@mock.patch.object(requests, 'request')
def test_vendor_data_response_vendordata2_no_content(self, request_mock):
self._test_vendordata2_response_inner(request_mock,
requests.codes.NO_CONTENT,
include_rest_result=False)
def _test_vendordata2_response_inner_exceptional(
self, request_mock, log_mock, exc):
request_mock.side_effect = exc('Ta da!')
with utils.tempdir() as tmpdir:
jsonfile = os.path.join(tmpdir, 'test.json')
with open(jsonfile, 'w') as f:
f.write(jsonutils.dumps({'ldap': '10.0.0.1',
'ad': '10.0.0.2'}))
self.flags(vendordata_providers=['StaticJSON', 'DynamicJSON'],
vendordata_jsonfile_path=jsonfile,
vendordata_dynamic_targets=[
'web@http://fake.com/foobar']
)
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self, inst)
# verify the new format as well
vdpath = "/openstack/2016-10-06/vendor_data2.json"
vd = jsonutils.loads(mdinst.lookup(vdpath))
self.assertEqual('10.0.0.1', vd['static'].get('ldap'))
self.assertEqual('10.0.0.2', vd['static'].get('ad'))
            # an exception should result in nothing being added, but no error
self.assertEqual({}, vd['web'])
self.assertTrue(log_mock.called)
@mock.patch.object(vendordata_dynamic.LOG, 'warning')
@mock.patch.object(requests, 'request')
def test_vendor_data_response_vendordata2_type_error(self, request_mock,
log_mock):
self._test_vendordata2_response_inner_exceptional(
request_mock, log_mock, TypeError)
@mock.patch.object(vendordata_dynamic.LOG, 'warning')
@mock.patch.object(requests, 'request')
def test_vendor_data_response_vendordata2_value_error(self, request_mock,
log_mock):
self._test_vendordata2_response_inner_exceptional(
request_mock, log_mock, ValueError)
@mock.patch.object(vendordata_dynamic.LOG, 'warning')
@mock.patch.object(requests, 'request')
def test_vendor_data_response_vendordata2_request_error(self,
request_mock,
log_mock):
self._test_vendordata2_response_inner_exceptional(
request_mock, log_mock, requests.exceptions.RequestException)
@mock.patch.object(vendordata_dynamic.LOG, 'warning')
@mock.patch.object(requests, 'request')
def test_vendor_data_response_vendordata2_ssl_error(self,
request_mock,
log_mock):
self._test_vendordata2_response_inner_exceptional(
request_mock, log_mock, requests.exceptions.SSLError)
def test_network_data_presence(self):
inst = self.instance.obj_clone()
mdinst = fake_InstanceMetadata(self, inst)
# verify that 2015-10-15 has the network_data.json file
result = mdinst.lookup("/openstack/2015-10-15")
self.assertIn('network_data.json', result)
        # verify that older versions do not have it
result = mdinst.lookup("/openstack/2013-10-17")
self.assertNotIn('network_data.json', result)
def test_network_data_response(self):
inst = self.instance.obj_clone()
nw_data = {
"links": [{"ethernet_mac_address": "aa:aa:aa:aa:aa:aa",
"id": "nic0", "type": "ethernet", "vif_id": 1,
"mtu": 1500}],
"networks": [{"id": "network0", "ip_address": "10.10.0.2",
"link": "nic0", "netmask": "255.255.255.0",
"network_id":
"00000000-0000-0000-0000-000000000000",
"routes": [], "type": "ipv4"}],
"services": [{'address': '1.2.3.4', 'type': 'dns'}]}
mdinst = fake_InstanceMetadata(self, inst,
network_metadata=nw_data)
# verify that 2015-10-15 has the network_data.json file
nwpath = "/openstack/2015-10-15/network_data.json"
nw = jsonutils.loads(mdinst.lookup(nwpath))
# check the other expected values
for k, v in nw_data.items():
self.assertEqual(nw[k], v)
class MetadataHandlerTestCase(test.TestCase):
"""Test that metadata is returning proper values."""
def setUp(self):
super(MetadataHandlerTestCase, self).setUp()
fake_network.stub_out_nw_api_get_instance_nw_info(self)
self.context = context.RequestContext('fake', 'fake')
self.instance = fake_inst_obj(self.context)
self.flags(use_local=True, group='conductor')
self.mdinst = fake_InstanceMetadata(self, self.instance,
address=None, sgroups=None)
def test_callable(self):
def verify(req, meta_data):
self.assertIsInstance(meta_data, CallableMD)
return "foo"
class CallableMD(object):
def lookup(self, path_info):
return verify
response = fake_request(self, CallableMD(), "/bar")
self.assertEqual(response.status_int, 200)
self.assertEqual(response.body, "foo")
def test_root(self):
expected = "\n".join(base.VERSIONS) + "\nlatest"
response = fake_request(self, self.mdinst, "/")
self.assertEqual(response.body, expected)
response = fake_request(self, self.mdinst, "/foo/../")
self.assertEqual(response.body, expected)
def test_root_metadata_proxy_enabled(self):
self.flags(service_metadata_proxy=True,
group='neutron')
expected = "\n".join(base.VERSIONS) + "\nlatest"
response = fake_request(self, self.mdinst, "/")
self.assertEqual(response.body, expected)
response = fake_request(self, self.mdinst, "/foo/../")
self.assertEqual(response.body, expected)
def test_version_root(self):
response = fake_request(self, self.mdinst, "/2009-04-04")
response_ctype = response.headers['Content-Type']
self.assertTrue(response_ctype.startswith("text/plain"))
self.assertEqual(response.body, 'meta-data/\nuser-data')
response = fake_request(self, self.mdinst, "/9999-99-99")
self.assertEqual(response.status_int, 404)
def test_json_data(self):
fakes.stub_out_key_pair_funcs(self)
response = fake_request(self, self.mdinst,
"/openstack/latest/meta_data.json")
response_ctype = response.headers['Content-Type']
self.assertTrue(response_ctype.startswith("application/json"))
response = fake_request(self, self.mdinst,
"/openstack/latest/vendor_data.json")
response_ctype = response.headers['Content-Type']
self.assertTrue(response_ctype.startswith("application/json"))
def test_user_data_non_existing_fixed_address(self):
self.stub_out('nova.network.api.API.get_fixed_ip_by_address',
return_non_existing_address)
response = fake_request(None, self.mdinst, "/2009-04-04/user-data",
"127.1.1.1")
self.assertEqual(response.status_int, 404)
def test_fixed_address_none(self):
response = fake_request(None, self.mdinst,
relpath="/2009-04-04/user-data", address=None)
self.assertEqual(response.status_int, 500)
def test_invalid_path_is_404(self):
response = fake_request(self, self.mdinst,
relpath="/2009-04-04/user-data-invalid")
self.assertEqual(response.status_int, 404)
def test_user_data_with_use_forwarded_header(self):
expected_addr = "192.192.192.2"
def fake_get_metadata(self_gm, address):
if address == expected_addr:
return self.mdinst
else:
raise Exception("Expected addr of %s, got %s" %
(expected_addr, address))
self.flags(use_forwarded_for=True)
response = fake_request(self, self.mdinst,
relpath="/2009-04-04/user-data",
address="168.168.168.1",
fake_get_metadata=fake_get_metadata,
headers={'X-Forwarded-For': expected_addr})
self.assertEqual(response.status_int, 200)
response_ctype = response.headers['Content-Type']
self.assertTrue(response_ctype.startswith("text/plain"))
self.assertEqual(response.body,
base64.b64decode(self.instance['user_data']))
response = fake_request(self, self.mdinst,
relpath="/2009-04-04/user-data",
address="168.168.168.1",
fake_get_metadata=fake_get_metadata,
headers=None)
self.assertEqual(response.status_int, 500)
@mock.patch('oslo_utils.secretutils.constant_time_compare')
def test_by_instance_id_uses_constant_time_compare(self, mock_compare):
mock_compare.side_effect = test.TestingException
req = webob.Request.blank('/')
hnd = handler.MetadataRequestHandler()
req.headers['X-Instance-ID'] = 'fake-inst'
req.headers['X-Instance-ID-Signature'] = 'fake-sig'
req.headers['X-Tenant-ID'] = 'fake-proj'
self.assertRaises(test.TestingException,
hnd._handle_instance_id_request, req)
self.assertEqual(1, mock_compare.call_count)
def _fake_x_get_metadata(self, self_app, instance_id, remote_address):
if remote_address is None:
            raise Exception('Expected X-Forwarded-For header')
elif instance_id == self.expected_instance_id:
return self.mdinst
else:
# raise the exception to aid with 500 response code test
raise Exception("Expected instance_id of %s, got %s" %
(self.expected_instance_id, instance_id))
def test_user_data_with_neutron_instance_id(self):
self.expected_instance_id = 'a-b-c-d'
signed = hmac.new(
CONF.neutron.metadata_proxy_shared_secret,
self.expected_instance_id,
hashlib.sha256).hexdigest()
# try a request with service disabled
response = fake_request(
self, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
headers={'X-Instance-ID': 'a-b-c-d',
'X-Tenant-ID': 'test',
'X-Instance-ID-Signature': signed})
self.assertEqual(response.status_int, 200)
# now enable the service
self.flags(service_metadata_proxy=True,
group='neutron')
response = fake_request(
self, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Instance-ID': 'a-b-c-d',
'X-Tenant-ID': 'test',
'X-Instance-ID-Signature': signed})
self.assertEqual(response.status_int, 200)
response_ctype = response.headers['Content-Type']
self.assertTrue(response_ctype.startswith("text/plain"))
self.assertEqual(response.body,
base64.b64decode(self.instance['user_data']))
# mismatched signature
response = fake_request(
self, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Instance-ID': 'a-b-c-d',
'X-Tenant-ID': 'test',
'X-Instance-ID-Signature': ''})
self.assertEqual(response.status_int, 403)
# missing X-Tenant-ID from request
response = fake_request(
self, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Instance-ID': 'a-b-c-d',
'X-Instance-ID-Signature': signed})
self.assertEqual(response.status_int, 400)
# mismatched X-Tenant-ID
response = fake_request(
self, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Instance-ID': 'a-b-c-d',
'X-Tenant-ID': 'FAKE',
'X-Instance-ID-Signature': signed})
self.assertEqual(response.status_int, 404)
# without X-Forwarded-For
response = fake_request(
self, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Instance-ID': 'a-b-c-d',
'X-Tenant-ID': 'test',
'X-Instance-ID-Signature': signed})
self.assertEqual(response.status_int, 500)
# unexpected Instance-ID
signed = hmac.new(
CONF.neutron.metadata_proxy_shared_secret,
'z-z-z-z',
hashlib.sha256).hexdigest()
response = fake_request(
self, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Instance-ID': 'z-z-z-z',
'X-Tenant-ID': 'test',
'X-Instance-ID-Signature': signed})
self.assertEqual(response.status_int, 500)
def test_get_metadata(self):
def _test_metadata_path(relpath):
            # recursively confirm an HTTP 200 from all meta-data elements
# available at relpath.
response = fake_request(self, self.mdinst,
relpath=relpath)
for item in response.body.split('\n'):
if 'public-keys' in relpath:
# meta-data/public-keys/0=keyname refers to
# meta-data/public-keys/0
item = item.split('=')[0]
if item.endswith('/'):
path = relpath + '/' + item
_test_metadata_path(path)
continue
path = relpath + '/' + item
response = fake_request(self, self.mdinst, relpath=path)
self.assertEqual(response.status_int, 200, message=path)
_test_metadata_path('/2009-04-04/meta-data')
def _metadata_handler_with_instance_id(self, hnd):
expected_instance_id = 'a-b-c-d'
signed = hmac.new(
CONF.neutron.metadata_proxy_shared_secret,
expected_instance_id,
hashlib.sha256).hexdigest()
self.flags(service_metadata_proxy=True, group='neutron')
response = fake_request(
None, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata=False,
app=hnd,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Instance-ID': 'a-b-c-d',
'X-Tenant-ID': 'test',
'X-Instance-ID-Signature': signed})
self.assertEqual(200, response.status_int)
self.assertEqual(base64.b64decode(self.instance['user_data']),
response.body)
@mock.patch.object(base, 'get_metadata_by_instance_id')
def test_metadata_handler_with_instance_id(self, get_by_uuid):
# test twice to ensure that the cache works
get_by_uuid.return_value = self.mdinst
self.flags(metadata_cache_expiration=15)
hnd = handler.MetadataRequestHandler()
self._metadata_handler_with_instance_id(hnd)
self._metadata_handler_with_instance_id(hnd)
self.assertEqual(1, get_by_uuid.call_count)
@mock.patch.object(base, 'get_metadata_by_instance_id')
def test_metadata_handler_with_instance_id_no_cache(self, get_by_uuid):
# test twice to ensure that disabling the cache works
get_by_uuid.return_value = self.mdinst
self.flags(metadata_cache_expiration=0)
hnd = handler.MetadataRequestHandler()
self._metadata_handler_with_instance_id(hnd)
self._metadata_handler_with_instance_id(hnd)
self.assertEqual(2, get_by_uuid.call_count)
def _metadata_handler_with_remote_address(self, hnd):
response = fake_request(
None, self.mdinst,
fake_get_metadata=False,
app=hnd,
relpath="/2009-04-04/user-data",
address="192.192.192.2")
self.assertEqual(200, response.status_int)
self.assertEqual(base64.b64decode(self.instance.user_data),
response.body)
@mock.patch.object(base, 'get_metadata_by_address')
def test_metadata_handler_with_remote_address(self, get_by_uuid):
# test twice to ensure that the cache works
get_by_uuid.return_value = self.mdinst
self.flags(metadata_cache_expiration=15)
hnd = handler.MetadataRequestHandler()
self._metadata_handler_with_remote_address(hnd)
self._metadata_handler_with_remote_address(hnd)
self.assertEqual(1, get_by_uuid.call_count)
@mock.patch.object(base, 'get_metadata_by_address')
def test_metadata_handler_with_remote_address_no_cache(self, get_by_uuid):
# test twice to ensure that disabling the cache works
get_by_uuid.return_value = self.mdinst
self.flags(metadata_cache_expiration=0)
hnd = handler.MetadataRequestHandler()
self._metadata_handler_with_remote_address(hnd)
self._metadata_handler_with_remote_address(hnd)
self.assertEqual(2, get_by_uuid.call_count)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_metadata_lb_proxy(self, mock_get_client):
self.flags(service_metadata_proxy=True, group='neutron')
self.expected_instance_id = 'a-b-c-d'
# with X-Metadata-Provider
proxy_lb_id = 'edge-x'
mock_client = mock_get_client()
mock_client.list_ports.return_value = {
'ports': [{'device_id': 'a-b-c-d', 'tenant_id': 'test'}]}
mock_client.list_subnets.return_value = {
'subnets': [{'network_id': 'f-f-f-f'}]}
response = fake_request(
self, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Metadata-Provider': proxy_lb_id})
self.assertEqual(200, response.status_int)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_metadata_lb_proxy_chain(self, mock_get_client):
self.flags(service_metadata_proxy=True, group='neutron')
self.expected_instance_id = 'a-b-c-d'
# with X-Metadata-Provider
proxy_lb_id = 'edge-x'
def fake_list_ports(ctx, **kwargs):
if kwargs.get('fixed_ips') == 'ip_address=192.192.192.2':
return {
'ports': [{
'device_id': 'a-b-c-d',
'tenant_id': 'test'}]}
else:
return {'ports':
[]}
mock_client = mock_get_client()
mock_client.list_ports.side_effect = fake_list_ports
mock_client.list_subnets.return_value = {
'subnets': [{'network_id': 'f-f-f-f'}]}
response = fake_request(
self, self.mdinst,
relpath="/2009-04-04/user-data",
address="10.10.10.10",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2, 10.10.10.10',
'X-Metadata-Provider': proxy_lb_id})
self.assertEqual(200, response.status_int)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_metadata_lb_proxy_signed(self, mock_get_client):
shared_secret = "testing1234"
self.flags(
metadata_proxy_shared_secret=shared_secret,
service_metadata_proxy=True, group='neutron')
self.expected_instance_id = 'a-b-c-d'
# with X-Metadata-Provider
proxy_lb_id = 'edge-x'
signature = hmac.new(
shared_secret,
proxy_lb_id,
hashlib.sha256).hexdigest()
mock_client = mock_get_client()
mock_client.list_ports.return_value = {
'ports': [{'device_id': 'a-b-c-d', 'tenant_id': 'test'}]}
mock_client.list_subnets.return_value = {
'subnets': [{'network_id': 'f-f-f-f'}]}
response = fake_request(
self, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Metadata-Provider': proxy_lb_id,
'X-Metadata-Provider-Signature': signature})
self.assertEqual(200, response.status_int)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_metadata_lb_proxy_signed_fail(self, mock_get_client):
shared_secret = "testing1234"
bad_secret = "testing3468"
self.flags(
metadata_proxy_shared_secret=shared_secret,
service_metadata_proxy=True, group='neutron')
self.expected_instance_id = 'a-b-c-d'
# with X-Metadata-Provider
proxy_lb_id = 'edge-x'
signature = hmac.new(
bad_secret,
proxy_lb_id,
hashlib.sha256).hexdigest()
mock_client = mock_get_client()
mock_client.list_ports.return_value = {
'ports': [{'device_id': 'a-b-c-d', 'tenant_id': 'test'}]}
mock_client.list_subnets.return_value = {
'subnets': [{'network_id': 'f-f-f-f'}]}
response = fake_request(
self, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
headers={'X-Forwarded-For': '192.192.192.2',
'X-Metadata-Provider': proxy_lb_id,
'X-Metadata-Provider-Signature': signature})
self.assertEqual(403, response.status_int)
@mock.patch.object(context, 'get_admin_context')
@mock.patch.object(network_api, 'API')
def test_get_metadata_by_address(self, mock_net_api, mock_get_context):
mock_get_context.return_value = 'CONTEXT'
api = mock.Mock()
fixed_ip = objects.FixedIP(
instance_uuid='2bfd8d71-6b69-410c-a2f5-dbca18d02966')
api.get_fixed_ip_by_address.return_value = fixed_ip
mock_net_api.return_value = api
with mock.patch.object(base, 'get_metadata_by_instance_id') as gmd:
base.get_metadata_by_address('foo')
api.get_fixed_ip_by_address.assert_called_once_with(
'CONTEXT', 'foo')
gmd.assert_called_once_with(fixed_ip.instance_uuid, 'foo', 'CONTEXT')
@mock.patch.object(context, 'get_admin_context')
@mock.patch.object(objects.Instance, 'get_by_uuid')
def test_get_metadata_by_instance_id(self, mock_uuid, mock_context):
inst = objects.Instance()
mock_uuid.return_value = inst
with mock.patch.object(base, 'InstanceMetadata') as imd:
base.get_metadata_by_instance_id('foo', 'bar', ctxt='CONTEXT')
        self.assertFalse(mock_context.called, "get_admin_context() should not "
"have been called, the context was given")
mock_uuid.assert_called_once_with('CONTEXT', 'foo',
expected_attrs=['ec2_ids', 'flavor', 'info_cache', 'metadata',
'system_metadata', 'security_groups', 'keypairs',
'device_metadata'])
imd.assert_called_once_with(inst, 'bar')
@mock.patch.object(context, 'get_admin_context')
@mock.patch.object(objects.Instance, 'get_by_uuid')
def test_get_metadata_by_instance_id_null_context(self,
mock_uuid, mock_context):
inst = objects.Instance()
mock_uuid.return_value = inst
mock_context.return_value = 'CONTEXT'
with mock.patch.object(base, 'InstanceMetadata') as imd:
base.get_metadata_by_instance_id('foo', 'bar')
mock_context.assert_called_once_with()
mock_uuid.assert_called_once_with('CONTEXT', 'foo',
expected_attrs=['ec2_ids', 'flavor', 'info_cache', 'metadata',
'system_metadata', 'security_groups', 'keypairs',
'device_metadata'])
imd.assert_called_once_with(inst, 'bar')
class MetadataPasswordTestCase(test.TestCase):
def setUp(self):
super(MetadataPasswordTestCase, self).setUp()
fake_network.stub_out_nw_api_get_instance_nw_info(self)
self.context = context.RequestContext('fake', 'fake')
self.instance = fake_inst_obj(self.context)
self.flags(use_local=True, group='conductor')
self.mdinst = fake_InstanceMetadata(self, self.instance,
address=None, sgroups=None)
def test_get_password(self):
request = webob.Request.blank('')
self.mdinst.password = 'foo'
result = password.handle_password(request, self.mdinst)
self.assertEqual(result, 'foo')
def test_bad_method(self):
request = webob.Request.blank('')
request.method = 'PUT'
self.assertRaises(webob.exc.HTTPBadRequest,
password.handle_password, request, self.mdinst)
@mock.patch('nova.objects.Instance.get_by_uuid')
def _try_set_password(self, get_by_uuid, val='bar'):
request = webob.Request.blank('')
request.method = 'POST'
request.body = val
get_by_uuid.return_value = self.instance
with mock.patch.object(self.instance, 'save') as save:
password.handle_password(request, self.mdinst)
save.assert_called_once_with()
self.assertIn('password_0', self.instance.system_metadata)
def test_set_password(self):
self.mdinst.password = ''
self._try_set_password()
def test_conflict(self):
self.mdinst.password = 'foo'
self.assertRaises(webob.exc.HTTPConflict,
self._try_set_password)
def test_too_large(self):
self.mdinst.password = ''
self.assertRaises(webob.exc.HTTPBadRequest,
self._try_set_password,
val=('a' * (password.MAX_SIZE + 1)))
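# Illustrative sketch (not part of nova's source): the signature checks that
# the tests above exercise amount to recomputing an HMAC-SHA256 of the
# instance id (or the X-Metadata-Provider value) with the shared secret and
# comparing it against the request header. ``shared_secret``, ``instance_id``
# and ``header_signature`` are placeholder names here.
#
#     expected = hmac.new(shared_secret, instance_id,
#                         hashlib.sha256).hexdigest()
#     if not hmac.compare_digest(expected, header_signature):
#         raise webob.exc.HTTPForbidden()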
| apache-2.0 |
yongtang/tensorflow | tensorflow/python/keras/utils/vis_utils.py | 6 | 12161 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=g-import-not-at-top
"""Utilities related to model visualization."""
import os
import sys
from tensorflow.python.keras.utils.io_utils import path_to_string
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
try:
# pydot-ng is a fork of pydot that is better maintained.
import pydot_ng as pydot
except ImportError:
# pydotplus is an improved version of pydot
try:
import pydotplus as pydot
except ImportError:
# Fall back on pydot if necessary.
try:
import pydot
except ImportError:
pydot = None
def check_pydot():
"""Returns True if PyDot and Graphviz are available."""
if pydot is None:
return False
try:
# Attempt to create an image of a blank graph
# to check the pydot/graphviz installation.
pydot.Dot.create(pydot.Dot())
return True
except (OSError, pydot.InvocationException):
return False
def is_wrapped_model(layer):
from tensorflow.python.keras.engine import functional
from tensorflow.python.keras.layers import wrappers
return (isinstance(layer, wrappers.Wrapper) and
isinstance(layer.layer, functional.Functional))
def add_edge(dot, src, dst):
if not dot.get_edge(src, dst):
dot.add_edge(pydot.Edge(src, dst))
@keras_export('keras.utils.model_to_dot')
def model_to_dot(model,
show_shapes=False,
show_dtype=False,
show_layer_names=True,
rankdir='TB',
expand_nested=False,
dpi=96,
subgraph=False):
"""Convert a Keras model to dot format.
Args:
model: A Keras model instance.
show_shapes: whether to display shape information.
show_dtype: whether to display layer dtypes.
show_layer_names: whether to display layer names.
rankdir: `rankdir` argument passed to PyDot,
a string specifying the format of the plot:
'TB' creates a vertical plot;
'LR' creates a horizontal plot.
expand_nested: whether to expand nested models into clusters.
dpi: Dots per inch.
subgraph: whether to return a `pydot.Cluster` instance.
Returns:
A `pydot.Dot` instance representing the Keras model or
a `pydot.Cluster` instance representing nested model if
`subgraph=True`.
Raises:
ImportError: if graphviz or pydot are not available.
"""
from tensorflow.python.keras.layers import wrappers
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import functional
if not check_pydot():
message = (
'You must install pydot (`pip install pydot`) '
'and install graphviz '
        '(see instructions at https://graphviz.gitlab.io/download/) '
'for plot_model/model_to_dot to work.')
if 'IPython.core.magics.namespace' in sys.modules:
# We don't raise an exception here in order to avoid crashing notebook
# tests where graphviz is not available.
print(message)
return
else:
raise ImportError(message)
if subgraph:
dot = pydot.Cluster(style='dashed', graph_name=model.name)
dot.set('label', model.name)
dot.set('labeljust', 'l')
else:
dot = pydot.Dot()
dot.set('rankdir', rankdir)
dot.set('concentrate', True)
dot.set('dpi', dpi)
dot.set_node_defaults(shape='record')
sub_n_first_node = {}
sub_n_last_node = {}
sub_w_first_node = {}
sub_w_last_node = {}
layers = model.layers
if not model._is_graph_network:
node = pydot.Node(str(id(model)), label=model.name)
dot.add_node(node)
return dot
elif isinstance(model, sequential.Sequential):
if not model.built:
model.build()
layers = super(sequential.Sequential, model).layers
# Create graph nodes.
for i, layer in enumerate(layers):
layer_id = str(id(layer))
# Append a wrapped layer's label to node's label, if it exists.
layer_name = layer.name
class_name = layer.__class__.__name__
if isinstance(layer, wrappers.Wrapper):
if expand_nested and isinstance(layer.layer,
functional.Functional):
submodel_wrapper = model_to_dot(
layer.layer,
show_shapes,
show_dtype,
show_layer_names,
rankdir,
expand_nested,
subgraph=True)
# sub_w : submodel_wrapper
sub_w_nodes = submodel_wrapper.get_nodes()
sub_w_first_node[layer.layer.name] = sub_w_nodes[0]
sub_w_last_node[layer.layer.name] = sub_w_nodes[-1]
dot.add_subgraph(submodel_wrapper)
else:
layer_name = '{}({})'.format(layer_name, layer.layer.name)
child_class_name = layer.layer.__class__.__name__
class_name = '{}({})'.format(class_name, child_class_name)
if expand_nested and isinstance(layer, functional.Functional):
submodel_not_wrapper = model_to_dot(
layer,
show_shapes,
show_dtype,
show_layer_names,
rankdir,
expand_nested,
subgraph=True)
# sub_n : submodel_not_wrapper
sub_n_nodes = submodel_not_wrapper.get_nodes()
sub_n_first_node[layer.name] = sub_n_nodes[0]
sub_n_last_node[layer.name] = sub_n_nodes[-1]
dot.add_subgraph(submodel_not_wrapper)
# Create node's label.
if show_layer_names:
label = '{}: {}'.format(layer_name, class_name)
else:
label = class_name
# Rebuild the label as a table including the layer's dtype.
if show_dtype:
def format_dtype(dtype):
if dtype is None:
return '?'
else:
return str(dtype)
label = '%s|%s' % (label, format_dtype(layer.dtype))
# Rebuild the label as a table including input/output shapes.
if show_shapes:
def format_shape(shape):
return str(shape).replace(str(None), 'None')
try:
outputlabels = format_shape(layer.output_shape)
except AttributeError:
outputlabels = '?'
if hasattr(layer, 'input_shape'):
inputlabels = format_shape(layer.input_shape)
elif hasattr(layer, 'input_shapes'):
inputlabels = ', '.join(
[format_shape(ishape) for ishape in layer.input_shapes])
else:
inputlabels = '?'
label = '%s\n|{input:|output:}|{{%s}|{%s}}' % (label,
inputlabels,
outputlabels)
if not expand_nested or not isinstance(
layer, functional.Functional):
node = pydot.Node(layer_id, label=label)
dot.add_node(node)
# Connect nodes with edges.
for layer in layers:
layer_id = str(id(layer))
for i, node in enumerate(layer._inbound_nodes):
node_key = layer.name + '_ib-' + str(i)
if node_key in model._network_nodes:
for inbound_layer in nest.flatten(node.inbound_layers):
inbound_layer_id = str(id(inbound_layer))
if not expand_nested:
assert dot.get_node(inbound_layer_id)
assert dot.get_node(layer_id)
add_edge(dot, inbound_layer_id, layer_id)
else:
# if inbound_layer is not Model or wrapped Model
if (not isinstance(inbound_layer,
functional.Functional) and
not is_wrapped_model(inbound_layer)):
# if current layer is not Model or wrapped Model
if (not isinstance(layer, functional.Functional) and
not is_wrapped_model(layer)):
assert dot.get_node(inbound_layer_id)
assert dot.get_node(layer_id)
add_edge(dot, inbound_layer_id, layer_id)
# if current layer is Model
elif isinstance(layer, functional.Functional):
add_edge(dot, inbound_layer_id,
sub_n_first_node[layer.name].get_name())
# if current layer is wrapped Model
elif is_wrapped_model(layer):
add_edge(dot, inbound_layer_id, layer_id)
name = sub_w_first_node[layer.layer.name].get_name()
add_edge(dot, layer_id, name)
# if inbound_layer is Model
elif isinstance(inbound_layer, functional.Functional):
name = sub_n_last_node[inbound_layer.name].get_name()
if isinstance(layer, functional.Functional):
output_name = sub_n_first_node[layer.name].get_name()
add_edge(dot, name, output_name)
else:
add_edge(dot, name, layer_id)
# if inbound_layer is wrapped Model
elif is_wrapped_model(inbound_layer):
inbound_layer_name = inbound_layer.layer.name
add_edge(dot,
sub_w_last_node[inbound_layer_name].get_name(),
layer_id)
return dot
@keras_export('keras.utils.plot_model')
def plot_model(model,
to_file='model.png',
show_shapes=False,
show_dtype=False,
show_layer_names=True,
rankdir='TB',
expand_nested=False,
dpi=96):
"""Converts a Keras model to dot format and save to a file.
Example:
```python
input = tf.keras.Input(shape=(100,), dtype='int32', name='input')
x = tf.keras.layers.Embedding(
output_dim=512, input_dim=10000, input_length=100)(input)
x = tf.keras.layers.LSTM(32)(x)
x = tf.keras.layers.Dense(64, activation='relu')(x)
x = tf.keras.layers.Dense(64, activation='relu')(x)
x = tf.keras.layers.Dense(64, activation='relu')(x)
output = tf.keras.layers.Dense(1, activation='sigmoid', name='output')(x)
model = tf.keras.Model(inputs=[input], outputs=[output])
dot_img_file = '/tmp/model_1.png'
tf.keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
```
Args:
model: A Keras model instance
to_file: File name of the plot image.
show_shapes: whether to display shape information.
show_dtype: whether to display layer dtypes.
show_layer_names: whether to display layer names.
rankdir: `rankdir` argument passed to PyDot,
a string specifying the format of the plot:
'TB' creates a vertical plot;
'LR' creates a horizontal plot.
expand_nested: Whether to expand nested models into clusters.
dpi: Dots per inch.
Returns:
A Jupyter notebook Image object if Jupyter is installed.
This enables in-line display of the model plots in notebooks.
"""
dot = model_to_dot(
model,
show_shapes=show_shapes,
show_dtype=show_dtype,
show_layer_names=show_layer_names,
rankdir=rankdir,
expand_nested=expand_nested,
dpi=dpi)
to_file = path_to_string(to_file)
if dot is None:
return
_, extension = os.path.splitext(to_file)
if not extension:
extension = 'png'
else:
extension = extension[1:]
# Save image to disk.
dot.write(to_file, format=extension)
# Return the image as a Jupyter Image object, to be displayed in-line.
# Note that we cannot easily detect whether the code is running in a
# notebook, and thus we always return the Image if Jupyter is available.
if extension != 'pdf':
try:
from IPython import display
return display.Image(filename=to_file)
except ImportError:
pass
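# Illustrative usage sketch (not part of this module): outside a notebook the
# returned graph can be rendered manually; ``create`` is pydot's own
# rendering call and the toy model below is a placeholder.
#
#     model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
#     dot = model_to_dot(model, show_shapes=True, show_dtype=True)
#     if dot is not None:  # None when pydot/graphviz are missing
#         svg_bytes = dot.create(prog='dot', format='svg')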
| apache-2.0 |
elkingtonmcb/django | django/template/loaders/locmem.py | 464 | 1194 | """
Wrapper for loading templates from a plain Python dict.
"""
import warnings
from django.template import Origin, TemplateDoesNotExist
from django.utils.deprecation import RemovedInDjango20Warning
from .base import Loader as BaseLoader
class Loader(BaseLoader):
def __init__(self, engine, templates_dict):
self.templates_dict = templates_dict
super(Loader, self).__init__(engine)
def get_contents(self, origin):
try:
return self.templates_dict[origin.name]
except KeyError:
raise TemplateDoesNotExist(origin)
def get_template_sources(self, template_name):
yield Origin(
name=template_name,
template_name=template_name,
loader=self,
)
def load_template_source(self, template_name, template_dirs=None):
warnings.warn(
            'The load_template_source() method is deprecated. Use '
'get_template() or get_contents() instead.',
RemovedInDjango20Warning,
)
try:
return self.templates_dict[template_name], template_name
except KeyError:
raise TemplateDoesNotExist(template_name)
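# Usage sketch (illustrative, not part of Django's source): the dict given as
# the second element of a loader tuple is passed to ``__init__`` as
# ``templates_dict``.
#
#     from django.template import Context, Engine
#     engine = Engine(loaders=[
#         ('django.template.loaders.locmem.Loader',
#          {'hello.html': 'Hello {{ name }}!'}),
#     ])
#     print(engine.get_template('hello.html').render(Context({'name': 'world'})))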
| bsd-3-clause |
bxshi/gem5 | src/mem/ruby/system/DirectoryMemory.py | 16 | 2226 | # Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Brad Beckmann
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
class RubyDirectoryMemory(SimObject):
type = 'RubyDirectoryMemory'
cxx_class = 'DirectoryMemory'
cxx_header = "mem/ruby/system/DirectoryMemory.hh"
version = Param.Int(0, "")
size = Param.MemorySize("1GB", "capacity in bytes")
use_map = Param.Bool(False, "enable sparse memory")
map_levels = Param.Int(4, "sparse memory map levels")
# the default value of the numa high bit is specified in the command line
# option and must be passed into the directory memory sim object
numa_high_bit = Param.Int("numa high bit")
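# Illustrative sketch (not from gem5 itself): a Ruby protocol configuration
# script would typically create one directory memory per directory
# controller, e.g.:
#
#     dir_cntrl.directory = RubyDirectoryMemory(
#         version=i, size='512MB', numa_high_bit=numa_bit)
#
# ``dir_cntrl``, ``i`` and ``numa_bit`` are placeholders supplied by the
# surrounding configuration code.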
| bsd-3-clause |
fyndsi/Django-facebook | docs/docs_env/Lib/site-packages/pip-1.0-py2.5.egg/pip/log.py | 33 | 6246 | """Logging
"""
import sys
import logging
class Logger(object):
"""
Logging object for use in command-line script. Allows ranges of
levels, to avoid some redundancy of displayed information.
"""
VERBOSE_DEBUG = logging.DEBUG-1
DEBUG = logging.DEBUG
INFO = logging.INFO
NOTIFY = (logging.INFO+logging.WARN)/2
WARN = WARNING = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
LEVELS = [VERBOSE_DEBUG, DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]
def __init__(self):
self.consumers = []
self.indent = 0
self.explicit_levels = False
self.in_progress = None
self.in_progress_hanging = False
def debug(self, msg, *args, **kw):
self.log(self.DEBUG, msg, *args, **kw)
def info(self, msg, *args, **kw):
self.log(self.INFO, msg, *args, **kw)
def notify(self, msg, *args, **kw):
self.log(self.NOTIFY, msg, *args, **kw)
def warn(self, msg, *args, **kw):
self.log(self.WARN, msg, *args, **kw)
def error(self, msg, *args, **kw):
        self.log(self.ERROR, msg, *args, **kw)
def fatal(self, msg, *args, **kw):
self.log(self.FATAL, msg, *args, **kw)
def log(self, level, msg, *args, **kw):
if args:
if kw:
raise TypeError(
"You may give positional or keyword arguments, not both")
args = args or kw
rendered = None
for consumer_level, consumer in self.consumers:
if self.level_matches(level, consumer_level):
if (self.in_progress_hanging
and consumer in (sys.stdout, sys.stderr)):
self.in_progress_hanging = False
sys.stdout.write('\n')
sys.stdout.flush()
if rendered is None:
if args:
rendered = msg % args
else:
rendered = msg
rendered = ' '*self.indent + rendered
if self.explicit_levels:
## FIXME: should this be a name, not a level number?
rendered = '%02i %s' % (level, rendered)
if hasattr(consumer, 'write'):
consumer.write(rendered+'\n')
else:
consumer(rendered)
def _show_progress(self):
"""Should we display download progress?"""
return (self.stdout_level_matches(self.NOTIFY) and sys.stdout.isatty())
def start_progress(self, msg):
assert not self.in_progress, (
"Tried to start_progress(%r) while in_progress %r"
% (msg, self.in_progress))
if self._show_progress():
sys.stdout.write(' '*self.indent + msg)
sys.stdout.flush()
self.in_progress_hanging = True
else:
self.in_progress_hanging = False
self.in_progress = msg
self.last_message = None
def end_progress(self, msg='done.'):
assert self.in_progress, (
"Tried to end_progress without start_progress")
if self._show_progress():
if not self.in_progress_hanging:
# Some message has been printed out since start_progress
sys.stdout.write('...' + self.in_progress + msg + '\n')
sys.stdout.flush()
else:
# These erase any messages shown with show_progress (besides .'s)
logger.show_progress('')
logger.show_progress('')
sys.stdout.write(msg + '\n')
sys.stdout.flush()
self.in_progress = None
self.in_progress_hanging = False
def show_progress(self, message=None):
"""If we are in a progress scope, and no log messages have been
shown, write out another '.'"""
if self.in_progress_hanging:
if message is None:
sys.stdout.write('.')
sys.stdout.flush()
else:
if self.last_message:
padding = ' ' * max(0, len(self.last_message)-len(message))
else:
padding = ''
sys.stdout.write('\r%s%s%s%s' % (' '*self.indent, self.in_progress, message, padding))
sys.stdout.flush()
self.last_message = message
def stdout_level_matches(self, level):
"""Returns true if a message at this level will go to stdout"""
return self.level_matches(level, self._stdout_level())
def _stdout_level(self):
"""Returns the level that stdout runs at"""
for level, consumer in self.consumers:
if consumer is sys.stdout:
return level
return self.FATAL
def level_matches(self, level, consumer_level):
"""
>>> l = Logger()
>>> l.level_matches(3, 4)
False
>>> l.level_matches(3, 2)
True
>>> l.level_matches(slice(None, 3), 3)
False
>>> l.level_matches(slice(None, 3), 2)
True
>>> l.level_matches(slice(1, 3), 1)
True
>>> l.level_matches(slice(2, 3), 1)
False
"""
if isinstance(level, slice):
start, stop = level.start, level.stop
if start is not None and start > consumer_level:
return False
            if stop is not None and stop <= consumer_level:
return False
return True
else:
return level >= consumer_level
@classmethod
def level_for_integer(cls, level):
levels = cls.LEVELS
if level < 0:
return levels[0]
if level >= len(levels):
return levels[-1]
return levels[level]
def move_stdout_to_stderr(self):
to_remove = []
to_add = []
for consumer_level, consumer in self.consumers:
if consumer == sys.stdout:
to_remove.append((consumer_level, consumer))
to_add.append((consumer_level, sys.stderr))
for item in to_remove:
self.consumers.remove(item)
self.consumers.extend(to_add)
logger = Logger()
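# Usage sketch (illustrative, not part of pip itself): consumers are
# ``(level, stream_or_callable)`` pairs, so a command-line front end would
# typically wire stdout at NOTIFY and log through the module-level ``logger``:
#
#     logger.consumers.append((Logger.NOTIFY, sys.stdout))
#     logger.notify('Downloading %s', 'some-package')
#     logger.indent += 2
#     logger.debug('only emitted if a DEBUG-level consumer is attached')
#     logger.indent -= 2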
| bsd-3-clause |
ric2b/Vivaldi-browser | chromium/third_party/blink/tools/blinkpy/style/patchreader.py | 2 | 2840 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Chris Jerdonek ([email protected])
# Copyright (C) 2010 ProFUSION embedded systems
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from blinkpy.common.checkout.diff_parser import DiffParser
_log = logging.getLogger(__name__)
class PatchReader(object):
"""Supports checking style in patches."""
def __init__(self, text_file_reader):
"""Create a PatchReader instance.
Args:
text_file_reader: A TextFileReader instance.
"""
self._text_file_reader = text_file_reader
def check(self, patch_string):
"""Checks style in the given patch."""
patch_files = DiffParser(patch_string.splitlines()).files
for path, diff_file in patch_files.iteritems():
line_numbers = diff_file.added_or_modified_line_numbers()
_log.debug('Found %s new or modified lines in: %s', len(line_numbers), path)
if not line_numbers:
# Don't check files which contain only deleted lines
# as they can never add style errors. However, mark them as
# processed so that we count up number of such files.
self._text_file_reader.count_delete_only_file()
continue
self._text_file_reader.process_file(file_path=path, line_numbers=line_numbers)
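# Illustrative wiring sketch (hypothetical, not part of this module): the
# style checker front end is assumed to build a TextFileReader around its
# processor and hand the diff text to PatchReader; ``filesystem`` and
# ``processor`` below are placeholders.
#
#     reader = TextFileReader(filesystem, processor)
#     PatchReader(reader).check(patch_text)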
| bsd-3-clause |
thedep2/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/myspass.py | 40 | 2670 | from __future__ import unicode_literals
import os.path
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
)
class MySpassIE(InfoExtractor):
_VALID_URL = r'http://www\.myspass\.de/.*'
_TEST = {
'url': 'http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/',
'md5': '0b49f4844a068f8b33f4b7c88405862b',
'info_dict': {
'id': '11741',
'ext': 'mp4',
"description": "Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?",
"title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2",
},
}
def _real_extract(self, url):
META_DATA_URL_TEMPLATE = 'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s'
# video id is the last path element of the URL
# usually there is a trailing slash, so also try the second but last
url_path = compat_urllib_parse_urlparse(url).path
url_parent_path, video_id = os.path.split(url_path)
if not video_id:
_, video_id = os.path.split(url_parent_path)
# get metadata
metadata_url = META_DATA_URL_TEMPLATE % video_id
metadata = self._download_xml(metadata_url, video_id)
# extract values from metadata
url_flv_el = metadata.find('url_flv')
if url_flv_el is None:
raise ExtractorError('Unable to extract download url')
video_url = url_flv_el.text
title_el = metadata.find('title')
if title_el is None:
raise ExtractorError('Unable to extract title')
title = title_el.text
format_id_el = metadata.find('format_id')
if format_id_el is None:
format = 'mp4'
else:
format = format_id_el.text
description_el = metadata.find('description')
if description_el is not None:
description = description_el.text
else:
description = None
imagePreview_el = metadata.find('imagePreview')
if imagePreview_el is not None:
thumbnail = imagePreview_el.text
else:
thumbnail = None
return {
'id': video_id,
'url': video_url,
'title': title,
'format': format,
'thumbnail': thumbnail,
'description': description,
}
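# Illustrative usage sketch (not part of the extractor): extractors are
# normally driven through YoutubeDL rather than instantiated directly; the
# import below assumes the standalone youtube_dl package rather than this
# vendored copy.
#
#     import youtube_dl
#     with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
#         info = ydl.extract_info(MySpassIE._TEST['url'], download=False)
#         print(info['title'])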
| gpl-3.0 |
ivandeex/dz | inst/hooks/pre_find_module_path/hook-queue.py | 1 | 1230 | # -----------------------------------------------------------------------------
# Copyright (c) 2005-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
"""
Warning for 'import queue' under Python 2.7 via the `future` package.
The problem appears to be that PyInstaller cannot have two modules whose names
differ only by lower/upper case. The `future` package's 'queue' simply
imports all of the 'Queue' module. So by my reading, since 'queue' and 'Queue'
can not coexist in a frozen app, and since 'queue' requires 'Queue', there is
no way to use 'queue' in a frozen 2.7 app.
"""
from PyInstaller.compat import is_py2
from PyInstaller.utils.hooks import logger
def pre_find_module_path(api):
if not is_py2:
return
# maybe the 'import queue' was not really needed, so just make sure it
# is not found, otherwise it will crowd out the potential future
# import of 'Queue'
api.search_dirs = []
logger.warning("import queue (lowercase), not supported")
| mit |
cold-brew-coding/protagonist | drafter/ext/snowcrash/tools/gyp/pylib/gyp/xcode_emulation.py | 428 | 57360 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import copy
import gyp.common
import os
import os.path
import re
import shlex
import subprocess
import sys
import tempfile
from gyp.common import GypError
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_path_cache = {}
_sdk_root_cache = {}
# Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_plist_cache = {}
# Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_codesigning_key_cache = {}
# Populated lazily by _XcodeVersion. Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_xcode_version_cache = ()
def __init__(self, spec):
self.spec = spec
self.isIOS = False
# Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
# This means self.xcode_settings[config] always contains all settings
# for that config -- the per-target settings as well. Settings that are
# the same for all configs are implicitly per-target settings.
self.xcode_settings = {}
configs = spec['configurations']
for configname, config in configs.iteritems():
self.xcode_settings[configname] = config.get('xcode_settings', {})
self._ConvertConditionalKeys(configname)
if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
None):
self.isIOS = True
# This is only non-None temporarily during the execution of some methods.
self.configname = None
# Used by _AdjustLibrary to match .a and .dylib entries in libraries.
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _ConvertConditionalKeys(self, configname):
"""Converts or warns on conditional keys. Xcode supports conditional keys,
such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. This is a partial implementation
with some keys converted while the rest force a warning."""
settings = self.xcode_settings[configname]
conditional_keys = [key for key in settings if key.endswith(']')]
for key in conditional_keys:
# If you need more, speak up at http://crbug.com/122592
if key.endswith("[sdk=iphoneos*]"):
if configname.endswith("iphoneos"):
new_key = key.split("[")[0]
settings[new_key] = settings[key]
else:
print 'Warning: Conditional keys not implemented, ignoring:', \
' '.join(conditional_keys)
del settings[key]
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
return '.' + self.spec.get('product_extension', 'app')
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
if self.isIOS:
return self.GetWrapperName()
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
if self.isIOS:
return self.GetBundleContentsFolderPath()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('shared_library') or self.isIOS:
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def GetActiveArchs(self, configname):
"""Returns the architectures this target should be built for."""
# TODO: Look at VALID_ARCHS, ONLY_ACTIVE_ARCH; possibly set
# CURRENT_ARCH / NATIVE_ARCH env vars?
return self.xcode_settings[configname].get('ARCHS', [self._DefaultArch()])
def _GetStdout(self, cmdlist):
job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
out = job.communicate()[0]
if job.returncode != 0:
sys.stderr.write(out + '\n')
raise GypError('Error %d running %s' % (job.returncode, cmdlist[0]))
return out.rstrip('\n')
def _GetSdkVersionInfoItem(self, sdk, infoitem):
# xcodebuild requires Xcode and can't run on Command Line Tools-only
# systems from 10.7 onward.
# Since the CLT has no SDK paths anyway, returning None is the
# most sensible route and should still do the right thing.
try:
return self._GetStdout(['xcodebuild', '-version', '-sdk', sdk, infoitem])
except:
pass
def _SdkRoot(self, configname):
if configname is None:
configname = self.configname
return self.GetPerConfigSetting('SDKROOT', configname, default='')
def _SdkPath(self, configname=None):
sdk_root = self._SdkRoot(configname)
if sdk_root.startswith('/'):
return sdk_root
return self._XcodeSdkPath(sdk_root)
def _XcodeSdkPath(self, sdk_root):
if sdk_root not in XcodeSettings._sdk_path_cache:
sdk_path = self._GetSdkVersionInfoItem(sdk_root, 'Path')
XcodeSettings._sdk_path_cache[sdk_root] = sdk_path
if sdk_root:
XcodeSettings._sdk_root_cache[sdk_path] = sdk_root
return XcodeSettings._sdk_path_cache[sdk_root]
def _AppendPlatformVersionMinFlags(self, lst):
self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
# TODO: Implement this better?
sdk_path_basename = os.path.basename(self._SdkPath())
if sdk_path_basename.lower().startswith('iphonesimulator'):
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-mios-simulator-version-min=%s')
else:
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-miphoneos-version-min=%s')
def GetCflags(self, configname, arch=None):
"""Returns flags that need to be added to .c, .cc, .m, and .mm
compilations."""
# This functions (and the similar ones below) do not offer complete
# emulation of all xcode_settings keys. They're implemented on demand.
self.configname = configname
cflags = []
sdk_root = self._SdkPath()
if 'SDKROOT' in self._Settings() and sdk_root:
cflags.append('-isysroot %s' % sdk_root)
if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
cflags.append('-Wconstant-conversion')
if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
cflags.append('-funsigned-char')
if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
cflags.append('-fasm-blocks')
if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
cflags.append('-mdynamic-no-pic')
else:
pass
# TODO: In this case, it depends on the target. xcode passes
# mdynamic-no-pic by default for executable and possibly static lib
# according to mento
if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
cflags.append('-mpascal-strings')
self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
if dbg_format == 'dwarf':
cflags.append('-gdwarf-2')
elif dbg_format == 'stabs':
raise NotImplementedError('stabs debug format is not supported yet.')
elif dbg_format == 'dwarf-with-dsym':
cflags.append('-gdwarf-2')
else:
raise NotImplementedError('Unknown debug format %s' % dbg_format)
if self._Settings().get('GCC_STRICT_ALIASING') == 'YES':
cflags.append('-fstrict-aliasing')
elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO':
cflags.append('-fno-strict-aliasing')
if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
cflags.append('-fvisibility=hidden')
if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
cflags.append('-Werror')
if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
cflags.append('-Wnewline-eof')
self._AppendPlatformVersionMinFlags(cflags)
# TODO:
if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
self._WarnUnimplemented('COPY_PHASE_STRIP')
self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
# TODO: This is exported correctly, but assigning to it is not supported.
self._WarnUnimplemented('MACH_O_TYPE')
self._WarnUnimplemented('PRODUCT_TYPE')
if arch is not None:
archs = [arch]
else:
archs = self._Settings().get('ARCHS', [self._DefaultArch()])
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
cflags.append('-arch ' + archs[0])
if archs[0] in ('i386', 'x86_64'):
if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse3')
if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
default='NO'):
cflags.append('-mssse3') # Note 3rd 's'.
if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.1')
if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.2')
cflags += self._Settings().get('WARNING_CFLAGS', [])
if sdk_root:
framework_root = sdk_root
else:
framework_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root))
self.configname = None
return cflags
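  # Illustrative sketch (not from gyp itself): a generator typically creates
  # one XcodeSettings per target spec and queries it per configuration, e.g.:
  #
  #     xcode_settings = XcodeSettings(spec)
  #     cflags = xcode_settings.GetCflags('Release')
  #     cflags_cc = cflags + xcode_settings.GetCflagsCC('Release')
  #     ldflags = xcode_settings.GetLdflags('Release', product_dir,
  #                                         gyp_to_build_path)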
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi':
cflags_c.append('-ansi')
else:
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
clang_cxx_language_standard = self._Settings().get(
'CLANG_CXX_LANGUAGE_STANDARD')
# Note: Don't make c++0x to c++11 so that c++0x can be used with older
# clangs that don't understand c++11 yet (like Xcode 4.2's).
if clang_cxx_language_standard:
cflags_cc.append('-std=%s' % clang_cxx_language_standard)
self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
# Note: This flag is a no-op for clang, it only has an effect for gcc.
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def _AddObjectiveCARCFlags(self, flags):
if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'):
flags.append('-fobjc-arc')
def _AddObjectiveCMissingPropertySynthesisFlags(self, flags):
if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS',
'YES', default='NO'):
flags.append('-Wobjc-missing-property-synthesis')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self._AddObjectiveCARCFlags(cflags_objc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
self._AddObjectiveCARCFlags(cflags_objcc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
def GetInstallName(self):
"""Return LD_DYLIB_INSTALL_NAME for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
default_install_name = \
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
install_name = self.GetPerTargetSetting(
'LD_DYLIB_INSTALL_NAME', default=default_install_name)
# Hardcode support for the variables used in chromium for now, to
# unblock people using the make build.
if '$' in install_name:
assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
'$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
'yet in target \'%s\' (got \'%s\')' %
(self.spec['target_name'], install_name))
install_name = install_name.replace(
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
self._StandardizePath(self.GetInstallNameBase()))
if self._IsBundle():
# These are only valid for bundles, hence the |if|.
install_name = install_name.replace(
'$(WRAPPER_NAME)', self.GetWrapperName())
install_name = install_name.replace(
'$(PRODUCT_NAME)', self.GetProductName())
else:
assert '$(WRAPPER_NAME)' not in install_name
assert '$(PRODUCT_NAME)' not in install_name
install_name = install_name.replace(
'$(EXECUTABLE_PATH)', self.GetExecutablePath())
return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = '(\S+)'
WORD = '\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None):
"""Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
product_dir: The directory where products such static and dynamic
libraries are placed. This is added to the library search path.
gyp_to_build_path: A function that converts paths relative to the
current gyp file to paths relative to the build direcotry.
"""
self.configname = configname
ldflags = []
# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
# can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
ldflags.append('-Wl,-dead_strip')
if self._Test('PREBINDING', 'YES', default='NO'):
ldflags.append('-Wl,-prebind')
self._Appendf(
ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
self._Appendf(
ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
self._AppendPlatformVersionMinFlags(ldflags)
if 'SDKROOT' in self._Settings() and self._SdkPath():
ldflags.append('-isysroot ' + self._SdkPath())
for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
ldflags.append('-L' + gyp_to_build_path(library_path))
if 'ORDER_FILE' in self._Settings():
ldflags.append('-Wl,-order_file ' +
'-Wl,' + gyp_to_build_path(
self._Settings()['ORDER_FILE']))
if arch is not None:
archs = [arch]
else:
archs = self._Settings().get('ARCHS', [self._DefaultArch()])
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
ldflags.append('-arch ' + archs[0])
# Xcode adds the product directory by default.
ldflags.append('-L' + product_dir)
install_name = self.GetInstallName()
if install_name and self.spec['type'] != 'loadable_module':
ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
ldflags.append('-Wl,-rpath,' + rpath)
sdk_root = self._SdkPath()
if not sdk_root:
sdk_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
self.configname = None
return ldflags
def GetLibtoolflags(self, configname):
"""Returns flags that need to be passed to the static linker.
Args:
configname: The name of the configuration to get ld flags for.
"""
self.configname = configname
libtoolflags = []
for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
libtoolflags.append(libtoolflag)
# TODO(thakis): ARCHS?
self.configname = None
return libtoolflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
def GetPerConfigSetting(self, setting, configname, default=None):
if configname in self.xcode_settings:
return self.xcode_settings[configname].get(setting, default)
else:
return self.GetPerTargetSetting(setting, default)
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
is_first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if is_first_pass:
result = self.xcode_settings[configname].get(setting, None)
is_first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, self.spec['target_name']))
if result is None:
return default
return result
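  # Illustrative sketch (not part of the original module): if both a 'Debug'
  # and a 'Release' configuration define PRODUCT_NAME as 'Foo', then
  # GetPerTargetSetting('PRODUCT_NAME') returns 'Foo'; if the two values
  # disagree, the assertion above fires because a per-target value was
  # requested for what is really a per-config setting.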
def _GetStripPostbuilds(self, configname, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
    necessary to strip this target's binary. These should be run as postbuilds
before the actual postbuilds run."""
self.configname = configname
result = []
if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
default_strip_style = 'debugging'
if self.spec['type'] == 'loadable_module' and self._IsBundle():
default_strip_style = 'non-global'
elif self.spec['type'] == 'executable':
default_strip_style = 'all'
strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
strip_flags = {
'all': '',
'non-global': '-x',
'debugging': '-S',
}[strip_style]
explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
if explicit_strip_flags:
strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
if not quiet:
result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
result.append('strip %s %s' % (strip_flags, output_binary))
self.configname = None
return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
    necessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def _GetTargetPostbuilds(self, configname, output, output_binary,
quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
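  # Illustrative sketch (not part of the original module): with dSYM generation
  # and stripping both enabled (here with the default 'debugging' strip style),
  # the combined list looks roughly like
  #
  #   ['echo DSYMUTIL\\(app\\)', 'dsymutil out/app -o out/app.dSYM',
  #    'echo STRIP\\(app\\)', 'strip -S out/app']
  #
  # so debug info is captured before the binary is stripped; the target name
  # and paths here are hypothetical.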
def _GetIOSPostbuilds(self, configname, output_binary):
"""Return a shell command to codesign the iOS output binary so it can
be deployed to a device. This should be run as the very last step of the
build."""
if not (self.isIOS and self.spec['type'] == "executable"):
return []
settings = self.xcode_settings[configname]
key = self._GetIOSCodeSignIdentityKey(settings)
if not key:
return []
# Warn for any unimplemented signing xcode keys.
unimpl = ['OTHER_CODE_SIGN_FLAGS']
unimpl = set(unimpl) & set(self.xcode_settings[configname].keys())
if unimpl:
print 'Warning: Some codesign keys not implemented, ignoring: %s' % (
', '.join(sorted(unimpl)))
return ['%s code-sign-bundle "%s" "%s" "%s" "%s"' % (
os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key,
settings.get('CODE_SIGN_RESOURCE_RULES_PATH', ''),
settings.get('CODE_SIGN_ENTITLEMENTS', ''),
settings.get('PROVISIONING_PROFILE', ''))
]
def _GetIOSCodeSignIdentityKey(self, settings):
identity = settings.get('CODE_SIGN_IDENTITY')
if not identity:
return None
if identity not in XcodeSettings._codesigning_key_cache:
output = subprocess.check_output(
['security', 'find-identity', '-p', 'codesigning', '-v'])
for line in output.splitlines():
if identity in line:
fingerprint = line.split()[1]
cache = XcodeSettings._codesigning_key_cache
assert identity not in cache or fingerprint == cache[identity], (
"Multiple codesigning fingerprints for identity: %s" % identity)
XcodeSettings._codesigning_key_cache[identity] = fingerprint
return XcodeSettings._codesigning_key_cache.get(identity, '')
def AddImplicitPostbuilds(self, configname, output, output_binary,
postbuilds=[], quiet=False):
"""Returns a list of shell commands that should run before and after
|postbuilds|."""
assert output_binary is not None
pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet)
post = self._GetIOSPostbuilds(configname, output_binary)
return pre + postbuilds + post
def _AdjustLibrary(self, library, config_name=None):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
sdk_root = self._SdkPath(config_name)
if not sdk_root:
sdk_root = ''
return l.replace('$(SDKROOT)', sdk_root)
def AdjustLibraries(self, libraries, config_name=None):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [self._AdjustLibrary(library, config_name)
for library in libraries]
return libraries
def _BuildMachineOSBuild(self):
return self._GetStdout(['sw_vers', '-buildVersion'])
  # This method is ported from the logic in Homebrew's CLT version check
def _CLTVersion(self):
# pkgutil output looks like
# package-id: com.apple.pkg.CLTools_Executables
# version: 5.0.1.0.1.1382131676
# volume: /
# location: /
# install-time: 1382544035
# groups: com.apple.FindSystemFiles.pkg-group com.apple.DevToolsBoth.pkg-group com.apple.DevToolsNonRelocatableShared.pkg-group
STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo"
FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI"
MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables"
regex = re.compile('version: (?P<version>.+)')
for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]:
try:
output = self._GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key])
return re.search(regex, output).groupdict()['version']
except:
continue
def _XcodeVersion(self):
# `xcodebuild -version` output looks like
# Xcode 4.6.3
# Build version 4H1503
# or like
# Xcode 3.2.6
# Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
# BuildVersion: 10M2518
# Convert that to '0463', '4H1503'.
if len(XcodeSettings._xcode_version_cache) == 0:
try:
version_list = self._GetStdout(['xcodebuild', '-version']).splitlines()
# In some circumstances xcodebuild exits 0 but doesn't return
# the right results; for example, a user on 10.7 or 10.8 with
# a bogus path set via xcode-select
# In that case this may be a CLT-only install so fall back to
# checking that version.
if len(version_list) < 2:
          raise GypError("xcodebuild returned unexpected results")
except:
version = self._CLTVersion()
if version:
version = re.match('(\d\.\d\.?\d*)', version).groups()[0]
else:
          raise GypError("No Xcode or CLT version detected!")
# The CLT has no build information, so we return an empty string.
version_list = [version, '']
version = version_list[0]
build = version_list[-1]
# Be careful to convert "4.2" to "0420":
version = version.split()[-1].replace('.', '')
version = (version + '0' * (3 - len(version))).zfill(4)
if build:
build = build.split()[-1]
XcodeSettings._xcode_version_cache = (version, build)
return XcodeSettings._xcode_version_cache
def _XcodeIOSDeviceFamily(self, configname):
family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1')
return [int(x) for x in family.split(',')]
def GetExtraPlistItems(self, configname=None):
"""Returns a dictionary with extra items to insert into Info.plist."""
if configname not in XcodeSettings._plist_cache:
cache = {}
cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild()
xcode, xcode_build = self._XcodeVersion()
cache['DTXcode'] = xcode
cache['DTXcodeBuild'] = xcode_build
sdk_root = self._SdkRoot(configname)
if not sdk_root:
sdk_root = self._DefaultSdkRoot()
cache['DTSDKName'] = sdk_root
if xcode >= '0430':
cache['DTSDKBuild'] = self._GetSdkVersionInfoItem(
sdk_root, 'ProductBuildVersion')
else:
cache['DTSDKBuild'] = cache['BuildMachineOSBuild']
if self.isIOS:
cache['DTPlatformName'] = cache['DTSDKName']
if configname.endswith("iphoneos"):
cache['DTPlatformVersion'] = self._GetSdkVersionInfoItem(
sdk_root, 'ProductVersion')
cache['CFBundleSupportedPlatforms'] = ['iPhoneOS']
else:
cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator']
XcodeSettings._plist_cache[configname] = cache
# Include extra plist items that are per-target, not per global
# XcodeSettings.
items = dict(XcodeSettings._plist_cache[configname])
if self.isIOS:
items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname)
return items
def _DefaultSdkRoot(self):
"""Returns the default SDKROOT to use.
Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode
project, then the environment variable was empty. Starting with this
version, Xcode uses the name of the newest SDK installed.
"""
if self._XcodeVersion() < '0500':
return ''
default_sdk_path = self._XcodeSdkPath('')
default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path)
if default_sdk_root:
return default_sdk_root
try:
all_sdks = self._GetStdout(['xcodebuild', '-showsdks'])
except:
# If xcodebuild fails, there will be no valid SDKs
return ''
for line in all_sdks.splitlines():
items = line.split()
if len(items) >= 3 and items[-2] == '-sdk':
sdk_root = items[-1]
sdk_path = self._XcodeSdkPath(sdk_root)
if sdk_path == default_sdk_path:
return sdk_root
return ''
def _DefaultArch(self):
# For Mac projects, Xcode changed the default value used when ARCHS is not
# set from "i386" to "x86_64".
#
# For iOS projects, if ARCHS is unset, it defaults to "armv7 armv7s" when
    # building for a device, and the simulator binaries are always built for
# "i386".
#
# For new projects, ARCHS is set to $(ARCHS_STANDARD_INCLUDING_64_BIT),
    # which corresponds to "armv7 armv7s arm64", and when building the simulator
# the architecture is either "i386" or "x86_64" depending on the simulated
# device (respectively 32-bit or 64-bit device).
#
    # Since the value returned by this function is only used when ARCHS is not
    # set, on iOS we return "i386", as the default Xcode project generator
    # does not set ARCHS if it is not set in the .gyp file.
if self.isIOS:
return 'i386'
version, build = self._XcodeVersion()
if version >= '0500':
return 'x86_64'
return 'i386'
class MacPrefixHeader(object):
"""A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.
This feature consists of several pieces:
* If GCC_PREFIX_HEADER is present, all compilations in that project get an
additional |-include path_to_prefix_header| cflag.
* If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
instead compiled, and all other compilations in the project get an
additional |-include path_to_compiled_header| instead.
+ Compiled prefix headers have the extension gch. There is one gch file for
every language used in the project (c, cc, m, mm), since gch files for
different languages aren't compatible.
+ gch files themselves are built with the target's normal cflags, but they
obviously don't get the |-include| flag. Instead, they need a -x flag that
describes their language.
+ All o files in the target need to depend on the gch file, to make sure
it's built before any o file is built.
This class helps with some of these tasks, but it needs help from the build
system for writing dependencies to the gch files, for writing build commands
for the gch files, and for figuring out the location of the gch files.
"""
def __init__(self, xcode_settings,
gyp_path_to_build_path, gyp_path_to_build_output):
"""If xcode_settings is None, all methods on this class are no-ops.
Args:
gyp_path_to_build_path: A function that takes a gyp-relative path,
and returns a path relative to the build directory.
gyp_path_to_build_output: A function that takes a gyp-relative path and
a language code ('c', 'cc', 'm', or 'mm'), and that returns a path
to where the output of precompiling that path for that language
should be placed (without the trailing '.gch').
"""
# This doesn't support per-configuration prefix headers. Good enough
# for now.
self.header = None
self.compile_headers = False
if xcode_settings:
self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
self.compile_headers = xcode_settings.GetPerTargetSetting(
'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
self.compiled_headers = {}
if self.header:
if self.compile_headers:
for lang in ['c', 'cc', 'm', 'mm']:
self.compiled_headers[lang] = gyp_path_to_build_output(
self.header, lang)
self.header = gyp_path_to_build_path(self.header)
def _CompiledHeader(self, lang, arch):
assert self.compile_headers
h = self.compiled_headers[lang]
if arch:
h += '.' + arch
return h
def GetInclude(self, lang, arch=None):
"""Gets the cflags to include the prefix header for language |lang|."""
if self.compile_headers and lang in self.compiled_headers:
return '-include %s' % self._CompiledHeader(lang, arch)
elif self.header:
return '-include %s' % self.header
else:
return ''
  def _Gch(self, lang, arch):
    """Returns the file name of the compiled prefix header for language |lang|."""
assert self.compile_headers
return self._CompiledHeader(lang, arch) + '.gch'
def GetObjDependencies(self, sources, objs, arch=None):
"""Given a list of source files and the corresponding object files, returns
a list of (source, object, gch) tuples, where |gch| is the build-directory
    relative path to the gch file each object file depends on. |sources[i]|
has to be the source file belonging to |objs[i]|."""
if not self.header or not self.compile_headers:
return []
result = []
for source, obj in zip(sources, objs):
ext = os.path.splitext(source)[1]
lang = {
'.c': 'c',
'.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
'.m': 'm',
'.mm': 'mm',
}.get(ext, None)
if lang:
result.append((source, obj, self._Gch(lang, arch)))
return result
def GetPchBuildCommands(self, arch=None):
"""Returns [(path_to_gch, language_flag, language, header)].
|path_to_gch| and |header| are relative to the build directory.
"""
if not self.header or not self.compile_headers:
return []
return [
(self._Gch('c', arch), '-x c-header', 'c', self.header),
(self._Gch('cc', arch), '-x c++-header', 'cc', self.header),
(self._Gch('m', arch), '-x objective-c-header', 'm', self.header),
(self._Gch('mm', arch), '-x objective-c++-header', 'mm', self.header),
]
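# Illustrative sketch (not part of the original module): how a generator might
# drive MacPrefixHeader. The two path helpers below are hypothetical stand-ins
# for the generator's own path mapping.
def _ExamplePrefixHeaderUsage(xcode_settings):
  pch = MacPrefixHeader(
      xcode_settings,
      gyp_path_to_build_path=lambda p: p,
      gyp_path_to_build_output=lambda path, lang: 'obj/pch/%s/%s' % (lang, path))
  # '-include <compiled header>' when precompiling is enabled, otherwise
  # '-include <header>' or '' if no prefix header is configured.
  include_flag = pch.GetInclude('c')
  # A list of (path_to_gch, language_flag, language, header) tuples.
  pch_commands = pch.GetPchBuildCommands()
  return include_flag, pch_commands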
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
"""Merges the global xcode_settings dictionary into each configuration of the
target represented by spec. For keys that are both in the global and the local
  xcode_settings dict, the local key gets precedence.
"""
# The xcode generator special-cases global xcode_settings and does something
# that amounts to merging in the global xcode_settings into each local
# xcode_settings dict.
global_xcode_settings = global_dict.get('xcode_settings', {})
for config in spec['configurations'].values():
if 'xcode_settings' in config:
new_settings = global_xcode_settings.copy()
new_settings.update(config['xcode_settings'])
config['xcode_settings'] = new_settings
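# Illustrative sketch (not part of the original module) of the merge rule
# above, using made-up settings; local (per-configuration) keys win.
def _ExampleMergeGlobalXcodeSettings():
  global_dict = {
      'xcode_settings': {'SDKROOT': 'macosx', 'OTHER_LDFLAGS': '-lfoo'}}
  spec = {'configurations': {
      'Default': {'xcode_settings': {'OTHER_LDFLAGS': '-lbar'}}}}
  MergeGlobalXcodeSettingsToSpec(global_dict, spec)
  # spec['configurations']['Default']['xcode_settings'] is now
  # {'SDKROOT': 'macosx', 'OTHER_LDFLAGS': '-lbar'}.
  return spec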
def IsMacBundle(flavor, spec):
  """Returns whether |spec| should be treated as a bundle.
Bundles are directories with a certain subdirectory structure, instead of
  just a single file. Bundle rules not only produce a binary but also package
resources into that directory."""
is_mac_bundle = (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
if is_mac_bundle:
assert spec['type'] != 'none', (
'mac_bundle targets cannot have type none (target "%s")' %
spec['target_name'])
return is_mac_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
"""Yields (output, resource) pairs for every resource in |resources|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
resources: A list of bundle resources, relative to the build directory.
"""
dest = os.path.join(product_dir,
xcode_settings.GetBundleResourceFolder())
for res in resources:
output = dest
# The make generator doesn't support it, so forbid it everywhere
    # to keep the generators more interchangeable.
assert ' ' not in res, (
"Spaces in resource filenames not supported (%s)" % res)
# Split into (path,file).
res_parts = os.path.split(res)
# Now split the path into (prefix,maybe.lproj).
lproj_parts = os.path.split(res_parts[0])
# If the resource lives in a .lproj bundle, add that to the destination.
if lproj_parts[1].endswith('.lproj'):
output = os.path.join(output, lproj_parts[1])
output = os.path.join(output, res_parts[1])
# Compiled XIB files are referred to by .nib.
if output.endswith('.xib'):
output = os.path.splitext(output)[0] + '.nib'
# Compiled storyboard files are referred to by .storyboardc.
if output.endswith('.storyboard'):
output = os.path.splitext(output)[0] + '.storyboardc'
yield output, res
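# Illustrative sketch (not part of the original module): with a hypothetical
# destination of 'App.app/Contents/Resources', the generator above yields
#
#   'images/icon.png'              -> '.../Resources/icon.png'
#   'en.lproj/Localizable.strings' -> '.../Resources/en.lproj/Localizable.strings'
#   'Main.xib'                     -> '.../Resources/Main.nib'
#
# i.e. plain directory components are dropped, *.lproj folders are kept, and
# compiled .xib/.storyboard names are rewritten to .nib/.storyboardc.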
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
"""Returns (info_plist, dest_plist, defines, extra_env), where:
* |info_plist| is the source plist path, relative to the
build directory,
* |dest_plist| is the destination plist path, relative to the
build directory,
* |defines| is a list of preprocessor defines (empty if the plist
      shouldn't be preprocessed),
* |extra_env| is a dict of env variables that should be exported when
invoking |mac_tool copy-info-plist|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
    gyp_path_to_build_path: A function that converts paths relative to the
        current gyp file to paths relative to the build directory.
"""
info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
if not info_plist:
return None, None, [], {}
# The make generator doesn't support it, so forbid it everywhere
  # to keep the generators more interchangeable.
assert ' ' not in info_plist, (
"Spaces in Info.plist filenames not supported (%s)" % info_plist)
info_plist = gyp_path_to_build_path(info_plist)
# If explicitly set to preprocess the plist, invoke the C preprocessor and
# specify any defines as -D flags.
if xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESS', default='NO') == 'YES':
# Create an intermediate file based on the path.
defines = shlex.split(xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
else:
defines = []
dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
extra_env = xcode_settings.GetPerTargetSettings()
return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings=None):
"""Return the environment variables that Xcode would set. See
http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
for a full list.
Args:
xcode_settings: An XcodeSettings object. If this is None, this function
returns an empty dict.
built_products_dir: Absolute path to the built products dir.
srcroot: Absolute path to the source root.
configuration: The build configuration name.
additional_settings: An optional dict with more values to add to the
result.
"""
if not xcode_settings: return {}
# This function is considered a friend of XcodeSettings, so let it reach into
# its implementation details.
spec = xcode_settings.spec
  # These are filled in on an as-needed basis.
env = {
'BUILT_PRODUCTS_DIR' : built_products_dir,
'CONFIGURATION' : configuration,
'PRODUCT_NAME' : xcode_settings.GetProductName(),
# See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
'SRCROOT' : srcroot,
'SOURCE_ROOT': '${SRCROOT}',
# This is not true for static libraries, but currently the env is only
# written for bundles:
'TARGET_BUILD_DIR' : built_products_dir,
'TEMP_DIR' : '${TMPDIR}',
}
if xcode_settings.GetPerConfigSetting('SDKROOT', configuration):
env['SDKROOT'] = xcode_settings._SdkPath(configuration)
else:
env['SDKROOT'] = ''
if spec['type'] in (
'executable', 'static_library', 'shared_library', 'loadable_module'):
env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
mach_o_type = xcode_settings.GetMachOType()
if mach_o_type:
env['MACH_O_TYPE'] = mach_o_type
env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
if xcode_settings._IsBundle():
env['CONTENTS_FOLDER_PATH'] = \
xcode_settings.GetBundleContentsFolderPath()
env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
xcode_settings.GetBundleResourceFolder()
env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()
install_name = xcode_settings.GetInstallName()
if install_name:
env['LD_DYLIB_INSTALL_NAME'] = install_name
install_name_base = xcode_settings.GetInstallNameBase()
if install_name_base:
env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
if not additional_settings:
additional_settings = {}
else:
# Flatten lists to strings.
for k in additional_settings:
if not isinstance(additional_settings[k], str):
additional_settings[k] = ' '.join(additional_settings[k])
additional_settings.update(env)
for k in additional_settings:
additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])
return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
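# Illustrative sketch (not part of the original module), exercising the two
# substitutions above on a made-up string.
def _ExampleNormalizeEnvVarReferences():
  # All three reference styles end up in the ${FOO} form.
  return _NormalizeEnvVarReferences('$FOO/$(BAR)/${BAZ}')  # '${FOO}/${BAR}/${BAZ}'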
def ExpandEnvVars(string, expansions):
"""Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
expansions list. If the variable expands to something that references
  another variable, this variable is expanded as well if it's in the expansions
  list -- until no variables from the expansions list are left."""
for k, v in reversed(expansions):
string = string.replace('${' + k + '}', v)
string = string.replace('$(' + k + ')', v)
string = string.replace('$' + k, v)
return string
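# Illustrative sketch (not part of the original module), with a made-up
# expansions list in the order produced by GetSortedXcodeEnv (a variable is
# listed before the variables that refer to it).
def _ExampleExpandEnvVars():
  expansions = [('BUILT_PRODUCTS_DIR', 'out/Release'),
                ('TARGET_BUILD_DIR', '${BUILT_PRODUCTS_DIR}')]
  # Reversed iteration substitutes TARGET_BUILD_DIR first, then resolves the
  # BUILT_PRODUCTS_DIR reference it introduced, giving 'out/Release/app'.
  return ExpandEnvVars('$(TARGET_BUILD_DIR)/app', expansions)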
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
Throws an Exception in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError, e:
raise GypError(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
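# Illustrative sketch (not part of the original module), with a made-up env:
# WRAPPER_NAME sorts before INFOPLIST_PATH because the latter refers to it.
def _ExampleTopologicalEnvOrder():
  env = {'WRAPPER_NAME': 'App.app',
         'INFOPLIST_PATH': '${WRAPPER_NAME}/Info.plist'}
  return _TopologicallySortedEnvVarKeys(env)  # ['WRAPPER_NAME', 'INFOPLIST_PATH']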
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
configuration, additional_settings=None):
env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings)
return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)]
def GetSpecPostbuildCommands(spec, quiet=False):
"""Returns the list of postbuilds explicitly defined on |spec|, in a form
executable by a shell."""
postbuilds = []
for postbuild in spec.get('postbuilds', []):
if not quiet:
postbuilds.append('echo POSTBUILD\\(%s\\) %s' % (
spec['target_name'], postbuild['postbuild_name']))
postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
return postbuilds
def _HasIOSTarget(targets):
"""Returns true if any target contains the iOS specific key
IPHONEOS_DEPLOYMENT_TARGET."""
for target_dict in targets.values():
for config in target_dict['configurations'].values():
if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'):
return True
return False
def _AddIOSDeviceConfigurations(targets):
  """Clone each configuration and append -iphoneos to the name. Configure these
  new configurations to build for iOS devices."""
for target_dict in targets.values():
for config_name in target_dict['configurations'].keys():
config = target_dict['configurations'][config_name]
new_config_name = config_name + '-iphoneos'
new_config_dict = copy.deepcopy(config)
if target_dict['toolset'] == 'target':
new_config_dict['xcode_settings']['ARCHS'] = ['armv7']
new_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos'
target_dict['configurations'][new_config_name] = new_config_dict
return targets
def CloneConfigurationForDeviceAndEmulator(target_dicts):
"""If |target_dicts| contains any iOS targets, automatically create -iphoneos
targets for iOS device builds."""
if _HasIOSTarget(target_dicts):
return _AddIOSDeviceConfigurations(target_dicts)
return target_dicts
| mit |
gale320/sync-engine | migrations/versions/094_eas_passwords.py | 8 | 2164 | """EAS passwords
Revision ID: 427812c1e849
Revises: 159607944f52
Create Date: 2014-09-14 22:15:51.225342
"""
# revision identifiers, used by Alembic.
revision = '427812c1e849'
down_revision = '159607944f52'
from datetime import datetime
from alembic import op
import sqlalchemy as sa
def upgrade():
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
# Do nothing if the affected table isn't present.
if not engine.has_table('easaccount'):
return
# Do not define foreign key constraint here; that's done for all account
# tables in the next migration.
op.add_column('easaccount', sa.Column('password_id', sa.Integer(),
sa.ForeignKey('secret.id')))
Base = sa.ext.declarative.declarative_base()
Base.metadata.reflect(engine)
from inbox.models.session import session_scope
class EASAccount(Base):
__table__ = Base.metadata.tables['easaccount']
secret = sa.orm.relationship(
'Secret', primaryjoin='EASAccount.password_id == Secret.id')
class Secret(Base):
__table__ = Base.metadata.tables['secret']
with session_scope(versioned=False) as \
db_session:
accounts = db_session.query(EASAccount).all()
print '# EAS accounts: ', len(accounts)
for account in accounts:
secret = Secret()
# Need to set non-nullable attributes.
secret.created_at = datetime.utcnow()
secret.updated_at = datetime.utcnow()
secret.type = 0
secret.acl_id = 0
secret.secret = account.password
account.secret = secret
db_session.commit()
op.alter_column('easaccount', 'password_id',
existing_type=sa.Integer(),
nullable=False)
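# Illustrative note (not part of the original migration): with a stock Alembic
# setup, this revision would typically be applied with
# `alembic upgrade 427812c1e849` and reverted with
# `alembic downgrade 159607944f52`; sync-engine may wrap this in its own
# migration runner.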
def downgrade():
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
if not engine.has_table('easaccount'):
return
op.drop_constraint('easaccount_ibfk_2', 'easaccount', type_='foreignkey')
op.drop_column('easaccount', 'password_id')
| agpl-3.0 |
chungjjang80/FRETBursts | fretbursts/burst_plot_attic.py | 2 | 2650 | # encoding: utf-8
#
# FRETBursts - A single-molecule FRET burst analysis toolkit.
#
# Copyright (C) 2014 Antonino Ingargiola <[email protected]>
#
"""
WARNING: Plot function ATTIC! Functions here are broken!
Here there are function originally placed in burst_plot.py that became
broken and I didn't had the time or the need to update. They live here until
I decide to fix or delete them.
"""
##
# Other plot wrapper functions
#
def wplot(*args, **kwargs):
AX, s = dplot_8ch(*args, **kwargs)
kwargs.update(AX=AX)
q = gs.mToolQT(gcf(), dplot_8ch, *args, **kwargs)
return AX, q
def splot(d, fun=scatter_width_size,
scroll=False, pgrid=True, figsize=(10, 8), nosuptitle=False, ax=None,
scale=True, **kwargs):
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
else:
fig = ax.figure
for i in xrange(d.nch):
try:
if i == 0 and not nosuptitle: fig.set_title(d.status())
except:
print("WARNING: No title in plots.")
ax.grid(pgrid)
fun(d, i, **kwargs)
s = None
if scroll: s = ScrollingToolQT(fig)
return ax, s
##
# Other misc plot functions
#
def bplot(d, ich, b_index, ph0=True, pad=0):
"""Plot photons in a burst as vertical bars. Burst: d.mburst[ich][b_index].
"""
br = bl.b_irange(d.mburst[ich], b_index, pad=pad)
accept = d.A_em[ich][br]
donor = -accept
ph = d.ph_times_m[ich][br]
if ph0: ph -= ph[0]
dtime = (ph[donor])*d.clk_p*1e6
atime = (ph[accept])*d.clk_p*1e6
plt.vlines(dtime,0,1, lw=2, color='g', alpha=0.8)
plt.vlines(atime,0,1, lw=2, color='r', alpha=0.8)
#plot(dtime, ones_like(ph[donor]), '^', color='g', alpha=0.5)
#plot(atime, -ones_like(ph[accept]), 'o', color='r', alpha=0.5)
xlabel("Time (us)")
nd, na, nt = donor.sum(), accept.sum(), ph.size
E = float(na)/(nt)
title("#ph = %d, #D-ph = %d, #A-ph = %d, E = %.2f" % (nt,nd,na,E))
plt.ylim(-10,10)
def bg_legend_8ch(d):
ax = gca()
L = ax.get_lines()[1::2]
for i,l in enumerate(L):
ich = i/3
x = i%3
s = ['Tot', 'D', 'A']
r = [d.rate_m[ich], d.rate_dd[ich], d.rate_ad[ich]]
l.set_label("CH%d, %s %d cps" % (ich+1, s[x], r[x]))
ax.legend()
plt.draw()
def bg_legend_alex(d):
ax = gca()
L = ax.get_lines()[1::2]
for i,l in enumerate(L):
ich = i/4
x = i%4
s = ['Tot', 'DD', 'AD', 'AA']
r = [d.rate_m[ich], d.rate_dd[ich], d.rate_ad[ich], d.rate_aa[ich]]
l.set_label("CH%d, %s %d cps" % (ich+1, s[x], r[x]))
ax.legend()
plt.draw()
| gpl-2.0 |
dan-blanchard/conda-env | conda_env/cli/main_attach.py | 4 | 2010 | from argparse import RawDescriptionHelpFormatter
from ..utils.notebooks import current_env, Notebook
from conda.cli import common
from ..env import from_environment
description = """
Embeds information describing your conda environment
into the notebook metadata
"""
example = """
examples:
conda env attach -n root notebook.ipynb
conda env attach -r user/environment notebook.ipynb
"""
def configure_parser(sub_parsers):
p = sub_parsers.add_parser(
'attach',
formatter_class=RawDescriptionHelpFormatter,
description=description,
help=description,
epilog=example,
)
group = p.add_mutually_exclusive_group(required=True)
group.add_argument(
'-n', '--name',
action='store',
help='local environment definition',
default=None
)
group.add_argument(
'-r', '--remote',
action='store',
help='remote environment definition',
default=None
)
p.add_argument(
'--force',
action='store_true',
default=False,
help='Replace existing environment definition'
)
p.add_argument(
'--no-builds',
default=False,
action='store_true',
required=False,
help='Remove build specification from dependencies'
)
p.add_argument(
'notebook',
help='notebook file',
action='store',
default=None
)
common.add_parser_json(p)
p.set_defaults(func=execute)
def execute(args, parser):
if args.name is not None:
prefix = common.get_prefix(args)
content = from_environment(args.name, prefix, no_builds=args.no_builds).to_dict()
else:
content = {'remote': args.remote}
print("Environment {} will be attach into {}".format(args.name, args.notebook))
nb = Notebook(args.notebook)
if nb.inject(content, args.force):
print("Done.")
else:
print("The environment couldn't be attached due:")
        print("The environment couldn't be attached due to:")
| bsd-3-clause |
krapivchenkon/udacity-gae-demo | ConferenceCentral_Complete/models.py | 11 | 3885 | #!/usr/bin/env python
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
created/forked from conferences.py by wesc on 2014 may 24
"""
__author__ = '[email protected] (Wesley Chun)'
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
class ConflictException(endpoints.ServiceException):
"""ConflictException -- exception mapped to HTTP 409 response"""
http_status = httplib.CONFLICT
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty()
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
conferenceKeysToAttend = ndb.StringProperty(repeated=True)
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
conferenceKeysToAttend = messages.StringField(4, repeated=True)
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
data = messages.StringField(1, required=True)
class BooleanMessage(messages.Message):
"""BooleanMessage-- outbound Boolean value message"""
data = messages.BooleanField(1)
class Conference(ndb.Model):
"""Conference -- Conference object"""
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
organizerUserId = ndb.StringProperty()
topics = ndb.StringProperty(repeated=True)
city = ndb.StringProperty()
startDate = ndb.DateProperty()
month = ndb.IntegerProperty() # TODO: do we need for indexing like Java?
endDate = ndb.DateProperty()
maxAttendees = ndb.IntegerProperty()
seatsAvailable = ndb.IntegerProperty()
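# Illustrative sketch (not part of the original module): creating and storing a
# Conference entity with ndb; the profile id used for the ancestor key is
# hypothetical.
def _example_create_conference():
    profile_key = ndb.Key(Profile, 'some-user-id')
    conference = Conference(name='PyCon', city='Portland', topics=['Python'],
                            maxAttendees=100, seatsAvailable=100,
                            parent=profile_key)
    return conference.put()  # returns the ndb.Key of the stored entity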
class ConferenceForm(messages.Message):
"""ConferenceForm -- Conference outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
organizerUserId = messages.StringField(3)
topics = messages.StringField(4, repeated=True)
city = messages.StringField(5)
startDate = messages.StringField(6) #DateTimeField()
month = messages.IntegerField(7, variant=messages.Variant.INT32)
maxAttendees = messages.IntegerField(8, variant=messages.Variant.INT32)
seatsAvailable = messages.IntegerField(9, variant=messages.Variant.INT32)
endDate = messages.StringField(10) #DateTimeField()
websafeKey = messages.StringField(11)
organizerDisplayName = messages.StringField(12)
class ConferenceForms(messages.Message):
"""ConferenceForms -- multiple Conference outbound form message"""
items = messages.MessageField(ConferenceForm, 1, repeated=True)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
class ConferenceQueryForm(messages.Message):
"""ConferenceQueryForm -- Conference query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
"""ConferenceQueryForms -- multiple ConferenceQueryForm inbound form message"""
filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True)
| gpl-3.0 |
bmya/odoo-addons | report_extended_purchase/__openerp__.py | 4 | 1664 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Report Configurator - Purchase',
'version': '8.0.1.2.0',
'category': 'Reporting Subsystem',
'sequence': 14,
'summary': '',
'description': """
Report Configurator - Purchase
==============================
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3',
'images': [
],
'depends': [
'report_extended',
'purchase',
],
'data': [
'views/report_view.xml',
'views/purchase_view.xml'
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': True,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
FrankBian/kuma | vendor/packages/translate-toolkit/translate/convert/test_po2tiki.py | 7 | 1034 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# po2tiki unit tests
# Author: Wil Clouser <[email protected]>
# Date: 2008-12-01
from translate.convert import po2tiki
from translate.storage import tiki
from translate.convert import test_convert
from translate.misc import wStringIO
class TestPo2Tiki:
def test_convertpo(self):
inputfile = """
#: translated
msgid "zero_source"
msgstr "zero_target"
#: unused
msgid "one_source"
msgstr "one_target"
"""
outputfile = wStringIO.StringIO()
po2tiki.convertpo(inputfile, outputfile)
output = outputfile.getvalue()
assert '"one_source" => "one_target",' in output
assert '"zero_source" => "zero_target",' in output
class TestPo2TikiCommand(test_convert.TestConvertCommand, TestPo2Tiki):
"""Tests running actual po2tiki commands on files"""
convertmodule = po2tiki
defaultoptions = {}
def test_help(self):
"""tests getting help"""
options = test_convert.TestConvertCommand.test_help(self)
| mpl-2.0 |
YangChihWei/2015cdb_g1_0623 | static/Brython3.1.1-20150328-091302/Lib/fnmatch.py | 894 | 3163 | """Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import os
import posixpath
import re
import functools
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
"""
name = os.path.normcase(name)
pat = os.path.normcase(pat)
return fnmatchcase(name, pat)
@functools.lru_cache(maxsize=256, typed=True)
def _compile_pattern(pat):
if isinstance(pat, bytes):
pat_str = str(pat, 'ISO-8859-1')
res_str = translate(pat_str)
res = bytes(res_str, 'ISO-8859-1')
else:
res = translate(pat)
return re.compile(res).match
def filter(names, pat):
"""Return the subset of the list NAMES that match PAT."""
result = []
pat = os.path.normcase(pat)
match = _compile_pattern(pat)
if os.path is posixpath:
# normcase on posix is NOP. Optimize it away from the loop.
for name in names:
if match(name):
result.append(name)
else:
for name in names:
if match(os.path.normcase(name)):
result.append(name)
return result
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
match = _compile_pattern(pat)
return match(name) is not None
def translate(pat):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i+1
if c == '*':
res = res + '.*'
elif c == '?':
res = res + '.'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j+1
if j < n and pat[j] == ']':
j = j+1
while j < n and pat[j] != ']':
j = j+1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\','\\\\')
i = j+1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return res + '\Z(?ms)'
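# Illustrative examples (not part of the module), following the rules above;
# the file names and pattern are made up.
def _example_usage():
    pattern = translate('*.py')  # '.*\\.py\\Z(?ms)'
    exact = fnmatchcase('REPORT.PY', '*.py')  # False: case always matters here
    local = fnmatch('REPORT.PY', '*.py')  # True only where normcase() folds case
    return pattern, exact, local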
| gpl-3.0 |
littlstar/chromium.src | components/test/data/password_manager/run_tests.py | 43 | 4038 | # -*- coding: utf-8 -*-
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This file allows the bots to be easily configure and run the tests."""
import argparse
import os
import tempfile
from environment import Environment
import tests
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Password Manager automated tests runner help.")
parser.add_argument(
"--chrome-path", action="store", dest="chrome_path",
help="Set the chrome path (required).", nargs=1, required=True)
parser.add_argument(
"--chromedriver-path", action="store", dest="chromedriver_path",
help="Set the chromedriver path (required).", nargs=1, required=True)
parser.add_argument(
"--profile-path", action="store", dest="profile_path",
help="Set the profile path (required). You just need to choose a "
"temporary empty folder. If the folder is not empty all its content "
"is going to be removed.",
nargs=1, required=True)
parser.add_argument(
"--passwords-path", action="store", dest="passwords_path",
help="Set the usernames/passwords path (required).", nargs=1,
required=True)
parser.add_argument("--save-path", action="store", nargs=1, dest="save_path",
help="Write the results in a file.", required=True)
args = parser.parse_args()
environment = Environment('', '', '', None, False)
tests.Tests(environment)
xml = open(args.save_path[0],"w")
xml.write("<xml>")
try:
results = tempfile.NamedTemporaryFile(
dir=os.path.join(tempfile.gettempdir()), delete=False)
results_path = results.name
results.close()
full_path = os.path.realpath(__file__)
tests_dir = os.path.dirname(full_path)
tests_path = os.path.join(tests_dir, "tests.py")
for websitetest in environment.websitetests:
# The tests can be flaky. This is why we try to rerun up to 3 times.
for x in range(0, 3):
# TODO(rchtara): Using "pkill" is just temporary until a better,
# platform-independent solution is found.
os.system("pkill chrome")
try:
os.remove(results_path)
except Exception:
pass
        # TODO(rchtara): Using "timeout" is just temporary until a better,
# platform-independent solution is found.
# The website test runs in two passes, each pass has an internal
# timeout of 200s for waiting (see |remaining_time_to_wait| and
# Wait() in websitetest.py). Accounting for some more time spent on
# the non-waiting execution, 300 seconds should be the upper bound on
# the runtime of one pass, thus 600 seconds for the whole test.
os.system("timeout 600 python %s %s --chrome-path %s "
"--chromedriver-path %s --passwords-path %s --profile-path %s "
"--save-path %s" %
(tests_path, websitetest.name, args.chrome_path[0],
args.chromedriver_path[0], args.passwords_path[0],
args.profile_path[0], results_path))
if os.path.isfile(results_path):
results = open(results_path, "r")
count = 0 # Count the number of successful tests.
for line in results:
xml.write(line)
count += line.count("successful='True'")
results.close()
          # There are only two tests run for every website: the prompt test and
          # the normal test. If both tests were successful, testing stops for
          # the current website.
if count == 2:
break
else:
xml.write("<result><test name='%s' type='prompt' successful='false'>"
"</test><test name='%s' type='normal' successful='false'></test>"
"</result>" % (websitetest.name, websitetest.name))
finally:
try:
os.remove(results_path)
except Exception:
pass
xml.write("</xml>")
xml.close()
| bsd-3-clause |
elelsee/pycfn-elasticsearch | pycfn_elasticsearch/vendored/docutils/core.py | 117 | 29426 | # $Id: core.py 7466 2012-06-25 14:56:51Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Calling the ``publish_*`` convenience functions (or instantiating a
`Publisher` object) with component names will result in default
behavior. For custom behavior (setting component options), create
custom component objects first, and pass *them* to
``publish_*``/`Publisher`. See `The Docutils Publisher`_.
.. _The Docutils Publisher: http://docutils.sf.net/docs/api/publisher.html
"""
__docformat__ = 'reStructuredText'
import sys
import pprint
from docutils import __version__, __version_details__, SettingsSpec
from docutils import frontend, io, utils, readers, writers
from docutils.frontend import OptionParser
from docutils.transforms import Transformer
from docutils.utils.error_reporting import ErrorOutput, ErrorString
import docutils.readers.doctree
class Publisher:
"""
A facade encapsulating the high-level logic of a Docutils system.
"""
def __init__(self, reader=None, parser=None, writer=None,
source=None, source_class=io.FileInput,
destination=None, destination_class=io.FileOutput,
settings=None):
"""
Initial setup. If any of `reader`, `parser`, or `writer` are not
specified, the corresponding ``set_...`` method should be called with
a component name (`set_reader` sets the parser as well).
"""
self.document = None
"""The document tree (`docutils.nodes` objects)."""
self.reader = reader
"""A `docutils.readers.Reader` instance."""
self.parser = parser
"""A `docutils.parsers.Parser` instance."""
self.writer = writer
"""A `docutils.writers.Writer` instance."""
for component in 'reader', 'parser', 'writer':
assert not isinstance(getattr(self, component), str), (
'passed string "%s" as "%s" parameter; pass an instance, '
'or use the "%s_name" parameter instead (in '
'docutils.core.publish_* convenience functions).'
% (getattr(self, component), component, component))
self.source = source
"""The source of input data, a `docutils.io.Input` instance."""
self.source_class = source_class
"""The class for dynamically created source objects."""
self.destination = destination
"""The destination for docutils output, a `docutils.io.Output`
instance."""
self.destination_class = destination_class
"""The class for dynamically created destination objects."""
self.settings = settings
"""An object containing Docutils settings as instance attributes.
Set by `self.process_command_line()` or `self.get_settings()`."""
self._stderr = ErrorOutput()
def set_reader(self, reader_name, parser, parser_name):
"""Set `self.reader` by name."""
reader_class = readers.get_reader_class(reader_name)
self.reader = reader_class(parser, parser_name)
self.parser = self.reader.parser
def set_writer(self, writer_name):
"""Set `self.writer` by name."""
writer_class = writers.get_writer_class(writer_name)
self.writer = writer_class()
def set_components(self, reader_name, parser_name, writer_name):
if self.reader is None:
self.set_reader(reader_name, self.parser, parser_name)
if self.parser is None:
if self.reader.parser is None:
self.reader.set_parser(parser_name)
self.parser = self.reader.parser
if self.writer is None:
self.set_writer(writer_name)
def setup_option_parser(self, usage=None, description=None,
settings_spec=None, config_section=None,
**defaults):
if config_section:
if not settings_spec:
settings_spec = SettingsSpec()
settings_spec.config_section = config_section
parts = config_section.split()
if len(parts) > 1 and parts[-1] == 'application':
settings_spec.config_section_dependencies = ['applications']
#@@@ Add self.source & self.destination to components in future?
option_parser = OptionParser(
components=(self.parser, self.reader, self.writer, settings_spec),
defaults=defaults, read_config_files=True,
usage=usage, description=description)
return option_parser
def get_settings(self, usage=None, description=None,
settings_spec=None, config_section=None, **defaults):
"""
Set and return default settings (overrides in `defaults` dict).
Set components first (`self.set_reader` & `self.set_writer`).
Explicitly setting `self.settings` disables command line option
processing from `self.publish()`.
"""
option_parser = self.setup_option_parser(
usage, description, settings_spec, config_section, **defaults)
self.settings = option_parser.get_default_values()
return self.settings
def process_programmatic_settings(self, settings_spec,
settings_overrides,
config_section):
if self.settings is None:
defaults = (settings_overrides or {}).copy()
# Propagate exceptions by default when used programmatically:
defaults.setdefault('traceback', True)
self.get_settings(settings_spec=settings_spec,
config_section=config_section,
**defaults)
def process_command_line(self, argv=None, usage=None, description=None,
settings_spec=None, config_section=None,
**defaults):
"""
Pass an empty list to `argv` to avoid reading `sys.argv` (the
default).
Set components first (`self.set_reader` & `self.set_writer`).
"""
option_parser = self.setup_option_parser(
usage, description, settings_spec, config_section, **defaults)
if argv is None:
argv = sys.argv[1:]
# converting to Unicode (Python 3 does this automatically):
if sys.version_info < (3,0):
# TODO: make this failsafe and reversible?
argv_encoding = (frontend.locale_encoding or 'ascii')
argv = [a.decode(argv_encoding) for a in argv]
self.settings = option_parser.parse_args(argv)
def set_io(self, source_path=None, destination_path=None):
if self.source is None:
self.set_source(source_path=source_path)
if self.destination is None:
self.set_destination(destination_path=destination_path)
def set_source(self, source=None, source_path=None):
if source_path is None:
source_path = self.settings._source
else:
self.settings._source = source_path
        # Raise IOError instead of system exit with `traceback == True`
# TODO: change io.FileInput's default behaviour and remove this hack
try:
self.source = self.source_class(
source=source, source_path=source_path,
encoding=self.settings.input_encoding)
except TypeError:
self.source = self.source_class(
source=source, source_path=source_path,
encoding=self.settings.input_encoding)
def set_destination(self, destination=None, destination_path=None):
if destination_path is None:
destination_path = self.settings._destination
else:
self.settings._destination = destination_path
self.destination = self.destination_class(
destination=destination, destination_path=destination_path,
encoding=self.settings.output_encoding,
error_handler=self.settings.output_encoding_error_handler)
def apply_transforms(self):
self.document.transformer.populate_from_components(
(self.source, self.reader, self.reader.parser, self.writer,
self.destination))
self.document.transformer.apply_transforms()
def publish(self, argv=None, usage=None, description=None,
settings_spec=None, settings_overrides=None,
config_section=None, enable_exit_status=False):
"""
Process command line options and arguments (if `self.settings` not
already set), run `self.reader` and then `self.writer`. Return
`self.writer`'s output.
"""
exit = None
try:
if self.settings is None:
self.process_command_line(
argv, usage, description, settings_spec, config_section,
**(settings_overrides or {}))
self.set_io()
self.document = self.reader.read(self.source, self.parser,
self.settings)
self.apply_transforms()
output = self.writer.write(self.document, self.destination)
self.writer.assemble_parts()
except SystemExit, error:
exit = 1
exit_status = error.code
except Exception, error:
if not self.settings: # exception too early to report nicely
raise
if self.settings.traceback: # Propagate exceptions?
self.debugging_dumps()
raise
self.report_Exception(error)
exit = True
exit_status = 1
self.debugging_dumps()
if (enable_exit_status and self.document
and (self.document.reporter.max_level
>= self.settings.exit_status_level)):
sys.exit(self.document.reporter.max_level + 10)
elif exit:
sys.exit(exit_status)
return output
def debugging_dumps(self):
if not self.document:
return
if self.settings.dump_settings:
print >>self._stderr, '\n::: Runtime settings:'
print >>self._stderr, pprint.pformat(self.settings.__dict__)
if self.settings.dump_internals:
print >>self._stderr, '\n::: Document internals:'
print >>self._stderr, pprint.pformat(self.document.__dict__)
if self.settings.dump_transforms:
print >>self._stderr, '\n::: Transforms applied:'
print >>self._stderr, (' (priority, transform class, '
'pending node details, keyword args)')
print >>self._stderr, pprint.pformat(
[(priority, '%s.%s' % (xclass.__module__, xclass.__name__),
pending and pending.details, kwargs)
for priority, xclass, pending, kwargs
in self.document.transformer.applied])
if self.settings.dump_pseudo_xml:
print >>self._stderr, '\n::: Pseudo-XML:'
print >>self._stderr, self.document.pformat().encode(
'raw_unicode_escape')
def report_Exception(self, error):
if isinstance(error, utils.SystemMessage):
self.report_SystemMessage(error)
elif isinstance(error, UnicodeEncodeError):
self.report_UnicodeError(error)
elif isinstance(error, io.InputError):
self._stderr.write(u'Unable to open source file for reading:\n'
u' %s\n' % ErrorString(error))
elif isinstance(error, io.OutputError):
self._stderr.write(
u'Unable to open destination file for writing:\n'
u' %s\n' % ErrorString(error))
else:
print >>self._stderr, u'%s' % ErrorString(error)
print >>self._stderr, ("""\
Exiting due to error. Use "--traceback" to diagnose.
Please report errors to <[email protected]>.
Include "--traceback" output, Docutils version (%s [%s]),
Python version (%s), your OS type & version, and the
command line used.""" % (__version__, __version_details__,
sys.version.split()[0]))
def report_SystemMessage(self, error):
print >>self._stderr, ('Exiting due to level-%s (%s) system message.'
% (error.level,
utils.Reporter.levels[error.level]))
def report_UnicodeError(self, error):
data = error.object[error.start:error.end]
self._stderr.write(
'%s\n'
'\n'
'The specified output encoding (%s) cannot\n'
'handle all of the output.\n'
'Try setting "--output-encoding-error-handler" to\n'
'\n'
'* "xmlcharrefreplace" (for HTML & XML output);\n'
' the output will contain "%s" and should be usable.\n'
'* "backslashreplace" (for other output formats);\n'
' look for "%s" in the output.\n'
'* "replace"; look for "?" in the output.\n'
'\n'
'"--output-encoding-error-handler" is currently set to "%s".\n'
'\n'
'Exiting due to error. Use "--traceback" to diagnose.\n'
'If the advice above doesn\'t eliminate the error,\n'
'please report it to <[email protected]>.\n'
'Include "--traceback" output, Docutils version (%s),\n'
'Python version (%s), your OS type & version, and the\n'
'command line used.\n'
% (ErrorString(error),
self.settings.output_encoding,
data.encode('ascii', 'xmlcharrefreplace'),
data.encode('ascii', 'backslashreplace'),
self.settings.output_encoding_error_handler,
__version__, sys.version.split()[0]))
default_usage = '%prog [options] [<source> [<destination>]]'
default_description = ('Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sf.net/docs/user/config.html> for '
'the full reference.')
def publish_cmdline(reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=True, argv=None,
usage=default_usage, description=default_description):
"""
Set up & run a `Publisher` for command-line-based file I/O (input and
output file paths taken automatically from the command line). Return the
encoded string output also.
Parameters: see `publish_programmatically` for the remainder.
- `argv`: Command-line argument list to use instead of ``sys.argv[1:]``.
- `usage`: Usage string, output if there's a problem parsing the command
line.
- `description`: Program description, output for the "--help" option
(along with command-line option descriptions).
"""
pub = Publisher(reader, parser, writer, settings=settings)
pub.set_components(reader_name, parser_name, writer_name)
output = pub.publish(
argv, usage, description, settings_spec, settings_overrides,
config_section=config_section, enable_exit_status=enable_exit_status)
return output
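# Usage sketch: a minimal command-line front end built on publish_cmdline, in
# the style of the rst2* scripts; the description string here is illustrative.
#
#     from docutils.core import publish_cmdline, default_description
#     publish_cmdline(writer_name='html',
#                     description='Generates HTML from reStructuredText. '
#                                 + default_description)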
def publish_file(source=None, source_path=None,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None, settings_overrides=None,
config_section=None, enable_exit_status=False):
"""
Set up & run a `Publisher` for programmatic use with file-like I/O.
Return the encoded string output also.
Parameters: see `publish_programmatically`.
"""
output, pub = publish_programmatically(
source_class=io.FileInput, source=source, source_path=source_path,
destination_class=io.FileOutput,
destination=destination, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return output
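# Usage sketch: file-to-file conversion with publish_file; the paths below are
# illustrative only.
#
#     from docutils.core import publish_file
#     publish_file(source_path='README.txt', destination_path='README.html',
#                  writer_name='html')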
def publish_string(source, source_path=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=False):
"""
Set up & run a `Publisher` for programmatic use with string I/O. Return
the encoded string or Unicode string output.
For encoded string output, be sure to set the 'output_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
output. Here's one way::
publish_string(..., settings_overrides={'output_encoding': 'unicode'})
Similarly for Unicode string input (`source`)::
publish_string(..., settings_overrides={'input_encoding': 'unicode'})
Parameters: see `publish_programmatically`.
"""
output, pub = publish_programmatically(
source_class=io.StringInput, source=source, source_path=source_path,
destination_class=io.StringOutput,
destination=None, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return output
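# Usage sketch: rendering a reStructuredText string to HTML in memory. Setting
# 'output_encoding' to 'unicode' returns a Unicode string instead of an encoded
# byte string, as described in the docstring above; the input text is illustrative.
#
#     from docutils.core import publish_string
#     html = publish_string(source='Hello, *world*!', writer_name='html',
#                           settings_overrides={'output_encoding': 'unicode'})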
def publish_parts(source, source_path=None, source_class=io.StringInput,
destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=False):
"""
Set up & run a `Publisher`, and return a dictionary of document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
encoding is up to the client. For programmatic use with string I/O.
For encoded string input, be sure to set the 'input_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
input. Here's how::
publish_parts(..., settings_overrides={'input_encoding': 'unicode'})
Parameters: see `publish_programmatically`.
"""
output, pub = publish_programmatically(
source=source, source_path=source_path, source_class=source_class,
destination_class=io.StringOutput,
destination=None, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return pub.writer.parts
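# Usage sketch: publish_parts exposes named fragments of the rendered document,
# so a caller can embed only the body of the generated HTML. The 'html_body'
# key is provided by the HTML writer; the input text is illustrative.
#
#     from docutils.core import publish_parts
#     parts = publish_parts(source='*hello*', writer_name='html')
#     body = parts['html_body']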
def publish_doctree(source, source_path=None,
source_class=io.StringInput,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=False):
"""
Set up & run a `Publisher` for programmatic use with string I/O.
Return the document tree.
For encoded string input, be sure to set the 'input_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
input. Here's one way::
publish_doctree(..., settings_overrides={'input_encoding': 'unicode'})
Parameters: see `publish_programmatically`.
"""
pub = Publisher(reader=reader, parser=parser, writer=None,
settings=settings,
source_class=source_class,
destination_class=io.NullOutput)
pub.set_components(reader_name, parser_name, 'null')
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
pub.set_source(source, source_path)
pub.set_destination(None, None)
output = pub.publish(enable_exit_status=enable_exit_status)
return pub.document
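# Usage sketch: parsing source into a document tree for programmatic inspection
# or transformation before (optionally) rendering it with publish_from_doctree.
#
#     from docutils.core import publish_doctree
#     doctree = publish_doctree('Title\n=====\n\nSome text.')
#     print doctree.astext()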
def publish_from_doctree(document, destination_path=None,
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=False):
"""
Set up & run a `Publisher` to render from an existing document
tree data structure, for programmatic use with string I/O. Return
the encoded string output.
Note that document.settings is overridden; if you want to use the settings
of the original `document`, pass settings=document.settings.
Also, new document.transformer and document.reporter objects are
generated.
For encoded string output, be sure to set the 'output_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
output. Here's one way::
publish_from_doctree(
..., settings_overrides={'output_encoding': 'unicode'})
Parameters: `document` is a `docutils.nodes.document` object, an existing
document tree.
Other parameters: see `publish_programmatically`.
"""
reader = docutils.readers.doctree.Reader(parser_name='null')
pub = Publisher(reader, None, writer,
source=io.DocTreeInput(document),
destination_class=io.StringOutput, settings=settings)
if not writer and writer_name:
pub.set_writer(writer_name)
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
pub.set_destination(None, destination_path)
return pub.publish(enable_exit_status=enable_exit_status)
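# Usage sketch: rendering a tree produced by publish_doctree. Passing
# settings=document.settings, as noted in the docstring, preserves the settings
# used when the tree was built; the input text is illustrative.
#
#     from docutils.core import publish_doctree, publish_from_doctree
#     doctree = publish_doctree('*hello*')
#     html = publish_from_doctree(doctree, writer_name='html',
#                                 settings_overrides={'output_encoding': 'unicode'})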
def publish_cmdline_to_binary(reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=True, argv=None,
usage=default_usage, description=default_description,
destination=None, destination_class=io.BinaryFileOutput
):
"""
Set up & run a `Publisher` for command-line-based file I/O (input and
output file paths taken automatically from the command line). Return the
encoded string output also.
This is just like publish_cmdline, except that it uses
io.BinaryFileOutput instead of io.FileOutput.
Parameters: see `publish_programmatically` for the remainder.
- `argv`: Command-line argument list to use instead of ``sys.argv[1:]``.
- `usage`: Usage string, output if there's a problem parsing the command
line.
- `description`: Program description, output for the "--help" option
(along with command-line option descriptions).
"""
pub = Publisher(reader, parser, writer, settings=settings,
destination_class=destination_class)
pub.set_components(reader_name, parser_name, writer_name)
output = pub.publish(
argv, usage, description, settings_spec, settings_overrides,
config_section=config_section, enable_exit_status=enable_exit_status)
return output
def publish_programmatically(source_class, source, source_path,
destination_class, destination, destination_path,
reader, reader_name,
parser, parser_name,
writer, writer_name,
settings, settings_spec,
settings_overrides, config_section,
enable_exit_status):
"""
Set up & run a `Publisher` for custom programmatic use. Return the
encoded string output and the Publisher object.
Applications should not need to call this function directly. If it does
seem to be necessary to call this function directly, please write to the
Docutils-develop mailing list
<http://docutils.sf.net/docs/user/mailing-lists.html#docutils-develop>.
Parameters:
* `source_class` **required**: The class for dynamically created source
objects. Typically `io.FileInput` or `io.StringInput`.
* `source`: Type depends on `source_class`:
- If `source_class` is `io.FileInput`: Either a file-like object
(must have 'read' and 'close' methods), or ``None``
(`source_path` is opened). If neither `source` nor
`source_path` are supplied, `sys.stdin` is used.
- If `source_class` is `io.StringInput` **required**: The input
string, either an encoded 8-bit string (set the
'input_encoding' setting to the correct encoding) or a Unicode
string (set the 'input_encoding' setting to 'unicode').
* `source_path`: Type depends on `source_class`:
- `io.FileInput`: Path to the input file, opened if no `source`
supplied.
- `io.StringInput`: Optional. Path to the file or object that produced
`source`. Only used for diagnostic output.
* `destination_class` **required**: The class for dynamically created
destination objects. Typically `io.FileOutput` or `io.StringOutput`.
* `destination`: Type depends on `destination_class`:
- `io.FileOutput`: Either a file-like object (must have 'write' and
'close' methods), or ``None`` (`destination_path` is opened). If
neither `destination` nor `destination_path` are supplied,
`sys.stdout` is used.
- `io.StringOutput`: Not used; pass ``None``.
* `destination_path`: Type depends on `destination_class`:
- `io.FileOutput`: Path to the output file. Opened if no `destination`
supplied.
- `io.StringOutput`: Path to the file or object which will receive the
output; optional. Used for determining relative paths (stylesheets,
source links, etc.).
* `reader`: A `docutils.readers.Reader` object.
* `reader_name`: Name or alias of the Reader class to be instantiated if
no `reader` supplied.
* `parser`: A `docutils.parsers.Parser` object.
* `parser_name`: Name or alias of the Parser class to be instantiated if
no `parser` supplied.
* `writer`: A `docutils.writers.Writer` object.
* `writer_name`: Name or alias of the Writer class to be instantiated if
no `writer` supplied.
* `settings`: A runtime settings (`docutils.frontend.Values`) object, for
dotted-attribute access to runtime settings. It's the end result of the
`SettingsSpec`, config file, and option processing. If `settings` is
passed, it's assumed to be complete and no further setting/config/option
processing is done.
* `settings_spec`: A `docutils.SettingsSpec` subclass or object. Provides
extra application-specific settings definitions independently of
components. In other words, the application becomes a component, and
its settings data is processed along with that of the other components.
Used only if no `settings` specified.
* `settings_overrides`: A dictionary containing application-specific
settings defaults that override the defaults of other components.
Used only if no `settings` specified.
* `config_section`: A string, the name of the configuration file section
for this application. Overrides the ``config_section`` attribute
defined by `settings_spec`. Used only if no `settings` specified.
* `enable_exit_status`: Boolean; enable exit status at end of processing?
"""
pub = Publisher(reader, parser, writer, settings=settings,
source_class=source_class,
destination_class=destination_class)
pub.set_components(reader_name, parser_name, writer_name)
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
pub.set_source(source, source_path)
pub.set_destination(destination, destination_path)
output = pub.publish(enable_exit_status=enable_exit_status)
return output, pub
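# Usage sketch: a direct call with string I/O, roughly what publish_string does
# internally. Most applications should prefer the wrappers above.
#
#     output, pub = publish_programmatically(
#         source_class=io.StringInput, source='*hello*', source_path=None,
#         destination_class=io.StringOutput, destination=None,
#         destination_path=None,
#         reader=None, reader_name='standalone',
#         parser=None, parser_name='restructuredtext',
#         writer=None, writer_name='html',
#         settings=None, settings_spec=None, settings_overrides=None,
#         config_section=None, enable_exit_status=False)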
| apache-2.0 |
alfa-jor/addon | plugin.video.alfa/lib/sambatools/smb/utils/U32.py | 2 | 5139 | # U32.py implements 32-bit unsigned int class for Python
# Version 1.0
# Copyright (C) 2001-2002 Dmitry Rozmanov
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# e-mail: [email protected]
#
#====================================================================
C = 0x1000000000L
#--------------------------------------------------------------------
def norm(n):
return n & 0xFFFFFFFFL
#====================================================================
class U32:
v = 0L
#--------------------------------------------------------------------
def __init__(self, value = 0):
self.v = C + norm(abs(long(value)))
#--------------------------------------------------------------------
def set(self, value = 0):
self.v = C + norm(abs(long(value)))
#--------------------------------------------------------------------
def __repr__(self):
return hex(norm(self.v))
#--------------------------------------------------------------------
def __long__(self): return long(norm(self.v))
#--------------------------------------------------------------------
def __int__(self): return int(norm(self.v))
#--------------------------------------------------------------------
def __chr__(self): return chr(norm(self.v))
#--------------------------------------------------------------------
def __add__(self, b):
r = U32()
r.v = C + norm(self.v + b.v)
return r
#--------------------------------------------------------------------
def __sub__(self, b):
r = U32()
if self.v < b.v:
r.v = C + norm(0x100000000L - (b.v - self.v))
else: r.v = C + norm(self.v - b.v)
return r
#--------------------------------------------------------------------
def __mul__(self, b):
r = U32()
r.v = C + norm(self.v * b.v)
return r
#--------------------------------------------------------------------
def __div__(self, b):
r = U32()
r.v = C + (norm(self.v) / norm(b.v))
return r
#--------------------------------------------------------------------
def __mod__(self, b):
r = U32()
r.v = C + (norm(self.v) % norm(b.v))
return r
#--------------------------------------------------------------------
def __neg__(self): return U32(self.v)
#--------------------------------------------------------------------
def __pos__(self): return U32(self.v)
#--------------------------------------------------------------------
def __abs__(self): return U32(self.v)
#--------------------------------------------------------------------
def __invert__(self):
r = U32()
r.v = C + norm(~self.v)
return r
#--------------------------------------------------------------------
def __lshift__(self, b):
r = U32()
r.v = C + norm(self.v << b)
return r
#--------------------------------------------------------------------
def __rshift__(self, b):
r = U32()
r.v = C + (norm(self.v) >> b)
return r
#--------------------------------------------------------------------
def __and__(self, b):
r = U32()
r.v = C + norm(self.v & b.v)
return r
#--------------------------------------------------------------------
def __or__(self, b):
r = U32()
r.v = C + norm(self.v | b.v)
return r
#--------------------------------------------------------------------
def __xor__(self, b):
r = U32()
r.v = C + norm(self.v ^ b.v)
return r
#--------------------------------------------------------------------
def __not__(self):
return U32(not norm(self.v))
#--------------------------------------------------------------------
def truth(self):
return norm(self.v)
#--------------------------------------------------------------------
def __cmp__(self, b):
if norm(self.v) > norm(b.v): return 1
elif norm(self.v) < norm(b.v): return -1
else: return 0
#--------------------------------------------------------------------
def __nonzero__(self):
return norm(self.v)
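    #--------------------------------------------------------------------
    # Usage sketch: arithmetic wraps around modulo 2**32, which is the point of
    # this class (plain Python longs would keep growing instead). Values shown
    # are what __repr__ produces under Python 2.
    #
    #     a = U32(0xFFFFFFFFL)
    #     b = a + U32(1)      # repr(b) == '0x0L'  -- wrapped around
    #     c = a << 4          # repr(c) == '0xfffffff0L'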
| gpl-3.0 |
bloodes/python | fixture/address.py | 1 | 9007 |
from model.address import Adress
import re
class Address:
def __init__(self, app):
self.app = app
def create_new_address(self, address):
wd = self.app.wd
# open home page
self.app.open_home_page_address()
# add_new_address
wd.find_element_by_link_text("add new").click()
# fill_information_about_somebody
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys("%s" % address.firstname)
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys("%s" % address.middlename)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys("%s" % address.lastname)
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys("%s" % address.nickname)
wd.find_element_by_name("theform").click()
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").clear()
wd.find_element_by_name("title").send_keys("%s" % address.title)
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys("%s" % address.company)
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys("%s" % address.address)
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys("%s" % address.home)
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys("%s" % address.mobile)
wd.find_element_by_name("work").click()
wd.find_element_by_name("work").clear()
wd.find_element_by_name("work").send_keys("%s" % address.work)
wd.find_element_by_name("fax").click()
wd.find_element_by_name("fax").clear()
wd.find_element_by_name("fax").send_keys("%s" % address.fax)
wd.find_element_by_name("homepage").click()
wd.find_element_by_name("homepage").clear()
wd.find_element_by_name("homepage").send_keys("%s" % address.homepage)
wd.find_element_by_name("address2").click()
wd.find_element_by_name("address2").clear()
wd.find_element_by_name("address2").send_keys("%s" % address.address2)
wd.find_element_by_name("phone2").click()
wd.find_element_by_name("phone2").clear()
wd.find_element_by_name("phone2").send_keys("%s" % address.phone2)
wd.find_element_by_name("notes").click()
wd.find_element_by_name("notes").clear()
wd.find_element_by_name("notes").send_keys("%s" % address.notes)
# submit_address_create
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.address_cache = None
def delete_first_address(self):
self.delete_some_address(0)
def delete_some_address(self, index):
wd = self.app.wd
# open home page
self.app.open_home_page_address()
self.select_address_by_index(index)
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to_alert().accept()
self.address_cache = None
def select_first_address(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
def select_address_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def count(self):
wd = self.app.wd
self.app.open_home_page_address()
return len(wd.find_elements_by_name("selected[]"))
def fill_group_form(self, address):
wd = self.app.wd
self.change_field_value("firstname", address.firstname)
self.change_field_value("middlename", address.middlename)
self.change_field_value("lastname", address.lastname)
self.change_field_value("nickname", address.nickname)
self.change_field_value("title", address.title)
self.change_field_value("company", address.company)
self.change_field_value("address", address.address)
self.change_field_value("home", address.home)
self.change_field_value("mobile", address.mobile)
self.change_field_value("work", address.work)
self.change_field_value("fax", address.fax)
self.change_field_value("homepage", address.homepage)
self.change_field_value("address2", address.address2)
self.change_field_value("phone2", address.phone2)
self.change_field_value("notes", address.notes)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def modify_some_address(self, index, new_address_data):
wd = self.app.wd
self.app.open_home_page_address()
self.select_address_by_index(index)
# open modification form
wd.find_element_by_xpath("//div[@id='content']/form[@name='MainForm']/table/tbody/tr["+str(index+2)+"]/td[8]/a/img").click()
# fill group form
self.fill_group_form(new_address_data)
# submit modification
wd.find_element_by_name("update").click()
self.address_cache = None
    def modify_first_address(self, new_address_data):
        self.modify_some_address(0, new_address_data)
def sometimes_add_new_address(self):
if self.count() == 0:
self.create_new_address(
Adress(firstname="efwgwe", middlename="gweegweggeweg", lastname="wgewegwegwegweg",
nickname="wegwegwegeggg", title="egegegweg", company="dfgfgdfgdgdf",
address="rgergerrherg", home="rgrgerger", mobile="rgegrrg", work="fgfgbfb",
fax="rgergeg", homepage="dfhhdfhhd", address2="fhdhdfhfhdf", phone2="ddhfdfbfbd",
notes="dfhhdhfhdhfh"))
address_cache = None
def get_address_list(self):
if self.address_cache is None:
wd = self.app.wd
self.app.open_home_page_address()
self.address_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
firstname = cells[1].text
lastname = cells[2].text
id = cells[0].find_element_by_tag_name('input').get_attribute('value')
all_phones = cells[5].text.splitlines()
self.address_cache.append(Adress(firstname = firstname, id = id,lastname = lastname, home = all_phones[0] , mobile = all_phones[1], work = all_phones[2], phone2 = all_phones[3]))
return list(self.address_cache)
def open_contact_to_edit_by_index(self, index):
wd = self.app.wd
self.app.open_home_page_address()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[7]
cell.find_element_by_tag_name('a').click()
def open_contact_view_by_index(self, index):
wd = self.app.wd
self.app.open_home_page_address()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[6]
cell.find_element_by_tag_name('a').click()
def get_contact_info_from_edit_page(self,index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
home = wd.find_element_by_name("home").get_attribute("value")
mobile = wd.find_element_by_name("mobile").get_attribute("value")
work = wd.find_element_by_name("work").get_attribute("value")
phone2 = wd.find_element_by_name("phone2").get_attribute("value")
return Adress(firstname = firstname, id = id, home = home, mobile = mobile, work=work, phone2 = phone2, lastname = lastname )
def get_address_from_view_page(self, index):
wd = self.app.wd
self.open_contact_view_by_index(index)
text = wd.find_element_by_id('content').text
home = re.search ("H: (.*)", text).group(1)
work = re.search ("W: (.*)", text).group(1)
mobile = re.search ("M: (.*)", text).group(1)
phone2 = re.search ("P: (.*)", text).group(1)
return Adress (home = home, mobile = mobile, work=work, phone2 = phone2)
| apache-2.0 |
maurofaccenda/ansible | lib/ansible/modules/cloud/openstack/os_user_role.py | 49 | 6456 | #!/usr/bin/python
# Copyright (c) 2016 IBM
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_user_role
short_description: Associate OpenStack Identity users and roles
extends_documentation_fragment: openstack
author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)"
version_added: "2.1"
description:
- Grant and revoke roles in either project or domain context for
OpenStack Identity Users.
options:
role:
description:
- Name or ID for the role.
required: true
user:
description:
- Name or ID for the user. If I(user) is not specified, then
I(group) is required. Both may not be specified.
required: false
default: null
group:
description:
- Name or ID for the group. Valid only with keystone version 3.
If I(group) is not specified, then I(user) is required. Both
may not be specified.
required: false
default: null
project:
description:
- Name or ID of the project to scope the role association to.
If you are using keystone version 2, then this value is required.
required: false
default: null
domain:
description:
- ID of the domain to scope the role association to. Valid only with
keystone version 3, and required if I(project) is not specified.
required: false
default: null
state:
description:
- Should the roles be present or absent on the user.
choices: [present, absent]
default: present
availability_zone:
description:
      - Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Grant an admin role on the user admin in the project project1
- os_user_role:
cloud: mycloud
user: admin
role: admin
project: project1
# Revoke the admin role from the user barney in the newyork domain
- os_user_role:
cloud: mycloud
state: absent
user: barney
role: admin
domain: newyork
'''
RETURN = '''
#
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
def _system_state_change(state, assignment):
if state == 'present' and not assignment:
return True
elif state == 'absent' and assignment:
return True
return False
def _build_kwargs(user, group, project, domain):
kwargs = {}
if user:
kwargs['user'] = user
if group:
kwargs['group'] = group
if project:
kwargs['project'] = project
if domain:
kwargs['domain'] = domain
return kwargs
def main():
argument_spec = openstack_full_argument_spec(
role=dict(required=True),
user=dict(required=False),
group=dict(required=False),
project=dict(required=False),
domain=dict(required=False),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
required_one_of=[
['user', 'group']
])
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
# role grant/revoke API introduced in 1.5.0
if not HAS_SHADE or (StrictVersion(shade.__version__) < StrictVersion('1.5.0')):
module.fail_json(msg='shade 1.5.0 or higher is required for this module')
role = module.params.pop('role')
user = module.params.pop('user')
group = module.params.pop('group')
project = module.params.pop('project')
domain = module.params.pop('domain')
state = module.params.pop('state')
try:
cloud = shade.operator_cloud(**module.params)
filters = {}
r = cloud.get_role(role)
if r is None:
module.fail_json(msg="Role %s is not valid" % role)
filters['role'] = r['id']
if user:
u = cloud.get_user(user)
if u is None:
module.fail_json(msg="User %s is not valid" % user)
filters['user'] = u['id']
if group:
g = cloud.get_group(group)
if g is None:
module.fail_json(msg="Group %s is not valid" % group)
filters['group'] = g['id']
if domain:
d = cloud.get_domain(domain)
if d is None:
module.fail_json(msg="Domain %s is not valid" % domain)
filters['domain'] = d['id']
if project:
if domain:
p = cloud.get_project(project, domain_id=filters['domain'])
else:
p = cloud.get_project(project)
if p is None:
module.fail_json(msg="Project %s is not valid" % project)
filters['project'] = p['id']
assignment = cloud.list_role_assignments(filters=filters)
if module.check_mode:
module.exit_json(changed=_system_state_change(state, assignment))
changed = False
if state == 'present':
if not assignment:
kwargs = _build_kwargs(user, group, project, domain)
cloud.grant_role(role, **kwargs)
changed = True
elif state == 'absent':
if assignment:
kwargs = _build_kwargs(user, group, project, domain)
cloud.revoke_role(role, **kwargs)
                changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
jfarcher/checkinapi | flask/lib/python2.7/site-packages/click/parser.py | 199 | 15510 | # -*- coding: utf-8 -*-
"""
click.parser
~~~~~~~~~~~~
This module started out as largely a copy paste from the stdlib's
optparse module with the features removed that we do not need from
optparse because we implement them in Click on a higher level (for
instance type handling, help formatting and a lot more).
The plan is to remove more and more from here over time.
The reason this is a different module and not optparse from the stdlib
is that there are differences in 2.x and 3.x about the error messages
generated and optparse in the stdlib uses gettext for no good reason
and might cause us issues.
"""
import re
from collections import deque
from .exceptions import UsageError, NoSuchOption, BadOptionUsage, \
BadArgumentUsage
def _unpack_args(args, nargs_spec):
"""Given an iterable of arguments and an iterable of nargs specifications,
it returns a tuple with all the unpacked arguments at the first index
and all remaining arguments as the second.
The nargs specification is the number of arguments that should be consumed
or `-1` to indicate that this position should eat up all the remainders.
Missing items are filled with `None`.
"""
args = deque(args)
nargs_spec = deque(nargs_spec)
rv = []
spos = None
def _fetch(c):
try:
if spos is None:
return c.popleft()
else:
return c.pop()
except IndexError:
return None
while nargs_spec:
nargs = _fetch(nargs_spec)
if nargs == 1:
rv.append(_fetch(args))
elif nargs > 1:
x = [_fetch(args) for _ in range(nargs)]
# If we're reversed, we're pulling in the arguments in reverse,
# so we need to turn them around.
if spos is not None:
x.reverse()
rv.append(tuple(x))
elif nargs < 0:
if spos is not None:
raise TypeError('Cannot have two nargs < 0')
spos = len(rv)
rv.append(None)
# spos is the position of the wildcard (star). If it's not `None`,
# we fill it with the remainder.
if spos is not None:
rv[spos] = tuple(args)
args = []
rv[spos + 1:] = reversed(rv[spos + 1:])
return tuple(rv), list(args)
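# Example of the unpacking above: a spec of [1, -1] consumes one positional
# value and lets the wildcard swallow the rest.
#
#     _unpack_args(['a', 'b', 'c'], [1, -1])
#     # -> (('a', ('b', 'c')), [])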
def _error_opt_args(nargs, opt):
if nargs == 1:
raise BadOptionUsage('%s option requires an argument' % opt)
raise BadOptionUsage('%s option requires %d arguments' % (opt, nargs))
def split_opt(opt):
first = opt[:1]
if first.isalnum():
return '', opt
if opt[1:2] == first:
return opt[:2], opt[2:]
return first, opt[1:]
def normalize_opt(opt, ctx):
if ctx is None or ctx.token_normalize_func is None:
return opt
prefix, opt = split_opt(opt)
return prefix + ctx.token_normalize_func(opt)
def split_arg_string(string):
"""Given an argument string this attempts to split it into small parts."""
rv = []
for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)"'
r'|\S+)\s*', string, re.S):
arg = match.group().strip()
if arg[:1] == arg[-1:] and arg[:1] in '"\'':
arg = arg[1:-1].encode('ascii', 'backslashreplace') \
.decode('unicode-escape')
try:
arg = type(string)(arg)
except UnicodeError:
pass
rv.append(arg)
return rv
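# Example: quoting groups words into a single argument.
#
#     split_arg_string('run "hello world" --debug')
#     # -> ['run', 'hello world', '--debug']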
class Option(object):
def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None):
self._short_opts = []
self._long_opts = []
self.prefixes = set()
for opt in opts:
prefix, value = split_opt(opt)
if not prefix:
raise ValueError('Invalid start character for option (%s)'
% opt)
self.prefixes.add(prefix[0])
if len(prefix) == 1 and len(value) == 1:
self._short_opts.append(opt)
else:
self._long_opts.append(opt)
self.prefixes.add(prefix)
if action is None:
action = 'store'
self.dest = dest
self.action = action
self.nargs = nargs
self.const = const
self.obj = obj
@property
def takes_value(self):
return self.action in ('store', 'append')
def process(self, value, state):
if self.action == 'store':
state.opts[self.dest] = value
elif self.action == 'store_const':
state.opts[self.dest] = self.const
elif self.action == 'append':
state.opts.setdefault(self.dest, []).append(value)
elif self.action == 'append_const':
state.opts.setdefault(self.dest, []).append(self.const)
elif self.action == 'count':
state.opts[self.dest] = state.opts.get(self.dest, 0) + 1
else:
raise ValueError('unknown action %r' % self.action)
state.order.append(self.obj)
class Argument(object):
def __init__(self, dest, nargs=1, obj=None):
self.dest = dest
self.nargs = nargs
self.obj = obj
def process(self, value, state):
if self.nargs > 1:
holes = sum(1 for x in value if x is None)
if holes == len(value):
value = None
elif holes != 0:
raise BadArgumentUsage('argument %s takes %d values'
% (self.dest, self.nargs))
state.opts[self.dest] = value
state.order.append(self.obj)
class ParsingState(object):
def __init__(self, rargs):
self.opts = {}
self.largs = []
self.rargs = rargs
self.order = []
class OptionParser(object):
"""The option parser is an internal class that is ultimately used to
parse options and arguments. It's modelled after optparse and brings
a similar but vastly simplified API. It should generally not be used
directly as the high level Click classes wrap it for you.
It's not nearly as extensible as optparse or argparse as it does not
implement features that are implemented on a higher level (such as
types or defaults).
:param ctx: optionally the :class:`~click.Context` where this parser
should go with.
"""
def __init__(self, ctx=None):
#: The :class:`~click.Context` for this parser. This might be
#: `None` for some advanced use cases.
self.ctx = ctx
#: This controls how the parser deals with interspersed arguments.
#: If this is set to `False`, the parser will stop on the first
#: non-option. Click uses this to implement nested subcommands
#: safely.
self.allow_interspersed_args = True
#: This tells the parser how to deal with unknown options. By
#: default it will error out (which is sensible), but there is a
#: second mode where it will ignore it and continue processing
#: after shifting all the unknown options into the resulting args.
self.ignore_unknown_options = False
if ctx is not None:
self.allow_interspersed_args = ctx.allow_interspersed_args
self.ignore_unknown_options = ctx.ignore_unknown_options
self._short_opt = {}
self._long_opt = {}
self._opt_prefixes = set(['-', '--'])
self._args = []
def add_option(self, opts, dest, action=None, nargs=1, const=None,
obj=None):
"""Adds a new option named `dest` to the parser. The destination
is not inferred (unlike with optparse) and needs to be explicitly
provided. Action can be any of ``store``, ``store_const``,
``append``, ``appnd_const`` or ``count``.
The `obj` can be used to identify the option in the order list
that is returned from the parser.
"""
if obj is None:
obj = dest
opts = [normalize_opt(opt, self.ctx) for opt in opts]
option = Option(opts, dest, action=action, nargs=nargs,
const=const, obj=obj)
self._opt_prefixes.update(option.prefixes)
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
def add_argument(self, dest, nargs=1, obj=None):
"""Adds a positional argument named `dest` to the parser.
The `obj` can be used to identify the option in the order list
that is returned from the parser.
"""
if obj is None:
obj = dest
self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))
def parse_args(self, args):
"""Parses positional arguments and returns ``(values, args, order)``
for the parsed options and arguments as well as the leftover
arguments if there are any. The order is a list of objects as they
appear on the command line. If arguments appear multiple times they
will be memorized multiple times as well.
"""
state = ParsingState(args)
try:
self._process_args_for_options(state)
self._process_args_for_args(state)
except UsageError:
if self.ctx is None or not self.ctx.resilient_parsing:
raise
return state.opts, state.largs, state.order
def _process_args_for_args(self, state):
pargs, args = _unpack_args(state.largs + state.rargs,
[x.nargs for x in self._args])
for idx, arg in enumerate(self._args):
arg.process(pargs[idx], state)
state.largs = args
state.rargs = []
def _process_args_for_options(self, state):
while state.rargs:
arg = state.rargs.pop(0)
arglen = len(arg)
# Double dashes always handled explicitly regardless of what
# prefixes are valid.
if arg == '--':
return
elif arg[:1] in self._opt_prefixes and arglen > 1:
self._process_opts(arg, state)
elif self.allow_interspersed_args:
state.largs.append(arg)
else:
state.rargs.insert(0, arg)
return
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(self, opt, explicit_value, state):
if opt not in self._long_opt:
possibilities = [word for word in self._long_opt
if word.startswith(opt)]
raise NoSuchOption(opt, possibilities=possibilities)
option = self._long_opt[opt]
if option.takes_value:
# At this point it's safe to modify rargs by injecting the
# explicit value, because no exception is raised in this
# branch. This means that the inserted value will be fully
# consumed.
if explicit_value is not None:
state.rargs.insert(0, explicit_value)
nargs = option.nargs
if len(state.rargs) < nargs:
_error_opt_args(nargs, opt)
elif nargs == 1:
value = state.rargs.pop(0)
else:
value = tuple(state.rargs[:nargs])
del state.rargs[:nargs]
elif explicit_value is not None:
raise BadOptionUsage('%s option does not take a value' % opt)
else:
value = None
option.process(value, state)
def _match_short_opt(self, arg, state):
stop = False
i = 1
prefix = arg[0]
unknown_options = []
for ch in arg[1:]:
opt = normalize_opt(prefix + ch, self.ctx)
option = self._short_opt.get(opt)
i += 1
if not option:
if self.ignore_unknown_options:
unknown_options.append(ch)
continue
raise NoSuchOption(opt)
if option.takes_value:
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
state.rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(state.rargs) < nargs:
_error_opt_args(nargs, opt)
elif nargs == 1:
value = state.rargs.pop(0)
else:
value = tuple(state.rargs[:nargs])
del state.rargs[:nargs]
else:
value = None
option.process(value, state)
if stop:
break
# If we got any unknown options we re-combinate the string of the
# remaining options and re-attach the prefix, then report that
# to the state as new larg. This way there is basic combinatorics
# that can be achieved while still ignoring unknown arguments.
if self.ignore_unknown_options and unknown_options:
state.largs.append(prefix + ''.join(unknown_options))
def _process_opts(self, arg, state):
explicit_value = None
# Long option handling happens in two parts. The first part is
# supporting explicitly attached values. In any case, we will try
# to long match the option first.
if '=' in arg:
long_opt, explicit_value = arg.split('=', 1)
else:
long_opt = arg
norm_long_opt = normalize_opt(long_opt, self.ctx)
# At this point we will match the (assumed) long option through
# the long option matching code. Note that this allows options
# like "-foo" to be matched as long options.
try:
self._match_long_opt(norm_long_opt, explicit_value, state)
except NoSuchOption:
# At this point the long option matching failed, and we need
# to try with short options. However there is a special rule
# which says, that if we have a two character options prefix
# (applies to "--foo" for instance), we do not dispatch to the
# short option code and will instead raise the no option
# error.
if arg[:2] not in self._opt_prefixes:
return self._match_short_opt(arg, state)
if not self.ignore_unknown_options:
raise
state.largs.append(arg)
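# Usage sketch: the low-level parser on its own, outside of Click's high-level
# classes. parse_args returns the option values, the leftover arguments and the
# order in which options/arguments appeared on the command line.
#
#     parser = OptionParser()
#     parser.add_option(['-n', '--name'], dest='name')
#     parser.add_argument('src', nargs=1)
#     opts, args, order = parser.parse_args(['--name', 'demo', 'input.txt'])
#     # opts == {'name': 'demo', 'src': 'input.txt'}, args == []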
| gpl-3.0 |
konne88/MediaCMN | filter/options.py | 1 | 3243 | # MediaCMN - Tools to create a consistent music library.
# Copyright (C) 2009 Konstantin Weitz
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from share.options import CommonCmnOptions
from share.options import check
#http://www.rubyrobot.org/article/duplicate-a-mysql-database
#mysqladmin create DB_name -u DB_user --password=DB_pass mysqldump -u DB_user --password=DB_pass DB_name | mysql -u DB_user --password=DB_pass -h DB_host DB_name
class FilterOptions(CommonCmnOptions):
def __init__(self):
super(FilterOptions,self).__init__()
self.filter = 'mpft'
self.level = 0.0
self._appname = "Filter"
        self._opts.append(('level','l',"NUMBER","Some filters find duplicate files"
            " by scanning them for acoustic similarity.\n"
            "That matters because the same track will look different to the computer\n"
            "depending on what encoding is used, but will still sound alike.\n"
            "Because duplicates are found by similarity, the filter may make mistakes.\n"
            "To catch such mistakes the filter can also check whether the tags of\n"
            "files sounding alike are also similar.\n"
            "NUMBER is a value between 0 and 1. If 0 is passed, tags don't matter\n"
            "to the filter. The closer the value is to 1, the more similar the tags\n"
            "need to be for two files to be seen as the same.",
            self.level))
self._opts.append(('filter','f',"FILTER","FILTER is a combination"
" of the following options:\n"
"m merge all files with a duplicate md5 hash\n"
"f merge all files with a duplicate fingerprint\n"
" makes use of the level argument\n"
"p merge all files with a duplicate puid\n"
" makes use of the level argument\n"
"t merge all files with duplicate tags",
self.filter))
self._appdesc="Filter the index by merging similar songs."
def _set_option_value(self,opt,val):
q = None
if opt == 'level':
v = check.make_float_between_zero_and_one(val)
if v == None:
print "The level argument is not a number between 0 and 1"
q = 1
else:
self.level = val
elif opt == 'filter':
self.filter = val
else:
r = super(FilterOptions,self)._set_option_value(opt,val)
if r != None:
q = r
return q
| gpl-3.0 |
mitodl/micromasters | grades/management/commands/complete_course_run_freeze.py | 1 | 2627 | """
Sets the global freeze status for the course run to "complete"
"""
from celery.result import GroupResult
from django.core.cache import caches
from django.core.management import BaseCommand, CommandError
from courses.models import CourseRun
from grades.models import CourseRunGradingStatus
from grades.tasks import CACHE_ID_BASE_STR
from micromasters.celery import app
cache_redis = caches['redis']
class Command(BaseCommand):
"""
Sets the global freeze status for the course run to "complete"
"""
help = ('Sets the global freeze status for the course run to "complete". '
'This should not be necessary if all the users are processed')
def add_arguments(self, parser):
parser.add_argument("edx_course_key", help="the edx_course_key for the course run")
def handle(self, *args, **kwargs): # pylint: disable=unused-argument
edx_course_key = kwargs.get('edx_course_key')
try:
run = CourseRun.objects.get(edx_course_key=edx_course_key)
except CourseRun.DoesNotExist:
raise CommandError('Course Run for course_id "{0}" does not exist'.format(edx_course_key))
if not run.can_freeze_grades:
self.stdout.write(
self.style.ERROR(
'Course Run "{0}" cannot be marked as frozen yet'.format(edx_course_key)
)
)
return
if CourseRunGradingStatus.is_complete(run):
self.stdout.write(
self.style.SUCCESS(
'Course Run "{0}" is already marked as complete'.format(edx_course_key)
)
)
return
# check if there are tasks running
cache_id = CACHE_ID_BASE_STR.format(edx_course_key)
group_results_id = cache_redis.get(cache_id)
if group_results_id is not None:
results = GroupResult.restore(group_results_id, app=app)
if results and not results.ready():
self.stdout.write(
self.style.WARNING(
'Tasks for Course Run "{0}" are still running. '
'Impossible to set the global "complete" status'.format(edx_course_key)
)
)
return
# if the tasks are done remove the entry in the cache
cache_redis.delete(group_results_id)
CourseRunGradingStatus.set_to_complete(run)
self.stdout.write(
self.style.SUCCESS(
'Course Run "{0}" has been marked as complete'.format(edx_course_key)
)
)
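# Usage sketch (the course key below is illustrative):
#
#     python manage.py complete_course_run_freeze "course-v1:MITx+DEMO.1x+1T2017"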
| bsd-3-clause |
mtagle/airflow | airflow/contrib/utils/log/task_handler_with_custom_formatter.py | 5 | 2216 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Custom logging formatter for Airflow
"""
import logging
from logging import StreamHandler
from airflow.configuration import conf
from airflow.utils.helpers import parse_template_string
class TaskHandlerWithCustomFormatter(StreamHandler):
"""
Custom implementation of StreamHandler, a class which writes logging records for Airflow
"""
def __init__(self, stream):
super().__init__()
self.prefix_jinja_template = None
def set_context(self, ti):
"""
Accept the run-time context (i.e. the current task) and configure the formatter accordingly.
        :param ti: the current task instance; its template context is used to render the prefix
        :return: None
"""
if ti.raw:
return
prefix = conf.get('logging', 'task_log_prefix_template')
rendered_prefix = ""
if prefix:
_, self.prefix_jinja_template = parse_template_string(prefix)
rendered_prefix = self._render_prefix(ti)
formatter = logging.Formatter(rendered_prefix + ":" + self.formatter._fmt) # pylint: disable=W0212
self.setFormatter(formatter)
self.setLevel(self.level)
def _render_prefix(self, ti):
if self.prefix_jinja_template:
jinja_context = ti.get_template_context()
return self.prefix_jinja_template.render(**jinja_context)
logging.warning("'task_log_prefix_template' is in invalid format, ignoring the variable value")
return ""
| apache-2.0 |
andymckay/django | django/conf/locale/sl/formats.py | 257 | 1834 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd. F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j. M. Y'
SHORT_DATETIME_FORMAT = 'j.n.Y. H:i'
FIRST_DAY_OF_WEEK = 0
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%d-%m-%Y', # '25-10-2006'
'%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59'
'%d-%m-%Y %H:%M', # '25-10-2006 14:30'
'%d-%m-%Y', # '25-10-2006'
'%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
'%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
'%d. %m. %Y', # '25. 10. 2006'
'%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
'%d. %m. %y %H:%M', # '25. 10. 06 14:30'
'%d. %m. %y', # '25. 10. 06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/account_tests.py | 1 | 4654 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import account
from .fhirdate import FHIRDate
class AccountTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Account", js["resourceType"])
return account.Account(js)
def testAccount1(self):
inst = self.instantiate_from("account-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Account instance")
self.implAccount1(inst)
js = inst.as_json()
self.assertEqual("Account", js["resourceType"])
inst2 = account.Account(js)
self.implAccount1(inst2)
def implAccount1(self, inst):
self.assertEqual(inst.coverage[0].priority, 1)
self.assertEqual(inst.description, "Hospital charges")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier[0].system, "urn:oid:0.1.2.3.4.5.6.7")
self.assertEqual(inst.identifier[0].value, "654321")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.name, "HACC Funded Billing for Peter James Chalmers")
self.assertEqual(inst.servicePeriod.end.date, FHIRDate("2016-06-30").date)
self.assertEqual(inst.servicePeriod.end.as_json(), "2016-06-30")
self.assertEqual(inst.servicePeriod.start.date, FHIRDate("2016-01-01").date)
self.assertEqual(inst.servicePeriod.start.as_json(), "2016-01-01")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">HACC Funded Billing for Peter James Chalmers</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "PBILLACCT")
self.assertEqual(inst.type.coding[0].display, "patient billing account")
self.assertEqual(inst.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.type.text, "patient")
def testAccount2(self):
inst = self.instantiate_from("account-example-with-guarantor.json")
self.assertIsNotNone(inst, "Must have instantiated a Account instance")
self.implAccount2(inst)
js = inst.as_json()
self.assertEqual("Account", js["resourceType"])
inst2 = account.Account(js)
self.implAccount2(inst2)
def implAccount2(self, inst):
self.assertEqual(inst.coverage[0].priority, 1)
self.assertEqual(inst.coverage[1].priority, 2)
self.assertEqual(inst.description, "Hospital charges")
self.assertFalse(inst.guarantor[0].onHold)
self.assertEqual(inst.guarantor[0].period.start.date, FHIRDate("2016-01-01").date)
self.assertEqual(inst.guarantor[0].period.start.as_json(), "2016-01-01")
self.assertEqual(inst.id, "ewg")
self.assertEqual(inst.identifier[0].system, "urn:oid:0.1.2.3.4.5.6.7")
self.assertEqual(inst.identifier[0].value, "654321")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.name, "Inpatient: Peter James Chalmers")
self.assertEqual(inst.servicePeriod.end.date, FHIRDate("2016-06-30").date)
self.assertEqual(inst.servicePeriod.end.as_json(), "2016-06-30")
self.assertEqual(inst.servicePeriod.start.date, FHIRDate("2016-01-01").date)
self.assertEqual(inst.servicePeriod.start.as_json(), "2016-01-01")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Inpatient Admission for Peter James Chalmers Account</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "PBILLACCT")
self.assertEqual(inst.type.coding[0].display, "patient billing account")
self.assertEqual(inst.type.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActCode")
self.assertEqual(inst.type.text, "patient")
| bsd-3-clause |
hobson/pug-nlp | pug/nlp/tests.py | 1 | 1514 | #!/usr/bin/env python
"""
Uses the python unittest module to test this app with `python -m unittest pug.nlp`.
"""
# from django.test import TestCase
from unittest import TestCase, main
import doctest
from pug.nlp import util, http, regex, penn_treebank_tokenizer, detector_morse
class NLPDocTest(TestCase):
    def test_module(self, module=None, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE):
        if module:
            failure_count, test_count = doctest.testmod(module, raise_on_error=False, verbose=True,
                                                        optionflags=optionflags)
msg = "Ran {0} tests in {3} and {1} passed ({2} failed)".format(test_count, test_count-failure_count, failure_count, module.__file__)
print msg
if failure_count:
# print "Ignoring {0} doctest failures...".format(__file__)
self.fail(msg)
# return failure_count, test_count
def test_util(self):
self.test_module(util, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
def test_http(self):
self.test_module(http, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
def test_regex_patterns(self):
self.test_module(regex, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
def test_penn_treebank_tokenizer(self):
self.test_module(penn_treebank_tokenizer, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
def test_detector_morse(self):
self.test_module(detector_morse, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
if __name__ == '__main__':
main()
| mit |
jeasoft/odoo | addons/website/models/res_config.py | 240 | 2660 |
from openerp.osv import fields, osv
class website_config_settings(osv.osv_memory):
_name = 'website.config.settings'
_inherit = 'res.config.settings'
_columns = {
'website_id': fields.many2one('website', string="website", required=True),
'website_name': fields.related('website_id', 'name', type="char", string="Website Name"),
'language_ids': fields.related('website_id', 'language_ids', type='many2many', relation='res.lang', string='Languages'),
'default_lang_id': fields.related('website_id', 'default_lang_id', type='many2one', relation='res.lang', string='Default language'),
'default_lang_code': fields.related('website_id', 'default_lang_code', type="char", string="Default language code"),
'google_analytics_key': fields.related('website_id', 'google_analytics_key', type="char", string='Google Analytics Key'),
'social_twitter': fields.related('website_id', 'social_twitter', type="char", string='Twitter Account'),
'social_facebook': fields.related('website_id', 'social_facebook', type="char", string='Facebook Account'),
'social_github': fields.related('website_id', 'social_github', type="char", string='GitHub Account'),
'social_linkedin': fields.related('website_id', 'social_linkedin', type="char", string='LinkedIn Account'),
'social_youtube': fields.related('website_id', 'social_youtube', type="char", string='Youtube Account'),
'social_googleplus': fields.related('website_id', 'social_googleplus', type="char", string='Google+ Account'),
}
def on_change_website_id(self, cr, uid, ids, website_id, context=None):
website_data = self.pool.get('website').read(cr, uid, [website_id], [], context=context)[0]
values = {'website_name': website_data['name']}
for fname, v in website_data.items():
if fname in self._columns:
values[fname] = v[0] if v and self._columns[fname]._type == 'many2one' else v
return {'value' : values}
# FIXME in trunk for god sake. Change the fields above to fields.char instead of fields.related,
# and create the function set_website who will set the value on the website_id
# create does not forward the values to the related many2one. Write does.
def create(self, cr, uid, vals, context=None):
config_id = super(website_config_settings, self).create(cr, uid, vals, context=context)
self.write(cr, uid, config_id, vals, context=context)
return config_id
_defaults = {
'website_id': lambda self,cr,uid,c: self.pool.get('website').search(cr, uid, [], context=c)[0],
}
| agpl-3.0 |
rubencabrera/odoo | addons/account_budget/report/analytic_account_budget_report.py | 360 | 7589 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class analytic_account_budget_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(analytic_account_budget_report, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'funct': self.funct,
'funct_total': self.funct_total,
'time': time,
})
self.context = context
def funct(self, object, form, ids=None, done=None, level=1):
if ids is None:
ids = {}
if not ids:
ids = self.ids
if not done:
done = {}
global tot
tot = {
'theo':0.00,
'pln':0.00,
'prac':0.00,
'perc':0.00
}
result = []
accounts = self.pool.get('account.analytic.account').browse(self.cr, self.uid, [object.id], self.context.copy())
c_b_lines_obj = self.pool.get('crossovered.budget.lines')
obj_c_budget = self.pool.get('crossovered.budget')
for account_id in accounts:
res = {}
b_line_ids = []
for line in account_id.crossovered_budget_line:
b_line_ids.append(line.id)
if not b_line_ids:
return []
d_from = form['date_from']
d_to = form['date_to']
self.cr.execute('SELECT DISTINCT(crossovered_budget_id) FROM crossovered_budget_lines WHERE id =ANY(%s)',(b_line_ids,))
budget_ids = self.cr.fetchall()
context = {'wizard_date_from':d_from,'wizard_date_to':d_to}
for i in range(0, len(budget_ids)):
budget_name = obj_c_budget.browse(self.cr, self.uid, [budget_ids[i][0]])
res= {
'b_id':'-1',
'a_id':'-1',
'name':budget_name[0].name,
'status':1,
'theo':0.00,
'pln':0.00,
'prac':0.00,
'perc':0.00
}
result.append(res)
line_ids = c_b_lines_obj.search(self.cr, self.uid, [('id', 'in', b_line_ids), ('crossovered_budget_id','=',budget_ids[i][0])])
line_id = c_b_lines_obj.browse(self.cr, self.uid, line_ids)
tot_theo = tot_pln = tot_prac = tot_perc = 0
done_budget = []
for line in line_id:
if line.id in b_line_ids:
theo = pract = 0.00
theo = c_b_lines_obj._theo_amt(self.cr, self.uid, [line.id], context)[line.id]
pract = c_b_lines_obj._prac_amt(self.cr, self.uid, [line.id], context)[line.id]
if line.general_budget_id.id in done_budget:
for record in result:
if record['b_id'] == line.general_budget_id.id and record['a_id'] == line.analytic_account_id.id:
record['theo'] += theo
record['pln'] += line.planned_amount
record['prac'] += pract
record['perc'] += line.percentage
tot_theo += theo
tot_pln += line.planned_amount
tot_prac += pract
tot_perc += line.percentage
else:
res1 = {
'b_id': line.general_budget_id.id,
'a_id': line.analytic_account_id.id,
'name': line.general_budget_id.name,
'status': 2,
'theo': theo,
'pln': line.planned_amount,
'prac': pract,
'perc': line.percentage
}
tot_theo += theo
tot_pln += line.planned_amount
tot_prac += pract
tot_perc += line.percentage
result.append(res1)
done_budget.append(line.general_budget_id.id)
else:
if line.general_budget_id.id in done_budget:
continue
else:
res1={
'b_id': line.general_budget_id.id,
'a_id': line.analytic_account_id.id,
'name': line.general_budget_id.name,
'status': 2,
'theo': 0.00,
'pln': 0.00,
'prac': 0.00,
'perc': 0.00
}
result.append(res1)
done_budget.append(line.general_budget_id.id)
if tot_theo == 0.00:
tot_perc = 0.00
else:
tot_perc = float(tot_prac / tot_theo) * 100
result[-(len(done_budget) +1)]['theo'] = tot_theo
tot['theo'] +=tot_theo
result[-(len(done_budget) +1)]['pln'] = tot_pln
tot['pln'] +=tot_pln
result[-(len(done_budget) +1)]['prac'] = tot_prac
tot['prac'] +=tot_prac
result[-(len(done_budget) +1)]['perc'] = tot_perc
if tot['theo'] == 0.00:
tot['perc'] = 0.00
else:
tot['perc'] = float(tot['prac'] / tot['theo']) * 100
return result
def funct_total(self, form):
result = []
res = {}
res = {
'tot_theo': tot['theo'],
'tot_pln': tot['pln'],
'tot_prac': tot['prac'],
'tot_perc': tot['perc']
}
result.append(res)
return result
class report_analyticaccountbudget(osv.AbstractModel):
_name = 'report.account_budget.report_analyticaccountbudget'
_inherit = 'report.abstract_report'
_template = 'account_budget.report_analyticaccountbudget'
_wrapped_report_class = analytic_account_budget_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
druuu/django | tests/migrations/test_optimizer.py | 108 | 22542 | # -*- coding: utf-8 -*-
from django.db import migrations, models
from django.db.migrations.optimizer import MigrationOptimizer
from django.test import SimpleTestCase
from .models import CustomModelBase, EmptyManager
class OptimizerTests(SimpleTestCase):
"""
    Tests the migration optimizer.
"""
def optimize(self, operations):
"""
Handy shortcut for getting results + number of loops
"""
optimizer = MigrationOptimizer()
return optimizer.optimize(operations), optimizer._iterations
def assertOptimizesTo(self, operations, expected, exact=None, less_than=None):
result, iterations = self.optimize(operations)
result = [repr(f.deconstruct()) for f in result]
expected = [repr(f.deconstruct()) for f in expected]
self.assertEqual(expected, result)
if exact is not None and iterations != exact:
raise self.failureException("Optimization did not take exactly %s iterations (it took %s)" % (exact, iterations))
if less_than is not None and iterations >= less_than:
raise self.failureException("Optimization did not take less than %s iterations (it took %s)" % (less_than, iterations))
def assertDoesNotOptimize(self, operations):
self.assertOptimizesTo(operations, operations)
def test_single(self):
"""
Tests that the optimizer does nothing on a single operation,
and that it does it in just one pass.
"""
self.assertOptimizesTo(
[migrations.DeleteModel("Foo")],
[migrations.DeleteModel("Foo")],
exact=1,
)
def test_create_delete_model(self):
"""
CreateModel and DeleteModel should collapse into nothing.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.DeleteModel("Foo"),
],
[],
)
def test_create_rename_model(self):
"""
CreateModel should absorb RenameModels.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(CustomModelBase),
managers=managers,
),
migrations.RenameModel("Foo", "Bar"),
],
[
migrations.CreateModel(
"Bar",
[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(CustomModelBase),
managers=managers,
)
],
)
def test_rename_model_self(self):
"""
RenameModels should absorb themselves.
"""
self.assertOptimizesTo(
[
migrations.RenameModel("Foo", "Baa"),
migrations.RenameModel("Baa", "Bar"),
],
[
migrations.RenameModel("Foo", "Bar"),
],
)
def _test_create_alter_foo_delete_model(self, alter_foo):
"""
CreateModel, AlterModelTable, AlterUniqueTogether/AlterIndexTogether/
AlterOrderWithRespectTo, and DeleteModel should collapse into nothing.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.AlterModelTable("Foo", "woohoo"),
alter_foo,
migrations.DeleteModel("Foo"),
],
[],
)
def test_create_alter_unique_delete_model(self):
self._test_create_alter_foo_delete_model(migrations.AlterUniqueTogether("Foo", [["a", "b"]]))
def test_create_alter_index_delete_model(self):
self._test_create_alter_foo_delete_model(migrations.AlterIndexTogether("Foo", [["a", "b"]]))
def test_create_alter_owrt_delete_model(self):
self._test_create_alter_foo_delete_model(migrations.AlterOrderWithRespectTo("Foo", "a"))
def _test_alter_alter_model(self, alter_foo, alter_bar):
"""
Two AlterUniqueTogether/AlterIndexTogether/AlterOrderWithRespectTo
should collapse into the second.
"""
self.assertOptimizesTo(
[
alter_foo,
alter_bar,
],
[
alter_bar,
],
)
def test_alter_alter_table_model(self):
self._test_alter_alter_model(
migrations.AlterModelTable("Foo", "a"),
migrations.AlterModelTable("Foo", "b"),
)
def test_alter_alter_unique_model(self):
self._test_alter_alter_model(
migrations.AlterUniqueTogether("Foo", [["a", "b"]]),
migrations.AlterUniqueTogether("Foo", [["a", "c"]]),
)
def test_alter_alter_index_model(self):
self._test_alter_alter_model(
migrations.AlterIndexTogether("Foo", [["a", "b"]]),
migrations.AlterIndexTogether("Foo", [["a", "c"]]),
)
def test_alter_alter_owrt_model(self):
self._test_alter_alter_model(
migrations.AlterOrderWithRespectTo("Foo", "a"),
migrations.AlterOrderWithRespectTo("Foo", "b"),
)
def test_optimize_through_create(self):
"""
We should be able to optimize away create/delete through a create or delete
of a different model, but only if the create operation does not mention the model
at all.
"""
# These should work
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Bar"),
migrations.DeleteModel("Foo"),
],
[],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Foo"),
migrations.DeleteModel("Bar"),
],
[],
)
# This should not work - FK should block it
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]),
migrations.DeleteModel("Foo"),
],
)
# This should not work - bases should block it
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo", )),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo", )),
migrations.DeleteModel("Foo"),
],
)
def test_create_model_add_field(self):
"""
AddField should optimize into CreateModel.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(CustomModelBase),
managers=managers,
),
migrations.AddField("Foo", "age", models.IntegerField()),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
("age", models.IntegerField()),
],
options={'verbose_name': 'Foo'},
bases=(CustomModelBase),
managers=managers,
),
],
)
def test_create_model_add_field_not_through_fk(self):
"""
AddField should NOT optimize into CreateModel if it's an FK to a model
that's between them.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Link", [("url", models.TextField())]),
migrations.AddField("Foo", "link", models.ForeignKey("migrations.Link", models.CASCADE)),
],
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Link", [("url", models.TextField())]),
migrations.AddField("Foo", "link", models.ForeignKey("migrations.Link", models.CASCADE)),
],
)
def test_create_model_add_field_not_through_m2m_through(self):
"""
AddField should NOT optimize into CreateModel if it's an M2M using a
through that's created between them.
"""
# Note: The middle model is not actually a valid through model,
# but that doesn't matter, as we never render it.
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("LinkThrough", []),
migrations.AddField("Foo", "link", models.ManyToManyField("migrations.Link", through="migrations.LinkThrough")),
],
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("LinkThrough", []),
migrations.AddField("Foo", "link", models.ManyToManyField("migrations.Link", through="migrations.LinkThrough")),
],
)
def test_create_model_alter_field(self):
"""
AlterField should optimize into CreateModel.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(CustomModelBase),
managers=managers,
),
migrations.AlterField("Foo", "name", models.IntegerField()),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.IntegerField()),
],
options={'verbose_name': 'Foo'},
bases=(CustomModelBase),
managers=managers,
),
],
)
def test_create_model_rename_field(self):
"""
RenameField should optimize into CreateModel.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={'verbose_name': 'Foo'},
bases=(CustomModelBase),
managers=managers,
),
migrations.RenameField("Foo", "name", "title"),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("title", models.CharField(max_length=255)),
],
options={'verbose_name': 'Foo'},
bases=(CustomModelBase),
managers=managers,
),
],
)
def test_add_field_rename_field(self):
"""
RenameField should optimize into AddField
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "name", models.CharField(max_length=255)),
migrations.RenameField("Foo", "name", "title"),
],
[
migrations.AddField("Foo", "title", models.CharField(max_length=255)),
],
)
def test_alter_field_rename_field(self):
"""
RenameField should optimize to the other side of AlterField,
and into itself.
"""
self.assertOptimizesTo(
[
migrations.AlterField("Foo", "name", models.CharField(max_length=255)),
migrations.RenameField("Foo", "name", "title"),
migrations.RenameField("Foo", "title", "nom"),
],
[
migrations.RenameField("Foo", "name", "nom"),
migrations.AlterField("Foo", "nom", models.CharField(max_length=255)),
],
)
def test_create_model_remove_field(self):
"""
RemoveField should optimize into CreateModel.
"""
managers = [('objects', EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
("age", models.IntegerField()),
],
options={'verbose_name': 'Foo'},
bases=(CustomModelBase),
managers=managers,
),
migrations.RemoveField("Foo", "age"),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
],
options={'verbose_name': 'Foo'},
bases=(CustomModelBase),
managers=managers,
),
],
)
def test_add_field_alter_field(self):
"""
AlterField should optimize into AddField.
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.AlterField("Foo", "age", models.FloatField(default=2.4)),
],
[
migrations.AddField("Foo", name="age", field=models.FloatField(default=2.4)),
],
)
def test_add_field_delete_field(self):
"""
RemoveField should cancel AddField
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.RemoveField("Foo", "age"),
],
[],
)
def test_alter_field_delete_field(self):
"""
RemoveField should absorb AlterField
"""
self.assertOptimizesTo(
[
migrations.AlterField("Foo", "age", models.IntegerField()),
migrations.RemoveField("Foo", "age"),
],
[
migrations.RemoveField("Foo", "age"),
],
)
def _test_create_alter_foo_field(self, alter):
"""
CreateModel, AlterFooTogether/AlterOrderWithRespectTo followed by an
add/alter/rename field should optimize to CreateModel and the Alter*
"""
# AddField
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.AddField("Foo", "c", models.IntegerField()),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
]),
alter,
],
)
# AlterField
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.AlterField("Foo", "b", models.CharField(max_length=255)),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
]),
alter,
migrations.AlterField("Foo", "c", models.CharField(max_length=255)),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.CharField(max_length=255)),
]),
alter,
],
)
# RenameField
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.RenameField("Foo", "b", "c"),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.RenameField("Foo", "b", "x"),
migrations.RenameField("Foo", "x", "c"),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.RenameField("Foo", "b", "c"),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
]),
alter,
migrations.RenameField("Foo", "c", "d"),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("d", models.IntegerField()),
]),
alter,
],
)
# RemoveField
self.assertDoesNotOptimize(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
migrations.RemoveField("Foo", "b"),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
]),
alter,
migrations.RemoveField("Foo", "c"),
],
[
migrations.CreateModel("Foo", [
("a", models.IntegerField()),
("b", models.IntegerField()),
]),
alter,
],
)
def test_create_alter_unique_field(self):
self._test_create_alter_foo_field(migrations.AlterUniqueTogether("Foo", [["a", "b"]]))
def test_create_alter_index_field(self):
self._test_create_alter_foo_field(migrations.AlterIndexTogether("Foo", [["a", "b"]]))
def test_create_alter_owrt_field(self):
self._test_create_alter_foo_field(migrations.AlterOrderWithRespectTo("Foo", "b"))
def test_optimize_through_fields(self):
"""
Checks that field-level through checking is working.
This should manage to collapse model Foo to nonexistence,
and model Bar to a single IntegerField called "width".
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.AddField("Bar", "width", models.IntegerField()),
migrations.AlterField("Foo", "age", models.IntegerField()),
migrations.RenameField("Bar", "size", "dimensions"),
migrations.RemoveField("Foo", "age"),
migrations.RenameModel("Foo", "Phou"),
migrations.RemoveField("Bar", "dimensions"),
migrations.RenameModel("Phou", "Fou"),
migrations.DeleteModel("Fou"),
],
[
migrations.CreateModel("Bar", [("width", models.IntegerField())]),
],
)
| bsd-3-clause |
imsparsh/python-for-android | python3-alpha/extra_modules/pyxmpp2/etree.py | 46 | 2735 | #
# (C) Copyright 2011 Jacek Konieczny <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""ElementTree API selection.
The rest of PyXMPP2 package imports the ElementTree API from this module.
The actual API can be selected in one of two ways:
By importing this module (before anything else) its :etree:`ElementTree`
variable:
.. python::
import pyxmpp2.etree
import xml.etree.cElementTree
pyxmpp2.etree.ElementTree = xml.etree.cElementTree
Or by setting the 'PYXMPP2_ETREE' environment variable, e.g.::
$ PYXMPP2_ETREE="xml.etree"
By default the standard Python ElementTree implementation is used
(`xml.etree.ElementTree
<http://docs.python.org/library/xml.etree.elementtree.html>`__)
"""
# pylint: disable=C0103
__docformat__ = "restructuredtext en"
import os
import sys
from abc import ABCMeta
if "PYXMPP2_ETREE" in os.environ:
ElementTree = __import__(os.environ["PYXMPP2_ETREE"], fromlist=[""])
else:
from xml.etree import ElementTree # pylint: disable=W0404
class ElementClass(metaclass=ABCMeta):
"""Abstract class used to reference the :etree:`ElementTree.Element`
object type of the selected Element Tree implementation.
"""
element_type = None
@classmethod
def __subclasshook__(cls, other):
if cls.element_type is None:
cls.element_type = type(ElementTree.Element("x"))
if cls is ElementClass:
return other is cls.element_type or hasattr(other, "tag")
return NotImplemented
def element_to_unicode(element):
"""Serialize an XML element into a unicode string.
This should work the same on Python2 and Python3 and with all
:etree:`ElementTree` implementations.
:Parameters:
- `element`: the XML element to serialize
:Types:
- `element`: :etree:`ElementTree.Element`
"""
if hasattr(ElementTree, 'tounicode'):
# pylint: disable=E1103
        return ElementTree.tounicode(element)
elif sys.version_info.major < 3:
return str(ElementTree.tostring(element))
else:
return ElementTree.tostring(element, encoding = "unicode")
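# A minimal usage sketch, assuming the default xml.etree backend selected above:
# it round-trips a small element through element_to_unicode(), illustrating the
# selection mechanism described in the module docstring. The tag and text used
# here are only examples.
if __name__ == "__main__":
    demo_element = ElementTree.Element("{jabber:client}message")
    demo_element.text = "hello"
    # Serialized with whichever ElementTree implementation was selected above.
    print(element_to_unicode(demo_element))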
| apache-2.0 |
jakev/dtf | python-dtf/dtf/core/cmds/client.py | 2 | 11429 | # Android Device Testing Framework ("dtf")
# Copyright 2013-2015 Jake Valletta (@jake_valletta)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Built-in module for client on device """
from __future__ import absolute_import
from __future__ import print_function
import os.path
from argparse import ArgumentParser
import dtf.logging as log
import dtf.properties as prop
import dtf.adb as adb
from dtf.module import Module
from dtf.constants import DTF_CLIENT
from dtf.globals import get_generic_global
from dtf.client import (DtfClient, RESP_OK, RESP_NO_READ, RESP_ERROR,
RESP_NO_WRITE, RESP_EXISTS, RESP_NO_EXIST,
ERR_SOCK)
DEFAULT_UPLOAD_PATH = '/data/data/com.dtf.client'
class client(Module): # pylint: disable=invalid-name
"""Module class for dtf client"""
adb = adb.DtfAdb()
client = DtfClient()
@classmethod
def usage(cls):
"""Display module usage"""
print('dtf Client Manager')
print('Subcommands:')
print(' download Download a file using dtfClient.')
print(' execute Execute a command using dtfClient.')
print(' install Install the dtf client on device.')
print(' status Print the install status of the client.')
print(' remove Uninstall the dtf client.')
print(" restart Restart dtfClient's socket service.")
print(' upload Upload file using dtfClient.')
print(' mode Configure connection mode.')
print('')
return 0
def do_install(self):
"""Install the dtf client on device"""
dtf_client_path = os.path.expanduser(
get_generic_global("Client", "apk_file"))
if not os.path.isfile(dtf_client_path):
log.e(self.name, "Unable to find APK file: %s" % dtf_client_path)
return -1
log.i(self.name, "Waiting for device to be connected...")
self.adb.wait_for_device()
log.i(self.name, "Removing old client if it exists...")
self.adb.uninstall(DTF_CLIENT)
log.i(self.name, "Installing dtf client...")
self.adb.install(dtf_client_path)
cmd = "am startservice -a com.dtf.action.name.INITIALIZE"
self.adb.shell_command(cmd)
busybox_path = "/data/data/%s/files/busybox" % DTF_CLIENT
prop.set_prop('Info', 'busybox', busybox_path)
log.i(self.name, "dtf client installed.")
return 0
def do_status(self):
"""Print the install status of the client"""
if self.adb.is_installed(DTF_CLIENT):
print('dtf Client Status: Installed')
print('')
else:
print('dtf Client Status: Not Installed')
print('')
def do_remove(self):
"""Uninstall the dtf client"""
log.i(self.name, "Waiting for device to be connected...")
self.adb.wait_for_device()
log.i(self.name, "Removing dtf client...")
self.adb.uninstall(DTF_CLIENT)
prop.del_prop('Info', 'busybox')
log.i(self.name, "dtf client removed!")
return 0
def do_upload(self, args):
"""Upload file to dtf client directory"""
parser = ArgumentParser(
prog='client upload',
description='Upload file to device with dtfClient.')
parser.add_argument('--path', dest='upload_path',
default=None, help="Specify a upload point.")
parser.add_argument('file_name', type=str,
help='The file to upload.')
args = parser.parse_args(args)
file_name = args.file_name
if args.upload_path is None:
upload_file_name = os.path.basename(file_name)
upload_path = "%s/%s" % (DEFAULT_UPLOAD_PATH, upload_file_name)
else:
upload_path = args.upload_path
if not os.path.isfile(file_name):
log.e(self.name, "File does not exist: %s" % file_name)
return -1
log.i(self.name, "Waiting for device to be connected...")
self.adb.wait_for_device()
log.i(self.name, "Device connected!")
# Is client installed?
if not self.adb.is_installed(DTF_CLIENT):
log.e(self.name, "dtf Client is not installed!")
return -1
resp = self.client.upload_file(file_name, upload_path)
if resp == RESP_OK:
log.i(self.name, "File upload success!")
return 0
# These are all error conditions
if resp == RESP_ERROR:
log.e(self.name, "General error!")
elif resp == RESP_EXISTS:
log.e(self.name, "Remote file exist!")
elif resp == RESP_NO_WRITE:
log.e(self.name, "No write permissions!")
elif resp == ERR_SOCK:
log.e(self.name, "Socket error!")
else:
log.e(self.name, "Unknown response, cannot proceed.")
# Getting here means error.
return -1
def do_download(self, args):
"""Download a file using the dtfClient API"""
parser = ArgumentParser(
prog='client download',
description='Download file from device with dtfClient.')
parser.add_argument('--path', dest='download_path',
default=None, help="Specify local path.")
parser.add_argument('file_name', type=str,
help='The file to download.')
args = parser.parse_args(args)
file_name = args.file_name
if args.download_path is None:
local_path = os.path.basename(file_name)
else:
local_path = args.download_path
if os.path.isfile(local_path):
log.e(self.name, "Local file '%s' already exists!" % local_path)
return -1
log.i(self.name, "Waiting for connected device...")
self.adb.wait_for_device()
log.i(self.name, "Device connected!")
# Is client installed?
if not self.adb.is_installed(DTF_CLIENT):
log.e(self.name, "dtf Client is not installed!")
return -1
resp = self.client.download_file(file_name, local_path)
if resp == RESP_OK:
log.i(self.name, "File download success!")
return 0
# These are all error conditions
if resp == RESP_ERROR:
log.e(self.name, "General error!")
elif resp == RESP_NO_EXIST:
log.e(self.name, "Remote file doesnt exist!")
elif resp == RESP_NO_READ:
log.e(self.name, "No read permissions!")
elif resp == ERR_SOCK:
log.e(self.name, "Socket error!")
else:
log.e(self.name, "Unknown response, cannot proceed.")
# Getting here means an error
return -1
def do_restart(self):
"""Restart the socket service on the dtfClient"""
log.i(self.name, "Waiting for device to be connected...")
self.adb.wait_for_device()
log.i(self.name, "Connected!")
cmd = "am startservice -a com.dtf.action.name.RESTART_SOCKET"
self.adb.shell_command(cmd)
return 0
def do_execute(self, args):
"""Execute a command using the dtfClient"""
if len(args) != 1:
print('Usage:')
print('dtf client execute [command]')
return -1
command_string = args.pop()
log.i(self.name, "Waiting for connected device...")
self.adb.wait_for_device()
log.i(self.name, "Device connected!")
# Is client installed?
if not self.adb.is_installed(DTF_CLIENT):
log.e(self.name, "dtf Client is not installed!")
return -1
response, resp_code = self.client.execute_command(command_string)
if resp_code == RESP_OK:
print(response)
return 0
elif resp_code == ERR_SOCK:
log.e(self.name, "Socket error!")
return -1
else:
log.e(self.name, "Something went wrong with the command (Err: %s)"
% ord(resp_code))
return -1
def do_mode(self, args):
"""Configure the debugging mode to use"""
if len(args) < 1:
current_mode = prop.get_prop('Client', 'mode')
print("Current Mode: %s" % current_mode)
print('')
print('Usage:')
print('dtf client mode [usb|wifi <ip:port>]')
return -1
mode = args.pop(0)
if mode not in [adb.MODE_USB, adb.MODE_WIFI]:
log.e(self.name, "Invalid mode!")
return -2
self.adb.wait_for_device()
# Wifi mode requires IP:Port
if mode == adb.MODE_WIFI:
if len(args) != 1:
log.e(self.name, "Wifi mode requires IP address:port!")
return -3
try:
ip_address, port = args[0].split(":")
except ValueError:
log.e(self.name, "Invalid IP address:port!")
return -4
log.i(self.name, "Setting Wifi mode to %s:%s..."
% (ip_address, port))
# Reconfigure the client
try:
self.client.set_to_wifi(ip_address, port)
except IOError:
log.e(self.name, "Unable to set to wifi mode!")
log.e(self.name, "Please reconnect your USB device.")
return -5
# Set the properties
prop.set_prop('Client', 'mode', adb.MODE_WIFI)
prop.set_prop('Client', 'ip-addr', ip_address)
prop.set_prop('Client', 'port', port)
# USB Takes no arguments
elif mode == adb.MODE_USB:
log.i(self.name, "Setting to USB mode...")
# Reconfigure the client
self.client.set_to_usb()
# Set the properties
prop.set_prop('Client', 'mode', adb.MODE_USB)
return 0
def execute(self, args):
"""Main module executor"""
self.name = self.__self__
rtn = 0
if len(args) < 1:
return self.usage()
sub_cmd = args.pop(0)
if sub_cmd == 'install':
rtn = self.do_install()
elif sub_cmd == 'status':
rtn = self.do_status()
elif sub_cmd == 'remove':
rtn = self.do_remove()
elif sub_cmd == 'upload':
rtn = self.do_upload(args)
elif sub_cmd == 'download':
rtn = self.do_download(args)
elif sub_cmd == 'restart':
rtn = self.do_restart()
elif sub_cmd == 'execute':
rtn = self.do_execute(args)
elif sub_cmd == 'mode':
rtn = self.do_mode(args)
else:
print("Sub-command '%s' not found!" % sub_cmd)
rtn = self.usage()
return rtn
| apache-2.0 |
peiyuwang/pants | tests/python/pants_test/backend/jvm/tasks/test_export_classpath_integration.py | 11 | 1178 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import time
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class ExportClasspathIntegrationTest(PantsRunIntegrationTest):
def test_export_manifest_jar(self):
ctimes = []
manifest_jar_path = "dist/export-classpath/manifest.jar"
for _ in range(2):
pants_run = self.run_pants(["export-classpath",
"--manifest-jar-only",
"examples/src/java/org/pantsbuild/example/hello/simple"])
self.assert_success(pants_run)
self.assertTrue(os.path.exists(manifest_jar_path))
(mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(manifest_jar_path)
ctimes.append(ctime)
# ctime is only accurate to second.
time.sleep(1)
self.assertTrue(ctimes[1] > ctimes[0], "{} is not overwritten.".format(manifest_jar_path))
| apache-2.0 |
FHannes/intellij-community | python/lib/Lib/site-packages/django/core/files/temp.py | 536 | 1819 | """
The temp module provides a NamedTemporaryFile that can be re-opened on any
platform. Most platforms use the standard Python tempfile.TemporaryFile class,
but MS Windows users are given a custom class.
This is needed because in Windows NT, the default implementation of
NamedTemporaryFile uses the O_TEMPORARY flag, and thus cannot be reopened [1].
1: http://mail.python.org/pipermail/python-list/2005-December/359474.html
"""
import os
import tempfile
from django.core.files.utils import FileProxyMixin
__all__ = ('NamedTemporaryFile', 'gettempdir',)
if os.name == 'nt':
class TemporaryFile(FileProxyMixin):
"""
Temporary file object constructor that works in Windows and supports
reopening of the temporary file in windows.
"""
def __init__(self, mode='w+b', bufsize=-1, suffix='', prefix='',
dir=None):
fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
dir=dir)
self.name = name
self.file = os.fdopen(fd, mode, bufsize)
self.close_called = False
# Because close can be called during shutdown
# we need to cache os.unlink and access it
# as self.unlink only
unlink = os.unlink
def close(self):
if not self.close_called:
self.close_called = True
try:
self.file.close()
except (OSError, IOError):
pass
try:
self.unlink(self.name)
except (OSError):
pass
def __del__(self):
self.close()
NamedTemporaryFile = TemporaryFile
else:
NamedTemporaryFile = tempfile.NamedTemporaryFile
gettempdir = tempfile.gettempdir
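# A minimal usage sketch, assuming only the NamedTemporaryFile name exposed above:
# it demonstrates the re-open-by-name behaviour described in the module docstring,
# which should hold for either implementation selected at import time.
if __name__ == '__main__':
    tmp = NamedTemporaryFile(suffix='.txt')
    tmp.write(b're-openable')
    tmp.flush()
    # A second, independent handle on the same file; this is the case the custom
    # Windows class above exists to support.
    reopened = open(tmp.name, 'rb')
    assert reopened.read() == b're-openable'
    reopened.close()
    tmp.close()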
| apache-2.0 |
jimi-c/ansible | lib/ansible/modules/network/junos/junos_system.py | 27 | 6189 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_system
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage the system attributes on Juniper JUNOS devices
description:
- This module provides declarative management of node system attributes
on Juniper JUNOS devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
options:
hostname:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- Configure the IP domain name
on the remote device to the provided value. Value
should be in the dotted name form and will be
appended to the C(hostname) to create a fully-qualified
domain name.
domain_search:
description:
- Provides the list of domain suffixes to
append to the hostname for the purpose of doing name resolution.
This argument accepts a list of names and will be reconciled
with the current active configuration on the running node.
name_servers:
description:
- List of DNS name servers by IP address to use to perform name resolution
        lookups. This argument accepts a list of DNS servers. See
        examples.
state:
description:
- State of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
default: present
choices: ['present', 'absent']
active:
description:
- Specifies whether or not the configuration is active or deactivated
default: True
type: bool
requirements:
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
- Recommended connection is C(netconf). See L(the Junos OS Platform Options,../network/user_guide/platform_junos.html).
- This module also works with C(local) connections for legacy playbooks.
extends_documentation_fragment: junos
"""
EXAMPLES = """
- name: configure hostname and domain name
junos_system:
hostname: junos01
domain_name: test.example.com
    domain_search:
- ansible.com
- redhat.com
- juniper.com
- name: remove configuration
junos_system:
state: absent
- name: configure name servers
junos_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
"""
RETURN = """
diff.prepared:
description: Configuration difference before and after applying change.
returned: when configuration is changed and diff option is enabled.
type: string
sample: >
[edit system]
+ host-name test;
+ domain-name ansible.com;
+ domain-search redhat.com;
[edit system name-server]
172.26.1.1 { ... }
+ 8.8.8.8;
"""
import collections
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.junos.junos import junos_argument_spec, tostring
from ansible.module_utils.network.junos.junos import load_config, map_params_to_obj, map_obj_to_ele
from ansible.module_utils.network.junos.junos import commit_configuration, discard_changes, locked_config
USE_PERSISTENT_CONNECTION = True
def validate_param_values(module, obj):
for key in obj:
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if callable(validator):
validator(module.params.get(key), module)
def main():
""" main entry point for module execution
"""
argument_spec = dict(
hostname=dict(),
domain_name=dict(),
domain_search=dict(type='list'),
name_servers=dict(type='list'),
state=dict(choices=['present', 'absent'], default='present'),
active=dict(default=True, type='bool')
)
argument_spec.update(junos_argument_spec)
params = ['hostname', 'domain_name', 'domain_search', 'name_servers']
required_if = [('state', 'present', params, True),
('state', 'absent', params, True),
('state', 'active', params, True),
('state', 'suspend', params, True)]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
top = 'system'
param_to_xpath_map = collections.OrderedDict()
param_to_xpath_map.update([
('hostname', {'xpath': 'host-name', 'leaf_only': True}),
('domain_name', {'xpath': 'domain-name', 'leaf_only': True}),
('domain_search', {'xpath': 'domain-search', 'leaf_only': True, 'value_req': True}),
('name_servers', {'xpath': 'name-server/name', 'is_key': True})
])
validate_param_values(module, param_to_xpath_map)
want = map_params_to_obj(module, param_to_xpath_map)
ele = map_obj_to_ele(module, want, top)
with locked_config(module):
diff = load_config(module, tostring(ele), warnings, action='merge')
commit = not module.check_mode
if diff:
if commit:
commit_configuration(module)
else:
discard_changes(module)
result['changed'] = True
if module._diff:
result['diff'] = {'prepared': diff}
module.exit_json(**result)
if __name__ == "__main__":
main()
| gpl-3.0 |
damonkohler/sl4a | python/src/Mac/IDLE/idlemain.py | 71 | 2786 | """
Bootstrap script for IDLE as an application bundle.
"""
import sys, os
# Change the current directory to the user's home directory; that way we'll get
# a more useful default location in the open/save dialogs.
os.chdir(os.path.expanduser('~/Documents'))
# Make sure sys.executable points to the python interpreter inside the
# framework, instead of at the helper executable inside the application
# bundle (the latter works, but doesn't allow access to the window server)
#
# .../IDLE.app/
# Contents/
# MacOS/
# IDLE (a python script)
# Python{-32} (symlink)
# Resources/
# idlemain.py (this module)
# ...
#
# ../IDLE.app/Contents/MacOS/Python{-32} is symlinked to
# ..Library/Frameworks/Python.framework/Versions/m.n
# /Resources/Python.app/Contents/MacOS/Python{-32}
# which is the Python interpreter executable
#
# The flow of control is as follows:
# 1. IDLE.app is launched which starts python running the IDLE script
# 2. IDLE script exports
# PYTHONEXECUTABLE = .../IDLE.app/Contents/MacOS/Python{-32}
# (the symlink to the framework python)
# 3. IDLE script alters sys.argv and uses os.execve to replace itself with
# idlemain.py running under the symlinked python.
# This is the magic step.
# 4. During interpreter initialization, because PYTHONEXECUTABLE is defined,
# sys.executable may get set to an unuseful value.
#
# (Note that the IDLE script and the setting of PYTHONEXECUTABLE is
# generated automatically by bundlebuilder in the Python 2.x build.
# Also, IDLE invoked via command line, i.e. bin/idle, bypasses all of
# this.)
#
# Now fix up the execution environment before importing idlelib.
# Reset sys.executable to its normal value, the actual path of
# the interpreter in the framework, by following the symlink
# exported in PYTHONEXECUTABLE.
pyex = os.environ['PYTHONEXECUTABLE']
sys.executable = os.path.join(os.path.dirname(pyex), os.readlink(pyex))
# Remove any sys.path entries for the Resources dir in the IDLE.app bundle.
p = pyex.partition('.app')
if p[2].startswith('/Contents/MacOS/Python'):
sys.path = [value for value in sys.path if
value.partition('.app') != (p[0], p[1], '/Contents/Resources')]
# Unexport PYTHONEXECUTABLE so that the other Python processes started
# by IDLE have a normal sys.executable.
del os.environ['PYTHONEXECUTABLE']
# Look for the -psn argument that the launcher adds and remove it, it will
# only confuse the IDLE startup code.
for idx, value in enumerate(sys.argv):
if value.startswith('-psn_'):
del sys.argv[idx]
break
# Now it is safe to import idlelib.
from idlelib.PyShell import main
if __name__ == '__main__':
main()
| apache-2.0 |
cloudmesh/sp17-i524 | project/S17-IR-P013/code/weather_data_analysis/run/wda_mapper.py | 19 | 2000 | #!/usr/bin/env python
import sys
import logging
import iu.i524.S17IRP013.hadoop.hbase_to_hdfs as h2h
DEFAULT_STATION_ID = 'DST:IND000DEF'
logging.basicConfig(format = '%(asctime)s %(message)s',\
datefmt = '%m/%d/%Y %I:%M:%S %p',\
filename = 'wda_app.log',\
level=logging.DEBUG)
def get_default_result():
result = dict()
result['TMAX'] = [DEFAULT_STATION_ID,0]
result['PRCP'] = [DEFAULT_STATION_ID,0]
result['TAVG'] = [DEFAULT_STATION_ID,0]
result['TMIN'] = [DEFAULT_STATION_ID,100]
return result
def compare_props(prop,result):
logging.info(prop)
if prop['parameter'] == 'TMAX':
if float(prop['value']) > float(result['TMAX'][1]) or result['TMAX'][1] == 0:
result['TMAX'][0] = prop['station_id']
result['TMAX'][1] = prop['value']
elif prop['parameter'] == 'TAVG':
if float(prop['value']) > float(result['TAVG'][1]) or result['TAVG'][1] == 0:
result['TAVG'][0] = prop['station_id']
result['TAVG'][1] = prop['value']
elif prop['parameter'] == 'PRCP':
if float(prop['value']) > float(result['PRCP'][1]) or result['PRCP'][1] == 0:
result['PRCP'][0] = prop['station_id']
result['PRCP'][1] = prop['value']
elif prop['parameter'] == 'TMIN':
if float(prop['value']) < float(result['TMIN'][1]) or result['TMIN'][1] == 0:
result['TMIN'][0] = prop['station_id']
result['TMIN'][1] = prop['value']
return result
# input comes from STDIN (standard input)
index = 0
for year_month in sys.stdin:
year_month = year_month.strip()
data_list = h2h.find_by_id(row_key=year_month)
tmax = 70
tmin=-70
tavg=0
prcp=0
result = get_default_result()
## Run analysis
for prop in data_list:
result = compare_props(prop=prop,result=result)
#print '%s\t%s' % (index, str(result))
print str(result)
| apache-2.0 |
kaushik94/boto | tests/unit/dynamodb/test_types.py | 9 | 4007 | #!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from decimal import Decimal
from tests.unit import unittest
from boto.dynamodb import types
from boto.dynamodb.exceptions import DynamoDBNumberError
class TestDynamizer(unittest.TestCase):
def setUp(self):
pass
def test_encoding_to_dynamodb(self):
dynamizer = types.Dynamizer()
self.assertEqual(dynamizer.encode('foo'), {'S': 'foo'})
self.assertEqual(dynamizer.encode(54), {'N': '54'})
self.assertEqual(dynamizer.encode(Decimal('1.1')), {'N': '1.1'})
self.assertEqual(dynamizer.encode(set([1, 2, 3])),
{'NS': ['1', '2', '3']})
self.assertEqual(dynamizer.encode(set(['foo', 'bar'])),
{'SS': ['foo', 'bar']})
self.assertEqual(dynamizer.encode(types.Binary('\x01')),
{'B': 'AQ=='})
self.assertEqual(dynamizer.encode(set([types.Binary('\x01')])),
{'BS': ['AQ==']})
def test_decoding_to_dynamodb(self):
dynamizer = types.Dynamizer()
self.assertEqual(dynamizer.decode({'S': 'foo'}), 'foo')
self.assertEqual(dynamizer.decode({'N': '54'}), 54)
self.assertEqual(dynamizer.decode({'N': '1.1'}), Decimal('1.1'))
self.assertEqual(dynamizer.decode({'NS': ['1', '2', '3']}),
set([1, 2, 3]))
self.assertEqual(dynamizer.decode({'SS': ['foo', 'bar']}),
set(['foo', 'bar']))
self.assertEqual(dynamizer.decode({'B': 'AQ=='}), types.Binary('\x01'))
self.assertEqual(dynamizer.decode({'BS': ['AQ==']}),
set([types.Binary('\x01')]))
def test_float_conversion_errors(self):
dynamizer = types.Dynamizer()
# When supporting decimals, certain floats will work:
self.assertEqual(dynamizer.encode(1.25), {'N': '1.25'})
# And some will generate errors, which is why it's best
# to just use Decimals directly:
with self.assertRaises(DynamoDBNumberError):
dynamizer.encode(1.1)
def test_lossy_float_conversions(self):
dynamizer = types.LossyFloatDynamizer()
# Just testing the differences here, specifically float conversions:
self.assertEqual(dynamizer.encode(1.1), {'N': '1.1'})
self.assertEqual(dynamizer.decode({'N': '1.1'}), 1.1)
self.assertEqual(dynamizer.encode(set([1.1])),
{'NS': ['1.1']})
self.assertEqual(dynamizer.decode({'NS': ['1.1', '2.2', '3.3']}),
set([1.1, 2.2, 3.3]))
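# A small illustrative helper, not a test case: the comments in
# test_float_conversion_errors above explain that binary floats such as 1.1 raise
# DynamoDBNumberError with the default Dynamizer, so the safer pattern sketched
# here builds Decimal values from strings and round-trips those instead.
def _decimal_roundtrip_sketch():
    dynamizer = types.Dynamizer()
    encoded = dynamizer.encode(Decimal('1.1'))   # -> {'N': '1.1'}
    return dynamizer.decode(encoded)             # -> Decimal('1.1')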
class TestBinary(unittest.TestCase):
def test_bad_input(self):
with self.assertRaises(TypeError):
data = types.Binary(1)
def test_good_input(self):
data = types.Binary(chr(1))
self.assertEqual('\x01', str(data))
if __name__ == '__main__':
unittest.main()
| mit |
yangming85/lettuce | tests/integration/lib/Django-1.3/tests/modeltests/unmanaged_models/models.py | 91 | 3475 | """
Models can have a ``managed`` attribute, which specifies whether the SQL code
is generated for the table on various manage.py operations.
"""
from django.db import models
# All of these models are created in the database by Django.
class A01(models.Model):
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
class Meta:
db_table = 'A01'
def __unicode__(self):
return self.f_a
class B01(models.Model):
fk_a = models.ForeignKey(A01)
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
class Meta:
db_table = 'B01'
# 'managed' is True by default. This tests we can set it explicitly.
managed = True
def __unicode__(self):
return self.f_a
class C01(models.Model):
mm_a = models.ManyToManyField(A01, db_table='D01')
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
class Meta:
db_table = 'C01'
def __unicode__(self):
return self.f_a
# All of these models use the same tables as the previous set (they are shadows
# of possibly a subset of the columns). There should be no creation errors,
# since we have told Django they aren't managed by Django.
class A02(models.Model):
f_a = models.CharField(max_length=10, db_index=True)
class Meta:
db_table = 'A01'
managed = False
def __unicode__(self):
return self.f_a
class B02(models.Model):
class Meta:
db_table = 'B01'
managed = False
fk_a = models.ForeignKey(A02)
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
def __unicode__(self):
return self.f_a
# To re-use the many-to-many intermediate table, we need to set things up
# manually.
class C02(models.Model):
mm_a = models.ManyToManyField(A02, through="Intermediate")
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
class Meta:
db_table = 'C01'
managed = False
def __unicode__(self):
return self.f_a
class Intermediate(models.Model):
a02 = models.ForeignKey(A02, db_column="a01_id")
c02 = models.ForeignKey(C02, db_column="c01_id")
class Meta:
db_table = 'D01'
managed = False
#
# These next models test the creation (or not) of many to many join tables
# between managed and unmanaged models. A join table between two unmanaged
# models shouldn't be automatically created (see #10647).
#
# Firstly, we need some models that will create the tables, purely so that the
# tables are created. This is a test setup, not a requirement for unmanaged
# models.
class Proxy1(models.Model):
class Meta:
db_table = "unmanaged_models_proxy1"
class Proxy2(models.Model):
class Meta:
db_table = "unmanaged_models_proxy2"
class Unmanaged1(models.Model):
class Meta:
managed = False
db_table = "unmanaged_models_proxy1"
# Unmanaged with an m2m to unmanaged: the intermediary table won't be created.
class Unmanaged2(models.Model):
mm = models.ManyToManyField(Unmanaged1)
class Meta:
managed = False
db_table = "unmanaged_models_proxy2"
# Here's an unmanaged model with an m2m to a managed one; the intermediary
# table *will* be created (unless given a custom `through` as for C02 above).
class Managed1(models.Model):
mm = models.ManyToManyField(Unmanaged1)
| gpl-3.0 |
aaltinisik/OCBAltinkaya | addons/l10n_in_hr_payroll/report/report_payroll_advice.py | 374 | 3442 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.osv import osv
from openerp.report import report_sxw
from openerp.tools import amount_to_text_en
class payroll_advice_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(payroll_advice_report, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'get_month': self.get_month,
'convert': self.convert,
'get_detail': self.get_detail,
'get_bysal_total': self.get_bysal_total,
})
self.context = context
def get_month(self, input_date):
payslip_pool = self.pool.get('hr.payslip')
res = {
'from_name': '', 'to_name': ''
}
slip_ids = payslip_pool.search(self.cr, self.uid, [('date_from','<=',input_date), ('date_to','>=',input_date)], context=self.context)
if slip_ids:
slip = payslip_pool.browse(self.cr, self.uid, slip_ids, context=self.context)[0]
from_date = datetime.strptime(slip.date_from, '%Y-%m-%d')
to_date = datetime.strptime(slip.date_to, '%Y-%m-%d')
res['from_name']= from_date.strftime('%d')+'-'+from_date.strftime('%B')+'-'+from_date.strftime('%Y')
res['to_name']= to_date.strftime('%d')+'-'+to_date.strftime('%B')+'-'+to_date.strftime('%Y')
return res
def convert(self, amount, cur):
        return amount_to_text_en.amount_to_text(amount, 'en', cur)
def get_bysal_total(self):
return self.total_bysal
def get_detail(self, line_ids):
result = []
self.total_bysal = 0.00
for l in line_ids:
res = {}
res.update({
'name': l.employee_id.name,
'acc_no': l.name,
'ifsc_code': l.ifsc_code,
'bysal': l.bysal,
'debit_credit': l.debit_credit,
})
self.total_bysal += l.bysal
result.append(res)
return result
class wrapped_report_payroll_advice(osv.AbstractModel):
_name = 'report.l10n_in_hr_payroll.report_payrolladvice'
_inherit = 'report.abstract_report'
_template = 'l10n_in_hr_payroll.report_payrolladvice'
_wrapped_report_class = payroll_advice_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
meizhan/SVMelegans | Common/libsvm-3.17/tools/grid.py | 49 | 15304 | #!/usr/bin/env python
__all__ = ['find_parameters']
import os, sys, traceback, getpass, time, re
from threading import Thread
from subprocess import *
if sys.version_info[0] < 3:
from Queue import Queue
else:
from queue import Queue
telnet_workers = []
ssh_workers = []
nr_local_worker = 1
class GridOption:
def __init__(self, dataset_pathname, options):
dirname = os.path.dirname(__file__)
if sys.platform != 'win32':
self.svmtrain_pathname = os.path.join(dirname, '../svm-train')
self.gnuplot_pathname = '/usr/bin/gnuplot'
else:
# example for windows
self.svmtrain_pathname = os.path.join(dirname, r'..\windows\svm-train.exe')
# svmtrain_pathname = r'c:\Program Files\libsvm\windows\svm-train.exe'
self.gnuplot_pathname = r'c:\tmp\gnuplot\binary\pgnuplot.exe'
self.fold = 5
self.c_begin, self.c_end, self.c_step = -5, 15, 2
self.g_begin, self.g_end, self.g_step = 3, -15, -2
self.grid_with_c, self.grid_with_g = True, True
self.dataset_pathname = dataset_pathname
self.dataset_title = os.path.split(dataset_pathname)[1]
self.out_pathname = '{0}.out'.format(self.dataset_title)
self.png_pathname = '{0}.png'.format(self.dataset_title)
self.pass_through_string = ' '
self.resume_pathname = None
self.parse_options(options)
def parse_options(self, options):
if type(options) == str:
options = options.split()
i = 0
pass_through_options = []
while i < len(options):
if options[i] == '-log2c':
i = i + 1
if options[i] == 'null':
self.grid_with_c = False
else:
self.c_begin, self.c_end, self.c_step = map(float,options[i].split(','))
elif options[i] == '-log2g':
i = i + 1
if options[i] == 'null':
self.grid_with_g = False
else:
self.g_begin, self.g_end, self.g_step = map(float,options[i].split(','))
elif options[i] == '-v':
i = i + 1
self.fold = options[i]
elif options[i] in ('-c','-g'):
raise ValueError('Use -log2c and -log2g.')
elif options[i] == '-svmtrain':
i = i + 1
self.svmtrain_pathname = options[i]
elif options[i] == '-gnuplot':
i = i + 1
if options[i] == 'null':
self.gnuplot_pathname = None
else:
self.gnuplot_pathname = options[i]
elif options[i] == '-out':
i = i + 1
if options[i] == 'null':
self.out_pathname = None
else:
self.out_pathname = options[i]
elif options[i] == '-png':
i = i + 1
self.png_pathname = options[i]
elif options[i] == '-resume':
if i == (len(options)-1) or options[i+1].startswith('-'):
self.resume_pathname = self.dataset_title + '.out'
else:
i = i + 1
self.resume_pathname = options[i]
else:
pass_through_options.append(options[i])
i = i + 1
self.pass_through_string = ' '.join(pass_through_options)
if not os.path.exists(self.svmtrain_pathname):
raise IOError('svm-train executable not found')
if not os.path.exists(self.dataset_pathname):
raise IOError('dataset not found')
if self.resume_pathname and not os.path.exists(self.resume_pathname):
raise IOError('file for resumption not found')
if not self.grid_with_c and not self.grid_with_g:
raise ValueError('-log2c and -log2g should not be null simultaneously')
if self.gnuplot_pathname and not os.path.exists(self.gnuplot_pathname):
sys.stderr.write('gnuplot executable not found\n')
self.gnuplot_pathname = None
def redraw(db,best_param,gnuplot,options,tofile=False):
if len(db) == 0: return
begin_level = round(max(x[2] for x in db)) - 3
step_size = 0.5
best_log2c,best_log2g,best_rate = best_param
# if newly obtained c, g, or cv values are the same,
# then stop redrawing the contour.
if all(x[0] == db[0][0] for x in db): return
if all(x[1] == db[0][1] for x in db): return
if all(x[2] == db[0][2] for x in db): return
if tofile:
gnuplot.write(b"set term png transparent small linewidth 2 medium enhanced\n")
gnuplot.write("set output \"{0}\"\n".format(options.png_pathname.replace('\\','\\\\')).encode())
#gnuplot.write(b"set term postscript color solid\n")
#gnuplot.write("set output \"{0}.ps\"\n".format(options.dataset_title).encode().encode())
elif sys.platform == 'win32':
gnuplot.write(b"set term windows\n")
else:
gnuplot.write( b"set term x11\n")
gnuplot.write(b"set xlabel \"log2(C)\"\n")
gnuplot.write(b"set ylabel \"log2(gamma)\"\n")
gnuplot.write("set xrange [{0}:{1}]\n".format(options.c_begin,options.c_end).encode())
gnuplot.write("set yrange [{0}:{1}]\n".format(options.g_begin,options.g_end).encode())
gnuplot.write(b"set contour\n")
gnuplot.write("set cntrparam levels incremental {0},{1},100\n".format(begin_level,step_size).encode())
gnuplot.write(b"unset surface\n")
gnuplot.write(b"unset ztics\n")
gnuplot.write(b"set view 0,0\n")
gnuplot.write("set title \"{0}\"\n".format(options.dataset_title).encode())
gnuplot.write(b"unset label\n")
gnuplot.write("set label \"Best log2(C) = {0} log2(gamma) = {1} accuracy = {2}%\" \
at screen 0.5,0.85 center\n". \
format(best_log2c, best_log2g, best_rate).encode())
gnuplot.write("set label \"C = {0} gamma = {1}\""
" at screen 0.5,0.8 center\n".format(2**best_log2c, 2**best_log2g).encode())
gnuplot.write(b"set key at screen 0.9,0.9\n")
gnuplot.write(b"splot \"-\" with lines\n")
db.sort(key = lambda x:(x[0], -x[1]))
prevc = db[0][0]
for line in db:
if prevc != line[0]:
gnuplot.write(b"\n")
prevc = line[0]
gnuplot.write("{0[0]} {0[1]} {0[2]}\n".format(line).encode())
gnuplot.write(b"e\n")
gnuplot.write(b"\n") # force gnuplot back to prompt when term set failure
gnuplot.flush()
def calculate_jobs(options):
def range_f(begin,end,step):
# like range, but works on non-integer too
seq = []
while True:
if step > 0 and begin > end: break
if step < 0 and begin < end: break
seq.append(begin)
begin = begin + step
return seq
def permute_sequence(seq):
n = len(seq)
if n <= 1: return seq
mid = int(n/2)
left = permute_sequence(seq[:mid])
right = permute_sequence(seq[mid+1:])
ret = [seq[mid]]
while left or right:
if left: ret.append(left.pop(0))
if right: ret.append(right.pop(0))
return ret
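# Illustrative note (not part of the original script): permute_sequence reorders
# a list midpoint-first so that coarse-grained grid points are tried early, e.g.
#   permute_sequence([0, 1, 2, 3, 4]) -> [2, 1, 4, 0, 3]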
c_seq = permute_sequence(range_f(options.c_begin,options.c_end,options.c_step))
g_seq = permute_sequence(range_f(options.g_begin,options.g_end,options.g_step))
if not options.grid_with_c:
c_seq = [None]
if not options.grid_with_g:
g_seq = [None]
nr_c = float(len(c_seq))
nr_g = float(len(g_seq))
i, j = 0, 0
jobs = []
while i < nr_c or j < nr_g:
if i/nr_c < j/nr_g:
# increase C resolution
line = []
for k in range(0,j):
line.append((c_seq[i],g_seq[k]))
i = i + 1
jobs.append(line)
else:
# increase g resolution
line = []
for k in range(0,i):
line.append((c_seq[k],g_seq[j]))
j = j + 1
jobs.append(line)
resumed_jobs = {}
if options.resume_pathname is None:
return jobs, resumed_jobs
for line in open(options.resume_pathname, 'r'):
line = line.strip()
rst = re.findall(r'rate=([0-9.]+)',line)
if not rst:
continue
rate = float(rst[0])
c, g = None, None
rst = re.findall(r'log2c=([0-9.-]+)',line)
if rst:
c = float(rst[0])
rst = re.findall(r'log2g=([0-9.-]+)',line)
if rst:
g = float(rst[0])
resumed_jobs[(c,g)] = rate
return jobs, resumed_jobs
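# Illustrative note (not part of the original script; numbers are hypothetical):
# a resumable line written by find_parameters() below looks like
#   "log2c=-1.0 log2g=-7.0 rate=84.25"
# and is parsed here into resumed_jobs[(-1.0, -7.0)] = 84.25.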
class WorkerStopToken: # used to notify the worker to stop or if a worker is dead
pass
class Worker(Thread):
def __init__(self,name,job_queue,result_queue,options):
Thread.__init__(self)
self.name = name
self.job_queue = job_queue
self.result_queue = result_queue
self.options = options
def run(self):
while True:
(cexp,gexp) = self.job_queue.get()
if cexp is WorkerStopToken:
self.job_queue.put((cexp,gexp))
# print('worker {0} stop.'.format(self.name))
break
try:
c, g = None, None
if cexp != None:
c = 2.0**cexp
if gexp != None:
g = 2.0**gexp
rate = self.run_one(c,g)
if rate is None: raise RuntimeError('get no rate')
except:
# we failed, let others do that and we just quit
traceback.print_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
self.job_queue.put((cexp,gexp))
sys.stderr.write('worker {0} quit.\n'.format(self.name))
break
else:
self.result_queue.put((self.name,cexp,gexp,rate))
def get_cmd(self,c,g):
options=self.options
cmdline = options.svmtrain_pathname
if options.grid_with_c:
cmdline += ' -c {0} '.format(c)
if options.grid_with_g:
cmdline += ' -g {0} '.format(g)
cmdline += ' -v {0} {1} {2} '.format\
(options.fold,options.pass_through_string,options.dataset_pathname)
return cmdline
class LocalWorker(Worker):
def run_one(self,c,g):
cmdline = self.get_cmd(c,g)
result = Popen(cmdline,shell=True,stdout=PIPE,stderr=PIPE,stdin=PIPE).stdout
for line in result.readlines():
if str(line).find('Cross') != -1:
return float(line.split()[-1][0:-1])
class SSHWorker(Worker):
def __init__(self,name,job_queue,result_queue,host,options):
Worker.__init__(self,name,job_queue,result_queue,options)
self.host = host
self.cwd = os.getcwd()
def run_one(self,c,g):
cmdline = 'ssh -x -t -t {0} "cd {1}; {2}"'.format\
(self.host,self.cwd,self.get_cmd(c,g))
result = Popen(cmdline,shell=True,stdout=PIPE,stderr=PIPE,stdin=PIPE).stdout
for line in result.readlines():
if str(line).find('Cross') != -1:
return float(line.split()[-1][0:-1])
class TelnetWorker(Worker):
def __init__(self,name,job_queue,result_queue,host,username,password,options):
Worker.__init__(self,name,job_queue,result_queue,options)
self.host = host
self.username = username
self.password = password
def run(self):
import telnetlib
self.tn = tn = telnetlib.Telnet(self.host)
tn.read_until('login: ')
tn.write(self.username + '\n')
tn.read_until('Password: ')
tn.write(self.password + '\n')
# XXX: how to know whether login is successful?
tn.read_until(self.username)
#
print('login ok', self.host)
tn.write('cd '+os.getcwd()+'\n')
Worker.run(self)
tn.write('exit\n')
def run_one(self,c,g):
cmdline = self.get_cmd(c,g)
result = self.tn.write(cmdline+'\n')
(idx,matchm,output) = self.tn.expect(['Cross.*\n'])
for line in output.split('\n'):
if str(line).find('Cross') != -1:
return float(line.split()[-1][0:-1])
def find_parameters(dataset_pathname, options=''):
def update_param(c,g,rate,best_c,best_g,best_rate,worker,resumed):
if (rate > best_rate) or (rate==best_rate and g==best_g and c<best_c):
best_rate,best_c,best_g = rate,c,g
stdout_str = '[{0}] {1} {2} (best '.format\
(worker,' '.join(str(x) for x in [c,g] if x is not None),rate)
output_str = ''
if c != None:
stdout_str += 'c={0}, '.format(2.0**best_c)
output_str += 'log2c={0} '.format(c)
if g != None:
stdout_str += 'g={0}, '.format(2.0**best_g)
output_str += 'log2g={0} '.format(g)
stdout_str += 'rate={0})'.format(best_rate)
print(stdout_str)
if options.out_pathname and not resumed:
output_str += 'rate={0}\n'.format(rate)
result_file.write(output_str)
result_file.flush()
return best_c,best_g,best_rate
options = GridOption(dataset_pathname, options)
if options.gnuplot_pathname:
gnuplot = Popen(options.gnuplot_pathname,stdin = PIPE,stdout=PIPE,stderr=PIPE).stdin
else:
gnuplot = None
# put jobs in queue
jobs,resumed_jobs = calculate_jobs(options)
job_queue = Queue(0)
result_queue = Queue(0)
for (c,g) in resumed_jobs:
result_queue.put(('resumed',c,g,resumed_jobs[(c,g)]))
for line in jobs:
for (c,g) in line:
if (c,g) not in resumed_jobs:
job_queue.put((c,g))
# hack the queue to become a stack --
# this is important when some thread
# failed and re-put a job. If we still
# used FIFO, the job would be put
# at the end of the queue, and the graph
# would only be updated at the end
job_queue._put = job_queue.queue.appendleft
# fire telnet workers
if telnet_workers:
nr_telnet_worker = len(telnet_workers)
username = getpass.getuser()
password = getpass.getpass()
for host in telnet_workers:
worker = TelnetWorker(host,job_queue,result_queue,
host,username,password,options)
worker.start()
# fire ssh workers
if ssh_workers:
for host in ssh_workers:
worker = SSHWorker(host,job_queue,result_queue,host,options)
worker.start()
# fire local workers
for i in range(nr_local_worker):
worker = LocalWorker('local',job_queue,result_queue,options)
worker.start()
# gather results
done_jobs = {}
if options.out_pathname:
if options.resume_pathname:
result_file = open(options.out_pathname, 'a')
else:
result_file = open(options.out_pathname, 'w')
db = []
best_rate = -1
best_c,best_g = None,None
for (c,g) in resumed_jobs:
rate = resumed_jobs[(c,g)]
best_c,best_g,best_rate = update_param(c,g,rate,best_c,best_g,best_rate,'resumed',True)
for line in jobs:
for (c,g) in line:
while (c,g) not in done_jobs:
(worker,c1,g1,rate1) = result_queue.get()
done_jobs[(c1,g1)] = rate1
if (c1,g1) not in resumed_jobs:
best_c,best_g,best_rate = update_param(c1,g1,rate1,best_c,best_g,best_rate,worker,False)
db.append((c,g,done_jobs[(c,g)]))
if gnuplot and options.grid_with_c and options.grid_with_g:
redraw(db,[best_c, best_g, best_rate],gnuplot,options)
redraw(db,[best_c, best_g, best_rate],gnuplot,options,True)
if options.out_pathname:
result_file.close()
job_queue.put((WorkerStopToken,None))
best_param, best_cg = {}, []
if best_c != None:
best_param['c'] = 2.0**best_c
best_cg += [2.0**best_c]
if best_g != None:
best_param['g'] = 2.0**best_g
best_cg += [2.0**best_g]
print('{0} {1}'.format(' '.join(map(str,best_cg)), best_rate))
return best_rate, best_param
if __name__ == '__main__':
def exit_with_help():
print("""\
Usage: grid.py [grid_options] [svm_options] dataset
grid_options :
-log2c {begin,end,step | "null"} : set the range of c (default -5,15,2)
begin,end,step -- c_range = 2^{begin,...,begin+k*step,...,end}
"null" -- do not grid with c
-log2g {begin,end,step | "null"} : set the range of g (default 3,-15,-2)
begin,end,step -- g_range = 2^{begin,...,begin+k*step,...,end}
"null" -- do not grid with g
-v n : n-fold cross validation (default 5)
-svmtrain pathname : set svm executable path and name
-gnuplot {pathname | "null"} :
pathname -- set gnuplot executable path and name
"null" -- do not plot
-out {pathname | "null"} : (default dataset.out)
pathname -- set output file path and name
"null" -- do not output file
-png pathname : set graphic output file path and name (default dataset.png)
-resume [pathname] : resume the grid task using an existing output file (default pathname is dataset.out)
This is experimental. Try this option only if some parameters have been checked for the SAME data.
svm_options : additional options for svm-train""")
sys.exit(1)
if len(sys.argv) < 2:
exit_with_help()
dataset_pathname = sys.argv[-1]
options = sys.argv[1:-1]
try:
find_parameters(dataset_pathname, options)
except (IOError,ValueError) as e:
sys.stderr.write(str(e) + '\n')
sys.stderr.write('Try "grid.py" for more information.\n')
sys.exit(1)
| mit |
dimara/ganeti | lib/hypervisor/hv_xen.py | 1 | 51356 | #
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Xen hypervisors
"""
import logging
import errno
import string # pylint: disable=W0402
import shutil
from cStringIO import StringIO
from ganeti import constants
from ganeti import errors
from ganeti import utils
from ganeti.hypervisor import hv_base
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
XEND_CONFIG_FILE = utils.PathJoin(pathutils.XEN_CONFIG_DIR, "xend-config.sxp")
XL_CONFIG_FILE = utils.PathJoin(pathutils.XEN_CONFIG_DIR, "xen/xl.conf")
VIF_BRIDGE_SCRIPT = utils.PathJoin(pathutils.XEN_CONFIG_DIR,
"scripts/vif-bridge")
_DOM0_NAME = "Domain-0"
_DISK_LETTERS = string.ascii_lowercase
_FILE_DRIVER_MAP = {
constants.FD_LOOP: "file",
constants.FD_BLKTAP: "tap:aio",
constants.FD_BLKTAP2: "tap2:tapdisk:aio",
}
def _CreateConfigCpus(cpu_mask):
"""Create a CPU config string for Xen's config file.
"""
# Convert the string CPU mask to a list of list of int's
cpu_list = utils.ParseMultiCpuMask(cpu_mask)
if len(cpu_list) == 1:
all_cpu_mapping = cpu_list[0]
if all_cpu_mapping == constants.CPU_PINNING_OFF:
# If CPU pinning has 1 entry that's "all", then remove the
# parameter from the config file
return None
else:
# If CPU pinning has one non-all entry, mapping all vCPUS (the entire
# VM) to one physical CPU, using format 'cpu = "C"'
return "cpu = \"%s\"" % ",".join(map(str, all_cpu_mapping))
else:
def _GetCPUMap(vcpu):
if vcpu[0] == constants.CPU_PINNING_ALL_VAL:
cpu_map = constants.CPU_PINNING_ALL_XEN
else:
cpu_map = ",".join(map(str, vcpu))
return "\"%s\"" % cpu_map
# build the result string in format 'cpus = [ "c", "c", "c" ]',
# where each c is a physical CPU number, a range, a list, or any
# combination
return "cpus = [ %s ]" % ", ".join(map(_GetCPUMap, cpu_list))
def _RunInstanceList(fn, instance_list_errors):
"""Helper function for L{_GetAllInstanceList} to retrieve the list
of instances from xen.
@type fn: callable
@param fn: Function to query xen for the list of instances
@type instance_list_errors: list
@param instance_list_errors: Error list
@rtype: list
"""
result = fn()
if result.failed:
logging.error("Retrieving the instance list from xen failed (%s): %s",
result.fail_reason, result.output)
instance_list_errors.append(result)
raise utils.RetryAgain()
# skip over the heading
return result.stdout.splitlines()
def _ParseInstanceList(lines, include_node):
"""Parses the output of listing instances by xen.
@type lines: list
@param lines: Result of retrieving the instance list from xen
@type include_node: boolean
@param include_node: If True, return information for Dom0
@return: list of tuple containing (name, id, memory, vcpus, state, time
spent)
"""
result = []
# Iterate through all lines while ignoring header
for line in lines[1:]:
# The format of lines is:
# Name ID Mem(MiB) VCPUs State Time(s)
# Domain-0 0 3418 4 r----- 266.2
data = line.split()
if len(data) != 6:
raise errors.HypervisorError("Can't parse instance list,"
" line: %s" % line)
try:
data[1] = int(data[1])
data[2] = int(data[2])
data[3] = int(data[3])
data[4] = _XenToHypervisorInstanceState(data[4])
data[5] = float(data[5])
except (TypeError, ValueError), err:
raise errors.HypervisorError("Can't parse instance list,"
" line: %s, error: %s" % (line, err))
# skip the Domain-0 (optional)
if include_node or data[0] != _DOM0_NAME:
result.append(data)
return result
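# Illustrative note (not part of the original module): for the sample
# "Domain-0  0  3418  4  r-----  266.2" line shown in the comment above,
# _ParseInstanceList(lines, include_node=True) would return
#   [["Domain-0", 0, 3418, 4, hv_base.HvInstanceState.RUNNING, 266.2]]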
def _GetAllInstanceList(fn, include_node, delays, timeout):
"""Return the list of instances including running and shutdown.
See L{_RunInstanceList} and L{_ParseInstanceList} for parameter details.
"""
instance_list_errors = []
try:
lines = utils.Retry(_RunInstanceList, delays, timeout,
args=(fn, instance_list_errors))
except utils.RetryTimeout:
if instance_list_errors:
instance_list_result = instance_list_errors.pop()
errmsg = ("listing instances failed, timeout exceeded (%s): %s" %
(instance_list_result.fail_reason, instance_list_result.output))
else:
errmsg = "listing instances failed"
raise errors.HypervisorError(errmsg)
return _ParseInstanceList(lines, include_node)
def _IsInstanceRunning(instance_info):
"""Determine whether an instance is running.
An instance is running if it is in the following Xen states:
running, blocked, paused, or dying (about to be destroyed / shutdown).
For some strange reason, Xen once printed 'rb----' which does not make any
sense because an instance cannot be both running and blocked. Fortunately,
for Ganeti 'running' or 'blocked' is the same as 'running'.
A state of nothing '------' means that the domain is runnable but it is not
currently running. That means it is in the queue behind other domains waiting
to be scheduled to run.
http://old-list-archives.xenproject.org/xen-users/2007-06/msg00849.html
A dying instance is about to be removed, but it is still consuming resources,
and counts as running.
@type instance_info: string
@param instance_info: Information about instance, as supplied by Xen.
@rtype: bool
@return: Whether an instance is running.
"""
return instance_info == "r-----" \
or instance_info == "rb----" \
or instance_info == "-b----" \
or instance_info == "-----d" \
or instance_info == "------"
def _IsInstanceShutdown(instance_info):
"""Determine whether the instance is shutdown.
An instance is shut down when a user shuts it down from within, and we do
not remove such domains, so that this state can be detected.
The dying state has been added as a precaution, as Xen's status reporting is
weird.
"""
return instance_info == "---s--" \
or instance_info == "---s-d"
def _IgnorePaused(instance_info):
"""Removes information about whether a Xen state is paused from the state.
As it turns out, an instance can be reported as paused in almost any
condition. Paused instances can be paused, running instances can be paused for
scheduling, and any other condition can appear to be paused as a result of
races or improbable conditions in Xen's status reporting.
As we do not use Xen's pause commands in any way at the time, we can simply
ignore the paused field and save ourselves a lot of trouble.
Should we ever use the pause commands, several samples would be needed before
we could confirm the domain as paused.
"""
return instance_info.replace('p', '-')
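# Illustrative note (not part of the original module; the state strings are
# hypothetical):
#   _IgnorePaused("--p---") -> "------"
#   _IgnorePaused("r-p---") -> "r-----"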
def _XenToHypervisorInstanceState(instance_info):
"""Maps Xen states to hypervisor states.
@type instance_info: string
@param instance_info: Information about instance, as supplied by Xen.
@rtype: L{hv_base.HvInstanceState}
"""
instance_info = _IgnorePaused(instance_info)
if _IsInstanceRunning(instance_info):
return hv_base.HvInstanceState.RUNNING
elif _IsInstanceShutdown(instance_info):
return hv_base.HvInstanceState.SHUTDOWN
else:
raise errors.HypervisorError("hv_xen._XenToHypervisorInstanceState:"
" unhandled Xen instance state '%s'" %
instance_info)
def _GetRunningInstanceList(fn, include_node, delays, timeout):
"""Return the list of running instances.
See L{_GetAllInstanceList} for parameter details.
"""
instances = _GetAllInstanceList(fn, include_node, delays, timeout)
return [i for i in instances if hv_base.HvInstanceState.IsRunning(i[4])]
def _GetShutdownInstanceList(fn, include_node, delays, timeout):
"""Return the list of shutdown instances.
See L{_GetAllInstanceList} for parameter details.
"""
instances = _GetAllInstanceList(fn, include_node, delays, timeout)
return [i for i in instances if hv_base.HvInstanceState.IsShutdown(i[4])]
def _ParseNodeInfo(info):
"""Return information about the node.
@return: a dict with the following keys (memory values in MiB):
- memory_total: the total memory size on the node
- memory_free: the available memory on the node for instances
- nr_cpus: total number of CPUs
- nr_nodes: in a NUMA system, the number of domains
- nr_sockets: the number of physical CPU sockets in the node
- hv_version: the hypervisor version in the form (major, minor)
"""
result = {}
cores_per_socket = threads_per_core = nr_cpus = None
xen_major, xen_minor = None, None
memory_total = None
memory_free = None
for line in info.splitlines():
fields = line.split(":", 1)
if len(fields) < 2:
continue
(key, val) = map(lambda s: s.strip(), fields)
# Note: in Xen 3, memory has changed to total_memory
if key in ("memory", "total_memory"):
memory_total = int(val)
elif key == "free_memory":
memory_free = int(val)
elif key == "nr_cpus":
nr_cpus = result["cpu_total"] = int(val)
elif key == "nr_nodes":
result["cpu_nodes"] = int(val)
elif key == "cores_per_socket":
cores_per_socket = int(val)
elif key == "threads_per_core":
threads_per_core = int(val)
elif key == "xen_major":
xen_major = int(val)
elif key == "xen_minor":
xen_minor = int(val)
if None not in [cores_per_socket, threads_per_core, nr_cpus]:
result["cpu_sockets"] = nr_cpus / (cores_per_socket * threads_per_core)
if memory_free is not None:
result["memory_free"] = memory_free
if memory_total is not None:
result["memory_total"] = memory_total
if not (xen_major is None or xen_minor is None):
result[constants.HV_NODEINFO_KEY_VERSION] = (xen_major, xen_minor)
return result
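# Illustrative sketch (not part of the original module; values hypothetical):
# given "xm info"/"xl info" output containing lines such as
#   nr_cpus              : 8
#   memory               : 16384
#   free_memory          : 4096
#   xen_major            : 4
#   xen_minor            : 2
# _ParseNodeInfo would report cpu_total=8, memory_total=16384, memory_free=4096
# and a hypervisor version of (4, 2).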
def _MergeInstanceInfo(info, instance_list):
"""Updates node information from L{_ParseNodeInfo} with instance info.
@type info: dict
@param info: Result from L{_ParseNodeInfo}
@type instance_list: list of tuples
@param instance_list: list of instance information; one tuple per instance
@rtype: dict
"""
total_instmem = 0
for (name, _, mem, vcpus, _, _) in instance_list:
if name == _DOM0_NAME:
info["memory_dom0"] = mem
info["cpu_dom0"] = vcpus
# Include Dom0 in total memory usage
total_instmem += mem
memory_free = info.get("memory_free")
memory_total = info.get("memory_total")
# Calculate memory used by hypervisor
if None not in [memory_total, memory_free, total_instmem]:
info["memory_hv"] = memory_total - memory_free - total_instmem
return info
def _GetNodeInfo(info, instance_list):
"""Combines L{_MergeInstanceInfo} and L{_ParseNodeInfo}.
@type instance_list: list of tuples
@param instance_list: list of instance information; one tuple per instance
"""
return _MergeInstanceInfo(_ParseNodeInfo(info), instance_list)
def _GetConfigFileDiskData(block_devices, blockdev_prefix,
_letters=_DISK_LETTERS):
"""Get disk directives for Xen config file.
This method builds the xen config disk directive according to the
given disk_template and block_devices.
@param block_devices: list of tuples (cfdev, rldev):
- cfdev: dict containing ganeti config disk part
- rldev: ganeti.block.bdev.BlockDev object
@param blockdev_prefix: a string containing blockdevice prefix,
e.g. "sd" for /dev/sda
@return: string containing disk directive for xen instance config file
"""
if len(block_devices) > len(_letters):
raise errors.HypervisorError("Too many disks")
disk_data = []
for sd_suffix, (cfdev, dev_path, _) in zip(_letters, block_devices):
sd_name = blockdev_prefix + sd_suffix
if cfdev.mode == constants.DISK_RDWR:
mode = "w"
else:
mode = "r"
if cfdev.dev_type in constants.DTS_FILEBASED:
driver = _FILE_DRIVER_MAP[cfdev.logical_id[0]]
else:
driver = "phy"
disk_data.append("'%s:%s,%s,%s'" % (driver, dev_path, sd_name, mode))
return disk_data
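# Illustrative result (not part of the original module; device paths are
# hypothetical): with blockdev_prefix "sd", one read-write and one read-only
# LVM-backed disk would yield
#   ["'phy:/dev/xenvg/disk0,sda,w'", "'phy:/dev/xenvg/disk1,sdb,r'"]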
def _QuoteCpuidField(data):
"""Add quotes around the CPUID field only if necessary.
Xen CPUID fields come in two shapes: LIBXL strings, which need quotes around
them, and lists of XEND strings, which don't.
@param data: Either type of parameter.
@return: The quoted version thereof.
"""
return "'%s'" % data if data.startswith("host") else data
def _ConfigureNIC(instance, seq, nic, tap):
"""Run the network configuration script for a specified NIC
See L{hv_base.ConfigureNIC}.
@type instance: instance object
@param instance: instance we're acting on
@type seq: int
@param seq: nic sequence number
@type nic: nic object
@param nic: nic we're acting on
@type tap: str
@param tap: the host's tap interface this NIC corresponds to
"""
hv_base.ConfigureNIC(pathutils.XEN_IFUP_OS, instance, seq, nic, tap)
class XenHypervisor(hv_base.BaseHypervisor):
"""Xen generic hypervisor interface
This is the Xen base class used for both Xen PVM and HVM. It contains
all the functionality that is identical for both.
"""
CAN_MIGRATE = True
REBOOT_RETRY_COUNT = 60
REBOOT_RETRY_INTERVAL = 10
_ROOT_DIR = pathutils.RUN_DIR + "/xen-hypervisor"
_NICS_DIR = _ROOT_DIR + "/nic" # contains NICs' info
_DIRS = [_ROOT_DIR, _NICS_DIR]
_INSTANCE_LIST_DELAYS = (0.3, 1.5, 1.0)
_INSTANCE_LIST_TIMEOUT = 5
ANCILLARY_FILES = [
XEND_CONFIG_FILE,
XL_CONFIG_FILE,
VIF_BRIDGE_SCRIPT,
]
ANCILLARY_FILES_OPT = [
XL_CONFIG_FILE,
]
def __init__(self, _cfgdir=None, _run_cmd_fn=None, _cmd=None):
hv_base.BaseHypervisor.__init__(self)
if _cfgdir is None:
self._cfgdir = pathutils.XEN_CONFIG_DIR
else:
self._cfgdir = _cfgdir
if _run_cmd_fn is None:
self._run_cmd_fn = utils.RunCmd
else:
self._run_cmd_fn = _run_cmd_fn
self._cmd = _cmd
@staticmethod
def _GetCommandFromHvparams(hvparams):
"""Returns the Xen command extracted from the given hvparams.
@type hvparams: dict of strings
@param hvparams: hypervisor parameters
"""
if hvparams is None or constants.HV_XEN_CMD not in hvparams:
raise errors.HypervisorError("Cannot determine xen command.")
else:
return hvparams[constants.HV_XEN_CMD]
def _GetCommand(self, hvparams):
"""Returns Xen command to use.
@type hvparams: dict of strings
@param hvparams: hypervisor parameters
"""
if self._cmd is None:
cmd = XenHypervisor._GetCommandFromHvparams(hvparams)
else:
cmd = self._cmd
if cmd not in constants.KNOWN_XEN_COMMANDS:
raise errors.ProgrammerError("Unknown Xen command '%s'" % cmd)
return cmd
def _RunXen(self, args, hvparams, timeout=None):
"""Wrapper around L{utils.process.RunCmd} to run Xen command.
@type hvparams: dict of strings
@param hvparams: dictionary of hypervisor params
@type timeout: int or None
@param timeout: if a timeout (in seconds) is specified, the command will be
terminated after that number of seconds.
@see: L{utils.process.RunCmd}
"""
cmd = []
if timeout is not None:
cmd.extend(["timeout", str(timeout)])
cmd.extend([self._GetCommand(hvparams)])
cmd.extend(args)
return self._run_cmd_fn(cmd)
def _ConfigFileName(self, instance_name):
"""Get the config file name for an instance.
@param instance_name: instance name
@type instance_name: str
@return: fully qualified path to instance config file
@rtype: str
"""
return utils.PathJoin(self._cfgdir, instance_name)
@classmethod
def _WriteNICInfoFile(cls, instance, idx, nic):
"""Write the Xen config file for the instance.
This version of the function just writes the config file from static data.
"""
instance_name = instance.name
dirs = [(dname, constants.RUN_DIRS_MODE)
for dname in cls._DIRS + [cls._InstanceNICDir(instance_name)]]
utils.EnsureDirs(dirs)
cfg_file = cls._InstanceNICFile(instance_name, idx)
data = StringIO()
data.write("TAGS=\"%s\"\n" % r"\ ".join(instance.GetTags()))
if nic.netinfo:
netinfo = objects.Network.FromDict(nic.netinfo)
for k, v in netinfo.HooksDict().iteritems():
data.write("%s=\"%s\"\n" % (k, v))
data.write("MAC=%s\n" % nic.mac)
if nic.ip:
data.write("IP=%s\n" % nic.ip)
data.write("INTERFACE_INDEX=%s\n" % str(idx))
if nic.name:
data.write("INTERFACE_NAME=%s\n" % nic.name)
data.write("INTERFACE_UUID=%s\n" % nic.uuid)
data.write("MODE=%s\n" % nic.nicparams[constants.NIC_MODE])
data.write("LINK=%s\n" % nic.nicparams[constants.NIC_LINK])
data.write("VLAN=%s\n" % nic.nicparams[constants.NIC_VLAN])
try:
utils.WriteFile(cfg_file, data=data.getvalue())
except EnvironmentError, err:
raise errors.HypervisorError("Cannot write Xen instance configuration"
" file %s: %s" % (cfg_file, err))
@staticmethod
def VersionsSafeForMigration(src, target):
"""Decide if migration is likely to suceed for hypervisor versions.
Given two versions of a hypervisor, give a guess whether live migration
from the one version to the other version is likely to succeed. For Xen,
the heuristics is, that an increase by one on the second digit is OK. This
fits with the current numbering scheme.
@type src: list or tuple
@type target: list or tuple
@rtype: bool
"""
if src == target:
return True
if len(src) < 2 or len(target) < 2:
return False
return src[0] == target[0] and target[1] in [src[1], src[1] + 1]
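# Illustrative examples (not part of the original module):
#   VersionsSafeForMigration((4, 2), (4, 3)) -> True   (minor version +1)
#   VersionsSafeForMigration((4, 3), (4, 2)) -> False  (downgrade)
#   VersionsSafeForMigration((4, 2), (5, 2)) -> False  (major version change)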
@classmethod
def _InstanceNICDir(cls, instance_name):
"""Returns the directory holding the tap device files for a given instance.
"""
return utils.PathJoin(cls._NICS_DIR, instance_name)
@classmethod
def _InstanceNICFile(cls, instance_name, seq):
"""Returns the name of the file containing the tap device for a given NIC
"""
return utils.PathJoin(cls._InstanceNICDir(instance_name), str(seq))
@classmethod
def _GetConfig(cls, instance, startup_memory, block_devices):
"""Build Xen configuration for an instance.
"""
raise NotImplementedError
def _WriteNicConfig(self, config, instance, hvp):
vif_data = []
# only XenHvmHypervisor has these hvparams
nic_type = hvp.get(constants.HV_NIC_TYPE, None)
vif_type = hvp.get(constants.HV_VIF_TYPE, None)
nic_type_str = ""
if nic_type or vif_type:
if nic_type is None:
if vif_type:
nic_type_str = ", type=%s" % vif_type
elif nic_type == constants.HT_NIC_PARAVIRTUAL:
nic_type_str = ", type=paravirtualized"
else:
# parameter 'model' is only valid with type 'ioemu'
nic_type_str = ", model=%s, type=%s" % \
(nic_type, constants.HT_HVM_VIF_IOEMU)
for idx, nic in enumerate(instance.nics):
nic_args = {}
nic_args["mac"] = "%s%s" % (nic.mac, nic_type_str)
if nic.name and \
nic.name.startswith(constants.INSTANCE_COMMUNICATION_NIC_PREFIX):
tap = hv_base.GenerateTapName()
nic_args["vifname"] = tap
nic_args["script"] = pathutils.XEN_VIF_METAD_SETUP
nic.name = tap
else:
ip = getattr(nic, "ip", None)
if ip is not None:
nic_args["ip"] = ip
if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
nic_args["bridge"] = nic.nicparams[constants.NIC_LINK]
elif nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_OVS:
nic_args["bridge"] = nic.nicparams[constants.NIC_LINK]
if nic.nicparams[constants.NIC_VLAN]:
nic_args["bridge"] += nic.nicparams[constants.NIC_VLAN]
if hvp[constants.HV_VIF_SCRIPT]:
nic_args["script"] = hvp[constants.HV_VIF_SCRIPT]
nic_str = ", ".join(["%s=%s" % p for p in nic_args.items()])
vif_data.append("'%s'" % (nic_str, ))
self._WriteNICInfoFile(instance, idx, nic)
config.write("vif = [%s]\n" % ",".join(vif_data))
def _WriteConfigFile(self, instance_name, data):
"""Write the Xen config file for the instance.
This version of the function just writes the config file from static data.
"""
# just in case it exists
utils.RemoveFile(utils.PathJoin(self._cfgdir, "auto", instance_name))
cfg_file = self._ConfigFileName(instance_name)
try:
utils.WriteFile(cfg_file, data=data)
except EnvironmentError, err:
raise errors.HypervisorError("Cannot write Xen instance configuration"
" file %s: %s" % (cfg_file, err))
def _ReadConfigFile(self, instance_name):
"""Returns the contents of the instance config file.
"""
filename = self._ConfigFileName(instance_name)
try:
file_content = utils.ReadFile(filename)
except EnvironmentError, err:
raise errors.HypervisorError("Failed to load Xen config file: %s" % err)
return file_content
def _RemoveConfigFile(self, instance_name):
"""Remove the xen configuration file.
"""
utils.RemoveFile(self._ConfigFileName(instance_name))
try:
shutil.rmtree(self._InstanceNICDir(instance_name))
except OSError, err:
if err.errno != errno.ENOENT:
raise
def _StashConfigFile(self, instance_name):
"""Move the Xen config file to the log directory and return its new path.
"""
old_filename = self._ConfigFileName(instance_name)
base = ("%s-%s" %
(instance_name, utils.TimestampForFilename()))
new_filename = utils.PathJoin(pathutils.LOG_XEN_DIR, base)
utils.RenameFile(old_filename, new_filename)
return new_filename
def _GetInstanceList(self, include_node, hvparams):
"""Wrapper around module level L{_GetAllInstanceList}.
@type hvparams: dict of strings
@param hvparams: hypervisor parameters to be used on this node
"""
return _GetAllInstanceList(lambda: self._RunXen(["list"], hvparams),
include_node, delays=self._INSTANCE_LIST_DELAYS,
timeout=self._INSTANCE_LIST_TIMEOUT)
def ListInstances(self, hvparams=None):
"""Get the list of running instances.
@type hvparams: dict of strings
@param hvparams: the instance's hypervisor params
@rtype: list of strings
@return: names of running instances
"""
instance_list = _GetRunningInstanceList(
lambda: self._RunXen(["list"], hvparams),
False, delays=self._INSTANCE_LIST_DELAYS,
timeout=self._INSTANCE_LIST_TIMEOUT)
return [info[0] for info in instance_list]
def GetInstanceInfo(self, instance_name, hvparams=None):
"""Get instance properties.
@type instance_name: string
@param instance_name: the instance name
@type hvparams: dict of strings
@param hvparams: the instance's hypervisor params
@return: tuple (name, id, memory, vcpus, stat, times)
"""
instance_list = self._GetInstanceList(instance_name == _DOM0_NAME, hvparams)
result = None
for data in instance_list:
if data[0] == instance_name:
result = data
break
return result
def GetAllInstancesInfo(self, hvparams=None):
"""Get properties of all instances.
@type hvparams: dict of strings
@param hvparams: hypervisor parameters
@rtype: (string, string, int, int, HypervisorInstanceState, int)
@return: list of tuples (name, id, memory, vcpus, state, times)
"""
return self._GetInstanceList(False, hvparams)
def _MakeConfigFile(self, instance, startup_memory, block_devices):
"""Gather configuration details and write to disk.
See L{_GetConfig} for arguments.
"""
buf = StringIO()
buf.write("# Automatically generated by Ganeti. Do not edit!\n")
buf.write("\n")
buf.write(self._GetConfig(instance, startup_memory, block_devices))
buf.write("\n")
self._WriteConfigFile(instance.name, buf.getvalue())
def StartInstance(self, instance, block_devices, startup_paused):
"""Start an instance.
"""
startup_memory = self._InstanceStartupMemory(instance)
self._MakeConfigFile(instance, startup_memory, block_devices)
cmd = ["create"]
if startup_paused:
cmd.append("-p")
cmd.append(self._ConfigFileName(instance.name))
result = self._RunXen(cmd, instance.hvparams)
if result.failed:
# Move the Xen configuration file to the log directory to avoid
# leaving a stale config file behind.
stashed_config = self._StashConfigFile(instance.name)
raise errors.HypervisorError("Failed to start instance %s: %s (%s). Moved"
" config file to %s" %
(instance.name, result.fail_reason,
result.output, stashed_config))
for nic_seq, nic in enumerate(instance.nics):
if nic.name and nic.name.startswith("gnt.com."):
_ConfigureNIC(instance, nic_seq, nic, nic.name)
def StopInstance(self, instance, force=False, retry=False, name=None,
timeout=None):
"""Stop an instance.
A soft shutdown can be interrupted. A hard shutdown tries forever.
"""
assert(timeout is None or force is not None)
if name is None:
name = instance.name
return self._StopInstance(name, force, instance.hvparams, timeout)
def _ShutdownInstance(self, name, hvparams, timeout):
"""Shutdown an instance if the instance is running.
The '-w' flag waits for shutdown to complete which avoids the need
to poll in the case where we want to destroy the domain
immediately after shutdown.
@type name: string
@param name: name of the instance to stop
@type hvparams: dict of string
@param hvparams: hypervisor parameters of the instance
@type timeout: int or None
@param timeout: a timeout after which the shutdown command should be killed,
or None for no timeout
"""
instance_info = self.GetInstanceInfo(name, hvparams=hvparams)
if instance_info is None or _IsInstanceShutdown(instance_info[4]):
logging.info("Failed to shutdown instance %s, not running", name)
return None
return self._RunXen(["shutdown", "-w", name], hvparams, timeout)
def _DestroyInstance(self, name, hvparams):
"""Destroy an instance if the instance if the instance exists.
@type name: string
@param name: name of the instance to destroy
@type hvparams: dict of string
@param hvparams: hypervisor parameters of the instance
"""
instance_info = self.GetInstanceInfo(name, hvparams=hvparams)
if instance_info is None:
logging.info("Failed to destroy instance %s, does not exist", name)
return None
return self._RunXen(["destroy", name], hvparams)
# Destroy a domain only if necessary
#
# This method checks if the domain has already been destroyed before
# issuing the 'destroy' command. This step is necessary to handle
# domains created by other versions of Ganeti. For example, an
# instance created with 2.10 will be destroyed by the
# '_ShutdownInstance', thus not requiring an additional destroy,
# which would cause an error if issued. See issue 619.
def _DestroyInstanceIfAlive(self, name, hvparams):
instance_info = self.GetInstanceInfo(name, hvparams=hvparams)
if instance_info is None:
raise errors.HypervisorError("Failed to destroy instance %s, already"
" destroyed" % name)
else:
self._DestroyInstance(name, hvparams)
def _StopInstance(self, name, force, hvparams, timeout):
"""Stop an instance.
@type name: string
@param name: name of the instance to destroy
@type force: boolean
@param force: whether to do a "hard" stop (destroy)
@type hvparams: dict of string
@param hvparams: hypervisor parameters of the instance
@type timeout: int or None
@param timeout: a timeout after which the shutdown command should be killed,
or None for no timeout
"""
instance_info = self.GetInstanceInfo(name, hvparams=hvparams)
if instance_info is None:
raise errors.HypervisorError("Failed to shutdown instance %s,"
" not running" % name)
if force:
result = self._DestroyInstanceIfAlive(name, hvparams)
else:
self._ShutdownInstance(name, hvparams, timeout)
result = self._DestroyInstanceIfAlive(name, hvparams)
if result is not None and result.failed and \
self.GetInstanceInfo(name, hvparams=hvparams) is not None:
raise errors.HypervisorError("Failed to stop instance %s: %s, %s" %
(name, result.fail_reason, result.output))
# Remove configuration file if stopping/starting instance was successful
self._RemoveConfigFile(name)
def RebootInstance(self, instance):
"""Reboot an instance.
"""
ini_info = self.GetInstanceInfo(instance.name, hvparams=instance.hvparams)
if ini_info is None:
raise errors.HypervisorError("Failed to reboot instance %s,"
" not running" % instance.name)
result = self._RunXen(["reboot", instance.name], instance.hvparams)
if result.failed:
raise errors.HypervisorError("Failed to reboot instance %s: %s, %s" %
(instance.name, result.fail_reason,
result.output))
def _CheckInstance():
new_info = self.GetInstanceInfo(instance.name, hvparams=instance.hvparams)
# check if the domain ID has changed or the run time has decreased
if (new_info is not None and
(new_info[1] != ini_info[1] or new_info[5] < ini_info[5])):
return
raise utils.RetryAgain()
try:
utils.Retry(_CheckInstance, self.REBOOT_RETRY_INTERVAL,
self.REBOOT_RETRY_INTERVAL * self.REBOOT_RETRY_COUNT)
except utils.RetryTimeout:
raise errors.HypervisorError("Failed to reboot instance %s: instance"
" did not reboot in the expected interval" %
(instance.name, ))
def BalloonInstanceMemory(self, instance, mem):
"""Balloon an instance memory to a certain value.
@type instance: L{objects.Instance}
@param instance: instance to be accepted
@type mem: int
@param mem: actual memory size to use for instance runtime
"""
result = self._RunXen(["mem-set", instance.name, mem], instance.hvparams)
if result.failed:
raise errors.HypervisorError("Failed to balloon instance %s: %s (%s)" %
(instance.name, result.fail_reason,
result.output))
# Update configuration file
cmd = ["sed", "-ie", "s/^memory.*$/memory = %s/" % mem]
cmd.append(self._ConfigFileName(instance.name))
result = utils.RunCmd(cmd)
if result.failed:
raise errors.HypervisorError("Failed to update memory for %s: %s (%s)" %
(instance.name, result.fail_reason,
result.output))
def GetNodeInfo(self, hvparams=None):
"""Return information about the node.
@see: L{_GetNodeInfo} and L{_ParseNodeInfo}
"""
result = self._RunXen(["info"], hvparams)
if result.failed:
logging.error("Can't retrieve xen hypervisor information (%s): %s",
result.fail_reason, result.output)
return None
instance_list = self._GetInstanceList(True, hvparams)
return _GetNodeInfo(result.stdout, instance_list)
@classmethod
def GetInstanceConsole(cls, instance, primary_node, node_group,
hvparams, beparams):
"""Return a command for connecting to the console of an instance.
"""
xen_cmd = XenHypervisor._GetCommandFromHvparams(hvparams)
ndparams = node_group.FillND(primary_node)
return objects.InstanceConsole(instance=instance.name,
kind=constants.CONS_SSH,
host=primary_node.name,
port=ndparams.get(constants.ND_SSH_PORT),
user=constants.SSH_CONSOLE_USER,
command=[pathutils.XEN_CONSOLE_WRAPPER,
xen_cmd, instance.name])
def Verify(self, hvparams=None):
"""Verify the hypervisor.
For Xen, this verifies that the xend process is running.
@type hvparams: dict of strings
@param hvparams: hypervisor parameters to be verified against
@return: Problem description if something is wrong, C{None} otherwise
"""
if hvparams is None:
return "Could not verify the hypervisor, because no hvparams were" \
" provided."
if constants.HV_XEN_CMD in hvparams:
xen_cmd = hvparams[constants.HV_XEN_CMD]
try:
self._CheckToolstack(xen_cmd)
except errors.HypervisorError:
return "The configured xen toolstack '%s' is not available on this" \
" node." % xen_cmd
result = self._RunXen(["info"], hvparams)
if result.failed:
return "Retrieving information from xen failed: %s, %s" % \
(result.fail_reason, result.output)
return None
def MigrationInfo(self, instance):
"""Get instance information to perform a migration.
@type instance: L{objects.Instance}
@param instance: instance to be migrated
@rtype: string
@return: content of the xen config file
"""
return self._ReadConfigFile(instance.name)
def AcceptInstance(self, instance, info, target):
"""Prepare to accept an instance.
@type instance: L{objects.Instance}
@param instance: instance to be accepted
@type info: string
@param info: content of the xen config file on the source node
@type target: string
@param target: target host (usually ip), on this node
"""
pass
def FinalizeMigrationDst(self, instance, info, success):
"""Finalize an instance migration.
After a successful migration we write the xen config file.
We do nothing on a failure, as we did not change anything at accept time.
@type instance: L{objects.Instance}
@param instance: instance whose migration is being finalized
@type info: string
@param info: content of the xen config file on the source node
@type success: boolean
@param success: whether the migration was a success or a failure
"""
if success:
self._WriteConfigFile(instance.name, info)
def MigrateInstance(self, cluster_name, instance, target, live):
"""Migrate an instance to a target node.
The migration will not be attempted if the instance is not
currently running.
@type instance: L{objects.Instance}
@param instance: the instance to be migrated
@type target: string
@param target: ip address of the target node
@type live: boolean
@param live: perform a live migration
"""
port = instance.hvparams[constants.HV_MIGRATION_PORT]
return self._MigrateInstance(cluster_name, instance.name, target, port,
live, instance.hvparams)
def _MigrateInstance(self, cluster_name, instance_name, target, port, live,
hvparams, _ping_fn=netutils.TcpPing):
"""Migrate an instance to a target node.
@see: L{MigrateInstance} for details
"""
if hvparams is None:
raise errors.HypervisorError("No hvparams provided.")
if self.GetInstanceInfo(instance_name, hvparams=hvparams) is None:
raise errors.HypervisorError("Instance not running, cannot migrate")
cmd = self._GetCommand(hvparams)
if (cmd == constants.XEN_CMD_XM and
not _ping_fn(target, port, live_port_needed=True)):
raise errors.HypervisorError("Remote host %s not listening on port"
" %s, cannot migrate" % (target, port))
args = ["migrate"]
if cmd == constants.XEN_CMD_XM:
args.extend(["-p", "%d" % port])
if live:
args.append("-l")
elif cmd == constants.XEN_CMD_XL:
args.extend([
"-s", constants.XL_SSH_CMD % cluster_name,
"-C", self._ConfigFileName(instance_name),
])
else:
raise errors.HypervisorError("Unsupported Xen command: %s" % self._cmd)
args.extend([instance_name, target])
result = self._RunXen(args, hvparams)
if result.failed:
raise errors.HypervisorError("Failed to migrate instance %s: %s" %
(instance_name, result.output))
def FinalizeMigrationSource(self, instance, success, live):
"""Finalize the instance migration on the source node.
@type instance: L{objects.Instance}
@param instance: the instance that was migrated
@type success: bool
@param success: whether the migration succeeded or not
@type live: bool
@param live: whether the user requested a live migration or not
"""
# pylint: disable=W0613
if success:
# remove old xen file after migration succeeded
try:
self._RemoveConfigFile(instance.name)
except EnvironmentError:
logging.exception("Failure while removing instance config file")
def GetMigrationStatus(self, instance):
"""Get the migration status
As MigrateInstance for Xen is still blocking, if this method is called it
means that MigrateInstance has completed successfully. So we can safely
assume that the migration was successful and notify this fact to the client.
@type instance: L{objects.Instance}
@param instance: the instance that is being migrated
@rtype: L{objects.MigrationStatus}
@return: the status of the current migration (one of
L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
progress info that can be retrieved from the hypervisor
"""
return objects.MigrationStatus(status=constants.HV_MIGRATION_COMPLETED)
def PowercycleNode(self, hvparams=None):
"""Xen-specific powercycle.
This first does a Linux reboot (which triggers automatically a Xen
reboot), and if that fails it tries to do a Xen reboot. The reason
we don't try a Xen reboot first is that the xen reboot launches an
external command which connects to the Xen hypervisor, and that
won't work in case the root filesystem is broken and/or the xend
daemon is not working.
@type hvparams: dict of strings
@param hvparams: hypervisor params to be used on this node
"""
try:
self.LinuxPowercycle()
finally:
xen_cmd = self._GetCommand(hvparams)
utils.RunCmd([xen_cmd, "debug", "R"])
def _CheckToolstack(self, xen_cmd):
"""Check whether the given toolstack is available on the node.
@type xen_cmd: string
@param xen_cmd: xen command (e.g. 'xm' or 'xl')
"""
binary_found = self._CheckToolstackBinary(xen_cmd)
if not binary_found:
raise errors.HypervisorError("No '%s' binary found on node." % xen_cmd)
elif xen_cmd == constants.XEN_CMD_XL:
if not self._CheckToolstackXlConfigured():
raise errors.HypervisorError("Toolstack '%s' is not enabled on this"
"node." % xen_cmd)
def _CheckToolstackBinary(self, xen_cmd):
"""Checks whether the xen command's binary is found on the machine.
"""
if xen_cmd not in constants.KNOWN_XEN_COMMANDS:
raise errors.HypervisorError("Unknown xen command '%s'." % xen_cmd)
result = self._run_cmd_fn(["which", xen_cmd])
return not result.failed
def _CheckToolstackXlConfigured(self):
"""Checks whether xl is enabled on an xl-capable node.
@rtype: bool
@returns: C{True} if 'xl' is enabled, C{False} otherwise
"""
result = self._run_cmd_fn([constants.XEN_CMD_XL, "help"])
if not result.failed:
return True
elif result.failed:
if "toolstack" in result.stderr:
return False
# xl fails for some other reason than the toolstack
else:
raise errors.HypervisorError("Cannot run xen ('%s'). Error: %s."
% (constants.XEN_CMD_XL, result.stderr))
def WriteXenConfigEvents(config, hvp):
config.write("on_poweroff = 'preserve'\n")
if hvp[constants.HV_REBOOT_BEHAVIOR] == constants.INSTANCE_REBOOT_ALLOWED:
config.write("on_reboot = 'restart'\n")
else:
config.write("on_reboot = 'destroy'\n")
config.write("on_crash = 'restart'\n")
class XenPvmHypervisor(XenHypervisor):
"""Xen PVM hypervisor interface"""
PARAMETERS = {
constants.HV_USE_BOOTLOADER: hv_base.NO_CHECK,
constants.HV_BOOTLOADER_PATH: hv_base.OPT_FILE_CHECK,
constants.HV_BOOTLOADER_ARGS: hv_base.NO_CHECK,
constants.HV_KERNEL_PATH: hv_base.REQ_FILE_CHECK,
constants.HV_INITRD_PATH: hv_base.OPT_FILE_CHECK,
constants.HV_ROOT_PATH: hv_base.NO_CHECK,
constants.HV_KERNEL_ARGS: hv_base.NO_CHECK,
constants.HV_MIGRATION_PORT: hv_base.REQ_NET_PORT_CHECK,
constants.HV_MIGRATION_MODE: hv_base.MIGRATION_MODE_CHECK,
# TODO: Add a check for the blockdev prefix (matching [a-z:] or similar).
constants.HV_BLOCKDEV_PREFIX: hv_base.NO_CHECK,
constants.HV_REBOOT_BEHAVIOR:
hv_base.ParamInSet(True, constants.REBOOT_BEHAVIORS),
constants.HV_CPU_MASK: hv_base.OPT_MULTI_CPU_MASK_CHECK,
constants.HV_CPU_CAP: hv_base.OPT_NONNEGATIVE_INT_CHECK,
constants.HV_CPU_WEIGHT:
(False, lambda x: 0 < x < 65536, "invalid weight", None, None),
constants.HV_VIF_SCRIPT: hv_base.OPT_FILE_CHECK,
constants.HV_XEN_CMD:
hv_base.ParamInSet(True, constants.KNOWN_XEN_COMMANDS),
constants.HV_XEN_CPUID: hv_base.NO_CHECK,
constants.HV_SOUNDHW: hv_base.NO_CHECK,
}
def _GetConfig(self, instance, startup_memory, block_devices):
"""Write the Xen config file for the instance.
"""
hvp = instance.hvparams
config = StringIO()
config.write("# this is autogenerated by Ganeti, please do not edit\n#\n")
# if bootloader is True, use bootloader instead of kernel and ramdisk
# parameters.
if hvp[constants.HV_USE_BOOTLOADER]:
# bootloader handling
bootloader_path = hvp[constants.HV_BOOTLOADER_PATH]
if bootloader_path:
config.write("bootloader = '%s'\n" % bootloader_path)
else:
raise errors.HypervisorError("Bootloader enabled, but missing"
" bootloader path")
bootloader_args = hvp[constants.HV_BOOTLOADER_ARGS]
if bootloader_args:
config.write("bootargs = '%s'\n" % bootloader_args)
else:
# kernel handling
kpath = hvp[constants.HV_KERNEL_PATH]
config.write("kernel = '%s'\n" % kpath)
# initrd handling
initrd_path = hvp[constants.HV_INITRD_PATH]
if initrd_path:
config.write("ramdisk = '%s'\n" % initrd_path)
# rest of the settings
config.write("memory = %d\n" % startup_memory)
config.write("maxmem = %d\n" % instance.beparams[constants.BE_MAXMEM])
config.write("vcpus = %d\n" % instance.beparams[constants.BE_VCPUS])
cpu_pinning = _CreateConfigCpus(hvp[constants.HV_CPU_MASK])
if cpu_pinning:
config.write("%s\n" % cpu_pinning)
cpu_cap = hvp[constants.HV_CPU_CAP]
if cpu_cap:
config.write("cpu_cap=%d\n" % cpu_cap)
cpu_weight = hvp[constants.HV_CPU_WEIGHT]
if cpu_weight:
config.write("cpu_weight=%d\n" % cpu_weight)
config.write("name = '%s'\n" % instance.name)
self._WriteNicConfig(config, instance, hvp)
disk_data = \
_GetConfigFileDiskData(block_devices, hvp[constants.HV_BLOCKDEV_PREFIX])
config.write("disk = [%s]\n" % ",".join(disk_data))
if hvp[constants.HV_ROOT_PATH]:
config.write("root = '%s'\n" % hvp[constants.HV_ROOT_PATH])
WriteXenConfigEvents(config, hvp)
config.write("extra = '%s'\n" % hvp[constants.HV_KERNEL_ARGS])
cpuid = hvp[constants.HV_XEN_CPUID]
if cpuid:
config.write("cpuid = %s\n" % _QuoteCpuidField(cpuid))
if hvp[constants.HV_SOUNDHW]:
config.write("soundhw = '%s'\n" % hvp[constants.HV_SOUNDHW])
return config.getvalue()
class XenHvmHypervisor(XenHypervisor):
"""Xen HVM hypervisor interface"""
ANCILLARY_FILES = XenHypervisor.ANCILLARY_FILES + [
pathutils.VNC_PASSWORD_FILE,
]
ANCILLARY_FILES_OPT = XenHypervisor.ANCILLARY_FILES_OPT + [
pathutils.VNC_PASSWORD_FILE,
]
PARAMETERS = {
constants.HV_ACPI: hv_base.NO_CHECK,
constants.HV_BOOT_ORDER: (True, ) +
(lambda x: x and len(x.strip("acdn")) == 0,
"Invalid boot order specified, must be one or more of [acdn]",
None, None),
constants.HV_CDROM_IMAGE_PATH: hv_base.OPT_FILE_CHECK,
constants.HV_DISK_TYPE:
hv_base.ParamInSet(True, constants.HT_HVM_VALID_DISK_TYPES),
constants.HV_NIC_TYPE:
hv_base.ParamInSet(True, constants.HT_HVM_VALID_NIC_TYPES),
constants.HV_PAE: hv_base.NO_CHECK,
constants.HV_VNC_BIND_ADDRESS:
(False, netutils.IP4Address.IsValid,
"VNC bind address is not a valid IP address", None, None),
constants.HV_KERNEL_PATH: hv_base.REQ_FILE_CHECK,
constants.HV_DEVICE_MODEL: hv_base.REQ_FILE_CHECK,
constants.HV_VNC_PASSWORD_FILE: hv_base.REQ_FILE_CHECK,
constants.HV_MIGRATION_PORT: hv_base.REQ_NET_PORT_CHECK,
constants.HV_MIGRATION_MODE: hv_base.MIGRATION_MODE_CHECK,
constants.HV_USE_LOCALTIME: hv_base.NO_CHECK,
# TODO: Add a check for the blockdev prefix (matching [a-z:] or similar).
constants.HV_BLOCKDEV_PREFIX: hv_base.NO_CHECK,
# Add PCI passthrough
constants.HV_PASSTHROUGH: hv_base.NO_CHECK,
constants.HV_REBOOT_BEHAVIOR:
hv_base.ParamInSet(True, constants.REBOOT_BEHAVIORS),
constants.HV_CPU_MASK: hv_base.OPT_MULTI_CPU_MASK_CHECK,
constants.HV_CPU_CAP: hv_base.NO_CHECK,
constants.HV_CPU_WEIGHT:
(False, lambda x: 0 < x < 65535, "invalid weight", None, None),
constants.HV_VIF_TYPE:
hv_base.ParamInSet(False, constants.HT_HVM_VALID_VIF_TYPES),
constants.HV_VIF_SCRIPT: hv_base.OPT_FILE_CHECK,
constants.HV_VIRIDIAN: hv_base.NO_CHECK,
constants.HV_XEN_CMD:
hv_base.ParamInSet(True, constants.KNOWN_XEN_COMMANDS),
constants.HV_XEN_CPUID: hv_base.NO_CHECK,
constants.HV_SOUNDHW: hv_base.NO_CHECK,
}
def _GetConfig(self, instance, startup_memory, block_devices):
"""Create a Xen 3.1 HVM config file.
"""
hvp = instance.hvparams
config = StringIO()
# kernel handling
kpath = hvp[constants.HV_KERNEL_PATH]
config.write("kernel = '%s'\n" % kpath)
config.write("builder = 'hvm'\n")
config.write("memory = %d\n" % startup_memory)
config.write("maxmem = %d\n" % instance.beparams[constants.BE_MAXMEM])
config.write("vcpus = %d\n" % instance.beparams[constants.BE_VCPUS])
cpu_pinning = _CreateConfigCpus(hvp[constants.HV_CPU_MASK])
if cpu_pinning:
config.write("%s\n" % cpu_pinning)
cpu_cap = hvp[constants.HV_CPU_CAP]
if cpu_cap:
config.write("cpu_cap=%d\n" % cpu_cap)
cpu_weight = hvp[constants.HV_CPU_WEIGHT]
if cpu_weight:
config.write("cpu_weight=%d\n" % cpu_weight)
config.write("name = '%s'\n" % instance.name)
if hvp[constants.HV_PAE]:
config.write("pae = 1\n")
else:
config.write("pae = 0\n")
if hvp[constants.HV_ACPI]:
config.write("acpi = 1\n")
else:
config.write("acpi = 0\n")
if hvp[constants.HV_VIRIDIAN]:
config.write("viridian = 1\n")
else:
config.write("viridian = 0\n")
config.write("apic = 1\n")
config.write("device_model = '%s'\n" % hvp[constants.HV_DEVICE_MODEL])
config.write("boot = '%s'\n" % hvp[constants.HV_BOOT_ORDER])
config.write("sdl = 0\n")
config.write("usb = 1\n")
config.write("usbdevice = 'tablet'\n")
config.write("vnc = 1\n")
if hvp[constants.HV_VNC_BIND_ADDRESS] is None:
config.write("vnclisten = '%s'\n" % constants.VNC_DEFAULT_BIND_ADDRESS)
else:
config.write("vnclisten = '%s'\n" % hvp[constants.HV_VNC_BIND_ADDRESS])
if instance.network_port > constants.VNC_BASE_PORT:
display = instance.network_port - constants.VNC_BASE_PORT
config.write("vncdisplay = %s\n" % display)
config.write("vncunused = 0\n")
else:
config.write("# vncdisplay = 1\n")
config.write("vncunused = 1\n")
vnc_pwd_file = hvp[constants.HV_VNC_PASSWORD_FILE]
try:
password = utils.ReadFile(vnc_pwd_file)
except EnvironmentError, err:
raise errors.HypervisorError("Failed to open VNC password file %s: %s" %
(vnc_pwd_file, err))
config.write("vncpasswd = '%s'\n" % password.rstrip())
config.write("serial = 'pty'\n")
if hvp[constants.HV_USE_LOCALTIME]:
config.write("localtime = 1\n")
self._WriteNicConfig(config, instance, hvp)
disk_data = \
_GetConfigFileDiskData(block_devices, hvp[constants.HV_BLOCKDEV_PREFIX])
iso_path = hvp[constants.HV_CDROM_IMAGE_PATH]
if iso_path:
iso = "'file:%s,hdc:cdrom,r'" % iso_path
disk_data.append(iso)
config.write("disk = [%s]\n" % (",".join(disk_data)))
# Add PCI passthrough
pci_pass_arr = []
pci_pass = hvp[constants.HV_PASSTHROUGH]
if pci_pass:
pci_pass_arr = pci_pass.split(";")
config.write("pci = %s\n" % pci_pass_arr)
WriteXenConfigEvents(config, hvp)
cpuid = hvp[constants.HV_XEN_CPUID]
if cpuid:
config.write("cpuid = %s\n" % _QuoteCpuidField(cpuid))
if hvp[constants.HV_SOUNDHW]:
config.write("soundhw = '%s'\n" % hvp[constants.HV_SOUNDHW])
return config.getvalue()
| bsd-2-clause |
sshleifer/object_detection_kitti | attention_ocr/python/metrics_test.py | 15 | 3393 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the metrics module."""
import contextlib
import numpy as np
import tensorflow as tf
import metrics
class AccuracyTest(tf.test.TestCase):
def setUp(self):
tf.test.TestCase.setUp(self)
self.rng = np.random.RandomState([11, 23, 50])
self.num_char_classes = 3
self.batch_size = 4
self.seq_length = 5
self.rej_char = 42
@contextlib.contextmanager
def initialized_session(self):
"""Wrapper for test session context manager with required initialization.
Yields:
A session object that should be used as a context manager.
"""
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
yield sess
def _fake_labels(self):
return self.rng.randint(
low=0,
high=self.num_char_classes,
size=(self.batch_size, self.seq_length),
dtype='int32')
def _incorrect_copy(self, values, bad_indexes):
incorrect = np.copy(values)
incorrect[bad_indexes] = values[bad_indexes] + 1
return incorrect
def test_sequence_accuracy_identical_samples(self):
labels_tf = tf.convert_to_tensor(self._fake_labels())
accuracy_tf = metrics.sequence_accuracy(labels_tf, labels_tf,
self.rej_char)
with self.initialized_session() as sess:
accuracy_np = sess.run(accuracy_tf)
self.assertAlmostEqual(accuracy_np, 1.0)
def test_sequence_accuracy_one_char_difference(self):
ground_truth_np = self._fake_labels()
ground_truth_tf = tf.convert_to_tensor(ground_truth_np)
prediction_tf = tf.convert_to_tensor(
self._incorrect_copy(ground_truth_np, bad_indexes=((0, 0))))
accuracy_tf = metrics.sequence_accuracy(prediction_tf, ground_truth_tf,
self.rej_char)
with self.initialized_session() as sess:
accuracy_np = sess.run(accuracy_tf)
# 1 of 4 sequences is incorrect.
self.assertAlmostEqual(accuracy_np, 1.0 - 1.0 / self.batch_size)
def test_char_accuracy_one_char_difference_with_padding(self):
ground_truth_np = self._fake_labels()
ground_truth_tf = tf.convert_to_tensor(ground_truth_np)
prediction_tf = tf.convert_to_tensor(
self._incorrect_copy(ground_truth_np, bad_indexes=((0, 0))))
accuracy_tf = metrics.char_accuracy(prediction_tf, ground_truth_tf,
self.rej_char)
with self.initialized_session() as sess:
accuracy_np = sess.run(accuracy_tf)
chars_count = self.seq_length * self.batch_size
self.assertAlmostEqual(accuracy_np, 1.0 - 1.0 / chars_count)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
benthomasson/ansible | lib/ansible/plugins/connection/winrm.py | 12 | 25694 | # (c) 2014, Chris Church <[email protected]>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import inspect
import os
import re
import shlex
import socket
import traceback
import json
import tempfile
import subprocess
import itertools
HAVE_KERBEROS = False
try:
import kerberos
HAVE_KERBEROS = True
except ImportError:
pass
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.errors import AnsibleFileNotFound
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.parse import urlunsplit
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.six import binary_type
from ansible.plugins.connection import ConnectionBase
from ansible.plugins.shell.powershell import exec_wrapper, become_wrapper, leaf_exec
from ansible.utils.hashing import secure_hash
from ansible.utils.path import makedirs_safe
try:
import winrm
from winrm import Response
from winrm.protocol import Protocol
HAS_WINRM = True
except ImportError as e:
HAS_WINRM = False
try:
import xmltodict
HAS_XMLTODICT = True
except ImportError as e:
HAS_XMLTODICT = False
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
'''WinRM connections over HTTP/HTTPS.'''
transport = 'winrm'
module_implementation_preferences = ('.ps1', '.exe', '')
become_methods = ['runas']
allow_executable = False
def __init__(self, *args, **kwargs):
self.has_pipelining = True
self.always_pipeline_modules = True
self.has_native_async = True
self.protocol = None
self.shell_id = None
self.delegate = None
self._shell_type = 'powershell'
# FUTURE: Add runas support
super(Connection, self).__init__(*args, **kwargs)
def transport_test(self, connect_timeout):
''' Test the transport mechanism, if available '''
host = self._winrm_host
port = int(self._winrm_port)
display.vvv("attempting transport test to %s:%s" % (host, port))
sock = socket.create_connection((host, port), connect_timeout)
sock.close()
def set_host_overrides(self, host, hostvars=None):
'''
Override WinRM-specific options from host variables.
'''
if not HAS_WINRM:
return
self._winrm_host = self._play_context.remote_addr
self._winrm_port = int(self._play_context.port or 5986)
self._winrm_scheme = hostvars.get('ansible_winrm_scheme', 'http' if self._winrm_port == 5985 else 'https')
self._winrm_path = hostvars.get('ansible_winrm_path', '/wsman')
self._winrm_user = self._play_context.remote_user
self._winrm_pass = self._play_context.password
self._become_method = self._play_context.become_method
self._become_user = self._play_context.become_user
self._become_pass = self._play_context.become_pass
self._kinit_cmd = hostvars.get('ansible_winrm_kinit_cmd', 'kinit')
if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'):
self._winrm_supported_authtypes = set(winrm.FEATURE_SUPPORTED_AUTHTYPES)
else:
# for legacy versions of pywinrm, use the values we know are supported
self._winrm_supported_authtypes = set(['plaintext', 'ssl', 'kerberos'])
# TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic
transport_selector = 'ssl' if self._winrm_scheme == 'https' else 'plaintext'
if HAVE_KERBEROS and ((self._winrm_user and '@' in self._winrm_user)):
self._winrm_transport = 'kerberos,%s' % transport_selector
else:
self._winrm_transport = transport_selector
self._winrm_transport = hostvars.get('ansible_winrm_transport', self._winrm_transport)
if isinstance(self._winrm_transport, string_types):
self._winrm_transport = [x.strip() for x in self._winrm_transport.split(',') if x.strip()]
unsupported_transports = set(self._winrm_transport).difference(self._winrm_supported_authtypes)
if unsupported_transports:
raise AnsibleError('The installed version of WinRM does not support transport(s) %s' % list(unsupported_transports))
# if kerberos is among our transports and there's a password specified, we're managing the tickets
kinit_mode = to_text(hostvars.get('ansible_winrm_kinit_mode', '')).strip()
if kinit_mode == "":
# HACK: ideally, remove multi-transport stuff
self._kerb_managed = "kerberos" in self._winrm_transport and self._winrm_pass
elif kinit_mode == "managed":
self._kerb_managed = True
elif kinit_mode == "manual":
self._kerb_managed = False
else:
raise AnsibleError('Unknown ansible_winrm_kinit_mode value: "%s" (must be "managed" or "manual")' % kinit_mode)
        # arg names we're going to pass directly
internal_kwarg_mask = set(['self', 'endpoint', 'transport', 'username', 'password', 'scheme', 'path', 'kinit_mode', 'kinit_cmd'])
self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass)
argspec = inspect.getargspec(Protocol.__init__)
supported_winrm_args = set(argspec.args)
supported_winrm_args.update(internal_kwarg_mask)
passed_winrm_args = set([v.replace('ansible_winrm_', '') for v in hostvars if v.startswith('ansible_winrm_')])
unsupported_args = passed_winrm_args.difference(supported_winrm_args)
# warn for kwargs unsupported by the installed version of pywinrm
for arg in unsupported_args:
display.warning("ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)".format(arg))
# pass through matching kwargs, excluding the list we want to treat specially
for arg in passed_winrm_args.difference(internal_kwarg_mask).intersection(supported_winrm_args):
self._winrm_kwargs[arg] = hostvars['ansible_winrm_%s' % arg]
    # Until pykerberos has enough goodies to implement a rudimentary kinit/klist, the simplest way is to let
    # each connection auth itself with a private CCACHE.
def _kerb_auth(self, principal, password):
if password is None:
password = ""
self._kerb_ccache = tempfile.NamedTemporaryFile()
display.vvvvv("creating Kerberos CC at %s" % self._kerb_ccache.name)
krb5ccname = "FILE:%s" % self._kerb_ccache.name
krbenv = dict(KRB5CCNAME=krb5ccname)
os.environ["KRB5CCNAME"] = krb5ccname
kinit_cmdline = [self._kinit_cmd, principal]
display.vvvvv("calling kinit for principal %s" % principal)
p = subprocess.Popen(kinit_cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=krbenv)
# TODO: unicode/py3
stdout, stderr = p.communicate(password + b'\n')
if p.returncode != 0:
raise AnsibleConnectionFailure("Kerberos auth failure: %s" % stderr.strip())
display.vvvvv("kinit succeeded for principal %s" % principal)
def _winrm_connect(self):
'''
Establish a WinRM connection over HTTP/HTTPS.
'''
display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
(self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)
netloc = '%s:%d' % (self._winrm_host, self._winrm_port)
endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
errors = []
for transport in self._winrm_transport:
if transport == 'kerberos':
if not HAVE_KERBEROS:
errors.append('kerberos: the python kerberos library is not installed')
continue
if self._kerb_managed:
self._kerb_auth(self._winrm_user, self._winrm_pass)
display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
try:
protocol = Protocol(endpoint, transport=transport, **self._winrm_kwargs)
# open the shell from connect so we know we're able to talk to the server
if not self.shell_id:
self.shell_id = protocol.open_shell(codepage=65001) # UTF-8
display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host)
return protocol
except Exception as e:
err_msg = to_text(e).strip()
if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, re.I):
raise AnsibleError('the connection attempt timed out')
m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg)
if m:
code = int(m.groups()[0])
if code == 401:
err_msg = 'the specified credentials were rejected by the server'
elif code == 411:
return protocol
errors.append(u'%s: %s' % (transport, err_msg))
display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_text(traceback.format_exc())), host=self._winrm_host)
if errors:
raise AnsibleConnectionFailure(', '.join(map(to_native, errors)))
else:
raise AnsibleError('No transport found for WinRM connection')
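    # Build and send a WS-Management "Send" message for the given command:
    # stdin data is base64-encoded into an rsp:Stream element, and the End
    # attribute is set on the final chunk so the remote side closes stdin.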
def _winrm_send_input(self, protocol, shell_id, command_id, stdin, eof=False):
rq = {'env:Envelope': protocol._get_soap_header(
resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',
action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send',
shell_id=shell_id)}
stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Send', {})\
.setdefault('rsp:Stream', {})
stream['@Name'] = 'stdin'
stream['@CommandId'] = command_id
stream['#text'] = base64.b64encode(to_bytes(stdin))
if eof:
stream['@End'] = 'true'
protocol.send_message(xmltodict.unparse(rq))
def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None):
if not self.protocol:
self.protocol = self._winrm_connect()
self._connected = True
if from_exec:
display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
else:
display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
command_id = None
try:
stdin_push_failed = False
command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args), console_mode_stdin=(stdin_iterator is None))
# TODO: try/except around this, so we can get/return the command result on a broken pipe or other failure (probably more useful than the 500 that
# comes from this)
try:
if stdin_iterator:
for (data, is_last) in stdin_iterator:
self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last)
except Exception as ex:
from traceback import format_exc
display.warning("FATAL ERROR DURING FILE TRANSFER: %s" % format_exc(ex))
stdin_push_failed = True
if stdin_push_failed:
raise AnsibleError('winrm send_input failed')
# NB: this can hang if the receiver is still running (eg, network failed a Send request but the server's still happy).
# FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still running after a failure.
resptuple = self.protocol.get_command_output(self.shell_id, command_id)
# ensure stdout/stderr are text for py3
# FUTURE: this should probably be done internally by pywinrm
response = Response(tuple(to_text(v) if isinstance(v, binary_type) else v for v in resptuple))
# TODO: check result from response and set stdin_push_failed if we have nonzero
if from_exec:
display.vvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
else:
display.vvvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
display.vvvvvv('WINRM STDOUT %s' % to_text(response.std_out), host=self._winrm_host)
display.vvvvvv('WINRM STDERR %s' % to_text(response.std_err), host=self._winrm_host)
if stdin_push_failed:
raise AnsibleError('winrm send_input failed; \nstdout: %s\nstderr %s' % (response.std_out, response.std_err))
return response
finally:
if command_id:
self.protocol.cleanup_command(self.shell_id, command_id)
def _connect(self):
if not HAS_WINRM:
raise AnsibleError("winrm or requests is not installed: %s" % to_text(e))
elif not HAS_XMLTODICT:
raise AnsibleError("xmltodict is not installed: %s" % to_text(e))
super(Connection, self)._connect()
if not self.protocol:
self.protocol = self._winrm_connect()
self._connected = True
return self
def _reset(self): # used by win_reboot (and any other action that might need to bounce the state)
self.protocol = None
self.shell_id = None
self._connect()
def _create_raw_wrapper_payload(self, cmd, environment=dict()):
payload = {
'module_entry': to_text(base64.b64encode(to_bytes(cmd))),
'powershell_modules': {},
'actions': ['exec'],
'exec': to_text(base64.b64encode(to_bytes(leaf_exec))),
'environment': environment
}
return json.dumps(payload)
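    # Yield the payload in buffer_size chunks as (bytes, is_last) tuples so
    # _winrm_exec can stream them via _winrm_send_input and mark the last one.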
def _wrapper_payload_stream(self, payload, buffer_size=200000):
payload_bytes = to_bytes(payload)
byte_count = len(payload_bytes)
for i in range(0, byte_count, buffer_size):
yield payload_bytes[i:i + buffer_size], i + buffer_size >= byte_count
def exec_command(self, cmd, in_data=None, sudoable=True):
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
cmd_parts = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False)
# TODO: display something meaningful here
display.vvv("EXEC (via pipeline wrapper)")
stdin_iterator = None
if in_data:
stdin_iterator = self._wrapper_payload_stream(in_data)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True, stdin_iterator=stdin_iterator)
result.std_out = to_bytes(result.std_out)
result.std_err = to_bytes(result.std_err)
# parse just stderr from CLIXML output
if self.is_clixml(result.std_err):
try:
result.std_err = self.parse_clixml_stream(result.std_err)
except:
                # unsure if we're guaranteed a valid xml doc; use raw output in case of error
pass
return (result.status_code, result.std_out, result.std_err)
def exec_command_old(self, cmd, in_data=None, sudoable=True):
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
cmd_parts = shlex.split(to_bytes(cmd), posix=False)
cmd_parts = map(to_text, cmd_parts)
script = None
cmd_ext = cmd_parts and self._shell._unquote(cmd_parts[0]).lower()[-4:] or ''
# Support running .ps1 files (via script/raw).
if cmd_ext == '.ps1':
script = '& %s' % cmd
# Support running .bat/.cmd files; change back to the default system encoding instead of UTF-8.
elif cmd_ext in ('.bat', '.cmd'):
script = '[System.Console]::OutputEncoding = [System.Text.Encoding]::Default; & %s' % cmd
# Encode the command if not already encoded; supports running simple PowerShell commands via raw.
elif '-EncodedCommand' not in cmd_parts:
script = cmd
if script:
cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False)
if '-EncodedCommand' in cmd_parts:
encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
decoded_cmd = to_text(base64.b64decode(encoded_cmd).decode('utf-16-le'))
display.vvv("EXEC %s" % decoded_cmd, host=self._winrm_host)
else:
display.vvv("EXEC %s" % cmd, host=self._winrm_host)
try:
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
except Exception:
traceback.print_exc()
raise AnsibleConnectionFailure("failed to exec cmd %s" % cmd)
result.std_out = to_bytes(result.std_out)
result.std_err = to_bytes(result.std_err)
# parse just stderr from CLIXML output
if self.is_clixml(result.std_err):
try:
result.std_err = self.parse_clixml_stream(result.std_err)
except:
                # unsure if we're guaranteed a valid xml doc; use raw output in case of error
pass
return (result.status_code, result.std_out, result.std_err)
def is_clixml(self, value):
return value.startswith(b"#< CLIXML")
    # hacky way to get just one output stream (stderr by default) - not always sure of doc framing here, so use with care
def parse_clixml_stream(self, clixml_doc, stream_name='Error'):
clear_xml = clixml_doc.replace(b'#< CLIXML\r\n', b'')
doc = xmltodict.parse(clear_xml)
lines = [l.get('#text', '').replace('_x000D__x000A_', '') for l in doc.get('Objs', {}).get('S', {}) if l.get('@S') == stream_name]
return '\r\n'.join(lines)
# FUTURE: determine buffer size at runtime via remote winrm config?
def _put_file_stdin_iterator(self, in_path, out_path, buffer_size=250000):
in_size = os.path.getsize(to_bytes(in_path, errors='surrogate_or_strict'))
offset = 0
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
for out_data in iter((lambda: in_file.read(buffer_size)), b''):
offset += len(out_data)
self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host)
# yes, we're double-encoding over the wire in this case- we want to ensure that the data shipped to the end PS pipeline is still b64-encoded
b64_data = base64.b64encode(out_data) + b'\r\n'
# cough up the data, as well as an indicator if this is the last chunk so winrm_send knows to set the End signal
yield b64_data, (in_file.tell() == in_size)
if offset == 0: # empty file, return an empty buffer + eof to close it
yield "", True
def put_file(self, in_path, out_path):
super(Connection, self).put_file(in_path, out_path)
out_path = self._shell._unquote(out_path)
display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound('file or module does not exist: "%s"' % in_path)
script_template = u'''
begin {{
$path = '{0}'
$DebugPreference = "Continue"
$ErrorActionPreference = "Stop"
Set-StrictMode -Version 2
$fd = [System.IO.File]::Create($path)
$sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()
$bytes = @() #initialize for empty file case
}}
process {{
$bytes = [System.Convert]::FromBase64String($input)
$sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null
$fd.Write($bytes, 0, $bytes.Length)
}}
end {{
$sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null
$hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant()
$fd.Close()
Write-Output "{{""sha1"":""$hash""}}"
}}
'''
script = script_template.format(self._shell._escape(out_path))
cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False, preserve_rc=False)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path))
# TODO: improve error handling
if result.status_code != 0:
raise AnsibleError(to_native(result.std_err))
put_output = json.loads(result.std_out)
remote_sha1 = put_output.get("sha1")
if not remote_sha1:
raise AnsibleError("Remote sha1 was not returned")
local_sha1 = secure_hash(in_path)
if not remote_sha1 == local_sha1:
raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_native(remote_sha1), to_native(local_sha1)))
def fetch_file(self, in_path, out_path):
super(Connection, self).fetch_file(in_path, out_path)
in_path = self._shell._unquote(in_path)
out_path = out_path.replace('\\', '/')
display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
buffer_size = 2**19 # 0.5MB chunks
makedirs_safe(os.path.dirname(out_path))
out_file = None
try:
offset = 0
while True:
try:
script = '''
If (Test-Path -PathType Leaf "%(path)s")
{
$stream = New-Object IO.FileStream("%(path)s", [System.IO.FileMode]::Open, [System.IO.FileAccess]::Read, [IO.FileShare]::ReadWrite);
$stream.Seek(%(offset)d, [System.IO.SeekOrigin]::Begin) | Out-Null;
$buffer = New-Object Byte[] %(buffer_size)d;
$bytesRead = $stream.Read($buffer, 0, %(buffer_size)d);
$bytes = $buffer[0..($bytesRead-1)];
[System.Convert]::ToBase64String($bytes);
$stream.Close() | Out-Null;
}
ElseIf (Test-Path -PathType Container "%(path)s")
{
Write-Host "[DIR]";
}
Else
{
Write-Error "%(path)s does not exist";
Exit 1;
}
''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset)
display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._winrm_host)
cmd_parts = self._shell._encode_script(script, as_list=True, preserve_rc=False)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:
raise IOError(to_native(result.std_err))
if result.std_out.strip() == '[DIR]':
data = None
else:
data = base64.b64decode(result.std_out.strip())
if data is None:
makedirs_safe(out_path)
break
else:
if not out_file:
# If out_path is a directory and we're expecting a file, bail out now.
if os.path.isdir(to_bytes(out_path, errors='surrogate_or_strict')):
break
out_file = open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb')
out_file.write(data)
if len(data) < buffer_size:
break
offset += len(data)
except Exception:
traceback.print_exc()
raise AnsibleError('failed to transfer file to "%s"' % out_path)
finally:
if out_file:
out_file.close()
def close(self):
if self.protocol and self.shell_id:
display.vvvvv('WINRM CLOSE SHELL: %s' % self.shell_id, host=self._winrm_host)
self.protocol.close_shell(self.shell_id)
self.shell_id = None
self.protocol = None
self._connected = False
| gpl-3.0 |
adrianschlatter/python-ivi | ivi/agilent/agilentDSOX3032A.py | 7 | 1694 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent3000A import *
class agilentDSOX3032A(agilent3000A):
"Agilent InfiniiVision DSOX3032A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'DSO-X 3032A')
super(agilentDSOX3032A, self).__init__(*args, **kwargs)
self._analog_channel_count = 2
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 350e6
self._init_channels()
| mit |
prepare/TestWeasyPrint | weasyprint/html.py | 4 | 11779 |
# coding: utf8
"""
weasyprint.html
---------------
Specific handling for some HTML elements, especially replaced elements.
Replaced elements (eg. <img> elements) are rendered externally and
behave as an atomic opaque box in CSS. In general, they may or may not
have intrinsic dimensions. But the only replaced elements currently
supported in WeasyPrint are images with intrinsic dimensions.
:copyright: Copyright 2011-2014 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
import os.path
import logging
import re
from .css import get_child_text
from .formatting_structure import boxes
from .urls import get_url_attribute
from .compat import xrange, urljoin
from .logger import LOGGER
from . import CSS
# XXX temporarily disable logging for user-agent stylesheet
level = LOGGER.level
LOGGER.setLevel(logging.ERROR)
HTML5_UA_STYLESHEET = CSS(
filename=os.path.join(os.path.dirname(__file__), 'css', 'html5_ua.css'))
LOGGER.setLevel(level)
# http://whatwg.org/C#space-character
HTML_WHITESPACE = ' \t\n\f\r'
HTML_SPACE_SEPARATED_TOKENS_RE = re.compile('[^%s]+' % HTML_WHITESPACE)
def ascii_lower(string):
r"""Transform (only) ASCII letters to lower case: A-Z is mapped to a-z.
:param string: An Unicode string.
:returns: A new Unicode string.
This is used for `ASCII case-insensitive
<http://whatwg.org/C#ascii-case-insensitive>`_ matching.
This is different from the :meth:`~py:str.lower` method of Unicode strings
    which also affects non-ASCII characters,
sometimes mapping them into the ASCII range:
>>> keyword = u'Bac\N{KELVIN SIGN}ground'
>>> assert keyword.lower() == u'background'
>>> assert ascii_lower(keyword) != keyword.lower()
>>> assert ascii_lower(keyword) == u'bac\N{KELVIN SIGN}ground'
"""
# This turns out to be faster than unicode.translate()
return string.encode('utf8').lower().decode('utf8')
def element_has_link_type(element, link_type):
"""
Return whether the given element has a ``rel`` attribute with the
given link type.
:param link_type: Must be a lower-case string.
"""
return any(ascii_lower(token) == link_type for token in
HTML_SPACE_SEPARATED_TOKENS_RE.findall(element.get('rel', '')))
# Maps HTML tag names to function taking an HTML element and returning a Box.
HTML_HANDLERS = {}
def handle_element(element, box, get_image_from_uri):
"""Handle HTML elements that need special care.
:returns: a (possibly empty) list of boxes.
"""
if box.element_tag in HTML_HANDLERS:
return HTML_HANDLERS[element.tag](element, box, get_image_from_uri)
else:
return [box]
def handler(tag):
"""Return a decorator registering a function handling ``tag`` elements."""
def decorator(function):
"""Decorator registering a function handling ``tag`` elements."""
HTML_HANDLERS[tag] = function
return function
return decorator
def make_replaced_box(element, box, image):
"""Wrap an image in a replaced box.
That box is either block-level or inline-level, depending on what the
element should be.
"""
if box.style.display in ('block', 'list-item', 'table'):
type_ = boxes.BlockReplacedBox
else:
# TODO: support images with 'display: table-cell'?
type_ = boxes.InlineReplacedBox
return type_(element.tag, element.sourceline, box.style, image)
@handler('img')
def handle_img(element, box, get_image_from_uri):
"""Handle ``<img>`` elements, return either an image or the alt-text.
See: http://www.w3.org/TR/html5/embedded-content-1.html#the-img-element
"""
src = get_url_attribute(element, 'src')
alt = element.get('alt')
if src:
image = get_image_from_uri(src)
if image is not None:
return [make_replaced_box(element, box, image)]
else:
# Invalid image, use the alt-text.
if alt:
return [box.copy_with_children(
[boxes.TextBox.anonymous_from(box, alt)])]
elif alt == '':
# The element represents nothing
return []
else:
assert alt is None
# TODO: find some indicator that an image is missing.
# For now, just remove the image.
return []
else:
if alt:
return [box.copy_with_children(
[boxes.TextBox.anonymous_from(box, alt)])]
else:
return []
@handler('embed')
def handle_embed(element, box, get_image_from_uri):
"""Handle ``<embed>`` elements, return either an image or nothing.
See: http://www.w3.org/TR/html5/the-iframe-element.html#the-embed-element
"""
src = get_url_attribute(element, 'src')
type_ = element.get('type', '').strip()
if src:
image = get_image_from_uri(src, type_)
if image is not None:
return [make_replaced_box(element, box, image)]
# No fallback.
return []
@handler('object')
def handle_object(element, box, get_image_from_uri):
"""Handle ``<object>`` elements, return either an image or the fallback
content.
See: http://www.w3.org/TR/html5/the-iframe-element.html#the-object-element
"""
data = get_url_attribute(element, 'data')
type_ = element.get('type', '').strip()
if data:
image = get_image_from_uri(data, type_)
if image is not None:
return [make_replaced_box(element, box, image)]
# The element’s children are the fallback.
return [box]
def integer_attribute(element, box, name, minimum=1):
"""Read an integer attribute from the HTML element and set it on the box.
"""
value = element.get(name, '').strip()
if value:
try:
value = int(value)
except ValueError:
pass
else:
if value >= minimum:
setattr(box, name, value)
@handler('colgroup')
def handle_colgroup(element, box, _get_image_from_uri):
"""Handle the ``span`` attribute."""
if isinstance(box, boxes.TableColumnGroupBox):
if any(child.tag == 'col' for child in element):
box.span = None # sum of the children’s spans
else:
integer_attribute(element, box, 'span')
box.children = (
boxes.TableColumnBox.anonymous_from(box, [])
for _i in xrange(box.span))
return [box]
@handler('col')
def handle_col(element, box, _get_image_from_uri):
"""Handle the ``span`` attribute."""
if isinstance(box, boxes.TableColumnBox):
integer_attribute(element, box, 'span')
if box.span > 1:
# Generate multiple boxes
# http://lists.w3.org/Archives/Public/www-style/2011Nov/0293.html
return [box.copy() for _i in xrange(box.span)]
return [box]
@handler('th')
@handler('td')
def handle_td(element, box, _get_image_from_uri):
"""Handle the ``colspan``, ``rowspan`` attributes."""
if isinstance(box, boxes.TableCellBox):
# HTML 4.01 gives special meaning to colspan=0
# http://www.w3.org/TR/html401/struct/tables.html#adef-rowspan
# but HTML 5 removed it
# http://www.w3.org/TR/html5/tabular-data.html#attr-tdth-colspan
# rowspan=0 is still there though.
integer_attribute(element, box, 'colspan')
integer_attribute(element, box, 'rowspan', minimum=0)
return [box]
@handler('a')
def handle_a(element, box, _get_image_from_uri):
"""Handle the ``rel`` attribute."""
box.is_attachment = element_has_link_type(element, 'attachment')
return [box]
def find_base_url(html_document, fallback_base_url):
"""Return the base URL for the document.
See http://www.w3.org/TR/html5/urls.html#document-base-url
"""
first_base_element = next(iter(html_document.iter('base')), None)
if first_base_element is not None:
href = first_base_element.get('href', '').strip()
if href:
return urljoin(fallback_base_url, href)
return fallback_base_url
def get_html_metadata(html_document):
"""
Relevant specs:
http://www.whatwg.org/html#the-title-element
http://www.whatwg.org/html#standard-metadata-names
http://wiki.whatwg.org/wiki/MetaExtensions
http://microformats.org/wiki/existing-rel-values#HTML5_link_type_extensions
"""
title = None
description = None
generator = None
keywords = []
authors = []
created = None
modified = None
attachments = []
for element in html_document.iter('title', 'meta', 'link'):
if element.tag == 'title' and title is None:
title = get_child_text(element)
elif element.tag == 'meta':
name = ascii_lower(element.get('name', ''))
content = element.get('content', '')
if name == 'keywords':
for keyword in map(strip_whitespace, content.split(',')):
if keyword not in keywords:
keywords.append(keyword)
elif name == 'author':
authors.append(content)
elif name == 'description' and description is None:
description = content
elif name == 'generator' and generator is None:
generator = content
elif name == 'dcterms.created' and created is None:
created = parse_w3c_date(name, element.sourceline, content)
elif name == 'dcterms.modified' and modified is None:
modified = parse_w3c_date(name, element.sourceline, content)
elif element.tag == 'link' and element_has_link_type(
element, 'attachment'):
url = get_url_attribute(element, 'href')
title = element.get('title', None)
if url is None:
LOGGER.warning('Missing href in <link rel="attachment">')
else:
attachments.append((url, title))
return dict(title=title, description=description, generator=generator,
keywords=keywords, authors=authors,
created=created, modified=modified,
attachments=attachments)
def strip_whitespace(string):
"""Use the HTML definition of "space character",
not all Unicode Whitespace.
http://www.whatwg.org/html#strip-leading-and-trailing-whitespace
http://www.whatwg.org/html#space-character
"""
return string.strip(' \t\n\f\r')
# YYYY (eg 1997)
# YYYY-MM (eg 1997-07)
# YYYY-MM-DD (eg 1997-07-16)
# YYYY-MM-DDThh:mmTZD (eg 1997-07-16T19:20+01:00)
# YYYY-MM-DDThh:mm:ssTZD (eg 1997-07-16T19:20:30+01:00)
# YYYY-MM-DDThh:mm:ss.sTZD (eg 1997-07-16T19:20:30.45+01:00)
W3C_DATE_RE = re.compile('''
^
[ \t\n\f\r]*
(?P<year>\d\d\d\d)
(?:
-(?P<month>0\d|1[012])
(?:
-(?P<day>[012]\d|3[01])
(?:
T(?P<hour>[01]\d|2[0-3])
:(?P<minute>[0-5]\d)
(?:
:(?P<second>[0-5]\d)
(?:\.\d+)? # Second fraction, ignored
)?
(?:
Z | # UTC
(?P<tz_hour>[+-](?:[01]\d|2[0-3]))
:(?P<tz_minute>[0-5]\d)
)
)?
)?
)?
[ \t\n\f\r]*
$
''', re.VERBOSE)
def parse_w3c_date(meta_name, source_line, string):
"""http://www.w3.org/TR/NOTE-datetime"""
if W3C_DATE_RE.match(string):
return string
else:
LOGGER.warning('Invalid date in <meta name="%s"> line %i: %r',
meta_name, source_line, string)
| bsd-3-clause |
jkthompson/nupic | tests/unit/py2/nupic/algorithms/fast_cla_classifier_test.py | 17 | 1609 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for the FastCLAClassifier.
This test extends the test for the Python CLAClassifier to ensure that both
classifiers and their tests stay in sync.
"""
import unittest2 as unittest
from nupic.bindings.algorithms import FastCLAClassifier
# Don't import the CLAClassifierTest directly or the unittest.main() will pick
# it up and run it.
import cla_classifier_test
class FastCLAClassifierTest(cla_classifier_test.CLAClassifierTest):
"""Unit tests for FastCLAClassifier class."""
def setUp(self):
self._classifier = FastCLAClassifier
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
Arcanemagus/SickRage | lib/unidecode/x07a.py | 252 | 4669 | data = (
'Xi ', # 0x00
'Kao ', # 0x01
'Lang ', # 0x02
'Fu ', # 0x03
'Ze ', # 0x04
'Shui ', # 0x05
'Lu ', # 0x06
'Kun ', # 0x07
'Gan ', # 0x08
'Geng ', # 0x09
'Ti ', # 0x0a
'Cheng ', # 0x0b
'Tu ', # 0x0c
'Shao ', # 0x0d
'Shui ', # 0x0e
'Ya ', # 0x0f
'Lun ', # 0x10
'Lu ', # 0x11
'Gu ', # 0x12
'Zuo ', # 0x13
'Ren ', # 0x14
'Zhun ', # 0x15
'Bang ', # 0x16
'Bai ', # 0x17
'Ji ', # 0x18
'Zhi ', # 0x19
'Zhi ', # 0x1a
'Kun ', # 0x1b
'Leng ', # 0x1c
'Peng ', # 0x1d
'Ke ', # 0x1e
'Bing ', # 0x1f
'Chou ', # 0x20
'Zu ', # 0x21
'Yu ', # 0x22
'Su ', # 0x23
'Lue ', # 0x24
'[?] ', # 0x25
'Yi ', # 0x26
'Xi ', # 0x27
'Bian ', # 0x28
'Ji ', # 0x29
'Fu ', # 0x2a
'Bi ', # 0x2b
'Nuo ', # 0x2c
'Jie ', # 0x2d
'Zhong ', # 0x2e
'Zong ', # 0x2f
'Xu ', # 0x30
'Cheng ', # 0x31
'Dao ', # 0x32
'Wen ', # 0x33
'Lian ', # 0x34
'Zi ', # 0x35
'Yu ', # 0x36
'Ji ', # 0x37
'Xu ', # 0x38
'Zhen ', # 0x39
'Zhi ', # 0x3a
'Dao ', # 0x3b
'Jia ', # 0x3c
'Ji ', # 0x3d
'Gao ', # 0x3e
'Gao ', # 0x3f
'Gu ', # 0x40
'Rong ', # 0x41
'Sui ', # 0x42
'You ', # 0x43
'Ji ', # 0x44
'Kang ', # 0x45
'Mu ', # 0x46
'Shan ', # 0x47
'Men ', # 0x48
'Zhi ', # 0x49
'Ji ', # 0x4a
'Lu ', # 0x4b
'Su ', # 0x4c
'Ji ', # 0x4d
'Ying ', # 0x4e
'Wen ', # 0x4f
'Qiu ', # 0x50
'Se ', # 0x51
'[?] ', # 0x52
'Yi ', # 0x53
'Huang ', # 0x54
'Qie ', # 0x55
'Ji ', # 0x56
'Sui ', # 0x57
'Xiao ', # 0x58
'Pu ', # 0x59
'Jiao ', # 0x5a
'Zhuo ', # 0x5b
'Tong ', # 0x5c
'Sai ', # 0x5d
'Lu ', # 0x5e
'Sui ', # 0x5f
'Nong ', # 0x60
'Se ', # 0x61
'Hui ', # 0x62
'Rang ', # 0x63
'Nuo ', # 0x64
'Yu ', # 0x65
'Bin ', # 0x66
'Ji ', # 0x67
'Tui ', # 0x68
'Wen ', # 0x69
'Cheng ', # 0x6a
'Huo ', # 0x6b
'Gong ', # 0x6c
'Lu ', # 0x6d
'Biao ', # 0x6e
'[?] ', # 0x6f
'Rang ', # 0x70
'Zhuo ', # 0x71
'Li ', # 0x72
'Zan ', # 0x73
'Xue ', # 0x74
'Wa ', # 0x75
'Jiu ', # 0x76
'Qiong ', # 0x77
'Xi ', # 0x78
'Qiong ', # 0x79
'Kong ', # 0x7a
'Yu ', # 0x7b
'Sen ', # 0x7c
'Jing ', # 0x7d
'Yao ', # 0x7e
'Chuan ', # 0x7f
'Zhun ', # 0x80
'Tu ', # 0x81
'Lao ', # 0x82
'Qie ', # 0x83
'Zhai ', # 0x84
'Yao ', # 0x85
'Bian ', # 0x86
'Bao ', # 0x87
'Yao ', # 0x88
'Bing ', # 0x89
'Wa ', # 0x8a
'Zhu ', # 0x8b
'Jiao ', # 0x8c
'Qiao ', # 0x8d
'Diao ', # 0x8e
'Wu ', # 0x8f
'Gui ', # 0x90
'Yao ', # 0x91
'Zhi ', # 0x92
'Chuang ', # 0x93
'Yao ', # 0x94
'Tiao ', # 0x95
'Jiao ', # 0x96
'Chuang ', # 0x97
'Jiong ', # 0x98
'Xiao ', # 0x99
'Cheng ', # 0x9a
'Kou ', # 0x9b
'Cuan ', # 0x9c
'Wo ', # 0x9d
'Dan ', # 0x9e
'Ku ', # 0x9f
'Ke ', # 0xa0
'Zhui ', # 0xa1
'Xu ', # 0xa2
'Su ', # 0xa3
'Guan ', # 0xa4
'Kui ', # 0xa5
'Dou ', # 0xa6
'[?] ', # 0xa7
'Yin ', # 0xa8
'Wo ', # 0xa9
'Wa ', # 0xaa
'Ya ', # 0xab
'Yu ', # 0xac
'Ju ', # 0xad
'Qiong ', # 0xae
'Yao ', # 0xaf
'Yao ', # 0xb0
'Tiao ', # 0xb1
'Chao ', # 0xb2
'Yu ', # 0xb3
'Tian ', # 0xb4
'Diao ', # 0xb5
'Ju ', # 0xb6
'Liao ', # 0xb7
'Xi ', # 0xb8
'Wu ', # 0xb9
'Kui ', # 0xba
'Chuang ', # 0xbb
'Zhao ', # 0xbc
'[?] ', # 0xbd
'Kuan ', # 0xbe
'Long ', # 0xbf
'Cheng ', # 0xc0
'Cui ', # 0xc1
'Piao ', # 0xc2
'Zao ', # 0xc3
'Cuan ', # 0xc4
'Qiao ', # 0xc5
'Qiong ', # 0xc6
'Dou ', # 0xc7
'Zao ', # 0xc8
'Long ', # 0xc9
'Qie ', # 0xca
'Li ', # 0xcb
'Chu ', # 0xcc
'Shi ', # 0xcd
'Fou ', # 0xce
'Qian ', # 0xcf
'Chu ', # 0xd0
'Hong ', # 0xd1
'Qi ', # 0xd2
'Qian ', # 0xd3
'Gong ', # 0xd4
'Shi ', # 0xd5
'Shu ', # 0xd6
'Miao ', # 0xd7
'Ju ', # 0xd8
'Zhan ', # 0xd9
'Zhu ', # 0xda
'Ling ', # 0xdb
'Long ', # 0xdc
'Bing ', # 0xdd
'Jing ', # 0xde
'Jing ', # 0xdf
'Zhang ', # 0xe0
'Yi ', # 0xe1
'Si ', # 0xe2
'Jun ', # 0xe3
'Hong ', # 0xe4
'Tong ', # 0xe5
'Song ', # 0xe6
'Jing ', # 0xe7
'Diao ', # 0xe8
'Yi ', # 0xe9
'Shu ', # 0xea
'Jing ', # 0xeb
'Qu ', # 0xec
'Jie ', # 0xed
'Ping ', # 0xee
'Duan ', # 0xef
'Shao ', # 0xf0
'Zhuan ', # 0xf1
'Ceng ', # 0xf2
'Deng ', # 0xf3
'Cui ', # 0xf4
'Huai ', # 0xf5
'Jing ', # 0xf6
'Kan ', # 0xf7
'Jing ', # 0xf8
'Zhu ', # 0xf9
'Zhu ', # 0xfa
'Le ', # 0xfb
'Peng ', # 0xfc
'Yu ', # 0xfd
'Chi ', # 0xfe
'Gan ', # 0xff
)
| gpl-3.0 |
luthfii/xsched | tools/python/xen/xend/XendVnet.py | 52 | 5488 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <[email protected]>
# Copyright (C) 2005 XenSource Ltd
#============================================================================
"""Handler for vnet operations.
"""
from xen.util import Brctl
from xen.xend import sxp
from xen.xend.XendError import XendError
from xen.xend.XendLogging import log
from xen.xend.xenstore.xstransact import xstransact
def vnet_cmd(cmd):
out = None
try:
try:
out = file("/proc/vnet/policy", "wb")
sxp.show(cmd, out)
except IOError, ex:
raise XendError(str(ex))
finally:
if out: out.close()
class XendVnetInfo:
vifctl_ops = {'up': 'vif.add', 'down': 'vif.del'}
def __init__(self, dbpath, config=None):
if config:
self.id = str(sxp.child_value(config, 'id'))
self.dbid = self.id.replace(':', '-')
self.dbpath = dbpath + '/' + self.dbid
self.config = config
else:
self.dbpath = dbpath
self.importFromDB()
self.bridge = sxp.child_value(self.config, 'bridge')
if not self.bridge:
self.bridge = "vnet%s" % self.id
self.vnetif = sxp.child_value(self.config, 'vnetif')
if not self.vnetif:
self.vnetif = "vnif%s" % self.id
def exportToDB(self, save=False, sync=False):
to_store = {
'id' : self.id,
'dbid' : self.dbid,
'config' : sxp.to_string(self.config)
}
xstransact.Write(self.dbpath, to_store)
def importFromDB(self):
(self.id, self.dbid, c) = xstransact.Gather(self.dbpath,
('id', str),
('dbid', str),
('config', str))
self.config = sxp.from_string(c)
def sxpr(self):
return self.config
def configure(self):
log.info("Configuring vnet %s", self.id)
val = vnet_cmd(['vnet.add'] + sxp.children(self.config))
Brctl.bridge_create(self.bridge)
Brctl.vif_bridge_add({'bridge': self.bridge, 'vif': self.vnetif})
return val
def delete(self):
log.info("Deleting vnet %s", self.id)
Brctl.vif_bridge_rem({'bridge': self.bridge, 'vif': self.vnetif})
Brctl.bridge_del(self.bridge)
val = vnet_cmd(['vnet.del', self.id])
xstransact.Remove(self.dbpath)
return val
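    # Map an 'up'/'down' operation onto the corresponding vnet command
    # ('vif.add'/'vif.del', see vifctl_ops) for the given vif and MAC.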
def vifctl(self, op, vif, vmac):
try:
fn = self.vifctl_ops[op]
return vnet_cmd([fn, ['vnet', self.id], ['vif', vif], ['vmac', vmac]])
except XendError:
log.warning("vifctl failed: op=%s vif=%s mac=%s", op, vif, vmac)
class XendVnet:
"""Index of all vnets. Singleton.
"""
dbpath = "/vnet"
def __init__(self):
# Table of vnet info indexed by vnet id.
self.vnet = {}
listing = xstransact.List(self.dbpath)
for entry in listing:
try:
info = XendVnetInfo(self.dbpath + '/' + entry)
self.vnet[info.id] = info
info.configure()
except XendError, ex:
log.warning("Failed to configure vnet %s: %s", str(info.id), str(ex))
except Exception, ex:
log.exception("Vnet error")
xstransact.Remove(self.dbpath + '/' + entry)
def vnet_of_bridge(self, bridge):
"""Get the vnet for a bridge (if any).
@param bridge: bridge name
@return vnet or None
"""
for v in self.vnet.values():
if v.bridge == bridge:
return v
else:
return None
def vnet_ls(self):
"""List all vnet ids.
"""
return self.vnet.keys()
def vnets(self):
"""List all vnets.
"""
return self.vnet.values()
def vnet_get(self, id):
"""Get a vnet.
@param id vnet id
"""
id = str(id)
return self.vnet.get(id)
def vnet_create(self, config):
"""Create a vnet.
@param config: config
"""
info = XendVnetInfo(self.dbpath, config=config)
self.vnet[info.id] = info
info.exportToDB()
info.configure()
def vnet_delete(self, id):
"""Delete a vnet.
@param id: vnet id
"""
info = self.vnet_get(id)
if info:
del self.vnet[id]
info.delete()
def instance():
global inst
try:
inst
except:
inst = XendVnet()
return inst
| gpl-2.0 |
froch/kubernetes-py | kubernetes_py/models/v1beta1/CronJobTemplate.py | 6 | 1863 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
from kubernetes_py.models.v1.JobSpec import JobSpec
from kubernetes_py.models.v1.ObjectMeta import ObjectMeta
class CronJobTemplate(object):
def __init__(self, model=None):
super(CronJobTemplate, self).__init__()
self._metadata = ObjectMeta()
self._spec = JobSpec()
if model is not None:
self._build_with_model(model)
def _build_with_model(self, model=None):
if "metadata" in model:
self.metadata = ObjectMeta(model["metadata"])
if "spec" in model:
self.spec = JobSpec(model["spec"])
# ------------------------------------------------------------------------------------- metadata
@property
def metadata(self):
return self._metadata
@metadata.setter
def metadata(self, meta=None):
if not isinstance(meta, ObjectMeta):
raise SyntaxError("CronJobTemplate: metadata: [ {} ] is invalid.".format(meta))
self._metadata = meta
# ------------------------------------------------------------------------------------- spec
@property
def spec(self):
return self._spec
@spec.setter
def spec(self, spec=None):
if not isinstance(spec, JobSpec):
raise SyntaxError("CronJobTemplate: spec: [ {} ] is invalid.".format(spec))
self._spec = spec
# ------------------------------------------------------------------------------------- serialize
def serialize(self):
data = {}
if self.metadata is not None:
data["metadata"] = self.metadata.serialize()
if self.spec is not None:
data["spec"] = self.spec.serialize()
return data
| apache-2.0 |
tensorflow/mesh | mesh_tensorflow/transformer/transformer_layers.py | 1 | 85747 | # coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers for the Transformer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import math
import gin
import mesh_tensorflow as mtf
from mesh_tensorflow import layers
from mesh_tensorflow.transformer import attention
from mesh_tensorflow.transformer import transformer
import tensorflow.compat.v1 as tf
@gin.configurable
class DenseReluDense(transformer.TransformerLayer):
"""Two dense layers with ReLU or other activation on hidden layer."""
def __init__(self, hidden_size=4096, dropout_rate=0.0, activation="relu",
use_bias=False):
"""Create a DenseReluDense.
Args:
hidden_size: an integer - size of the hidden layer
dropout_rate: a floating-point number
activation: an activation function or a list of activation functions.
see documentation for mtf.layers.dense_product()
use_bias: a boolean, whether to use bias in the dense layers.
"""
self.hidden_size = hidden_size
self.dropout_rate = dropout_rate
self.activation = activation
self.use_bias = use_bias
def call(self, context, x, losses=None):
"""Call the layer."""
io_channels = x.shape.dims[-1]
hidden_channels = mtf.Dimension("d_ff", self.hidden_size)
h = mtf.layers.dense_product(x,
reduced_dims=x.shape.dims[-1:],
new_dims=hidden_channels,
activation_functions=self.activation,
use_bias=self.use_bias,
variable_dtype=context.variable_dtype,
name="wi",
expert_dims=context.model.ensemble_dims)
if context.train and self.dropout_rate != 0.0:
h = mtf.dropout(h, context.train, keep_prob=1.0 - self.dropout_rate,
noise_shape=h.shape - context.length_dim)
return mtf.layers.dense(h, io_channels,
use_bias=self.use_bias,
activation=None,
variable_dtype=context.variable_dtype,
reduced_dims=h.shape.dims[-1:],
name="wo",
expert_dims=context.model.ensemble_dims)
def attention_params(context,
kv_dim,
num_heads,
num_memory_heads=0,
shared_kv=False,
no_query=False,
combine_dims=True,
keep_query_heads_dims=False,
fold_scaling_into_initializer=True):
"""Attention Parameters for Transformer Layers.
The num_heads argument indicates the number of read-heads.
For the familiar behavior described in "Attention Is All You Need", set
num_memory_heads=0.
If num_memory_heads==1, then there is only a single write-head, and multiple
read-heads. This leads to faster incremental decoding, since the
  recurrent state is smaller.
If num_memory_heads > 1, then num_memory_heads indicates the number of
write-heads. A fraction of the read-heads read each write-head.
num_memory_heads must divide num_heads. This behavior has not yet been tested.
  The no_query flag is set to true when we do not want to create query
  parameters (for the synthesizer model).
Args:
context: a transformer.Context
kv_dim: a dimension (for key and value channels)
num_heads: an integer
num_memory_heads: an optional integer
shared_kv: a boolean
no_query: a boolean
combine_dims: a boolean
keep_query_heads_dims: a boolean
fold_scaling_into_initializer: a boolean
Returns:
an attention.AttentionParams object
"""
if num_heads == 1:
query_heads_dims = None
memory_heads_dims = None
elif num_memory_heads == 0:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = query_heads_dims
elif num_memory_heads == 1:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = None
else:
if num_heads % num_memory_heads != 0:
raise ValueError("num_memory_heads must divide num_heads")
memory_heads_dims = [mtf.Dimension("heads", num_memory_heads)]
query_heads_dims = memory_heads_dims + [
mtf.Dimension("query_heads", num_heads // num_memory_heads)]
return attention.AttentionParams(
context.mesh,
query_input_dim=context.model.model_dim,
memory_input_dim=context.model.model_dim,
output_dim=context.model.model_dim,
key_dim=kv_dim,
value_dim=kv_dim,
query_heads_dims=query_heads_dims,
memory_heads_dims=memory_heads_dims,
variable_dtype=context.variable_dtype,
shared_kv=shared_kv,
no_query=no_query,
ensemble_dim=context.model.ensemble_dim,
combine_dims=combine_dims,
keep_query_heads_dims=keep_query_heads_dims,
fold_scaling_into_initializer=fold_scaling_into_initializer)
@gin.configurable
class SelfAttention(transformer.TransformerLayer):
"""Multi-head self-attention layer."""
def __init__(self,
num_heads=8,
num_memory_heads=0,
key_value_size=128,
shared_kv=False,
dropout_rate=0.0,
attention_kwargs=None,
relative_attention_type=None,
relative_attention_num_buckets=32,
attention_func=None,
combine_dims=True,
keep_query_heads_dims=False,
fold_scaling_into_initializer=True):
"""Create a SelfAttention Layer.
Args:
num_heads: an integer
num_memory_heads: an optional integer
key_value_size: an integer
shared_kv: a boolean
dropout_rate: a float
attention_kwargs: a dictionary of kwargs for attention.attention
relative_attention_type: an optional string - one of
(None, "bias", "bias_shared", "contextual")
relative_attention_num_buckets: an integer
attention_func: attention function: None/'hybrid'.
combine_dims: a boolean
keep_query_heads_dims: a boolean
fold_scaling_into_initializer: a boolean
"""
self.num_heads = num_heads
self.num_memory_heads = num_memory_heads
self.key_value_size = key_value_size
self.shared_kv = shared_kv
self.dropout_rate = dropout_rate
self.attention_kwargs = attention_kwargs or {}
self.relative_attention_type = relative_attention_type
self.relative_attention_num_buckets = relative_attention_num_buckets
self.attention_func = attention_func
self.combine_dims = combine_dims
self.keep_query_heads_dims = keep_query_heads_dims
self.fold_scaling_into_initializer = fold_scaling_into_initializer
def layer_output_from_attention_output(self, context, attention_output,
losses):
return attention_output
def expected_attention_output_shape(self, x, params):
if self.keep_query_heads_dims:
return mtf.Shape(x.shape[:-1] + params.query_heads_dims + x.shape[-1:])
return x.shape
def attention_kwargs_from_context(self, context):
kwargs = copy.copy(self.attention_kwargs)
kwargs["dropout_rate"] = self.dropout_rate if context.train else 0.0
if "dropout_broadcast_dims" not in kwargs:
kwargs["dropout_broadcast_dims"] = [context.length_dim]
return kwargs
def make_params(self, context):
return attention_params(
context=context,
kv_dim=self.kv_dim,
num_heads=self.num_heads,
num_memory_heads=self.num_memory_heads,
shared_kv=self.shared_kv,
combine_dims=self.combine_dims,
keep_query_heads_dims=self.keep_query_heads_dims,
fold_scaling_into_initializer=self.fold_scaling_into_initializer)
def call(self, context, x, losses=None):
"""Call the layer."""
params = self.make_params(context)
q = params.compute_q(x)
memory_length = self.memory_length(context)
if context.mode == "incremental":
m = x
else:
m = mtf.replace_dimensions(x, context.length_dim, memory_length)
if self.shared_kv:
kv = params.compute_kv(m)
else:
k = params.compute_k(m)
v = params.compute_v(m)
if context.mode == "incremental":
one_hot = mtf.one_hot(
context.position, memory_length, dtype=context.activation_dtype)
inv_one_hot = 1.0 - one_hot
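      # Incremental decoding: write this step's key/value into the cached
      # states at the current position (selected by one_hot), leaving the
      # rest of the cache unchanged.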
if self.shared_kv:
old_kv = context.get_states(1)
kv = old_kv * inv_one_hot + kv * one_hot
else:
old_k, old_v = context.get_states(2)
k = old_k * inv_one_hot + k * one_hot
v = old_v * inv_one_hot + v * one_hot
memory_position = mtf.range(context.mesh, memory_length, tf.int32)
else:
memory_position = self.rename_length_to_memory_length(
context.position, context)
if context.mode == "incremental" or context.mode == "first_part":
context.record_new_states([kv] if self.shared_kv else [k, v])
if self.shared_kv:
k = kv
v = kv
o = self.attention_fn(
q, k, v, context=context, memory_length_dim=memory_length,
key_dim=self.kv_dim, value_dim=self.kv_dim,
bias=self.compute_bias(context, memory_position, x,
params.query_heads_dims, q),
**self.attention_kwargs_from_context(context))
attention_output_shape = self.expected_attention_output_shape(x, params)
attention_output = params.compute_output(
o, output_shape=attention_output_shape)
return self.layer_output_from_attention_output(context, attention_output,
losses)
def compute_bias(self, context, memory_position, x, heads_dims, q):
"""Compute attention bias.
Args:
context: a transformer.Context
memory_position: an int32 tensor containing memory_length dimension.
x: a Tensor - the query antecedent - required for relative attention
heads_dims: a list of dimensions
q: a Tensor - the queries - required for contextual relative attention
Returns:
a Tensor or None
"""
min_relative_position = self.min_relative_position(context) # pylint: disable=assignment-from-none
max_relative_position = self.max_relative_position(context) # pylint: disable=assignment-from-none
biases = []
relative_position = memory_position - context.position
if min_relative_position is not None:
visible = mtf.greater_equal(relative_position, min_relative_position)
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
if max_relative_position is not None:
visible = mtf.less_equal(relative_position, max_relative_position)
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
if context.read_priority is not None:
visible = mtf.greater_equal(
context.read_priority,
mtf.layers.rename_length_to_memory_length(context.write_priority))
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
sequence_id = None
# Subsequence id should only be set if we are in the decoder and have
# multiple targets per input. This will allow each sub-target to only attend
# to itself.
if isinstance(context.subsequence_id, mtf.Tensor):
sequence_id = context.subsequence_id
elif isinstance(context.sequence_id, mtf.Tensor):
sequence_id = context.sequence_id
if (sequence_id is not None and context.length_dim in sequence_id.shape):
visible = mtf.equal(
sequence_id,
self.rename_length_to_memory_length(sequence_id, context))
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
if self.relative_attention_type is not None:
buckets_dim = mtf.Dimension(
"buckets", self.relative_attention_num_buckets)
bidirectional = not context.model.fully_autoregressive
rp_bucket = _relative_position_bucket(
relative_position,
bidirectional=bidirectional,
num_buckets=buckets_dim.size)
if (self.relative_attention_type == "bias" or
self.relative_attention_type == "bias_shared"):
bias_shape = context.model.ensemble_dims + heads_dims + [buckets_dim]
values = None
cache = self.relative_attention_type == "bias_shared"
if cache:
cache_key = ("self_attention_bias",
min_relative_position,
max_relative_position,
tuple(heads_dims))
if cache_key in context.cache:
values = context.cache[cache_key]
if values is None:
values = mtf.get_variable(
context.mesh, "relative_attention_bias",
bias_shape, dtype=context.variable_dtype)
if cache:
context.cache[cache_key] = values
elif self.relative_attention_type == "contextual":
values = layers.dense(
q, reduced_dims=[self.kv_dim],
new_dims=[buckets_dim],
variable_dtype=context.variable_dtype,
name="relative_attention_ak",
use_bias=False,
expert_dims=context.model.ensemble_dims + heads_dims)
else:
raise ValueError("unrecognized relative_attention_type \"%s\"" %
self.relative_attention_type)
biases.append(mtf.gather(values, rp_bucket, buckets_dim))
return mtf.add_n(biases) if biases else None
@property
def kv_dim(self):
return mtf.Dimension("d_kv", self.key_value_size)
def memory_length(self, context):
return mtf.Dimension("memory_length", context.length_dim.size)
def rename_length_to_memory_length(self, x, context):
return mtf.replace_dimensions(
x, context.length_dim, self.memory_length(context))
def min_relative_position(self, context):
return None
def max_relative_position(self, context):
return None
@property
def attention_fn(self):
if self.attention_func == "hybrid":
return attention.hybrid_attention
else:
return attention.attention
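# Illustrative gin-style configuration sketch for this layer (hypothetical
# values, assuming SelfAttention is gin-configurable like the other layers in
# this file):
#   SelfAttention.num_heads = 12
#   SelfAttention.key_value_size = 64
#   SelfAttention.relative_attention_type = "bias_shared"
#   SelfAttention.relative_attention_num_buckets = 32
# With "bias_shared", the learned relative-position bias is created once and
# then reused via context.cache by layers that share the same cache key.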
@gin.configurable
class ExpertsSelfAttention(SelfAttention):
"""Expert-layers for SelfAttention computations."""
def __init__(self,
num_experts=16,
loss_coef=1e-2,
group_size=1024,
capacity_factor_train=1.25,
capacity_factor_eval=2.0,
moe_gating="switch",
min_expert_capacity=4,
switch_policy_train="input_jitter",
switch_policy_eval="input_jitter",
switch_dropout=0.0,
switch_temperature=1.0,
switch_jitter=1e-2,
switch_top_k=4,
hidden_size=3072,
use_experts_attention=True,
**kwargs):
super(ExpertsSelfAttention, self).__init__(**kwargs)
self._hparams = mtf.transformer.moe.HParams(
moe_gating=moe_gating,
num_experts=num_experts,
loss_coef=loss_coef,
group_size=group_size,
min_expert_capacity=min_expert_capacity,
capacity_factor_train=capacity_factor_train,
capacity_factor_eval=capacity_factor_eval,
switch_policy_train=switch_policy_train,
switch_policy_eval=switch_policy_eval,
switch_dropout=switch_dropout,
switch_temperature=switch_temperature,
switch_jitter=switch_jitter,
switch_top_k=switch_top_k,
hidden_size=hidden_size,
use_experts_attention=use_experts_attention)
def make_params(self, context):
num_heads = self.num_heads
num_memory_heads = self.num_memory_heads
if num_heads == 1:
query_heads_dims = None
memory_heads_dims = None
elif num_memory_heads == 0:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = query_heads_dims
elif num_memory_heads == 1:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = None
else:
if num_heads % num_memory_heads != 0:
raise ValueError("num_memory_heads must divide num_heads")
memory_heads_dims = [mtf.Dimension("heads", num_memory_heads)]
query_heads_dims = memory_heads_dims + [
mtf.Dimension("query_heads", num_heads // num_memory_heads)]
return attention.ExpertsAttentionParams(
context.mesh,
query_input_dim=context.model.model_dim,
memory_input_dim=context.model.model_dim,
output_dim=context.model.model_dim,
key_dim=self.kv_dim,
value_dim=self.kv_dim,
query_heads_dims=query_heads_dims,
memory_heads_dims=memory_heads_dims,
variable_dtype=context.variable_dtype,
shared_kv=self.shared_kv,
no_query=False,
ensemble_dim=context.model.ensemble_dim,
combine_dims=self.combine_dims,
keep_query_heads_dims=self.keep_query_heads_dims,
fold_scaling_into_initializer=self.fold_scaling_into_initializer,
context=context,
experts_hparams=self._hparams)
@gin.configurable
class ExpertsEncDecAttention(ExpertsSelfAttention):
"""Expert-layers for EncDecAttention computations."""
def __init__(self, relative_attention_type=None, **kwargs):
super(ExpertsEncDecAttention, self).__init__(
relative_attention_type=relative_attention_type, **kwargs)
def _get_memory_antecedent(self, context):
return context.encoder_output
def call(self, context, x, losses=None):
"""Call the layer."""
return enc_dec_attention(self, self._get_memory_antecedent(context),
context, x, losses)
@gin.configurable
class Synthesizer(SelfAttention):
"""Multi-head Synthesizer layer https://arxiv.org/abs/2005.00743."""
def __init__(self,
num_heads=8,
num_memory_heads=0,
key_value_size=128,
shared_kv=False,
dropout_rate=0.0,
attention_kwargs=None,
relative_attention_type=None,
relative_attention_num_buckets=32,
attention_func=None,
combine_dims=True,
keep_query_heads_dims=False,
synthesize_mode="random_plus_alpha",
fold_scaling_into_initializer=True,
**kwargs):
"""Create a Synthesizer Layer.
Args:
num_heads: an integer
num_memory_heads: an optional integer
key_value_size: an integer
shared_kv: a boolean
dropout_rate: a float
attention_kwargs: a dictionary of kwargs for attention.attention
relative_attention_type: an optional string - one of
(None, "bias", "bias_shared", "contextual")
relative_attention_num_buckets: an integer
attention_func: attention function: None/'hybrid'.
combine_dims: a boolean
keep_query_heads_dims: a boolean
synthesize_mode: a string to select synthesizer variant
fold_scaling_into_initializer: a boolean
**kwargs: additional constructor params
"""
super(Synthesizer, self).__init__(**kwargs)
self.num_heads = num_heads
self.num_memory_heads = num_memory_heads
self.key_value_size = key_value_size
self.shared_kv = shared_kv
self.dropout_rate = dropout_rate
self.attention_kwargs = attention_kwargs or {}
self.relative_attention_type = relative_attention_type
self.relative_attention_num_buckets = relative_attention_num_buckets
self.attention_func = attention_func
self.combine_dims = combine_dims
self.keep_query_heads_dims = keep_query_heads_dims
self.synthesize_mode = synthesize_mode
self.fold_scaling_into_initializer = fold_scaling_into_initializer
self.no_query = False
if "plus" in self.synthesize_mode:
self.shared_kv = False
self.no_query = False
elif "minus" in self.synthesize_mode:
# We still keep the query as the first projection
self.shared_kv = True
self.no_query = False
else:
self.shared_kv = True
self.no_query = True
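# Illustrative summary of how synthesize_mode selects the projections above
# ("random_plus_alpha" is the constructor default; other mode strings are
# shown only to exercise the substring checks):
#   mode containing "plus"  (e.g. "random_plus_alpha"): separate k/v plus a query
#   mode containing "minus":                            shared k/v, query kept
#   any other mode:                                     shared k/v, no query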
def make_params(self, context):
return attention_params(
context=context,
kv_dim=self.kv_dim,
num_heads=self.num_heads,
num_memory_heads=self.num_memory_heads,
shared_kv=self.shared_kv,
no_query=self.no_query,
fold_scaling_into_initializer=self.fold_scaling_into_initializer)
def call(self, context, x, losses=None):
"""Call the layer."""
params = self.make_params(context)
memory_length = self.memory_length(context)
if context.mode == "incremental":
m = x
else:
m = mtf.replace_dimensions(x, context.length_dim, memory_length)
if self.shared_kv:
kv = params.compute_kv(m)
else:
k = params.compute_k(m)
v = params.compute_v(m)
if self.no_query:
# we don't use q for some synthesizer modes that don't use QKV at all.
q = x
else:
q = params.compute_q(x)
if self.shared_kv:
k = kv
v = kv
if context.mode == "incremental":
one_hot = mtf.one_hot(
context.position, memory_length, dtype=context.activation_dtype)
inv_one_hot = 1.0 - one_hot
old_k, old_v = context.get_states(2)
k = old_k * inv_one_hot + k * one_hot
v = old_v * inv_one_hot + v * one_hot
memory_position = mtf.range(context.mesh, memory_length, tf.int32)
else:
memory_position = self.rename_length_to_memory_length(
context.position, context)
if context.mode == "incremental" or context.mode == "first_part":
context.record_new_states([k, v])
o = attention.synthetic_attention(q, k, v, memory_length,
self.kv_dim, self.kv_dim,
self.compute_bias(context,
memory_position,
x,
params.query_heads_dims,
q),
synthesize=True,
synthesize_mode=self.synthesize_mode,
context=context,
**self.attention_kwargs_from_context(
context))
attention_output_shape = self.expected_attention_output_shape(x, params)
attention_output = params.compute_output(
o, output_shape=attention_output_shape)
return self.layer_output_from_attention_output(context, attention_output,
losses)
@gin.configurable
def relative_position_spans(context, num_sentinels=gin.REQUIRED):
"""Compute relative positions between inputs and targets.
Used by enc_dec_attention_bias.
Assumes that inputs and targets were generated by a span-filling objective:
The inputs consist of the original text with some spans removed and replaced
by single sentinels.
The targets consist of the dropped spans, each preceded by a single sentinel.
Sentinels are the last tokens in the vocabulary.
e.g.
inputs: A B C <S> F G H <S>
shifted-targets: <BOS> <S> D E <S> I J K
Relative positions are computed by identifying a target token with the
corresponding sentinel in the input and returning the distance between these
two tokens in the input.
Target tokens which precede all sentinels get identified with the beginning of
the input. So if we apply this to a problem with no sentinels, all target
tokens will be identified with the beginning of the input. We assume this is
the case during incremental decoding, so this code will not work properly to
incrementally decode a problem with sentinels. This may not be an issue,
since the span-filling objective is primarily used for unsupervised
pre-training.
Args:
context: a Context
num_sentinels: an integer. Should have the same value as
SentencePieceVocabulary.extra_ids
Returns:
a Tensor
"""
decoder_id = context.inputs
encoder_id = context.encoder_inputs
decoder_length = context.length_dim
encoder_length = context.encoder_length_dim
mesh = encoder_id.mesh
encoder_pos = mtf.range(mesh, encoder_length, tf.int32)
if decoder_length not in decoder_id.shape.dims:
# we are doing incremental decoding.
# Map the target token to the beginning of the input.
dec_to_enc_pos = 0
else:
vocab_size = context.model.input_vocab_size_unpadded
def sentinel_mask(t):
return mtf.cast(mtf.greater_equal(
t, vocab_size - num_sentinels), tf.int32)
decoder_is_sentinel = sentinel_mask(decoder_id)
encoder_is_sentinel = sentinel_mask(encoder_id)
encoder_segment_id = mtf.cumsum(encoder_is_sentinel, encoder_length)
decoder_segment_id = mtf.cumsum(decoder_is_sentinel, decoder_length)
encoder_sequence_id = context.encoder_sequence_id
decoder_sequence_id = context.sequence_id
if encoder_sequence_id is not None:
# distinguish segments from different sequences
multiplier = max(encoder_length.size, decoder_length.size)
encoder_segment_id += encoder_sequence_id * multiplier
decoder_segment_id += decoder_sequence_id * multiplier
dec_to_enc_pos = mtf.reduce_sum(
mtf.cast(mtf.less(encoder_segment_id, decoder_segment_id), tf.int32),
reduced_dim=encoder_length)
return dec_to_enc_pos - encoder_pos
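# Hand-worked example of the computation above, using the docstring's inputs
# (illustrative only):
#   inputs:          A  B  C  <S>  F  G  H  <S>   (encoder positions 0..7)
#   shifted-targets: <BOS> <S> D  E  <S>  I  J  K
#   encoder_segment_id = cumsum(sentinel mask) = [0, 0, 0, 1, 1, 1, 1, 2]
#   decoder_segment_id = cumsum(sentinel mask) = [0, 1, 1, 1, 2, 2, 2, 2]
#   dec_to_enc_pos[d]  = #(encoder positions with a smaller segment id)
#                      = [0, 3, 3, 3, 7, 7, 7, 7]
# i.e. <BOS> is identified with encoder position 0, the first <S>/D/E with the
# first input sentinel (position 3), and the remaining targets with the second
# sentinel (position 7); the returned tensor is dec_to_enc_pos - encoder_pos.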
@gin.configurable
def enc_dec_attention_bias(layer,
context,
heads_dims,
relative_position_fn=relative_position_spans):
"""Compute bias term for encoder-decoder attention.
Args:
layer: a TransformerLayer
context: a Context
heads_dims: a list of Dimension
relative_position_fn: an optional function
Returns:
a Tensor
"""
biases = []
if context.encoder_sequence_id and context.sequence_id:
visible = mtf.equal(context.sequence_id, context.encoder_sequence_id)
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
if (layer.relative_attention_type == "bias" or
layer.relative_attention_type == "bias_shared"):
buckets_dim = mtf.Dimension(
"buckets", layer.relative_attention_num_buckets)
bias_shape = context.model.ensemble_dims + heads_dims + [buckets_dim]
values = None
cache = layer.relative_attention_type == "bias_shared"
if cache:
cache_key = ("enc_dec_relative_attention_bias", tuple(heads_dims))
if cache_key in context.cache:
values = context.cache[cache_key]
if values is None:
values = mtf.get_variable(
context.mesh, "enc_dec_relative_attention_bias",
bias_shape, dtype=context.variable_dtype)
if cache:
context.cache[cache_key] = values
rel_pos = relative_position_fn(context)
rp_bucket = _relative_position_bucket(
rel_pos,
bidirectional=True,
num_buckets=buckets_dim.size)
biases.append(mtf.gather(values, rp_bucket, buckets_dim))
elif layer.relative_attention_type is not None:
raise ValueError("unrecognized relative_attention_type \"%s\"" %
layer.relative_attention_type)
return mtf.add_n(biases) if biases else None
@gin.configurable
def enc_dec_attention(self_attention_layer, memory_antecedent, context, x,
losses, attention_fn=attention.attention):
"""Multi-head attention over the encoder outputs."""
memory_input_dim = memory_antecedent.shape[-1]
if memory_input_dim != context.model.model_dim:
raise NotImplementedError(
"TODO(noam): support different model_dim in encoder and decoder.")
params = self_attention_layer.make_params(context)
q = params.compute_q(x)
if context.mode == "incremental":
k, v, memory_length = context.get_constant_state()
else:
m = memory_antecedent
if self_attention_layer.shared_kv:
kv = params.compute_kv(m)
k = kv
v = kv
else:
k = params.compute_k(m)
v = params.compute_v(m)
memory_length, = [d for d in m.shape.dims if d.name == "memory_length"]
if context.mode == "first_part":
context.record_constant_state((k, v, memory_length))
bias = enc_dec_attention_bias(self_attention_layer,
context,
params.query_heads_dims)
a = attention_fn(
q, k, v, memory_length, self_attention_layer.kv_dim,
self_attention_layer.kv_dim, bias,
context=context,
**self_attention_layer.attention_kwargs_from_context(context))
attention_output_shape = self_attention_layer.expected_attention_output_shape(
x, params)
attention_output = params.compute_output(
a, output_shape=attention_output_shape)
return self_attention_layer.layer_output_from_attention_output(
context, attention_output, losses)
@gin.configurable
class EncDecAttention(SelfAttention):
"""Multi-head attention over encoder output."""
def __init__(self, relative_attention_type=None, **kwargs):
super(EncDecAttention, self).__init__(
relative_attention_type=relative_attention_type, **kwargs)
def _get_memory_antecedent(self, context):
return context.encoder_output
def call(self, context, x, losses=None):
"""Call the layer."""
return enc_dec_attention(self, self._get_memory_antecedent(context),
context, x, losses,
attention_fn=self.attention_fn)
@property
def attention_fn(self):
return attention.attention
@gin.configurable
class TransparentEncDecAttention(EncDecAttention):
"""Transparent multi-head attention over encoder output."""
def __init__(self,
layers_per_encoder_module=gin.REQUIRED,
layers_per_decoder_module=gin.REQUIRED,
encoder_num_modules=gin.REQUIRED,
decoder_num_modules=gin.REQUIRED,
dropout_rate=0.0,
**kwargs):
"""Create a transparent attention EncDec Layer.
Args:
layers_per_encoder_module: positive integer telling how many layers are in
each repeated module in the encoder
layers_per_decoder_module: positive integer telling how many layers are in
each repeated module in the decoder
encoder_num_modules: positive integer of how many repeated modules there
are in the encoder
decoder_num_modules: positive integer of how many repeated modules there
are in the decoder
dropout_rate: positive float, the dropout rate for the matrix relating
encoder outputs to decoder inputs
**kwargs: additional constructor params
"""
super(TransparentEncDecAttention, self).__init__(**kwargs)
self.layers_per_encoder_module = layers_per_encoder_module
self.layers_per_decoder_module = layers_per_decoder_module
self.encoder_num_modules = encoder_num_modules
self.decoder_num_modules = decoder_num_modules
self.dropout_rate = dropout_rate
def _get_memory_antecedent(self, context):
decoder_module_index = context.layer_index // self.layers_per_decoder_module
decoder_inputs = self._get_decoder_inputs(context)
return decoder_inputs[decoder_module_index]
def _get_decoder_inputs(self, context):
"""Computes the inputs to the decoder when using transparent attention.
We must cache on the context in order to ensure that we are not replicating
variables when the layer's call function is called in different tf variable
scopes.
Args:
context: a Context
Returns:
a list of `self.decoder_num_modules` tensors with shape
[<batch_dims>, length_dim, output_vocab_dim]
"""
if hasattr(context, "decoder_layers_per_module"):
return context.decoder_layers_per_module
encoder_layer_outputs = [
mtf.layers.rename_length_to_memory_length(output)
for output in context.encoder_layer_outputs
]
layers_per_module = self.layers_per_encoder_module
encoder_module_outputs_dim = mtf.Dimension(
"encoder_module_outputs", size=self.encoder_num_modules + 1)
decoder_module_inputs_dim = mtf.Dimension(
"decoder_module_inputs", size=self.decoder_num_modules)
encoder_module_outputs = mtf.stack(
[encoder_layer_outputs[0]] +
encoder_layer_outputs[layers_per_module::layers_per_module],
dim_name="encoder_module_outputs")
stddev = 1.0
if not mtf.layers.unit_scaling_convention():
stddev *= encoder_module_outputs_dim.size ** -0.5
w = mtf.get_variable(
context.mesh,
"w",
mtf.Shape([encoder_module_outputs_dim, decoder_module_inputs_dim]),
initializer=tf.random_normal_initializer(stddev=stddev),
dtype=context.variable_dtype)
w = mtf.dropout(w, context.train, 1.0 - self.dropout_rate)
s = mtf.softmax(w, reduced_dim=encoder_module_outputs_dim)
z = mtf.layers.us_einsum([s, encoder_module_outputs],
reduced_dims=[encoder_module_outputs_dim])
input_per_decoder = mtf.split(
z,
split_dim=decoder_module_inputs_dim,
num_or_size_splits=decoder_module_inputs_dim.size)
context.decoder_layers_per_module = [
mtf.reshape(inpt, z.shape.dims[1:]) for inpt in input_per_decoder
]
return context.decoder_layers_per_module
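# Shape sketch under assumed hyperparameters (illustrative only): with
# layers_per_encoder_module=3, encoder_num_modules=4, decoder_num_modules=4,
# the stack above collects 5 encoder activations (the first recorded output
# plus every third one) into an "encoder_module_outputs" dimension of size 5;
# w then has shape [5, 4], its softmax s mixes the 5 module outputs into 4
# decoder-module inputs, and _get_memory_antecedent hands decoder module i
# the i-th mix.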
@gin.configurable
class LocalSelfAttention(SelfAttention):
"""Multi-head local self-attention layer."""
def __init__(self,
radius=128,
num_heads=8,
num_memory_heads=0,
key_value_size=128,
shared_kv=False,
dropout_rate=0.0,
attention_kwargs=None,):
super(LocalSelfAttention, self).__init__(
num_heads,
num_memory_heads,
key_value_size,
shared_kv,
dropout_rate,
attention_kwargs)
self.radius = radius
def call(self, context, x, losses=None):
"""Call the layer."""
params = self.make_params(context)
q = params.compute_q(x)
if self.shared_kv:
kv = params.compute_kv(x)
k = kv
v = kv
else:
k = params.compute_k(x)
v = params.compute_v(x)
if context.mode == "incremental":
if self.shared_kv:
prev_kv, = context.get_states(1)
else:
prev_k, prev_v = context.get_states(2)
current_position = mtf.equal(
mtf.range(context.mesh, self.window_dim, dtype=tf.int32),
mtf.mod(context.position, self.radius))
if self.shared_kv:
kv = mtf.where(current_position, kv, prev_kv,
output_shape=prev_kv.shape)
k = kv
v = kv
context.record_new_states([kv])
else:
k = mtf.where(current_position, params.compute_k(x), prev_k,
output_shape=prev_k.shape)
v = mtf.where(current_position, params.compute_v(x), prev_v,
output_shape=prev_v.shape)
context.record_new_states([k, v])
window_pos = mtf.range(context.mesh, self.window_dim, tf.int32)
visible = mtf.greater_equal(context.position, window_pos)
bias = attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype)
o = attention.attention(
q,
k,
v,
self.window_dim,
self.kv_dim,
self.kv_dim,
bias,
**self.attention_kwargs_from_context(context))
elif context.length_dim.size <= max(256, self.radius * 4):
# nothing fancy - just do full attention and mask
memory_length = self.rename_length_to_memory_length(
context.position, context)
o = attention.attention(
q, self.rename_length_to_memory_length(k, context),
self.rename_length_to_memory_length(v, context),
self.memory_length(context), self.kv_dim, self.kv_dim,
self.compute_bias(context, memory_length, x, params.query_heads_dims,
q), **self.attention_kwargs_from_context(context))
else:
# fancy local attention algorithm
o = attention.local_attention_1d(
q=q,
k=k,
v=None if self.shared_kv else v,
length_dim=context.length_dim,
key_dim=self.kv_dim,
value_dim=self.kv_dim,
length_dim_num_splits=1, # TODO(noam): look at the layout
autoregressive=context.model.fully_autoregressive,
radius=self.radius,
sequence_id=context.sequence_id,
write_priority=context.write_priority,
read_priority=context.read_priority,
attention_kwargs=self.attention_kwargs_from_context(context))
if context.mode == "first_part":
window_pos = mtf.range(context.mesh, self.window_dim, tf.int32)
pos = mtf.range(context.mesh, context.length_dim, tf.int32)
select_recent = mtf.cast(
mtf.equal(mtf.mod(pos, self.radius), window_pos), x.dtype)
select_recent *= mtf.cast(
mtf.less(pos, context.initial_position), x.dtype)
select_recent *= mtf.cast(
mtf.greater_equal(
pos, context.initial_position - self.radius), x.dtype)
state_shape = (k.shape - [context.length_dim, self.kv_dim]
+ [self.window_dim, self.kv_dim])
k_state = mtf.einsum(
[k, select_recent], output_shape=state_shape,
reduced_dims=[context.length_dim])
context.new_states.append(k_state)
if not self.shared_kv:
v_state = mtf.einsum(
[v, select_recent], output_shape=state_shape,
reduced_dims=[context.length_dim])
context.new_states.append(v_state)
return params.compute_output(o, output_shape=x.shape)
def min_relative_position(self, context):
return 1 - self.radius
def max_relative_position(self, context):
return None if context.model.fully_autoregressive else self.radius
@property
def window_dim(self):
return mtf.Dimension("window", self.radius)
def _relative_position_bucket(relative_position,
bidirectional=True,
num_buckets=32,
max_distance=128):
"""Translate relative position to a bucket number for relative attention.
The relative position is defined as memory_position - query_position, i.e.
the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are
invalid.
We use smaller buckets for small absolute relative_position and larger buckets
for larger absolute relative_positions. All relative positions >=max_distance
map to the same bucket. All relative positions <=-max_distance map to the
same bucket. This should allow for more graceful generalization to longer
sequences than the model has been trained on.
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32
values in the range [0, num_buckets)
"""
ret = 0
n = -relative_position
if bidirectional:
num_buckets //= 2
ret += mtf.to_int32(mtf.less(n, 0)) * num_buckets
n = mtf.abs(n)
else:
n = mtf.maximum(n, 0)
# now n is in the range [0, inf)
max_exact = num_buckets // 2
is_small = mtf.less(n, max_exact)
val_if_large = max_exact + mtf.to_int32(
mtf.log(mtf.to_float(n) / max_exact)
/ math.log(max_distance / max_exact) * (num_buckets - max_exact))
val_if_large = mtf.minimum(val_if_large, num_buckets - 1)
ret += mtf.where(is_small, n, val_if_large)
return ret
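# Hand-worked examples for bidirectional=True, num_buckets=32,
# max_distance=128 (illustrative only):
#   relative_position = -3   -> n = 3 < max_exact = 8          -> bucket 3
#   relative_position = -10  -> 8 + floor(log(10/8)/log(16)*8) -> bucket 8
#   relative_position = -200 -> capped at num_buckets//2 - 1   -> bucket 15
#   relative_position = +10  -> other direction adds 16        -> bucket 24
# With bidirectional=False, positive relative positions (attention to the
# future) all collapse into bucket 0.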
@gin.configurable
class TalkingHeadsSelfAttention(SelfAttention):
"""Experimental Talking-heads self-attention layer.
https://arxiv.org/abs/2003.02436
This is a variant where there are (optionally) extra learned linear
projections on the attention logits and attention weights. These linear
projections are across attention heads (but not across different query or
memory positions).
The user specifies three sets of mtf.Dimension:
key_heads_dims: "heads" dimensions the queries, keys and ther dot-product
softmax_heads_dims: "heads" dimensions for the logits and their softmax
value_heads_dims: "heads" dimensions for the values
If these three sets are identical, then this layer is identical to ordinary
multi-head attention.
If key_heads_dims != softmax_heads_dims, then a learned linear projection
is applied to compute the logits. This projection reduces out dimensions
in (key_heads_dims-softmax_heads_dims) and inserts dimensions in
(softmax_heads_dims-key_heads_dims).
If softmax_heads_dims != value_heads_dims, then a learned linear
projection is applied to the weights (the output of the softmax). This
projection reduces out dimensions in (softmax_heads_dims-value_heads_dims)
and inserts dimensions in (value_heads_dims-softmax_heads_dims).
TPU performance is lousy due to small matrix sizes.
Early experiments show that quality can be significantly better than baseline.
An additional supported option is dynamic talking-heads projections where the
talking-heads projections themselves contain terms that depend on the inputs.
Each of the logits-projection and the weights-projection can depend on either
or both of the query-antecedent X or the memory-antecedent Y. This gives
a total of four dynamic projections which can be enabled individually.
To enable, set the dynamic_projections argument to a list containing
some or all of the strings ["x2l", "m2l", "x2w", "m2w"].
Example:
TalkingHeadsSelfAttention.key_heads_dims = [("key_heads", 12)]
TalkingHeadsSelfAttention.softmax_heads_dims = [("heads", 32)]
TalkingHeadsSelfAttention.value_heads_dims = [("value_heads", 12)]
TalkingHeadsSelfAttention.key_size = 64
TalkingHeadsSelfAttention.value_size = 64
d_model = 1024
We start with an input x
x: [length, d_model]
The input is first transformed into queries, keys and values:
queries: [query_length, key_heads, key_size]
keys: [memory_length, key_heads, key_size]
values: [memory_length, value_heads, value_size]
queries and keys get einsummed to produce a tensor p:
p: [query_length, memory_length, key_heads]
p gets linearly transformed with a learned weight matrix with shape
[key_heads, softmax_heads] to produce logits
logits: [query_length, memory_length, softmax_heads]
take the softmax of logits across memory_length to produce h
h: [query_length, memory_length, softmax_heads]
Now a learned linear projection with shape [softmax_heads, value_heads]
on h produces the weights.
weights: [query_length, memory_length, value_heads]
As usual, we einsum the weights with the values.
o: [query_length, value_heads, value_size]
Finally, project o back to the desired output dimension
y: [query_length, d_model]
Also, this doesn't model-parallelize trivially. To model-parallelize, you
should add one heads-dimension that is present in all of key_heads_dims,
softmax_heads_dims, value_heads_dims. Call this dimension "heads" and shard
that over multiple devices. Then also include additional, different
heads-dimensions for the keys, softmax, and values.
"""
def __init__(self, # pylint: disable=super-init-not-called
key_heads_dims=(("heads", 12),),
softmax_heads_dims=(("heads", 12),),
value_heads_dims=(("heads", 12),),
key_size=64,
value_size=64,
dropout_rate=0.0,
relative_attention_type=None,
relative_attention_num_buckets=32,
dynamic_projections=None,
dynamic_projections_init_scale=1e-2):
"""Create a SelfAttention Layer.
Args:
key_heads_dims: a list of mtf.Dimension or (name, size) pairs
softmax_heads_dims: a list of mtf.Dimension or (name, size) pairs
value_heads_dims: a list of mtf.Dimension or (name, size) pairs
key_size: an integer
value_size: an integer
dropout_rate: a float
relative_attention_type: an optional string - one of
(None, "bias", "bias_shared", "contextual")
relative_attention_num_buckets: an integer
dynamic_projections: an optional sequence containing a subset of
["x2l", "m2l", "x2w", "m2w"] (see class comments)
dynamic_projections_init_scale: a float - initializer variance scaling
factor for these dynamic projections. We have observed learning
difficulties when this value is too large.
"""
self.key_heads_dims = [mtf.convert_to_dimension(d) for d in key_heads_dims]
self.softmax_heads_dims = [
mtf.convert_to_dimension(d) for d in softmax_heads_dims]
self.value_heads_dims = [
mtf.convert_to_dimension(d) for d in value_heads_dims]
self.key_dim = mtf.Dimension("d_k", key_size)
self.value_dim = mtf.Dimension("d_v", value_size)
self.dropout_rate = dropout_rate
self.relative_attention_type = relative_attention_type
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dynamic_projections = dynamic_projections or []
self.dynamic_projections_init_scale = dynamic_projections_init_scale
def compute_q(self, context, x):
# Scale the initializer variance by 1.0/d_k
# This scales the initializer by rsqrt(d_k)
init_scale = 1.0
if not mtf.layers.unit_scaling_convention():
init_scale /= self.key_dim.size
kernel_initializer = mtf.layers.VarianceScalingInitializer(init_scale)
return mtf.layers.dense(
x, reduced_dims=[context.model.model_dim],
new_dims=self.key_heads_dims + [self.key_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="q", expert_dims=context.model.ensemble_dims,
kernel_initializer=kernel_initializer)
def compute_k(self, context, x):
return mtf.layers.dense(
x, reduced_dims=[context.model.model_dim],
new_dims=self.key_heads_dims + [self.key_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="k", expert_dims=context.model.ensemble_dims)
def compute_v(self, context, x):
return mtf.layers.dense(
x, reduced_dims=[context.model.model_dim],
new_dims=self.value_heads_dims + [self.value_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="v", expert_dims=context.model.ensemble_dims)
def compute_y(self, context, u):
return mtf.layers.dense(
u, reduced_dims=self.value_heads_dims + [self.value_dim],
new_dims=[context.model.model_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="y", expert_dims=context.model.ensemble_dims)
def call(self, context, x, losses=None):
"""Call the layer."""
memory_length = self.memory_length(context)
q = self.compute_q(context, x)
if context.mode == "incremental":
m = x
else:
m = mtf.replace_dimensions(x, context.length_dim, memory_length)
k = self.compute_k(context, m)
v = self.compute_v(context, m)
if context.mode == "incremental":
one_hot = mtf.one_hot(
context.position, memory_length, dtype=context.activation_dtype)
inv_one_hot = 1.0 - one_hot
old_k, old_v = context.get_states(2)
k = old_k * inv_one_hot + k * one_hot
v = old_v * inv_one_hot + v * one_hot
memory_position = mtf.range(context.mesh, memory_length, tf.int32)
else:
memory_position = self.rename_length_to_memory_length(
context.position, context)
if context.mode == "incremental" or context.mode == "first_part":
context.record_new_states([k, v])
bias = self.compute_bias(context, memory_position, x,
self.softmax_heads_dims, q)
return self.attention_internal(context, x, m, q, k, v, memory_length, bias)
def attention_internal(self, context, x, m, q, k, v, memory_length, bias):
p = mtf.layers.us_einsum([q, k], reduced_dims=[self.key_dim])
logits = self.talking_heads(
context, p, "logits", self.key_heads_dims, self.softmax_heads_dims,
dynamic_projections_from=(
([x] if "x2l" in self.dynamic_projections else []) +
([m] if "m2l" in self.dynamic_projections else [])))
if bias is not None:
logits += bias
h = mtf.softmax(logits, memory_length)
weights = self.talking_heads(
context, h, "weights", self.softmax_heads_dims, self.value_heads_dims,
dynamic_projections_from=(
([x] if "x2w" in self.dynamic_projections else []) +
([m] if "m2w" in self.dynamic_projections else [])))
# TODO(noam): make dropout_broadcast_dims configurable
dropout_broadcast_dims = [context.length_dim]
weights = mtf.dropout(
weights, context.train, rate=self.dropout_rate,
noise_shape=weights.shape - dropout_broadcast_dims)
u = mtf.einsum([weights, v], reduced_dims=[memory_length])
return self.compute_y(context, u)
def talking_heads(
self, context, inp, name, input_heads_dims, output_heads_dims,
dynamic_projections_from=None):
shared_dims = [d for d in input_heads_dims if d in output_heads_dims]
reduced_dims = [d for d in input_heads_dims if d not in output_heads_dims]
new_dims = [d for d in output_heads_dims if d not in input_heads_dims]
if not (reduced_dims or new_dims):
# Output dimensions are same as input dimensions. Return the input
return inp
elif dynamic_projections_from:
# There are one or more dynamic talking-heads-projections
with tf.variable_scope(name):
# static projection - this is the same as the static projection in the
# "else" case below. We create the weight matrix with get_variable
# instead of calling mtf.layers.dense() so that we can fold the
# static projection into one of the dynamic projections.
static_p_initializer = mtf.layers.VarianceScalingInitializer()(
reduced_dims, new_dims)
static_p_shape = (
context.model.ensemble_dims + shared_dims + reduced_dims + new_dims)
static_p = mtf.get_variable(inp.mesh,
"kernel",
static_p_shape,
initializer=static_p_initializer,
dtype=context.variable_dtype)
ps = []
for i, dp_from in enumerate(dynamic_projections_from):
init_scale = self.dynamic_projections_init_scale
if not mtf.layers.unit_scaling_convention():
init_scale /= mtf.Shape(reduced_dims).size
kernel_initializer = mtf.layers.VarianceScalingInitializer(
init_scale)
ps.append(
mtf.layers.dense(
dp_from, reduced_dims=[context.model.model_dim],
new_dims=shared_dims + reduced_dims + new_dims,
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="%s_dynamic_%d" % (name, i),
expert_dims=context.model.ensemble_dims,
kernel_initializer=kernel_initializer))
# Fold the static projection into one of the dynamic projections.
# Mathematically, we could add all the dynamic projections together
# here, but it would create a very large tensor which contained
# both the query-length and memory-length dimensions, and would
# probably be slower in practice.
ps[0] += static_p
return mtf.add_n(
[mtf.layers.us_einsum([inp, p], reduced_dims=reduced_dims)
for p in ps])
else:
# No dynamic projections. Static talking-heads projection only
return mtf.layers.dense(
inp, reduced_dims=reduced_dims,
new_dims=new_dims,
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name=name, expert_dims=context.model.ensemble_dims + shared_dims)
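# Illustrative shape sketch (hypothetical dims): with
# input_heads_dims=[("key_heads", 12)] and output_heads_dims=[("heads", 32)],
# shared_dims=[], reduced_dims=[key_heads] and new_dims=[heads], so the static
# path applies a [key_heads=12, heads=32] kernel to the logits at every
# (query, memory) position. If dynamic_projections contains "x2l", an extra
# position-dependent [12, 32] kernel is predicted from x by a dense layer over
# model_dim, the static kernel is folded into it, and both paths are einsummed
# with the same input and summed.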
@gin.configurable
class TalkingHeadsEncDecAttention(TalkingHeadsSelfAttention):
"""Talking-heads attention over encoder output.
See comments on TalkingHeadsSelfAttention.
"""
def __init__(self, relative_attention_type=None, **kwargs):
super(TalkingHeadsEncDecAttention, self).__init__(
relative_attention_type=relative_attention_type, **kwargs)
def _get_memory_antecedent(self, context):
return context.encoder_output
def call(self, context, x, losses=None):
"""Call the layer."""
m = self._get_memory_antecedent(context)
memory_input_dim = m.shape[-1]
if memory_input_dim != context.model.model_dim:
raise NotImplementedError(
"TODO(noam): support different model_dim in encoder and decoder.")
q = self.compute_q(context, x)
if context.mode == "incremental":
k, v, memory_length = context.get_constant_state()
else:
k = self.compute_k(context, m)
v = self.compute_v(context, m)
memory_length, = [d for d in m.shape.dims if d.name == "memory_length"]
if context.mode == "first_part":
context.record_constant_state((k, v, memory_length))
bias = enc_dec_attention_bias(self,
context,
self.softmax_heads_dims)
return self.attention_internal(context, x, m, q, k, v, memory_length, bias)
@gin.configurable
class GeneralBilinearSelfAttention(SelfAttention):
"""General Bilinear Self-Attention.
Described in the forthcoming talking-heads paper.
Equivalent to multi-head attention where d_kv == d_model.
It is redundant to have projections on both q and k.
It is redundant to have projections on both v and output.
We therefore omit the projections on k and v, making the two identical.
"""
def __init__(self, # pylint: disable=super-init-not-called
heads_dims=(("heads", 12),),
dropout_rate=0.0,
relative_attention_type=None,
relative_attention_num_buckets=32):
"""Create a GeneralBilinearSelfAttention Layer.
Args:
heads_dims: a list of mtf.Dimension or (name, size) pairs
dropout_rate: a float
relative_attention_type: an optional string - one of
(None, "bias", "bias_shared", "contextual")
relative_attention_num_buckets: an integer
"""
self.heads_dims = [
mtf.convert_to_dimension(d) for d in heads_dims]
self.dropout_rate = dropout_rate
self.relative_attention_type = relative_attention_type
self.relative_attention_num_buckets = relative_attention_num_buckets
def compute_q(self, context, x):
# Scale the initializer variance by 1.0/d_k
# This scales the initializer by rsqrt(d_k)
init_scale = 1.0
if not mtf.layers.unit_scaling_convention():
init_scale /= context.model.model_dim.size
return mtf.layers.dense(
x, reduced_dims=[context.model.model_dim],
new_dims=self.heads_dims + [context.model.model_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="q", expert_dims=context.model.ensemble_dims,
kernel_initializer=mtf.layers.VarianceScalingInitializer(init_scale))
def compute_y(self, context, u):
return mtf.layers.dense(
u, reduced_dims=self.heads_dims + [context.model.model_dim],
new_dims=[context.model.model_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="y", expert_dims=context.model.ensemble_dims)
def call(self, context, x, losses=None):
"""Call the layer."""
memory_length = self.memory_length(context)
q = self.compute_q(context, x)
if context.mode == "incremental":
m = x
else:
m = mtf.replace_dimensions(x, context.length_dim, memory_length)
if context.mode == "incremental":
one_hot = mtf.one_hot(
context.position, memory_length, dtype=context.activation_dtype)
inv_one_hot = 1.0 - one_hot
old_m, = context.get_states(1)
m = old_m * inv_one_hot + one_hot * m
memory_position = mtf.range(context.mesh, memory_length, tf.int32)
else:
memory_position = self.rename_length_to_memory_length(
context.position, context)
if context.mode == "incremental" or context.mode == "first_part":
context.record_new_states([m])
bias = self.compute_bias(context, memory_position, x, self.heads_dims, q)
return self.attention_internal(context, q, m, memory_length, bias)
def attention_internal(self, context, q, m, memory_length, bias):
logits = mtf.layers.us_einsum(
[q, m], reduced_dims=[context.model.model_dim])
if bias is not None:
logits += bias
weights = mtf.softmax(logits, memory_length)
# TODO(noam): make dropout_broadcast_dims configurable
dropout_broadcast_dims = [context.length_dim]
weights = mtf.dropout(
weights, context.train,
rate=self.dropout_rate,
noise_shape=weights.shape - dropout_broadcast_dims)
u = mtf.einsum([weights, m], reduced_dims=[memory_length])
return self.compute_y(context, u)
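# Illustrative shape sketch (batch dims omitted, hypothetical sizes): with
# heads_dims=[("heads", 12)] and d_model=1024,
#   q:      [length, heads=12, d_model=1024]
#   m:      [memory_length, d_model=1024]
#   logits: [length, heads=12, memory_length]   (einsum over d_model)
#   u:      [length, heads=12, d_model=1024]    (einsum over memory_length)
#   y:      [length, d_model=1024]              (compute_y reduces heads, d_model)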
@gin.configurable
class GeneralBilinearEncDecAttention(GeneralBilinearSelfAttention):
"""Talking-heads attention over encoder output.
See comments on GBMSelfAttention.
"""
def __init__(self, relative_attention_type=None, **kwargs):
super(GeneralBilinearEncDecAttention, self).__init__(
relative_attention_type=relative_attention_type, **kwargs)
def _get_memory_antecedent(self, context):
return context.encoder_output
def call(self, context, x, losses=None):
"""Call the layer."""
memory_antecedent = self._get_memory_antecedent(context)
memory_input_dim = memory_antecedent.shape[-1]
if memory_input_dim != context.model.model_dim:
raise NotImplementedError(
"TODO(noam): support different model_dim in encoder and decoder.")
q = self.compute_q(context, x)
if context.mode == "incremental":
m, memory_length = context.get_constant_state()
else:
m = memory_antecedent
memory_length, = [d for d in m.shape.dims if d.name == "memory_length"]
if context.mode == "first_part":
context.record_constant_state((m, memory_length))
bias = enc_dec_attention_bias(self,
context,
self.heads_dims)
return self.attention_internal(context, q, m, memory_length, bias)
@gin.configurable
class BranchedSelfAttention(SelfAttention):
"""Branched self attention."""
def __init__(self, **kwargs):
super(BranchedSelfAttention, self).__init__(
combine_dims=False, keep_query_heads_dims=True, **kwargs)
if self.num_memory_heads != 0:
raise ValueError("Set num_memory_heads to 0 for branched attention.")
self.dense_layer = DenseReluDense()
self.kappa_init = tf.random_uniform_initializer(minval=0.0, maxval=1.0)
self.alpha_init = tf.random_uniform_initializer(minval=0.0, maxval=1.0)
def _constraint(self, z):
"""Keep z non-negative and summing to 1."""
z = mtf.relu(z)
return z / mtf.reduce_sum(z + 10**-4)
def layer_output_from_attention_output(self, context, attention_output,
losses):
heads_dim = mtf.Dimension("heads", self.num_heads)
kappa = mtf.get_variable(
context.mesh,
"kappa",
mtf.Shape([heads_dim]),
initializer=self.kappa_init,
dtype=context.variable_dtype,
constraint=self._constraint)
alpha = mtf.get_variable(
context.mesh,
"alpha",
mtf.Shape([heads_dim]),
initializer=self.alpha_init,
dtype=context.variable_dtype,
constraint=self._constraint)
o = mtf.einsum([attention_output, kappa],
output_shape=attention_output.shape)
o = self.dense_layer.call(context, o, losses)
o = mtf.einsum([o, alpha], output_shape=o.shape)
o = mtf.reduce_sum(o, reduced_dim=heads_dim)
return o
@gin.configurable
class BranchedEncDecAttention(BranchedSelfAttention):
"""Branched attention over encoder output."""
def __init__(self, relative_attention_type=None, **kwargs):
super(BranchedEncDecAttention, self).__init__(
relative_attention_type=relative_attention_type, **kwargs)
def _get_memory_antecedent(self, context):
return context.encoder_output
def call(self, context, x, losses=None):
"""Call the layer."""
return enc_dec_attention(self, self._get_memory_antecedent(context),
context, x, losses)
class Conv1D(transformer.TransformerLayer):
"""Parent class for convolutional layers for common decoding logics.
When convolutional layers are used in the decoder, the incremental decoding
requires common features such as storing and accessing the recurrent state
information. These features do not depend on the specifics of the
convolutional layer (e.g., depthwise convolution, lightweight) as long as they
have the fixed receptive field defined by the filter size. This class
provides the methods for such features.
"""
def record_states_first_part_mode(self,
context,
x,
filter_size,
length_dim_name="length"):
"""Record the states during the first part mode.
l: current layer index
k: convolution filter size
x(l): input tensor to layer `l` for the first_part mode with the shape
[<batch_dims>, length, d_model].
The first_part mode is called once before the incremental mode is called for
the actual decoding process. The purpose is to set the recurrent states in
context.states, which are accessed during the incremental mode via
context.get_states. There are two cases, depending on whether partial
sequences are present or not.
1) with partial sequences
When partial sequences are present, we decode from the position after the
partial sequence, but we need to use the information contained in the
partial sequence.
x(l) = [x1, x2, 0, 0, 0]
context.initial_position = 2 (the actual decoding should start from index
2).
Then we record the state = [0, x1, x2]. If partial sequences are shorter
than the filter size, we zero pad from the left.
2) Without partial sequences
x(l) = [0, 0, 0, 0, 0]
context.initial_position = 0
Then we record the state = [0, 0, 0]
These two cases can be handled with the following pseudocode. Let
i = context.initial_position.
state = x[:, i-filter_size:i, :] and store this as state.
Equivalently we can shift x by filter_size and slice
shifted_x = shift(x, filter_size, length_dim)
state = shifted_x[:, i:i + filter_size, :]
Args:
context: a transformer.Context.
x: a Tensor.
filter_size: an integer - convolution filter size.
length_dim_name: a string - a dimension name for the length mtf.Dimension.
"""
length_dim = x.shape.dims[-2]
# Slice shifted_x[:, i:i + self.filter_size, :]
filter_dim = mtf.Dimension(length_dim_name, filter_size)
indices = mtf.range(x.mesh, filter_dim, dtype=tf.int32)
indices = context.initial_position + indices
# Assumes that x.shape = [<batch_dims>, length_dim, model_dim]
output_shape = mtf.Shape(x.shape.dims[:-2] + [filter_dim] +
x.shape.dims[-1:])
shifted_x = mtf.shift(x, filter_size, length_dim, wrap=False)
state = mtf.gather(
shifted_x, indices, length_dim, output_shape=output_shape)
context.record_new_states([state])
def record_states_incremental_mode(self, context, x, filter_size,
length_dim_name="length"):
"""Record the states during the first part mode.
l: current layer index
t: current decoding time step
k: convolution filter size
x(l, t): input vector to layer `l` at time step `t` for the incremental
mode with the shape [<batch_dims>, d_model].
During the incremental mode, the input to the conv layer x(l, t) does not
have the length dim because the input vector x corresponds to the current
decoding time step. We want to restore the input to the current layer in the
previous time steps (stored in the context.states) and combine with the
input at the current time step. This method does the following.
1) Restore the states: [x(l, t-k), ..., x(l, t-1)]
2) Combine with the current input: [x(l, t-k+1), ..., x(l, t-1), x(l, t)]
3) Store the new state and return it to be used as an input to the conv
layer.
It is important to note that the state being recorded is not used by the
next layer; it is used by the same layer but at the future time steps.
Args:
context: a transformer.Context.
x: a Tensor.
filter_size: an integer - convolution filter size.
length_dim_name: a string - a dimension name for the length mtf.Dimension.
Returns:
x: a Tensor of shape [<batch_dims>, filter_size, d_model].
"""
# Augment x with the states
filter_dim = mtf.Dimension(length_dim_name, filter_size)
input_state = context.get_states(1)[0]
position = mtf.constant(
x.mesh,
filter_size - 1, # Always use the last position.
shape=mtf.Shape(x.shape.dims[:-1]), # Pick out batch dims.
dtype=tf.int32)
# [batch, d_model] -> [batch, filter, d_model]
x = self.update_state(
input_state, x, position, filter_dim, dtype=context.activation_dtype)
# The new state includes the inputs for steps [t - filter_size + 1, ..., t].
context.record_new_states([x])
return x
def update_state(self, old_state, x, position, filter_dim, dtype):
"""Augment the current input to the old state.
[x(l, t-k), ..., x(l, t-1)], x(l, t) ->
[x(l, t-k+1), ..., x(l, t-1), x(l, t)]
Args:
old_state: a Tensor of shape [<batch_dims>, filter_size, d_model]
x: a Tensor of shape [<batch_dims>, d_model]
position: a Tensor of shape [<batch_dims>]
filter_dim: an mtf.Dimension corresponding to the filter size.
dtype: a mtf.VariableDType
Returns:
new_state: a Tensor of shape [<batch_dims>, filter_size, d_model].
"""
# [<batch_dims>, length, d_model]
shifted_state = mtf.shift(old_state, -1, filter_dim, wrap=False)
# [<batch_dims>, length]
one_hot = mtf.one_hot(position, filter_dim, dtype=dtype)
# [<batch_dims>, length, d_model]
shifted_x = one_hot * x
new_state = shifted_state + shifted_x
return new_state
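# Hand-worked example of update_state (illustrative): with filter_size=3,
# old_state = [x(t-3), x(t-2), x(t-1)] and position pointing at the last slot,
#   shifted_state = [x(t-2), x(t-1), 0]     (shift by -1, no wrap)
#   one_hot       = [0, 0, 1]
#   new_state     = [x(t-2), x(t-1), x(t)]
# i.e. the oldest entry is dropped and the current input is appended.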
@gin.configurable
class Conv1DLayer(Conv1D):
"""1D convolution over sequence length with model dim as channels.
One caveat is that this layer does nothing to stop information from bleeding
across packed examples.
"""
def __init__(self, filter_size, output_size, activation="linear"): # pylint: disable=super-init-not-called
"""Create a Conv1DLayer.
Args:
filter_size: a positive integer, the size of convolutional kernel.
output_size: a positive integer, the number of channels in the output.
activation: an optional string function name from namespace mtf, a
function to be applied to the layer output. If not provided or set to
"linear", then no function will be applied.
"""
self._filter_size = filter_size
self._output_size = output_size
self._activation = activation
def call(self, context, x, losses=None):
"""Call the layer."""
if context.mode == "first_part":
self.record_states_first_part_mode(context, x, self.filter_size)
if context.mode == "incremental":
x = self.record_states_incremental_mode(context, x, self.filter_size)
padding = "VALID"
else:
# The first_part mode also needs masking because it may have partial
# sequences.
mask = mtf.cast(
mtf.not_equal(context.inputs, 0), context.activation_dtype)
x *= mask
padding = "SAME"
model_dim = x.shape.dims[-1]
input_dim = mtf.Dimension("input_dim", model_dim.size)
x = mtf.replace_dimensions(x, model_dim, input_dim)
output_dim = mtf.Dimension(model_dim.name, self._output_size)
output = mtf.layers.conv1d(
x,
output_dim=output_dim,
filter_size=self._filter_size,
padding=padding,
filter_initializer=tf.glorot_uniform_initializer())
if context.mode == "incremental":
filter_dim = mtf.Dimension("length", self.filter_size)
# [batch_dims, 1, output_dim] -> [batch_dims, output_dim]
output = mtf.reduce_sum(
output, reduced_dim=mtf.Dimension(filter_dim.name, 1))
if self._activation != "linear":
activation_fn = getattr(mtf, self._activation)
output = activation_fn(output)
return output
@property
def filter_size(self):
return self._filter_size
@gin.configurable
class SeparableConv1DLayer(Conv1D):
"""1D separable convolution over sequence length with model dim as channels.
One caveat is that this layer does nothing to stop information from bleeding
across packed examples.
"""
def __init__(self, # pylint: disable=super-init-not-called
min_relative_pos,
max_relative_pos,
output_size,
depthwise_filter_initializer_scale=1.0,
pointwise_filter_initializer_scale=1.0,
activation="linear"):
"""Create a SeparableConv1DLayer.
The filter size will be `max_relative_pos - min_relative_pos + 1`.
Args:
min_relative_pos: an integer, the inclusive minimum relative position of
the depthwise filter, where a relative position of zero means the left
end of the filter aligns with the left end of the input.
max_relative_pos: an integer, the inclusive maximum relative position of
the depthwise filter, where a relative position of zero means the right
end of the filter aligns with the right end of the input.
output_size: a positive integer, the number of channels in the output.
depthwise_filter_initializer_scale: a positive float, the scale for the
initializer for the depthwise filter.
pointwise_filter_initializer_scale: a positive float, the scale for the
initializer for the pointwise filter.
activation: an optional string function name from namespace mtf, a
function to be applied to the layer output. If not provided or set to
"linear", then no function will be applied.
"""
self._min_relative_pos = min_relative_pos
self._max_relative_pos = max_relative_pos
self._output_size = output_size
self._depthwise_filter_initializer_scale = depthwise_filter_initializer_scale
self._pointwise_filter_initializer_scale = pointwise_filter_initializer_scale
self._activation = activation
def call(self, context, x, losses=None, all_kernel_wts=None):
"""Call the layer."""
if context.mode == "first_part":
self.record_states_first_part_mode(context, x, self.filter_size)
if context.mode == "incremental":
x = self.record_states_incremental_mode(context, x, self.filter_size)
else:
# Mask padding.
# TODO(karishmamalkan): Change the inputs_for_mask_creation to use decoder
# when using with decoder
inputs_for_mask_creation = context.inputs
mask = mtf.cast(
mtf.not_equal(inputs_for_mask_creation, 0), context.activation_dtype)
x *= mask
model_dim = x.shape.dims[-1]
output_dim = mtf.Dimension(model_dim.name, self._output_size)
output = mtf.layers.separable_conv1d(
x,
output_dim=output_dim,
min_relative_pos=self._min_relative_pos,
max_relative_pos=self._max_relative_pos,
depthwise_filter_initializer_scale=self
._depthwise_filter_initializer_scale,
pointwise_filter_initializer_scale=self
._pointwise_filter_initializer_scale,
use_bias=True,
kernel_depth_weights=all_kernel_wts)
if context.mode == "incremental":
filter_dim = mtf.Dimension("length", self.filter_size)
# Drop unnecessary portion [batch, length, d_model] -> [batch, d_model]
# Only the last sequence position is relevant.
output = mtf.gather(output, [self.filter_size - 1], filter_dim)
if self._activation != "linear":
activation_fn = getattr(mtf, self._activation)
output = activation_fn(output)
return output
@property
def filter_size(self):
return self._max_relative_pos - self._min_relative_pos + 1
@gin.configurable
class Conv1DLocalAttn(SeparableConv1DLayer):
"""Lightweight 1D separable convolution over sequence length with d_model as channels.
Lightweight 1D separable convolution over sequence length, separable over
model_dim as channels, containing a fixed number of unique channels
repeated/stacked over the model_dim.
"""
def __init__(self,
min_relative_pos,
max_relative_pos,
output_size,
depthwise_filter_initializer_scale=1.0,
pointwise_filter_initializer_scale=1.0,
activation="linear",
num_unique_depth_filters=1):
"""Create a LightweightConv1DLayer.
The filter size will be `max_relative_pos - min_relative_pos + 1`
The value of the Filter is depthwise separable, and the filter is tied and
repeats at every "num_unique_depth_filters" elements.
Args:
min_relative_pos: an integer, the inclusive minimum relative position of
the depthwise filter, where a relative position of zero means the left
end of the filter aligns with the left end of the input.
max_relative_pos: an integer, the inclusive maximum relative position of
the depthwise filter, where a relative position of zero means the right
end of the filter aligns with the right end of the input.
output_size: a positive integer, the number of channels in the output.
depthwise_filter_initializer_scale: a positive float, the scale for the
initializer for the depthwise filter.
pointwise_filter_initializer_scale: a positive float, the scale for the
initializer for the pointwise filter.
activation: an optional string function name from namespace mtf, a
function to be applied to the layer output. If not provided or set to
"linear", then no function will be applied.
num_unique_depth_filters: The number of unique depth filter values. The
unique filter is repeated along the depth dim every
num_unique_depth_filters elements.
"""
super(Conv1DLocalAttn,
self).__init__(min_relative_pos, max_relative_pos, output_size,
depthwise_filter_initializer_scale,
pointwise_filter_initializer_scale, activation)
self._num_unique_depth_filters = num_unique_depth_filters
assert (self._output_size % self._num_unique_depth_filters == 0), (
"The number of elements in the unique depth filter should exactly "
"divide the number of output channels. You set "
"num_unique_depth_filters=%d, output_size(num_output_channels)=%d") % (
self._num_unique_depth_filters, self._output_size)
def call(self, context, x, losses=None):
"""Call the layer."""
depth_dim = x.shape.dims[-1]
initializer_scale = self._depthwise_filter_initializer_scale or 1.0
kernel_size = self._max_relative_pos - self._min_relative_pos + 1
kernel_initializer = mtf.layers.VarianceScalingInitializer(
scale=initializer_scale / kernel_size)
repeated_kernel_dim = [
mtf.Dimension(
"repeated_kernel_dim",
size=int(depth_dim.size / self._num_unique_depth_filters))
]
all_kernel_wts = []
for i in range(kernel_size):
# get a kernel variable spanning d_model / num_unique_depth_filters channels
# and replicate it num_unique_depth_filters times to span the complete depth
# (d_model) of x
kernel_wt = self.get_kernel_wt(
x,
repeated_kernel_dim=repeated_kernel_dim,
kernel_initializer=kernel_initializer,
i=i,
context=context,
variable_dtype=context.variable_dtype,
master_dtype=tf.float32,
slice_dtype=tf.float32)
repeated_kernel_wts = [kernel_wt] * self._num_unique_depth_filters
repeated_kernel_wts_concat = mtf.concat(
repeated_kernel_wts, concat_dim_name="repeated_kernel_dim")
repeated_kernel_wts_concat = mtf.rename_dimension(
repeated_kernel_wts_concat, "repeated_kernel_dim", "d_model")
all_kernel_wts.append(repeated_kernel_wts_concat)
# modify the kernel weights, such that they are softmaxed over the width of
# the kernel. We do this by stacking the individual kernel positions,
# performing the softmax, and then re-separating the stack.
stacked_kernel_weights = mtf.stack(all_kernel_wts, "new_stack_dim")
softmaxed_kernel_weights = mtf.softmax(
stacked_kernel_weights, reduced_dim=stacked_kernel_weights.shape[0]
) # the softmax is calculated over the new_stack_dim we created
unstacked_kernel_weights = mtf.unstack(softmaxed_kernel_weights,
stacked_kernel_weights.shape[0])
return super(Conv1DLocalAttn, self).call(context, x, losses,
unstacked_kernel_weights)
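# Minimal numpy sketch (illustrative only, not used by the layer) of the two
# steps performed above: the per-position kernel block is repeated along the
# depth axis (the concat over "repeated_kernel_dim"), and the depthwise kernel
# is then normalized with a softmax across the filter width (the
# stack/softmax/unstack). Function name and arguments are hypothetical.
def _np_tied_softmax_kernel_sketch(unique_kernel, num_repeats):
  """unique_kernel: numpy array of shape [filter_size, block_depth]."""
  import numpy as np
  # Tile the per-position block along depth, like the concat above.
  kernel = np.tile(unique_kernel, (1, num_repeats))
  # Softmax over the filter-width axis (numerically stabilized), like the
  # stack/softmax/unstack above.
  exp = np.exp(kernel - kernel.max(axis=0, keepdims=True))
  return exp / exp.sum(axis=0, keepdims=True)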
@gin.configurable
class LightweightConv1DLocalAttn(Conv1DLocalAttn):
"""Lightweight 1D separable convolution over seq_len with d_model as channels.
Lightweight 1D separable convolution over sequence length, with separated over
model_dim as channels, containing a fixed number of unique channels
repeated/stacked over the model_dim.
"""
def get_kernel_wt(self,
x,
repeated_kernel_dim,
kernel_initializer,
i,
context,
variable_dtype,
master_dtype=tf.float32,
slice_dtype=tf.float32):
kernel_wt = mtf.layers.get_dense_kernel_weights(
x,
new_dims=[],
reduced_dims=[],
expert_dims=repeated_kernel_dim,
kernel_initializer=kernel_initializer,
name="lightwt_depthwise_dense_%d" % (i),
variable_dtype=context.variable_dtype,
master_dtype=tf.float32,
slice_dtype=tf.float32)
return kernel_wt
@gin.configurable
class DynamicConv1DLocalAttn(Conv1DLocalAttn):
"""Dynamic 1D separable convolution over seq_len with d_model as channels.
Dynamic kernels predicted based on input at a position of the seq_len. Conv
operation separated over model_dim as channels, containing a fixed number of
unique channels repeated/stacked over the model_dim.
"""
def get_kernel_wt(self,
x,
repeated_kernel_dim,
kernel_initializer,
i,
context,
variable_dtype,
master_dtype=tf.float32,
slice_dtype=tf.float32):
kernel_wt = mtf.layers.dense(
x,
new_dims=repeated_kernel_dim,
reduced_dims=[context.model.model_dim],
expert_dims=[],
kernel_initializer=kernel_initializer,
name="dyn_conv_depthwise_dense_%d" % (i),
variable_dtype=context.variable_dtype,
master_dtype=tf.float32,
slice_dtype=tf.float32)
return kernel_wt
@gin.configurable
class LocalConvAttnBlock(transformer.TransformerLayer):
"""Conv Attention Block for Lightweight and dynamic conv attention.
Lightweight/Dynamic separable convolution over sequence length as described in
https://arxiv.org/pdf/1901.10430.pdf.
"""
def __init__(self,
min_relative_pos,
max_relative_pos,
output_size,
depthwise_filter_initializer_scale=1.0,
pointwise_filter_initializer_scale=1.0,
activation="linear",
num_unique_depth_filters=1,
attention_type="lightweight_conv"):
"""Create a LightweightConv1DAttnBlock.
The filter size will be `max_relative_pos - min_relative_pos + 1`
The value of the Filter is depthwise separable, and the filter is tied and
repeats at every "num_unique_depth_filters" elements.
Args:
      min_relative_pos: an integer, the inclusive minimum relative position of
the depthwise filter, where a relative position of zero means the left
end of the filter aligns with the left end of the input.
max_relative_pos: an integer, the inclusive maximum relative position of
the depthwise filter, where a relative position of zero means the right
end of the filter aligns with the right end of the input.
output_size: a positive integer, the number of channels in the output.
      depthwise_filter_initializer_scale: a positive integer, the scale for the
        initializer for the depthwise filter.
      pointwise_filter_initializer_scale: a positive integer, the scale for the
        initializer for the pointwise filter.
activation: an optional string function name from namespace mtf, a
function to be applied to the layer output. If not provided or set to
"linear", then no function will be applied.
num_unique_depth_filters: The number of unique depth filter values. The
unique filter is repeated along the depth dim every
num_unique_depth_filters elements.
attention_type: Type of conv attn -"lightweight_conv"/"dynamic_conv"
"""
if attention_type == "lightweight_conv":
self.conv_local_attn_layer = LightweightConv1DLocalAttn(
min_relative_pos, max_relative_pos, output_size,
depthwise_filter_initializer_scale,
pointwise_filter_initializer_scale, activation)
elif attention_type == "dynamic_conv":
self.conv_local_attn_layer = DynamicConv1DLocalAttn(
min_relative_pos, max_relative_pos, output_size,
depthwise_filter_initializer_scale,
pointwise_filter_initializer_scale, activation)
else:
raise NotImplementedError("This attention type not implemented")
def call(self, context, x, losses=None):
"""Call the layer."""
gated_ip = mtf.layers.dense_product(
x,
reduced_dims=[context.model.model_dim],
new_dims=[context.model.model_dim],
activation_functions=["linear", "sigmoid"],
variable_dtype=context.variable_dtype,
name="local_conv_inp")
attn_output = self.conv_local_attn_layer.call(context, gated_ip, losses)
op_projection = mtf.layers.dense(
attn_output,
reduced_dims=[context.model.model_dim],
new_dims=[context.model.model_dim],
activation=None,
variable_dtype=context.variable_dtype,
name="local_conv_attn_op_projection")
return op_projection
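# LocalConvAttnBlock.call above follows the block structure from the referenced
# paper (a rough summary, not a specification): input -> GLU gate
# (linear * sigmoid) -> depthwise conv "attention" over the sequence length ->
# output linear projection.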
@gin.configurable
class ParallelLayer(transformer.TransformerLayer):
"""Multiple layers in parallel.
Outputs are summed and divided by sqrt(n).
"""
def __init__(self,
layer_classes=(DenseReluDense, SelfAttention),
use_scope=True):
"""Create a ParallelLayer.
Args:
layer_classes: a list of TransformerLayer classes
use_scope: boolean, default True, which indicates whether to use unique
variable names for each parallel_layer. Here for backward compatibility.
"""
self.layer_classes = [l() for l in layer_classes]
self.use_scope = use_scope
def call(self, context, x, losses=None):
"""Call the layer."""
layer_outputs = []
if self.use_scope:
# Provide unique variable name scopes to avoid overwriting.
for i, l in enumerate(self.layer_classes):
with tf.variable_scope("parallel_layer_%d" % i):
layer_output = l.call(context, x, losses=losses)
layer_outputs.append(layer_output)
else:
layer_outputs = [
l.call(context, x, losses=losses) for l in self.layer_classes
]
return mtf.add_n(layer_outputs) * (len(self.layer_classes)**-0.5)
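    # Dividing by sqrt(n) keeps the scale of the summed output comparable to a
    # single layer's output (assuming the parallel outputs are roughly
    # independent and similarly scaled).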
| apache-2.0 |
Alwnikrotikz/marinemap | lingcod/common/registration_backend/__init__.py | 3 | 1718 | from registration.backends.default import DefaultBackend
from django.db import transaction
from django import forms
from django.contrib.sites.models import Site, RequestSite
from django.contrib.auth.models import User, Group
from registration.models import RegistrationManager, RegistrationProfile
from registration.forms import RegistrationForm
from registration import signals
from django.conf import settings
class CustomRegistrationForm(RegistrationForm):
first_name = forms.CharField(label="First Name")
last_name = forms.CharField(label="Last Name")
class LingcodBackend(DefaultBackend):
def get_form_class(self, request):
return CustomRegistrationForm
def register(self, request, **kwargs):
"""
Given a username, firstname, lastname, email address and password,
register a new user account, which will initially be inactive.
See django-registration docs for more info
"""
username, email, password, first, last = kwargs['username'], kwargs['email'], kwargs['password1'], \
kwargs['first_name'], kwargs['last_name']
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
new_user = RegistrationProfile.objects.create_inactive_user(username, email, password, site)
new_user.first_name = first
new_user.last_name = last
new_user.is_active = False
webreg_group = Group.objects.get(name=settings.GROUP_REGISTERED_BY_WEB)
new_user.groups.add(webreg_group)
new_user.save()
signals.user_registered.send(sender=self.__class__, user=new_user, request=request)
return new_user
| bsd-3-clause |
tarunbhardwaj/trytond-magento | wizard.py | 3 | 14359 | # -*- coding: utf-8 -*-
import magento
import json
from .api import Core
from trytond.model import ModelView, fields
from trytond.pool import PoolMeta, Pool
from trytond.transaction import Transaction
from trytond.pyson import PYSONEncoder, Eval
from trytond.wizard import (
Wizard, StateView, Button, StateAction, StateTransition
)
__all__ = [
'ExportMagentoShipmentStatusStart',
'ExportMagentoShipmentStatus', 'ConfigureMagento',
'TestMagentoConnectionStart', 'ImportWebsitesStart',
'ImportStoresStart', 'FailureStart', 'SuccessStart',
]
__metaclass__ = PoolMeta
class ExportMagentoShipmentStatusStart(ModelView):
"Export Shipment Status View"
__name__ = 'magento.wizard_export_shipment_status.start'
message = fields.Text("Message", readonly=True)
class ExportMagentoShipmentStatus(Wizard):
"""
Export Shipment Status Wizard
Exports shipment status for sale orders related to current store view
"""
__name__ = 'magento.wizard_export_shipment_status'
start = StateView(
'magento.wizard_export_shipment_status.start',
'magento.wizard_export_magento_shipment_status_view_start_form',
[
Button('Cancel', 'end', 'tryton-cancel'),
Button('Continue', 'export_', 'tryton-ok', default=True),
]
)
export_ = StateAction('sale.act_sale_form')
def default_start(self, data):
"""
Sets default data for wizard
:param data: Wizard data
"""
Channel = Pool().get('sale.channel')
channel = Channel(Transaction().context.get('active_id'))
channel.validate_magento_channel()
return {
'message':
"This wizard will export shipment status for all the " +
"shipments related to this store view. To export tracking " +
"information also for these shipments please check the " +
"checkbox for Export Tracking Information on Store View."
}
def do_export_(self, action):
"""Handles the transition"""
Channel = Pool().get('sale.channel')
channel = Channel(Transaction().context.get('active_id'))
channel.validate_magento_channel()
sales = channel.export_shipment_status_to_magento()
action['pyson_domain'] = PYSONEncoder().encode(
[('id', 'in', map(int, sales))]
)
return action, {}
def transition_export_(self):
return 'end'
class ConfigureMagento(Wizard):
"""
Wizard To Configure Magento
"""
__name__ = 'magento.wizard_configure_magento'
start = StateView(
'magento.wizard_test_connection.start',
'magento.wizard_test_magento_connection_view_form',
[
Button('Cancel', 'end', 'tryton-cancel'),
Button('Next', 'website', 'tryton-go-next', 'True'),
]
)
website = StateTransition()
import_website = StateView(
'magento.wizard_import_websites.start',
'magento.wizard_import_websites_view_form',
[
Button('Next', 'store', 'tryton-go-next', 'True'),
]
)
store = StateTransition()
import_store = StateView(
'magento.wizard_import_stores.start',
'magento.wizard_import_stores_view_form',
[
Button('Next', 'success', 'tryton-go-next', 'True'),
]
)
success = StateView(
'magento.wizard_configuration_success.start',
'magento.wizard_configuration_success_view_form',
[
Button('Ok', 'end', 'tryton-ok')
]
)
failure = StateView(
'magento.wizard_configuration_failure.start',
'magento.wizard_configuration_failure_view_form',
[
Button('Ok', 'end', 'tryton-ok')
]
)
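    # Rough wizard flow (illustrative summary): start -> website; if the
    # channel already has a website and store configured, 'website' goes
    # straight to 'end' (or 'failure' when validation fails); otherwise it
    # continues with import_website -> store -> import_store -> success.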
def default_start(self, data):
"""
Test the connection for current magento channel
"""
Channel = Pool().get('sale.channel')
magento_channel = Channel(Transaction().context.get('active_id'))
magento_channel.validate_magento_channel()
# Test Connection
magento_channel.test_magento_connection()
return {
'channel': magento_channel.id
}
def transition_website(self):
"""
Import websites for current magento channel
"""
magento_channel = self.start.channel
self.import_website.__class__.magento_websites.selection = \
self.get_websites()
if not (
magento_channel.magento_website_id and
magento_channel.magento_store_id
):
return 'import_website'
if not self.validate_websites():
return 'failure'
return 'end'
def transition_store(self):
"""
        Populate the store selection for the selected website
"""
self.import_store.__class__.magento_stores.selection = \
self.get_stores()
return 'import_store'
def default_success(self, data):
"""
        Save the selected website and store on the sale channel
"""
channel = self.start.channel
imported_store = self.import_store.magento_stores
imported_website = self.import_website.magento_websites
magento_website = json.loads(imported_website)
channel.magento_website_id = magento_website['id']
channel.magento_website_name = magento_website['name']
channel.magento_website_code = magento_website['code']
magento_store = json.loads(imported_store)
channel.magento_store_id = magento_store['store_id']
channel.magento_store_name = magento_store['name']
channel.save()
return {}
def get_websites(self):
"""
Returns the list of websites
"""
magento_channel = self.start.channel
with Core(
magento_channel.magento_url, magento_channel.magento_api_user,
magento_channel.magento_api_key
) as core_api:
websites = core_api.websites()
selection = []
for website in websites:
# XXX: An UGLY way to map json to selection, fix me
website_data = {
'code': website['code'],
'id': website['website_id'],
'name': website['name']
}
website_data = json.dumps(website_data)
selection.append((website_data, website['name']))
return selection
def get_stores(self):
"""
Return list of all stores
"""
magento_channel = self.start.channel
selected_website = json.loads(self.import_website.magento_websites)
with Core(
magento_channel.magento_url, magento_channel.magento_api_user,
magento_channel.magento_api_key
) as core_api:
stores = core_api.stores(selected_website['id'])
all_stores = []
for store in stores:
# Create the new dictionary of required values from a dictionary,
# and convert it into the string
store_data = {
'store_id': store['default_store_id'],
'name': store['name']
}
store_data = json.dumps(store_data)
all_stores.append((store_data, store['name']))
return all_stores
def validate_websites(self):
"""
Validate the website of magento channel
"""
magento_channel = self.start.channel
current_website_configurations = {
'code': magento_channel.magento_website_code,
'id': str(magento_channel.magento_website_id),
'name': magento_channel.magento_website_name
}
current_website = (
json.dumps(current_website_configurations),
magento_channel.magento_website_name
)
if current_website not in self.get_websites():
return False
return True
class TestMagentoConnectionStart(ModelView):
"Test Connection"
__name__ = 'magento.wizard_test_connection.start'
channel = fields.Many2One(
'sale.channel', 'Sale Channel', required=True, readonly=True
)
class ImportWebsitesStart(ModelView):
"""
Import Websites Start View
"""
__name__ = 'magento.wizard_import_websites.start'
magento_websites = fields.Selection([], 'Select Website', required=True)
class ImportStoresStart(ModelView):
"""
Import stores from websites
"""
__name__ = 'magento.wizard_import_stores.start'
magento_stores = fields.Selection([], 'Select Store', required=True)
class FailureStart(ModelView):
"""
Failure wizard
"""
__name__ = 'magento.wizard_configuration_failure.start'
class SuccessStart(ModelView):
"""
Get Done
"""
__name__ = 'magento.wizard_configuration_success.start'
class UpdateMagentoCatalogStart(ModelView):
'Update Catalog View'
__name__ = 'magento.update_catalog.start'
class UpdateMagentoCatalog(Wizard):
'''
Update Catalog
This is a wizard to update already imported products
'''
__name__ = 'magento.update_catalog'
start = StateView(
'magento.update_catalog.start',
'magento.magento_update_catalog_start_view_form', [
Button('Cancel', 'end', 'tryton-cancel'),
Button('Continue', 'update_', 'tryton-ok', default=True),
]
)
update_ = StateAction('product.act_template_form')
def do_update_(self, action):
"""Handles the transition"""
Channel = Pool().get('sale.channel')
channel = Channel(Transaction().context.get('active_id'))
channel.validate_magento_channel()
product_template_ids = self.update_products(channel)
action['pyson_domain'] = PYSONEncoder().encode(
[('id', 'in', product_template_ids)])
return action, {}
def transition_import_(self):
return 'end'
def update_products(self, channel):
"""
Updates products for current magento_channel
:param channel: Browse record of channel
:return: List of product IDs
"""
ChannelListing = Pool().get('product.product.channel_listing')
products = []
channel_listings = ChannelListing.search([
            ('channel', '=', channel.id),
('state', '=', 'active'),
])
with Transaction().set_context({'current_channel': channel.id}):
for listing in channel_listings:
products.append(
listing.product.update_from_magento()
)
return map(int, products)
class ExportDataWizardConfigure(ModelView):
"Export Data Start View"
__name__ = 'sale.channel.export_data.configure'
category = fields.Many2One(
'product.category', 'Magento Category', states={
'required': Eval('channel_source') == 'magento',
'invisible': Eval('channel_source') != 'magento',
}, depends=['channel_source'], domain=[('magento_ids', 'not in', [])],
)
attribute_set = fields.Selection(
[], 'Attribute Set', states={
'required': Eval('channel_source') == 'magento',
'invisible': Eval('channel_source') != 'magento',
}, depends=['channel_source'],
)
channel_source = fields.Char("Channel Source")
@classmethod
def get_attribute_sets(cls):
"""Get the list of attribute sets from magento for the current channel
:return: Tuple of attribute sets where each tuple consists of (ID,Name)
"""
Channel = Pool().get('sale.channel')
if not Transaction().context.get('active_id'):
return []
channel = Channel(Transaction().context['active_id'])
channel.validate_magento_channel()
with magento.ProductAttributeSet(
channel.magento_url, channel.magento_api_user,
channel.magento_api_key
) as attribute_set_api:
attribute_sets = attribute_set_api.list()
return [(
attribute_set['set_id'], attribute_set['name']
) for attribute_set in attribute_sets]
@classmethod
def fields_view_get(cls, view_id=None, view_type='form'):
"""This method is overridden to populate the selection field for
attribute_set with the attribute sets from the current channel's
counterpart on magento.
        This overriding has to be done because `active_id` is not available
        if :meth:`get_attribute_sets` is called directly from the field.
"""
rv = super(
ExportDataWizardConfigure, cls
).fields_view_get(view_id, view_type)
rv['fields']['attribute_set']['selection'] = cls.get_attribute_sets()
return rv
class ExportDataWizard:
"Wizard to export data to external channel"
__name__ = 'sale.channel.export_data'
configure = StateView(
'sale.channel.export_data.configure',
'magento.export_data_configure_view_form',
[
Button('Cancel', 'end', 'tryton-cancel'),
Button('Continue', 'next', 'tryton-go-next', default=True),
]
)
def default_configure(self, data):
Channel = Pool().get('sale.channel')
channel = Channel(Transaction().context.get('active_id'))
return {
'channel_source': channel.source
}
def transition_next(self):
Channel = Pool().get('sale.channel')
channel = Channel(Transaction().context.get('active_id'))
if channel.source == 'magento':
return 'configure'
return super(ExportDataWizard, self).transition_next()
def transition_export_(self):
"""
Export the products for the selected category on this channel
"""
Channel = Pool().get('sale.channel')
channel = Channel(Transaction().context['active_id'])
if channel.source != 'magento':
return super(ExportDataWizard, self).transition_export_()
with Transaction().set_context({
'current_channel': channel.id,
            'magento_attribute_set': self.configure.attribute_set,
            'category': self.configure.category,
}):
return super(ExportDataWizard, self).transition_export_()
| bsd-3-clause |
c2g14/2015cd_0512 | static/Brython3.1.1-20150328-091302/Lib/_thread.py | 740 | 4879 | """Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
import _thread
except ImportError:
import _dummy_thread as _thread
"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
'interrupt_main', 'LockType']
# A dummy value
TIMEOUT_MAX = 2**31
# NOTE: this module can be imported early in the extension building process,
# and so top level imports of other modules should be avoided. Instead, all
# imports are done when needed on a function-by-function basis. Since threads
# are disabled, the import lock should not be an issue anyway (??).
error = RuntimeError
def start_new_thread(function, args, kwargs={}):
"""Dummy implementation of _thread.start_new_thread().
Compatibility is maintained by making sure that ``args`` is a
tuple and ``kwargs`` is a dictionary. If an exception is raised
and it is SystemExit (which can be done by _thread.exit()) it is
caught and nothing is done; all other exceptions are printed out
by using traceback.print_exc().
If the executed function calls interrupt_main the KeyboardInterrupt will be
raised when the function returns.
"""
if type(args) != type(tuple()):
raise TypeError("2nd arg must be a tuple")
if type(kwargs) != type(dict()):
raise TypeError("3rd arg must be a dict")
global _main
_main = False
try:
function(*args, **kwargs)
except SystemExit:
pass
except:
import traceback
traceback.print_exc()
_main = True
global _interrupt
if _interrupt:
_interrupt = False
raise KeyboardInterrupt
def exit():
"""Dummy implementation of _thread.exit()."""
raise SystemExit
def get_ident():
"""Dummy implementation of _thread.get_ident().
Since this module should only be used when _threadmodule is not
available, it is safe to assume that the current process is the
only thread. Thus a constant can be safely returned.
"""
return -1
def allocate_lock():
"""Dummy implementation of _thread.allocate_lock()."""
return LockType()
def stack_size(size=None):
"""Dummy implementation of _thread.stack_size()."""
if size is not None:
raise error("setting thread stack size not supported")
return 0
class LockType(object):
"""Class implementing dummy implementation of _thread.LockType.
Compatibility is maintained by maintaining self.locked_status
which is a boolean that stores the state of the lock. Pickling of
the lock, though, should not be done since if the _thread module is
then used with an unpickled ``lock()`` from here problems could
occur from this class not having atomic methods.
"""
def __init__(self):
self.locked_status = False
def acquire(self, waitflag=None, timeout=-1):
"""Dummy implementation of acquire().
For blocking calls, self.locked_status is automatically set to
True and returned appropriately based on value of
``waitflag``. If it is non-blocking, then the value is
actually checked and not set if it is already acquired. This
is all done so that threading.Condition's assert statements
aren't triggered and throw a little fit.
"""
if waitflag is None or waitflag:
self.locked_status = True
return True
else:
if not self.locked_status:
self.locked_status = True
return True
else:
if timeout > 0:
import time
time.sleep(timeout)
return False
__enter__ = acquire
def __exit__(self, typ, val, tb):
self.release()
def release(self):
"""Release the dummy lock."""
# XXX Perhaps shouldn't actually bother to test? Could lead
# to problems for complex, threaded code.
if not self.locked_status:
raise error
self.locked_status = False
return True
def locked(self):
return self.locked_status
# Used to signal that interrupt_main was called in a "thread"
_interrupt = False
# True when not executing in a "thread"
_main = True
def interrupt_main():
"""Set _interrupt flag to True to have start_new_thread raise
KeyboardInterrupt upon exiting."""
if _main:
raise KeyboardInterrupt
else:
global _interrupt
_interrupt = True
# Brython-specific to avoid circular references between threading and _threading_local
class _local:
pass | agpl-3.0 |
artifacts/AFCache | src/python/afcpkg.py | 2 | 8835 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2008 Artifacts - Fine Software Development
# http://www.artifacts.de
# Author: Martin Borho ([email protected])
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import sys
import time
import logging
import mimetypes
import fnmatch
from urlparse import urlparse
from optparse import OptionParser
from zipfile import ZipFile
rfc1123_format = '%a, %d %b %Y %H:%M:%S GMT+00:00'
# add mimetypes
mimetypes.add_type('application/json', '.json', strict=True)
class AFCachePackager(object):
def __init__(self, **kwargs):
self.maxage = kwargs.get('maxage')
self.baseurl = kwargs.get('baseurl')
if not self.baseurl:
self.baseurl = 'afcpkg://localhost'
self.lastmodfile = kwargs.get('lastmodfile')
self.lastmodplus = kwargs.get('lastmodplus')
self.lastmodminus = kwargs.get('lastmodminus')
self.folder = kwargs.get('folder')
self.include_all = kwargs.get('include_all')
self.outfile = kwargs.get('outfile')
if not self.outfile:
self.outfile = 'afcache-archive.zip'
self.max_size = kwargs.get('max_size')
self.excludes = kwargs.get('excludes', [])
self.mime = kwargs.get('mime')
self.errors = []
self.logger = kwargs.get('logger',logging.getLogger(__file__))
self._check_input()
def _check_input(self):
if not self.folder:
self.errors.append('import-folder (--folder) is missing')
elif not os.path.isdir(self.folder):
self.errors.append('import-folder does not exists')
if not self.maxage:
self.errors.append('maxage is missing')
def _get_host(self, baseurl):
p = urlparse(baseurl)
if p.hostname:
return p.hostname
else:
self.errors.append('baseurl invalid')
return None
def build_zipcache(self):
manifest = []
hostname = self._get_host(self.baseurl)
if self.errors:
return None
try:
zip = ZipFile(self.outfile, 'w')
except IOError, e:
self.logger.error('exiting: creation of zipfile failed!')
return None
else:
for dirpath, dirnames, filenames in os.walk(self.folder):
# skip empty dirs
if not filenames:
continue
for name in filenames:
path = os.path.join(dirpath, name)
                # skip hidden files unless include_all is set
if not self.include_all:
if name.startswith('.') or path.find('/.') > -1:
self.logger.info("skipping "+path)
continue
                # skip files larger than max_size, if a size limit was given
if self.max_size and (os.path.getsize(path) > self.max_size):
self.logger.info("skipping big file "+path)
continue
                # skip paths matching an exclude pattern, if any were given
if self.excludes:
exclude_file = None
for ex_filter in self.excludes:
if fnmatch.fnmatch(path, ex_filter):
exclude_file = True
self.logger.info("excluded "+path)
break
if exclude_file: continue
# detect mime-type
mime_type = ''
if self.mime:
mime_tuple = mimetypes.guess_type(path, False)
if mime_tuple[0]: mime_type = mime_tuple[0]
else: self.logger.warning("mime-type unknown: "+path)
# handle lastmodified
if self.lastmodfile: lastmod = os.path.getmtime(os.path.join(dirpath, name))
else: lastmod = time.time()
if self.lastmodplus: lastmod += self.lastmodplus
elif self.lastmodminus: lastmod -= self.lastmodminus
# handle path forms
rel_path = os.path.join(dirpath.replace(os.path.normpath(self.folder),''),name)
exported_path = hostname+rel_path
# add data
self.logger.info("adding "+ exported_path)
zip.write(path, exported_path)
# add manifest line
last_mod_date = time.strftime(rfc1123_format,time.gmtime(lastmod))
expire_date = time.strftime(rfc1123_format,time.gmtime(lastmod+self.maxage))
manifest_line = '%s ; %s ; %s' % (self.baseurl+rel_path, last_mod_date, expire_date)
# add mime type
if self.mime:
manifest_line += ' ; '+mime_type
manifest.append(manifest_line)
# add manifest to zip
self.logger.info("adding manifest")
zip.writestr("manifest.afcache", "\n".join(manifest))
return True
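# A resulting manifest.afcache line looks roughly like this (illustrative values):
#   http://www.foo.bar/css/site.css ; Mon, 01 Mar 2010 12:00:00 GMT+00:00 ; Mon, 01 Mar 2010 13:00:00 GMT+00:00 ; text/css
# The trailing mime type is appended only when --mime is given.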
def main():
logging.basicConfig(level=logging.DEBUG,format='%(asctime)s %(levelname)-2s %(message)s')
logger = logging.getLogger(__file__)
usage = "Usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option("--maxage", dest="maxage", type="int", help="max-age in seconds")
parser.add_option("--baseurl", dest="baseurl",
help="base url, e.g. http://www.foo.bar (WITHOUT trailig slash)")
parser.add_option("--lastmodifiedfile", dest="lastmodfile", action="store_true",
help="use lastmodified from file instead of now")
parser.add_option("--lastmodifiedplus", dest="lastmodplus", type="int",
help="add n seconds to file's lastmodfied date")
parser.add_option("--lastmodifiedminus", dest="lastmodminus", type="int",
help="substract n seconds from file's lastmodfied date")
parser.add_option("--folder", dest="folder",
help="folder containing resources")
parser.add_option("-a", dest="include_all", action="store_true",
help="include all files. By default, files starting with a dot are excluded.")
parser.add_option("--outfile", dest="outfile",
help="Output filename. Default: afcache-archive.zip")
parser.add_option("--maxItemFileSize", dest="max_size", type="int",
help="Maximum filesize of a cacheable item.")
parser.add_option("--exclude", dest="excludes",action="append",
help="Regexp filter for filepaths. Add one --exclude for every pattern.")
parser.add_option("--mime", dest="mime", action="store_true",
help="add file mime types to manifest.afcache")
(options, args) = parser.parse_args()
packager = AFCachePackager(
maxage=options.maxage,
baseurl=options.baseurl,
lastmodfile=options.lastmodfile,
lastmodplus=options.lastmodplus,
lastmodminus=options.lastmodminus,
folder=options.folder,
include_all=options.include_all,
outfile=options.outfile,
max_size=options.max_size,
excludes=options.excludes,
mime=options.mime,
logger=logger
)
packager.build_zipcache()
if packager.errors:
print "Error: "+"\nError: ".join(packager.errors)
if __name__ == "__main__":
main()
| apache-2.0 |
ujenmr/ansible | lib/ansible/module_utils/aws/elb_utils.py | 74 | 3399 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from ansible.module_utils.ec2 import AWSRetry
# Non-ansible imports
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass
def get_elb(connection, module, elb_name):
"""
Get an ELB based on name. If not found, return None.
:param connection: AWS boto3 elbv2 connection
:param module: Ansible module
:param elb_name: Name of load balancer to get
:return: boto3 ELB dict or None if not found
"""
try:
return _get_elb(connection, module, elb_name)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e)
@AWSRetry.jittered_backoff()
def _get_elb(connection, module, elb_name):
"""
Get an ELB based on name using AWSRetry. If not found, return None.
:param connection: AWS boto3 elbv2 connection
:param module: Ansible module
:param elb_name: Name of load balancer to get
:return: boto3 ELB dict or None if not found
"""
try:
load_balancer_paginator = connection.get_paginator('describe_load_balancers')
return (load_balancer_paginator.paginate(Names=[elb_name]).build_full_result())['LoadBalancers'][0]
except (BotoCoreError, ClientError) as e:
if e.response['Error']['Code'] == 'LoadBalancerNotFound':
return None
else:
raise e
def get_elb_listener(connection, module, elb_arn, listener_port):
"""
Get an ELB listener based on the port provided. If not found, return None.
:param connection: AWS boto3 elbv2 connection
:param module: Ansible module
:param elb_arn: ARN of the ELB to look at
:param listener_port: Port of the listener to look for
:return: boto3 ELB listener dict or None if not found
"""
try:
listener_paginator = connection.get_paginator('describe_listeners')
listeners = (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=elb_arn).build_full_result())['Listeners']
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e)
l = None
for listener in listeners:
if listener['Port'] == listener_port:
l = listener
break
return l
def get_elb_listener_rules(connection, module, listener_arn):
"""
Get rules for a particular ELB listener using the listener ARN.
:param connection: AWS boto3 elbv2 connection
:param module: Ansible module
:param listener_arn: ARN of the ELB listener
:return: boto3 ELB rules list
"""
try:
return AWSRetry.jittered_backoff()(connection.describe_rules)(ListenerArn=listener_arn)['Rules']
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e)
def convert_tg_name_to_arn(connection, module, tg_name):
"""
Get ARN of a target group using the target group's name
:param connection: AWS boto3 elbv2 connection
:param module: Ansible module
:param tg_name: Name of the target group
:return: target group ARN string
"""
try:
response = AWSRetry.jittered_backoff()(connection.describe_target_groups)(Names=[tg_name])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e)
tg_arn = response['TargetGroups'][0]['TargetGroupArn']
return tg_arn
| gpl-3.0 |
eugena/django | tests/foreign_object/test_empty_join.py | 232 | 1498 | from django.test import TestCase
from .models import SlugPage
class RestrictedConditionsTests(TestCase):
def setUp(self):
slugs = [
'a',
'a/a',
'a/b',
'a/b/a',
'x',
'x/y/z',
]
SlugPage.objects.bulk_create([SlugPage(slug=slug) for slug in slugs])
def test_restrictions_with_no_joining_columns(self):
"""
Test that it's possible to create a working related field that doesn't
use any joining columns, as long as an extra restriction is supplied.
"""
a = SlugPage.objects.get(slug='a')
self.assertListEqual(
[p.slug for p in SlugPage.objects.filter(ascendants=a)],
['a', 'a/a', 'a/b', 'a/b/a'],
)
self.assertEqual(
[p.slug for p in a.descendants.all()],
['a', 'a/a', 'a/b', 'a/b/a'],
)
aba = SlugPage.objects.get(slug='a/b/a')
self.assertListEqual(
[p.slug for p in SlugPage.objects.filter(descendants__in=[aba])],
['a', 'a/b', 'a/b/a'],
)
self.assertListEqual(
[p.slug for p in aba.ascendants.all()],
['a', 'a/b', 'a/b/a'],
)
def test_empty_join_conditions(self):
x = SlugPage.objects.get(slug='x')
message = "Join generated an empty ON clause."
with self.assertRaisesMessage(ValueError, message):
list(SlugPage.objects.filter(containers=x))
| bsd-3-clause |
jenalgit/django | django/contrib/gis/geos/collections.py | 47 | 4542 | """
This module houses the Geometry Collection objects:
GeometryCollection, MultiPoint, MultiLineString, and MultiPolygon
"""
from ctypes import byref, c_int, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.geometry import (
GEOSGeometry, ProjectInterpolateMixin,
)
from django.contrib.gis.geos.libgeos import get_pointer_arr
from django.contrib.gis.geos.linestring import LinearRing, LineString
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.utils.six.moves import range
class GeometryCollection(GEOSGeometry):
_typeid = 7
def __init__(self, *args, **kwargs):
"Initializes a Geometry Collection from a sequence of Geometry objects."
# Checking the arguments
if not args:
raise TypeError('Must provide at least one Geometry to initialize %s.' % self.__class__.__name__)
if len(args) == 1:
# If only one geometry provided or a list of geometries is provided
# in the first argument.
if isinstance(args[0], (tuple, list)):
init_geoms = args[0]
else:
init_geoms = args
else:
init_geoms = args
# Ensuring that only the permitted geometries are allowed in this collection
# this is moved to list mixin super class
self._check_allowed(init_geoms)
# Creating the geometry pointer array.
collection = self._create_collection(len(init_geoms), iter(init_geoms))
super(GeometryCollection, self).__init__(collection, **kwargs)
def __iter__(self):
"Iterates over each Geometry in the Collection."
for i in range(len(self)):
yield self[i]
def __len__(self):
"Returns the number of geometries in this Collection."
return self.num_geom
# ### Methods for compatibility with ListMixin ###
def _create_collection(self, length, items):
# Creating the geometry pointer array.
geoms = get_pointer_arr(length)
for i, g in enumerate(items):
# this is a little sloppy, but makes life easier
# allow GEOSGeometry types (python wrappers) or pointer types
geoms[i] = capi.geom_clone(getattr(g, 'ptr', g))
return capi.create_collection(c_int(self._typeid), byref(geoms), c_uint(length))
def _get_single_internal(self, index):
return capi.get_geomn(self.ptr, index)
def _get_single_external(self, index):
"Returns the Geometry from this Collection at the given index (0-based)."
# Checking the index and returning the corresponding GEOS geometry.
return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)
def _set_list(self, length, items):
"Create a new collection, and destroy the contents of the previous pointer."
prev_ptr = self.ptr
srid = self.srid
self.ptr = self._create_collection(length, items)
if srid:
self.srid = srid
capi.destroy_geom(prev_ptr)
_set_single = GEOSGeometry._set_single_rebuild
_assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild
@property
def kml(self):
"Returns the KML for this Geometry Collection."
return '<MultiGeometry>%s</MultiGeometry>' % ''.join(g.kml for g in self)
@property
def tuple(self):
"Returns a tuple of all the coordinates in this Geometry Collection"
return tuple(g.tuple for g in self)
coords = tuple
# MultiPoint, MultiLineString, and MultiPolygon class definitions.
class MultiPoint(GeometryCollection):
_allowed = Point
_typeid = 4
class MultiLineString(ProjectInterpolateMixin, GeometryCollection):
_allowed = (LineString, LinearRing)
_typeid = 5
@property
def merged(self):
"""
Returns a LineString representing the line merge of this
MultiLineString.
"""
return self._topology(capi.geos_linemerge(self.ptr))
class MultiPolygon(GeometryCollection):
_allowed = Polygon
_typeid = 6
@property
def cascaded_union(self):
"Returns a cascaded union of this MultiPolygon."
return GEOSGeometry(capi.geos_cascaded_union(self.ptr), self.srid)
# Setting the allowed types here since GeometryCollection is defined before
# its subclasses.
GeometryCollection._allowed = (Point, LineString, LinearRing, Polygon, MultiPoint, MultiLineString, MultiPolygon)
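# Illustrative usage (requires the GEOS library at runtime; values are examples):
#   mp = MultiPoint(Point(0, 0), Point(1, 1), srid=4326)
#   mp.coords == ((0.0, 0.0), (1.0, 1.0))
#   GeometryCollection(mp, Polygon(((0, 0), (0, 1), (1, 1), (0, 0)))).num_geom == 2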
| bsd-3-clause |
MobinRanjbar/hue | apps/proxy/src/proxy/views.py | 33 | 4641 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Proxies HTTP requests through the Desktop server.
# This is intended to be used to view the "built-in"
# UIs.
#
# TODO(philip): Instead of whitelists, also offer a way
# to create links (within the application) to trusted
# URLs, by appending an HMAC to the parameters.
import logging
import re
from urllib2 import urlopen
from urlparse import urlparse, urlunparse
from django.core import urlresolvers
from django.http import HttpResponse
from desktop.lib.exceptions import MessageException
from proxy import conf
LOGGER = logging.getLogger(__name__)
def check_host_port(host, port):
"""
Return true if this host:port pair is allowed to be proxied.
"""
# Check explicit whitelist
hostport = "%s:%d" % (host, port)
for regexp in conf.WHITELIST.get():
if regexp.match(hostport):
return True
return False
def check_blacklist(host, port, path):
"""
Return true if this host:port path combo is allowed to be proxied.
"""
blacklist = conf.BLACKLIST.get()
if not blacklist:
return True
# Make a canonical path, since "/forbidden//path" (string) does not match
# "/forbidden/path" (regex).
has_trailing_slash = path.endswith('/')
path_elems = path.split('/')
path_elems = [ p for p in path_elems if p ]
canon_url = "%s:%s/%s" % (host, port, '/'.join(path_elems))
if has_trailing_slash:
canon_url += '/'
for regexp in blacklist:
if regexp.match(canon_url):
return False
return True
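# For example (illustrative values): a request for path "/jobtracker//logs/" on
# host "jt.example.com", port 50030 is matched against the blacklist patterns
# as the canonical URL "jt.example.com:50030/jobtracker/logs/".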
def proxy(request, host, port, path):
"""
Proxies an HTTP request by fetching the data
and re-writing links.
"""
port = int(port)
if not check_host_port(host, port):
raise MessageException(
("%s:%d is not whitelisted for reverse proxying, nor a daemon that Cluster Health " +
"is aware of. Contact your administrator.") % (host, port))
if not check_blacklist(host, port, path):
raise MessageException(
"Access to %s:%s%s is blocked. Contact your administrator." % (host, port, path))
# The tuple here is: (scheme, netloc, path, params, query, fragment).
# We don't support params or fragment.
url = urlunparse(("http", "%s:%d" % (host,port),
path,
None,
request.META.get("QUERY_STRING"),
None))
LOGGER.info("Retrieving %s." % url)
if request.method == 'POST':
post_data = request.POST.urlencode()
else:
post_data = None
data = urlopen(url, data=post_data)
content_type = data.headers.get("content-type", "text/plain")
if not re.match(r'^text/html\s*(?:;.*)?$', content_type):
resp_text = data.read(1024*1024) # read 1MB
else:
resp_text = _rewrite_links(data)
request.path = _reverse(host, port, path)
return HttpResponse(resp_text, content_type=data.headers.get("content-type"))
def _reverse(host, port, path):
return urlresolvers.reverse("proxy.views.proxy",
kwargs=dict(host=host, port=port, path=path))
def _rewrite_url(url):
"""Used by _rewrite_links"""
scheme, netloc, path, params, query, fragment = urlparse(url)
if scheme != "http": # scheme
# Only re-write http URLs, since that's all
# we proxy.
return url
if ":" in netloc: # netloc
host, port = netloc.rsplit(":", 1)
else:
host, port = netloc, str(80)
path = path or "/"
try:
# We may hit invalid urls. Return None to strip out the link entirely.
out = _reverse(host, port, path)
except urlresolvers.NoReverseMatch, ex:
LOGGER.error("Encountered malformed URL '%s' when rewriting proxied page." % (url,))
return None
if query:
out = out + "?" + query
return out
def _rewrite_links(data):
import lxml.html
html = lxml.html.parse(data)
html.getroot().rewrite_links(_rewrite_url, resolve_base_href=True, base_href=data.geturl())
return lxml.html.tostring(html)
| apache-2.0 |
r-kitaev/lucid-python-werkzeug | examples/upload.py | 44 | 1273 | #!/usr/bin/env python
"""
Simple Upload Application
~~~~~~~~~~~~~~~~~~~~~~~~~
All uploaded files are directly send back to the client.
:copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.serving import run_simple
from werkzeug.wrappers import BaseRequest, BaseResponse
from werkzeug.wsgi import wrap_file
def view_file(req):
if not 'uploaded_file' in req.files:
return BaseResponse('no file uploaded')
f = req.files['uploaded_file']
return BaseResponse(wrap_file(req.environ, f), mimetype=f.content_type,
direct_passthrough=True)
def upload_file(req):
return BaseResponse('''
<h1>Upload File</h1>
<form action="" method="post" enctype="multipart/form-data">
<input type="file" name="uploaded_file">
<input type="submit" value="Upload">
</form>
''', mimetype='text/html')
def application(environ, start_response):
req = BaseRequest(environ)
if req.method == 'POST':
resp = view_file(req)
else:
resp = upload_file(req)
return resp(environ, start_response)
if __name__ == '__main__':
run_simple('localhost', 5000, application, use_debugger=True)
| bsd-3-clause |
followloda/PornGuys | FlaskServer/venv/Lib/encodings/rot_13.py | 497 | 2579 | #!/usr/bin/env python
""" Python Character Mapping Codec for ROT13.
See http://ucsub.colorado.edu/~kominek/rot13/ for details.
Written by Marc-Andre Lemburg ([email protected]).
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_map)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='rot-13',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0041: 0x004e,
0x0042: 0x004f,
0x0043: 0x0050,
0x0044: 0x0051,
0x0045: 0x0052,
0x0046: 0x0053,
0x0047: 0x0054,
0x0048: 0x0055,
0x0049: 0x0056,
0x004a: 0x0057,
0x004b: 0x0058,
0x004c: 0x0059,
0x004d: 0x005a,
0x004e: 0x0041,
0x004f: 0x0042,
0x0050: 0x0043,
0x0051: 0x0044,
0x0052: 0x0045,
0x0053: 0x0046,
0x0054: 0x0047,
0x0055: 0x0048,
0x0056: 0x0049,
0x0057: 0x004a,
0x0058: 0x004b,
0x0059: 0x004c,
0x005a: 0x004d,
0x0061: 0x006e,
0x0062: 0x006f,
0x0063: 0x0070,
0x0064: 0x0071,
0x0065: 0x0072,
0x0066: 0x0073,
0x0067: 0x0074,
0x0068: 0x0075,
0x0069: 0x0076,
0x006a: 0x0077,
0x006b: 0x0078,
0x006c: 0x0079,
0x006d: 0x007a,
0x006e: 0x0061,
0x006f: 0x0062,
0x0070: 0x0063,
0x0071: 0x0064,
0x0072: 0x0065,
0x0073: 0x0066,
0x0074: 0x0067,
0x0075: 0x0068,
0x0076: 0x0069,
0x0077: 0x006a,
0x0078: 0x006b,
0x0079: 0x006c,
0x007a: 0x006d,
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
### Filter API
def rot13(infile, outfile):
outfile.write(infile.read().encode('rot-13'))
if __name__ == '__main__':
import sys
rot13(sys.stdin, sys.stdout)
| gpl-3.0 |
TomHeatwole/osf.io | api_tests/registrations/views/test_registration_embeds.py | 15 | 2612 | from nose.tools import * # flake8: noqa
import functools
from framework.auth.core import Auth
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
AuthUserFactory,
RegistrationFactory
)
class TestRegistrationEmbeds(ApiTestCase):
def setUp(self):
super(TestRegistrationEmbeds, self).setUp()
self.user = AuthUserFactory()
self.auth = Auth(self.user)
make_public_node = functools.partial(ProjectFactory, is_public=False, creator=self.user)
self.root_node = make_public_node()
self.child1 = make_public_node(parent=self.root_node)
self.child2 = make_public_node(parent=self.root_node)
self.contribs = [AuthUserFactory() for i in range(2)]
for contrib in self.contribs:
self.root_node.add_contributor(contrib, ['read', 'write'], auth=self.auth, save=True)
self.child1.add_contributor(contrib, ['read', 'write'], auth=self.auth, save=True)
self.contrib1 = self.contribs[0]
self.contrib2 = self.contribs[1]
self.subchild = ProjectFactory(parent=self.child2, creator=self.contrib1)
self.registration = RegistrationFactory(project=self.root_node, is_public=True)
self.registration_child = RegistrationFactory(project=self.child1, is_public=True)
def test_embed_children(self):
url = '/{0}registrations/{1}/?embed=children'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
json = res.json
embeds = json['data']['embeds']
assert_equal(len(embeds['children']['data']), 2)
titles = [self.child1.title, self.child2.title]
for child in embeds['children']['data']:
assert_in(child['attributes']['title'], titles)
def test_embed_contributors(self):
url = '/{0}registrations/{1}/?embed=contributors'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
embeds = res.json['data']['embeds']
ids = [c._id for c in self.contribs] + [self.user._id]
for contrib in embeds['contributors']['data']:
assert_in(contrib['id'], ids)
def test_embed_attributes_not_relationships(self):
url = '/{}registrations/{}/?embed=title'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.contrib1.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "The following fields are not embeddable: title")
| apache-2.0 |
AthinaB/synnefo | snf-cyclades-gtools/docs/conf.py | 9 | 2020 | import sys, os
sys.path.insert(0, os.path.abspath('../'))
from synnefo.versions.ganeti import __version__
project = u'snf-cyclades-gtools'
copyright = u'2012-2013, GRNET'
version = __version__
release = __version__
html_title = 'synnefo ' + version
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_theme_options = {
'collapsiblesidebar': 'true',
'footerbgcolor': '#55b577',
'footertextcolor': '#000000',
'sidebarbgcolor': '#ffffff',
'sidebarbtncolor': '#f2f2f2',
'sidebartextcolor': '#000000',
'sidebarlinkcolor': '#328e4a',
'relbarbgcolor': '#55b577',
'relbartextcolor': '#ffffff',
'relbarlinkcolor': '#ffffff',
'bgcolor': '#ffffff',
'textcolor': '#000000',
'headbgcolor': '#ffffff',
'headtextcolor': '#000000',
'headlinkcolor': '#c60f0f',
'linkcolor': '#328e4a',
'visitedlinkcolor': '#63409b',
'codebgcolor': '#eeffcc',
'codetextcolor': '#333333'
}
html_static_path = ['_static']
htmlhelp_basename = 'synnefodoc'
intersphinx_mapping = {
    'python': ('http://docs.python.org/', None),
'django': ('https://docs.djangoproject.com/en/dev/',
'https://docs.djangoproject.com/en/dev/_objects/')
}
SYNNEFO_DOCS_BASE_URL = 'http://www.synnefo.org/docs'
SYNNEFO_PROJECTS = {
'synnefo': 'dev',
'pithos': 'dev',
'snf-webproject': 'dev',
'snf-common': 'dev',
'snf-image': 'dev',
'snf-cyclades-app': 'dev'
}
for name, ver in SYNNEFO_PROJECTS.iteritems():
intersphinx_mapping[name.replace("-","")] = (SYNNEFO_DOCS_BASE_URL +
                                                 '/%s/%s/' % (name, ver),
None)
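# For example (illustrative), the "snf-common" entry above becomes roughly:
#   intersphinx_mapping['snfcommon'] = ('http://www.synnefo.org/docs/snf-common/dev/', None)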
extensions = ['sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode']
| gpl-3.0 |
ViralLeadership/numpy | numpy/core/_methods.py | 103 | 4308 | """
Array methods which are called by both the C-code for the method
and the Python code for the NumPy-namespace function
"""
from __future__ import division, absolute_import, print_function
import warnings
from numpy.core import multiarray as mu
from numpy.core import umath as um
from numpy.core.numeric import asanyarray
from numpy.core import numerictypes as nt
# save those O(100) nanoseconds!
umr_maximum = um.maximum.reduce
umr_minimum = um.minimum.reduce
umr_sum = um.add.reduce
umr_prod = um.multiply.reduce
umr_any = um.logical_or.reduce
umr_all = um.logical_and.reduce
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
# small reductions
def _amax(a, axis=None, out=None, keepdims=False):
return umr_maximum(a, axis, None, out, keepdims)
def _amin(a, axis=None, out=None, keepdims=False):
return umr_minimum(a, axis, None, out, keepdims)
def _sum(a, axis=None, dtype=None, out=None, keepdims=False):
return umr_sum(a, axis, dtype, out, keepdims)
def _prod(a, axis=None, dtype=None, out=None, keepdims=False):
return umr_prod(a, axis, dtype, out, keepdims)
def _any(a, axis=None, dtype=None, out=None, keepdims=False):
return umr_any(a, axis, dtype, out, keepdims)
def _all(a, axis=None, dtype=None, out=None, keepdims=False):
return umr_all(a, axis, dtype, out, keepdims)
def _count_reduce_items(arr, axis):
if axis is None:
axis = tuple(range(arr.ndim))
if not isinstance(axis, tuple):
axis = (axis,)
items = 1
for ax in axis:
items *= arr.shape[ax]
return items
def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
arr = asanyarray(a)
rcount = _count_reduce_items(arr, axis)
# Make this warning show up first
if rcount == 0:
warnings.warn("Mean of empty slice.", RuntimeWarning)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
dtype = mu.dtype('f8')
ret = umr_sum(arr, axis, dtype, out, keepdims)
if isinstance(ret, mu.ndarray):
ret = um.true_divide(
ret, rcount, out=ret, casting='unsafe', subok=False)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(ret / rcount)
else:
ret = ret / rcount
return ret
def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
arr = asanyarray(a)
rcount = _count_reduce_items(arr, axis)
# Make this warning show up on top.
if ddof >= rcount:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
dtype = mu.dtype('f8')
# Compute the mean.
# Note that if dtype is not of inexact type then arraymean will
# not be either.
arrmean = umr_sum(arr, axis, dtype, keepdims=True)
if isinstance(arrmean, mu.ndarray):
arrmean = um.true_divide(
arrmean, rcount, out=arrmean, casting='unsafe', subok=False)
else:
arrmean = arrmean.dtype.type(arrmean / rcount)
# Compute sum of squared deviations from mean
# Note that x may not be inexact and that we need it to be an array,
# not a scalar.
x = asanyarray(arr - arrmean)
if issubclass(arr.dtype.type, nt.complexfloating):
x = um.multiply(x, um.conjugate(x), out=x).real
else:
x = um.multiply(x, x, out=x)
ret = umr_sum(x, axis, dtype, out, keepdims)
# Compute degrees of freedom and make sure it is not negative.
rcount = max([rcount - ddof, 0])
# divide by degrees of freedom
if isinstance(ret, mu.ndarray):
ret = um.true_divide(
ret, rcount, out=ret, casting='unsafe', subok=False)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(ret / rcount)
else:
ret = ret / rcount
return ret
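# In effect, for a reduction over N items (illustrative summary):
#   _var(a) ~ sum(|a - mean(a)|**2) / max(N - ddof, 0)
#   _std(a) ~ sqrt(_var(a))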
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
if isinstance(ret, mu.ndarray):
ret = um.sqrt(ret, out=ret)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(um.sqrt(ret))
else:
ret = um.sqrt(ret)
return ret
| bsd-3-clause |
edx/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/httpwrappers/tests.py | 47 | 11505 | import copy
import pickle
from django.http import (QueryDict, HttpResponse, SimpleCookie, BadHeaderError,
parse_cookie)
from django.utils import unittest
class QueryDictTests(unittest.TestCase):
def test_missing_key(self):
q = QueryDict('')
self.assertRaises(KeyError, q.__getitem__, 'foo')
def test_immutability(self):
q = QueryDict('')
self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar'])
self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
self.assertRaises(AttributeError, q.pop, 'foo')
self.assertRaises(AttributeError, q.popitem)
self.assertRaises(AttributeError, q.clear)
def test_immutable_get_with_default(self):
q = QueryDict('')
self.assertEqual(q.get('foo', 'default'), 'default')
def test_immutable_basic_operations(self):
q = QueryDict('')
self.assertEqual(q.getlist('foo'), [])
self.assertEqual(q.has_key('foo'), False)
self.assertEqual('foo' in q, False)
self.assertEqual(q.items(), [])
self.assertEqual(q.lists(), [])
self.assertEqual(q.items(), [])
self.assertEqual(q.keys(), [])
self.assertEqual(q.values(), [])
self.assertEqual(len(q), 0)
self.assertEqual(q.urlencode(), '')
def test_single_key_value(self):
"""Test QueryDict with one key/value pair"""
q = QueryDict('foo=bar')
self.assertEqual(q['foo'], 'bar')
self.assertRaises(KeyError, q.__getitem__, 'bar')
self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
self.assertEqual(q.get('foo', 'default'), 'bar')
self.assertEqual(q.get('bar', 'default'), 'default')
self.assertEqual(q.getlist('foo'), ['bar'])
self.assertEqual(q.getlist('bar'), [])
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar'])
self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
self.assertTrue(q.has_key('foo'))
self.assertTrue('foo' in q)
self.assertFalse(q.has_key('bar'))
self.assertFalse('bar' in q)
self.assertEqual(q.items(), [(u'foo', u'bar')])
self.assertEqual(q.lists(), [(u'foo', [u'bar'])])
self.assertEqual(q.keys(), ['foo'])
self.assertEqual(q.values(), ['bar'])
self.assertEqual(len(q), 1)
self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
self.assertRaises(AttributeError, q.pop, 'foo')
self.assertRaises(AttributeError, q.popitem)
self.assertRaises(AttributeError, q.clear)
self.assertRaises(AttributeError, q.setdefault, 'foo', 'bar')
self.assertEqual(q.urlencode(), 'foo=bar')
def test_urlencode(self):
q = QueryDict('', mutable=True)
q['next'] = '/a&b/'
self.assertEqual(q.urlencode(), 'next=%2Fa%26b%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/a%26b/')
q = QueryDict('', mutable=True)
q['next'] = u'/t\xebst&key/'
self.assertEqual(q.urlencode(), 'next=%2Ft%C3%ABst%26key%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/t%C3%ABst%26key/')
def test_mutable_copy(self):
"""A copy of a QueryDict is mutable."""
q = QueryDict('').copy()
self.assertRaises(KeyError, q.__getitem__, "foo")
q['name'] = 'john'
self.assertEqual(q['name'], 'john')
def test_mutable_delete(self):
q = QueryDict('').copy()
q['name'] = 'john'
del q['name']
self.assertFalse('name' in q)
def test_basic_mutable_operations(self):
q = QueryDict('').copy()
q['name'] = 'john'
self.assertEqual(q.get('foo', 'default'), 'default')
self.assertEqual(q.get('name', 'default'), 'john')
self.assertEqual(q.getlist('name'), ['john'])
self.assertEqual(q.getlist('foo'), [])
q.setlist('foo', ['bar', 'baz'])
self.assertEqual(q.get('foo', 'default'), 'baz')
self.assertEqual(q.getlist('foo'), ['bar', 'baz'])
q.appendlist('foo', 'another')
self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another'])
self.assertEqual(q['foo'], 'another')
self.assertTrue(q.has_key('foo'))
self.assertTrue('foo' in q)
self.assertEqual(q.items(), [(u'foo', u'another'), (u'name', u'john')])
self.assertEqual(q.lists(), [(u'foo', [u'bar', u'baz', u'another']), (u'name', [u'john'])])
self.assertEqual(q.keys(), [u'foo', u'name'])
self.assertEqual(q.values(), [u'another', u'john'])
self.assertEqual(len(q), 2)
q.update({'foo': 'hello'})
self.assertEqual(q['foo'], 'hello')
self.assertEqual(q.get('foo', 'not available'), 'hello')
self.assertEqual(q.getlist('foo'), [u'bar', u'baz', u'another', u'hello'])
self.assertEqual(q.pop('foo'), [u'bar', u'baz', u'another', u'hello'])
self.assertEqual(q.pop('foo', 'not there'), 'not there')
self.assertEqual(q.get('foo', 'not there'), 'not there')
self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
self.assertEqual(q['foo'], 'bar')
self.assertEqual(q.getlist('foo'), ['bar'])
self.assertEqual(q.urlencode(), 'foo=bar&name=john')
q.clear()
self.assertEqual(len(q), 0)
def test_multiple_keys(self):
"""Test QueryDict with two key/value pairs with same keys."""
q = QueryDict('vote=yes&vote=no')
self.assertEqual(q['vote'], u'no')
self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
self.assertEqual(q.get('vote', 'default'), u'no')
self.assertEqual(q.get('foo', 'default'), 'default')
self.assertEqual(q.getlist('vote'), [u'yes', u'no'])
self.assertEqual(q.getlist('foo'), [])
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar', 'baz'])
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar', 'baz'])
self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
self.assertEqual(q.has_key('vote'), True)
self.assertEqual('vote' in q, True)
self.assertEqual(q.has_key('foo'), False)
self.assertEqual('foo' in q, False)
self.assertEqual(q.items(), [(u'vote', u'no')])
self.assertEqual(q.lists(), [(u'vote', [u'yes', u'no'])])
self.assertEqual(q.keys(), [u'vote'])
self.assertEqual(q.values(), [u'no'])
self.assertEqual(len(q), 1)
self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
self.assertRaises(AttributeError, q.pop, 'foo')
self.assertRaises(AttributeError, q.popitem)
self.assertRaises(AttributeError, q.clear)
self.assertRaises(AttributeError, q.setdefault, 'foo', 'bar')
self.assertRaises(AttributeError, q.__delitem__, 'vote')
def test_invalid_input_encoding(self):
"""
QueryDicts must be able to handle invalid input encoding (in this
case, bad UTF-8 encoding).
"""
q = QueryDict('foo=bar&foo=\xff')
self.assertEqual(q['foo'], u'\ufffd')
self.assertEqual(q.getlist('foo'), [u'bar', u'\ufffd'])
def test_pickle(self):
q = QueryDict('')
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q == q1, True)
q = QueryDict('a=b&c=d')
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q == q1, True)
q = QueryDict('a=b&c=d&a=1')
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q == q1 , True)
def test_update_from_querydict(self):
"""Regression test for #8278: QueryDict.update(QueryDict)"""
x = QueryDict("a=1&a=2", mutable=True)
y = QueryDict("a=3&a=4")
x.update(y)
self.assertEqual(x.getlist('a'), [u'1', u'2', u'3', u'4'])
def test_non_default_encoding(self):
"""#13572 - QueryDict with a non-default encoding"""
q = QueryDict('sbb=one', encoding='rot_13')
self.assertEqual(q.encoding , 'rot_13' )
self.assertEqual(q.items() , [(u'foo', u'bar')] )
self.assertEqual(q.urlencode() , 'sbb=one' )
q = q.copy()
self.assertEqual(q.encoding , 'rot_13' )
self.assertEqual(q.items() , [(u'foo', u'bar')] )
self.assertEqual(q.urlencode() , 'sbb=one' )
self.assertEqual(copy.copy(q).encoding , 'rot_13' )
self.assertEqual(copy.deepcopy(q).encoding , 'rot_13')
class HttpResponseTests(unittest.TestCase):
def test_unicode_headers(self):
r = HttpResponse()
        # If we insert a unicode value it will be converted to an ascii str.
r['value'] = u'test value'
self.assertTrue(isinstance(r['value'], str))
        # An error is raised when a unicode object with non-ascii is assigned.
self.assertRaises(UnicodeEncodeError, r.__setitem__, 'value', u't\xebst value')
# An error is raised when a unicode object with non-ASCII format is
# passed as initial mimetype or content_type.
self.assertRaises(UnicodeEncodeError, HttpResponse,
mimetype=u't\xebst value')
# HttpResponse headers must be convertible to ASCII.
self.assertRaises(UnicodeEncodeError, HttpResponse,
content_type=u't\xebst value')
        # The response also converts unicode keys to strings.
r[u'test'] = 'testing key'
l = list(r.items())
l.sort()
self.assertEqual(l[1], ('test', 'testing key'))
# It will also raise errors for keys with non-ascii data.
self.assertRaises(UnicodeEncodeError, r.__setitem__, u't\xebst key', 'value')
def test_newlines_in_headers(self):
# Bug #10188: Do not allow newlines in headers (CR or LF)
r = HttpResponse()
self.assertRaises(BadHeaderError, r.__setitem__, 'test\rstr', 'test')
self.assertRaises(BadHeaderError, r.__setitem__, 'test\nstr', 'test')
class CookieTests(unittest.TestCase):
def test_encode(self):
"""
Test that we don't output tricky characters in encoded value
"""
# Python 2.4 compatibility note: Python 2.4's cookie implementation
# always returns Set-Cookie headers terminating in semi-colons.
# That's not the bug this test is looking for, so ignore it.
c = SimpleCookie()
c['test'] = "An,awkward;value"
self.assertTrue(";" not in c.output().rstrip(';')) # IE compat
self.assertTrue("," not in c.output().rstrip(';')) # Safari compat
def test_decode(self):
"""
Test that we can still preserve semi-colons and commas
"""
c = SimpleCookie()
c['test'] = "An,awkward;value"
c2 = SimpleCookie()
c2.load(c.output())
self.assertEqual(c['test'].value, c2['test'].value)
def test_decode_2(self):
"""
Test that we haven't broken normal encoding
"""
c = SimpleCookie()
c['test'] = "\xf0"
c2 = SimpleCookie()
c2.load(c.output())
self.assertEqual(c['test'].value, c2['test'].value)
def test_nonstandard_keys(self):
"""
Test that a single non-standard cookie name doesn't affect all cookies. Ticket #13007.
"""
self.assertTrue('good_cookie' in parse_cookie('good_cookie=yes;bad:cookie=yes').keys())
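# Minimal usage sketch of QueryDict, mirroring the assertions above
# (illustrative only; assumes a configured Django settings module):
if __name__ == '__main__':
    q = QueryDict('vote=yes&vote=no')            # immutable by default
    assert q['vote'] == u'no'                    # __getitem__ returns the last value
    assert q.getlist('vote') == [u'yes', u'no']
    mutable = q.copy()                           # copies are mutable
    mutable['vote'] = u'maybe'
    assert mutable.getlist('vote') == [u'maybe']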
| gpl-3.0 |
apavlo/h-store | third_party/python/boto/cloudfront/identity.py | 47 | 4489 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
class OriginAccessIdentity:
def __init__(self, connection=None, config=None, id='',
s3_user_id='', comment=''):
self.connection = connection
self.config = config
self.id = id
self.s3_user_id = s3_user_id
self.comment = comment
self.etag = None
def startElement(self, name, attrs, connection):
if name == 'CloudFrontOriginAccessIdentityConfig':
self.config = OriginAccessIdentityConfig()
return self.config
else:
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'S3CanonicalUserId':
self.s3_user_id = value
elif name == 'Comment':
self.comment = value
else:
setattr(self, name, value)
def update(self, comment=None):
new_config = OriginAccessIdentityConfig(self.connection,
self.config.caller_reference,
self.config.comment)
        if comment is not None:
new_config.comment = comment
self.etag = self.connection.set_origin_identity_config(self.id, self.etag, new_config)
self.config = new_config
def delete(self):
return self.connection.delete_origin_access_identity(self.id, self.etag)
def uri(self):
return 'origin-access-identity/cloudfront/%s' % self.id
class OriginAccessIdentityConfig:
def __init__(self, connection=None, caller_reference='', comment=''):
self.connection = connection
if caller_reference:
self.caller_reference = caller_reference
else:
self.caller_reference = str(uuid.uuid4())
self.comment = comment
def to_xml(self):
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<CloudFrontOriginAccessIdentityConfig xmlns="http://cloudfront.amazonaws.com/doc/2009-09-09/">\n'
s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
if self.comment:
s += ' <Comment>%s</Comment>\n' % self.comment
s += '</CloudFrontOriginAccessIdentityConfig>\n'
return s
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Comment':
self.comment = value
elif name == 'CallerReference':
self.caller_reference = value
else:
setattr(self, name, value)
class OriginAccessIdentitySummary:
def __init__(self, connection=None, id='',
s3_user_id='', comment=''):
self.connection = connection
self.id = id
self.s3_user_id = s3_user_id
self.comment = comment
self.etag = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'S3CanonicalUserId':
self.s3_user_id = value
elif name == 'Comment':
self.comment = value
else:
setattr(self, name, value)
def get_origin_access_identity(self):
return self.connection.get_origin_access_identity_info(self.id)
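# Illustrative sketch (not part of boto): rendering the request body that a
# new origin access identity configuration would carry; the values are assumptions.
if __name__ == '__main__':
    config = OriginAccessIdentityConfig(caller_reference='example-ref',
                                        comment='example comment')
    print config.to_xml()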
| gpl-3.0 |
thirdwing/mxnet | example/gluon/tree_lstm/scripts/preprocess-sick.py | 61 | 4765 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Preprocessing script for SICK data.
"""
import os
import glob
def make_dirs(dirs):
for d in dirs:
if not os.path.exists(d):
os.makedirs(d)
def dependency_parse(filepath, cp='', tokenize=True):
print('\nDependency parsing ' + filepath)
dirpath = os.path.dirname(filepath)
filepre = os.path.splitext(os.path.basename(filepath))[0]
tokpath = os.path.join(dirpath, filepre + '.toks')
parentpath = os.path.join(dirpath, filepre + '.parents')
relpath = os.path.join(dirpath, filepre + '.rels')
tokenize_flag = '-tokenize - ' if tokenize else ''
cmd = ('java -cp %s DependencyParse -tokpath %s -parentpath %s -relpath %s %s < %s'
% (cp, tokpath, parentpath, relpath, tokenize_flag, filepath))
os.system(cmd)
def constituency_parse(filepath, cp='', tokenize=True):
dirpath = os.path.dirname(filepath)
filepre = os.path.splitext(os.path.basename(filepath))[0]
tokpath = os.path.join(dirpath, filepre + '.toks')
parentpath = os.path.join(dirpath, filepre + '.cparents')
tokenize_flag = '-tokenize - ' if tokenize else ''
cmd = ('java -cp %s ConstituencyParse -tokpath %s -parentpath %s %s < %s'
% (cp, tokpath, parentpath, tokenize_flag, filepath))
os.system(cmd)
def build_vocab(filepaths, dst_path, lowercase=True):
vocab = set()
for filepath in filepaths:
with open(filepath) as f:
for line in f:
if lowercase:
line = line.lower()
vocab |= set(line.split())
with open(dst_path, 'w') as f:
for w in sorted(vocab):
f.write(w + '\n')
def split(filepath, dst_dir):
with open(filepath) as datafile, \
open(os.path.join(dst_dir, 'a.txt'), 'w') as afile, \
open(os.path.join(dst_dir, 'b.txt'), 'w') as bfile, \
open(os.path.join(dst_dir, 'id.txt'), 'w') as idfile, \
open(os.path.join(dst_dir, 'sim.txt'), 'w') as simfile:
datafile.readline()
for line in datafile:
i, a, b, sim, ent = line.strip().split('\t')
idfile.write(i + '\n')
afile.write(a + '\n')
bfile.write(b + '\n')
simfile.write(sim + '\n')
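# Input format assumed by split() above (illustrative, inferred from the unpacking):
# each data line of the SICK files is tab-separated as
#   pair_ID <TAB> sentence_A <TAB> sentence_B <TAB> relatedness_score <TAB> entailment_judgment
# which matches `i, a, b, sim, ent`; the first (header) line is skipped by readline().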
def parse(dirpath, cp=''):
dependency_parse(os.path.join(dirpath, 'a.txt'), cp=cp, tokenize=True)
dependency_parse(os.path.join(dirpath, 'b.txt'), cp=cp, tokenize=True)
constituency_parse(os.path.join(dirpath, 'a.txt'), cp=cp, tokenize=True)
constituency_parse(os.path.join(dirpath, 'b.txt'), cp=cp, tokenize=True)
if __name__ == '__main__':
print('=' * 80)
print('Preprocessing SICK dataset')
print('=' * 80)
base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
data_dir = os.path.join(base_dir, 'data')
sick_dir = os.path.join(data_dir, 'sick')
lib_dir = os.path.join(base_dir, 'lib')
train_dir = os.path.join(sick_dir, 'train')
dev_dir = os.path.join(sick_dir, 'dev')
test_dir = os.path.join(sick_dir, 'test')
make_dirs([train_dir, dev_dir, test_dir])
# java classpath for calling Stanford parser
classpath = ':'.join([
lib_dir,
os.path.join(lib_dir, 'stanford-parser/stanford-parser.jar'),
os.path.join(lib_dir, 'stanford-parser/stanford-parser-3.5.1-models.jar')])
# split into separate files
split(os.path.join(sick_dir, 'SICK_train.txt'), train_dir)
split(os.path.join(sick_dir, 'SICK_trial.txt'), dev_dir)
split(os.path.join(sick_dir, 'SICK_test_annotated.txt'), test_dir)
# parse sentences
parse(train_dir, cp=classpath)
parse(dev_dir, cp=classpath)
parse(test_dir, cp=classpath)
# get vocabulary
build_vocab(
glob.glob(os.path.join(sick_dir, '*/*.toks')),
os.path.join(sick_dir, 'vocab.txt'))
build_vocab(
glob.glob(os.path.join(sick_dir, '*/*.toks')),
os.path.join(sick_dir, 'vocab-cased.txt'),
lowercase=False)
| apache-2.0 |
ahhda/sympy | sympy/physics/hep/gamma_matrices.py | 77 | 31065 | from sympy import S
from sympy.tensor.tensor import TensorIndexType, TensorIndex,\
TensMul, TensorHead, tensorsymmetry, TensorType,\
TensAdd, tensor_mul, get_lines, Tensor
from sympy.core.containers import Tuple
from sympy.core.compatibility import range
DiracSpinorIndex = TensorIndexType('DiracSpinorIndex', dim=4, dummy_fmt="S")
class _LorentzContainer(object):
"""
Helper to collect LorentzIndex indices in various dimensions.
It collects LorentzIndex TensorIndexType that have been implemented in the code,
and stores them in a dict()
"""
lorentz_types = dict()
def __new__(cls, dim=4, eps_dim=None, dummy_fmt="L"):
if (dim, eps_dim) in _LorentzContainer.lorentz_types:
return _LorentzContainer.lorentz_types[(dim, eps_dim)]
new_L = TensorIndexType("LorentzIndex", dim=dim, eps_dim=eps_dim, dummy_fmt=dummy_fmt)
_LorentzContainer.lorentz_types[(dim, eps_dim)] = new_L
return new_L
class GammaMatrixHead(TensorHead):
r"""
Class to wrap a ``TensorHead`` for gamma matrices.
``dim`` dimension of the gamma matrix.
``eps_dim`` correction for dimensional regularization, use None if not needed.
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrixHead
>>> from sympy.tensor.tensor import tensor_indices
>>> G = GammaMatrixHead()
>>> i = tensor_indices('i', G.LorentzIndex)
>>> G(i)
gamma(i, auto_left, -auto_right)
Note that there is already an instance of GammaMatrixHead in four dimensions:
GammaMatrix, which is simply declare as
``GammaMatrix = GammaMatrixHead()``
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix
>>> from sympy.tensor.tensor import tensor_indices
>>> i = tensor_indices('i', GammaMatrix.LorentzIndex)
>>> GammaMatrix(i)
gamma(i, auto_left, -auto_right)
To access the metric tensor
>>> GammaMatrix.LorentzIndex.metric
metric(LorentzIndex,LorentzIndex)
"""
_gmhd = dict()
def __new__(cls, dim=4, eps_dim=4):
key = (dim, eps_dim)
if key in GammaMatrixHead._gmhd:
return GammaMatrixHead._gmhd[key]
lorentz = _LorentzContainer(*key)
gmh = TensorHead.__new__(cls, "gamma", TensorType(Tuple(lorentz, DiracSpinorIndex, DiracSpinorIndex), tensorsymmetry([1], [1], [1])), comm=2, matrix_behavior=True)
GammaMatrixHead._gmhd[key] = gmh
gmh.LorentzIndex = lorentz
return gmh
@staticmethod
def extract_type_tens(expression):
"""
Extract from a ``TensExpr`` all elements of this type.
Returns two tensor expressions:
* the first contains all ``TensorHead`` of this type.
* the second contains all remaining.
"""
if isinstance(expression, Tensor):
sp = [expression]
elif isinstance(expression, TensMul):
sp = expression.args
else:
raise ValueError('wrong type')
# Collect all gamma matrices of the same dimension
new_expr = S.One
residual_expr = S.One
for i in sp:
if isinstance(i, Tensor) and isinstance(i.args[0], GammaMatrixHead):
new_expr *= i
else:
residual_expr *= i
return new_expr, residual_expr
@staticmethod
def simplify_this_type(expression):
extracted_expr, residual_expr = GammaMatrixHead.extract_type_tens(expression)
res_expr = GammaMatrixHead._simplify_single_line(extracted_expr)
return res_expr * residual_expr
@staticmethod
def simplify_gpgp(ex, sort=True):
"""
simplify products ``G(i)*p(-i)*G(j)*p(-j) -> p(i)*p(-i)``
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G
>>> from sympy.tensor.tensor import tensor_indices, tensorhead
>>> p, q = tensorhead('p, q', [G.LorentzIndex], [[1]])
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', G.LorentzIndex)
>>> ps = p(i0)*G(-i0)
>>> qs = q(i0)*G(-i0)
>>> G.simplify_gpgp(ps*qs*qs)
gamma(-L_0, auto_left, -auto_right)*p(L_0)*q(L_1)*q(-L_1)
"""
def _simplify_gpgp(ex):
tids = ex._tids
components = tids.components
a = []
for i in range(len(components)):
if not isinstance(components[i], GammaMatrixHead):
continue
dum = tids.dum
for dx in dum:
if dx[2] == i:
p_pos1 = dx[3]
elif dx[3] == i:
p_pos1 = dx[2]
else:
continue
comp1 = components[p_pos1]
if comp1.comm == 0 and comp1.rank == 1:
a.append((i, p_pos1))
if not a:
return ex
elim = set()
tv = []
hit = True
coeff = S.One
ta = None
while hit:
hit = False
for i, ai in enumerate(a[:-1]):
if ai[0] in elim:
continue
if ai[0] != a[i + 1][0] - 1:
continue
if components[ai[1]] != components[a[i + 1][1]]:
continue
elim.add(ai[0])
elim.add(ai[1])
elim.add(a[i + 1][0])
elim.add(a[i + 1][1])
if not ta:
ta = ex.split()
mu = TensorIndex('mu', GammaMatrix.LorentzIndex)
ind1 = ta[ai[0]].get_indices()[1]
ind2 = ta[ai[0] + 1].get_indices()[2]
hit = True
if i == 0:
coeff = ex.coeff
tx = components[ai[1]](mu)*components[ai[1]](-mu)
tv.append(tx*DiracSpinorIndex.delta(ind1, ind2))
break
if tv:
a = [x for j, x in enumerate(ta) if j not in elim]
a.extend(tv)
t = tensor_mul(*a)*coeff
t = t.contract_metric(DiracSpinorIndex.delta)
return t
else:
return ex
if sort:
ex = ex.sorted_components()
while 1:
t = _simplify_gpgp(ex)
if t != ex:
ex = t
else:
return t
@staticmethod
def simplify_lines(ex):
"""
simplify a product of gamma matrices
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix, DiracSpinorIndex
>>> from sympy.tensor.tensor import tensor_indices
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', GammaMatrix.LorentzIndex)
>>> s0,s1,s2,s3,s4,s5,s6,s7 = tensor_indices('s0:8', DiracSpinorIndex)
>>> G = GammaMatrix
>>> t = G(i1,s1,-s2)*G(i4,s7,-s6)*G(i2,s2,-s3)*G(i3,s4,-s5)*G(i5,s6,-s7)
>>> G.simplify_lines(t)
4*gamma(i3, s4, -s5)*gamma(i1, s1, -S_0)*gamma(i2, S_0, -s3)*metric(i4, i5)
"""
lines, traces, rest = get_lines(ex, DiracSpinorIndex)
a = ex.split()
trest = tensor_mul(*[x for i, x in enumerate(a) if i in rest])
tlines = []
for line in lines:
first = a[line[0]]
last = a[line[-1]]
first = [x[0] for x in first.free if x[1] == 1][0]
last = [x[0] for x in last.free if x[1] == 2][0]
tx = tensor_mul(*[x for i, x in enumerate(a) if i in line])
tx1 = GammaMatrixHead._simplify_single_line(tx)
tlines.append(tx1)
traces = [GammaMatrix._trace_single_line(tensor_mul(*[x for i, x in enumerate(a) if i in line])) for line in traces]
res = tensor_mul(*([trest] + tlines + traces))
return res
def gamma_trace(self, t):
"""
trace of a single line of gamma matrices
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G
>>> from sympy.tensor.tensor import tensor_indices, tensorhead
>>> p, q = tensorhead('p, q', [G.LorentzIndex], [[1]])
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', G.LorentzIndex)
>>> ps = p(i0)*G(-i0)
>>> qs = q(i0)*G(-i0)
>>> G.gamma_trace(G(i0)*G(i1))
4*metric(i0, i1)
>>> G.gamma_trace(ps*ps) - 4*p(i0)*p(-i0)
0
>>> G.gamma_trace(ps*qs + ps*ps) - 4*p(i0)*p(-i0) - 4*p(i0)*q(-i0)
0
"""
#assert any(x == DiracSpinorIndex.auto_right for x, p, c, in t._tids.free)
if isinstance(t, TensAdd):
res = TensAdd(*[self._trace_single_line(x) for x in t.args])
return res
t = self._simplify_single_line(t)
res = self._trace_single_line(t)
return res
@staticmethod
def _simplify_single_line(expression):
"""
Simplify single-line product of gamma matrices.
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, DiracSpinorIndex as DS
>>> from sympy.tensor.tensor import tensor_indices, tensorhead
>>> p = tensorhead('p', [G.LorentzIndex], [[1]])
>>> i0,i1 = tensor_indices('i0:2', G.LorentzIndex)
>>> G._simplify_single_line(G(i0)*G(i1)*p(-i1)*G(-i0)) + 2*G(i0)*p(-i0)
0
"""
t1, t2 = GammaMatrixHead.extract_type_tens(expression)
if t1 != 1:
t1 = GammaMatrixHead._kahane_simplify(t1.coeff, t1._tids)
res = t1*t2
return res
def _trace_single_line(self, t):
"""
Evaluate the trace of a single gamma matrix line inside a ``TensExpr``.
Notes
=====
If there are ``DiracSpinorIndex.auto_left`` and ``DiracSpinorIndex.auto_right``
indices trace over them; otherwise traces are not implied (explain)
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G
>>> from sympy.tensor.tensor import tensor_indices, tensorhead
>>> p = tensorhead('p', [G.LorentzIndex], [[1]])
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', G.LorentzIndex)
>>> G._trace_single_line(G(i0)*G(i1))
4*metric(i0, i1)
>>> G._trace_single_line(G(i0)*p(-i0)*G(i1)*p(-i1)) - 4*p(i0)*p(-i0)
0
"""
def _trace_single_line1(t):
t = t.sorted_components()
components = t.components
ncomps = len(components)
g = self.LorentzIndex.metric
sg = DiracSpinorIndex.delta
            # gamma matrices are in a[i:j]
hit = 0
for i in range(ncomps):
if isinstance(components[i], GammaMatrixHead):
hit = 1
break
for j in range(i + hit, ncomps):
if not isinstance(components[j], GammaMatrixHead):
break
else:
j = ncomps
numG = j - i
if numG == 0:
spinor_free = [_[0] for _ in t._tids.free if _[0].tensortype is DiracSpinorIndex]
tcoeff = t.coeff
if spinor_free == [DiracSpinorIndex.auto_left, -DiracSpinorIndex.auto_right]:
t = t*DiracSpinorIndex.delta(-DiracSpinorIndex.auto_left, DiracSpinorIndex.auto_right)
t = t.contract_metric(sg)
return t/tcoeff if tcoeff else t
else:
return t/tcoeff if tcoeff else t
if numG % 2 == 1:
return TensMul.from_data(S.Zero, [], [], [])
elif numG > 4:
t = t.substitute_indices((-DiracSpinorIndex.auto_right, -DiracSpinorIndex.auto_index), (DiracSpinorIndex.auto_left, DiracSpinorIndex.auto_index))
a = t.split()
ind1, lind1, rind1 = a[i].args[-1]
ind2, lind2, rind2 = a[i + 1].args[-1]
aa = a[:i] + a[i + 2:]
t1 = tensor_mul(*aa)*g(ind1, ind2)*sg(lind1, rind1)*sg(lind2, rind2)
t1 = t1.contract_metric(g)
t1 = t1.contract_metric(sg)
args = [t1]
sign = 1
for k in range(i + 2, j):
sign = -sign
ind2, lind2, rind2 = a[k].args[-1]
aa = a[:i] + a[i + 1:k] + a[k + 1:]
t2 = sign*tensor_mul(*aa)*g(ind1, ind2)*sg(lind1, rind1)*sg(lind2, rind2)
t2 = t2.contract_metric(g)
t2 = t2.contract_metric(sg)
t2 = GammaMatrixHead.simplify_gpgp(t2, False)
args.append(t2)
t3 = TensAdd(*args)
#aa = _tensorlist_contract_metric(aa, g(ind1, ind2))
#t3 = t3.canon_bp()
t3 = self._trace_single_line(t3)
return t3
else:
a = t.split()
if len(t.components) == 1:
if t.components[0] is DiracSpinorIndex.delta:
return 4 # FIXME only for D=4
t1 = self._gamma_trace1(*a[i:j])
a2 = a[:i] + a[j:]
t2 = tensor_mul(*a2)
t3 = t1*t2
if not t3:
return t3
t3 = t3.contract_metric(g)
return t3
if isinstance(t, TensAdd):
a = [x.coeff*_trace_single_line1(x) for x in t.args]
return TensAdd(*a)
elif isinstance(t, (Tensor, TensMul)):
r = t.coeff*_trace_single_line1(t)
return r
else:
return t
def _gamma_trace1(self, *a):
gctr = 4 # FIXME specific for d=4
g = self.LorentzIndex.metric
if not a:
return gctr
n = len(a)
if n%2 == 1:
#return TensMul.from_data(S.Zero, [], [], [])
return S.Zero
if n == 2:
ind0 = a[0].get_indices()[0]
ind1 = a[1].get_indices()[0]
return gctr*g(ind0, ind1)
if n == 4:
ind0 = a[0].get_indices()[0]
ind1 = a[1].get_indices()[0]
ind2 = a[2].get_indices()[0]
ind3 = a[3].get_indices()[0]
return gctr*(g(ind0, ind1)*g(ind2, ind3) - \
g(ind0, ind2)*g(ind1, ind3) + g(ind0, ind3)*g(ind1, ind2))
@staticmethod
def _kahane_simplify(coeff, tids):
r"""
This function cancels contracted elements in a product of four
dimensional gamma matrices, resulting in an expression equal to the given
one, without the contracted gamma matrices.
Parameters
==========
`coeff` the coefficient of the tensor expression.
`tids` TIDS object representing the gamma matrix expression to simplify.
Notes
=====
If spinor indices are given, the matrices must be given in
the order given in the product.
Algorithm
=========
The idea behind the algorithm is to use some well-known identities,
i.e., for contractions enclosing an even number of `\gamma` matrices
`\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N}} \gamma_\mu = 2 (\gamma_{a_{2N}} \gamma_{a_1} \cdots \gamma_{a_{2N-1}} + \gamma_{a_{2N-1}} \cdots \gamma_{a_1} \gamma_{a_{2N}} )`
for an odd number of `\gamma` matrices
`\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N+1}} \gamma_\mu = -2 \gamma_{a_{2N+1}} \gamma_{a_{2N}} \cdots \gamma_{a_{1}}`
Instead of repeatedly applying these identities to cancel out all contracted indices,
it is possible to recognize the links that would result from such an operation,
the problem is thus reduced to a simple rearrangement of free gamma matrices.
Examples
========
When using, always remember that the original expression coefficient
has to be handled separately
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, DiracSpinorIndex as DS
>>> from sympy.tensor.tensor import tensor_indices, tensorhead, TensMul, TensAdd
>>> i0, i1, i2 = tensor_indices('i0:3', G.LorentzIndex)
>>> s0,s1,s2,s3,s4,s5 = tensor_indices('s0:6', DS)
>>> ta = G(i0)*G(-i0)
>>> G._kahane_simplify(ta.coeff, ta._tids) - 4*DS.delta(DS.auto_left, -DS.auto_right)
0
>>> tb = G(i0)*G(i1)*G(-i0)
>>> G._kahane_simplify(tb.coeff, tb._tids)
-2*gamma(i1, auto_left, -auto_right)
>>> t = G(i0, s0, -s1)*G(-i0,s1,-s2)
>>> G._kahane_simplify(t.coeff, t._tids) - 4*DS.delta(s0, -s2)
0
>>> t = G(i0, s0, -s1)*G(-i0,s1,-s0)
>>> G._kahane_simplify(t.coeff, t._tids)
16
If there are no contractions, the same expression is returned
>>> tc = 3*G(i0)*G(i1)
>>> G._kahane_simplify(tc.coeff, tc._tids)
3*gamma(i0, auto_left, S_0)*gamma(i1, -S_0, -auto_right)
References
==========
[1] Algorithm for Reducing Contracted Products of gamma Matrices, Joseph Kahane, Journal of Mathematical Physics, Vol. 9, No. 10, October 1968.
"""
for c in tids.components:
            if not isinstance(c, GammaMatrixHead):
raise ValueError('use only gamma matrices')
n = len(tids.components)
for p0, p1, c0, c1 in tids.dum:
if p0 == 0:
continue
dc = abs(c0 - c1)
if dc not in (1, n - 1):
raise ValueError('wrong gamma matrix ordering')
free = [_ for _ in tids.free if _[1] == 0]
spinor_free = [_ for _ in tids.free if _[1] != 0]
if len(spinor_free) == 2:
spinor_free.sort(key=lambda x: x[2])
assert spinor_free[0][1] == 1 and spinor_free[-1][1] == 2
assert spinor_free[0][2] == 0
elif spinor_free:
raise ValueError('spinor indices do not match')
dum = sorted([_ for _ in tids.dum if _[0] == 0 and _[1] == 0])
if len(dum) == 0: # or GammaMatrixHead:
# no contractions in `expression`, just return it.
return TensMul.from_TIDS(coeff, tids)
# find the `first_dum_pos`, i.e. the position of the first contracted
# gamma matrix, Kahane's algorithm as described in his paper requires the
# gamma matrix expression to start with a contracted gamma matrix, this is
# a workaround which ignores possible initial free indices, and re-adds
# them later.
dum_zip = list(zip(*dum))[2:]
first_dum_pos = min(min(dum_zip[0]), min(dum_zip[1]))
total_number = len(free) + len(dum)*2
number_of_contractions = len(dum)
free_pos = [None]*total_number
for i in free:
free_pos[i[2]] = i[0]
# `index_is_free` is a list of booleans, to identify index position
# and whether that index is free or dummy.
index_is_free = [False]*total_number
for i, indx in enumerate(free):
if indx[1] != 0:
raise ValueError("indx[1] should be equal to 0")
index_is_free[indx[2]] = True
# `links` is a dictionary containing the graph described in Kahane's paper,
# to every key correspond one or two values, representing the linked indices.
# All values in `links` are integers, negative numbers are used in the case
# where it is necessary to insert gamma matrices between free indices, in
# order to make Kahane's algorithm work (see paper).
links = dict()
for i in range(first_dum_pos, total_number):
links[i] = []
# `cum_sign` is a step variable to mark the sign of every index, see paper.
cum_sign = -1
# `cum_sign_list` keeps storage for all `cum_sign` (every index).
cum_sign_list = [None]*total_number
block_free_count = 0
# multiply `resulting_coeff` by the coefficient parameter, the rest
# of the algorithm ignores a scalar coefficient.
resulting_coeff = S.One * coeff
# initialize a lisf of lists of indices. The outer list will contain all
# additive tensor expressions, while the inner list will contain the
# free indices (rearranged according to the algorithm).
resulting_indices = [[]]
# start to count the `connected_components`, which together with the number
# of contractions, determines a -1 or +1 factor to be multiplied.
connected_components = 1
# First loop: here we fill `cum_sign_list`, and draw the links
# among consecutive indices (they are stored in `links`). Links among
# non-consecutive indices will be drawn later.
for i, is_free in enumerate(index_is_free):
# if `expression` starts with free indices, they are ignored here;
# they are later added as they are to the beginning of all
# `resulting_indices` list of lists of indices.
if i < first_dum_pos:
continue
if is_free:
block_free_count += 1
# if previous index was free as well, draw an arch in `links`.
if block_free_count > 1:
links[i - 1].append(i)
links[i].append(i - 1)
else:
# Change the sign of the index (`cum_sign`) if the number of free
# indices preceding it is even.
cum_sign *= 1 if (block_free_count % 2) else -1
if block_free_count == 0 and i != first_dum_pos:
# check if there are two consecutive dummy indices:
# in this case create virtual indices with negative position,
# these "virtual" indices represent the insertion of two
# gamma^0 matrices to separate consecutive dummy indices, as
# Kahane's algorithm requires dummy indices to be separated by
# free indices. The product of two gamma^0 matrices is unity,
# so the new expression being examined is the same as the
# original one.
if cum_sign == -1:
links[-1-i] = [-1-i+1]
links[-1-i+1] = [-1-i]
if (i - cum_sign) in links:
if i != first_dum_pos:
links[i].append(i - cum_sign)
if block_free_count != 0:
if i - cum_sign < len(index_is_free):
if index_is_free[i - cum_sign]:
links[i - cum_sign].append(i)
block_free_count = 0
cum_sign_list[i] = cum_sign
# The previous loop has only created links between consecutive free indices,
# it is necessary to properly create links among dummy (contracted) indices,
# according to the rules described in Kahane's paper. There is only one exception
# to Kahane's rules: the negative indices, which handle the case of some
# consecutive free indices (Kahane's paper just describes dummy indices
# separated by free indices, hinting that free indices can be added without
# altering the expression result).
for i in dum:
if i[0] != 0:
raise ValueError("i[0] should be 0")
if i[1] != 0:
raise ValueError("i[1] should be 0")
# get the positions of the two contracted indices:
pos1 = i[2]
pos2 = i[3]
# create Kahane's upper links, i.e. the upper arcs between dummy
# (i.e. contracted) indices:
links[pos1].append(pos2)
links[pos2].append(pos1)
# create Kahane's lower links, this corresponds to the arcs below
# the line described in the paper:
# first we move `pos1` and `pos2` according to the sign of the indices:
linkpos1 = pos1 + cum_sign_list[pos1]
linkpos2 = pos2 + cum_sign_list[pos2]
# otherwise, perform some checks before creating the lower arcs:
# make sure we are not exceeding the total number of indices:
if linkpos1 >= total_number:
continue
if linkpos2 >= total_number:
continue
# make sure we are not below the first dummy index in `expression`:
if linkpos1 < first_dum_pos:
continue
if linkpos2 < first_dum_pos:
continue
# check if the previous loop created "virtual" indices between dummy
# indices, in such a case relink `linkpos1` and `linkpos2`:
if (-1-linkpos1) in links:
linkpos1 = -1-linkpos1
if (-1-linkpos2) in links:
linkpos2 = -1-linkpos2
# move only if not next to free index:
if linkpos1 >= 0 and not index_is_free[linkpos1]:
linkpos1 = pos1
if linkpos2 >=0 and not index_is_free[linkpos2]:
linkpos2 = pos2
# create the lower arcs:
if linkpos2 not in links[linkpos1]:
links[linkpos1].append(linkpos2)
if linkpos1 not in links[linkpos2]:
links[linkpos2].append(linkpos1)
# This loop starts from the `first_dum_pos` index (first dummy index)
# walks through the graph deleting the visited indices from `links`,
        # it adds a gamma matrix for every free index it encounters, while it
# completely ignores dummy indices and virtual indices.
pointer = first_dum_pos
previous_pointer = 0
while True:
if pointer in links:
next_ones = links.pop(pointer)
else:
break
if previous_pointer in next_ones:
next_ones.remove(previous_pointer)
previous_pointer = pointer
if next_ones:
pointer = next_ones[0]
else:
break
if pointer == previous_pointer:
break
if pointer >=0 and free_pos[pointer] is not None:
for ri in resulting_indices:
ri.append(free_pos[pointer])
# The following loop removes the remaining connected components in `links`.
# If there are free indices inside a connected component, it gives a
# contribution to the resulting expression given by the factor
        # `gamma_a gamma_b ... gamma_z + gamma_z ... gamma_b gamma_a`, in Kahane's
# paper represented as {gamma_a, gamma_b, ... , gamma_z},
# virtual indices are ignored. The variable `connected_components` is
# increased by one for every connected component this loop encounters.
# If the connected component has virtual and dummy indices only
# (no free indices), it contributes to `resulting_indices` by a factor of two.
# The multiplication by two is a result of the
# factor {gamma^0, gamma^0} = 2 I, as it appears in Kahane's paper.
# Note: curly brackets are meant as in the paper, as a generalized
# multi-element anticommutator!
while links:
connected_components += 1
pointer = min(links.keys())
previous_pointer = pointer
# the inner loop erases the visited indices from `links`, and it adds
# all free indices to `prepend_indices` list, virtual indices are
# ignored.
prepend_indices = []
while True:
if pointer in links:
next_ones = links.pop(pointer)
else:
break
if previous_pointer in next_ones:
if len(next_ones) > 1:
next_ones.remove(previous_pointer)
previous_pointer = pointer
if next_ones:
pointer = next_ones[0]
if pointer >= first_dum_pos and free_pos[pointer] is not None:
prepend_indices.insert(0, free_pos[pointer])
# if `prepend_indices` is void, it means there are no free indices
# in the loop (and it can be shown that there must be a virtual index),
# loops of virtual indices only contribute by a factor of two:
if len(prepend_indices) == 0:
resulting_coeff *= 2
# otherwise, add the free indices in `prepend_indices` to
# the `resulting_indices`:
else:
expr1 = prepend_indices
expr2 = list(reversed(prepend_indices))
resulting_indices = [expri + ri for ri in resulting_indices for expri in (expr1, expr2)]
# sign correction, as described in Kahane's paper:
resulting_coeff *= -1 if (number_of_contractions - connected_components + 1) % 2 else 1
# power of two factor, as described in Kahane's paper:
resulting_coeff *= 2**(number_of_contractions)
        # If `first_dum_pos` is not zero, it means that there are leading free gamma
        # matrices in front of `expression`, so multiply by them:
for i in range(0, first_dum_pos):
[ri.insert(0, free_pos[i]) for ri in resulting_indices]
resulting_expr = S.Zero
for i in resulting_indices:
temp_expr = S.One
for j in i:
temp_expr *= GammaMatrix(j)
resulting_expr += temp_expr
t = resulting_coeff * resulting_expr
t1 = None
if isinstance(t, TensAdd):
t1 = t.args[0]
elif isinstance(t, TensMul):
t1 = t
if t1:
spinor_free1 = [_ for _ in t1._tids.free if _[1] != 0]
if spinor_free1:
if spinor_free:
t = t.substitute_indices((DiracSpinorIndex.auto_left, spinor_free[0][0]), (-DiracSpinorIndex.auto_right, spinor_free[-1][0]))
else:
# FIXME trace
t = t*DiracSpinorIndex.delta(DiracSpinorIndex.auto_right, -DiracSpinorIndex.auto_left)
t = GammaMatrix.simplify_lines(t)
else:
if spinor_free:
t = t*DiracSpinorIndex.delta(spinor_free[0][0], spinor_free[-1][0])
else:
t = t*4
else:
if spinor_free:
t = t*DiracSpinorIndex.delta(spinor_free[0][0], spinor_free[-1][0])
else:
t = t*4
return t
GammaMatrix = GammaMatrixHead()
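# Minimal usage sketch (mirrors the doctests above; illustrative only):
if __name__ == "__main__":
    from sympy.tensor.tensor import tensor_indices
    i0, i1 = tensor_indices('i0:2', GammaMatrix.LorentzIndex)
    # Tr(gamma^mu gamma^nu) = 4 * g^{mu nu}
    print(GammaMatrix.gamma_trace(GammaMatrix(i0)*GammaMatrix(i1)))  # 4*metric(i0, i1)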
| bsd-3-clause |
fhaoquan/kbengine | kbe/src/lib/python/Lib/tkinter/simpledialog.py | 94 | 11407 | #
# An Introduction to Tkinter
#
# Copyright (c) 1997 by Fredrik Lundh
#
# This copyright applies to Dialog, askinteger, askfloat and askstring
#
# [email protected]
# http://www.pythonware.com
#
"""This modules handles dialog boxes.
It contains the following public symbols:
SimpleDialog -- A simple but flexible modal dialog box
Dialog -- a base class for dialogs
askinteger -- get an integer from the user
askfloat -- get a float from the user
askstring -- get a string from the user
"""
from tkinter import *
from tkinter import messagebox
import tkinter # used at _QueryDialog for tkinter._default_root
class SimpleDialog:
def __init__(self, master,
text='', buttons=[], default=None, cancel=None,
title=None, class_=None):
if class_:
self.root = Toplevel(master, class_=class_)
else:
self.root = Toplevel(master)
if title:
self.root.title(title)
self.root.iconname(title)
self.message = Message(self.root, text=text, aspect=400)
self.message.pack(expand=1, fill=BOTH)
self.frame = Frame(self.root)
self.frame.pack()
self.num = default
self.cancel = cancel
self.default = default
self.root.bind('<Return>', self.return_event)
for num in range(len(buttons)):
s = buttons[num]
b = Button(self.frame, text=s,
command=(lambda self=self, num=num: self.done(num)))
if num == default:
b.config(relief=RIDGE, borderwidth=8)
b.pack(side=LEFT, fill=BOTH, expand=1)
self.root.protocol('WM_DELETE_WINDOW', self.wm_delete_window)
self._set_transient(master)
def _set_transient(self, master, relx=0.5, rely=0.3):
widget = self.root
widget.withdraw() # Remain invisible while we figure out the geometry
widget.transient(master)
widget.update_idletasks() # Actualize geometry information
if master.winfo_ismapped():
m_width = master.winfo_width()
m_height = master.winfo_height()
m_x = master.winfo_rootx()
m_y = master.winfo_rooty()
else:
m_width = master.winfo_screenwidth()
m_height = master.winfo_screenheight()
m_x = m_y = 0
w_width = widget.winfo_reqwidth()
w_height = widget.winfo_reqheight()
x = m_x + (m_width - w_width) * relx
y = m_y + (m_height - w_height) * rely
if x+w_width > master.winfo_screenwidth():
x = master.winfo_screenwidth() - w_width
elif x < 0:
x = 0
if y+w_height > master.winfo_screenheight():
y = master.winfo_screenheight() - w_height
elif y < 0:
y = 0
widget.geometry("+%d+%d" % (x, y))
widget.deiconify() # Become visible at the desired location
def go(self):
self.root.wait_visibility()
self.root.grab_set()
self.root.mainloop()
self.root.destroy()
return self.num
def return_event(self, event):
if self.default is None:
self.root.bell()
else:
self.done(self.default)
def wm_delete_window(self):
if self.cancel is None:
self.root.bell()
else:
self.done(self.cancel)
def done(self, num):
self.num = num
self.root.quit()
class Dialog(Toplevel):
'''Class to open dialogs.
This class is intended as a base class for custom dialogs
'''
def __init__(self, parent, title = None):
'''Initialize a dialog.
Arguments:
parent -- a parent window (the application window)
title -- the dialog title
'''
Toplevel.__init__(self, parent)
self.withdraw() # remain invisible for now
# If the master is not viewable, don't
# make the child transient, or else it
# would be opened withdrawn
if parent.winfo_viewable():
self.transient(parent)
if title:
self.title(title)
self.parent = parent
self.result = None
body = Frame(self)
self.initial_focus = self.body(body)
body.pack(padx=5, pady=5)
self.buttonbox()
if not self.initial_focus:
self.initial_focus = self
self.protocol("WM_DELETE_WINDOW", self.cancel)
if self.parent is not None:
self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
parent.winfo_rooty()+50))
self.deiconify() # become visible now
self.initial_focus.focus_set()
# wait for window to appear on screen before calling grab_set
self.wait_visibility()
self.grab_set()
self.wait_window(self)
def destroy(self):
'''Destroy the window'''
self.initial_focus = None
Toplevel.destroy(self)
#
# construction hooks
def body(self, master):
'''create dialog body.
return widget that should have initial focus.
This method should be overridden, and is called
by the __init__ method.
'''
pass
def buttonbox(self):
'''add standard button box.
override if you do not want the standard buttons
'''
box = Frame(self)
w = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE)
w.pack(side=LEFT, padx=5, pady=5)
w = Button(box, text="Cancel", width=10, command=self.cancel)
w.pack(side=LEFT, padx=5, pady=5)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.cancel)
box.pack()
#
# standard button semantics
def ok(self, event=None):
if not self.validate():
self.initial_focus.focus_set() # put focus back
return
self.withdraw()
self.update_idletasks()
try:
self.apply()
finally:
self.cancel()
def cancel(self, event=None):
# put focus back to the parent window
if self.parent is not None:
self.parent.focus_set()
self.destroy()
#
# command hooks
def validate(self):
'''validate the data
This method is called automatically to validate the data before the
dialog is destroyed. By default, it always validates OK.
'''
return 1 # override
def apply(self):
'''process the data
This method is called automatically to process the data, *after*
the dialog is destroyed. By default, it does nothing.
'''
pass # override
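# Sketch of a custom dialog built on the Dialog base class above (illustrative
# only; names are assumptions):
#
#   class _ExampleDialog(Dialog):
#       def body(self, master):
#           Label(master, text="Value:").grid(row=0)
#           self.entry = Entry(master)
#           self.entry.grid(row=0, column=1)
#           return self.entry            # widget that receives initial focus
#       def apply(self):
#           self.result = self.entry.get()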
# --------------------------------------------------------------------
# convenience dialogues
class _QueryDialog(Dialog):
def __init__(self, title, prompt,
initialvalue=None,
minvalue = None, maxvalue = None,
parent = None):
if not parent:
parent = tkinter._default_root
self.prompt = prompt
self.minvalue = minvalue
self.maxvalue = maxvalue
self.initialvalue = initialvalue
Dialog.__init__(self, parent, title)
def destroy(self):
self.entry = None
Dialog.destroy(self)
def body(self, master):
w = Label(master, text=self.prompt, justify=LEFT)
w.grid(row=0, padx=5, sticky=W)
self.entry = Entry(master, name="entry")
self.entry.grid(row=1, padx=5, sticky=W+E)
if self.initialvalue is not None:
self.entry.insert(0, self.initialvalue)
self.entry.select_range(0, END)
return self.entry
def validate(self):
try:
result = self.getresult()
except ValueError:
messagebox.showwarning(
"Illegal value",
self.errormessage + "\nPlease try again",
parent = self
)
return 0
if self.minvalue is not None and result < self.minvalue:
messagebox.showwarning(
"Too small",
"The allowed minimum value is %s. "
"Please try again." % self.minvalue,
parent = self
)
return 0
if self.maxvalue is not None and result > self.maxvalue:
messagebox.showwarning(
"Too large",
"The allowed maximum value is %s. "
"Please try again." % self.maxvalue,
parent = self
)
return 0
self.result = result
return 1
class _QueryInteger(_QueryDialog):
errormessage = "Not an integer."
def getresult(self):
return int(self.entry.get())
def askinteger(title, prompt, **kw):
'''get an integer from the user
Arguments:
title -- the dialog title
prompt -- the label text
**kw -- see SimpleDialog class
Return value is an integer
'''
d = _QueryInteger(title, prompt, **kw)
return d.result
class _QueryFloat(_QueryDialog):
errormessage = "Not a floating point value."
def getresult(self):
return float(self.entry.get())
def askfloat(title, prompt, **kw):
'''get a float from the user
Arguments:
title -- the dialog title
prompt -- the label text
**kw -- see SimpleDialog class
Return value is a float
'''
d = _QueryFloat(title, prompt, **kw)
return d.result
class _QueryString(_QueryDialog):
def __init__(self, *args, **kw):
if "show" in kw:
self.__show = kw["show"]
del kw["show"]
else:
self.__show = None
_QueryDialog.__init__(self, *args, **kw)
def body(self, master):
entry = _QueryDialog.body(self, master)
if self.__show is not None:
entry.configure(show=self.__show)
return entry
def getresult(self):
return self.entry.get()
def askstring(title, prompt, **kw):
'''get a string from the user
Arguments:
title -- the dialog title
prompt -- the label text
**kw -- see SimpleDialog class
Return value is a string
'''
d = _QueryString(title, prompt, **kw)
return d.result
if __name__ == '__main__':
def test():
root = Tk()
def doit(root=root):
d = SimpleDialog(root,
text="This is a test dialog. "
"Would this have been an actual dialog, "
"the buttons below would have been glowing "
"in soft pink light.\n"
"Do you believe this?",
buttons=["Yes", "No", "Cancel"],
default=0,
cancel=2,
title="Test Dialog")
print(d.go())
print(askinteger("Spam", "Egg count", initialvalue=12*12))
print(askfloat("Spam", "Egg weight\n(in tons)", minvalue=1,
maxvalue=100))
print(askstring("Spam", "Egg label"))
t = Button(root, text='Test', command=doit)
t.pack()
q = Button(root, text='Quit', command=t.quit)
q.pack()
t.mainloop()
test()
| lgpl-3.0 |
alirizakeles/zato | code/zato-server/src/zato/server/service/internal/email/smtp.py | 1 | 2958 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2014 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from contextlib import closing
from time import time
# Zato
from zato.common import SMTPMessage, version
from zato.common.broker_message import EMAIL
from zato.common.odb.model import SMTP
from zato.common.odb.query import email_smtp_list
from zato.server.service.internal import AdminService, AdminSIO, ChangePasswordBase
from zato.server.service.meta import CreateEditMeta, DeleteMeta, GetListMeta
elem = 'email_smtp'
model = SMTP
label = 'an SMTP connection'
broker_message = EMAIL
broker_message_prefix = 'SMTP_'
list_func = email_smtp_list
def instance_hook(service, input, instance, attrs):
instance.username = input.username or '' # So it's not stored as None/NULL
class GetList(AdminService):
_filter_by = SMTP.name,
__metaclass__ = GetListMeta
class Create(AdminService):
__metaclass__ = CreateEditMeta
class Edit(AdminService):
__metaclass__ = CreateEditMeta
class Delete(AdminService):
__metaclass__ = DeleteMeta
class ChangePassword(ChangePasswordBase):
""" Changes the password of an SMTP connection.
"""
password_required = False
class SimpleIO(ChangePasswordBase.SimpleIO):
request_elem = 'zato_email_smtp_change_password_request'
response_elem = 'zato_email_smtp_change_password_response'
def handle(self):
def _auth(instance, password):
instance.password = password
return self._handle(SMTP, _auth, EMAIL.SMTP_CHANGE_PASSWORD.value)
class Ping(AdminService):
class SimpleIO(AdminSIO):
request_elem = 'zato_email_smtp_ping_request'
response_elem = 'zato_email_smtp_ping_response'
input_required = ('id',)
output_required = ('info',)
def handle(self):
with closing(self.odb.session()) as session:
item = session.query(SMTP).filter_by(id=self.request.input.id).one()
msg = SMTPMessage()
msg.from_ = item.ping_address
msg.to = item.ping_address
msg.cc = item.ping_address
msg.bcc = item.ping_address
msg.subject = 'Zato SMTP ping (Α Β Γ Δ Ε Ζ Η)'.encode('utf-8')
msg.body = 'Hello from {}\nUTF-8 test: Α Β Γ Δ Ε Ζ Η'.encode('utf-8').format(version)
msg.headers['Charset'] = 'utf-8'
msg.attach('utf-8.txt', 'Α Β Γ Δ Ε Ζ Η Θ Ι Κ Λ Μ Ν Ξ Ο Π Ρ Σ Τ Υ Φ Χ Ψ Ω'.encode('utf-8'))
msg.attach('ascii.txt', 'A B C D E F G H I J K L M N O P Q R S T U V W X Y Z')
start_time = time()
self.email.smtp.get(item.name, True).conn.send(msg)
response_time = time() - start_time
self.response.payload.info = 'Ping submitted, took:`{0:03.4f} s`, check server logs for details.'.format(response_time)
| gpl-3.0 |
Cinntax/home-assistant | homeassistant/components/tellduslive/config_flow.py | 4 | 5271 | """Config flow for Tellduslive."""
import asyncio
import logging
import os
import async_timeout
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST
from homeassistant.util.json import load_json
from .const import (
APPLICATION_NAME,
CLOUD_NAME,
DOMAIN,
KEY_SCAN_INTERVAL,
KEY_SESSION,
NOT_SO_PRIVATE_KEY,
PUBLIC_KEY,
SCAN_INTERVAL,
TELLDUS_CONFIG_FILE,
)
KEY_TOKEN = "token"
KEY_TOKEN_SECRET = "token_secret"
_LOGGER = logging.getLogger(__name__)
@config_entries.HANDLERS.register("tellduslive")
class FlowHandler(config_entries.ConfigFlow):
"""Handle a config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Init config flow."""
self._hosts = [CLOUD_NAME]
self._host = None
self._session = None
self._scan_interval = SCAN_INTERVAL
def _get_auth_url(self):
from tellduslive import Session
self._session = Session(
public_key=PUBLIC_KEY,
private_key=NOT_SO_PRIVATE_KEY,
host=self._host,
application=APPLICATION_NAME,
)
return self._session.authorize_url
async def async_step_user(self, user_input=None):
"""Let user select host or cloud."""
if self.hass.config_entries.async_entries(DOMAIN):
return self.async_abort(reason="already_setup")
if user_input is not None or len(self._hosts) == 1:
if user_input is not None and user_input[CONF_HOST] != CLOUD_NAME:
self._host = user_input[CONF_HOST]
return await self.async_step_auth()
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{vol.Required(CONF_HOST): vol.In(list(self._hosts))}
),
)
async def async_step_auth(self, user_input=None):
"""Handle the submitted configuration."""
errors = {}
if user_input is not None:
if await self.hass.async_add_executor_job(self._session.authorize):
host = self._host or CLOUD_NAME
if self._host:
session = {CONF_HOST: host, KEY_TOKEN: self._session.access_token}
else:
session = {
KEY_TOKEN: self._session.access_token,
KEY_TOKEN_SECRET: self._session.access_token_secret,
}
return self.async_create_entry(
title=host,
data={
CONF_HOST: host,
KEY_SCAN_INTERVAL: self._scan_interval.seconds,
KEY_SESSION: session,
},
)
errors["base"] = "auth_error"
try:
with async_timeout.timeout(10):
auth_url = await self.hass.async_add_executor_job(self._get_auth_url)
if not auth_url:
return self.async_abort(reason="authorize_url_fail")
except asyncio.TimeoutError:
return self.async_abort(reason="authorize_url_timeout")
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected error generating auth url")
return self.async_abort(reason="authorize_url_fail")
_LOGGER.debug("Got authorization URL %s", auth_url)
return self.async_show_form(
step_id="auth",
errors=errors,
description_placeholders={
"app_name": APPLICATION_NAME,
"auth_url": auth_url,
},
)
async def async_step_discovery(self, user_input):
"""Run when a Tellstick is discovered."""
from tellduslive import supports_local_api
_LOGGER.info("Discovered tellstick device: %s", user_input)
if supports_local_api(user_input[1]):
_LOGGER.info("%s support local API", user_input[1])
self._hosts.append(user_input[0])
return await self.async_step_user()
async def async_step_import(self, user_input):
"""Import a config entry."""
if self.hass.config_entries.async_entries(DOMAIN):
return self.async_abort(reason="already_setup")
self._scan_interval = user_input[KEY_SCAN_INTERVAL]
if user_input[CONF_HOST] != DOMAIN:
self._hosts.append(user_input[CONF_HOST])
if not await self.hass.async_add_executor_job(
os.path.isfile, self.hass.config.path(TELLDUS_CONFIG_FILE)
):
return await self.async_step_user()
conf = await self.hass.async_add_executor_job(
load_json, self.hass.config.path(TELLDUS_CONFIG_FILE)
)
host = next(iter(conf))
if user_input[CONF_HOST] != host:
return await self.async_step_user()
host = CLOUD_NAME if host == "tellduslive" else host
return self.async_create_entry(
title=host,
data={
CONF_HOST: host,
KEY_SCAN_INTERVAL: self._scan_interval.seconds,
KEY_SESSION: next(iter(conf.values())),
},
)
| apache-2.0 |
firelab/windninja-mobile | WindNinja-Server/windninja_server/windninjaqueue/manager.py | 1 | 3293 | import sys
import time
import datetime
from operator import itemgetter
import subprocess
import windninja_server.windninjaqueue.windninja as wn
import windninja_server.windninjaqueue.queue as wnqueue
VERBOSE = False
CANCEL = False
LOOP_PAUSE = 5
MAX_RUNNING_JOBS = 5
# NOTES: this is a simple 'max' processes queue manager - first in, first out based on "created" time
# other possible enhancements short term:
# * user round robin
# * user priority
# * checks if processes are still running
# * CPU/RAM usage limits?
def write_stdout(s):
sys.stdout.write("[{}]:{}\n".format(datetime.datetime.now().isoformat(), s))
sys.stdout.flush()
def get_available_cores():
available_cores_proc = """mpstat -P ALL | cut -d" " -f10 | tail -n 8 | awk '$1 < 0.25 { print }' | sort -n | wc -l"""
available_cores = subprocess.check_output(available_cores_proc, shell=True)
available_cores = available_cores.decode("utf-8").strip()
return int(available_cores)
def main_loop(config):
# initialize configuration
global LOOP_PAUSE
LOOP_PAUSE = config.get("loop_interval", LOOP_PAUSE)
global MAX_RUNNING_JOBS
MAX_RUNNING_JOBS = config.get("max_running_jobs", MAX_RUNNING_JOBS)
wn.PYTHON_EXECUTABLE = config["windninja_wrapper"]["executable"]
wn.WN_WRAPPER = config["windninja_wrapper"]["script"]
wn.WN_WRAPPER_OPTIONS = config["windninja_wrapper"]["options"]
wnqueue.set_Queue(config)
# start the loop
while not CANCEL:
try:
time.sleep(LOOP_PAUSE)
except KeyboardInterrupt:
pass
except InterruptedError:
pass
try:
# get count of "current running jobs"
current_running = len(
wnqueue.find_items_by_status(wnqueue.QueueStatus.running)
)
# find pending jobs and sort by time created
available_jobs = MAX_RUNNING_JOBS - current_running
pending_jobs = wnqueue.find_items_by_status(wnqueue.QueueStatus.pending)
pending_jobs.sort(key=itemgetter("created"))
pending_job_count = len(pending_jobs)
if VERBOSE:
write_stdout(
"Jobs - running jobs: {0} ; available jobs: {1}; pending jobs:{2}".format(
current_running, available_jobs, pending_job_count
)
)
for job in pending_jobs:
if available_jobs > 0:
id = job["id"]
write_stdout("enqueue job: {}".format(id))
status, pid, message = wn.start_job(id)
wnqueue.update_queue_item_status(id, status, message)
if status == wnqueue.QueueStatus.running:
available_jobs -= 1
pending_job_count -= 1
else:
write_stdout("job [{}] failed to start: {}".format(id, message))
else:
write_stdout(
"Running jobs filled - remaining: {0}".format(pending_job_count)
)
break
except Exception as loop_ex:
write_stdout("Unhandled expection in main loop: {}".format(str(loop_ex)))
| unlicense |
sjlehtin/django | django/db/backends/postgresql/base.py | 12 | 10558 | """
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import threading
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.utils import DatabaseError as WrappedDatabaseError
from django.utils.functional import cached_property
from django.utils.safestring import SafeText
from django.utils.version import get_version_tuple
try:
import psycopg2 as Database
import psycopg2.extensions
import psycopg2.extras
except ImportError as e:
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
def psycopg2_version():
version = psycopg2.__version__.split(' ', 1)[0]
return get_version_tuple(version)
PSYCOPG2_VERSION = psycopg2_version()
if PSYCOPG2_VERSION < (2, 5, 4):
raise ImproperlyConfigured("psycopg2_version 2.5.4 or newer is required; you have %s" % psycopg2.__version__)
# Some of these import psycopg2, so import them after checking if it's installed.
from .client import DatabaseClient # NOQA isort:skip
from .creation import DatabaseCreation # NOQA isort:skip
from .features import DatabaseFeatures # NOQA isort:skip
from .introspection import DatabaseIntrospection # NOQA isort:skip
from .operations import DatabaseOperations # NOQA isort:skip
from .schema import DatabaseSchemaEditor # NOQA isort:skip
from .utils import utc_tzinfo_factory # NOQA isort:skip
psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString)
psycopg2.extras.register_uuid()
# Register support for inet[] manually so we don't have to handle the Inet()
# object on load all the time.
INETARRAY_OID = 1041
INETARRAY = psycopg2.extensions.new_array_type(
(INETARRAY_OID,),
'INETARRAY',
psycopg2.extensions.UNICODE,
)
psycopg2.extensions.register_type(INETARRAY)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
display_name = 'PostgreSQL'
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
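    # Illustrative note (not in the upstream file): rendering the CharField
    # entry below against a field with max_length=50, i.e.
    # 'varchar(%(max_length)s)' % {'max_length': 50}, yields 'varchar(50)'.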
data_types = {
'AutoField': 'serial',
'BigAutoField': 'bigserial',
'BinaryField': 'bytea',
'BooleanField': 'boolean',
'CharField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'interval',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'inet',
'GenericIPAddressField': 'inet',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer',
'PositiveSmallIntegerField': 'smallint',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'uuid',
}
data_type_check_constraints = {
'PositiveIntegerField': '"%(column)s" >= 0',
'PositiveSmallIntegerField': '"%(column)s" >= 0',
}
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
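    # Illustrative example (not in the upstream file): for a right-hand side
    # value of 50%_off, the REPLACE chain below has the database produce
    # 50\%\_off, so LIKE treats the percent sign and underscore literally.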
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': "LIKE '%%' || {} || '%%'",
'icontains': "LIKE '%%' || UPPER({}) || '%%'",
'startswith': "LIKE {} || '%%'",
'istartswith': "LIKE UPPER({}) || '%%'",
'endswith': "LIKE '%%' || {}",
'iendswith': "LIKE '%%' || UPPER({})",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
# PostgreSQL backend-specific attributes.
_named_cursor_idx = 0
def get_connection_params(self):
settings_dict = self.settings_dict
# None may be used to connect to the default 'postgres' db
if settings_dict['NAME'] == '':
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
conn_params = {
'database': settings_dict['NAME'] or 'postgres',
}
conn_params.update(settings_dict['OPTIONS'])
conn_params.pop('isolation_level', None)
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = settings_dict['PASSWORD']
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
return conn_params
def get_new_connection(self, conn_params):
connection = Database.connect(**conn_params)
# self.isolation_level must be set:
# - after connecting to the database in order to obtain the database's
# default when no value is explicitly specified in options.
# - before calling _set_autocommit() because if autocommit is on, that
# will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT.
options = self.settings_dict['OPTIONS']
try:
self.isolation_level = options['isolation_level']
except KeyError:
self.isolation_level = connection.isolation_level
else:
# Set the isolation level to the value from OPTIONS.
if self.isolation_level != connection.isolation_level:
connection.set_session(isolation_level=self.isolation_level)
return connection
def ensure_timezone(self):
self.ensure_connection()
conn_timezone_name = self.connection.get_parameter_status('TimeZone')
timezone_name = self.timezone_name
if timezone_name and conn_timezone_name != timezone_name:
with self.connection.cursor() as cursor:
cursor.execute(self.ops.set_time_zone_sql(), [timezone_name])
return True
return False
def init_connection_state(self):
self.connection.set_client_encoding('UTF8')
timezone_changed = self.ensure_timezone()
if timezone_changed:
# Commit after setting the time zone (see #17062)
if not self.get_autocommit():
self.connection.commit()
def create_cursor(self, name=None):
if name:
# In autocommit mode, the cursor will be used outside of a
# transaction, hence use a holdable cursor.
cursor = self.connection.cursor(name, scrollable=False, withhold=self.connection.autocommit)
else:
cursor = self.connection.cursor()
cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
return cursor
def chunked_cursor(self):
self._named_cursor_idx += 1
return self._cursor(
name='_django_curs_%d_%d' % (
# Avoid reusing name in other threads
threading.current_thread().ident,
self._named_cursor_idx,
)
)
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
Check constraints by setting them to immediate. Return them to deferred
afterward.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
# Use a psycopg cursor directly, bypassing Django's utilities.
self.connection.cursor().execute("SELECT 1")
except Database.Error:
return False
else:
return True
@property
def _nodb_connection(self):
nodb_connection = super()._nodb_connection
try:
nodb_connection.ensure_connection()
except (Database.DatabaseError, WrappedDatabaseError):
warnings.warn(
"Normally Django will use a connection to the 'postgres' database "
"to avoid running initialization queries against the production "
"database when it's not needed (for example, when running tests). "
"Django was unable to create a connection to the 'postgres' database "
"and will use the default database instead.",
RuntimeWarning
)
settings_dict = self.settings_dict.copy()
settings_dict['NAME'] = settings.DATABASES[DEFAULT_DB_ALIAS]['NAME']
nodb_connection = self.__class__(
self.settings_dict.copy(),
alias=self.alias,
allow_thread_sharing=False)
return nodb_connection
@cached_property
def pg_version(self):
with self.temporary_connection():
return self.connection.server_version
| bsd-3-clause |
bjolivot/ansible | lib/ansible/modules/cloud/cloudstack/cs_portforward.py | 15 | 14347 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_portforward
short_description: Manages port forwarding rules on Apache CloudStack based clouds.
description:
- Create, update and remove port forwarding rules.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
ip_address:
description:
- Public IP address the rule is assigned to.
required: true
vm:
description:
- Name of virtual machine which we make the port forwarding rule for.
- Required if C(state=present).
required: false
default: null
state:
description:
- State of the port forwarding rule.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
protocol:
description:
- Protocol of the port forwarding rule.
required: false
default: 'tcp'
choices: [ 'tcp', 'udp' ]
public_port:
description:
- Start public port for this rule.
required: true
public_end_port:
description:
- End public port for this rule.
- If not specified equal C(public_port).
required: false
default: null
private_port:
description:
- Start private port for this rule.
required: true
private_end_port:
description:
- End private port for this rule.
- If not specified equal C(private_port).
required: false
default: null
open_firewall:
description:
- Whether the firewall rule for public port should be created, while creating the new rule.
- Use M(cs_firewall) for managing firewall rules.
required: false
default: false
vm_guest_ip:
description:
- VM guest NIC secondary IP address for the port forwarding rule.
required: false
default: false
network:
description:
- Name of the network.
required: false
default: null
version_added: "2.3"
vpc:
description:
- Name of the VPC.
required: false
default: null
version_added: "2.3"
domain:
description:
- Domain the C(vm) is related to.
required: false
default: null
account:
description:
- Account the C(vm) is related to.
required: false
default: null
project:
description:
- Name of the project the C(vm) is located in.
required: false
default: null
zone:
description:
- Name of the zone in which the virtual machine is in.
- If not set, default zone is used.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- "To delete all tags, set a empty list e.g. C(tags: [])."
required: false
default: null
aliases: [ 'tag' ]
version_added: "2.4"
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# 1.2.3.4:80 -> web01:8080
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: web01
public_port: 80
private_port: 8080
# forward SSH and open firewall
- local_action:
module: cs_portforward
ip_address: '{{ public_ip }}'
vm: '{{ inventory_hostname }}'
public_port: '{{ ansible_ssh_port }}'
private_port: 22
open_firewall: true
# forward DNS traffic, but do not open firewall
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: '{{ inventory_hostname }}'
public_port: 53
private_port: 53
protocol: udp
# remove ssh port forwarding
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
public_port: 22
private_port: 22
state: absent
'''
RETURN = '''
---
id:
description: UUID of the public IP address.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
ip_address:
description: Public IP address.
returned: success
type: string
sample: 1.2.3.4
protocol:
description: Protocol.
returned: success
type: string
sample: tcp
private_port:
description: Start port on the virtual machine's IP address.
returned: success
type: int
sample: 80
private_end_port:
description: End port on the virtual machine's IP address.
returned: success
type: int
public_port:
description: Start port on the public IP address.
returned: success
type: int
sample: 80
public_end_port:
description: End port on the public IP address.
returned: success
type: int
sample: 80
tags:
description: Tags related to the port forwarding.
returned: success
type: list
sample: []
vm_name:
description: Name of the virtual machine.
returned: success
type: string
sample: web-01
vm_display_name:
description: Display name of the virtual machine.
returned: success
type: string
sample: web-01
vm_guest_ip:
description: IP of the virtual machine.
returned: success
type: string
sample: 10.101.65.152
vpc:
description: Name of the VPC.
version_added: "2.3"
returned: success
type: string
sample: my_vpc
network:
description: Name of the network.
version_added: "2.3"
returned: success
type: string
sample: dmz
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackPortforwarding, self).__init__(module)
self.returns = {
'virtualmachinedisplayname': 'vm_display_name',
'virtualmachinename': 'vm_name',
'ipaddress': 'ip_address',
'vmguestip': 'vm_guest_ip',
'publicip': 'public_ip',
'protocol': 'protocol',
}
# these values will be casted to int
self.returns_to_int = {
'publicport': 'public_port',
'publicendport': 'public_end_port',
'privateport': 'private_port',
'privateendport': 'private_end_port',
}
self.portforwarding_rule = None
def get_portforwarding_rule(self):
if not self.portforwarding_rule:
protocol = self.module.params.get('protocol')
public_port = self.module.params.get('public_port')
public_end_port = self.get_or_fallback('public_end_port', 'public_port')
private_port = self.module.params.get('private_port')
private_end_port = self.get_or_fallback('private_end_port', 'private_port')
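            # get_or_fallback() presumably returns the named parameter when it
            # is set and otherwise falls back to the second one, so an unset
            # *_end_port defaults to the matching start port.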
args = {}
args['ipaddressid'] = self.get_ip_address(key='id')
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
portforwarding_rules = self.cs.listPortForwardingRules(**args)
if portforwarding_rules and 'portforwardingrule' in portforwarding_rules:
for rule in portforwarding_rules['portforwardingrule']:
if (protocol == rule['protocol'] and
public_port == int(rule['publicport'])):
self.portforwarding_rule = rule
break
return self.portforwarding_rule
def present_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
portforwarding_rule = self.update_portforwarding_rule(portforwarding_rule)
else:
portforwarding_rule = self.create_portforwarding_rule()
if portforwarding_rule:
portforwarding_rule = self.ensure_tags(resource=portforwarding_rule, resource_type='PortForwardingRule')
self.portforwarding_rule=portforwarding_rule
return portforwarding_rule
def create_portforwarding_rule(self):
args = {}
args['protocol'] = self.module.params.get('protocol')
args['publicport'] = self.module.params.get('public_port')
args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
args['privateport'] = self.module.params.get('private_port')
args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
args['openfirewall'] = self.module.params.get('open_firewall')
args['vmguestip'] = self.get_vm_guest_ip()
args['ipaddressid'] = self.get_ip_address(key='id')
args['virtualmachineid'] = self.get_vm(key='id')
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['networkid'] = self.get_network(key='id')
portforwarding_rule = None
self.result['changed'] = True
if not self.module.check_mode:
portforwarding_rule = self.cs.createPortForwardingRule(**args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def update_portforwarding_rule(self, portforwarding_rule):
args = {}
args['protocol'] = self.module.params.get('protocol')
args['publicport'] = self.module.params.get('public_port')
args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
args['privateport'] = self.module.params.get('private_port')
args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
args['vmguestip'] = self.get_vm_guest_ip()
args['ipaddressid'] = self.get_ip_address(key='id')
args['virtualmachineid'] = self.get_vm(key='id')
args['networkid'] = self.get_network(key='id')
if self.has_changed(args, portforwarding_rule):
self.result['changed'] = True
if not self.module.check_mode:
                # API appears broken in 4.2.1; work around it by using remove/create instead of update
# portforwarding_rule = self.cs.updatePortForwardingRule(**args)
self.absent_portforwarding_rule()
portforwarding_rule = self.cs.createPortForwardingRule(**args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def absent_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
self.result['changed'] = True
args = {}
args['id'] = portforwarding_rule['id']
if not self.module.check_mode:
res = self.cs.deletePortForwardingRule(**args)
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'portforwardingrule')
return portforwarding_rule
def get_result(self, portforwarding_rule):
super(AnsibleCloudStackPortforwarding, self).get_result(portforwarding_rule)
network_name = self.get_network(key='name')
if network_name:
self.result['network'] = network_name
vpc_name = self.get_vpc(key='name')
if vpc_name:
self.result['vpc'] = vpc_name
if portforwarding_rule:
for search_key, return_key in self.returns_to_int.items():
if search_key in portforwarding_rule:
self.result[return_key] = int(portforwarding_rule[search_key])
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
ip_address = dict(required=True),
protocol= dict(choices=['tcp', 'udp'], default='tcp'),
public_port = dict(type='int', required=True),
public_end_port = dict(type='int', default=None),
private_port = dict(type='int', required=True),
private_end_port = dict(type='int', default=None),
state = dict(choices=['present', 'absent'], default='present'),
open_firewall = dict(type='bool', default=False),
vm_guest_ip = dict(default=None),
vm = dict(default=None),
vpc = dict(default=None),
network = dict(default=None),
zone = dict(default=None),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(type='bool', default=True),
tags=dict(type='list', aliases=['tag'], default=None),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
try:
acs_pf = AnsibleCloudStackPortforwarding(module)
state = module.params.get('state')
if state in ['absent']:
pf_rule = acs_pf.absent_portforwarding_rule()
else:
pf_rule = acs_pf.present_portforwarding_rule()
result = acs_pf.get_result(pf_rule)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
saisankargochhayat/algo_quest | techdevguide/longestsubsequence.py | 1 | 3538 | # The Challenge
# Given a string S and a set of words D, find the longest word in D that is a subsequence of S.
# Word W is a subsequence of S if some number of characters, possibly zero, can be deleted from S to form W, without reordering the remaining characters.
# Note: D can appear in any format (list, hash table, prefix tree, etc.).
# For example, given the input of S = "abppplee" and D = {"able", "ale", "apple", "bale", "kangaroo"} the correct output would be "apple"
# The words "able" and "ale" are both subsequences of S, but they are shorter than "apple".
# The word "bale" is not a subsequence of S because even though S has all the right letters, they are not in the right order.
# The word "kangaroo" is the longest word in D, but it isn't a subsequence of S.
# Learning objectives
# This question gives you the chance to practice with algorithms and data structures. It’s also a good example of why careful analysis for Big-O performance is often worthwhile, as is careful exploration of common and worst-case input conditions.
from collections import defaultdict
def findLongestSubSeq(S: str, D: list):
    # Map each character of S to the sorted list of indices at which it occurs.
    sMap = defaultdict(list)
    for i, c in enumerate(S):
        sMap[c].append(i)
    # Try candidate words from longest to shortest; the first subsequence wins.
    for word in sorted(D, key=len, reverse=True):
        pos = 0  # next index in S that is still available for matching
        matched = True
        for ch in word:
            # First occurrence of ch in S at or after pos, or None if there is none.
            nextPos = next((j for j in sMap[ch] if j >= pos), None)
            if nextPos is None:
                matched = False
                break
            pos = nextPos + 1  # later letters must appear strictly after this match
        if matched:
            return word
    return None
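# Quick sanity checks (illustrative):
#   findLongestSubSeq("abppplee", ["able", "ale", "apple", "bale", "kangaroo"]) -> "apple"
#   findLongestSubSeq("ab", ["bb"]) -> None, since "ab" offers only one 'b' and
#   subsequence matching must consume strictly increasing positions in S.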
#!/usr/bin/env python
# import collections
# import sys
# def find_longest_word_in_string(letters, words):
# letter_positions = collections.defaultdict(list)
# # For each letter in 'letters', collect all the indices at which it appears.
# # O(#letters) space and speed.
# for index, letter in enumerate(letters):
# letter_positions[letter].append(index)
# # For words, in descending order by length...
# # Bails out early on first matched word, and within word on
# # impossible letter/position combinations, but worst case is
# # O(#words # avg-len) * O(#letters / 26) time; constant space.
# # With some work, could be O(#W * avg-len) * log2(#letters/26)
# # But since binary search has more overhead
# # than simple iteration, log2(#letters) is about as
# # expensive as simple iterations as long as
# # the length of the arrays for each letter is
# # “small”. If letters are randomly present in the
# # search string, the log2 is about equal in speed to simple traversal
# # up to lengths of a few hundred characters.
# for word in sorted(words, key=lambda w: len(w), reverse=True):
# pos = 0
# for letter in word:
# if letter not in letter_positions:
# break
# # Find any remaining valid positions in search string where this
# # letter appears. It would be better to do this with binary search,
# # but this is very Python-ic.
# possible_positions = [p for p in letter_positions[letter] if p >= pos]
# if not possible_positions:
# break
# pos = possible_positions[0] + 1
# else:
# # We didn't break out of the loop, so all letters have valid positions
# return word
print(findLongestSubSeq("abppplee", ["able", "ale", "apple", "bale", "kangaroo"])) | apache-2.0 |
rushiagr/keystone | keystone/common/models.py | 20 | 3286 | # Copyright (C) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base model for keystone internal services
Unless marked otherwise, all fields are strings.
"""
class Model(dict):
"""Base model class."""
def __hash__(self):
return self['id'].__hash__()
@property
def known_keys(cls):
return cls.required_keys + cls.optional_keys
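# Illustrative usage (not part of the original module): since each concrete
# model lists its own required/optional keys, an instance such as
#   user = User(id='u1', name='alice', domain_id='default')
# exposes user.known_keys == User.required_keys + User.optional_keys.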
class Token(Model):
"""Token object.
Required keys:
id
expires (datetime)
Optional keys:
user
tenant
metadata
trust_id
"""
required_keys = ('id', 'expires')
optional_keys = ('extra',)
class Service(Model):
"""Service object.
Required keys:
id
type
name
Optional keys:
"""
required_keys = ('id', 'type', 'name')
optional_keys = tuple()
class Endpoint(Model):
"""Endpoint object
Required keys:
id
region
service_id
Optional keys:
internalurl
publicurl
adminurl
"""
required_keys = ('id', 'region', 'service_id')
optional_keys = ('internalurl', 'publicurl', 'adminurl')
class User(Model):
"""User object.
Required keys:
id
name
domain_id
Optional keys:
password
description
email
enabled (bool, default True)
default_project_id
"""
required_keys = ('id', 'name', 'domain_id')
optional_keys = ('password', 'description', 'email', 'enabled',
'default_project_id')
class Group(Model):
"""Group object.
Required keys:
id
name
domain_id
Optional keys:
description
"""
required_keys = ('id', 'name', 'domain_id')
optional_keys = ('description',)
class Project(Model):
"""Project object.
Required keys:
id
name
domain_id
Optional Keys:
description
enabled (bool, default True)
"""
required_keys = ('id', 'name', 'domain_id')
optional_keys = ('description', 'enabled')
class Role(Model):
"""Role object.
Required keys:
id
name
"""
required_keys = ('id', 'name')
optional_keys = tuple()
class Trust(Model):
"""Trust object.
Required keys:
id
trustor_user_id
trustee_user_id
project_id
"""
required_keys = ('id', 'trustor_user_id', 'trustee_user_id', 'project_id')
optional_keys = ('expires_at',)
class Domain(Model):
"""Domain object.
Required keys:
id
name
Optional keys:
description
enabled (bool, default True)
"""
required_keys = ('id', 'name')
optional_keys = ('description', 'enabled')
| apache-2.0 |