filename | text |
---|---|
the-stack_106_31381 | """Rewrite assertion AST to produce nice error messages"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import errno
import imp
import itertools
import marshal
import os
import re
import string
import struct
import sys
import types
import atomicwrites
import py
import six
from _pytest._io.saferepr import saferepr
from _pytest.assertion import util
from _pytest.compat import spec_from_file_location
from _pytest.pathlib import fnmatch_ex
from _pytest.pathlib import PurePath
# pytest caches rewritten pycs in __pycache__.
if hasattr(imp, "get_tag"):
PYTEST_TAG = imp.get_tag() + "-PYTEST"
else:
if hasattr(sys, "pypy_version_info"):
impl = "pypy"
elif sys.platform == "java":
impl = "jython"
else:
impl = "cpython"
ver = sys.version_info
PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
del ver, impl
PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
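# For example, on a CPython 3.7 interpreter imp.get_tag() would return "cpython-37",
# giving PYTEST_TAG == "cpython-37-PYTEST" and PYC_TAIL == ".cpython-37-PYTEST.pyc".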
ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
if sys.version_info >= (3, 5):
ast_Call = ast.Call
else:
def ast_Call(a, b, c):
return ast.Call(a, b, c, None, None)
class AssertionRewritingHook(object):
"""PEP302 Import hook which rewrites asserts."""
def __init__(self, config):
self.config = config
self.fnpats = config.getini("python_files")
self.session = None
self.modules = {}
self._rewritten_names = set()
self._register_with_pkg_resources()
self._must_rewrite = set()
# flag to guard against trying to rewrite a pyc file while we are already writing another pyc file,
# which might result in infinite recursion (#3506)
self._writing_pyc = False
self._basenames_to_check_rewrite = {"conftest"}
self._marked_for_rewrite_cache = {}
self._session_paths_checked = False
def set_session(self, session):
self.session = session
self._session_paths_checked = False
def _imp_find_module(self, name, path=None):
"""Indirection so we can mock calls to find_module originated from the hook during testing"""
return imp.find_module(name, path)
def find_module(self, name, path=None):
if self._writing_pyc:
return None
state = self.config._assertstate
if self._early_rewrite_bailout(name, state):
return None
state.trace("find_module called for: %s" % name)
names = name.rsplit(".", 1)
lastname = names[-1]
pth = None
if path is not None:
# Starting with Python 3.3, path is a _NamespacePath(), which
# causes problems if not converted to list.
path = list(path)
if len(path) == 1:
pth = path[0]
if pth is None:
try:
fd, fn, desc = self._imp_find_module(lastname, path)
except ImportError:
return None
if fd is not None:
fd.close()
tp = desc[2]
if tp == imp.PY_COMPILED:
if hasattr(imp, "source_from_cache"):
try:
fn = imp.source_from_cache(fn)
except ValueError:
# Python 3 doesn't like orphaned but still-importable
# .pyc files.
fn = fn[:-1]
else:
fn = fn[:-1]
elif tp != imp.PY_SOURCE:
# Don't know what this is.
return None
else:
fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
fn_pypath = py.path.local(fn)
if not self._should_rewrite(name, fn_pypath, state):
return None
self._rewritten_names.add(name)
# The requested module looks like a test file, so rewrite it. This is
# the most magical part of the process: load the source, rewrite the
# asserts, and load the rewritten source. We also cache the rewritten
# module code in a special pyc. We must be aware of the possibility of
# concurrent pytest processes rewriting and loading pycs. To avoid
# tricky race conditions, we maintain the following invariant: The
# cached pyc is always a complete, valid pyc. Operations on it must be
# atomic. POSIX's atomic rename comes in handy.
write = not sys.dont_write_bytecode
cache_dir = os.path.join(fn_pypath.dirname, "__pycache__")
if write:
try:
os.mkdir(cache_dir)
except OSError:
e = sys.exc_info()[1].errno
if e == errno.EEXIST:
# Either the __pycache__ directory already exists (the
# common case) or it's blocked by a non-dir node. In the
# latter case, we'll ignore it in _write_pyc.
pass
elif e in [errno.ENOENT, errno.ENOTDIR]:
# One of the path components was not a directory, likely
# because we're in a zip file.
write = False
elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:
state.trace("read only directory: %r" % fn_pypath.dirname)
write = False
else:
raise
cache_name = fn_pypath.basename[:-3] + PYC_TAIL
pyc = os.path.join(cache_dir, cache_name)
# Notice that even if we're in a read-only directory, I'm going
# to check for a cached pyc. This may not be optimal...
co = _read_pyc(fn_pypath, pyc, state.trace)
if co is None:
state.trace("rewriting %r" % (fn,))
source_stat, co = _rewrite_test(self.config, fn_pypath)
if co is None:
# Probably a SyntaxError in the test.
return None
if write:
self._writing_pyc = True
try:
_write_pyc(state, co, source_stat, pyc)
finally:
self._writing_pyc = False
else:
state.trace("found cached rewritten pyc for %r" % (fn,))
self.modules[name] = co, pyc
return self
def _early_rewrite_bailout(self, name, state):
"""
This is a fast way to get out of rewriting modules. Profiling has
shown that the call to imp.find_module (inside of the find_module
from this class) is a major slowdown, so, this method tries to
filter what we're sure won't be rewritten before getting to it.
"""
if self.session is not None and not self._session_paths_checked:
self._session_paths_checked = True
for path in self.session._initialpaths:
                # Turn something like c:/projects/my_project/path.py into
# ['c:', 'projects', 'my_project', 'path.py']
parts = str(path).split(os.path.sep)
# add 'path' to basenames to be checked.
self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0])
# Note: conftest already by default in _basenames_to_check_rewrite.
parts = name.split(".")
if parts[-1] in self._basenames_to_check_rewrite:
return False
# For matching the name it must be as if it was a filename.
path = PurePath(os.path.sep.join(parts) + ".py")
for pat in self.fnpats:
# if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based
# on the name alone because we need to match against the full path
if os.path.dirname(pat):
return False
if fnmatch_ex(pat, path):
return False
if self._is_marked_for_rewrite(name, state):
return False
state.trace("early skip of rewriting module: %s" % (name,))
return True
def _should_rewrite(self, name, fn_pypath, state):
# always rewrite conftest files
fn = str(fn_pypath)
if fn_pypath.basename == "conftest.py":
state.trace("rewriting conftest file: %r" % (fn,))
return True
if self.session is not None:
if self.session.isinitpath(fn):
state.trace("matched test file (was specified on cmdline): %r" % (fn,))
return True
# modules not passed explicitly on the command line are only
# rewritten if they match the naming convention for test files
for pat in self.fnpats:
if fn_pypath.fnmatch(pat):
state.trace("matched test file %r" % (fn,))
return True
return self._is_marked_for_rewrite(name, state)
def _is_marked_for_rewrite(self, name, state):
try:
return self._marked_for_rewrite_cache[name]
except KeyError:
for marked in self._must_rewrite:
if name == marked or name.startswith(marked + "."):
state.trace("matched marked file %r (from %r)" % (name, marked))
self._marked_for_rewrite_cache[name] = True
return True
self._marked_for_rewrite_cache[name] = False
return False
def mark_rewrite(self, *names):
"""Mark import names as needing to be rewritten.
The named module or package as well as any nested modules will
be rewritten on import.
"""
already_imported = (
set(names).intersection(sys.modules).difference(self._rewritten_names)
)
for name in already_imported:
if not AssertionRewriter.is_rewrite_disabled(
sys.modules[name].__doc__ or ""
):
self._warn_already_imported(name)
self._must_rewrite.update(names)
self._marked_for_rewrite_cache.clear()
def _warn_already_imported(self, name):
from _pytest.warning_types import PytestWarning
from _pytest.warnings import _issue_warning_captured
_issue_warning_captured(
PytestWarning("Module already imported so cannot be rewritten: %s" % name),
self.config.hook,
stacklevel=5,
)
def load_module(self, name):
co, pyc = self.modules.pop(name)
if name in sys.modules:
# If there is an existing module object named 'fullname' in
# sys.modules, the loader must use that existing module. (Otherwise,
# the reload() builtin will not work correctly.)
mod = sys.modules[name]
else:
# I wish I could just call imp.load_compiled here, but __file__ has to
# be set properly. In Python 3.2+, this all would be handled correctly
# by load_compiled.
mod = sys.modules[name] = imp.new_module(name)
try:
mod.__file__ = co.co_filename
# Normally, this attribute is 3.2+.
mod.__cached__ = pyc
mod.__loader__ = self
# Normally, this attribute is 3.4+
mod.__spec__ = spec_from_file_location(name, co.co_filename, loader=self)
six.exec_(co, mod.__dict__)
except: # noqa
if name in sys.modules:
del sys.modules[name]
raise
return sys.modules[name]
def is_package(self, name):
try:
fd, fn, desc = self._imp_find_module(name)
except ImportError:
return False
if fd is not None:
fd.close()
tp = desc[2]
return tp == imp.PKG_DIRECTORY
@classmethod
def _register_with_pkg_resources(cls):
"""
Ensure package resources can be loaded from this loader. May be called
multiple times, as the operation is idempotent.
"""
try:
import pkg_resources
# access an attribute in case a deferred importer is present
pkg_resources.__name__
except ImportError:
return
# Since pytest tests are always located in the file system, the
# DefaultProvider is appropriate.
pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
def get_data(self, pathname):
"""Optional PEP302 get_data API.
"""
with open(pathname, "rb") as f:
return f.read()
def _write_pyc(state, co, source_stat, pyc):
# Technically, we don't have to have the same pyc format as
# (C)Python, since these "pycs" should never be seen by builtin
    # import. However, there's little reason to deviate, and I hope
# sometime to be able to use imp.load_compiled to load them. (See
# the comment in load_module above.)
try:
with atomicwrites.atomic_write(pyc, mode="wb", overwrite=True) as fp:
fp.write(imp.get_magic())
# as of now, bytecode header expects 32-bit numbers for size and mtime (#4903)
mtime = int(source_stat.mtime) & 0xFFFFFFFF
size = source_stat.size & 0xFFFFFFFF
# "<LL" stands for 2 unsigned longs, little-ending
fp.write(struct.pack("<LL", mtime, size))
fp.write(marshal.dumps(co))
except EnvironmentError as e:
state.trace("error writing pyc file at %s: errno=%s" % (pyc, e.errno))
# we ignore any failure to write the cache file
# there are many reasons, permission-denied, __pycache__ being a
# file etc.
return False
return True
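# Roughly, the cached pyc written above has this layout (assuming the usual CPython-style
# header; the exact magic bytes depend on the interpreter):
#   bytes 0-3    imp.get_magic()
#   bytes 4-7    struct.pack("<L", mtime & 0xFFFFFFFF)
#   bytes 8-11   struct.pack("<L", size & 0xFFFFFFFF)
#   bytes 12-    marshal.dumps(co)
# _read_pyc() below validates exactly this 12-byte header before unmarshalling the code object.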
RN = "\r\n".encode("utf-8")
N = "\n".encode("utf-8")
cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
BOM_UTF8 = "\xef\xbb\xbf"
def _rewrite_test(config, fn):
"""Try to read and rewrite *fn* and return the code object."""
state = config._assertstate
try:
stat = fn.stat()
source = fn.read("rb")
except EnvironmentError:
return None, None
if ASCII_IS_DEFAULT_ENCODING:
# ASCII is the default encoding in Python 2. Without a coding
# declaration, Python 2 will complain about any bytes in the file
# outside the ASCII range. Sadly, this behavior does not extend to
# compile() or ast.parse(), which prefer to interpret the bytes as
# latin-1. (At least they properly handle explicit coding cookies.) To
# preserve this error behavior, we could force ast.parse() to use ASCII
# as the encoding by inserting a coding cookie. Unfortunately, that
# messes up line numbers. Thus, we have to check ourselves if anything
# is outside the ASCII range in the case no encoding is explicitly
# declared. For more context, see issue #269. Yay for Python 3 which
# gets this right.
end1 = source.find("\n")
end2 = source.find("\n", end1 + 1)
if (
not source.startswith(BOM_UTF8)
and cookie_re.match(source[0:end1]) is None
and cookie_re.match(source[end1 + 1 : end2]) is None
):
if hasattr(state, "_indecode"):
# encodings imported us again, so don't rewrite.
return None, None
state._indecode = True
try:
try:
source.decode("ascii")
except UnicodeDecodeError:
# Let it fail in real import.
return None, None
finally:
del state._indecode
try:
tree = ast.parse(source, filename=fn.strpath)
except SyntaxError:
# Let this pop up again in the real import.
state.trace("failed to parse: %r" % (fn,))
return None, None
rewrite_asserts(tree, fn, config)
try:
co = compile(tree, fn.strpath, "exec", dont_inherit=True)
except SyntaxError:
# It's possible that this error is from some bug in the
# assertion rewriting, but I don't know of a fast way to tell.
state.trace("failed to compile: %r" % (fn,))
return None, None
return stat, co
def _read_pyc(source, pyc, trace=lambda x: None):
"""Possibly read a pytest pyc containing rewritten code.
Return rewritten code if successful or None if not.
"""
try:
fp = open(pyc, "rb")
except IOError:
return None
with fp:
try:
mtime = int(source.mtime())
size = source.size()
data = fp.read(12)
except EnvironmentError as e:
trace("_read_pyc(%s): EnvironmentError %s" % (source, e))
return None
# Check for invalid or out of date pyc file.
if (
len(data) != 12
or data[:4] != imp.get_magic()
or struct.unpack("<LL", data[4:]) != (mtime & 0xFFFFFFFF, size & 0xFFFFFFFF)
):
trace("_read_pyc(%s): invalid or out of date pyc" % source)
return None
try:
co = marshal.load(fp)
except Exception as e:
trace("_read_pyc(%s): marshal.load error %s" % (source, e))
return None
if not isinstance(co, types.CodeType):
trace("_read_pyc(%s): not a code object" % source)
return None
return co
def rewrite_asserts(mod, module_path=None, config=None):
"""Rewrite the assert statements in mod."""
AssertionRewriter(module_path, config).run(mod)
def _saferepr(obj):
"""Get a safe repr of an object for assertion error messages.
The assertion formatting (util.format_explanation()) requires
newlines to be escaped since they are a special character for it.
Normally assertion.util.format_explanation() does this but for a
custom repr it is possible to contain one of the special escape
sequences, especially '\n{' and '\n}' are likely to be present in
JSON reprs.
"""
r = saferepr(obj)
# only occurs in python2.x, repr must return text in python3+
if isinstance(r, bytes):
# Represent unprintable bytes as `\x##`
r = u"".join(
u"\\x{:x}".format(ord(c)) if c not in string.printable else c.decode()
for c in r
)
return r.replace(u"\n", u"\\n")
from _pytest.assertion.util import format_explanation as _format_explanation # noqa
def _format_assertmsg(obj):
"""Format the custom assertion message given.
For strings this simply replaces newlines with '\n~' so that
util.format_explanation() will preserve them instead of escaping
newlines. For other objects saferepr() is used first.
"""
# reprlib appears to have a bug which means that if a string
# contains a newline it gets escaped, however if an object has a
# .__repr__() which contains newlines it does not get escaped.
# However in either case we want to preserve the newline.
replaces = [(u"\n", u"\n~"), (u"%", u"%%")]
if not isinstance(obj, six.string_types):
obj = saferepr(obj)
replaces.append((u"\\n", u"\n~"))
if isinstance(obj, bytes):
replaces = [(r1.encode(), r2.encode()) for r1, r2 in replaces]
for r1, r2 in replaces:
obj = obj.replace(r1, r2)
return obj
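# A quick sketch of what the helper above does (inputs chosen for illustration):
#   _format_assertmsg(u"foo\nbar")  -> u"foo\n~bar"    # newline preserved for format_explanation
#   _format_assertmsg(u"50% done")  -> u"50%% done"    # "%" escaped for later %-formatting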
def _should_repr_global_name(obj):
if callable(obj):
return False
try:
return not hasattr(obj, "__name__")
except Exception:
return True
def _format_boolop(explanations, is_or):
explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")"
if isinstance(explanation, six.text_type):
return explanation.replace(u"%", u"%%")
else:
return explanation.replace(b"%", b"%%")
def _call_reprcompare(ops, results, expls, each_obj):
for i, res, expl in zip(range(len(ops)), results, expls):
try:
done = not res
except Exception:
done = True
if done:
break
if util._reprcompare is not None:
custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
if custom is not None:
return custom
return expl
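# Illustration of the helper above for a chained comparison such as `1 < 5 < 3`: the rewritten
# assertion would call roughly
#   _call_reprcompare(("<", "<"), (True, False), ("1 < 5", "5 < 3"), (1, 5, 3))
# which stops at the first failing step and returns its explanation ("5 < 3"), unless a custom
# util._reprcompare hook supplies its own representation.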
unary_map = {ast.Not: "not %s", ast.Invert: "~%s", ast.USub: "-%s", ast.UAdd: "+%s"}
binop_map = {
ast.BitOr: "|",
ast.BitXor: "^",
ast.BitAnd: "&",
ast.LShift: "<<",
ast.RShift: ">>",
ast.Add: "+",
ast.Sub: "-",
ast.Mult: "*",
ast.Div: "/",
ast.FloorDiv: "//",
ast.Mod: "%%", # escaped for string formatting
ast.Eq: "==",
ast.NotEq: "!=",
ast.Lt: "<",
ast.LtE: "<=",
ast.Gt: ">",
ast.GtE: ">=",
ast.Pow: "**",
ast.Is: "is",
ast.IsNot: "is not",
ast.In: "in",
ast.NotIn: "not in",
}
# Python 3.5+ compatibility
try:
binop_map[ast.MatMult] = "@"
except AttributeError:
pass
# Python 3.4+ compatibility
if hasattr(ast, "NameConstant"):
_NameConstant = ast.NameConstant
else:
def _NameConstant(c):
return ast.Name(str(c), ast.Load())
def set_location(node, lineno, col_offset):
"""Set node location information recursively."""
def _fix(node, lineno, col_offset):
if "lineno" in node._attributes:
node.lineno = lineno
if "col_offset" in node._attributes:
node.col_offset = col_offset
for child in ast.iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, lineno, col_offset)
return node
class AssertionRewriter(ast.NodeVisitor):
"""Assertion rewriting implementation.
The main entrypoint is to call .run() with an ast.Module instance,
this will then find all the assert statements and rewrite them to
provide intermediate values and a detailed assertion error. See
http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html
for an overview of how this works.
The entry point here is .run() which will iterate over all the
statements in an ast.Module and for each ast.Assert statement it
finds call .visit() with it. Then .visit_Assert() takes over and
is responsible for creating new ast statements to replace the
original assert statement: it rewrites the test of an assertion
to provide intermediate values and replace it with an if statement
which raises an assertion error with a detailed explanation in
case the expression is false.
For this .visit_Assert() uses the visitor pattern to visit all the
AST nodes of the ast.Assert.test field, each visit call returning
an AST node and the corresponding explanation string. During this
    process, state is kept in several instance attributes:
:statements: All the AST statements which will replace the assert
statement.
:variables: This is populated by .variable() with each variable
used by the statements so that they can all be set to None at
the end of the statements.
:variable_counter: Counter to create new unique variables needed
by statements. Variables are created using .variable() and
have the form of "@py_assert0".
:on_failure: The AST statements which will be executed if the
assertion test fails. This is the code which will construct
the failure message and raises the AssertionError.
:explanation_specifiers: A dict filled by .explanation_param()
with %-formatting placeholders and their corresponding
expressions to use in the building of an assertion message.
This is used by .pop_format_context() to build a message.
:stack: A stack of the explanation_specifiers dicts maintained by
.push_format_context() and .pop_format_context() which allows
    building another %-formatted string while one is already being built.
This state is reset on every new assert statement visited and used
by the other visitors.
"""
def __init__(self, module_path, config):
super(AssertionRewriter, self).__init__()
self.module_path = module_path
self.config = config
def run(self, mod):
"""Find all assert statements in *mod* and rewrite them."""
if not mod.body:
# Nothing to do.
return
# Insert some special imports at the top of the module but after any
# docstrings and __future__ imports.
aliases = [
ast.alias(six.moves.builtins.__name__, "@py_builtins"),
ast.alias("_pytest.assertion.rewrite", "@pytest_ar"),
]
doc = getattr(mod, "docstring", None)
expect_docstring = doc is None
if doc is not None and self.is_rewrite_disabled(doc):
return
pos = 0
lineno = 1
for item in mod.body:
if (
expect_docstring
and isinstance(item, ast.Expr)
and isinstance(item.value, ast.Str)
):
doc = item.value.s
if self.is_rewrite_disabled(doc):
return
expect_docstring = False
elif (
not isinstance(item, ast.ImportFrom)
or item.level > 0
or item.module != "__future__"
):
lineno = item.lineno
break
pos += 1
else:
lineno = item.lineno
imports = [
ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases
]
mod.body[pos:pos] = imports
# Collect asserts.
nodes = [mod]
while nodes:
node = nodes.pop()
for name, field in ast.iter_fields(node):
if isinstance(field, list):
new = []
for i, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
new.extend(self.visit(child))
else:
new.append(child)
if isinstance(child, ast.AST):
nodes.append(child)
setattr(node, name, new)
elif (
isinstance(field, ast.AST)
# Don't recurse into expressions as they can't contain
# asserts.
and not isinstance(field, ast.expr)
):
nodes.append(field)
@staticmethod
def is_rewrite_disabled(docstring):
return "PYTEST_DONT_REWRITE" in docstring
def variable(self):
"""Get a new variable."""
# Use a character invalid in python identifiers to avoid clashing.
name = "@py_assert" + str(next(self.variable_counter))
self.variables.append(name)
return name
def assign(self, expr):
"""Give *expr* a name."""
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.Name(name, ast.Load())
def display(self, expr):
"""Call saferepr on the expression."""
return self.helper("saferepr", expr)
def helper(self, name, *args):
"""Call a helper in this module."""
py_name = ast.Name("@pytest_ar", ast.Load())
attr = ast.Attribute(py_name, "_" + name, ast.Load())
return ast_Call(attr, list(args), [])
def builtin(self, name):
"""Return the builtin called *name*."""
builtin_name = ast.Name("@py_builtins", ast.Load())
return ast.Attribute(builtin_name, name, ast.Load())
def explanation_param(self, expr):
"""Return a new named %-formatting placeholder for expr.
This creates a %-formatting placeholder for expr in the
current formatting context, e.g. ``%(py0)s``. The placeholder
and expr are placed in the current format context so that it
can be used on the next call to .pop_format_context().
"""
specifier = "py" + str(next(self.variable_counter))
self.explanation_specifiers[specifier] = expr
return "%(" + specifier + ")s"
def push_format_context(self):
"""Create a new formatting context.
The format context is used for when an explanation wants to
have a variable value formatted in the assertion message. In
this case the value required can be added using
.explanation_param(). Finally .pop_format_context() is used
to format a string of %-formatted values as added by
.explanation_param().
"""
self.explanation_specifiers = {}
self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr):
"""Format the %-formatted string with current format context.
The expl_expr should be an ast.Str instance constructed from
the %-placeholders created by .explanation_param(). This will
add the required code to format said string to .on_failure and
return the ast.Name instance of the formatted string.
"""
current = self.stack.pop()
if self.stack:
self.explanation_specifiers = self.stack[-1]
keys = [ast.Str(key) for key in current.keys()]
format_dict = ast.Dict(keys, list(current.values()))
form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
name = "@py_format" + str(next(self.variable_counter))
self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
return ast.Name(name, ast.Load())
def generic_visit(self, node):
"""Handle expressions we don't have custom code for."""
assert isinstance(node, ast.expr)
res = self.assign(node)
return res, self.explanation_param(self.display(res))
def visit_Assert(self, assert_):
"""Return the AST statements to replace the ast.Assert instance.
This rewrites the test of an assertion to provide
intermediate values and replace it with an if statement which
raises an assertion error with a detailed explanation in case
the expression is false.
"""
if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1:
from _pytest.warning_types import PytestWarning
import warnings
warnings.warn_explicit(
PytestWarning("assertion is always true, perhaps remove parentheses?"),
category=None,
filename=str(self.module_path),
lineno=assert_.lineno,
)
self.statements = []
self.variables = []
self.variable_counter = itertools.count()
self.stack = []
self.on_failure = []
self.push_format_context()
# Rewrite assert into a bunch of statements.
top_condition, explanation = self.visit(assert_.test)
# If in a test module, check if directly asserting None, in order to warn [Issue #3191]
if self.module_path is not None:
self.statements.append(
self.warn_about_none_ast(
top_condition, module_path=self.module_path, lineno=assert_.lineno
)
)
# Create failure message.
body = self.on_failure
negation = ast.UnaryOp(ast.Not(), top_condition)
self.statements.append(ast.If(negation, body, []))
if assert_.msg:
assertmsg = self.helper("format_assertmsg", assert_.msg)
explanation = "\n>assert " + explanation
else:
assertmsg = ast.Str("")
explanation = "assert " + explanation
template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
msg = self.pop_format_context(template)
fmt = self.helper("format_explanation", msg)
err_name = ast.Name("AssertionError", ast.Load())
exc = ast_Call(err_name, [fmt], [])
if sys.version_info[0] >= 3:
raise_ = ast.Raise(exc, None)
else:
raise_ = ast.Raise(exc, None, None)
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
variables = [ast.Name(name, ast.Store()) for name in self.variables]
clear = ast.Assign(variables, _NameConstant(None))
self.statements.append(clear)
# Fix line numbers.
for stmt in self.statements:
set_location(stmt, assert_.lineno, assert_.col_offset)
return self.statements
def warn_about_none_ast(self, node, module_path, lineno):
"""
Returns an AST issuing a warning if the value of node is `None`.
This is used to warn the user when asserting a function that asserts
internally already.
See issue #3191 for more details.
"""
# Using parse because it is different between py2 and py3.
AST_NONE = ast.parse("None").body[0].value
val_is_none = ast.Compare(node, [ast.Is()], [AST_NONE])
send_warning = ast.parse(
"""
from _pytest.warning_types import PytestWarning
from warnings import warn_explicit
warn_explicit(
PytestWarning('asserting the value None, please use "assert is None"'),
category=None,
filename={filename!r},
lineno={lineno},
)
""".format(
filename=module_path.strpath, lineno=lineno
)
).body
return ast.If(val_is_none, send_warning, [])
def visit_Name(self, name):
# Display the repr of the name if it's a local variable or
# _should_repr_global_name() thinks it's acceptable.
locs = ast_Call(self.builtin("locals"), [], [])
inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
dorepr = self.helper("should_repr_global_name", name)
test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
def visit_BoolOp(self, boolop):
res_var = self.variable()
expl_list = self.assign(ast.List([], ast.Load()))
app = ast.Attribute(expl_list, "append", ast.Load())
is_or = int(isinstance(boolop.op, ast.Or))
body = save = self.statements
fail_save = self.on_failure
levels = len(boolop.values) - 1
self.push_format_context()
        # Process each operand, short-circuiting if needed.
for i, v in enumerate(boolop.values):
if i:
fail_inner = []
# cond is set in a prior loop iteration below
self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
self.on_failure = fail_inner
self.push_format_context()
res, expl = self.visit(v)
body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
expl_format = self.pop_format_context(ast.Str(expl))
call = ast_Call(app, [expl_format], [])
self.on_failure.append(ast.Expr(call))
if i < levels:
cond = res
if is_or:
cond = ast.UnaryOp(ast.Not(), cond)
inner = []
self.statements.append(ast.If(cond, inner, []))
self.statements = body = inner
self.statements = save
self.on_failure = fail_save
expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
expl = self.pop_format_context(expl_template)
return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_res, operand_expl = self.visit(unary.operand)
res = self.assign(ast.UnaryOp(unary.op, operand_res))
return res, pattern % (operand_expl,)
def visit_BinOp(self, binop):
symbol = binop_map[binop.op.__class__]
left_expr, left_expl = self.visit(binop.left)
right_expr, right_expl = self.visit(binop.right)
explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
return res, explanation
def visit_Call_35(self, call):
"""
        visit `ast.Call` nodes on Python 3.5 and later
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
for arg in call.args:
res, expl = self.visit(arg)
arg_expls.append(expl)
new_args.append(res)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
if keyword.arg:
arg_expls.append(keyword.arg + "=" + expl)
else: # **args have `arg` keywords with an .arg of None
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ", ".join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
def visit_Starred(self, starred):
# From Python 3.5, a Starred node can appear in a function call
res, expl = self.visit(starred.value)
new_starred = ast.Starred(res, starred.ctx)
return new_starred, "*" + expl
def visit_Call_legacy(self, call):
"""
        visit `ast.Call` nodes on Python 3.4 and below
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
new_star = new_kwarg = None
for arg in call.args:
res, expl = self.visit(arg)
new_args.append(res)
arg_expls.append(expl)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
arg_expls.append(keyword.arg + "=" + expl)
if call.starargs:
new_star, expl = self.visit(call.starargs)
arg_expls.append("*" + expl)
if call.kwargs:
new_kwarg, expl = self.visit(call.kwargs)
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ", ".join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
# ast.Call signature changed on 3.5,
    # conditionally change which method is named
# visit_Call depending on Python version
if sys.version_info >= (3, 5):
visit_Call = visit_Call_35
else:
visit_Call = visit_Call_legacy
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
value, value_expl = self.visit(attr.value)
res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
res_expl = self.explanation_param(self.display(res))
pat = "%s\n{%s = %s.%s\n}"
expl = pat % (res_expl, res_expl, value_expl, attr.attr)
return res, expl
def visit_Compare(self, comp):
self.push_format_context()
left_res, left_expl = self.visit(comp.left)
if isinstance(comp.left, (ast.Compare, ast.BoolOp)):
left_expl = "({})".format(left_expl)
res_variables = [self.variable() for i in range(len(comp.ops))]
load_names = [ast.Name(v, ast.Load()) for v in res_variables]
store_names = [ast.Name(v, ast.Store()) for v in res_variables]
it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
expls = []
syms = []
results = [left_res]
for i, op, next_operand in it:
next_res, next_expl = self.visit(next_operand)
if isinstance(next_operand, (ast.Compare, ast.BoolOp)):
next_expl = "({})".format(next_expl)
results.append(next_res)
sym = binop_map[op.__class__]
syms.append(ast.Str(sym))
expl = "%s %s %s" % (left_expl, sym, next_expl)
expls.append(ast.Str(expl))
res_expr = ast.Compare(left_res, [op], [next_res])
self.statements.append(ast.Assign([store_names[i]], res_expr))
left_res, left_expl = next_res, next_expl
# Use pytest.assertion.util._reprcompare if that's available.
expl_call = self.helper(
"call_reprcompare",
ast.Tuple(syms, ast.Load()),
ast.Tuple(load_names, ast.Load()),
ast.Tuple(expls, ast.Load()),
ast.Tuple(results, ast.Load()),
)
if len(comp.ops) > 1:
res = ast.BoolOp(ast.And(), load_names)
else:
res = load_names[0]
return res, self.explanation_param(self.pop_format_context(expl_call))
|
the-stack_106_31383 | #!/usr/bin/python
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This module parses CLI arguments for the Rebase Bot."""
import argparse
from collections import namedtuple
import re
import sys
from rebasebot import bot
GitHubBranch = namedtuple("GitHubBranch", ["url", "ns", "name", "branch"])
class GitHubBranchAction(argparse.Action):
"""An action to take a GitHub branch argument in the form:
<user or organisation>/<repo>:<branch>
The argument will be returned as a GitHubBranch object.
"""
GITHUBBRANCH = re.compile("^(?P<ns>[^/]+)/(?P<name>[^:]+):(?P<branch>.*)$")
def __call__(self, parser, namespace, values, option_string=None):
# For backward compatibility we need to ensure that the prefix was removed
values = values.removeprefix("https://github.com/")
match = self.GITHUBBRANCH.match(values)
if match is None:
parser.error(
f"GitHub branch value for {option_string} must be in "
f"the form <user or organisation>/<repo>:<branch>"
)
setattr(
namespace,
self.dest,
GitHubBranch(
f"https://github.com/{match.group('ns')}/{match.group('name')}",
match.group("ns"),
match.group("name"),
match.group("branch")
),
)
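# As an illustration of the action above (the repository names are taken from the help text
# below, not new behaviour): the value "kubernetes/cloud-provider-openstack:master" would be
# stored as
#   GitHubBranch(url="https://github.com/kubernetes/cloud-provider-openstack",
#                ns="kubernetes", name="cloud-provider-openstack", branch="master")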
# _parse_cli_arguments parses command line arguments using argparse and returns
# an object representing the populated namespace
#
# testing_args should be left empty, except for during testing
def _parse_cli_arguments(testing_args=None):
_form_text = (
"in the form <user or organisation>/<repo>:<branch>, "
"e.g. kubernetes/cloud-provider-openstack:master"
)
parser = argparse.ArgumentParser(
description="Rebase on changes from an upstream repo")
parser.add_argument(
"--source",
"-s",
type=str,
required=True,
action=GitHubBranchAction,
help=(
"The source/upstream git repo to rebase changes onto in the form "
"<git url>:<branch>. Note that unlike dest and rebase this does "
"not need to be a GitHub url, hence its syntax is different."
),
)
parser.add_argument(
"--dest",
"-d",
type=str,
required=True,
action=GitHubBranchAction,
help=f"The destination/downstream GitHub repo to merge changes into {_form_text}",
)
parser.add_argument(
"--rebase",
type=str,
required=True,
action=GitHubBranchAction,
help=f"The base GitHub repo that will be used to create a pull request {_form_text}",
)
parser.add_argument(
"--git-username",
type=str,
required=False,
help="Custom git username to be used in any git commits.",
default="",
)
parser.add_argument(
"--git-email",
type=str,
required=False,
help="Custom git email to be used in any git commits.",
default="",
)
parser.add_argument(
"--working-dir",
type=str,
required=False,
help="The working directory where the git repos will be cloned.",
default=".rebase",
)
parser.add_argument(
"--github-user-token",
type=str,
required=False,
help="The path to a github user access token.",
)
parser.add_argument(
"--github-app-id",
type=int,
required=False,
help="The app ID of the GitHub app to use.",
default=137509,
)
parser.add_argument(
"--github-app-key",
type=str,
required=False,
help="The path to a github app private key.",
)
parser.add_argument(
"--github-cloner-id",
type=int,
required=False,
help="The app ID of the GitHub cloner app to use.",
default=137497,
)
parser.add_argument(
"--github-cloner-key",
type=str,
required=False,
help="The path to a github app private key.",
)
parser.add_argument(
"--slack-webhook",
type=str,
required=False,
help="The path where credentials for the slack webhook are.",
)
parser.add_argument(
"--update-go-modules",
action="store_true",
default=False,
required=False,
help="When enabled, the bot will update and vendor the go modules "
"in a separate commit.",
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
required=False,
help="When enabled, the bot will not create a PR.",
)
parser.add_argument(
"--tag-policy",
default="none",
const="none",
nargs="?",
choices=["none", "soft", "strict"],
help="Option that shows how to handle UPSTREAM tags in "
"commit messages. (default: %(default)s)")
if testing_args is not None:
args = parser.parse_args(testing_args)
else:
args = parser.parse_args()
return args
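# A minimal sketch of driving the parser from code via testing_args; the repository and branch
# values below are invented for illustration only.
def _example_parse_args():
    return _parse_cli_arguments([
        "--source", "kubernetes/cloud-provider-openstack:master",
        "--dest", "openshift/cloud-provider-openstack:main",
        "--rebase", "rebasebot/cloud-provider-openstack:rebase",
    ])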
def main():
"""Rebase Bot entry point function."""
args = _parse_cli_arguments()
gh_app_key = ""
if args.github_app_key is not None:
with open(args.github_app_key, "r", encoding='utf-8') as app_key_file:
gh_app_key = app_key_file.read().strip().encode()
gh_cloner_key = ""
if args.github_cloner_key is not None:
with open(args.github_cloner_key, "r", encoding='utf-8') as app_key_file:
gh_cloner_key = app_key_file.read().strip().encode()
gh_user_token = ""
if args.github_user_token is not None:
with open(args.github_user_token, "r", encoding='utf-8') as app_key_file:
gh_user_token = app_key_file.read().strip().encode().decode('utf-8')
slack_webhook = None
if args.slack_webhook is not None:
with open(args.slack_webhook, "r", encoding='utf-8') as app_key_file:
slack_webhook = app_key_file.read().strip()
success = bot.run(
args.source,
args.dest,
args.rebase,
args.working_dir,
args.git_username,
args.git_email,
gh_user_token,
args.github_app_id,
gh_app_key,
args.github_cloner_id,
gh_cloner_key,
slack_webhook,
args.tag_policy,
update_go_modules=args.update_go_modules,
dry_run=args.dry_run,
)
if success:
sys.exit(0)
else:
sys.exit(1)
if __name__ == "__main__":
main()
|
the-stack_106_31384 | # Copyright (c) 2011 Jeff Garzik
#
# Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
#
# Copyright (c) 2007 Jan-Klaas Kollhof
#
# This file is part of jsonrpc.
#
# jsonrpc is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""HTTP proxy for opening RPC connection to gramd.
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
- HTTP connections persist for the life of the AuthServiceProxy object
(if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
"""
try:
import http.client as httplib
except ImportError:
import httplib
import base64
import decimal
import json
import logging
import socket
import time
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
USER_AGENT = "AuthServiceProxy/0.1"
HTTP_TIMEOUT = 30
log = logging.getLogger("GRAMRPC")
class JSONRPCException(Exception):
def __init__(self, rpc_error):
try:
errmsg = '%(message)s (%(code)i)' % rpc_error
except (KeyError, TypeError):
errmsg = ''
Exception.__init__(self, errmsg)
self.error = rpc_error
def EncodeDecimal(o):
if isinstance(o, decimal.Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
class AuthServiceProxy(object):
__id_count = 0
# ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
self.__service_url = service_url
self._service_name = service_name
self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests
self.__url = urlparse.urlparse(service_url)
if self.__url.port is None:
port = 80
else:
port = self.__url.port
(user, passwd) = (self.__url.username, self.__url.password)
try:
user = user.encode('utf8')
except AttributeError:
pass
try:
passwd = passwd.encode('utf8')
except AttributeError:
pass
authpair = user + b':' + passwd
self.__auth_header = b'Basic ' + base64.b64encode(authpair)
if connection:
# Callables re-use the connection of the original proxy
self.__conn = connection
elif self.__url.scheme == 'https':
self.__conn = httplib.HTTPSConnection(self.__url.hostname, port,
timeout=timeout)
else:
self.__conn = httplib.HTTPConnection(self.__url.hostname, port,
timeout=timeout)
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
if self._service_name is not None:
name = "%s.%s" % (self._service_name, name)
return AuthServiceProxy(self.__service_url, name, connection=self.__conn)
def _request(self, method, path, postdata):
'''
Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout).
This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
'''
headers = {'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'}
try:
self.__conn.request(method, path, postdata, headers)
return self._get_response()
except httplib.BadStatusLine as e:
if e.line == "''": # if connection was closed, try again
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
else:
raise
except (BrokenPipeError,ConnectionResetError):
# Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset
# ConnectionResetError happens on FreeBSD with Python 3.4
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
def __call__(self, *args, **argsn):
AuthServiceProxy.__id_count += 1
log.debug("-%s-> %s %s"%(AuthServiceProxy.__id_count, self._service_name,
json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
if args and argsn:
raise ValueError('Cannot handle both named and positional arguments')
postdata = json.dumps({'version': '1.1',
'method': self._service_name,
'params': args or argsn,
'id': AuthServiceProxy.__id_count}, default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
response = self._request('POST', self.__url.path, postdata.encode('utf-8'))
if response['error'] is not None:
raise JSONRPCException(response['error'])
elif 'result' not in response:
raise JSONRPCException({
'code': -343, 'message': 'missing JSON-RPC result'})
else:
return response['result']
def _batch(self, rpc_call_list):
postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
log.debug("--> "+postdata)
return self._request('POST', self.__url.path, postdata.encode('utf-8'))
def _get_response(self):
req_start_time = time.time()
try:
http_response = self.__conn.getresponse()
except socket.timeout as e:
raise JSONRPCException({
'code': -344,
'message': '%r RPC took longer than %f seconds. Consider '
'using larger timeout for calls that take '
'longer to return.' % (self._service_name,
self.__conn.timeout)})
if http_response is None:
raise JSONRPCException({
'code': -342, 'message': 'missing HTTP response from server'})
content_type = http_response.getheader('Content-Type')
if content_type != 'application/json':
raise JSONRPCException({
'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)})
responsedata = http_response.read().decode('utf8')
response = json.loads(responsedata, parse_float=decimal.Decimal)
elapsed = time.time() - req_start_time
if "error" in response and response["error"] is None:
log.debug("<-%s- [%.6f] %s"%(response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
else:
log.debug("<-- [%.6f] %s"%(elapsed,responsedata))
return response
def __truediv__(self, relative_uri):
return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn)
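# A usage sketch; the hostname, credentials and RPC method name below are placeholders rather
# than something this module defines (the available methods depend on the gramd server):
#
#   proxy = AuthServiceProxy("http://rpcuser:rpcpass@127.0.0.1:8332")
#   count = proxy.getblockcount()          # resolved dynamically via __getattr__ / __call__
#   sub = proxy / "wallet/main"            # __truediv__ appends a relative URI to the URL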
|
the-stack_106_31385 | import pycxsimulator
from pylab import *
n = 100 # size of grid: n * n
Dh = 1. / n # spatial resolution, assuming space is [0,1] * [0,1]
Dt = 0.01 # temporal resolution
wx, wy = -0.01, 0.03 # constant velocity of movement
xvalues, yvalues = meshgrid(arange(0, 1, Dh), arange(0, 1, Dh))
def initialize():
global config, nextconfig
# initial configuration
config = exp(-((xvalues - 0.5)**2 + (yvalues - 0.5)**2) / (0.2**2))
nextconfig = zeros([n, n])
def observe():
global config, nextconfig
cla()
imshow(config, vmin = 0, vmax = 1)
def update():
global config, nextconfig
for x in range(n):
for y in range(n):
# state-transition function
nextconfig[x, y] = config[x, y] - ( wx * config[(x+1)%n, y]
- wx * config[(x-1)%n, y]
+ wy * config[x, (y+1)%n]
- wy * config[x, (y-1)%n])\
* Dt/(2*Dh)
config, nextconfig = nextconfig, config
pycxsimulator.GUI(stepSize = 50).start(func=[initialize, observe, update])
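# The update() step above is, up to the grid's axis convention, a forward-Euler /
# central-difference discretization of the linear advection (transport) equation
#   dc/dt + wx * dc/dx + wy * dc/dy = 0
# with periodic boundaries provided by the modulo indexing.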
|
the-stack_106_31388 | # -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
class clstm_clf(object):
"""
A C-LSTM classifier for text classification
Reference: A C-LSTM Neural Network for Text Classification
"""
def __init__(self, config):
self.max_length = config.max_length
self.num_classes = config.num_classes
self.vocab_size = config.vocab_size
self.embedding_size = config.embedding_size
self.filter_sizes = list(map(int, config.filter_sizes.split(",")))
self.num_filters = config.num_filters
self.hidden_size = len(self.filter_sizes) * self.num_filters
self.num_layers = config.num_layers
self.l2_reg_lambda = config.l2_reg_lambda
# Placeholders
self.batch_size = tf.placeholder(dtype=tf.int32, shape=[], name='batch_size')
self.input_x = tf.placeholder(dtype=tf.int32, shape=[None, self.max_length], name='input_x')
self.input_y = tf.placeholder(dtype=tf.int64, shape=[None], name='input_y')
self.keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='keep_prob')
self.sequence_length = tf.placeholder(dtype=tf.int32, shape=[None], name='sequence_length')
# L2 loss
self.l2_loss = tf.constant(0.0)
# Word embedding
with tf.device('/cpu:0'), tf.name_scope('embedding'):
embedding = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0),
name="embedding")
embed = tf.nn.embedding_lookup(embedding, self.input_x)
inputs = tf.expand_dims(embed, -1)
# Input dropout
inputs = tf.nn.dropout(inputs, keep_prob=self.keep_prob)
conv_outputs = []
max_feature_length = self.max_length - max(self.filter_sizes) + 1
# Convolutional layer with different lengths of filters in parallel
# No max-pooling
for i, filter_size in enumerate(self.filter_sizes):
with tf.variable_scope('conv-%s' % filter_size):
# [filter size, embedding size, channels, number of filters]
filter_shape = [filter_size, self.embedding_size, 1, self.num_filters]
W = tf.get_variable('weights', filter_shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
b = tf.get_variable('biases', [self.num_filters], initializer=tf.constant_initializer(0.0))
# Convolution
conv = tf.nn.conv2d(inputs,
W,
strides=[1, 1, 1, 1],
padding='VALID',
name='conv')
# Activation function
h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')
# Remove channel dimension
h_reshape = tf.squeeze(h, [2])
# Cut the feature sequence at the end based on the maximum filter length
h_reshape = h_reshape[:, :max_feature_length, :]
conv_outputs.append(h_reshape)
# Concatenate the outputs from different filters
if len(self.filter_sizes) > 1:
rnn_inputs = tf.concat(conv_outputs, -1)
else:
rnn_inputs = h_reshape
# LSTM cell
cell = tf.contrib.rnn.LSTMCell(self.hidden_size,
forget_bias=1.0,
state_is_tuple=True,
reuse=tf.get_variable_scope().reuse)
# Add dropout to LSTM cell
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=self.keep_prob)
# Stacked LSTMs
cell = tf.contrib.rnn.MultiRNNCell([cell]*self.num_layers, state_is_tuple=True)
self._initial_state = cell.zero_state(self.batch_size, dtype=tf.float32)
# Feed the CNN outputs to LSTM network
with tf.variable_scope('LSTM'):
outputs, state = tf.nn.dynamic_rnn(cell,
rnn_inputs,
initial_state=self._initial_state,
sequence_length=self.sequence_length)
self.final_state = state
# Softmax output layer
with tf.name_scope('softmax'):
softmax_w = tf.get_variable('softmax_w', shape=[self.hidden_size, self.num_classes], dtype=tf.float32)
softmax_b = tf.get_variable('softmax_b', shape=[self.num_classes], dtype=tf.float32)
# L2 regularization for output layer
self.l2_loss += tf.nn.l2_loss(softmax_w)
self.l2_loss += tf.nn.l2_loss(softmax_b)
# logits
self.logits = tf.matmul(self.final_state[self.num_layers - 1].h, softmax_w) + softmax_b
predictions = tf.nn.softmax(self.logits,name='res_tmp')
self.predictions = tf.argmax(predictions, 1, name='predictions')
# Loss
with tf.name_scope('loss'):
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.logits)
self.cost = tf.reduce_mean(losses) + self.l2_reg_lambda * self.l2_loss
# Accuracy
with tf.name_scope('accuracy'):
correct_predictions = tf.equal(self.predictions, self.input_y)
self.correct_num = tf.reduce_sum(tf.cast(correct_predictions, tf.float32))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name='accuracy')
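# A rough usage sketch; the config object is assumed to expose the fields read in __init__
# above, and the concrete values and feed names here are invented for illustration:
#
#   config.max_length = 100; config.num_classes = 2; config.vocab_size = 10000
#   config.embedding_size = 128; config.filter_sizes = "3,4,5"; config.num_filters = 100
#   config.num_layers = 1; config.l2_reg_lambda = 0.001
#   model = clstm_clf(config)
#   feed = {model.input_x: x_batch, model.input_y: y_batch, model.batch_size: len(x_batch),
#           model.sequence_length: per_example_lengths, model.keep_prob: 0.5}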
|
the-stack_106_31389 | import unittest
import django
if django.VERSION < (3, 2):
default_app_config = 'csv_permissions.apps.CsvPermissionAppConfig'
def load_tests(*args, **kwargs):
empty_suite = unittest.TestSuite()
return empty_suite
__version__ = "0.2.0"
|
the-stack_106_31390 | ######################################################################################################
# Noise element: corrupts at most one bit per frame
import random
def noise(bits_originales, metodo):
    # Method (error-detection scheme) required by the noise function
    print('Single-bit noise')
if metodo == 'lrc':
n = 7
x = 8
elif metodo == 'vrc':
#n = 6
n = 7
#x = 8
x = len(bits_originales[0])
elif metodo == 'crc':
n = 7
x = len(bits_originales[0])
elif metodo == 'cs':
n = 7
x = len(bits_originales[0])
    # Empty list of 0s with the length of the original frame
bits_corrompidos = [['0' for col in range(x)] for row in range(len(bits_originales))]
    # Noise analysis
for i in range(len(bits_originales)):
        # Probability of corrupting an 8-bit frame
probabilidad = random.randint(0,1)
        print('probability for frame ' + str(i) + ': ' + str(probabilidad))
        # x: constant length of 8 bits
if probabilidad == 0:
for j in range(x):
bits_corrompidos[i][j] = bits_originales[i][j]
else:
            # Bit that will be affected in the corrupted frame
aleatorio = random.randint(0,n)
            print('random bit: ' + str(aleatorio))
for j in range(x):
                # Copy the frame of bits into the empty group of zeros
bits_corrompidos[i][j] = bits_originales[i][j]
if j == aleatorio:
if bits_corrompidos[i][j] == '0':
bits_corrompidos[i][j] = '1'
else:
bits_corrompidos[i][j] = '0'
return(bits_corrompidos)
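# For example (purely illustrative), corrupting a single 8-bit frame with the LRC layout:
#   corrupted = noise([['1', '0', '1', '1', '0', '0', '1', '0']], 'lrc')
# Each frame is either left intact or has exactly one randomly chosen bit flipped,
# with equal probability.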
|
the-stack_106_31392 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals, absolute_import
import frappe
from frappe import _
import json
from frappe.model.document import Document
from frappe.core.doctype.user.user import extract_mentions
from frappe.desk.doctype.notification_log.notification_log import enqueue_create_notification,\
get_title, get_title_html
from frappe.utils import get_fullname
from frappe.website.render import clear_cache
from frappe.database.schema import add_column
from frappe.exceptions import ImplicitCommitError
class Comment(Document):
def after_insert(self):
self.notify_mentions()
frappe.publish_realtime('new_communication', self.as_dict(),
doctype=self.reference_doctype, docname=self.reference_name,
after_commit=True)
def validate(self):
if not self.comment_email:
self.comment_email = frappe.session.user
self.content = frappe.utils.sanitize_html(self.content)
def on_update(self):
update_comment_in_doc(self)
def on_trash(self):
self.remove_comment_from_cache()
frappe.publish_realtime('delete_communication', self.as_dict(),
doctype= self.reference_doctype, docname = self.reference_name,
after_commit=True)
def remove_comment_from_cache(self):
_comments = get_comments_from_parent(self)
for c in _comments:
if c.get("name")==self.name:
_comments.remove(c)
update_comments_in_parent(self.reference_doctype, self.reference_name, _comments)
def notify_mentions(self):
if self.reference_doctype and self.reference_name and self.content:
mentions = extract_mentions(self.content)
if not mentions:
return
sender_fullname = get_fullname(frappe.session.user)
title = get_title(self.reference_doctype, self.reference_name)
recipients = [frappe.db.get_value("User", {"enabled": 1, "name": name, "user_type": "System User", "allowed_in_mentions": 1}, "email")
for name in mentions]
notification_message = _('''{0} mentioned you in a comment in {1} {2}''')\
.format(frappe.bold(sender_fullname), frappe.bold(self.reference_doctype), get_title_html(title))
notification_doc = {
'type': 'Mention',
'document_type': self.reference_doctype,
'document_name': self.reference_name,
'subject': notification_message,
'from_user': frappe.session.user,
'email_content': self.content
}
enqueue_create_notification(recipients, notification_doc)
def on_doctype_update():
frappe.db.add_index("Comment", ["reference_doctype", "reference_name"])
frappe.db.add_index("Comment", ["link_doctype", "link_name"])
def update_comment_in_doc(doc):
"""Updates `_comments` (JSON) property in parent Document.
Creates a column `_comments` if property does not exist.
Only user created Communication or Comment of type Comment are saved.
`_comments` format
{
"comment": [String],
"by": [user],
"name": [Comment Document name]
}"""
# only comments get updates, not likes, assignments etc.
if doc.doctype == 'Comment' and doc.comment_type != 'Comment':
return
def get_truncated(content):
return (content[:97] + '...') if len(content) > 100 else content
if doc.reference_doctype and doc.reference_name and doc.content:
_comments = get_comments_from_parent(doc)
updated = False
for c in _comments:
if c.get("name")==doc.name:
c["comment"] = get_truncated(doc.content)
updated = True
if not updated:
_comments.append({
"comment": get_truncated(doc.content),
# "comment_email" for Comment and "sender" for Communication
"by": getattr(doc, 'comment_email', None) or getattr(doc, 'sender', None) or doc.owner,
"name": doc.name
})
update_comments_in_parent(doc.reference_doctype, doc.reference_name, _comments)
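# For illustration, the `_comments` column on the parent document ends up holding a JSON list
# of entries shaped like (values invented):
#   [{"comment": "Looks good to me", "by": "jane@example.com", "name": "COMM-0001"}]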
def get_comments_from_parent(doc):
'''
get the list of comments cached in the document record in the column
`_comments`
'''
try:
_comments = frappe.db.get_value(doc.reference_doctype, doc.reference_name, "_comments") or "[]"
except Exception as e:
if frappe.db.is_missing_table_or_column(e):
_comments = "[]"
else:
raise
try:
return json.loads(_comments)
except ValueError:
return []
def update_comments_in_parent(reference_doctype, reference_name, _comments):
"""Updates `_comments` property in parent Document with given dict.
:param _comments: Dict of comments."""
if not reference_doctype or not reference_name or frappe.db.get_value("DocType", reference_doctype, "issingle"):
return
try:
# use sql, so that we do not mess with the timestamp
frappe.db.sql("""update `tab{0}` set `_comments`=%s where name=%s""".format(reference_doctype), # nosec
(json.dumps(_comments[-100:]), reference_name))
except Exception as e:
if frappe.db.is_column_missing(e) and getattr(frappe.local, 'request', None):
# missing column and in request, add column and update after commit
frappe.local._comments = (getattr(frappe.local, "_comments", [])
+ [(reference_doctype, reference_name, _comments)])
elif frappe.db.is_data_too_long(e):
raise frappe.DataTooLongException
else:
raise ImplicitCommitError
else:
if not frappe.flags.in_patch:
reference_doc = frappe.get_doc(reference_doctype, reference_name)
if getattr(reference_doc, "route", None):
clear_cache(reference_doc.route)
def update_comments_in_parent_after_request():
"""update _comments in parent if _comments column is missing"""
if hasattr(frappe.local, "_comments"):
for (reference_doctype, reference_name, _comments) in frappe.local._comments:
add_column(reference_doctype, "_comments", "Text")
update_comments_in_parent(reference_doctype, reference_name, _comments)
frappe.db.commit()
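# Illustrative sketch (doctype and field values below are hypothetical, not part of this
# module): after a Comment is inserted against a parent record, update_comment_in_doc
# caches a truncated copy of it in the parent's `_comments` JSON column, e.g.
#
#   comment = frappe.get_doc({
#       "doctype": "Comment",
#       "comment_type": "Comment",
#       "reference_doctype": "Task",
#       "reference_name": "TASK-0001",
#       "content": "Looks good to me",
#   }).insert()
#   update_comment_in_doc(comment)
#   # `tabTask`._comments now holds JSON like:
#   # [{"comment": "Looks good to me", "by": "jane@example.com", "name": comment.name}]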
|
the-stack_106_31393 | #!/usr/bin/env python
"""GRR specific AFF4 objects."""
import re
import time
import logging
from grr.client.components.rekall_support import rekall_types as rdf_rekall_types
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flow
from grr.lib import queue_manager
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib.aff4_objects import standard
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import crypto as rdf_crypto
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import flows_pb2
from grr.server import foreman as rdf_foreman
class SpaceSeparatedStringArray(rdfvalue.RDFString):
"""A special string which stores strings as space separated."""
def __iter__(self):
for value in self._value.split():
yield value
class VFSGRRClient(standard.VFSDirectory):
"""A Remote client."""
# URN of the index for client labels.
labels_index_urn = rdfvalue.RDFURN("aff4:/index/labels/clients")
class SchemaCls(standard.VFSDirectory.SchemaCls):
"""The schema for the client."""
client_index = rdfvalue.RDFURN("aff4:/index/client")
CERT = aff4.Attribute("metadata:cert", rdf_crypto.RDFX509Cert,
"The PEM encoded cert of the client.")
FILESYSTEM = aff4.Attribute("aff4:filesystem", rdf_client.Filesystems,
"Filesystems on the client.")
CLIENT_INFO = aff4.Attribute("metadata:ClientInfo",
rdf_client.ClientInformation,
"GRR client information",
"GRR client",
default="")
LAST_BOOT_TIME = aff4.Attribute("metadata:LastBootTime",
rdfvalue.RDFDatetime,
"When the machine was last booted",
"BootTime")
FIRST_SEEN = aff4.Attribute("metadata:FirstSeen", rdfvalue.RDFDatetime,
"First time the client registered with us",
"FirstSeen")
# Information about the host.
HOSTNAME = aff4.Attribute("metadata:hostname",
rdfvalue.RDFString,
"Hostname of the host.",
"Host",
index=client_index)
FQDN = aff4.Attribute("metadata:fqdn",
rdfvalue.RDFString,
"Fully qualified hostname of the host.",
"FQDN",
index=client_index)
SYSTEM = aff4.Attribute("metadata:system", rdfvalue.RDFString,
"Operating System class.", "System")
UNAME = aff4.Attribute("metadata:uname", rdfvalue.RDFString,
"Uname string.", "Uname")
OS_RELEASE = aff4.Attribute("metadata:os_release", rdfvalue.RDFString,
"OS Major release number.", "Release")
OS_VERSION = aff4.Attribute("metadata:os_version", rdf_client.VersionString,
"OS Version number.", "Version")
# ARCH values come from platform.uname machine value, e.g. x86_64, AMD64.
ARCH = aff4.Attribute("metadata:architecture", rdfvalue.RDFString,
"Architecture.", "Architecture")
INSTALL_DATE = aff4.Attribute("metadata:install_date", rdfvalue.RDFDatetime,
"Install Date.", "Install")
# The knowledge base is used for storing data about the host and users.
# This is currently a slightly odd object as we only use some of the fields.
# The proto itself is used in Artifact handling outside of GRR (e.g. Plaso).
# Over time we will migrate fields into this proto, but for now it is a mix.
KNOWLEDGE_BASE = aff4.Attribute("metadata:knowledge_base",
rdf_client.KnowledgeBase,
"Artifact Knowledge Base", "KnowledgeBase")
GRR_CONFIGURATION = aff4.Attribute(
"aff4:client_configuration", rdf_protodict.Dict,
"Running configuration for the GRR client.", "Config")
LIBRARY_VERSIONS = aff4.Attribute(
"aff4:library_versions", rdf_protodict.Dict,
"Running library versions for the client.", "Libraries")
USERNAMES = aff4.Attribute("aff4:user_names",
SpaceSeparatedStringArray,
"A space separated list of system users.",
"Usernames",
index=client_index)
# This information is duplicated from the INTERFACES attribute but is done
# to allow for fast searching by mac address.
MAC_ADDRESS = aff4.Attribute("aff4:mac_addresses",
rdfvalue.RDFString,
"A hex encoded MAC address.",
"MAC",
index=client_index)
KERNEL = aff4.Attribute("aff4:kernel_version", rdfvalue.RDFString,
"Kernel version string.", "KernelVersion")
# Same for IP addresses.
HOST_IPS = aff4.Attribute("aff4:host_ips",
rdfvalue.RDFString,
"An IP address.",
"Host_ip",
index=client_index)
PING = aff4.Attribute("metadata:ping",
rdfvalue.RDFDatetime,
"The last time the server heard from this client.",
"LastCheckin",
versioned=False,
default=0)
CLOCK = aff4.Attribute("metadata:clock",
rdfvalue.RDFDatetime,
"The last clock read on the client "
"(Can be used to estimate client clock skew).",
"Clock",
versioned=False)
CLIENT_IP = aff4.Attribute("metadata:client_ip",
rdfvalue.RDFString,
"The ip address this client connected from.",
"Client_ip",
versioned=False)
# This is the last foreman rule that applied to us
LAST_FOREMAN_TIME = aff4.Attribute("aff4:last_foreman_time",
rdfvalue.RDFDatetime,
"The last time the foreman checked us.",
versioned=False)
LAST_INTERFACES = aff4.Attribute(
"aff4:last_interfaces",
rdf_client.Interfaces,
"Last seen network interfaces. Full history is maintained in the "
"clientid/network object. Separated for performance reasons.",
versioned=False)
LAST_CRASH = aff4.Attribute("aff4:last_crash",
rdf_client.ClientCrash,
"Last client crash.",
creates_new_object_version=False,
versioned=False)
VOLUMES = aff4.Attribute("aff4:volumes", rdf_client.Volumes,
"Client disk volumes.")
HARDWARE_INFO = aff4.Attribute("aff4:hardware_info",
rdf_client.HardwareInfo,
"Various hardware information.",
default="")
MEMORY_SIZE = aff4.Attribute("aff4:memory_size", rdfvalue.ByteSize,
"Amount of memory this client's machine has.")
# Valid client ids
CLIENT_ID_RE = re.compile(r"^C\.[0-9a-fA-F]{16}$")
@property
def age(self):
"""RDFDatetime at which the object was created."""
# TODO(user) move up to AFF4Object after some analysis of how .age is
# used in the codebase.
aff4_type = self.Get(self.Schema.TYPE)
if aff4_type:
return aff4_type.age
else:
# If there is no type attribute yet, we have only just been created and
# not flushed yet, so just set timestamp to now.
return rdfvalue.RDFDatetime().Now()
def Initialize(self):
# Our URN must be a valid client.id.
self.client_id = rdf_client.ClientURN(self.urn)
def Update(self, attribute=None, priority=None):
if attribute == "CONTAINS":
flow_id = flow.GRRFlow.StartFlow(client_id=self.client_id,
flow_name="Interrogate",
token=self.token,
priority=priority)
return flow_id
def OpenMember(self, path, mode="rw"):
return aff4.AFF4Volume.OpenMember(self, path, mode=mode)
AFF4_PREFIXES = {rdf_paths.PathSpec.PathType.OS: "/fs/os",
rdf_paths.PathSpec.PathType.TSK: "/fs/tsk",
rdf_paths.PathSpec.PathType.REGISTRY: "/registry",
rdf_paths.PathSpec.PathType.MEMORY: "/devices/memory",
rdf_paths.PathSpec.PathType.TMPFILE: "/temp"}
@staticmethod
def ClientURNFromURN(urn):
return rdf_client.ClientURN(rdfvalue.RDFURN(urn).Split()[0])
@staticmethod
def PathspecToURN(pathspec, client_urn):
"""Returns a mapping between a pathspec and an AFF4 URN.
Args:
pathspec: The PathSpec instance to convert.
client_urn: A URN of any object within the client. We use it to find the
client id.
Returns:
A urn that corresponds to this pathspec.
Raises:
ValueError: If pathspec is not of the correct type.
"""
client_urn = rdf_client.ClientURN(client_urn)
if not isinstance(pathspec, rdfvalue.RDFValue):
raise ValueError("Pathspec should be an rdfvalue.")
# If the first level is OS and the second level is TSK its probably a mount
# point resolution. We map it into the tsk branch. For example if we get:
# path: \\\\.\\Volume{1234}\\
# pathtype: OS
# mount_point: /c:/
# nested_path {
# path: /windows/
# pathtype: TSK
# }
# We map this to aff4://client_id/fs/tsk/\\\\.\\Volume{1234}\\/windows/
dev = pathspec[0].path
if pathspec[0].HasField("offset"):
# We divide here just to get prettier numbers in the GUI
dev += ":" + str(pathspec[0].offset / 512)
if (len(pathspec) > 1 and
pathspec[0].pathtype == rdf_paths.PathSpec.PathType.OS and
pathspec[1].pathtype == rdf_paths.PathSpec.PathType.TSK):
result = [VFSGRRClient.AFF4_PREFIXES[rdf_paths.PathSpec.PathType.TSK],
dev]
# Skip the top level pathspec.
pathspec = pathspec[1]
else:
# For now just map the top level prefix based on the first pathtype
result = [VFSGRRClient.AFF4_PREFIXES[pathspec[0].pathtype]]
for p in pathspec:
component = p.path
# The following encode different pathspec properties into the AFF4 path in
# such a way that unique files on the client are mapped to unique URNs in
# the AFF4 space. Note that this transformation does not need to be
# reversible since we always use the PathSpec when accessing files on the
# client.
if p.HasField("offset"):
component += ":" + str(p.offset / 512)
# Support ADS names.
if p.HasField("stream_name"):
component += ":" + p.stream_name
result.append(component)
return client_urn.Add("/".join(result))
def GetSummary(self):
"""Gets a client summary object.
Returns:
rdf_client.ClientSummary
"""
self.max_age = 0
summary = rdf_client.ClientSummary(client_id=self.urn)
summary.system_info.node = self.Get(self.Schema.HOSTNAME)
summary.system_info.system = self.Get(self.Schema.SYSTEM)
summary.system_info.release = self.Get(self.Schema.OS_RELEASE)
summary.system_info.version = str(self.Get(self.Schema.OS_VERSION, ""))
summary.system_info.kernel = self.Get(self.Schema.KERNEL)
summary.system_info.fqdn = self.Get(self.Schema.FQDN)
summary.system_info.machine = self.Get(self.Schema.ARCH)
summary.system_info.install_date = self.Get(self.Schema.INSTALL_DATE)
kb = self.Get(self.Schema.KNOWLEDGE_BASE)
if kb:
summary.users = kb.users
summary.interfaces = self.Get(self.Schema.LAST_INTERFACES)
summary.client_info = self.Get(self.Schema.CLIENT_INFO)
summary.serial_number = self.Get(self.Schema.HARDWARE_INFO).serial_number
summary.timestamp = self.age
summary.system_manufacturer = self.Get(
self.Schema.HARDWARE_INFO).system_manufacturer
return summary
def AddLabels(self, *label_names, **kwargs):
super(VFSGRRClient, self).AddLabels(*label_names, **kwargs)
with aff4.FACTORY.Create(standard.LabelSet.CLIENT_LABELS_URN,
standard.LabelSet,
mode="w",
token=self.token) as client_labels_index:
for label_name in label_names:
client_labels_index.Add(label_name)
@staticmethod
def GetClientRequests(client_urns, token=None):
"""Returns all client requests for the given client urns."""
task_urns = [urn.Add("tasks") for urn in client_urns]
client_requests_raw = data_store.DB.MultiResolvePrefix(task_urns,
"task:",
token=token)
client_requests = {}
for client_urn, requests in client_requests_raw:
client_id = str(client_urn)[6:6 + 18]
client_requests.setdefault(client_id, [])
for _, serialized, _ in requests:
client_requests[client_id].append(rdf_flows.GrrMessage(serialized))
return client_requests
class UpdateVFSFileArgs(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.UpdateVFSFileArgs
class UpdateVFSFile(flow.GRRFlow):
"""A flow to update VFS file."""
args_type = UpdateVFSFileArgs
ACL_ENFORCED = False
def Init(self):
self.state.Register("get_file_flow_urn")
@flow.StateHandler()
def Start(self):
"""Calls the Update() method of a given VFSFile/VFSDirectory object."""
self.Init()
client_id = rdf_client.ClientURN(self.args.vfs_file_urn.Split()[0])
data_store.DB.security_manager.CheckClientAccess(self.token.RealUID(),
client_id)
fd = aff4.FACTORY.Open(self.args.vfs_file_urn, mode="rw", token=self.token)
# Account for implicit directories.
if fd.Get(fd.Schema.TYPE) is None:
fd = fd.Upgrade(standard.VFSDirectory)
self.state.get_file_flow_urn = fd.Update(
attribute=self.args.attribute,
priority=rdf_flows.GrrMessage.Priority.HIGH_PRIORITY)
class VFSAnalysisFile(aff4.AFF4Image):
"""A file object in the VFS space."""
class SchemaCls(aff4.AFF4Image.SchemaCls):
"""The schema for AFF4 files in the GRR VFS."""
STAT = standard.VFSDirectory.SchemaCls.STAT
CONTENT_LOCK = aff4.Attribute(
"aff4:content_lock", rdfvalue.RDFURN,
"This lock contains a URN pointing to the flow that is currently "
"updating this flow.")
PATHSPEC = aff4.Attribute(
"aff4:pathspec", rdf_paths.PathSpec,
"The pathspec used to retrieve this object from the client.")
class VFSFile(VFSAnalysisFile):
"""A file object that can be updated under lock."""
class SchemaCls(VFSAnalysisFile.SchemaCls):
"""The schema for AFF4 files in the GRR VFS."""
CONTENT_LOCK = aff4.Attribute(
"aff4:content_lock", rdfvalue.RDFURN,
"This lock contains a URN pointing to the flow that is currently "
"updating this flow.")
def Update(self, attribute=None, priority=None):
"""Update an attribute from the client."""
# List the directory on the client
currently_running = self.Get(self.Schema.CONTENT_LOCK)
# Is this flow still active?
if currently_running:
flow_obj = aff4.FACTORY.Open(currently_running, token=self.token)
if flow_obj and flow_obj.GetRunner().IsRunning():
return
# The client_id is the first element of the URN
client_id = self.urn.Path().split("/", 2)[1]
# Get the pathspec for this object
pathspec = self.Get(self.Schema.STAT).pathspec
flow_urn = flow.GRRFlow.StartFlow(client_id=client_id,
flow_name="MultiGetFile",
token=self.token,
pathspecs=[pathspec],
priority=priority)
self.Set(self.Schema.CONTENT_LOCK(flow_urn))
self.Close()
return flow_urn
class MemoryImage(standard.VFSDirectory):
"""The server representation of the client's memory device."""
class SchemaCls(VFSFile.SchemaCls):
LAYOUT = aff4.Attribute("aff4:memory/geometry",
rdf_rekall_types.MemoryInformation,
"The memory layout of this image.")
class VFSMemoryFile(aff4.AFF4MemoryStream):
"""A VFS file under a VFSDirectory node which does not have storage."""
class SchemaCls(aff4.AFF4MemoryStream.SchemaCls):
"""The schema for AFF4 files in the GRR VFS."""
# Support also VFSFile attributes.
STAT = VFSFile.SchemaCls.STAT
HASH = VFSFile.SchemaCls.HASH
PATHSPEC = VFSFile.SchemaCls.PATHSPEC
CONTENT_LOCK = VFSFile.SchemaCls.CONTENT_LOCK
class GRRForeman(aff4.AFF4Object):
"""The foreman starts flows for clients depending on rules."""
class SchemaCls(aff4.AFF4Object.SchemaCls):
"""Attributes specific to VFSDirectory."""
RULES = aff4.Attribute("aff4:rules",
rdf_foreman.ForemanRules,
"The rules the foreman uses.",
versioned=False,
creates_new_object_version=False,
default=rdf_foreman.ForemanRules())
def ExpireRules(self):
"""Removes any rules with an expiration date in the past."""
rules = self.Get(self.Schema.RULES)
new_rules = self.Schema.RULES()
now = time.time() * 1e6
expired_session_ids = set()
for rule in rules:
if rule.expires > now:
new_rules.Append(rule)
else:
for action in rule.actions:
if action.hunt_id:
expired_session_ids.add(action.hunt_id)
if expired_session_ids:
# Notify the worker to mark this hunt as terminated.
manager = queue_manager.QueueManager(token=self.token)
manager.MultiNotifyQueue([rdf_flows.GrrNotification(session_id=session_id)
for session_id in expired_session_ids])
if len(new_rules) < len(rules):
self.Set(self.Schema.RULES, new_rules)
self.Flush()
def _CheckIfHuntTaskWasAssigned(self, client_id, hunt_id):
"""Will return True if hunt's task was assigned to this client before."""
for _ in aff4.FACTORY.Stat(
[client_id.Add("flows/%s:hunt" % rdfvalue.RDFURN(hunt_id).Basename())],
token=self.token):
return True
return False
def _EvaluateRules(self, objects, rule, client_id):
"""Evaluates the rules."""
return rule.client_rule_set.Evaluate(objects, client_id)
def _RunActions(self, rule, client_id):
"""Run all the actions specified in the rule.
Args:
rule: Rule which actions are to be executed.
client_id: Id of a client where rule's actions are to be executed.
Returns:
Number of actions started.
"""
actions_count = 0
for action in rule.actions:
try:
# Say this flow came from the foreman.
token = self.token.Copy()
token.username = "Foreman"
if action.HasField("hunt_id"):
if self._CheckIfHuntTaskWasAssigned(client_id, action.hunt_id):
logging.info("Foreman: ignoring hunt %s on client %s: was started "
"here before", client_id, action.hunt_id)
else:
logging.info("Foreman: Starting hunt %s on client %s.",
action.hunt_id, client_id)
flow_cls = flow.GRRFlow.classes[action.hunt_name]
flow_cls.StartClients(action.hunt_id, [client_id])
actions_count += 1
else:
flow.GRRFlow.StartFlow(client_id=client_id,
flow_name=action.flow_name,
token=token,
**action.argv.ToDict())
actions_count += 1
# There could be all kinds of errors we don't know about when starting the
# flow/hunt so we catch everything here.
except Exception as e: # pylint: disable=broad-except
logging.exception("Failure running foreman action on client %s: %s",
action.hunt_id, e)
return actions_count
def AssignTasksToClient(self, client_id):
"""Examines our rules and starts up flows based on the client.
Args:
client_id: Client id of the client for tasks to be assigned.
Returns:
Number of assigned tasks.
"""
client_id = rdf_client.ClientURN(client_id)
rules = self.Get(self.Schema.RULES)
if not rules:
return 0
client = aff4.FACTORY.Open(client_id, mode="rw", token=self.token)
try:
last_foreman_run = client.Get(client.Schema.LAST_FOREMAN_TIME) or 0
except AttributeError:
last_foreman_run = 0
latest_rule = max(rule.created for rule in rules)
if latest_rule <= int(last_foreman_run):
return 0
# Update the latest checked rule on the client.
client.Set(client.Schema.LAST_FOREMAN_TIME(latest_rule))
client.Close()
# For efficiency we collect all the objects we want to open first and then
# open them all in one round trip.
object_urns = {}
relevant_rules = []
expired_rules = False
now = time.time() * 1e6
for rule in rules:
if rule.expires < now:
expired_rules = True
continue
if rule.created <= int(last_foreman_run):
continue
relevant_rules.append(rule)
for path in rule.client_rule_set.GetPathsToCheck():
aff4_object = client_id.Add(path)
object_urns[str(aff4_object)] = aff4_object
# Retrieve all aff4 objects we need.
objects = {}
for fd in aff4.FACTORY.MultiOpen(object_urns, token=self.token):
objects[fd.urn] = fd
actions_count = 0
for rule in relevant_rules:
if self._EvaluateRules(objects, rule, client_id):
actions_count += self._RunActions(rule, client_id)
if expired_rules:
self.ExpireRules()
return actions_count
class GRRAFF4Init(registry.InitHook):
"""Ensure critical AFF4 objects exist for GRR."""
# Must run after the AFF4 subsystem is ready.
pre = ["AFF4InitHook"]
def Run(self):
try:
# Make the foreman
with aff4.FACTORY.Create("aff4:/foreman",
GRRForeman,
token=aff4.FACTORY.root_token):
pass
except access_control.UnauthorizedAccess:
pass
class MRUCollection(aff4.AFF4Object):
"""Stores all of the MRU files from the registry."""
class SchemaCls(aff4.AFF4Object.SchemaCls):
LAST_USED_FOLDER = aff4.Attribute("aff4:mru",
rdf_client.MRUFolder,
"The Most Recently Used files.",
default="")
class VFSFileSymlink(aff4.AFF4Stream):
"""A Delegate object for another URN."""
delegate = None
class SchemaCls(VFSFile.SchemaCls):
DELEGATE = aff4.Attribute("aff4:delegate", rdfvalue.RDFURN,
"The URN of the delegate of this object.")
def Initialize(self):
"""Open the delegate object."""
if "r" in self.mode:
delegate = self.Get(self.Schema.DELEGATE)
if delegate:
self.delegate = aff4.FACTORY.Open(delegate,
mode=self.mode,
token=self.token,
age=self.age_policy)
def Read(self, length):
if "r" not in self.mode:
raise IOError("VFSFileSymlink was not opened for reading.")
return self.delegate.Read(length)
def Seek(self, offset, whence):
return self.delegate.Seek(offset, whence)
def Tell(self):
return self.delegate.Tell()
def Close(self, sync):
super(VFSFileSymlink, self).Close(sync=sync)
if self.delegate:
return self.delegate.Close(sync)
def Write(self):
raise IOError("VFSFileSymlink not writeable.")
class VFSBlobImage(standard.BlobImage, VFSFile):
"""BlobImage with VFS attributes for use in client namespace."""
class SchemaCls(standard.BlobImage.SchemaCls, VFSFile.SchemaCls):
pass
class AFF4RekallProfile(aff4.AFF4Object):
"""A Rekall profile in the AFF4 namespace."""
class SchemaCls(aff4.AFF4Object.SchemaCls):
PROFILE = aff4.Attribute("aff4:profile", rdf_rekall_types.RekallProfile,
"A Rekall profile.")
# The catchall client label used when compiling server-side stats about clients
# by label.
ALL_CLIENTS_LABEL = "All"
def GetAllClientLabels(token, include_catchall=False):
"""Get the set of all label names applied to all clients.
Args:
token: token to use when opening the index.
include_catchall: If true, we include ALL_CLIENTS_LABEL in the results.
Returns:
set of label name strings, including the catchall "All"
"""
labels_index = aff4.FACTORY.Create(standard.LabelSet.CLIENT_LABELS_URN,
standard.LabelSet,
mode="r",
token=token)
labels = set(labels_index.ListLabels())
if include_catchall:
labels.add(ALL_CLIENTS_LABEL)
return labels
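# Illustrative sketch: GetAllClientLabels is the usual entry point when aggregating
# per-label client statistics, e.g.
#
#   for label in GetAllClientLabels(token, include_catchall=True):
#       ...  # collect stats for clients carrying `label`, plus the catchall "All" bucket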
|
the-stack_106_31397 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""data_loader"""
import os
import numpy as np
from PIL import Image
class SYSUDatasetGenerator:
"""
SYSUDatasetGenerator
"""
def __init__(self, data_dir="./data/sysu/", transform_rgb=None, transform_ir=None, color_index=None,
thermal_index=None, if_debug=False):
# Load training images (path) and labels
if if_debug:
self.train_color_image = np.load(os.path.join(data_dir, 'demo_train_rgb_resized_img.npy'))
self.train_color_label = np.load(os.path.join(data_dir, 'demo_train_rgb_resized_label.npy'))
self.train_thermal_image = np.load(os.path.join(data_dir, 'demo_train_ir_resized_img.npy'))
self.train_thermal_label = np.load(os.path.join(data_dir, 'demo_train_ir_resized_label.npy'))
else:
self.train_color_image = np.load(os.path.join(data_dir, 'train_rgb_resized_img.npy'))
self.train_color_label = np.load(os.path.join(data_dir, 'train_rgb_resized_label.npy'))
self.train_thermal_image = np.load(os.path.join(data_dir, 'train_ir_resized_img.npy'))
self.train_thermal_label = np.load(os.path.join(data_dir, 'train_ir_resized_label.npy'))
print("Color Image Size:{}".format(len(self.train_color_image)))
print("Color Label Size:{}".format(len(self.train_color_label)))
self.transform_rgb = transform_rgb
self.transform_ir = transform_ir
self.cindex = color_index
self.tindex = thermal_index
def __next__(self):
pass
def __getitem__(self, index):
# TODO: update this to match the changes in the samplers' output
# print(index)
# print("self.cIndex is ",self.cIndex[index] )
# print("self.tIndex is ",self.tIndex[index] )
img1, target1 = self.train_color_image[self.cindex[index]], self.train_color_label[self.cindex[index]]
img2, target2 = self.train_thermal_image[self.tindex[index]], self.train_thermal_label[self.tindex[index]]
# print("img1 is:", img1)
# print("target1 is:", target1)
# print("img2 is:", img2)
# print("target2 is:", target2)
# img1, img2 = self.transform(img1)[0], self.transform(img2)[0]
# target1, target2 = np.array(target1, dtype=np.float32), np.array(target2, dtype=np.float32)
# img1, img2 = self.transform(img1)[0], self.transform(img2)[0]
# img1, img2 = ms.Tensor(img1, dtype=ms.float32), ms.Tensor(img2, dtype=ms.float32)
# target1, target2 = ms.Tensor(target1, dtype=ms.float32), ms.Tensor(target2, dtype=ms.float32)
return img1, img2, target1, target2
def __len__(self):
return len(self.cindex)
class TestData:
"""TestData"""
def __init__(self, test_img_file, test_label, img_size=(144, 288), transform=None):
test_image = []
for i in range(len(test_img_file)):
img = Image.open(test_img_file[i])
img = img.resize((img_size[0], img_size[1]), Image.ANTIALIAS)
pix_array = np.array(img)
test_image.append(pix_array)
test_image = np.array(test_image)
self.test_image = test_image
self.test_label = test_label
self.transform = transform
def __getitem__(self, index):
img1, target1 = self.test_image[index], self.test_label[index]
return img1, target1
def __len__(self):
return len(self.test_image)
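# Illustrative wiring sketch (assumed usage, not part of this module): both generator
# classes above are meant to be wrapped in a MindSpore GeneratorDataset, e.g.
#
#   import mindspore.dataset as ds
#   gen = SYSUDatasetGenerator(data_dir="./data/sysu/", color_index=c_idx,
#                              thermal_index=t_idx, if_debug=True)
#   loader = ds.GeneratorDataset(gen, column_names=["color", "thermal",
#                                                   "color_label", "thermal_label"])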
|
the-stack_106_31400 | """
Setup tasks (requires invoke: pip install invoke)
"""
import sys
import platform
from pathlib import Path
import base64
from invoke import task
import shutil
_IS_WINDOWS = platform.system() == 'Windows'
_PY_DEFAULT_VERSION = '3.9'
if not Path('LICENSE').exists():
sys.exit('Error: Run the command from the root folder (the directory '
'with the README.md and setup.py files)')
@task
def db_credentials(c):
"""Encode db credentials (for github actions)
"""
path = str(Path('~', '.auth', 'postgres-ploomber.json').expanduser())
creds = Path(path).read_text()
print(base64.b64encode(creds.encode()).decode())
@task
def setup(c, doc=False, version=None):
"""
[conda] Setup dev environment
"""
if doc and version:
raise ValueError('doc and version options are incompatible, '
'installing docs will install python 3.8')
version = version or _PY_DEFAULT_VERSION
suffix = '' if version == _PY_DEFAULT_VERSION else version.replace('.', '')
env_name = f'ploomber{suffix}'
cmds = [
'eval "$(conda shell.bash hook)"',
f'conda activate {env_name}',
'conda install pygraphviz r-base r-irkernel --yes -c conda-forge',
'pip install --editable .[dev]',
'pip install --editable tests/assets/test_pkg',
]
if _IS_WINDOWS:
cmds.pop(0)
c.run(f'conda create --name {env_name} python={version} --yes')
c.run(' && '.join(cmds))
if doc:
cmds = [
'eval "$(conda shell.bash hook)"',
f'conda activate {env_name}',
f'conda env update --file environment.yml --name {env_name}',
]
if _IS_WINDOWS:
cmds.pop(0)
with c.cd('doc'):
c.run(' && '.join(cmds))
print(f'Done! Activate your environment with:\nconda activate {env_name}')
@task
def setup_pip(c, doc=False):
"""[pip] Setup dev environment
"""
# install ploomber in editable mode and include development dependencies
c.run('pip install --editable ".[dev]"')
# install sample package required in some tests
c.run('pip install --editable tests/assets/test_pkg')
# install doc dependencies
if doc:
c.run('pip install -r doc/requirements.txt')
print('Warning: installing with pip skips some dependencies. '
'See contributing.md "Setup with pip" for details')
@task
def docs(c):
"""Build docs
"""
with c.cd('doc'):
c.run('make html')
@task
def new(c):
"""Release a new version
"""
from pkgmt import versioneer
versioneer.version(project_root='.', tag=True)
@task
def upload(c, tag, production=True):
"""Upload to PyPI
"""
from pkgmt import versioneer
versioneer.upload(tag, production=production)
print('Remember to update binder-env!')
@task
def test(c, report=False):
"""Run tests
"""
c.run('pytest tests --cov ploomber ' +
('--cov-report html' if report else ''),
pty=True)
c.run('flake8')
@task
def install_git_hook(c, force=False):
"""Installs pre-push git hook
"""
path = Path('.git/hooks/pre-push')
hook_exists = path.is_file()
if hook_exists:
if force:
path.unlink()
else:
sys.exit('Error: pre-push hook already exists. '
'Run: "invoke install-git-hook -f" to force overwrite.')
shutil.copy('.githooks/pre-push', '.git/hooks')
print(f'pre-push hook installed at {str(path)}')
@task
def uninstall_git_hook(c):
"""Uninstalls pre-push git hook
"""
path = Path('.git/hooks/pre-push')
hook_exists = path.is_file()
if hook_exists:
path.unlink()
print(f'Deleted {str(path)}.')
else:
print('Hook doesn\'t exist, nothing to delete.')
|
the-stack_106_31401 | from datetime import datetime, time, date
import pytest
from freezegun import freeze_time
from tests.utils import file_response
from city_scrapers.constants import COMMITTEE
from city_scrapers.spiders.chi_school_community_action_council import (
ChiSchoolCommunityActionCouncilSpider
)
freezer = freeze_time('2018-06-01 12:00:01')
freezer.start()
test_response = file_response(
'files/chi_school_community_action_council_CAC.html',
url='http://cps.edu/FACE/Pages/CAC.aspx'
)
spider = ChiSchoolCommunityActionCouncilSpider()
parsed_items = [
item for item in spider.parse(test_response)
if isinstance(item, dict)
]
current_month_number = datetime.today().month
freezer.stop()
def test_num_items():
assert len(parsed_items) == (13 - current_month_number)*8
def test_name():
assert parsed_items[0]['name'] == 'Austin Community Action Council'
def test_start_time():
EXPECTED_START = {
'date': date(2018, 6, 12),
'time': time(17, 30),
'note': ''
}
assert parsed_items[0]['start'] == EXPECTED_START
def test_end_time():
EXPECTED_END = {
'date': date(2018, 6, 12),
'time': time(20, 30),
'note': 'Estimated 3 hours after the start time'
}
assert parsed_items[0]['end'] == EXPECTED_END
def test_id():
assert parsed_items[0]['id'] == (
'chi_school_community_action_council/201806121730'
'/x/austin_community_action_council'
)
def test_location():
assert parsed_items[0]['location'] == {
'name': 'Michele Clark HS',
'address': '5101 W Harrison St. Chicago, IL',
'neighborhood': 'Austin'
}
def test_sources():
EXPECTED_SOURCES = [
{
'url': 'http://cps.edu/FACE/Pages/CAC.aspx',
'note': 'CAC Meetings Website'
},
{
'url': 'https://cacbronzeville.weebly.com/',
'note': "Neighborhood's Website"
},
]
assert parsed_items[1]['sources'] == EXPECTED_SOURCES
@pytest.mark.parametrize('item', parsed_items)
def test_documents(item):
assert item['documents'] == []
@pytest.mark.parametrize('item', parsed_items)
def test_description(item):
assert item['event_description'] == ''
@pytest.mark.parametrize('item', parsed_items)
def test_all_day(item):
assert item['all_day'] is False
@pytest.mark.parametrize('item', parsed_items)
def test_classification(item):
assert item['classification'] == COMMITTEE
@pytest.mark.parametrize('item', parsed_items)
def test__type(item):
assert item['_type'] == 'event'
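# Illustrative: these tests are normally collected by pytest from the project's tests
# directory (exact path assumed), e.g.
#
#   pytest tests/test_chi_school_community_action_council.py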
|
the-stack_106_31403 | from __future__ import print_function
import idaapi
# -----------------------------------------------------------------------
# Using raw IDAAPI
def raw_main(p=True):
f = idaapi.get_func(here())
if not f:
return
q = idaapi.qflow_chart_t("The title", f, 0, 0, idaapi.FC_PREDS)
for n in range(0, q.size()):
b = q[n]
if p:
print("%x - %x [%d]:" % (b.start_ea, b.end_ea, n))
for ns in range(0, q.nsucc(n)):
if p:
print("SUCC: %d->%d" % (n, q.succ(n, ns)))
for ns in range(0, q.npred(n)):
if p:
print("PRED: %d->%d" % (n, q.pred(n, ns)))
# -----------------------------------------------------------------------
# Using the class
def cls_main(p=True):
f = idaapi.FlowChart(idaapi.get_func(here()))
for block in f:
if p:
print("%x - %x [%d]:" % (block.start_ea, block.end_ea, block.id))
for succ_block in block.succs():
if p:
print(" %x - %x [%d]:" % (succ_block.start_ea, succ_block.end_ea, succ_block.id))
for pred_block in block.preds():
if p:
print(" %x - %x [%d]:" % (pred_block.start_ea, pred_block.end_ea, pred_block.id))
q = None
f = None
raw_main(False)
cls_main(True)
|
the-stack_106_31404 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class usersonenotepagesparentnotebooksectiongroupssectionspagesOperations(object):
"""usersonenotepagesparentnotebooksectiongroupssectionspagesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~users_functions.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def preview(
self,
user_id, # type: str
onenote_page_id, # type: str
section_group_id, # type: str
onenote_section_id, # type: str
onenote_page_id1, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.microsoftgraphonenotepagepreview"
"""Invoke function preview.
Invoke function preview.
:param user_id: key: id of user.
:type user_id: str
:param onenote_page_id: key: id of onenotePage.
:type onenote_page_id: str
:param section_group_id: key: id of sectionGroup.
:type section_group_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param onenote_page_id1: key: id of onenotePage.
:type onenote_page_id1: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: microsoftgraphonenotepagepreview, or the result of cls(response)
:rtype: ~users_functions.models.microsoftgraphonenotepagepreview
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.microsoftgraphonenotepagepreview"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.preview.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'onenotePage-id1': self._serialize.url("onenote_page_id1", onenote_page_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.odataerror, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('microsoftgraphonenotepagepreview', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
preview.metadata = {'url': '/users/{user-id}/onenote/pages/{onenotePage-id}/parentNotebook/sectionGroups/{sectionGroup-id}/sections/{onenoteSection-id}/pages/{onenotePage-id1}/microsoft.graph.preview()'} # type: ignore
|
the-stack_106_31405 | #! /usr/bin/python
#-*- coding: utf-8 -*-
from __future__ import print_function
import datetime
from pybern.products.gnssdates.gnssdates import mjd2pydt, gps2pydt, SEC_PER_DAY
def utils_pydt2yydoy(pydt):
''' Return two-digit year and day-of-year as integers (in a list) from a
python datetime.datetime instance
'''
return [int(_) for _ in [pydt.strftime("%y"), pydt.strftime("%j")]]
def utils_whatever2pydt(**kwargs):
'''
'''
## pydt
if 'pydt' in kwargs:
if set(['year', 'doy', 'month', 'day', 'mjd', 'gwk', 'dow'
]).intersection(set(kwargs)) != set():
status = 0
else:
return kwargs['pydt']
## yyyy, ddd to datetime
if 'year' in kwargs and 'doy' in kwargs:
if any([
x for x in ['pydt', 'month', 'day', 'mjd', 'gwk', 'dow']
if x in kwargs
]):
status = 1
else:
return datetime.datetime.strptime(
'{:} {:}'.format(kwargs['year'], kwargs['doy']), '%Y %j')
## yyyy, mm, dd
elif set(['year', 'month',
'day']).intersection(set(kwargs)) == set(['year', 'month',
'day']):
if any([x for x in ['pydt', 'doy', 'mjd', 'gwk', 'dow'] if x in kwargs
]):
status = 2
else:
return datetime.datetime.strptime(
'{:} {:} {:}'.format(kwargs['year'], kwargs['month'],
kwargs['day']), '%Y %m %d')
## mjd
elif 'mjd' in kwargs:
if set(['pydt', 'year', 'doy', 'month', 'day', 'gwk', 'dow'
]).intersection(set(kwargs)) != set():
status = 3
else:
return mjd2pydt(float(kwargs['mjd']))
## wwww, d
elif 'gwk' in kwargs and 'dow' in kwargs:
if set(['pydt', 'year', 'doy', 'month', 'day', 'mjd']).intersection(
set(kwargs)) != set():
status = 4
else:
return gps2pydt(int(kwargs['gwk']),
float(kwargs['dow']) * SEC_PER_DAY)
else:
status = 10
msg = '[ERROR] produtils::utils_whatever2pydt failed to parse date; status: {:}'.format(
status)
raise RuntimeError(msg)
|
the-stack_106_31410 | """
Copyright (c) 2020 Cisco Systems Inc or its affiliates.
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------
Name: fmc.py
Purpose: This is contains FMC related REST methods
"""
import time
import requests
import logging
import json
import utility as utl
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
logger = utl.setup_logging()
class FirepowerManagementCenter:
"""
FirepowerManagementCenter class has REST methods for FMC connections
"""
def __init__(self, fmc_server, username, password, accesspolicy=None):
self.server = 'https://' + fmc_server
self.username = username
self.password = password
self.headers = []
self.domain_uuid = ""
self.authTokenTimestamp = 0
self.authTokenMaxAge = 15*60 # seconds - refresh after 15 minutes (30 minutes is the max without using refresh)
self.accessPolicyName = accesspolicy
def rest_get(self, url):
"""
Purpose: Issue REST get to the specified URL
Parameters: url
Returns: r.text is the text response (r.json() is a python dict version of the json response)
r.status_code = 2xx on success
Raises:
"""
# if the token is too old then get another
if time.time() > self.authTokenMaxAge + self.authTokenTimestamp:
logging.debug("Getting a new authToken")
self.get_auth_token()
r = None
try:
# REST call with SSL verification turned off:
logging.debug("Request: " + url)
r = requests.get(url, headers=self.headers, verify=False)
# REST call with SSL verification turned on:
# r = requests.get(url, headers=headers, verify='/path/to/ssl_certificate')
status_code = r.status_code
resp = r.text
logging.debug("Response status_code: " + str(status_code))
logging.debug("Response body: " + str(resp))
if 200 <= status_code <= 300:
# logging.debug("GET successful. Response data --> ")
# json_resp = json.loads(resp)
# logging.debug(json.dumps(json_resp,sort_keys=True,indent=4, separators=(',', ': ')))
pass
else:
r.raise_for_status()
raise Exception("Error occurred in Get -->"+resp)
except requests.exceptions.HTTPError as err:
raise Exception("Error in connection --> "+str(err))
finally:
if r: r.close()
return r
def rest_post(self, url, post_data):
"""
Purpose: Issue REST post to the specified url with the post_data provided
Parameters: url, post data
Returns: This function will return 'r' which is the response from the post:
r.text is the text response (r.json() is a python dict version of the json response)
r.status_code = 2xx on success
Raises: Error occurred in post
"""
if time.time() > self.authTokenMaxAge + self.authTokenTimestamp:
logging.debug("Getting a new authToken")
self.get_auth_token()
r = None
try:
# REST call with SSL verification turned off:
logging.debug("Request: " + url)
logging.debug("Post_data " + str(post_data))
r = requests.post(url, data=json.dumps(post_data), headers=self.headers, verify=False)
# REST call with SSL verification turned on:
# r = requests.post(url,data=json.dumps(post_data), headers=self.headers, verify='/path/to/ssl_certificate')
status_code = r.status_code
resp = r.text
logging.info("Response status_code: " + str(status_code))
logging.info("Response body: " + str(resp))
# logging.debug("Status code is: "+str(status_code))
if 201 <= status_code <= 202:
# json_resp = json.loads(resp)
# logging.debug(json.dumps(json_resp,sort_keys=True,indent=4, separators=(',', ': ')))
pass
else:
r.raise_for_status()
raise Exception("Error occurred in POST --> "+resp)
except requests.exceptions.HTTPError as err:
raise Exception("Error in connection --> "+str(err))
finally:
if r: r.close()
return r
def rest_put(self, url, put_data):
"""
Purpose: Issue REST put to specific url with the put_data provided
Parameters: url, put data
Returns: This function will return 'r' which is the response from the put:
r.text is the text response (r.json() is a python dict version of the json response)
r.status_code = 2xx on success
Raises:
"""
if time.time() > self.authTokenMaxAge + self.authTokenTimestamp:
logging.debug("Getting a new authToken")
self.get_auth_token()
r = None
try:
# REST call with SSL verification turned off:
logging.info("Request: " + url)
logging.info("Put_data: " + str(put_data))
r = requests.put(url, data=json.dumps(put_data), headers=self.headers, verify=False)
# REST call with SSL verification turned on:
# r = requests.put(url, data=json.dumps(put_data), headers=headers, verify='/path/to/ssl_certificate')
status_code = r.status_code
resp = r.text
logging.info("Response status_code: " + str(status_code))
logging.info("Response body: " + str(resp))
if status_code == 200:
pass
else:
r.raise_for_status()
raise Exception("Error occurred in put -->" + resp)
except requests.exceptions.HTTPError as err:
raise Exception("Error in connection --> "+str(err))
finally:
if r: r.close()
return r
def rest_delete(self, url):
"""
Purpose: Issue REST delete to the specified URL
Parameters: url
Returns: This function will return 'r' which is the response to the request:
r.text is the text response (r.json() is a python dict version of the json response)
r.status_code = 2xx on success
Raises:
"""
if time.time() > self.authTokenMaxAge + self.authTokenTimestamp:
logging.debug("Getting a new authToken")
self.get_auth_token()
r = None
try:
# REST call with SSL verification turned off:
logging.debug("Request: " + url)
r = requests.delete(url, headers=self.headers, verify=False)
# REST call with SSL verification turned on:
# r = requests.delete(url, headers=headers, verify='/path/to/ssl_certificate')
status_code = r.status_code
resp = r.text
logging.info("Response status_code: " + str(status_code))
logging.info("Response body: " + str(resp))
if 200 <= status_code <= 300:
# logging.debug("GET successful. Response data --> ")
# json_resp = json.loads(resp)
# logging.debug(json.dumps(json_resp,sort_keys=True,indent=4, separators=(',', ': ')))
pass
else:
r.raise_for_status()
raise Exception("Error occurred in Delete -->"+resp)
except requests.exceptions.HTTPError as err:
raise Exception("Error in connection --> "+str(err))
finally:
if r: r.close()
return r
def get_auth_token(self):
"""
Purpose: get a new REST authentication token
update the 'headers' variable
set a timestamp for the header (tokens expire)
Parameters:
Returns:
Raises:
"""
self.headers = {'Content-Type': 'application/json'}
api_auth_path = "/api/fmc_platform/v1/auth/generatetoken"
auth_url = self.server + api_auth_path
try:
# 2 ways of making a REST call are provided:
# One with "SSL verification turned off" and the other with "SSL verification turned on".
# The one with "SSL verification turned off" is commented out. If you like to use that then
# uncomment the line where verify=False and comment the line with =verify='/path/to/ssl_certificate'
# REST call with SSL verification turned off:
r = requests.post(auth_url, headers=self.headers,
auth=requests.auth.HTTPBasicAuth(self.username, self.password), verify=False)
# REST call with SSL verification turned on: Download SSL certificates
# from your FMC first and provide its path for verification.
# r = requests.post(auth_url, headers=self.headers,
# auth=requests.auth.HTTPBasicAuth(username,password), verify='/path/to/ssl_certificate')
auth_headers = r.headers
auth_token = auth_headers.get('X-auth-access-token', default=None)
self.domain_uuid = auth_headers.get('domain_uuid', default=None)
self.headers['X-auth-access-token'] = auth_token
self.authTokenTimestamp = int(time.time())
# logging.debug("Acquired AuthToken: " + auth_token)
# logging.debug("domain_uuid: " + domain_uuid)
if auth_token is None:
logging.debug("auth_token not found. Exiting...")
# raise Exception("Error occurred in get auth token ")
except Exception as err:
logger.error("Error in generating auth token --> " + str(err))
return
def get_device_grp_id_by_name(self, name):
"""
Purpose: To get device group id by passing name of the group
Parameters: Name of device group
Returns: Group Id or None
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devicegroups/devicegrouprecords"
url = self.server + api_path + '?offset=0&limit=9000'
r = self.rest_get(url)
if 'items' in r.json():
for item in r.json()['items']:
if item['name'] == name:
return str(item['id'])
return None
def get_member_list_in_device_grp(self, grp_id):
"""
Purpose: To get devices name list from grp id
Parameters: Group Id
Returns: list or None
Raises:
"""
member_name_list = []
member_id_list = []
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devicegroups/devicegrouprecords/"
url = self.server + api_path + grp_id
r = self.rest_get(url)
if 'members' in r.json():
for item in r.json()['members']:
member_name_list.append(item['name'])
member_id_list.append(item['id'])
return member_name_list, member_id_list
def get_security_objectid_by_name(self, name):
"""
Purpose: Get Zone ID from it's name
Parameters: Zone Name
Returns: Zone ID, None
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/object/securityzones"
url = self.server + api_path + '?offset=0&limit=9000'
r = self.rest_get(url)
if 'items' in r.json():
for item in r.json()['items']:
if item['name'] == name:
return str(item['id'])
return None
# Get network objects (all network and host objects)
def get_network_objectid_by_name(self, name):
"""
Purpose: Get Network object Id by its name
Parameters: Object Name
Returns: Object Id
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/object/networkaddresses"
url = self.server + api_path + '?offset=0&limit=10000'
r = self.rest_get(url)
for item in r.json()['items']:
if item['type'] == 'Network' and item['name'] == name:
return str(item['id'])
# raise Exception('network object with name ' + name + ' was not found')
return ''
def get_port_objectid_by_name(self, name):
"""
Purpose: Get Port object Id by its name
Parameters: Object Name
Returns: Object Id
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/object/protocolportobjects"
url = self.server + api_path + '?offset=0&limit=10000'
r = self.rest_get(url)
for item in r.json()['items']:
if item['type'] == 'ProtocolPortObject' and item['name'] == name:
return str(item['id'])
# raise Exception('network port with name ' + name + ' was not found')
return ''
def get_host_objectid_by_name(self, name):
"""
Purpose: Get Host object Id by Name
Parameters: Object Name
Returns: Object Id
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/object/hosts"
url = self.server + api_path + '?offset=0&limit=10000'
r = self.rest_get(url)
for item in r.json()['items']:
if item['type'] == 'Host' and item['name'] == name:
return str(item['id'])
# raise Exception('host object with name ' + name + ' was not found')
return ''
def get_device_id_by_name(self, name):
"""
Purpose: Get Device Id by its name
Parameters: Device Name
Returns: Device Id
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords"
url = self.server + api_path + '?offset=0&limit=10000'
r = self.rest_get(url)
if 'items' in r.json():
for item in r.json()['items']:
if item['name'] == name:
return str(item['id'])
# or return empty string
return ''
def get_access_policy_id_by_name(self, name):
"""
Purpose: Get Access Policy Id by its name
Parameters: Access policy name
Returns: Access Policy Id, None
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/policy/accesspolicies"
url = self.server + api_path + '?offset=0&limit=10000'
r = self.rest_get(url)
# Search for policy by name
if 'items' in r.json():
for item in r.json()['items']:
if item['name'] == name:
return str(item['id'])
return None
def get_nic_id_by_name(self, device_id, nic_name):
"""
Purpose: Get Nic Id by device & nic name
Parameters: Device Name, Nic name
Returns: Nic Id, None
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords/" + \
device_id + "/physicalinterfaces"
url = self.server + api_path
r = self.rest_get(url)
if 'items' in r.json():
for item in r.json()['items']:
if item['name'] == nic_name:
return str(item['id'])
return None
def get_time_stamp(self):
"""
Purpose: Get time stamp
Parameters:
Returns: Audit time stamp
Raises:
"""
api_path = "/api/fmc_platform/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/audit/auditrecords"
url = self.server + api_path
r = self.rest_get(url)
return r.json()['items'][0]['time']*1000
def get_deployable_devices(self):
"""
Purpose: Get list of deployable devices
Parameters:
Returns: List of devices, pending to be deployed
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/deployment/deployabledevices"
url = self.server + api_path
r = self.rest_get(url)
logging.debug("deployable devices:" + str(r.json()))
device_list = []
if 'items' in r.json():
for item in r.json()['items']:
if item['type'] == 'DeployableDevice':
device_list.append(item['name'])
return device_list
def get_nic_status(self, device_id, nic, nic_id, ifname, zone_id, ip=None):
"""
Purpose: To check whether Nic is configured or not configured
Parameters: Device Id, Nic, Nic Id, Interface Name, Zone Id, Ip
Returns: CONFIGURED, MIS-CONFIGURED, UN-CONFIGURED
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords/" + \
device_id + "/physicalinterfaces/" + nic_id
url = self.server + api_path
r = self.rest_get(url)
flag1, flag2 = 0, 0
try:
if 'ipv4' in r.json():
item = dict.copy(r.json()['ipv4']['static'])
if item['address'] == ip:
flag1 = 1
except:
try:
if 'ipv4' in r.json():
item = dict.copy(r.json()['ipv4']['dhcp'])
flag1 = 1
except:
flag1 = 0
try:
if r.json()['name'] == nic:
if r.json()['ifname'] == ifname:
flag2 = 1
if r.json()['securityZone']['id'] != zone_id:
flag2 = 0
except:
flag2 = 0
if flag1 == 1 and flag2 == 1:
return "CONFIGURED"
elif (flag1 == 1 and flag2 == 0) or (flag1 == 0 and flag2 == 1):
logger.critical("Interface Mis-Configured! ")
return "UN-CONFIGURED"
def check_static_route(self, device_id, interface_name, _object_name, gate_way):
"""
Purpose: Check if a static route exists on a device
Parameters: Device, Interface name, Network, Gateway
Returns: CONFIGURED, UN-CONFIGURED
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords/" + \
device_id + "/routing/ipv4staticroutes"
url = self.server + api_path + '?offset=0&limit=10000'
r = self.rest_get(url)
if 'items' in r.json():
for key1 in r.json()['items']:
id = key1['id']
url = self.server + api_path + '/' + id
r = self.rest_get(url)
if r.json()['interfaceName'] == interface_name:
for key2 in r.json()['selectedNetworks']:
if key2['name'] == _object_name:
try:
element = dict.copy(r.json()['gateway']['object'])
if element['name'] == gate_way:
return "CONFIGURED"
except:
pass
try:
element = dict.copy(r.json()['gateway']['literal'])
if element['value'] == gate_way:
return "CONFIGURED"
except:
pass
return "UN-CONFIGURED"
def configure_nic_dhcp(self, device_id, nic_id, nic, nic_name, mgmt_only, mode, zone_id, mtu):
"""
Purpose: Configure an Nic interface as DHCP
Parameters: Device Name, Nic, Nic name, Zone, MTU
Returns: REST put response
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords/" + \
device_id + "/physicalinterfaces/" + nic_id
url = self.server + api_path
put_data = {
"type": "PhysicalInterface",
"managementOnly": mgmt_only,
"MTU": int(mtu),
"ipv4": {
"dhcp": {
"enableDefaultRouteDHCP": "false",
"dhcpRouteMetric": 1
}
},
"securityZone": {
"id": zone_id,
"type": "SecurityZone"
},
"mode": mode,
"ifname": nic_name,
"enabled": "true",
"name": nic,
"id": nic_id
}
r = self.rest_put(url, put_data)
return r
def configure_nic_static(self, device_id, nic_id, nic, nic_name, mgmt_only, mode, zone_id, mtu, ip, netmask):
"""
Purpose: Configure an Nic interface as Static
Parameters: Device Name, Nic, Nic name, Zone, IP, Netmask
Returns: REST put response
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords/" + \
device_id + "/physicalinterfaces/" + nic_id
url = self.server + api_path
put_data = {
"type": "PhysicalInterface",
"managementOnly": mgmt_only,
"MTU": mtu,
"ipv4": {
"static": {
"address": ip,
"netmask": netmask
}
},
"securityZone": {
"id": zone_id,
"type": "SecurityZone"
},
"mode": mode,
"ifname": nic_name,
"enabled": "true",
"name": nic,
"id": nic_id
}
r = self.rest_put(url, put_data)
return r
def create_static_route(self, device_id, interface_name, _type, _object_name, _object_id, gate_way, metric):
"""
Purpose: To create static route on device
Parameters: Device, Interface Name, Host, Gateway, Metric
Returns: REST response
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords/" + \
device_id + "/routing/ipv4staticroutes" # param
url = self.server + api_path
post_data = {
"interfaceName": interface_name,
"selectedNetworks": [
{
"type": _type,
"id": _object_id,
"name": _object_name
}
],
"gateway": gate_way,
"metricValue": metric,
"type": "IPv4StaticRoute",
"isTunneled": False
}
r = self.rest_post(url, post_data)
return r
def register_device(self, name, mgmt_ip, policy_id, reg_id, nat_id, license_caps, device_grp_id):
"""
Purpose: Register the device to FMC
Parameters: Name of device, Mgmt ip, Access Policy Id, Registration & NAT id, Licenses Caps, Group Id
Returns: REST post response
Raises:
"""
logger.info("Registering: "+name)
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords"
url = self.server + api_path
post_data = {
"name": name,
"hostName": mgmt_ip,
"regKey": reg_id,
"natID": nat_id,
"type": "Device",
"license_caps": license_caps,
"accessPolicy": {
"id": policy_id,
"type": "AccessPolicy"
},
"deviceGroup": {
"id": device_grp_id,
"type": "DeviceGroup"
}
}
r = self.rest_post(url, post_data)
return r
def deregister_device(self, name):
"""
Purpose: De-registers the device from FMC
Parameters: Device Name
Returns: REST delete response
Raises:
"""
logger.info("De-registering: " + name)
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords/"
dev_id = self.get_device_id_by_name(name)
url = self.server + api_path + dev_id
r = self.rest_delete(url)
return r
def start_deployment(self, device_name):
"""
Purpose: Deploys policy changes on device
Parameters: Device name
Returns: Task Id
Raises:
"""
logger.info("Deploy called for: " + device_name)
device_list = self.get_deployable_devices()
        logger.debug("Device List = " + str(device_list))
        if device_name in device_list:
            logger.debug("deploying on device: " + device_name)
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/deployment/deploymentrequests"
url = self.server + api_path
post_data = {
"type": "DeploymentRequest",
"version": str(self.get_time_stamp()),
"forceDeploy": True,
"ignoreWarning": True,
"deviceList": [self.get_device_id_by_name(device_name)]
}
r = self.rest_post(url, post_data)
if 'type' in r.json():
if r.json()['type'] == 'DeploymentRequest':
return r.json()['metadata']['task']['id']
return ''
def check_reg_status_from_fmc(self, vm_name):
"""
Purpose: Checks if device is registered to FMC
Parameters: Device Name
Returns: SUCCESS, FAILED
Raises:
"""
try:
device_id = self.get_device_id_by_name(vm_name)
except Exception as e:
logger.debug(str(e))
else:
if device_id != '':
return "SUCCESS"
else:
return "FAILED"
def check_deploy_status(self, vm_name):
"""
        Purpose: Checks whether any deployment is pending for the device
Parameters: Device name
Returns: DEPLOYED, NOT-DEPLOYED
Raises:
"""
r = self.get_deployable_devices()
for device in r:
if device == vm_name:
logger.debug("Policies not deployed on " + vm_name)
return "NOT-DEPLOYED"
logger.debug("Policies deployed on " + vm_name)
return "DEPLOYED"
def check_object_fmc(self, obj_name):
"""
        Purpose: Checks for an object in FMC (network, host, then port object)
Parameters: Object name
Returns: Object Id
Raises:
"""
obj_id = self.get_network_objectid_by_name(obj_name)
if obj_id == '':
obj_id = self.get_host_objectid_by_name(obj_name)
if obj_id == '':
obj_id = self.get_port_objectid_by_name(obj_name)
if obj_id == '':
logger.error("Unable to find object %s" % obj_name)
return ''
return obj_id
def get_memory_metrics_from_fmc(self, device_id):
"""
Purpose: Fetch Memory Metric
Parameters: device id
        Returns: Metrics JSON as a dict, or None on error
Raises:
"""
try:
api_path = '/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords/'
api_suffix = '/operational/metrics?filter=metric%3Amemory&offset=0&limit=1&expanded=true'
url = self.server + api_path + device_id + api_suffix
r = self.rest_get(url)
resp = r.text
return json.loads(resp)
except Exception as e:
logger.error("Error {}".format(e))
return None
def get_policy_assign_targets(self, pol_id):
"""
        Purpose: Get assignment targets of a policy by its id
        Parameters: Policy Id
        Returns: List of targets, e.g.
"targets": [
{
"id": "87b98de4-919c-11ea-bedf-d2d17d1b9702",
"type": "DeviceGroup",
"name": "AWS-Cisco-NGFW-VMs-2"
},
{
"id": "d263cee0-919c-11ea-ad04-b08a2fa13b2d",
"type": "DeviceGroup",
"name": "AWS-Cisco-NGFW-VMs-1"
},
{
"id": "4d71e0d4-91e0-11ea-b727-a060bdd6dece",
"type": "DeviceGroup",
"name": "AWS-Cisco-NGFW-VMs-3"
}
]
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/assignment/policyassignments/"
url = self.server + api_path + pol_id
r = self.rest_get(url)
if 'targets' in r.json():
return r.json()["targets"]
else:
return []
def get_nat_policy_id(self, pol_name):
"""
Purpose: Gets policy id
Parameters: Policy Name
Returns: Policy Id
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/policy/ftdnatpolicies"
url = self.server + api_path + '?offset=0&limit=10000'
r = self.rest_get(url)
        for item in r.json()['items']:
            if item['name'] == pol_name:
                return str(item['id'])
        return None
class DerivedFMC(FirepowerManagementCenter):
"""
    DerivedFMC is a child class of FirepowerManagementCenter that caches resolved FMC ids and adds helper methods
"""
def __init__(self, fmc_server, username, password, accesspolicy):
super().__init__(fmc_server, username, password, accesspolicy)
self.d_grp_name = ''
self.a_policy_name = ''
self.nat_policy_name = ''
self.seczone_name = []
self.network_obj_name = []
self.host_obj_name = []
self.d_grp_id = ''
self.a_policy_id = ''
self.nat_policy_id = ''
self.seczone_obj_pair = {}
self.network_obj_pair = {}
self.host_obj_pair = {}
self.reachable = False
self.configuration = {}
self.configuration_status = ""
def reach_fmc_(self):
"""
Purpose: To get Auth token & update self.reachable value
Parameters:
Returns: self.reachable
Raises:
"""
try:
self.get_auth_token()
if self.headers['X-auth-access-token']:
self.reachable = 'AVAILABLE'
except Exception as e:
logger.exception(e)
self.reachable = 'UN-AVAILABLE'
self.configuration.update({'fmc_reachable': self.reachable})
return self.reachable
def set_fmc_configuration(self):
"""
        Purpose: To resolve the configured names to FMC ids and record them in self.configuration
        Parameters:
        Returns: None (updates instance attributes)
Raises:
"""
if self.d_grp_name:
self.d_grp_id = self.get_device_grp_id_by_name(self.d_grp_name)
self.configuration.update({"device_grp": {self.d_grp_name: self.d_grp_id}})
if self.a_policy_name:
self.a_policy_id = self.get_access_policy_id_by_name(self.a_policy_name)
self.configuration.update({"access_policy": {self.a_policy_name: self.a_policy_id}})
if self.nat_policy_name:
self.nat_policy_id = self.get_nat_policy_id(self.nat_policy_name)
self.configuration.update({"nat_policy": {self.nat_policy_name: self.nat_policy_id}})
if self.seczone_name:
for i in self.seczone_name:
self.seczone_obj_pair.update({i: self.get_security_objectid_by_name(i)})
self.configuration.update({"security_zones": self.seczone_obj_pair})
if self.network_obj_name:
for i in self.network_obj_name:
self.network_obj_pair.update({i: self.get_network_objectid_by_name(i)})
self.configuration.update({"net_objects": self.network_obj_pair})
if self.host_obj_name:
for i in self.host_obj_name:
self.host_obj_pair.update({i: self.get_host_objectid_by_name(i)})
self.configuration.update({"host_objects": self.host_obj_pair})
logger.info(json.dumps(self.configuration, separators=(',', ':')))
return
def update_fmc_config_user_input(self, d_grp_name, a_policy_name, nat_policy_name,
l_seczone_name, l_network_obj_name, l_host_obj_name):
"""
        Purpose: To copy user-supplied names into the DerivedFMC instance
Parameters:
Returns:
Raises:
"""
self.d_grp_name = d_grp_name
self.a_policy_name = a_policy_name
self.nat_policy_name = nat_policy_name
self.seczone_name = l_seczone_name
self.network_obj_name = l_network_obj_name
self.host_obj_name = l_host_obj_name
return
def check_fmc_configuration(self):
"""
        Purpose: To check whether the DerivedFMC instance has resolved all required ids
Parameters:
Returns: self.configuration_status
Raises:
"""
self.configuration_status = 'UN-CONFIGURED'
if self.reachable == 'AVAILABLE':
if self.d_grp_id == '':
return self.configuration_status
if self.a_policy_id == '':
return self.configuration_status
else:
r = self.get_policy_assign_targets(self.a_policy_id)
if not utl.find_value_in_list(r, self.d_grp_id):
return self.configuration_status
if self.nat_policy_id == '':
return self.configuration_status
else:
r = self.get_policy_assign_targets(self.nat_policy_id)
if not utl.find_value_in_list(r, self.d_grp_id):
return self.configuration_status
for (k, v) in self.seczone_obj_pair.items():
if v is None:
return self.configuration_status
for (k, v) in self.network_obj_pair.items():
if v is None:
return self.configuration_status
for (k, v) in self.host_obj_pair.items():
if v is None:
return self.configuration_status
self.configuration_status = 'CONFIGURED'
self.configuration.update({'fmc_configuration_status': self.configuration_status})
return self.configuration_status
def register_ftdv(self, vm_name, mgmt_ip, reg_id, nat_id, license_caps):
"""
Purpose: Register the device to FMC
        Parameters: Device Name, Mgmt IP, Registration & NAT id, License caps
Returns: Task id, None
Raises:
"""
try:
logger.info("Registering FTDv: " + vm_name + " to FMC with policy id: " + self.a_policy_name)
r = self.register_device(vm_name, mgmt_ip, self.a_policy_id, reg_id, nat_id, license_caps, self.d_grp_id)
logger.debug("Register response was: " + str(r.json()))
if 'type' in r.json():
if r.json()['type'] == 'Device':
                    logger.info("NGFWv: " + vm_name + " registration started and task ID is: " +
r.json()['metadata']['task']['id'])
return r.json()['metadata']['task']['id']
except Exception as e:
logger.exception(e)
return None
def conf_static_rt(self, device_id, int_name, rt_type, net_name, gateway, metric):
"""
        Purpose: To create a static route, using a host object as the gateway when one exists, otherwise an IP literal
Parameters:
Returns: response
Raises:
"""
net_id = self.get_host_objectid_by_name(net_name)
gateway_id = self.get_host_objectid_by_name(gateway)
# Gateway can be an object or IP literal
if gateway_id != '':
gate_way = {
"object": {
"type": "Host",
"id": gateway_id,
"name": gateway
}
}
else:
gate_way = {
"literal": {
"type": "Host",
"value": gateway
}
}
try:
r = self.create_static_route(device_id, int_name, rt_type, net_name, net_id, gate_way, metric)
return r
except Exception as e:
logger.exception(e)
return None
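# Usage sketch (the server URL, credentials, and object/policy names below are
# placeholders, not taken from this module): resolve the configured names to FMC
# ids and verify the derived configuration before registering devices.
if __name__ == '__main__':
    fmc = DerivedFMC('https://fmc.example.com', 'api-user', 'api-pass', 'access-policy')
    fmc.update_fmc_config_user_input('NGFW-Group', 'access-policy', 'nat-policy',
                                     ['inside-zone', 'outside-zone'], [], [])
    if fmc.reach_fmc_() == 'AVAILABLE':
        fmc.set_fmc_configuration()
        print(fmc.check_fmc_configuration())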
|
the-stack_106_31412 | # Symbolizing a Vector Layer
# https://github.com/GeospatialPython/Learn/raw/master/Mississippi.zip
from PyQt4.QtGui import *
lyr = QgsVectorLayer("/qgis_data/ms/mississippi.shp", "Mississippi", "ogr")
QgsMapLayerRegistry.instance().addMapLayer(lyr)
symbols = lyr.rendererV2().symbols()
sym = symbols[0]
sym.setColor(QColor.fromRgb(255,0,0))
lyr.triggerRepaint()
|
the-stack_106_31413 | import argparse
import logging
import os
import uuid
import zmq
from zmq.utils import jsonapi
from .graph import PoolingStrategy
__all__ = ['set_logger', 'send_ndarray', 'get_args_parser', 'check_tf_version', 'auto_bind']
def set_logger(context):
logger = logging.getLogger(context)
logger.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(levelname)-.1s:' + context + ':[%(filename).3s:%(funcName).3s:%(lineno)3d]:%(message)s', datefmt=
'%m-%d %H:%M:%S')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(formatter)
logger.handlers = []
logger.addHandler(console_handler)
return logger
def send_ndarray(src, dest, X, req_id=b'', flags=0, copy=True, track=False):
"""send a numpy array with metadata"""
md = dict(dtype=str(X.dtype), shape=X.shape)
return src.send_multipart([dest, jsonapi.dumps(md), X, req_id], flags, copy=copy, track=track)
def get_args_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-model_dir', type=str, required=True,
help='directory of a pretrained BERT model')
parser.add_argument('-max_seq_len', type=int, default=25,
help='maximum length of a sequence')
parser.add_argument('-num_worker', type=int, default=1,
help='number of server instances')
parser.add_argument('-max_batch_size', type=int, default=256,
help='maximum number of sequences handled by each worker')
parser.add_argument('-port', '-port_in', '-port_data', type=int, default=5555,
help='server port for receiving data from client')
parser.add_argument('-port_out', '-port_result', type=int, default=5556,
help='server port for outputting result to client')
parser.add_argument('-pooling_layer', type=int, nargs='+', default=[-2],
help='the encoder layer(s) that receives pooling. '
'Give a list in order to concatenate several layers into 1.')
parser.add_argument('-pooling_strategy', type=PoolingStrategy.from_string,
default=PoolingStrategy.REDUCE_MEAN, choices=list(PoolingStrategy),
help='the pooling strategy for generating encoding vectors')
parser.add_argument('-cpu', action='store_true', default=False,
help='running on CPU (default is on GPU)')
parser.add_argument('-xla', action='store_true', default=False,
help='enable XLA compiler')
parser.add_argument('-gpu_memory_fraction', type=float, default=0.5,
help='determines the fraction of the overall amount of memory '
'that each visible GPU should be allocated per worker. '
'Should be in range [0.0, 1.0]')
parser.add_argument('-version', action='store_true', default=False,
help='show version and exit')
return parser
def check_tf_version():
import tensorflow as tf
tf_ver = tf.__version__.split('.')
assert int(tf_ver[0]) >= 1 and int(tf_ver[1]) >= 10, 'Tensorflow >=1.10 is required!'
return tf_ver
def auto_bind(socket):
if os.name == 'nt': # for Windows
socket.bind_to_random_port('tcp://*')
else:
# Get the location for tmp file for sockets
try:
tmp_dir = os.environ['ZEROMQ_SOCK_TMP_DIR']
if not os.path.exists(tmp_dir):
                raise ValueError('This directory for sockets ({}) does not seem to exist.'.format(tmp_dir))
tmp_dir = os.path.join(tmp_dir, str(uuid.uuid1())[:8])
except KeyError:
tmp_dir = '*'
socket.bind('ipc://{}'.format(tmp_dir))
return socket.getsockopt(zmq.LAST_ENDPOINT).decode('ascii')
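# Minimal usage sketch (not part of the original module): bind a PUSH socket to an
# automatically chosen endpoint (an ipc:// path on POSIX, a random tcp:// port on
# Windows) and log where it ended up.
if __name__ == '__main__':
    _ctx = zmq.Context()
    _sock = _ctx.socket(zmq.PUSH)
    set_logger('DEMO').info('bound to %s' % auto_bind(_sock))
    _sock.close()
    _ctx.term()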
|
the-stack_106_31415 | from setuptools import find_packages
from setuptools import setup
package_name = 'ament_uncrustify'
setup(
name=package_name,
version='0.8.0',
packages=find_packages(exclude=['test']),
data_files=[
('share/' + package_name, ['package.xml']),
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
],
install_requires=['setuptools'],
package_data={'': [
'configuration/ament_code_style.cfg',
]},
zip_safe=False,
author='Dirk Thomas',
author_email='[email protected]',
maintainer='Dirk Thomas',
maintainer_email='[email protected]',
url='https://github.com/ament/ament_lint',
download_url='https://github.com/ament/ament_lint/releases',
keywords=['ROS'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development',
],
description='Check code style using uncrustify.',
long_description="""\
The ability to check code against style conventions using uncrustify
and generate xUnit test result files.""",
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'ament_uncrustify = ament_uncrustify.main:main',
],
},
)
|
the-stack_106_31416 | import os
from smarts.sstudio import gen_traffic
from smarts.sstudio.types import (
Traffic,
Flow,
Route,
TrafficActor,
Distribution,
LaneChangingModel,
JunctionModel,
)
scenario = os.path.dirname(os.path.realpath(__file__))
impatient_car = TrafficActor(
name="car",
speed=Distribution(sigma=0.2, mean=1.0),
lane_changing_model=LaneChangingModel(impatience=1, cooperative=0.25),
junction_model=JunctionModel(
drive_after_red_time=1.5, drive_after_yellow_time=1.0, impatience=1.0
),
)
patient_car = TrafficActor(
name="car",
speed=Distribution(sigma=0.2, mean=0.8),
lane_changing_model=LaneChangingModel(impatience=0, cooperative=0.5),
junction_model=JunctionModel(drive_after_yellow_time=1.0, impatience=0.5),
)
vertical_routes = [
("north-NS", "south-NS"),
("south-SN", "north-SN"),
]
horizontal_routes = [
("west-WE", "east-WE"),
("east-EW", "west-EW"),
]
turn_left_routes = [
("south-SN", "west-EW"),
("west-WE", "north-SN"),
("north-NS", "east-WE"),
("east-EW", "south-NS"),
]
turn_right_routes = [
("south-SN", "east-WE"),
("west-WE", "south-NS"),
("north-NS", "west-EW"),
("east-EW", "north-SN"),
]
for name, routes in {
"vertical": vertical_routes,
"horizontal": horizontal_routes,
"unprotected_left": turn_left_routes,
"turns": turn_left_routes + turn_right_routes,
"all": vertical_routes + horizontal_routes + turn_left_routes + turn_right_routes,
}.items():
traffic = Traffic(
flows=[
Flow(
route=Route(
begin=(f"edge-{r[0]}", 0, "random"),
end=(f"edge-{r[1]}", 0, "random"),
),
rate=1,
actors={impatient_car: 0.5, patient_car: 0.5},
)
for r in routes
]
)
gen_traffic(scenario, traffic, name=name)
|
the-stack_106_31417 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for Residual Networks.
Residual networks ('v1' ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant was introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer
rather than after.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
_BATCH_NORM_DECAY = 0.997
_BATCH_NORM_EPSILON = 1e-5
DEFAULT_VERSION = 2
################################################################################
# Convenience functions for building the ResNet model.
################################################################################
def batch_norm(inputs, training, data_format):
"""Performs a batch normalization using a standard set of parameters."""
# We set fused=True for a significant performance boost. See
# https://www.tensorflow.org/performance/performance_guide#common_fused_ops
return tf.layers.batch_normalization(
inputs=inputs, axis=1 if data_format == 'channels_first' else 3,
momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,
scale=True, training=training, fused=True)
def fixed_padding(inputs, kernel_size, data_format):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
A tensor with the same format as the input with the data either intact
(if kernel_size == 1) or padded (if kernel_size > 1).
"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],
[pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
[pad_beg, pad_end], [0, 0]])
return padded_inputs
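# Worked example of the padding rule above (illustrative): with kernel_size=7,
# pad_total is 6, so fixed_padding adds 3 rows/columns of zeros on every spatial
# side of the input, independent of the input's height and width.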
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):
"""Strided 2-D convolution with explicit padding."""
# The padding is consistent and is based only on `kernel_size`, not on the
# dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
if strides > 1:
inputs = fixed_padding(inputs, kernel_size, data_format)
return tf.layers.conv2d(
inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,
kernel_initializer=tf.variance_scaling_initializer(),
data_format=data_format)
################################################################################
# ResNet block definitions.
################################################################################
def _building_block_v1(inputs, filters, training, projection_shortcut, strides,
data_format):
"""
Convolution then batch normalization then ReLU as described by:
Deep Residual Learning for Image Recognition
https://arxiv.org/pdf/1512.03385.pdf
by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Dec 2015.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the convolutions.
training: A Boolean for whether the model is in training or inference
mode. Needed for batch normalization.
projection_shortcut: The function to use for projection shortcuts
(typically a 1x1 convolution when downsampling the input).
strides: The block's stride. If greater than 1, this block will ultimately
downsample the input.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block.
"""
shortcut = inputs
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs)
shortcut = batch_norm(inputs=shortcut, training=training,
data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=strides,
data_format=data_format)
inputs = batch_norm(inputs, training, data_format)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=1,
data_format=data_format)
inputs = batch_norm(inputs, training, data_format)
inputs += shortcut
inputs = tf.nn.relu(inputs)
return inputs
def _building_block_v2(inputs, filters, training, projection_shortcut, strides,
data_format):
"""
Batch normalization then ReLu then convolution as described by:
Identity Mappings in Deep Residual Networks
https://arxiv.org/pdf/1603.05027.pdf
by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Jul 2016.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the convolutions.
training: A Boolean for whether the model is in training or inference
mode. Needed for batch normalization.
projection_shortcut: The function to use for projection shortcuts
(typically a 1x1 convolution when downsampling the input).
strides: The block's stride. If greater than 1, this block will ultimately
downsample the input.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block.
"""
shortcut = inputs
inputs = batch_norm(inputs, training, data_format)
inputs = tf.nn.relu(inputs)
# The projection shortcut should come after the first batch norm and ReLU
# since it performs a 1x1 convolution.
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=strides,
data_format=data_format)
inputs = batch_norm(inputs, training, data_format)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=1,
data_format=data_format)
return inputs + shortcut
def _bottleneck_block_v1(inputs, filters, training, projection_shortcut,
strides, data_format):
"""
Similar to _building_block_v1(), except using the "bottleneck" blocks
described in:
Convolution then batch normalization then ReLU as described by:
Deep Residual Learning for Image Recognition
https://arxiv.org/pdf/1512.03385.pdf
by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Dec 2015.
"""
shortcut = inputs
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs)
shortcut = batch_norm(inputs=shortcut, training=training,
data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=1, strides=1,
data_format=data_format)
inputs = batch_norm(inputs, training, data_format)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=strides,
data_format=data_format)
inputs = batch_norm(inputs, training, data_format)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=4 * filters, kernel_size=1, strides=1,
data_format=data_format)
inputs = batch_norm(inputs, training, data_format)
inputs += shortcut
inputs = tf.nn.relu(inputs)
return inputs
def _bottleneck_block_v2(inputs, filters, training, projection_shortcut,
strides, data_format):
"""
Similar to _building_block_v2(), except using the "bottleneck" blocks
described in:
Convolution then batch normalization then ReLU as described by:
Deep Residual Learning for Image Recognition
https://arxiv.org/pdf/1512.03385.pdf
by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Dec 2015.
adapted to the ordering conventions of:
Batch normalization then ReLu then convolution as described by:
Identity Mappings in Deep Residual Networks
https://arxiv.org/pdf/1603.05027.pdf
by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Jul 2016.
"""
shortcut = inputs
inputs = batch_norm(inputs, training, data_format)
inputs = tf.nn.relu(inputs)
# The projection shortcut should come after the first batch norm and ReLU
# since it performs a 1x1 convolution.
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=1, strides=1,
data_format=data_format)
inputs = batch_norm(inputs, training, data_format)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=filters, kernel_size=3, strides=strides,
data_format=data_format)
inputs = batch_norm(inputs, training, data_format)
inputs = tf.nn.relu(inputs)
inputs = conv2d_fixed_padding(
inputs=inputs, filters=4 * filters, kernel_size=1, strides=1,
data_format=data_format)
return inputs + shortcut
def block_layer(inputs, filters, bottleneck, block_fn, blocks, strides,
training, name, data_format):
"""Creates one layer of blocks for the ResNet model.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the first convolution of the layer.
bottleneck: Is the block created a bottleneck block.
block_fn: The block to use within the model, either `building_block` or
`bottleneck_block`.
blocks: The number of blocks contained in the layer.
strides: The stride to use for the first convolution of the layer. If
greater than 1, this layer will ultimately downsample the input.
training: Either True or False, whether we are currently training the
model. Needed for batch norm.
name: A string name for the tensor output of the block layer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block layer.
"""
# Bottleneck blocks end with 4x the number of filters as they start with
filters_out = filters * 4 if bottleneck else filters
def projection_shortcut(inputs):
return conv2d_fixed_padding(
inputs=inputs, filters=filters_out, kernel_size=1, strides=strides,
data_format=data_format)
# Only the first block per block_layer uses projection_shortcut and strides
inputs = block_fn(inputs, filters, training, projection_shortcut, strides,
data_format)
for _ in range(1, blocks):
inputs = block_fn(inputs, filters, training, None, 1, data_format)
return tf.identity(inputs, name)
class Model(object):
"""Base class for building the Resnet Model.
"""
def __init__(self, resnet_size, bottleneck, num_classes, num_filters,
kernel_size,
conv_stride, first_pool_size, first_pool_stride,
second_pool_size, second_pool_stride, block_sizes, block_strides,
final_size, version=DEFAULT_VERSION, data_format=None):
"""Creates a model for classifying an image.
Args:
resnet_size: A single integer for the size of the ResNet model.
bottleneck: Use regular blocks or bottleneck blocks.
num_classes: The number of classes used as labels.
num_filters: The number of filters to use for the first block layer
of the model. This number is then doubled for each subsequent block
layer.
kernel_size: The kernel size to use for convolution.
conv_stride: stride size for the initial convolutional layer
first_pool_size: Pool size to be used for the first pooling layer.
If none, the first pooling layer is skipped.
first_pool_stride: stride size for the first pooling layer. Not used
if first_pool_size is None.
second_pool_size: Pool size to be used for the second pooling layer.
second_pool_stride: stride size for the final pooling layer
block_sizes: A list containing n values, where n is the number of sets of
block layers desired. Each value should be the number of blocks in the
i-th set.
block_strides: List of integers representing the desired stride size for
each of the sets of block layers. Should be same length as block_sizes.
final_size: The expected size of the model after the second pooling.
version: Integer representing which version of the ResNet network to use.
See README for details. Valid values: [1, 2]
data_format: Input format ('channels_last', 'channels_first', or None).
If set to None, the format is dependent on whether a GPU is available.
"""
self.resnet_size = resnet_size
if not data_format:
data_format = (
'channels_first' if tf.test.is_built_with_cuda() else 'channels_last')
self.resnet_version = version
if version not in (1, 2):
raise ValueError(
"Resnet version should be 1 or 2. See README for citations.")
self.bottleneck = bottleneck
if bottleneck:
if version == 1:
self.block_fn = _bottleneck_block_v1
else:
self.block_fn = _bottleneck_block_v2
else:
if version == 1:
self.block_fn = _building_block_v1
else:
self.block_fn = _building_block_v2
self.data_format = data_format
self.num_classes = num_classes
self.num_filters = num_filters
self.kernel_size = kernel_size
self.conv_stride = conv_stride
self.first_pool_size = first_pool_size
self.first_pool_stride = first_pool_stride
self.second_pool_size = second_pool_size
self.second_pool_stride = second_pool_stride
self.block_sizes = block_sizes
self.block_strides = block_strides
self.final_size = final_size
def __call__(self, inputs, training):
"""Add operations to classify a batch of input images.
Args:
inputs: A Tensor representing a batch of input images.
training: A boolean. Set to True to add operations required only when
training the classifier.
Returns:
A logits Tensor with shape [<batch_size>, self.num_classes].
"""
if self.data_format == 'channels_first':
# Convert the inputs from channels_last (NHWC) to channels_first (NCHW).
# This provides a large performance boost on GPU. See
# https://www.tensorflow.org/performance/performance_guide#data_formats
inputs = tf.transpose(inputs, [0, 3, 1, 2])
inputs = conv2d_fixed_padding(
inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size,
strides=self.conv_stride, data_format=self.data_format)
inputs = tf.identity(inputs, 'initial_conv')
if self.first_pool_size:
inputs = tf.layers.max_pooling2d(
inputs=inputs, pool_size=self.first_pool_size,
strides=self.first_pool_stride, padding='SAME',
data_format=self.data_format)
inputs = tf.identity(inputs, 'initial_max_pool')
for i, num_blocks in enumerate(self.block_sizes):
num_filters = self.num_filters * (2**i)
inputs = block_layer(
inputs=inputs, filters=num_filters, bottleneck=self.bottleneck,
block_fn=self.block_fn, blocks=num_blocks,
strides=self.block_strides[i], training=training,
name='block_layer{}'.format(i + 1), data_format=self.data_format)
inputs = batch_norm(inputs, training, self.data_format)
inputs = tf.nn.relu(inputs)
inputs = tf.layers.average_pooling2d(
inputs=inputs, pool_size=self.second_pool_size,
strides=self.second_pool_stride, padding='VALID',
data_format=self.data_format)
inputs = tf.identity(inputs, 'final_avg_pool')
inputs = tf.reshape(inputs, [-1, self.final_size])
inputs = tf.layers.dense(inputs=inputs, units=self.num_classes)
inputs = tf.identity(inputs, 'final_dense')
return inputs
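# Example sketch (hyper-parameters are illustrative, chosen to mirror a ResNet-50
# v2 ImageNet configuration; they are not defined in this file):
if __name__ == '__main__':
    resnet50 = Model(
        resnet_size=50, bottleneck=True, num_classes=1001, num_filters=64,
        kernel_size=7, conv_stride=2, first_pool_size=3, first_pool_stride=2,
        second_pool_size=7, second_pool_stride=1, block_sizes=[3, 4, 6, 3],
        block_strides=[1, 2, 2, 2], final_size=2048, version=2)
    images = tf.placeholder(tf.float32, [None, 224, 224, 3])
    logits = resnet50(images, training=False)  # [batch_size, 1001]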
|
the-stack_106_31419 | #
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import pytest
import requests
import responses
from airbyte_cdk.sources.streams.http.auth import NoAuth
from source_us_census.source import UsCensusStream
@pytest.fixture
def us_census_stream():
return UsCensusStream(
query_params={},
query_path="data/test",
api_key="MY_API_KEY",
authenticator=NoAuth(),
)
simple_test = '[["name","id"],["A","1"],["B","2"]]'
example_from_docs_test = (
'[["STNAME","POP","DATE_","state"],'
'["Alabama","4849377","7","01"],'
'["Alaska","736732","7","02"],'
'["Arizona","6731484","7","04"],'
'["Arkansas","2966369","7","05"],'
'["California","38802500","7","06"]]'
)
@responses.activate
@pytest.mark.parametrize(
"response, expected_result",
[
(
simple_test,
[{"name": "A", "id": "1"}, {"name": "B", "id": "2"}],
),
(
(
example_from_docs_test,
[
{
"STNAME": "Alabama",
"POP": "4849377",
"DATE_": "7",
"state": "01",
},
{"STNAME": "Alaska", "POP": "736732", "DATE_": "7", "state": "02"},
{
"STNAME": "Arizona",
"POP": "6731484",
"DATE_": "7",
"state": "04",
},
{
"STNAME": "Arkansas",
"POP": "2966369",
"DATE_": "7",
"state": "05",
},
{
"STNAME": "California",
"POP": "38802500",
"DATE_": "7",
"state": "06",
},
],
)
),
(
'[["name","id"],["I have an escaped \\" quote","I have an embedded , comma"],["B","2"]]',
[
{
"name": 'I have an escaped " quote',
"id": "I have an embedded , comma",
},
{"name": "B", "id": "2"},
],
),
],
)
def test_parse_response(us_census_stream: UsCensusStream, response: str, expected_result: dict):
responses.add(
responses.GET,
us_census_stream.url_base,
body=response,
)
resp = requests.get(us_census_stream.url_base)
assert list(us_census_stream.parse_response(resp)) == expected_result
type_string = {"type": "string"}
@responses.activate
@pytest.mark.parametrize(
"response, expected_schema",
[
(
simple_test,
{
"name": type_string,
"id": type_string,
},
),
(
example_from_docs_test,
{
"STNAME": type_string,
"POP": type_string,
"DATE_": type_string,
"state": type_string,
},
),
],
)
def test_discover_schema(us_census_stream: UsCensusStream, response: str, expected_schema: dict):
responses.add(
responses.GET,
f"{us_census_stream.url_base}{us_census_stream.query_path}",
body=response,
)
assert us_census_stream.get_json_schema().get("properties") == expected_schema
|
the-stack_106_31421 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import db
from django.core.exceptions import ValidationError
from django.test import SimpleTestCase
from localflavor.nl import forms, models, validators
from .forms import NLPlaceForm
from .models import NLPlace
class NLLocalFlavorValidatorTests(SimpleTestCase):
def assert_validator(self, validator, valid=[], invalid=[]):
for item in valid:
validator(item)
for item in invalid:
self.assertRaises(ValidationError, lambda: validator(item))
def test_NLZipCodeValidator(self):
valid = [
'1234 AB',
'2403 BW',
'2612 JJ',
]
invalid = [
'0123 AB',
'1123 BA'
'11235BA'
'3243 A1',
'AA 1245',
'1234-AB',
'foo',
]
self.assert_validator(validators.NLZipCodeFieldValidator(), valid, invalid)
def test_NLSoFiNumberValidator(self):
valid = [
'123456782',
]
invalid = [
'000000000',
'123456789',
'foo',
]
self.assert_validator(validators.NLSoFiNumberFieldValidator(), valid, invalid)
def test_NLPhoneNumberValidator(self):
valid = [
'0123456789',
'012-3456789',
'+31-12-3456789',
'(0123) 456789',
]
invalid = [
'(010) 12345678',
'06-123456789',
'+31 6123456789',
'foo',
]
self.assert_validator(validators.NLPhoneNumberFieldValidator(), valid, invalid)
def test_NLPhoneNumberValidator_deconstruct(self):
# Deconstruct method is required for django 1.7+ compatibility.
nlphone1 = validators.NLPhoneNumberFieldValidator()
nlphone2 = validators.NLPhoneNumberFieldValidator()
self.assertEqual(nlphone1, nlphone2, msg="NLPhoneNumberFieldValidator are not equal.")
# Call to the deconstruct method to see if it exists.
nlphone1.deconstruct()
def test_NLBankAccountNumberFieldValidator(self):
valid = [
'0417164300',
'755490975',
'12345',
]
invalid = [
'7584955151',
'foo',
'0',
'75849551519',
'00417164300',
'75849551',
]
self.assert_validator(validators.NLBankAccountNumberFieldValidator(), valid, invalid)
class NLLocalFlavorModelTests(SimpleTestCase):
def test_NLZipCodeField(self):
field = models.NLZipCodeField()
self.assertEqual(field.to_python('1234AB'), '1234 AB')
self.assertEqual(field.to_python(None), None)
self.assertIsInstance(field.formfield(), forms.NLZipCodeField)
def test_NL_model(self):
m = NLPlace(**{
'zipcode': '2403BW',
'province': 'OV',
'sofinr': '123456782',
'phone': '012-3456789',
'bankaccount': '0417164300'
})
self.assertEqual(str(m.zipcode), '2403BW')
self.assertEqual(str(m.province), 'OV')
self.assertEqual(str(m.sofinr), '123456782')
self.assertEqual(str(m.phone), '012-3456789')
self.assertEqual(str(m.bankaccount), '0417164300')
m.clean_fields()
def test_NL_model_cleanup(self):
m = NLPlace(**{
'zipcode': '2403 bwa',
'province': 'OV',
'sofinr': '123456782',
'phone': '012-3456789',
'bankaccount': '0417164300'
})
# zipcode is not quite right, so it should raise an error
self.assertRaises(ValidationError, lambda: m.clean_fields())
# correct zipcode, should be clean now
m.zipcode = '2403 bw'
m.clean_fields()
        self.assertEqual(str(m.zipcode), '2403 BW')
class NLLocalFlavorFormTests(SimpleTestCase):
def test_NLZipCodeField(self):
error_invalid = ['Enter a valid zip code.']
valid = {
'1234ab': '1234 AB',
'1234 ab': '1234 AB',
'1234 AB': '1234 AB',
# superfluous spaces should get cleaned off
'1234 AB ': '1234 AB',
' 1234AB ': '1234 AB',
}
invalid = {
'0123AB': error_invalid,
'foo': error_invalid,
'1234ABC': error_invalid,
'1234A': error_invalid,
}
self.assertFieldOutput(forms.NLZipCodeField, valid, invalid)
def test_NLProvinceSelect(self):
f = forms.NLProvinceSelect()
out = '''<select name="provinces">
<option value="DR">Drenthe</option>
<option value="FL">Flevoland</option>
<option value="FR">Fryslân</option>
<option value="GL">Gelderland</option>
<option value="GR">Groningen</option>
<option value="LB">Limburg</option>
<option value="NB">Noord-Brabant</option>
<option value="NH">Noord-Holland</option>
<option value="OV" selected="selected">Overijssel</option>
<option value="UT">Utrecht</option>
<option value="ZE">Zeeland</option>
<option value="ZH">Zuid-Holland</option>
</select>'''
self.assertHTMLEqual(f.render('provinces', 'OV'), out)
def test_NLPhoneNumberField(self):
error_invalid = ['Enter a valid phone number.']
valid = {
'012-3456789': '012-3456789',
'0123456789': '0123456789',
'+31-12-3456789': '+31-12-3456789',
'(0123) 456789': '(0123) 456789',
'0623456789': '0623456789',
}
invalid = {
'(010) 12345678': error_invalid,
'06-123456789': error_invalid,
'+31 6123456789': error_invalid,
'foo': error_invalid,
}
self.assertFieldOutput(forms.NLPhoneNumberField, valid, invalid)
def test_NLSoFiNumberField(self):
error_invalid = ['Enter a valid SoFi number.']
valid = {
'123456782': '123456782',
}
invalid = {
'000000000': error_invalid,
'123456789': error_invalid,
'foo': error_invalid,
}
self.assertFieldOutput(forms.NLSoFiNumberField, valid, invalid)
def test_NL_ModelForm_errors(self):
form = NLPlaceForm({
'zipcode': 'invalid',
'province': 'invalid',
'sofinr': 'invalid',
'phone': 'invalid',
'bankaccount': 'invalid',
})
self.assertFalse(form.is_valid())
invalid_choice = 'Select a valid choice. invalid is not one of the available choices.'
self.assertEqual(form.errors['zipcode'], ['Enter a valid zip code.'])
self.assertEqual(form.errors['province'], [invalid_choice])
self.assertEqual(form.errors['sofinr'], ['Enter a valid SoFi number.'])
self.assertEqual(form.errors['phone'], ['Enter a valid phone number.'])
self.assertEqual(form.errors['bankaccount'], ['Enter a valid bank account number.'])
def test_NL_ModelForm_valid(self):
form = NLPlaceForm({
'zipcode': '2233 AB',
'province': 'OV',
'sofinr': '123456782',
'phone': '0623456789',
'bankaccount': '0417164300'
})
self.assertTrue(form.is_valid())
|
the-stack_106_31422 | """
Demonstrates implementation of SHA1 Hash function in a Python class and gives utilities
to find hash of string or hash of text from a file.
Usage: python sha1.py --string "Hello World!!"
python sha1.py --file "hello_world.txt"
When run without any arguments, it prints the hash of the string "Hello World!!
Welcome to Cryptography"
Also contains a Test class to verify that the generated Hash is same as that
returned by the hashlib library
SHA1 hash or SHA1 sum of a string is a crytpographic function which means it is easy
to calculate forwards but extremely difficult to calculate backwards. What this means
is, you can easily calculate the hash of a string, but it is extremely difficult to
know the original string if you have its hash. This property is useful to communicate
securely, send encrypted messages and is very useful in payment systems, blockchain
and cryptocurrency etc.
The Algorithm as described in the reference:
First we start with a message. The message is padded and the length of the message
is added to the end. It is then split into blocks of 512 bits or 64 bytes. The blocks
are then processed one at a time. Each block must be expanded and compressed.
The value after each compression is added to a 160bit buffer called the current hash
state. After the last block is processed the current hash state is returned as
the final hash.
Reference: https://deadhacker.com/2006/02/21/sha-1-illustrated/
"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
import unittest
class SHA1Hash:
"""
Class to contain the entire pipeline for SHA1 Hashing Algorithm
>>> SHA1Hash(bytes('Allan', 'utf-8')).final_hash()
'872af2d8ac3d8695387e7c804bf0e02c18df9e6e'
"""
def __init__(self, data):
"""
        Initializes the variables data and h. h is a list of 5 8-digit Hexadecimal
numbers corresponding to
(1732584193, 4023233417, 2562383102, 271733878, 3285377520)
respectively. We will start with this as a message digest. 0x is how you write
Hexadecimal numbers in Python
"""
self.data = data
self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
@staticmethod
def rotate(n, b):
"""
Static method to be used inside other methods. Left rotates n by b.
>>> SHA1Hash('').rotate(12,2)
48
"""
return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
def padding(self):
"""
        Pads the input message so that the length of padded_data is a multiple of 64 bytes (512 bits)
"""
padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
return padded_data
def split_blocks(self):
"""
Returns a list of bytestrings each of length 64
"""
return [
self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
]
# @staticmethod
def expand_block(self, block):
"""
Takes a bytestring-block of length 64, unpacks it to a list of integers and
returns a list of 80 integers after some bit operations
"""
w = list(struct.unpack(">16L", block)) + [0] * 64
for i in range(16, 80):
w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
return w
def final_hash(self):
"""
Calls all the other methods to process the input. Pads the data, then splits
into blocks and then does a series of operations for each block (including
expansion).
For each block, the variable h that was initialized is copied to a,b,c,d,e
and these 5 variables a,b,c,d,e undergo several changes. After all the blocks
are processed, these 5 variables are pairwise added to h ie a to h[0], b to h[1]
and so on. This h becomes our final hash which is returned.
"""
self.padded_data = self.padding()
self.blocks = self.split_blocks()
for block in self.blocks:
expanded_block = self.expand_block(block)
a, b, c, d, e = self.h
for i in range(0, 80):
if 0 <= i < 20:
f = (b & c) | ((~b) & d)
k = 0x5A827999
elif 20 <= i < 40:
f = b ^ c ^ d
k = 0x6ED9EBA1
elif 40 <= i < 60:
f = (b & c) | (b & d) | (c & d)
k = 0x8F1BBCDC
elif 60 <= i < 80:
f = b ^ c ^ d
k = 0xCA62C1D6
a, b, c, d, e = (
self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
a,
self.rotate(b, 30),
c,
d,
)
self.h = (
self.h[0] + a & 0xFFFFFFFF,
self.h[1] + b & 0xFFFFFFFF,
self.h[2] + c & 0xFFFFFFFF,
self.h[3] + d & 0xFFFFFFFF,
self.h[4] + e & 0xFFFFFFFF,
)
return "%08x%08x%08x%08x%08x" % tuple(self.h)
class SHA1HashTest(unittest.TestCase):
"""
Test class for the SHA1Hash class. Inherits the TestCase class from unittest
"""
def testMatchHashes(self):
msg = bytes("Test String", "utf-8")
self.assertEqual(SHA1Hash(msg).final_hash(), hashlib.sha1(msg).hexdigest())
def main():
"""
Provides option 'string' or 'file' to take input and prints the calculated SHA1
hash. unittest.main() has been commented because we probably don't want to run
the test each time.
"""
# unittest.main()
parser = argparse.ArgumentParser(description="Process some strings or files")
parser.add_argument(
"--string",
dest="input_string",
default="Hello World!! Welcome to Cryptography",
help="Hash the string",
)
parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
args = parser.parse_args()
input_string = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file, "rb") as f:
hash_input = f.read()
else:
hash_input = bytes(input_string, "utf-8")
print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
|
the-stack_106_31424 | """raspy_hill_29176 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Raspy Hill"
admin.site.site_title = "Raspy Hill Admin Portal"
admin.site.index_title = "Raspy Hill Admin"
# swagger
api_info = openapi.Info(
title="Raspy Hill API",
default_version="v1",
description="API documentation for Raspy Hill App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
|
the-stack_106_31425 | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import logging as lg
import os
import sys
import json
from aztest.common import create_output_directory, create_xml_output_filename, ModuleType, MODULE_UNIT_EXPORT_SYMBOL, \
MODULE_INTEG_EXPORT_SYMBOL, EXECUTABLE_EXPORT_SYMBOL, ScanResult
from aztest.filters import FileApprover, get_default_blacklist, get_default_whitelist
from aztest.log import setup_logging
from aztest.report import HTMLReporter, XMLGenerator
from aztest.errors import RunnerReturnCodes
from bootstrap import BootstrapConfig
logger = lg.getLogger(__name__)
if sys.platform.startswith("darwin"):
from aztest.platform.osx import Scanner
elif sys.platform.startswith("win32"):
from aztest.platform.win import Scanner
elif sys.platform.startswith("linux"):
from aztest.platform.linux import Scanner
else:
logger.error("Unrecognized platform: {}".format(sys.platform))
exit()
__no_dll__ = False
def add_dirs_to_path(path_dirs):
if path_dirs:
os.environ['PATH'] += os.pathsep + os.pathsep.join([os.path.abspath(path_dir) for path_dir in path_dirs])
def scan_one(args, extra, type_, scanner, runner_path, bootstrap_config, file_name, output_dir):
""" Scan one module or executable
:param args: command line arguments
:param extra: extra parameters
:param int type_: module or executable
:param scanner: platform-specific scanner instance
:param runner_path: path to test runner executable
:param BootstrapConfig bootstrap_config: configuration object for bootstrapping modules
:param file_name: filename of module to scan
:param output_dir: directory for output
:return: ScannerResult
"""
logger.info("{}: {}".format(type_, file_name))
xml_out = create_xml_output_filename(file_name, output_dir)
if os.path.exists(xml_out):
return # module has already been tested
# for a more exhaustive list of options:
# https://github.com/google/googletest/blob/master/googletest/docs/AdvancedGuide.md#running-a-subset-of-the-tests
# --help lists command options
# --gtest_list_tests just list what tests are in the module
# --gtest_shuffle shuffle test ordering
cmd_args = ["--gtest_output=xml:" + xml_out,
"--gtest_color=yes"]
cmd_args += extra
if args.wait_for_debugger:
# user has requested to attach a debugger when running
cmd_args += ["--wait-for-debugger"]
ret = 0
if type_ == ModuleType.LIBRARY:
if args.integ:
# user wants to run integration tests
export_symbol = MODULE_INTEG_EXPORT_SYMBOL
cmd_args += ["--integ"]
else:
# just run unit tests
export_symbol = MODULE_UNIT_EXPORT_SYMBOL
# run with bootstrapper
ran_with_bootstrapper = False
if bootstrap_config:
module_name = os.path.split(file_name)[1]
bootstrapper = bootstrap_config.get_bootstrapper(module_name)
if bootstrapper:
ran_with_bootstrapper = True
try:
working_dir = args.dir
app = os.path.join(args.dir, bootstrapper.command_line[0])
if not os.path.isfile(app):
logger.error("bootstrap executable not found {}".format(app))
full_command_line = bootstrapper.command_line + tuple(cmd_args)
ret = scanner.bootstrap(working_dir, full_command_line)
except:
ret = RunnerReturnCodes.UNEXPECTED_EXCEPTION
logger.exception("bootstrap failed")
# run with "runner_<platform>" as the implicit bootstrapper (no need to specify this
# in the bootstrapper config file)
if not ran_with_bootstrapper:
if scanner.exports_symbol(file_name, export_symbol):
try:
ret = scanner.call(file_name, export_symbol, runner_path, args=cmd_args)
logger.debug("Code returned from call: {}".format(ret))
except KeyboardInterrupt:
raise
except:
ret = RunnerReturnCodes.UNEXPECTED_EXCEPTION
logger.exception("module call failed")
else:
ret = RunnerReturnCodes.SYMBOL_NOT_FOUND
elif type_ == ModuleType.EXECUTABLE:
if scanner.exports_symbol(file_name, EXECUTABLE_EXPORT_SYMBOL):
try:
cmd_args = ["--unittest"] + cmd_args
ret = scanner.run(file_name, args=cmd_args)
except KeyboardInterrupt:
raise
except:
ret = RunnerReturnCodes.UNEXPECTED_EXCEPTION
logger.exception("executable run failed")
else:
logger.error("Executable does not export correct symbol.")
ret = RunnerReturnCodes.SYMBOL_NOT_FOUND
else:
raise NotImplementedError("module type not supported: " + str(type_))
err = RunnerReturnCodes.to_string(ret)
return ScanResult(path=file_name, return_code=ret, xml_path=xml_out, error_msg=err)
def scan(args, extra):
scanner = Scanner()
output_dir = create_output_directory(args.output_path, args.no_timestamp)
# setup logging
setup_logging(os.path.join(output_dir, "aztest.log"), args.verbosity)
logger.info("AZ Test Scanner")
if not args.runner_path:
runner_path = os.path.abspath(os.path.join(args.dir, scanner.__runner_exe__))
else:
runner_path = os.path.abspath(args.runner_path)
if not os.path.exists(runner_path):
        logger.error("Invalid test runner path: {}".format(runner_path))
return
bootstrap_config = None
if args.bootstrap_config:
with open(args.bootstrap_config) as json_file:
bootstrap_config = BootstrapConfig(flatten=True)
bootstrap_config.load(json.load(json_file))
add_dirs_to_path(args.add_path)
scan_results = [] # list of ScanResult()
# Find default filter files if they exist and add to user-defined lists
whitelist_files = (args.whitelist_files if args.whitelist_files else []) + [get_default_whitelist()]
blacklist_files = (args.blacklist_files if args.blacklist_files else []) + [get_default_blacklist()]
# Create a FileApprover to determine if scanned files can be tested
file_approver = FileApprover(whitelist_files, blacklist_files)
module_failures = 0
# Dynamic Libraries / Modules
if not __no_dll__:
logger.info("Scanning for dynamic libraries")
for file_name in scanner.enumerate_modules(args.dir):
try:
if args.limit and len(scan_results) >= args.limit:
continue # reached scanning limit
if args.only and not FileApprover.is_in_list(file_name, args.only.split(',')):
continue # filename does not match any expected pattern
if not file_approver.is_approved(file_name):
continue
result = scan_one(args, extra, ModuleType.LIBRARY, scanner, runner_path, bootstrap_config,
file_name, output_dir)
if result:
scan_results += [result]
if result.return_code != RunnerReturnCodes.TESTS_SUCCEEDED:
logger.error("Module FAILED: {}, with exit code: {} ({})".format(file_name, result.return_code,
RunnerReturnCodes.to_string(
result.return_code)))
module_failures += 1
if not os.path.exists(result.xml_path):
XMLGenerator.create_xml_output_file(result.xml_path, result.return_code, result.error_msg)
except KeyboardInterrupt:
logger.exception("Process interrupted by user.")
break
except:
logger.exception("Module scan failed.")
# Executables
if args.exe:
logger.info("Scanning for executables")
for file_name in scanner.enumerate_executables(args.dir):
if args.limit and len(scan_results) >= args.limit:
continue # reached scanning limit
if args.only and not FileApprover.is_in_list(file_name, args.only.split(',')):
continue # filename does not match any expected pattern
if not file_approver.is_approved(file_name):
continue
result = scan_one(args, extra, ModuleType.EXECUTABLE, scanner, runner_path,
bootstrap_config, file_name, output_dir)
if result:
scan_results += [result]
if result.return_code != RunnerReturnCodes.TESTS_SUCCEEDED:
logger.error("Module FAILED: {}, with exit code: {} ({})".format(file_name, result.return_code,
RunnerReturnCodes.to_string(
result.return_code)))
module_failures += 1
if not os.path.exists(result.xml_path):
XMLGenerator.create_xml_output_file(result.xml_path, result.return_code, result.error_msg)
# Always save ScanResult data in a JSON file so we have access to it later
scan_results_json = {'scan_results': []}
for scan_result in scan_results:
scan_results_json['scan_results'].append(scan_result._asdict())
json_path = os.path.join(output_dir, 'scan_results.json')
with open(json_path, 'w') as f:
json.dump(scan_results_json, f)
if not args.no_html_report:
# Convert the set of XML files into an HTML report
HTMLReporter.create_html_report(scan_results, output_dir)
HTMLReporter.create_html_failure_report(scan_results, output_dir)
return 1 if module_failures > 0 else 0
|
the-stack_106_31427 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import division
import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask, PolygonList, PolygonInstance
from maskrcnn_benchmark.config import cfg
import numpy as np
class ImageList(object):
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
This works by padding the images to the same size,
and storing in a field the original sizes of each image
"""
def __init__(self, tensors, image_sizes):
"""
Arguments:
tensors (tensor)
image_sizes (list[tuple[int, int]])
"""
self.tensors = tensors
self.image_sizes = image_sizes
def to(self, *args, **kwargs):
cast_tensor = self.tensors.to(*args, **kwargs)
return ImageList(cast_tensor, self.image_sizes)
def to_image_list(tensors, size_divisible=0):
"""
tensors can be an ImageList, a torch.Tensor or
an iterable of Tensors. It can't be a numpy array.
When tensors is an iterable of Tensors, it pads
the Tensors with zeros so that they have the same
shape
"""
if isinstance(tensors, torch.Tensor) and size_divisible > 0:
tensors = [tensors]
if isinstance(tensors, ImageList):
return tensors
elif isinstance(tensors, torch.Tensor):
# single tensor shape can be inferred
if tensors.dim() == 3:
tensors = tensors[None]
assert tensors.dim() == 4
image_sizes = [tensor.shape[-2:] for tensor in tensors]
return ImageList(tensors, image_sizes)
elif isinstance(tensors, (tuple, list)):
max_size = tuple(max(s) for s in zip(*[img.shape for img in tensors]))
        # TODO Ideally, just remove this and let the model handle arbitrary
        # input sizes
if size_divisible > 0:
import math
stride = size_divisible
max_size = list(max_size)
max_size[1] = int(math.ceil(max_size[1] / stride) * stride)
max_size[2] = int(math.ceil(max_size[2] / stride) * stride)
max_size = tuple(max_size)
batch_shape = (len(tensors),) + max_size
batched_imgs = tensors[0].new(*batch_shape).zero_()
for img, pad_img in zip(tensors, batched_imgs):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
image_sizes = [im.shape[-2:] for im in tensors]
return ImageList(batched_imgs, image_sizes)
else:
raise TypeError("Unsupported type for to_image_list: {}".format(type(tensors)))
def to_image_list_synthesize_4(transposed_info, size_divisible=0):
tensors = transposed_info[0]
if isinstance(tensors, (tuple, list)):
targets = transposed_info[1]
img_ids = transposed_info[2]
#synthesize data:
        assert len(tensors) % 4 == 0, \
            'len(tensors) % 4 != 0; the batch cannot be synthesized from groups of four images'
max_size = tuple(max(s) for s in zip(*[img.shape for img in tensors]))
        # TODO Ideally, just remove this and let the model handle arbitrary
        # input sizes
if size_divisible > 0:
import math
stride = size_divisible
max_size = list(max_size)
max_size[1] = int(math.ceil(max_size[1] / stride) * stride)
max_size[2] = int(math.ceil(max_size[2] / stride) * stride)
max_size = tuple(max_size)
batch_shape = (len(tensors)//4,) + max_size
syn_batched_imgs = tensors[0].new(*batch_shape).zero_()
syn_targets = []
with torch.no_grad():
for idx, pad_img in enumerate(syn_batched_imgs):
# currently suppose first w then h
new_h, new_w = max_size[1]//2, max_size[2]//2
#NOTE: interpolate api require first h then w !
mode = 'nearest'
topLeftImg = torch.nn.functional.interpolate(tensors[idx*4].unsqueeze(0),size=(new_h, new_w),mode=mode).squeeze(0)
topRightImg = torch.nn.functional.interpolate(tensors[idx*4+1].unsqueeze(0),size=(new_h, new_w),mode=mode).squeeze(0)
bottomLeftImg = torch.nn.functional.interpolate(tensors[idx*4+2].unsqueeze(0),size=(new_h, new_w),mode=mode).squeeze(0)
bottomRightImg = torch.nn.functional.interpolate(tensors[idx*4+3].unsqueeze(0),size=(new_h, new_w),mode=mode).squeeze(0)
c = topLeftImg.shape[0]
assert c == topRightImg.shape[0] and c == bottomLeftImg.shape[0] and c == bottomRightImg.shape[0]
pad_img[:c, :new_h, :new_w].copy_(topLeftImg)
pad_img[:c, :new_h, new_w:].copy_(topRightImg)
pad_img[:c, new_h:, :new_w].copy_(bottomLeftImg)
pad_img[:c, new_h:, new_w:].copy_(bottomRightImg)
# resize each of four sub-imgs into (new_h, new_w) scale
# resize api require first w then h !
topLeftBL = targets[idx*4].resize((new_w, new_h))
topRightBL = targets[idx*4+1].resize((new_w, new_h))
bottomLeftBL = targets[idx*4+2].resize((new_w, new_h))
bottomRightBL = targets[idx*4+3].resize((new_w, new_h))
assert topLeftBL.mode == 'xyxy'
offsets = [torch.Tensor([0.0,0.0,0.0,0.0]), torch.Tensor([new_w,0.0,new_w,0.0]), torch.Tensor([0.0,new_h,0.0,new_h]),torch.Tensor([new_w,new_h,new_w,new_h])]
# append offsets to box coordinates except for topLeftBL
syn_bbox = torch.cat(
(topLeftBL.bbox + offsets[0],
topRightBL.bbox + offsets[1],
bottomLeftBL.bbox + offsets[2],
bottomRightBL.bbox + offsets[3]), dim=0)
#NOTE: BoxList initialization require first w then h
tmp_BoxList = BoxList(syn_bbox, (new_w*2, new_h*2), mode='xyxy')
tmp_BoxList.add_field('labels', torch.cat((topLeftBL.extra_fields['labels'], topRightBL.extra_fields['labels'], bottomLeftBL.extra_fields['labels'], bottomRightBL.extra_fields['labels']), dim=-1))
#NOTE: adjust the targets mask
topLeftPoly = [poly.polygons[0] for poly in topLeftBL.extra_fields['masks'].instances.polygons]
topRightPoly = [poly.polygons[0] for poly in topRightBL.extra_fields['masks'].instances.polygons]
bottomLeftPoly = [poly.polygons[0] for poly in bottomLeftBL.extra_fields['masks'].instances.polygons]
bottomRightPoly = [poly.polygons[0] for poly in bottomRightBL.extra_fields['masks'].instances.polygons]
offsets = [[0.0,0.0], [new_w,0.0], [0.0,new_h], [new_w,new_h]]
syn_mask = [[list(np.array(poly)+np.array(offsets[0]*int(len(poly)/2)))] for poly in topLeftPoly] + \
[[list(np.array(poly)+np.array(offsets[1]*int(len(poly)/2)))] for poly in topRightPoly] + \
[[list(np.array(poly)+np.array(offsets[2]*int(len(poly)/2)))] for poly in bottomLeftPoly] + \
[[list(np.array(poly)+np.array(offsets[3]*int(len(poly)/2)))] for poly in bottomRightPoly]
syn_mask = SegmentationMask(syn_mask, (new_w*2, new_h*2), mode='poly')
tmp_BoxList.add_field('masks', syn_mask)
# append a four-to-one BoxList object
syn_targets.append(tmp_BoxList)
syn_targets = tuple(syn_targets)
assert len(img_ids)%4==0
#since images are synthesized, id is meaningless, substitute with -1
syn_img_ids = tuple([-1]*(len(syn_targets)))
syn_image_sizes = [list(max_size)[-2:] for i in range(batch_shape[0])]
return ImageList(syn_batched_imgs, syn_image_sizes), syn_targets, syn_img_ids
else:
raise TypeError("Unsupported type for to_image_list: {}".format(type(tensors)))
def to_image_list_synthesize_batchstitch(transposed_info, num_images=4, size_divisible=0):
tensors = transposed_info[0]
if isinstance(tensors, (tuple, list)):
targets = transposed_info[1]
max_size = tuple(max(s) for s in zip(*[img.shape for img in tensors]))
        # TODO Ideally, just remove this and let the model handle arbitrary
        # input sizes
if size_divisible > 0:
import math
stride = size_divisible
max_size = list(max_size)
divider = num_images**0.5
max_size[1] = int(math.ceil(max_size[1] //divider / stride) * stride)
max_size[2] = int(math.ceil(max_size[2] //divider / stride) * stride)
max_size = tuple(max_size)
batch_shape = (len(tensors),) + max_size
syn_batched_imgs = tensors[0].new(*batch_shape).zero_()
new_h, new_w = max_size[1], max_size[2]
with torch.no_grad():
#NOTE: interpolate api require first h then w !
#Imgs = torch.nn.functional.interpolate(torch.cat(list(tensors)),size=(new_h, new_w),mode='nearest')
if cfg.STITCHER.USE_PAD:
max_h, max_w = max([tensor.shape[1] for tensor in tensors]), max([tensor.shape[2] for tensor in tensors])
padded_tensors = [torch.nn.functional.pad(tensor.unsqueeze(0), (0, max_w-tensor.shape[2], 0, max_h-tensor.shape[1]), 'replicate') for tensor in tensors]
for target in targets:
target.size = (max_w, max_h)
tensors = [padded_tensor.reshape(padded_tensor.shape[1:]) for padded_tensor in padded_tensors]
Imgs = torch.cat([torch.nn.functional.interpolate(tensor.unsqueeze(0),size=(new_h, new_w),mode='nearest') for tensor in tensors])
c = tensors[0].shape[0]
syn_batched_imgs[:,:c,:,:].copy_(Imgs)
# resize each of four sub-imgs into (new_h, new_w) scale
# resize api require first w then h !
BLs = [target.resize((new_w, new_h)) for target in targets]
#NOTE: BoxList initialization require first w then h
tmp_BoxLists = [BoxList(BL.bbox, (new_w, new_h), mode='xyxy') for BL in BLs]
for idx, tmp_BoxList in enumerate(tmp_BoxLists):
tmp_BoxList.add_field('labels', BLs[idx].extra_fields['labels'])
#NOTE: adjust the targets mask
Polys = [[poly.polygons[0] for poly in BL.extra_fields['masks'].instances.polygons] for BL in BLs]
syn_masks = [[[list(np.array(poly))] for poly in Poly] for Poly in Polys]
syn_masks = [SegmentationMask(syn_mask, (new_w, new_h), mode='poly') for syn_mask in syn_masks]
for idx, tmp_BoxList in enumerate(tmp_BoxLists):
tmp_BoxList.add_field('masks', syn_masks[idx])
syn_targets = tuple(tmp_BoxLists)
#since images are synthesized, id is meaningless, substitute with -1
syn_img_ids = tuple([-1]*(len(syn_targets)))
syn_image_sizes = [list(max_size)[-2:] for i in range(batch_shape[0])]
return ImageList(syn_batched_imgs, syn_image_sizes), syn_targets, syn_img_ids
else:
raise TypeError("Unsupported type for to_image_list: {}".format(type(tensors)))
def to_image_list_synthesize(transposed_info, size_divisible=0):
num_images = cfg.STITCHER.NUM_IMAGES_STITCH
if cfg.STITCHER.BATCH_STITCH:
return to_image_list_synthesize_batchstitch(transposed_info, num_images, size_divisible=size_divisible)
else:
return to_image_list_synthesize_4(transposed_info,size_divisible=size_divisible)
|
the-stack_106_31429 | """Load a model asset in Blender."""
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
import bpy
from avalon import api
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import (
AVALON_CONTAINERS,
AVALON_PROPERTY,
AVALON_CONTAINER_ID
)
class BlendModelLoader(plugin.AssetLoader):
"""Load models from a .blend file.
Because they come from a .blend file we can simply link the collection that
contains the model. There is no further need to 'containerise' it.
"""
families = ["model"]
representations = ["blend"]
label = "Link Model"
icon = "code-fork"
color = "orange"
def _remove(self, asset_group):
objects = list(asset_group.children)
for obj in objects:
if obj.type == 'MESH':
for material_slot in list(obj.material_slots):
bpy.data.materials.remove(material_slot.material)
bpy.data.meshes.remove(obj.data)
elif obj.type == 'EMPTY':
objects.extend(obj.children)
bpy.data.objects.remove(obj)
def _process(self, libpath, asset_group, group_name):
with bpy.data.libraries.load(
libpath, link=True, relative=False
) as (data_from, data_to):
data_to.objects = data_from.objects
parent = bpy.context.scene.collection
empties = [obj for obj in data_to.objects if obj.type == 'EMPTY']
container = None
for empty in empties:
if empty.get(AVALON_PROPERTY):
container = empty
break
assert container, "No asset group found"
# Children must be linked before parents,
# otherwise the hierarchy will break
objects = []
nodes = list(container.children)
for obj in nodes:
obj.parent = asset_group
for obj in nodes:
objects.append(obj)
nodes.extend(list(obj.children))
objects.reverse()
for obj in objects:
parent.objects.link(obj)
for obj in objects:
local_obj = plugin.prepare_data(obj, group_name)
if local_obj.type != 'EMPTY':
plugin.prepare_data(local_obj.data, group_name)
for material_slot in local_obj.material_slots:
if material_slot.material:
plugin.prepare_data(material_slot.material, group_name)
if not local_obj.get(AVALON_PROPERTY):
local_obj[AVALON_PROPERTY] = dict()
avalon_info = local_obj[AVALON_PROPERTY]
avalon_info.update({"container_name": group_name})
objects.reverse()
bpy.data.orphans_purge(do_local_ids=False)
plugin.deselect_all()
return objects
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
asset_name = plugin.asset_name(asset, subset)
unique_number = plugin.get_unique_number(asset, subset)
group_name = plugin.asset_name(asset, subset, unique_number)
namespace = namespace or f"{asset}_{unique_number}"
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
if not avalon_container:
avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
bpy.context.scene.collection.children.link(avalon_container)
asset_group = bpy.data.objects.new(group_name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
avalon_container.objects.link(asset_group)
plugin.deselect_all()
if options is not None:
parent = options.get('parent')
transform = options.get('transform')
if parent and transform:
location = transform.get('translation')
rotation = transform.get('rotation')
scale = transform.get('scale')
asset_group.location = (
location.get('x'),
location.get('y'),
location.get('z')
)
asset_group.rotation_euler = (
rotation.get('x'),
rotation.get('y'),
rotation.get('z')
)
asset_group.scale = (
scale.get('x'),
scale.get('y'),
scale.get('z')
)
bpy.context.view_layer.objects.active = parent
asset_group.select_set(True)
bpy.ops.object.parent_set(keep_transform=True)
plugin.deselect_all()
objects = self._process(libpath, asset_group, group_name)
bpy.context.scene.collection.objects.link(asset_group)
asset_group[AVALON_PROPERTY] = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": name,
"namespace": namespace or '',
"loader": str(self.__class__.__name__),
"representation": str(context["representation"]["_id"]),
"libpath": libpath,
"asset_name": asset_name,
"parent": str(context["representation"]["parent"]),
"family": context["representation"]["context"]["family"],
"objectName": group_name
}
self[:] = objects
return objects
def exec_update(self, container: Dict, representation: Dict):
"""Update the loaded asset.
This will remove all objects of the current collection, load the new
ones and add them to the collection.
If the objects of the collection are used in another collection they
will not be removed, only unlinked. Normally this should not be the
case though.
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
libpath = Path(api.get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert asset_group, (
f"The asset is not loaded: {container['objectName']}"
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
metadata = asset_group.get(AVALON_PROPERTY)
group_libpath = metadata["libpath"]
normalized_group_libpath = (
str(Path(bpy.path.abspath(group_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
self.log.debug(
"normalized_group_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_group_libpath,
normalized_libpath,
)
if normalized_group_libpath == normalized_libpath:
self.log.info("Library already loaded, not updating...")
return
# Check how many assets use the same library
count = 0
for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects:
if obj.get(AVALON_PROPERTY).get('libpath') == group_libpath:
count += 1
mat = asset_group.matrix_basis.copy()
self._remove(asset_group)
# If it is the last object to use that library, remove it
if count == 1:
library = bpy.data.libraries.get(bpy.path.basename(group_libpath))
if library:
bpy.data.libraries.remove(library)
self._process(str(libpath), asset_group, object_name)
asset_group.matrix_basis = mat
metadata["libpath"] = str(libpath)
metadata["representation"] = str(representation["_id"])
metadata["parent"] = str(representation["parent"])
def exec_remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.
Arguments:
container (openpype:container-1.0): Container to remove,
from `host.ls()`.
Returns:
bool: Whether the container was deleted.
"""
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
libpath = asset_group.get(AVALON_PROPERTY).get('libpath')
# Check how many assets use the same library
count = 0
for obj in bpy.data.collections.get(AVALON_CONTAINERS).objects:
if obj.get(AVALON_PROPERTY).get('libpath') == libpath:
count += 1
if not asset_group:
return False
self._remove(asset_group)
bpy.data.objects.remove(asset_group)
# If it is the last object to use that library, remove it
if count == 1:
library = bpy.data.libraries.get(bpy.path.basename(libpath))
bpy.data.libraries.remove(library)
return True
|
the-stack_106_31430 | #!/usr/local/bin/anaconda3/bin/python
#Here is a function used to extract the transcripts that have an ortholog in any of the species in the study; it does this from the file mauricio.Proteinortho-graph.
#Additionally, there are functions that, once the transcript identifier is known, look it up in each species' .cds files to extract extra info such as the chromosome where the transcript is located.
#This was done to compare the sequences from the .cds files against those obtained from the coding coordinates of the .gtf files, but only to verify a correct understanding of the information.
#IF NEW SPECIES ARE TO BE ADDED TO THE STUDY, A NEW mauricio.Proteinortho-graph FILE MUST BE CREATED THAT FINDS ORTHOLOGOUS TRANSCRIPTS TAKING THE NEW SPECIES INTO ACCOUNT.
import random
import csv
from Bio import SeqIO
import numpy as np
def lee_sec_gen_cds(arch, trans):
fasta_sequences = SeqIO.parse(arch,'fasta')
lon=len(trans)
conta=0
for seq_record in fasta_sequences:
desc=seq_record.description
n0=desc.find(">")
n1=desc.find(" ")
tr=desc[n0+1:n1]
ind=0
band=True
while ind<lon and band:
if len(trans[ind][1])>0:
trans_aux=trans[ind][0]+"."+trans[ind][1]
else:
trans_aux=trans[ind][0]
if tr==trans_aux:
band=False
ind+=1
if not band : #tr==trans[ind-1][0] or tr==trans[ind-1][0]+"."+trans[ind-1][1]
conta+=1
n2=desc.find("cds chromosome:")
cte_n2=len("cds chromosome:") #=15
if n2<=0:
n2=desc.find("cds scaffold:")
cte_n2=len("cds scaffold:") #=13
n3=desc.find("gene:")
n4=desc.find(":",n2+cte_n2,n3)
n5=desc.find(":",n4+1,n3)
n6=desc.find(":",n5+1,n3)
n7=desc.find(":",n6+1,n3)
n8=desc.find(":",n7+1,n3)
n9=desc.find(" ",n3+5)
crom=desc[n4+1:n5]
inicio=desc[n5+1:n6]
fin=desc[n6+1:n7]
gen=desc[n3+5:n9]
sequ=seq_record.seq
l_sec=len(sequ)
trans[ind-1].append(crom)
trans[ind-1].append(inicio)
trans[ind-1].append(fin)
trans[ind-1].append(gen)
trans[ind-1].append(sequ)
trans[ind-1].append(l_sec)
if conta==lon:
break
def llena_arr_especie(arrL, arrC, arrM, arrPe, arrPo, arrR, arrH, arrG, esp, t, ver):
li=[t,ver]
if esp=="Homo_sapiens.GRCh38.cds.all.fa":
ele=len(arrH)
res=encuentra_t(arrH, ele, t)
if res==ele:
arrH.append(li)
elif esp=="Mus_musculus.GRCm38.cds.all.fa":
ele=len(arrR)
res=encuentra_t(arrR, ele, t)
if res==ele:
arrR.append(li)
elif esp=="Danio_rerio.GRCz11.cds.all.fa":
ele=len(arrPe)
res=encuentra_t(arrPe, ele, t)
if res==ele:
arrPe.append(li)
elif esp=="Caenorhabditis_elegans.WBcel235.cds.all.fa":
ele=len(arrG)
res=encuentra_t(arrG, ele, t)
if res==ele:
arrG.append(li)
elif esp=="Drosophila_melanogaster.BDGP6.28.cds.all.fa":
ele=len(arrM)
res=encuentra_t(arrM, ele, t)
if res==ele:
arrM.append(li)
elif esp=="Gallus_gallus.GRCg6a.cds.all.fa":
ele=len(arrPo)
res=encuentra_t(arrPo, ele, t)
if res==ele:
arrPo.append(li)
elif esp=="Ciona_intestinalis.KH.cds.all.fa":
ele=len(arrC)
res=encuentra_t(arrC, ele, t)
if res==ele:
arrC.append(li)
elif esp=="Saccharomyces_cerevisiae.R64-1-1.cds.all.fa":
ele=len(arrL)
res=encuentra_t(arrL, ele, t)
if res==ele:
arrL.append(li)
def encuentra_t(arre,ele,t):
#ele=len(arre)
m=0
bande=True
while m<ele and bande:
if t==arre[m][0]:
bande=False
m-=1
m+=1
return m
def proteinortho_graph_cds():
f=open("/u/maribel/genomes/mauricio.proteinortho-graph","r")
frl=f.readlines()
listaLevadura=[]
listaCiona=[]
listaMosca=[]
listaPez=[]
listaPollo=[]
listaRaton=[]
listaHumano=[]
listaGusano=[]
l=0
for x in frl:
c=x[0]
if c=="#":
n0=len(x)
n1=x.find(" ")
n2=x.find(chr(9))
e1_aux=x[n1+1:n2]
n3=x.find(chr(9),n2+1,n0)
e2_aux=x[n2+1:n3]
if e1_aux.find(".cds.all.fa")>=0 and e2_aux.find(".cds.all.fa")>=0:
e1=e1_aux
e2=e2_aux
else:
n4=x.find(chr(9))
t1=x[:n4]
n5=x.find(chr(9),n4+1,len(x))
t2=x[n4+1:n5]
transcrito1=t1
versionT1=""
npunt1=t1.find(".")
if e1!="Caenorhabditis_elegans.WBcel235.cds.all.fa" and npunt1>=0:
transcrito1=t1[:npunt1]
versionT1=t1[npunt1+1:]
llena_arr_especie(listaLevadura, listaCiona, listaMosca, listaPez, listaPollo, listaRaton, listaHumano, listaGusano, e1, transcrito1, versionT1)
transcrito2=t2
versionT2=""
npunt2=t2.find(".")
if e2!="Caenorhabditis_elegans.WBcel235.cds.all.fa" and npunt2>=0:
transcrito2=t2[:npunt2]
versionT2=t2[npunt2+1:]
llena_arr_especie(listaLevadura, listaCiona, listaMosca, listaPez, listaPollo, listaRaton, listaHumano, listaGusano, e2, transcrito2, versionT2)
f.close()
contador=0
listaCDSs=["Saccharomyces_cerevisiae.R64-1-1.cds.all.fa", "Ciona_intestinalis.KH.cds.all.fa", "Drosophila_melanogaster.BDGP6.28.cds.all.fa", "Danio_rerio.GRCz11.cds.all.fa", "Gallus_gallus.GRCg6a.cds.all.fa", "Mus_musculus.GRCm38.cds.all.fa", "Homo_sapiens.GRCh38.cds.all.fa", "Caenorhabditis_elegans.WBcel235.cds.all.fa"]
listaEspecies=[listaLevadura, listaCiona, listaMosca, listaPez, listaPollo, listaRaton, listaHumano, listaGusano]
for ind in listaEspecies:
espe=listaCDSs[contador]
lee_sec_gen_cds("/u/maribel/genomes/"+espe, ind)
contador+=1
return listaLevadura, listaCiona, listaMosca, listaPez, listaPollo, listaRaton, listaHumano, listaGusano
def proteinortho_graph():
f=open("/u/maribel/genomes/mauricio.proteinortho-graph","r")
frl=f.readlines()
listaLevadura=[]
listaCiona=[]
listaMosca=[]
listaPez=[]
listaPollo=[]
listaRaton=[]
listaHumano=[]
listaGusano=[]
l=0
for x in frl:
c=x[0]
if c=="#":
n0=len(x)
n1=x.find(" ")
n2=x.find(chr(9))
e1_aux=x[n1+1:n2]
n3=x.find(chr(9),n2+1,n0)
e2_aux=x[n2+1:n3]
if e1_aux.find(".cds.all.fa")>=0 and e2_aux.find(".cds.all.fa")>=0:
e1=e1_aux
e2=e2_aux
else:
n4=x.find(chr(9))
t1=x[:n4]
n5=x.find(chr(9),n4+1,len(x))
t2=x[n4+1:n5]
transcrito1=t1
versionT1=""
npunt1=t1.find(".")
if e1!="Caenorhabditis_elegans.WBcel235.cds.all.fa" and npunt1>=0:
transcrito1=t1[:npunt1]
versionT1=t1[npunt1+1:]
llena_arr_especie(listaLevadura, listaCiona, listaMosca, listaPez, listaPollo, listaRaton, listaHumano, listaGusano, e1, transcrito1, versionT1)
transcrito2=t2
versionT2=""
npunt2=t2.find(".")
if e2!="Caenorhabditis_elegans.WBcel235.cds.all.fa" and npunt2>=0:
transcrito2=t2[:npunt2]
versionT2=t2[npunt2+1:]
llena_arr_especie(listaLevadura, listaCiona, listaMosca, listaPez, listaPollo, listaRaton, listaHumano, listaGusano, e2, transcrito2, versionT2)
f.close()
return listaLevadura, listaCiona, listaMosca, listaPez, listaPollo, listaRaton, listaHumano, listaGusano
|
the-stack_106_31432 | # Copyright (c) Facebook, Inc. and its affiliates.
import os
import pickle
from copy import deepcopy
from dataclasses import dataclass
from enum import Enum
from typing import Any
import torch
import torchvision
from mmf.common.registry import registry
from mmf.modules.embeddings import ProjectionEmbedding, TextEmbedding
from mmf.modules.hf_layers import BertModelJit
from mmf.modules.layers import Identity
from mmf.utils.build import build_image_encoder, build_text_encoder
from mmf.utils.download import download_pretrained_model
from mmf.utils.file_io import PathManager
from mmf.utils.general import get_absolute_path
from omegaconf import MISSING, OmegaConf
from torch import nn
from transformers.configuration_auto import AutoConfig
from transformers.modeling_auto import AutoModel
class Encoder(nn.Module):
@dataclass
class Config:
name: str = MISSING
@classmethod
def from_params(cls, **kwargs):
config = OmegaConf.structured(cls.Config(**kwargs))
return cls(config)
class EncoderFactory(nn.Module):
@dataclass
class Config:
type: str = MISSING
params: Encoder.Config = MISSING
class ImageFeatureEncoderTypes(Enum):
default = "default"
identity = "identity"
projection = "projection"
frcnn_fc7 = "finetune_faster_rcnn_fpn_fc7"
class ImageFeatureEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
in_dim: int = MISSING
class ImageFeatureEncoderFactory(EncoderFactory):
@dataclass
class Config(EncoderFactory.Config):
type: ImageFeatureEncoderTypes = MISSING
params: ImageFeatureEncoder.Config = MISSING
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
encoder_type = config.type
if isinstance(encoder_type, ImageFeatureEncoderTypes):
encoder_type = encoder_type.value
assert (
"in_dim" in config.params
), "ImageFeatureEncoder require 'in_dim' param in config"
params = config.params
if encoder_type == "default" or encoder_type == "identity":
self.module = Identity()
self.module.in_dim = params.in_dim
self.module.out_dim = params.in_dim
elif encoder_type == "projection":
if "module" not in params:
params = deepcopy(params)
params.module = "linear"
self.module = ProjectionEmbedding(**params)
elif encoder_type == "finetune_faster_rcnn_fpn_fc7":
self.module = FinetuneFasterRcnnFpnFc7(params)
else:
raise NotImplementedError("Unknown Image Encoder: %s" % encoder_type)
self.out_dim = self.module.out_dim
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
@registry.register_encoder("finetune_faster_rcnn_fpn_fc7")
class FinetuneFasterRcnnFpnFc7(ImageFeatureEncoder):
@dataclass
class Config(ImageFeatureEncoder.Config):
name: str = "finetune_faster_rcnn_fpn_fc7"
in_dim: int = MISSING
weights_file: str = "fc7_w.pkl"
bias_file: str = "fc7_b.pkl"
model_data_dir: str = MISSING
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
model_data_dir = get_absolute_path(config.model_data_dir)
if not os.path.isabs(config.weights_file):
weights_file = os.path.join(model_data_dir, config.weights_file)
if not os.path.isabs(config.bias_file):
bias_file = os.path.join(model_data_dir, config.bias_file)
if not PathManager.exists(bias_file) or not PathManager.exists(weights_file):
download_path = download_pretrained_model("detectron.vmb_weights")
weights_file = get_absolute_path(os.path.join(download_path, "fc7_w.pkl"))
bias_file = get_absolute_path(os.path.join(download_path, "fc7_b.pkl"))
with PathManager.open(weights_file, "rb") as w:
weights = pickle.load(w)
with PathManager.open(bias_file, "rb") as b:
bias = pickle.load(b)
out_dim = bias.shape[0]
self.lc = nn.Linear(config.in_dim, out_dim)
self.lc.weight.data.copy_(torch.from_numpy(weights))
self.lc.bias.data.copy_(torch.from_numpy(bias))
self.out_dim = out_dim
def forward(self, image):
i2 = self.lc(image)
i3 = nn.functional.relu(i2)
return i3
@registry.register_encoder("identity")
class IdentityEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "identity"
in_dim: int = MISSING
def __init__(self, config: Config):
super().__init__()
self.module = nn.Identity()
self.in_dim = config.in_dim
self.out_dim = config.in_dim
def forward(self, x):
return self.module(x)
class ImageEncoderTypes(Enum):
default = "default"
identity = "identity"
resnet152 = "resnet152"
class ImageEncoderFactory(EncoderFactory):
@dataclass
class Config(EncoderFactory.Config):
type: ImageEncoderTypes = MISSING
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self._type = config.type
if isinstance(self._type, ImageEncoderTypes):
self._type = self._type.value
params = config.params
if self._type == "default" or self._type == "identity":
self.module = nn.Identity()
self.module.out_dim = params.in_dim
elif self._type == "resnet152":
self.module = ResNet152ImageEncoder(params)
else:
raise NotImplementedError("Unknown Image Encoder: %s" % self._type)
@property
def out_dim(self):
return self.module.out_dim
def forward(self, image):
return self.module(image)
# Taken from facebookresearch/mmbt with some modifications
@registry.register_encoder("resnet152")
class ResNet152ImageEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "resnet152"
pretrained: bool = True
# "avg" or "adaptive"
pool_type: str = "avg"
num_output_features: int = 1
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
model = torchvision.models.resnet152(pretrained=config.get("pretrained", True))
modules = list(model.children())[:-2]
self.model = nn.Sequential(*modules)
pool_func = (
nn.AdaptiveAvgPool2d if config.pool_type == "avg" else nn.AdaptiveMaxPool2d
)
# -1 will keep the original feature size
if config.num_output_features == -1:
self.pool = nn.Identity()
elif config.num_output_features in [1, 2, 3, 5, 7]:
self.pool = pool_func((config.num_output_features, 1))
elif config.num_output_features == 4:
self.pool = pool_func((2, 2))
elif config.num_output_features == 6:
self.pool = pool_func((3, 2))
elif config.num_output_features == 8:
self.pool = pool_func((4, 2))
elif config.num_output_features == 9:
self.pool = pool_func((3, 3))
self.out_dim = 2048
def forward(self, x):
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
out = self.pool(self.model(x))
out = torch.flatten(out, start_dim=2)
out = out.transpose(1, 2).contiguous()
return out # BxNx2048
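# Editor's note: hedged usage sketch, not part of the original file; pretrained=False
# avoids downloading weights here, and the random batch only demonstrates the
# BxNx2048 output contract noted in forward() above.
def _example_resnet152_encoder():
    config = OmegaConf.structured(
        ResNet152ImageEncoder.Config(pretrained=False, num_output_features=1)
    )
    encoder = ResNet152ImageEncoder(config)
    out = encoder(torch.rand(2, 3, 224, 224))
    return out.shape  # torch.Size([2, 1, 2048])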
class TextEncoderTypes(Enum):
identity = "identity"
transformer = "transformer"
embedding = "embedding"
class TextEncoderFactory(EncoderFactory):
@dataclass
class Config(EncoderFactory.Config):
# identity, transformer or embedding as of now
type: TextEncoderTypes = MISSING
params: Encoder.Config = MISSING
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self._type = config.type
if isinstance(self._type, TextEncoderTypes):
self._type = self._type.value
if self._type == "identity":
self.module = nn.Identity()
elif self._type == "transformer":
self._module = TransformerEncoder(config.params)
self.module = self._module.module
elif self._type == "embedding":
self.module = TextEmbeddingEncoder(config.params)
else:
raise NotImplementedError(f"Unknown Text Encoder {self._type}")
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
@registry.register_encoder("text_embedding")
class TextEmbeddingEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "text_embedding"
operator: str = MISSING
# Keeping this Any for now as this
# needs a separate refactor PR.
embedding_params: Any = MISSING
def __init__(self, config: Config):
super().__init__()
self._operator = config.operator
self._embedding_params = config.embedding_params
self.module = TextEmbedding(
self._embedding_params.type, **self._embedding_params.params
)
def forward(self, x):
x = self.module(x)
if self._operator == "sum":
x = x.sum(dim=1)
elif self._operator == "concat":
x = torch.cat(x, dim=1)
elif self._operator == "mul":
x = torch.prod(x, dim=1)
return x.squeeze()
@registry.register_encoder("transformer")
class TransformerEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "transformer"
num_segments: int = 2
bert_model_name: str = "bert-base-uncased"
        # The options below can be overridden to update the BERT configuration used
        # to initialize the BERT encoder. If an option is missing, or if you are
        # using an encoder different from BERT, add extra parameters by inheriting
        # from and extending this config. Those options will automatically override
        # the options of your transformer encoder's configuration. For example,
        # vocab_size is missing here; just add "vocab_size: x" to update the size of
        # the vocabulary with which the encoder is initialized. If you update the
        # default values, the transformer you get will be initialized from scratch.
hidden_size: int = 768
num_hidden_layers: int = 12
num_attention_heads: int = 12
output_attentions: bool = False
output_hidden_states: bool = False
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
hf_params = {"config": self._build_encoder_config(config)}
# For BERT models, initialize using Jit version
if self.config.bert_model_name.startswith("bert-"):
self.module = BertModelJit.from_pretrained(
self.config.bert_model_name, **hf_params
)
else:
self.module = AutoModel.from_pretrained(
self.config.bert_model_name, **hf_params
)
self.embeddings = self.module.embeddings
self.original_config = self.config
self.config = self.module.config
self._init_segment_embeddings()
def _init_segment_embeddings(self):
if self.original_config.get("num_segments", None):
num_segments = self.original_config.num_segments
if hasattr(self.embeddings, "token_type_embeddings"):
new_embeds = nn.Embedding(num_segments, self.config.hidden_size)
new_embeds.weight.data[:2].copy_(
self.embeddings.token_type_embeddings.weight
)
for idx in range(2, num_segments - 1):
new_embeds.weight.data[idx].copy_(
self.embeddings.token_type_embeddings.weight.data.mean(dim=0)
)
self.embeddings.token_type_embeddings = new_embeds
def _build_encoder_config(self, config: Config):
return AutoConfig.from_pretrained(
self.config.bert_model_name, **OmegaConf.to_container(self.config)
)
def forward(self, *args, **kwargs):
# Only return pooled output
return self.module(*args, **kwargs)[1]
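# Editor's note: illustrative sketch, not part of MMF; it only shows how the Config
# options documented above are assembled. num_segments=3 is an arbitrary example,
# and actually building TransformerEncoder(config) would download the named
# pretrained weights.
def _example_transformer_encoder_config():
    config = OmegaConf.structured(
        TransformerEncoder.Config(bert_model_name="bert-base-uncased", num_segments=3)
    )
    # TransformerEncoder(config) would pass hidden_size etc. to AutoConfig and
    # grow token_type_embeddings to 3 rows via _init_segment_embeddings().
    return config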
class MultiModalEncoderBase(Encoder):
__jit_unused_properties__ = ["encoder_config"]
@dataclass
class Config(Encoder.Config):
# This actually is Union[ImageEncoderConfig, ImageFeatureEncoderConfig]
modal_encoder: EncoderFactory.Config = ImageEncoderFactory.Config(
type=ImageEncoderTypes.resnet152, params=ResNet152ImageEncoder.Config()
)
text_encoder: EncoderFactory.Config = TextEncoderFactory.Config(
type=TextEncoderTypes.transformer, params=TransformerEncoder.Config()
)
direct_features_input: bool = False
modal_hidden_size: int = 2048
text_hidden_size: int = 768
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
self._modal_encoder_config = self.config.get("modal_encoder", None)
self._is_direct_features_input = self.config.get("direct_features_input", False)
self.build()
self.modal_hidden_size = self.config.get("modal_hidden_size", None)
self.text_hidden_size = self.config.get("text_hidden_size", None)
def build(self):
encoders = self._build_encoders(self.config)
self.text_encoder, self.modal_encoder = encoders[0], encoders[1]
self._encoder_config = None
if self.text_encoder:
self._encoder_config = self.text_encoder.config
@property
def encoder_config(self):
return self._encoder_config
def _build_encoders(self, config):
text_encoder = None
if config.get("text_encoder", None):
text_encoder = build_text_encoder(config.text_encoder)
modal_encoder = None
if config.get("modal_encoder", None):
modal_encoder = self._build_modal_encoder(config.modal_encoder)
return (text_encoder, modal_encoder)
def _build_modal_encoder(self, config):
return build_image_encoder(
config, direct_features=self._is_direct_features_input
)
|
the-stack_106_31433 | from fractions import Fraction
from function.generic_univariate_pitch_function import GenericUnivariatePitchFunction
from function.piecewise_linear_function import PiecewiseLinearFunction
from function.scalar_range_interpreter import ScalarRangeInterpreter
from function.chromatic_range_interpreter import ChromaticRangeInterpreter
from instruments.instrument_catalog import InstrumentCatalog
from melody.constraints.chordal_pitch_constraint import ChordalPitchConstraint
from melody.constraints.pitch_range_constraint import PitchRangeConstraint
from melody.constraints.step_sequence_constraint import StepSequenceConstraint
from melody.structure.melodic_form import MelodicForm
from melody.structure.motif import Motif
from structure.LineGrammar.core.line_grammar_executor import LineGrammarExecutor
from structure.lite_score import LiteScore
from structure.tempo import Tempo
from structure.time_signature import TimeSignature
from timemodel.duration import Duration
from timemodel.event_sequence import EventSequence
from timemodel.position import Position
from timemodel.tempo_event import TempoEvent
from timemodel.tempo_event_sequence import TempoEventSequence
from timemodel.time_signature_event import TimeSignatureEvent
from tonalmodel.diatonic_pitch import DiatonicPitch
import math
from tonalmodel.pitch_range import PitchRange
from tonalmodel.range import Range
from transformation.reshape.min_curve_fit_filter import MinCurveFitFilter
from transformation.reshape.t_reshape import TReshape
BASE = DiatonicPitch.parse('C:4').chromatic_distance
def create_score(grammar_str, instrument, ts):
lge = LineGrammarExecutor()
target_line, target_hct = lge.parse(grammar_str)
tempo_seq = TempoEventSequence()
ts_seq = EventSequence()
tempo_seq.add(TempoEvent(Tempo(60, Duration(1, 4)), Position(0)))
ts_seq.add(TimeSignatureEvent(TimeSignature(ts[0], Duration(1, ts[1]), ts[2]), Position(0)))
c = InstrumentCatalog.instance()
violin = c.get_instrument(instrument)
return LiteScore(target_line, target_hct, violin, tempo_seq, ts_seq)
def duration_ltr(duration):
if duration.duration == Fraction(1, 16):
return 's'
elif duration.duration == Fraction(3, 16):
return 'i@'
elif duration.duration == Fraction(1, 8):
return 'i'
elif duration.duration == Fraction(3, 8):
return 'q@'
elif duration.duration == Fraction(1, 4):
return 'q'
elif duration.duration == Fraction(1, 2):
return 'h'
elif duration.duration == Fraction(1):
return 'w'
return '>'
def str_line(line):
notes = line.get_all_notes()
prior_octave = None
prior_duration = None
note_annotations = list()
for note in notes:
annotation = ''
d = duration_ltr(note.duration)
if d != prior_duration:
annotation += d
prior_duration = d
annotation += str(note.diatonic_pitch.diatonic_tone.diatonic_symbol) if note.diatonic_pitch is not None else 'R'
o = note.diatonic_pitch.octave if note.diatonic_pitch is not None else prior_octave
if o != prior_octave:
annotation += ":" + str(o)
prior_octave = o
note_annotations.append(annotation)
s = ' '.join(annotation for annotation in note_annotations)
return s
def print_line(line):
print(str_line(line))
def sinasoidal(v):
"""
    Maps v to a chromatic distance.
    :param v: position value; one sine period spans v in [0..3].
    :return: C:4's absolute chromatic distance plus 19 * sin(2*pi*v/3), i.e. a value
             swinging +/- 19 semitones around C:4.
"""
return BASE + 19 * math.sin(2 * math.pi * v/3)
def three_sin(v):
return math.sin(2 * math.pi * v/3)
def simple_reshape_cpf():
print('----- test_simple_reshape_cpf -----')
line_str = '{<C-Major: I> iE:4 E E E E E E E <:IV> qE ie e <:V> qe ie e <:VI> qE E iE E E E}'
score = create_score(line_str, 'violin', (3, 4, 'sww'))
all_notes = score.line.get_all_notes()
    # 19 chromatic semitones from C:4 (value 0) to G:5 (value 1) with pitch unit 1/19
interpreter = ChromaticRangeInterpreter(DiatonicPitch.parse('C:4'), 0, Fraction(1, 19))
pitch_function = GenericUnivariatePitchFunction(three_sin, Position(0), Position(3), interp=interpreter)
# The first note should have one of 3 values, C:4, E:4, G:4
constraints = {
ChordalPitchConstraint(all_notes[0]),
ChordalPitchConstraint(all_notes[8]),
ChordalPitchConstraint(all_notes[11]),
ChordalPitchConstraint(all_notes[14]),
PitchRangeConstraint([all_notes[0]], PitchRange.create('C:4', 'E:4')),
}
# motif = Motif(score.line, constraints, 'A')
motif = Motif([all_notes[0], all_notes[8], all_notes[11], all_notes[14]], constraints, 'A')
melodic_form = MelodicForm([motif])
t_reshape = TReshape(score, pitch_function, Range(0, 3), melodic_form, True)
results = t_reshape.apply()
filter = MinCurveFitFilter(pitch_function, results)
print('{0} filtered results'.format(len(filter.scored_results)))
for index in range(0, min(5, len(filter.scored_results))):
result = filter.scored_results[index]
print('[{0}] {1} ({2})'.format(index, str_line(result[0].line), result[1]))
print('Chords: {0}'.format(','.join([str(c) for c in score.hct.hc_list()])))
def reshape_with_spf():
print('----- test_reshape_with_spf -----')
line_str = '{<C-Major: I> iE:4 E E E E q@E <:IV> qE ie e <:V> qe ie e <:VI> qE E iE E E E}'
score = create_score(line_str, 'piano', (3, 4, 'sww'))
tonality = score.hct.get_hc_by_position(0).tonality
all_notes = score.line.get_all_notes()
# 11 scalar notes to C:4 (0) to G:5 (11) with pitch unit 1/11
interpreter = ScalarRangeInterpreter(tonality, DiatonicPitch.parse('C:4'), 0, Fraction(1, 11))
pitch_function = GenericUnivariatePitchFunction(three_sin, Position(0), Position(3), False, interpreter)
# The first note should have one of 3 values, C:4, E:4, G:4
constraints = {
ChordalPitchConstraint(all_notes[0]),
ChordalPitchConstraint(all_notes[8]),
ChordalPitchConstraint(all_notes[11]),
ChordalPitchConstraint(all_notes[14]),
PitchRangeConstraint([all_notes[0]], PitchRange.create('C:4', 'E:4')),
}
#motif = Motif(score.line, constraints, 'A')
motif = Motif([all_notes[0], all_notes[8], all_notes[11], all_notes[14]], constraints, 'A')
melodic_form = MelodicForm([motif])
t_reshape = TReshape(score, pitch_function, Range(0, 3), melodic_form, True)
results = t_reshape.apply()
filter = MinCurveFitFilter(pitch_function, results)
print('{0} filtered results'.format(len(filter.scored_results)))
for index in range(0, min(5, len(filter.scored_results))):
result = filter.scored_results[index]
print('[{0}] {1} ({2})'.format(index, str_line(result[0].line), result[1]))
constraints = {
ChordalPitchConstraint(all_notes[0]),
ChordalPitchConstraint(all_notes[4]),
ChordalPitchConstraint(all_notes[6]),
ChordalPitchConstraint(all_notes[8]),
ChordalPitchConstraint(all_notes[12]),
ChordalPitchConstraint(all_notes[14]),
PitchRangeConstraint([all_notes[0]], PitchRange.create('C:4', 'G:4')),
#PitchRangeConstraint([all_notes[4]], PitchRange.create('E:5', 'G:5')),
#PitchRangeConstraint([all_notes[6]], PitchRange.create('C:5', 'G:5')),
#PitchRangeConstraint([all_notes[8]], PitchRange.create('C:4', 'G:4')),
#PitchRangeConstraint([all_notes[12]], PitchRange.create('E:2', 'A:2')),
#PitchRangeConstraint([all_notes[14]], PitchRange.create('E:2', 'G:2')),
}
motif = Motif(score.line, constraints, 'A')
melodic_form = MelodicForm([motif])
t_reshape = TReshape(score, pitch_function, Range(0, 3), melodic_form, True)
results = t_reshape.apply()
filter = MinCurveFitFilter(pitch_function, results)
print('{0} filtered results'.format(len(filter.scored_results)))
for index in range(0, min(5, len(filter.scored_results))):
result = filter.scored_results[index]
print('[{0}] {1} ({2})'.format(index, str_line(result[0].line), result[1]))
def reshape_to_scale():
print('----- test_reshape_to_scale -----')
line_str = '{<C-Major: I> iE:4 E E E E E E E E E E E E E E E E E E E E E E E wE}'
score = create_score(line_str, 'violin', (4, 4, 'swww'))
tonality = score.hct.get_hc_by_position(0).tonality
all_notes = score.line.get_all_notes()
plf = PiecewiseLinearFunction([(0, 0), (1, 8), (Fraction(3, 2), 4), (2, 8), (3, 0)])
for i in range(0, 17):
x = Fraction(1, 8) * i
y = plf(x)
print('({0}, {1})'.format(x, y))
time_range = Range(0, 3)
interpreter = ScalarRangeInterpreter(tonality, DiatonicPitch.parse('C:4'), 0, 1)
pitch_function = GenericUnivariatePitchFunction(plf, Position(0), Position(3), False, interpreter)
constraints = {
ChordalPitchConstraint(all_notes[0]),
PitchRangeConstraint([all_notes[0]], PitchRange.create('C:4', 'G:4')),
}
#motif = Motif(score.line, constraints, 'A')
motif = Motif([all_notes[0]], constraints, 'A')
melodic_form = MelodicForm([motif])
t_reshape = TReshape(score, pitch_function, time_range, melodic_form, False)
results = t_reshape.apply()
filter = MinCurveFitFilter(pitch_function, results)
print('{0} filtered results'.format(len(filter.scored_results)))
for index in range(0, min(5, len(filter.scored_results))):
result = filter.scored_results[index]
print('[{0}] {1} ({2})'.format(index, str_line(result[0].line), result[1]))
def motif_example():
print('----- motif_example -----')
line_str = '{<C-Major: I> iC:4 D E D E E E E C D E D E E E E C D E D E E E E wE}'
score = create_score(line_str, 'piano', (4, 4, 'swww'))
tonality = score.hct.get_hc_by_position(0).tonality
all_notes = score.line.get_all_notes()
constraints = {
StepSequenceConstraint([all_notes[0], all_notes[1], all_notes[2], all_notes[3]], [1, 1, -1])
}
motif = Motif([all_notes[0], all_notes[1], all_notes[2], all_notes[3]], constraints)
motif1 = motif.copy_to(all_notes[8])
motif2 = motif.copy_to(all_notes[16])
form = MelodicForm([motif, motif1, motif2])
# 11 scalar notes to C:4 (0) to G:5 (11) with pitch unit 1/11
interpreter = ScalarRangeInterpreter(tonality, DiatonicPitch.parse('C:4'), 0, Fraction(1, 11))
pitch_function = GenericUnivariatePitchFunction(three_sin, Position(0), Position(3), False, interpreter)
t_reshape = TReshape(score, pitch_function, Range(0, 3), form, True)
results = t_reshape.apply()
filter = MinCurveFitFilter(pitch_function, results)
print('{0} filtered results'.format(len(filter.scored_results)))
for index in range(0, min(5, len(filter.scored_results))):
result = filter.scored_results[index]
print('[{0}] {1} ({2})'.format(index, str_line(result[0].line), result[1]))
#simple_reshape_cpf()
#reshape_with_spf()
#reshape_to_scale()
motif_example()
|
the-stack_106_31435 | import unittest
from decimal import Decimal
from django.db import connection
from django.db.models import DecimalField
from django.db.models.functions import Pi, Round
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class RoundTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_round=Round("normal")).first()
self.assertIsNone(obj.null_round)
def test_null_with_precision(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_round=Round("normal", 5)).first()
self.assertIsNone(obj.null_round)
def test_null_with_negative_precision(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_round=Round("normal", -1)).first()
self.assertIsNone(obj.null_round)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal("-12.9"), n2=Decimal("0.6"))
obj = DecimalModel.objects.annotate(
n1_round=Round("n1"), n2_round=Round("n2")
).first()
self.assertIsInstance(obj.n1_round, Decimal)
self.assertIsInstance(obj.n2_round, Decimal)
self.assertAlmostEqual(obj.n1_round, obj.n1, places=0)
self.assertAlmostEqual(obj.n2_round, obj.n2, places=0)
def test_decimal_with_precision(self):
DecimalModel.objects.create(n1=Decimal("-5.75"), n2=Pi())
obj = DecimalModel.objects.annotate(
n1_round=Round("n1", 1),
n2_round=Round("n2", 5),
).first()
self.assertIsInstance(obj.n1_round, Decimal)
self.assertIsInstance(obj.n2_round, Decimal)
self.assertAlmostEqual(obj.n1_round, obj.n1, places=1)
self.assertAlmostEqual(obj.n2_round, obj.n2, places=5)
def test_decimal_with_negative_precision(self):
DecimalModel.objects.create(n1=Decimal("365.25"))
obj = DecimalModel.objects.annotate(n1_round=Round("n1", -1)).first()
self.assertIsInstance(obj.n1_round, Decimal)
self.assertEqual(obj.n1_round, 370)
def test_float(self):
FloatModel.objects.create(f1=-27.55, f2=0.55)
obj = FloatModel.objects.annotate(
f1_round=Round("f1"), f2_round=Round("f2")
).first()
self.assertIsInstance(obj.f1_round, float)
self.assertIsInstance(obj.f2_round, float)
self.assertAlmostEqual(obj.f1_round, obj.f1, places=0)
self.assertAlmostEqual(obj.f2_round, obj.f2, places=0)
def test_float_with_precision(self):
FloatModel.objects.create(f1=-5.75, f2=Pi())
obj = FloatModel.objects.annotate(
f1_round=Round("f1", 1),
f2_round=Round("f2", 5),
).first()
self.assertIsInstance(obj.f1_round, float)
self.assertIsInstance(obj.f2_round, float)
self.assertAlmostEqual(obj.f1_round, obj.f1, places=1)
self.assertAlmostEqual(obj.f2_round, obj.f2, places=5)
def test_float_with_negative_precision(self):
FloatModel.objects.create(f1=365.25)
obj = FloatModel.objects.annotate(f1_round=Round("f1", -1)).first()
self.assertIsInstance(obj.f1_round, float)
self.assertEqual(obj.f1_round, 370)
def test_integer(self):
IntegerModel.objects.create(small=-20, normal=15, big=-1)
obj = IntegerModel.objects.annotate(
small_round=Round("small"),
normal_round=Round("normal"),
big_round=Round("big"),
).first()
self.assertIsInstance(obj.small_round, int)
self.assertIsInstance(obj.normal_round, int)
self.assertIsInstance(obj.big_round, int)
self.assertAlmostEqual(obj.small_round, obj.small, places=0)
self.assertAlmostEqual(obj.normal_round, obj.normal, places=0)
self.assertAlmostEqual(obj.big_round, obj.big, places=0)
def test_integer_with_precision(self):
IntegerModel.objects.create(small=-5, normal=3, big=-100)
obj = IntegerModel.objects.annotate(
small_round=Round("small", 1),
normal_round=Round("normal", 5),
big_round=Round("big", 2),
).first()
self.assertIsInstance(obj.small_round, int)
self.assertIsInstance(obj.normal_round, int)
self.assertIsInstance(obj.big_round, int)
self.assertAlmostEqual(obj.small_round, obj.small, places=1)
self.assertAlmostEqual(obj.normal_round, obj.normal, places=5)
self.assertAlmostEqual(obj.big_round, obj.big, places=2)
def test_integer_with_negative_precision(self):
IntegerModel.objects.create(normal=365)
obj = IntegerModel.objects.annotate(normal_round=Round("normal", -1)).first()
self.assertIsInstance(obj.normal_round, int)
self.assertEqual(obj.normal_round, 370)
def test_transform(self):
with register_lookup(DecimalField, Round):
DecimalModel.objects.create(n1=Decimal("2.0"), n2=Decimal("0"))
DecimalModel.objects.create(n1=Decimal("-1.0"), n2=Decimal("0"))
obj = DecimalModel.objects.filter(n1__round__gt=0).get()
self.assertEqual(obj.n1, Decimal("2.0"))
@unittest.skipUnless(
connection.vendor == "sqlite",
"SQLite doesn't support negative precision.",
)
def test_unsupported_negative_precision(self):
FloatModel.objects.create(f1=123.45)
msg = "SQLite does not support negative precision."
with self.assertRaisesMessage(ValueError, msg):
FloatModel.objects.annotate(value=Round("f1", -1)).first()
|
the-stack_106_31440 | """Base classes for basic metagraph plugins.
"""
import types
import inspect
from typing import Callable, List, Dict, Set, Any
from .typecache import TypeCache, TypeInfo
class AbstractType:
"""Equivalence class of concrete types."""
# Properties must be a dict of property name to set of allowable values
# A value of None indicates unspecified value
properties = {}
# Unambiguous subcomponents is a set of other abstract types which can be
# extracted without any additional information, allowing translators to be
# written from this type to the listed subcomponents
unambiguous_subcomponents = set()
def __init_subclass__(cls, **kwargs):
# Check properties are lists
for key, val in cls.properties.items():
if not isinstance(val, set):
cls.properties[key] = set(val)
def __init__(self, **props):
prop_val = {key: None for key in self.properties}
for key, val in props.items():
if key not in self.properties:
raise KeyError(f"{key} not a valid property of {self.__class__}")
if isinstance(val, (set, tuple, list)):
for v in val:
if v not in self.properties[key]:
raise ValueError(
f"Invalid setting for {key} property: '{v}'; must be one of {self.properties[key]}"
)
prop_val[key] = tuple(sorted(val)) # sort to give consistent hash
else:
if val not in self.properties[key]:
raise ValueError(
f"Invalid setting for {key} property: '{val}'; must be one of {self.properties[key]}"
)
prop_val[key] = val
self.prop_val = prop_val
def __eq__(self, other):
return self.__class__ == other.__class__ and self.prop_val == other.prop_val
def __hash__(self):
return hash((self.__class__, tuple(self.prop_val.items())))
def __getitem__(self, key):
return self.prop_val[key]
def __repr__(self):
props_clean = {k: v for k, v in self.prop_val.items() if v is not None}
return f"{self.__class__.__name__}({props_clean})"
class ConcreteType:
"""A specific data type in a particular memory space recognized by metagraph.
Subclasses of ConcreteType pass an `abstract` keyword argument in the
inheritance definition:
class MyConcreteType(ConcreteType, abstract=MyAbstractType):
pass
For faster dispatch, set the `value_type` attribute to the Python class
which is uniquely associated with this type.
In type signatures, the uninstantiated class is considered equivalent to
an instance with no properties set.
"""
# Most subclasses only need to set these class attributes
value_type = None # override this for fast path type identification
allowed_props = {} # default is no props
target = "cpu" # key may be used in future to guide dispatch
# Override these methods only if necessary
def __init__(self, **props):
"""
Used in two ways:
1. As a requirements indicator
Specify concrete properties which are required for the algorithm
2. As a descriptor of a concrete type instance
Includes both concrete and abstract properties which describe the instance
"""
# Separate abstract properties from concrete properties
abstract_keys = props.keys() & self.abstract.properties.keys()
abstract_props = {key: props.pop(key) for key in abstract_keys}
if abstract_props:
self.abstract_instance = self.abstract(**abstract_props)
else:
self.abstract_instance = None
# Handle concrete properties
for key in props:
if key not in self.allowed_props:
raise KeyError(f"{key} not allowed property of {self.__class__}")
# maybe type check?
self.props = dict(props)
def __init_subclass__(cls, *, abstract=None):
"""Enforce requirements on 'abstract' attribute"""
super().__init_subclass__()
if abstract is None:
raise TypeError(f"Missing required 'abstract' keyword argument on {cls}.")
elif not isinstance(abstract, type) or not issubclass(abstract, AbstractType):
raise TypeError(
f"'abstract' keyword argument on {cls} must be subclass of AbstractType"
)
cls.abstract = abstract
# Property caches live with each ConcreteType, allowing them to be easily accessible
# separate from the Resolver
cls._typecache = TypeCache()
# Ensure ConcreteType.method decorators are used in ConcreteType class
# They are intended only to be used in a Wrapper class
for name, val in cls.__dict__.items():
if getattr(val, "_is_type_method", False):
raise TypeError(
"Invalid decorator: `ConcreteType.method` should only be used in a Wrapper class"
)
elif getattr(val, "_is_type_classmethod", False):
raise TypeError(
"Invalid decorator: `ConcreteType.classmethod` should only be used in a Wrapper class"
)
elif getattr(val, "_is_type_staticmethod", False):
raise TypeError(
"Invalid decorator: `ConcreteType.staticmethod` should only be used in a Wrapper class"
)
@classmethod
def get_typeinfo(cls, value):
if not hasattr(cls, "_typecache"):
raise NotImplementedError("Only implemented for subclasses of ConcreteType")
if value in cls._typecache:
return cls._typecache[value]
# Add a new entry for value
typeinfo = TypeInfo(
abstract_typeclass=cls.abstract,
known_abstract_props={},
concrete_typeclass=cls,
known_concrete_props={},
)
cls._typecache[value] = typeinfo
return typeinfo
def is_satisfied_by(self, other_type):
"""Is other_type and its properties compatible with this type?
(self must be equivalent or less specific than other_type)
"""
if isinstance(other_type, self.__class__):
for k in self.props:
if k not in other_type.props or self.props[k] != other_type.props[k]:
return False
else:
return False
return True
def is_satisfied_by_value(self, obj):
"""Is the type associated with this object compatible with this type?
(self must be equivalent or less specific than the type of obj)
Note that this is potentially slow because it uses get_type() and
therefore computes all properties. Prefer is_satisfied_by() with a
partially specified type instance.
"""
try:
t = self.get_type(obj)
return self.is_satisfied_by(t)
except TypeError:
return False
def __eq__(self, other_type):
return isinstance(other_type, self.__class__) and self.props == other_type.props
def __hash__(self):
return hash((self.__class__, tuple(self.props.items())))
def __getitem__(self, key):
if key in self.abstract.properties:
return self.abstract_instance[key]
return self.props[key]
def __repr__(self):
if self.abstract_instance is None:
props_clean = {}
else:
props_clean = {
k: v
for k, v in self.abstract_instance.prop_val.items()
if v is not None
}
props_clean.update(
{k: v for k, v in self.allowed_props.items() if v is not None}
)
return f"{self.__class__.__name__}({props_clean})"
@classmethod
def is_typeclass_of(cls, obj):
"""Is obj described by this type class?"""
# check fastpath
if cls.value_type is not None:
return isinstance(obj, cls.value_type)
else:
raise NotImplementedError(
"Must override `is_typeclass_of` if cls.value_type not set"
)
@classmethod
def _compute_abstract_properties(
cls, obj, props: Set[str], known_props: Dict[str, Any]
) -> Dict[str, Any]:
raise NotImplementedError(
"Must override `_compute_abstract_properties` if type has abstract properties"
)
@classmethod
def compute_abstract_properties(cls, obj, props: Set[str]) -> Dict[str, Any]:
"""Return a dictionary with a subset of abstract properties for this object.
At a minimum, only the requested properties will be computed, although
this method may return additional keys if they can be computed with
minimal additional cost.
The properties are cached to speed up future calls for the same properties.
"""
if len(props) == 0:
return {}
# Validate properties
for propname in props:
if propname not in cls.abstract.properties:
raise KeyError(
f"{propname} is not an abstract property of {cls.abstract.__name__}"
)
if type(props) is not set:
props = set(props)
typeinfo = cls.get_typeinfo(obj)
abstract_props = cls._compute_abstract_properties(
obj, props, typeinfo.known_abstract_props
)
# Verify requested properties were computed
uncomputed_properties = props - set(abstract_props)
if uncomputed_properties:
raise AssertionError(
f"Requested abstract properties were not computed: {uncomputed_properties}"
)
# Cache properties
typeinfo.known_abstract_props.update(abstract_props)
return abstract_props
@classmethod
def _compute_concrete_properties(
cls, obj, props: List[str], known_props: Dict[str, Any]
) -> Dict[str, Any]:
raise NotImplementedError(
"Must override `_compute_concrete_properties` if type has concrete properties"
)
@classmethod
def compute_concrete_properties(cls, obj, props: List[str]) -> Dict[str, Any]:
"""Return a dictionary with a subset of concrete properties for this object.
        At a minimum, the requested properties will be computed, although
        this method may return additional keys if they can be computed at
        minimal additional cost.
The properties are cached to speed up future calls for the same properties.
"""
if len(props) == 0:
return {}
# Validate properties
for propname in props:
if propname not in cls.allowed_props:
raise KeyError(
f"{propname} is not a concrete property of {cls.__name__}"
)
typeinfo = cls.get_typeinfo(obj)
concrete_props = cls._compute_concrete_properties(
obj, props, typeinfo.known_concrete_props
)
# Verify requested properties were computed
        uncomputed_properties = set(props) - set(concrete_props)
if uncomputed_properties:
raise AssertionError(
f"Requested concrete properties were not computed: {uncomputed_properties}"
)
# Cache properties
typeinfo.known_concrete_props.update(concrete_props)
return concrete_props
@classmethod
def get_type(cls, obj):
"""Get an instance of this type class that fully describes obj
Note that this will completely specialize the type and may require
non-trivial computation to determine all properties of obj. Prefer to
use is_typeclass_of(), compute_abstract_properties(), and
compute_concrete_properties() instead of this method when possible.
"""
if cls.is_typeclass_of(obj):
abstract_props = cls.compute_abstract_properties(
obj, cls.abstract.properties.keys()
)
concrete_props = cls.compute_concrete_properties(
obj, cls.allowed_props.keys()
)
ret_val = cls(**abstract_props, **concrete_props)
return ret_val
else:
raise TypeError(f"object not of type {cls.__name__}")
@classmethod
def assert_equal(
cls,
obj1,
obj2,
aprops1,
aprops2,
cprops1,
cprops2,
*,
rel_tol=1e-9,
abs_tol=0.0,
):
"""
Compare whether obj1 and obj2 are equal, raising an AssertionError if not equal.
        rel_tol and abs_tol should be used when comparing floating point numbers.
        aprops1/aprops2 and cprops1/cprops2 are dicts of the abstract and concrete properties and can be used when performing the comparison.
"""
raise NotImplementedError()
class MetaWrapper(type):
def __new__(mcls, name, bases, dict_, abstract=None, register=True):
kwargs = {}
if bases:
kwargs["register"] = register
if abstract is not None:
kwargs["abstract"] = abstract
cls = type.__new__(mcls, name, bases, dict_, **kwargs)
if register and abstract is not None:
# Check for required methods defined on abstract
for name, val in abstract.__dict__.items():
if getattr(val, "_is_required_method", False):
if not hasattr(cls, name):
raise TypeError(
f"{cls.__name__} is missing required wrapper method '{name}'"
)
prop = getattr(cls, name)
if not callable(prop):
raise TypeError(
f"{cls.__name__}.{name} must be callable, not {type(prop)}"
)
if getattr(val, "_is_required_property", False):
if not hasattr(cls, name):
raise TypeError(
f"{cls.__name__} is missing required wrapper property '{name}'"
)
prop = getattr(cls, name)
if type(prop) is not property:
raise TypeError(
f"{cls.__name__}.{name} must be a property, not {type(prop)}"
)
return cls
class Wrapper(metaclass=MetaWrapper):
"""Helper class for creating wrappers around data objects
A ConcreteType will be automatically created with its `value_type` set to this class.
The auto-created ConcreteType will be attached as `.Type` onto the wrapper class.
"""
def __init_subclass__(cls, *, abstract=None, register=True):
if not register:
cls._abstract = abstract
return
# Attempt to lookup abstract from unregistered wrapper superclass
implied_abstract = getattr(cls, "_abstract", None)
if abstract is None:
abstract = implied_abstract
elif implied_abstract is not None:
if abstract is not implied_abstract:
raise TypeError(
f"Wrong abstract type for wrapper: {abstract}, expected {implied_abstract}"
)
# Use TypeMixin class to create a new ConcreteType class; store as `.Type`
if not hasattr(cls, "TypeMixin") or type(cls.TypeMixin) is not type:
raise TypeError(
f"class {cls.__name__} does not define required `TypeMixin` inner class"
)
cls.Type = types.new_class(
f"{cls.__name__}Type", (cls.TypeMixin, ConcreteType), {"abstract": abstract}
)
cls.Type.__module__ = cls.__module__
cls.Type.__doc__ = cls.__doc__
# Point new Type class at this wrapper
cls.Type.value_type = cls
@staticmethod
def _assert_instance(obj, klass, err_msg=None):
if not isinstance(obj, klass):
if err_msg:
raise TypeError(err_msg)
else:
if type(klass) is tuple:
name = tuple(kls.__name__ for kls in klass)
else:
name = klass.__name__
raise TypeError(f"{obj} is not an instance of {name}")
@staticmethod
def _assert(cond, err_msg):
if not cond:
raise TypeError(err_msg)
@staticmethod
def required_method(func):
func._is_required_method = True
return func
@staticmethod
def required_property(func):
func._is_required_property = True
return func
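# Example usage (a minimal sketch; `MyAbstractType` and the wrapper below are
# hypothetical and not defined in this module):
#
#     class MyWrapper(Wrapper, abstract=MyAbstractType):
#         class TypeMixin:
#             pass
#
#         def __init__(self, value):
#             self.value = value
#
#     # A ConcreteType subclass is auto-created and attached as `MyWrapper.Type`,
#     # with `MyWrapper.Type.value_type is MyWrapper`.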
class Translator:
"""Converts from one concrete type to another, enforcing properties on the
destination if requested."""
def __init__(self, func: Callable):
self.func = func
self.__name__ = func.__name__
self.__doc__ = func.__doc__
self.__wrapped__ = func
def __call__(self, src, **props):
return self.func(src, **props)
def translator(func: Callable = None):
"""
    Decorator which can be applied directly:
    >>> @translator
    >>> def myfunc(): ...
    We also handle the form with an (empty) call:
    >>> @translator()
    >>> def myfunc(): ...
"""
# FIXME: signature checks?
if func is None:
return Translator
else:
return Translator(func)
def normalize_type(t):
"""Instantiate ConcreteType classes with no properties (found in signatures)"""
if type(t) is type and issubclass(t, ConcreteType):
return t()
else:
return t
def normalize_parameter(p: inspect.Parameter):
"""Instantiate any ConcreteType classes found in this parameter annotation"""
return p.replace(annotation=normalize_type(p.annotation))
def normalize_signature(sig: inspect.Signature):
"""Return normalized signature with bare type classes instantiated"""
new_params = [normalize_parameter(p) for p in sig.parameters.values()]
new_return = normalize_type(sig.return_annotation)
return sig.replace(parameters=new_params, return_annotation=new_return)
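# Example (a minimal sketch; `MyType` stands in for any ConcreteType subclass):
#
#     def convert(src: MyType, *, fast: bool = False) -> MyType: ...
#
#     sig = normalize_signature(inspect.signature(convert))
#     # The bare `MyType` annotations are replaced with instantiated `MyType()`
#     # objects (no properties set), while plain Python annotations like `bool`
#     # are left untouched.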
class AbstractAlgorithm:
"""A named algorithm with a type signature of AbstractTypes and/or Python types.
Abstract algorithms should have empty function bodies.
"""
def __init__(self, func: Callable, name: str, *, version: int = 0):
self.func = func
self.name = name
self.version = version
self.__name__ = func.__name__
self.__doc__ = func.__doc__
self.__wrapped__ = func
self.__signature__ = inspect.signature(self.func)
def abstract_algorithm(name: str, *, version: int = 0):
def _abstract_decorator(func: Callable):
return AbstractAlgorithm(func=func, name=name, version=version)
_abstract_decorator.version = version
return _abstract_decorator
class ConcreteAlgorithm:
"""A specific implementation of an abstract algorithm.
Function signature should consist of ConcreteTypes that are compatible
with the AbstractTypes in the corresponding abstract algorithm. Python
types (which are not converted) must match exactly.
"""
def __init__(self, func: Callable, abstract_name: str, *, version: int = 0):
self.func = func
self.abstract_name = abstract_name
self.version = version
self.__name__ = func.__name__
self.__doc__ = func.__doc__
self.__wrapped__ = func
self.__original_signature__ = inspect.signature(self.func)
self.__signature__ = normalize_signature(self.__original_signature__)
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def concrete_algorithm(abstract_name: str, *, version: int = 0):
def _concrete_decorator(func: Callable):
return ConcreteAlgorithm(
func=func, abstract_name=abstract_name, version=version
)
_concrete_decorator.version = version
return _concrete_decorator
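# Example usage (a minimal sketch; the algorithm name below is made up for
# illustration and only uses plain Python annotations):
#
#     @abstract_algorithm("example.add_scalars")
#     def add_scalars(x: int, y: int) -> int:
#         pass
#
#     @concrete_algorithm("example.add_scalars")
#     def add_scalars_python(x: int, y: int) -> int:
#         return x + y
#
#     assert add_scalars_python(1, 2) == 3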
|
the-stack_106_31443 | """Grammalecte wrapper."""
import json
import subprocess
import sys
import tempfile
from dataclasses import dataclass, field
from pathlib import Path
from typing import Generator, List, Union
from zipfile import ZipFile
import requests
@dataclass
class GrammalecteMessage:
"""Base class for Grammalecte messages."""
line: int
start: int
end: int
def __str__(self):
return f"Ligne {self.line} [{self.start}:{self.end}]"
def __eq__(self, other: "GrammalecteMessage"):
# to be sortable, but misleading equality usage
return (self.line, self.start, self.end) == (other.line, other.start, other.end)
def __lt__(self, other: "GrammalecteMessage"):
return (self.line, self.start, self.end) < (other.line, other.start, other.end)
@dataclass
class GrammalecteSpellingMessage(GrammalecteMessage):
"""Spelling error message."""
word: str
message: str = field(init=False)
def __post_init__(self):
self.message = f"Mot inconnu : {self.word}"
def __str__(self):
return super().__str__() + " " + self.message
@staticmethod
def from_dict(line: int, grammalecte_dict: dict) -> "GrammalecteSpellingMessage":
"""Instanciate GrammalecteSpellingMessage from Grammalecte result."""
return GrammalecteSpellingMessage(
line=line,
start=int(grammalecte_dict["nStart"]),
end=int(grammalecte_dict["nEnd"]),
word=grammalecte_dict["sValue"],
)
@dataclass
class GrammalecteGrammarMessage(GrammalecteMessage):
"""Grammar error message."""
url: str
color: List[int]
suggestions: List[str]
message: str
rule: str
type: str
def __str__(self):
ret = super().__str__() + f" [{self.rule}] {self.message}"
if self.suggestions:
ret += f" (Suggestions : {', '.join(self.suggestions)})"
return ret
@staticmethod
def from_dict(line: int, grammalecte_dict: dict) -> "GrammalecteGrammarMessage":
"""Instanciate GrammalecteGrammarMessage from Grammalecte result."""
return GrammalecteGrammarMessage(
line=line,
start=int(grammalecte_dict["nStart"]),
end=int(grammalecte_dict["nEnd"]),
url=grammalecte_dict["URL"],
color=grammalecte_dict["aColor"],
suggestions=grammalecte_dict["aSuggestions"],
message=grammalecte_dict["sMessage"],
rule=grammalecte_dict["sRuleId"],
type=grammalecte_dict["sType"],
)
def grammalecte_text(text: str) -> Generator[GrammalecteMessage, None, None]:
"""Run grammalecte on a string, generate messages."""
with tempfile.TemporaryDirectory() as tmpdirname:
tmpfile = Path(tmpdirname) / "file.txt"
with open(tmpfile, "w", encoding="utf-8") as f:
f.write(text)
yield from grammalecte_file(tmpfile)
def grammalecte_file(
filename: Union[str, Path]
) -> Generator[GrammalecteMessage, None, None]:
"""Run grammalecte on a file given its path, generate messages."""
stdout = "[]"
# TODO check existence of a file
filename = str(filename)
try:
result = _run_grammalecte(filename)
stdout = result.stdout
except FileNotFoundError as e:
if e.filename == "grammalecte-cli.py":
_install_grammalecte()
result = _run_grammalecte(filename)
stdout = result.stdout
yield from _convert_to_messages(stdout)
def _convert_to_messages(
grammalecte_json: str,
) -> Generator[GrammalecteMessage, None, None]:
# grammalecte 1.12.0 adds python comments in the JSON!
grammalecte_json = "\n".join(
line for line in grammalecte_json.splitlines() if not line.startswith("#")
)
warnings = json.loads(grammalecte_json)
for warning in warnings["data"]:
lineno = int(warning["iParagraph"])
messages = []
for error in warning["lGrammarErrors"]:
messages.append(GrammalecteGrammarMessage.from_dict(lineno, error))
for error in warning["lSpellingErrors"]:
messages.append(GrammalecteSpellingMessage.from_dict(lineno, error))
for message in sorted(messages):
yield message
def _run_grammalecte(filepath: str) -> subprocess.CompletedProcess:
"""Run Grammalecte on a file."""
return subprocess.run(
[
"grammalecte-cli.py",
"-f",
filepath,
"-off",
"apos",
"--json",
"--only_when_errors",
],
capture_output=True,
text=True,
)
def _install_grammalecte():
"""Install grammalecte CLI."""
version = "1.12.0"
tmpdirname = tempfile.mkdtemp(prefix="grammalecte_")
tmpdirname = Path(tmpdirname)
tmpdirname.mkdir(exist_ok=True)
download_request = requests.get(
f"https://grammalecte.net/grammalecte/zip/Grammalecte-fr-v{version}.zip"
)
download_request.raise_for_status()
zip_file = tmpdirname / f"Grammalecte-fr-v{version}.zip"
zip_file.write_bytes(download_request.content)
with ZipFile(zip_file, "r") as zip_obj:
zip_obj.extractall(tmpdirname / f"Grammalecte-fr-v{version}")
subprocess.check_call(
[
sys.executable,
"-m",
"pip",
"install",
str(tmpdirname / f"Grammalecte-fr-v{version}"),
]
)
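# Example usage (a minimal sketch; grammalecte-cli is installed on demand by
# _install_grammalecte() if it is not already available):
#
#     for message in grammalecte_text("Quoi qu'il en sois, les resultat est bon."):
#         print(message)
#
# Each yielded item is a GrammalecteSpellingMessage or a GrammalecteGrammarMessage,
# sorted by (line, start, end) within each paragraph.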
|
the-stack_106_31444 | """Test airfs.storage.http"""
import pytest
UNSUPPORTED_OPERATIONS = (
"copy",
"getmtime",
"getctime",
"getsize",
"mkdir",
"listdir",
"remove",
"symlink",
"write",
"shareable_url",
)
def test_handle_http_errors():
"""Test airfs.http._handle_http_errors"""
from airfs.storage.http import _handle_http_errors
from airfs._core.exceptions import ObjectNotFoundError, ObjectPermissionError
# Mocks response
class Response:
"""Dummy response"""
status_code = 200
reason = "reason"
raised = False
def raise_for_status(self):
"""Do nothing"""
self.raised = True
response = Response()
# No error
assert _handle_http_errors(response) is response
# 403 error
response.status_code = 403
with pytest.raises(ObjectPermissionError):
_handle_http_errors(response)
# 404 error
response.status_code = 404
with pytest.raises(ObjectNotFoundError):
_handle_http_errors(response)
# Any error
response.status_code = 500
assert not response.raised
_handle_http_errors(response)
assert response.raised
def test_mocked_storage():
"""Tests airfs.http with a mock"""
import requests
from requests.exceptions import HTTPError
import airfs.storage.http
from airfs.storage.http import HTTPRawIO, _HTTPSystem, HTTPBufferedIO
from tests.test_storage import StorageTester
from tests.storage_mock import ObjectStorageMock
# Mocks client
class HTTPException(Exception):
"""HTTP Exception
Args:
status_code (int): HTTP status
"""
def __init__(self, status_code):
self.status_code = status_code
def raise_404():
"""Raise 404 error"""
raise HTTPException(404)
def raise_416():
"""Raise 416 error"""
raise HTTPException(416)
def raise_500():
"""Raise 500 error"""
raise HTTPException(500)
storage_mock = ObjectStorageMock(raise_404, raise_416, raise_500)
class Response:
"""HTTP request response"""
status_code = 200
reason = "reason"
def __init__(self, **attributes):
for name, value in attributes.items():
setattr(self, name, value)
def raise_for_status(self):
"""Raise for status"""
if self.status_code >= 300:
raise HTTPError(self.reason, response=self)
class Session:
"""Fake Session"""
def __init__(self, *_, **__):
"""Do nothing"""
@staticmethod
def request(method, url, headers=None, **_):
"""Check arguments and returns fake result"""
# Remove scheme
try:
url = url.split("//")[1]
except IndexError:
pass
# Split path and locator
locator, path = url.split("/", 1)
# Perform requests
try:
if method == "HEAD":
return Response(headers=storage_mock.head_object(locator, path))
elif method == "GET":
return Response(
content=storage_mock.get_object(locator, path, header=headers)
)
else:
raise ValueError("Unknown method: " + method)
# Return exception as response with status_code
except HTTPException as exception:
return Response(status_code=exception.status_code)
requests_session = requests.Session
airfs.storage.http._Session = Session
# Tests
try:
# Init mocked system
system = _HTTPSystem()
storage_mock.attach_io_system(system)
# Tests
with StorageTester(
system,
HTTPRawIO,
HTTPBufferedIO,
storage_mock,
unsupported_operations=UNSUPPORTED_OPERATIONS,
) as tester:
# Common tests
tester.test_common()
# Restore mocked functions
finally:
airfs.storage.http._Session = requests_session
|
the-stack_106_31447 | from __future__ import print_function
import lldb
def StepOver(debugger, args, result, dict):
"""
Step over a given number of times instead of only just once
"""
arg_split = args.split(" ")
print(type(arg_split))
count = int(arg_split[0])
for i in range(0, count):
debugger.GetSelectedTarget().GetProcess(
).GetSelectedThread().StepOver(lldb.eOnlyThisThread)
print("step<%d>" % i)
def __lldb_init_module(debugger, session_dict):
# by default, --synchronicity is set to synchronous
debugger.HandleCommand("command script add -f mysto.StepOver mysto")
return None
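# Example usage from the lldb prompt (assuming this file is saved as mysto.py):
#
#     (lldb) command script import /path/to/mysto.py
#     (lldb) mysto 5
#
# which performs five thread-local step-overs, printing "step<i>" after each one.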
|
the-stack_106_31449 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
A nn.Module wrapper to go with a Sharded Optimizer in order to handle targeted gradient
reduction automatically.
"""
from collections import deque
import contextlib
import functools
from itertools import chain
import logging
from typing import Any, Callable, Deque, Dict, Generator, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.autograd import Variable
import torch.distributed as dist
from torch.nn import Parameter
from fairscale.optim import OSS
from fairscale.optim.utils import Bucket, Workhandle
def _trainable(param: torch.Tensor) -> bool:
return param.requires_grad
class ShardedDataParallel(nn.Module):
""" Wrap the model, and reduce the gradients to the right rank during the backward pass.
- the partition is given by the sharded optimizer
- wrap the base model with a model which knows where to reduce each gradient
- add an autograd function which calls the model grad dispatch on the way back
Args:
module (nn.Module):
model to be wrapped
sharded_optimizer (OSS, or list of OSS):
the sharded optimizer(s) which will decide the gradient partitioning
Keyword Args:
process_group (group):
torch.distributed group (default: group.WORLD)
broadcast_buffers (bool):
Whether to additionally broadcast model buffers in between ranks at the beginning of each forward pass.
Same setting as in Pytorch DDP, this is in addition to the broadcast and reduction of the model parameters.
sync_models_at_startup (bool):
Synchronize the models in between the ranks when starting up. Not needed if each rank has the same seed,
or the training restarts from a saved state
reduce_buffer_size (int):
            The max size of the buffer used to batch the small parameter tensors, in number of elements (default: 2**23, i.e. ~8M elements).
this will impact the long term memory consumption, because these buckets correspond to parameters which will not be sharded.
Set to 0 to remove all bucketing, 1M to 8M is usually reasonable.
auto_refresh_trainable (bool):
(default: True) Check whether the parameters trainability (`requires_grad`) has changed and update both ShardedDDP
and OSS automatically if this is the case. If set to False, `refresh_trainable()` needs to be called anytime
a parameter is frozen or unfrozen.
reduce_fp16 (bool):
cast the grads to fp16 before reducing. Not needed if the model is already fp16, but will probably improve performance
for multi node jobs using PyTorch AMP. The effect is similar to DDP's fp16_compress_hook_ and will also save some memory.
.. _fp16_compress_hook: https://pytorch.org/docs/1.8.0/ddp_comm_hooks.html?highlight=fp16#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook
.. warning:
ShardedDDP implements gradient sharding, meaning that each rank only owns a unique shard of the model gradients
after the backward pass, in order to save memory and some communication bandwidth.
.. warning:
As a consequence of sharding:
* in case of gradient clipping, one has to use the `clip_grad_norm` exposed by
the `optimizer state sharding wrapper <fairscale.optim.OSS>`
* after loss.backward() (or equivalent) each rank will have `None` in place of some param.grad
* Pytorch and Apex AMP implementations will hang when used in conjunction with `ShardedDDP`.
One needs a `shard-aware grad scaler<ShardedGradScaler>`, which is proposed in `fairscale.optim.grad_scaler`,
compatible with PytorchAMP.
.. warning:
If `auto_refresh_trainable` is set to `True` (this is the default) then any trainability change in the model graph will be handled
automatically.
If `auto_refresh_trainable` is set to `False`, ShardedDDP will not refresh its assumptions with respect to trainable parameters
for every forward pass, in the hope of saving some time. If some parameters are frozen or unfrozen over time, please refresh
ShardedDDP assumptions by calling `refresh_trainable()` just after said change (before the next forward pass).
"""
def __init__(
self,
module: nn.Module,
sharded_optimizer: Union[OSS, List[OSS]],
process_group: Any = None,
broadcast_buffers: bool = True,
sync_models_at_startup: bool = True,
reduce_buffer_size: int = 2 ** 23,
auto_refresh_trainable: bool = True,
reduce_fp16: bool = False,
):
super().__init__()
self.module = module
self.sharded_optimizers = [sharded_optimizer] if not isinstance(sharded_optimizer, list) else sharded_optimizer
self.enable_broadcast_buffers = broadcast_buffers
self.auto_refresh_trainable = auto_refresh_trainable
self.reduce_fp16 = reduce_fp16
if reduce_buffer_size > 0 and reduce_fp16:
self.reduce_fp16 = False
logging.warning(
"fp16 gradient reduction is not compatible with reduction buffers, which are requested. fp16 grad reduction is deactivated."
)
# Handle a no_sync() context which prevents the gradient synchronization,
# accumulate in place
self.should_accumulate_grads = False
self.accumulate_grads_flipped = False
# Communication related attributes
self.process_group = process_group if process_group is not None else dist.group.WORLD
self.world_size_scaling = 1.0 / dist.get_world_size(self.process_group) # > 0
self.reference_global_rank = OSS.get_global_rank(self.process_group, 0) # picking rank 0 as the reference
self.rank = dist.get_rank(self.process_group)
self.global_rank = OSS.get_global_rank(self.process_group, self.rank)
self._local_to_global_rank = [
OSS.get_global_rank(self.process_group, i) for i in range(dist.get_world_size(self.process_group))
]
# Expose some of the PytorchDDP attributes, some frameworks rely on them.
# See https://pytorch.org/docs/stable/_modules/torch/nn/parallel/distributed.html#DistributedDataParallel
# device_id related logic is not present, this is not handled
devices = {p.device for p in self.module.parameters()}
self.is_multi_device_module = len(devices) > 1
self.device = list(devices)[0]
distinct_device_types = {p.device.type for p in self.module.parameters()}
assert len(distinct_device_types) == 1, (
"ShardedDataParallel's input module must be on "
"the same type of devices, but input module parameters are located on {} different device types."
).format(distinct_device_types)
self.device_type = list(distinct_device_types)[0]
        # Scaffolding to be able to reduce the grads during the BW pass
        # several optimizers can be present, each working on a separate parameter set which is spread across multiple ranks
# - we build an iterator which goes through all the parameters involved globally
self._all_params = list(
chain(
*[sum([sum(p, []) for p in optim.per_device_params.values()], []) for optim in self.sharded_optimizers]
)
)
self._trainable_params: List[torch.Tensor] = []
self._grad_to_be_reduced: List[bool] = []
self._trainable_param_to_rank: Dict[torch.Tensor, int] = {}
self._reference_trainable_mask = list(map(_trainable, self._all_params))
# - setup buckets and tensor views
model_size = sum([p.numel() for p in self.module.parameters()])
self.buffer_max_size = min(reduce_buffer_size, model_size)
logging.info(
"ShardedDDP bucket size: {:.2f}M parameters, model size {:.2f}M parameters".format(
self.buffer_max_size / 2 ** 20, model_size / 2 ** 20
)
)
self.use_buckets = self.buffer_max_size > 0
self.buckets: Dict[torch.device, List[Bucket]] = {}
self._should_bucket_grad: List[bool] = []
self._bucket_list: Optional[List[Bucket]] = None
# - setup backward hooks which will be called by Torch's autograd in due time
self._grad_accs: List[Callable] = []
# passing a handle to torch.nn.SyncBatchNorm layer
self._passing_sync_batchnorm_handle(self.module)
# Make sure that all ranks start with the same model
if sync_models_at_startup:
self._sync_params_and_buffers()
self._work_handles: Deque[Workhandle] = deque()
self._bucket_flush_callback_set = False
self.refresh_trainable()
def forward(self, *inputs: Any, **kwargs: Any) -> Any:
"""
Module forward pass, handles any DDP-specific work in the background. Primes the
backward pass for gradient reduction to the proper ranks.
"""
# Optionally check whether the trainable parameters have changed
if self.auto_refresh_trainable:
trainable_mask = list(map(_trainable, self._all_params))
if trainable_mask != self._reference_trainable_mask:
logging.warning("ShardedDDP detected that the trainable params changed, updating the partitioning")
self.refresh_trainable()
self._reference_trainable_mask = trainable_mask
if self.enable_broadcast_buffers:
# NCCL communications are on a different stream, needs to be blocking
# for the subsequent FW to be correct
self.sync_buffers(blocking=True)
# Reset all the grad reduce and bucket state flags
self._clear_counters()
# Normal FW on the base model
return self.module(*inputs, **kwargs)
def to( # type: ignore
self,
device: Optional[Union[int, torch.device]],
dtype: Optional[torch.dtype] = None,
non_blocking: bool = False,
) -> "ShardedDataParallel":
"""
Moves and/or casts the parameters and buffers.
Its signature is similar to :meth:`torch.Tensor.to`, but only accepts
floating point desired :attr:`dtype` s. In addition, this method will
only cast the floating point parameters and buffers to :attr:`dtype`
(if given). The integral parameters and buffers will be moved
:attr:`device`, if that is given, but with dtypes unchanged. When
:attr:`non_blocking` is set, it tries to convert/move asynchronously
with respect to the host if possible, e.g., moving CPU Tensors with
pinned memory to CUDA devices.
.. note::
This method modifies the module in-place.
.. warning:
Device changes are not supported, and this will raise an exception. The issue in that case is not
really ShardedDDP, but OSS which will not be aware of the device change, and whose buffers will be
in a broken state.
Arguments:
device (:class:`torch.device`): the desired device of the parameters and buffers in this module.
dtype (:class:`torch.dtype`): the desired floating point type of the floating point parameters and buffers.
non_blocking (bool): make it an asynchronous call.
Returns:
Module: self.
"""
assert device in self.buckets.keys(), "Changing devices is not supported, because this would break OSSs state"
assert (
len(self.buckets.keys()) == 1
), "Several devices specified to begin with, incompatible with setting a single device here"
for _device in self.buckets.keys():
for bucket in self.buckets[_device]:
bucket.buffer.to(device=device, dtype=dtype, non_blocking=non_blocking)
self.module.to(device=device, dtype=dtype, non_blocking=non_blocking)
def refresh_trainable(self) -> None:
""" If the module trainability has changed, update all the assumptions """
# Make sure that this is not done while gradients are waiting to be reduced (if no_sync context for instance)
assert not functools.reduce(
lambda x, y: x or y, self._grad_to_be_reduced, False
), "Grads waiting to be reduced: {}".format(self._grad_to_be_reduced)
self._trainable_params = list(filter(lambda x: x.requires_grad, self._all_params))
self._trainable_params.sort(key=lambda x: x.numel())
self._grad_to_be_reduced = [True for _ in self._trainable_params]
self._trainable_param_to_rank = {}
for optim in self.sharded_optimizers:
# OSS may need to change the communication pattern
optim.refresh_trainable()
# Update ShardedDDP given the new partitions
for (
device_per_rank_params
) in optim.per_device_params.values(): # all the params on this device (inc all ranks)
for device_params in device_per_rank_params:
for param in filter(lambda x: x.requires_grad, device_params):
self._trainable_param_to_rank[param] = optim.param_to_rank[param]
self._setup_bucket_strategy()
self._setup_backward_hooks()
def reduce(self) -> None:
"""
This does not *need* to be called, the gradient reduction is done automatically during the BW pass.
Use this method to reduce the gradients manually
"""
# Check that this is not a mistake, if there's nothing to reduce
assert functools.reduce(
lambda x, y: x or y, self._grad_to_be_reduced, False
), "No grads waiting to be reduced, maybe that this was called twice or there was no BW pass ?"
# Trigger all the current BW hooks
_ = map(lambda x: x(), self._grad_accs)
# Make sure that all the futures are consumed
self._consume_work_handles()
@torch.no_grad()
def sync_buffers(self, blocking: bool = False) -> None:
"""
Sync all the param buffers in between ranks (including for instance batch norm statistics).
Arguments:
blocking (bool): wait for the operation to conclude.
"""
last_work_handle = None
for buffer in self.module.buffers(recurse=True):
last_work_handle = dist.broadcast(
buffer.data, self.reference_global_rank, self.process_group, async_op=True
)
if blocking and last_work_handle:
# Only wait for the last coms, they're inlined on the same CUDA stream
last_work_handle.wait()
def zero_grad(self, set_to_none: bool = False) -> None:
r"""Sets gradients of all model parameters to zero. See similar function
under :class:`torch.optim.Optimizer` for more context.
Arguments:
set_to_none (bool): instead of setting to zero, set the grads to None.
See :meth:`torch.optim.Optimizer.zero_grad` for details.
"""
for index, trainable_param in enumerate(self._all_params):
if set_to_none and not self._should_bucket_grad[index]:
trainable_param.grad = None
elif trainable_param.grad is not None:
trainable_param.grad.zero_()
def __getattr__(self, name: str) -> Any:
"""Forward missing attributes to wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
return getattr(self.module, name)
@contextlib.contextmanager
def no_sync(self) -> Generator:
"""A context manager to disable gradient synchronization."""
old_should_accumulate_grads = self.should_accumulate_grads
self.should_accumulate_grads = True
yield
self.accumulate_grads_flipped = self.should_accumulate_grads != old_should_accumulate_grads
self.should_accumulate_grads = old_should_accumulate_grads
@torch.no_grad()
def _clear_counters(self) -> None:
"""Reset all the grad reduce and call counters"""
self._grad_to_be_reduced = [True for _ in self._grad_to_be_reduced]
self._bucket_flush_callback_set = False
# Do not reset the buckets
if self.use_buckets:
assert self._bucket_list is not None
for bucket in self._bucket_list:
assert (
self.accumulate_grads_flipped or not self.training or self.should_accumulate_grads or bucket.sent
), (
"A bucket failed to be sent, cannot continue as results would be wrong. "
+ "You can trye de-activating ShardedDDP buckets -set `reduce_buffer_size` to 0-"
+ "Please submit a GitHub issue, this should not happen"
)
bucket.reset()
if not self.should_accumulate_grads:
self.accumulate_grads_flipped = False
def _find_rank(self, param: Parameter) -> Tuple[OSS, int]:
""" Look up where this parameter belongs to """
for optim in self.sharded_optimizers:
if param in optim.param_to_rank.keys():
return optim, optim.param_to_rank[param]
assert False, "This parameter is not present in an optimizer, this should not happen"
return (None, -1)
def _get_reduce_fn(self, index: int, param: torch.Tensor, dst_rank: int) -> Callable:
"""
Two possible backward hooks for a given parameter: either directly reduce to the appropriate rank,
or contribute to a bucket and reduce when the bucket is full.
Either way a delayed action is necessary and is passed as a callback.
"""
if not self.use_buckets or not self._should_bucket_grad[index]:
# Direct reduction
@torch.no_grad()
def reduce(*_: Any) -> None:
# Skip gradient reduction, do not alter status flags
if not self.should_accumulate_grads and self._grad_to_be_reduced[index]:
assert param.grad is not None, "Reducing gradients during backward pass, cannot be None"
if not self._bucket_flush_callback_set:
Variable._execution_engine.queue_callback(self._flush_reduce_calls)
self._bucket_flush_callback_set = True
# Make sure that this is not fired twice
self._grad_to_be_reduced[index] = False
param.grad.mul_(self.world_size_scaling)
if self.reduce_fp16:
param.grad.data = param.grad.data.half()
# Future work includes clearing up the buffer if possible
def cleanup() -> None:
if dst_rank != self.global_rank:
param.grad = None
else:
assert param.grad is not None
param.grad.data = param.grad.data.to(dtype=param.dtype)
# Async reduce for this buffer, log the future
self._work_handles.append(
Workhandle(
handle=dist.reduce(
tensor=param.grad.data,
dst=self._local_to_global_rank[dst_rank],
group=self.process_group,
async_op=True,
),
callback=cleanup,
)
)
# Opportunistically try to empty the queue, free memory
self._try_consume_work_handle()
else:
@torch.no_grad()
def reduce(*_: Any) -> None:
# Skip gradient reduction, do not alter status flags
if not self.should_accumulate_grads and self._grad_to_be_reduced[index]:
assert param.grad is not None, "Reducing gradients during backward pass, cannot be None"
if not self._bucket_flush_callback_set:
Variable._execution_engine.queue_callback(self._flush_reduce_calls)
self._bucket_flush_callback_set = True
# Make sure that this is not fired twice
self._grad_to_be_reduced[index] = False
bucket = self.buckets[param.device][dst_rank]
bucket.params_checked_in += 1
if bucket.full():
# Normalize the bucket in one go
bucket.buffer.mul_(self.world_size_scaling)
# Reduce the bucket
bucket.sent = True
self._work_handles.append(
Workhandle(
handle=dist.reduce(
tensor=bucket.buffer,
dst=bucket.destination,
group=self.process_group,
async_op=True,
),
callback=None,
)
)
# Opportunistically try to empty the queue
self._try_consume_work_handle()
return reduce
def _setup_backward_hooks(self) -> None:
"""
Attach a reduce function to each grad-requiring parameter.
This makes the gradient reduction automatic whenever there's a backward pass
"""
# Go through the parameters, attach the hook
self._grad_accs = []
for index, param in enumerate(self._trainable_params):
if param.grad is not None and param.grad.requires_grad:
raise RuntimeError("ShardedDataParallel only works with gradients that don't require grad")
# Register the hook to the next function in line,
# so that the hook is fired when this grad has properly been computed
p_tmp = param.expand_as(param)
assert p_tmp.grad_fn is not None
grad_acc = p_tmp.grad_fn.next_functions[0][0]
dst_rank = self._trainable_param_to_rank[param]
grad_acc.register_hook(self._get_reduce_fn(index, param, dst_rank))
self._grad_accs.append(grad_acc) # keep this function in scope
@torch.no_grad()
def _sync_params_and_buffers(self) -> None:
"""
Sync the complete model states in between the ranks
"""
last_work_handle = None
for t in self.module.state_dict().values():
last_work_handle = dist.broadcast(
t, src=self.reference_global_rank, group=self.process_group, async_op=True
)
# Only wait for the last handle, they're inlined in the same CUDA stream
if last_work_handle:
last_work_handle.wait()
def _passing_sync_batchnorm_handle(self, module: nn.Module) -> None:
"""
Passes handle required for ``torch.nn.modules.SyncBatchNorm``.
Adapted from ``torch.nn.distributed.DistributedDataParallel``.
"""
for layer in module.modules():
if isinstance(layer, torch.nn.modules.SyncBatchNorm):
assert self.device_type != "cpu", "SyncBatchNorm layers only work with GPU modules"
# device_id logic has not been handled, assume single-process single-device
                # SyncBatchNorm only supports DDP with single-process single-device anyway
layer._specify_ddp_gpu_num(1) # type: ignore
def _setup_bucket_strategy(self) -> None:
"""Devise a bucketing strategy on a per-rank ownership level.
These buckets will not be sharded, since the gradients would be re-allocated during the backward in that case.
        This method can be slow for big models, but it is not typically called often (not for every forward pass, for instance)
"""
if not self.use_buckets:
return
# Devise the bucketing strategy. Parameters are already sorted, in that:
# - these are only the trainable parameters, so they should produce grads
# - they are sorted by increasing size
self.buckets = {}
for param in self._trainable_params:
device = param.device
dst_rank = self._trainable_param_to_rank[param]
if param.device not in self.buckets.keys():
self.buckets[param.device] = [
Bucket(buffer=torch.zeros(self.buffer_max_size, dtype=param.dtype, device=device))
for _ in range(dist.get_world_size(self.process_group))
]
bucket = self.buckets[device][dst_rank]
bucket.destination = self._local_to_global_rank[dst_rank]
# Criteria to decide whether this parameter is to be bucketed or not:
# - enough room in the bucket
if (bucket.fill + param.numel()) < self.buffer_max_size:
self._should_bucket_grad.append(True)
# This parameter gradients becomes a view of the bucket
fill_next = bucket.fill + param.numel()
if param.grad is None:
# will be overwritten just below, see next line
param.grad = torch.zeros_like(param)
param.grad.data = bucket.buffer[bucket.fill : fill_next].view_as(param.data)
bucket.fill = fill_next
# Update the bucket
self.buckets[device][dst_rank].max_params_checked_in += 1
else:
self._should_bucket_grad.append(False)
self._bucket_list = list(chain(*[self.buckets[device] for device in self.buckets.keys()]))
# Resize the buckets to remove lost space in the end
for bucket in self._bucket_list:
bucket.buffer.resize_(bucket.fill)
bucket.sent = True
def _consume_work_handles(self) -> None:
"""Consume all the futures which are tied to this optimizer's buckets.
We start from the first/older ones, since they are the most likely to be ready and non-blocking
"""
while len(self._work_handles) > 0:
work_handle = self._work_handles.popleft()
work_handle.handle.wait()
if work_handle.callback is not None:
work_handle.callback()
def _try_consume_work_handle(self) -> None:
"""Try to consume the oldest future. This is non blocking, if not ready we'll pass"""
while len(self._work_handles) > 0 and self._work_handles[0].handle.is_completed():
work_handle = self._work_handles.popleft()
if work_handle.callback is not None:
work_handle.callback()
def _flush_reduce_calls(self) -> None:
if self._bucket_list is not None:
for bucket in self._bucket_list:
if not bucket.sent:
# Normalize the bucket in one go
bucket.buffer.mul_(self.world_size_scaling)
# Reduce the bucket
self._work_handles.append(
Workhandle(
handle=dist.reduce(
tensor=bucket.buffer, dst=bucket.destination, group=self.process_group, async_op=True,
),
callback=None,
)
)
bucket.sent = True
self._consume_work_handles()
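# Example usage (a minimal sketch; assumes torch.distributed is already
# initialized and that `model`, `loss_fn` and `dataloader` are defined elsewhere):
#
#     optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3)
#     model = ShardedDataParallel(model, optimizer)
#
#     for inputs, targets in dataloader:
#         optimizer.zero_grad()
#         loss = loss_fn(model(inputs), targets)
#         loss.backward()  # gradients are reduced to their owning ranks here
#         optimizer.step()
#
#     # Gradient accumulation can skip the reduction with `with model.no_sync(): ...`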
|
the-stack_106_31450 | # -*- coding: utf-8 -*-
from twisted.plugin import IPlugin
from pymoronbot.moduleinterface import IModule
from pymoronbot.modules.commandinterface import BotCommand
from zope.interface import implementer
from pymoronbot.message import IRCMessage
from pymoronbot.response import IRCResponse, ResponseType
from six import string_types
@implementer(IPlugin, IModule)
class Help(BotCommand):
def triggers(self):
        return ['help', 'module', 'modules']
def help(self, query):
return 'help/module(s) (<module>) - returns a list of loaded modules, ' \
'or the help text of a particular module if one is specified'
def execute(self, message):
"""
@type message: IRCMessage
"""
moduleHandler = self.bot.moduleHandler
if message.ParameterList:
helpStr = moduleHandler.runActionUntilValue('help', message.ParameterList)
if helpStr:
return IRCResponse(ResponseType.Say, helpStr, message.ReplyTo)
else:
return IRCResponse(ResponseType.Say,
'"{0}" not found, try "{1}" without parameters '
'to see a list of loaded module names'.format(message.ParameterList[0],
message.Command),
message.ReplyTo)
else:
modules = ', '.join(sorted(moduleHandler.modules, key=lambda s: s.lower()))
return [IRCResponse(ResponseType.Say,
"Modules loaded are (use 'help <module>' to get help for that module):",
message.ReplyTo),
IRCResponse(ResponseType.Say,
modules,
message.ReplyTo)]
help = Help()
|
the-stack_106_31451 | from scipy.sparse import dok_matrix
import pickle
import numpy as np
from utils import read_w2v, save_sparse_csr
def build_graph(filename, TOPN, A_name, indice2word_name, annoy=False, dim=100, tree_num=20):
"""
"""
model = read_w2v(filename, dim)
V = len(model.wv.vocab)
print("Num. vocab = %i" % V)
word_indice_dic = {word: i for i, word in enumerate(model.wv.vocab)}
indice2word = {i: word for word, i in word_indice_dic.items()}
A = dok_matrix((V, V), dtype=np.float32)
if annoy:
print("Using ANNOY...")
from gensim.similarities.index import AnnoyIndexer
annoy_index = AnnoyIndexer(model, tree_num)
add_neighbors(A, TOPN, model, word_indice_dic, annoy_index=annoy_index)
else:
add_neighbors(A, TOPN, model, word_indice_dic)
save_sparse_csr(A_name, A.tocsr())
pickle.dump(indice2word, open(indice2word_name , "wb"))
def add_neighbors(A, TOPN, model, word_indice_dic, annoy_index=None):
for word, indice in word_indice_dic.items():
finished = 0
if annoy_index:
word_sim_list = model.most_similar(positive=[word], topn=TOPN + 1, indexer=annoy_index)
else:
word_sim_list = model.most_similar(positive=[word], topn=TOPN)
for sim_word, cos_sim in word_sim_list:
target_indice = word_indice_dic[sim_word]
if indice == target_indice:
continue # avoid adding self-loops
A[indice, target_indice] = max(cos_sim, 0.0)
A[target_indice, indice] = max(cos_sim, 0.0)
finished += 1
def build_subgraph(seed_words, w2v_filename, TOPN, A_name, indice2word_filename, dim):
A, indice2word, model, word_indice_dic = graph_setup(dim, w2v_filename)
#Obtain k-NN
finished = 0
for word in seed_words:
if not word in word_indice_dic:
print("%s is OOV" % word)
continue
indice = word_indice_dic[word]
for sim_word, cos_sim in model.most_similar(positive=[word], topn=TOPN):
print(sim_word, "%.2f" % cos_sim)
target_indice = word_indice_dic[sim_word]
if indice == target_indice: continue # avoid adding self-loops
A[indice, target_indice] = max(cos_sim, 0.0)
A[target_indice, indice] = max(cos_sim, 0.0)
finished += 1
save_sparse_csr(A_name, A.tocsr())
pickle.dump(indice2word, open(indice2word_filename , "wb" ))
def graph_setup(dim, w2v_filename):
# Reading word vector
model = read_w2v(w2v_filename, dim)
V = len(model.wv.vocab)
print("Num. vocab = %i" % V)
# Set up for constructing adjacency matrix
word_indice_dic = {word: i for i, word in enumerate(model.wv.vocab)} # memory size maybe large?
indice2word = {i: word for word, i in word_indice_dic.items()}
A = dok_matrix((V, V), dtype=np.float32)
return A, indice2word, model, word_indice_dic
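# Example usage (a minimal sketch; the file names below are placeholders):
#
#     build_graph(
#         "vectors.w2v",                 # word vectors readable by read_w2v()
#         TOPN=10,                       # neighbours kept per word
#         A_name="adjacency",            # basename passed to save_sparse_csr()
#         indice2word_name="indice2word.pkl",
#         annoy=True,                    # use approximate nearest neighbours
#         dim=100,
#     )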
|
the-stack_106_31452 | import time
import gym_envs.kuka_gym.kuka_button_gym_env as kuka_env
# env = kuka_env.KukaButtonGymEnv(renders=True, is_discrete=True, log_folder="mobile_robot", record_data=False, random_target=False)
env = kuka_env.KukaButtonGymEnv(renders=True)
timesteps = 1000 # must be greater than MAX_STEPS
episodes = 100
env.seed(1)
i = 0
print('Starting episodes...')
start_time = time.time()
try:
for _ in range(episodes):
observation = env.reset()
for t in range(timesteps):
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
env.render() # render() requires first the observation to be obtained
if done:
print("Episode finished after {} timesteps".format(t + 1))
break
i += 1
except KeyboardInterrupt:
pass
print("Avg. frame rate: {:.2f} FPS".format(i / (time.time() - start_time)))
|
the-stack_106_31454 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from datetime import datetime, timedelta
from decimal import Decimal
import os
import pickle
import sys
import unittest
from babel.support import NullTranslations
import flask
from flask_babelhg import (
get_translations,
gettext,
lazy_gettext,
lazy_ngettext,
ngettext,
)
from flask_babelhg._compat import text_type
import flask_babelhg as babel
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
class IntegrationTestCase(unittest.TestCase):
def test_no_request_context(self):
b = babel.Babel()
app = flask.Flask(__name__)
b.init_app(app)
with app.app_context():
assert isinstance(get_translations(), NullTranslations)
def test_lazy_old_style_formatting(self):
lazy_string = lazy_gettext(u'Hello %(name)s')
assert lazy_string % {u'name': u'test'} == u'Hello test'
lazy_string = lazy_gettext(u'test')
assert u'Hello %s' % lazy_string == u'Hello test'
def test_lazy_pickling(self):
lazy_string = lazy_gettext(u'Foo')
pickled = pickle.dumps(lazy_string)
unpickled = pickle.loads(pickled)
assert unpickled == lazy_string
class DateFormattingTestCase(unittest.TestCase):
def test_basics(self):
app = flask.Flask(__name__)
babel.Babel(app)
d = datetime(2010, 4, 12, 13, 46)
delta = timedelta(days=6)
with app.test_request_context():
assert babel.format_datetime(d) == 'Apr 12, 2010, 1:46:00 PM'
assert babel.format_date(d) == 'Apr 12, 2010'
assert babel.format_time(d) == '1:46:00 PM'
assert babel.format_timedelta(delta) == '1 week'
assert babel.format_timedelta(delta, threshold=1) == '6 days'
with app.test_request_context():
app.config['BABEL_DEFAULT_TIMEZONE'] = 'Europe/Vienna'
assert babel.format_datetime(d) == 'Apr 12, 2010, 3:46:00 PM'
assert babel.format_date(d) == 'Apr 12, 2010'
assert babel.format_time(d) == '3:46:00 PM'
with app.test_request_context():
app.config['BABEL_DEFAULT_LOCALE'] = 'de_DE'
assert babel.format_datetime(d, 'long') == '12. April 2010 um 15:46:00 MESZ'
def test_init_app(self):
b = babel.Babel()
app = flask.Flask(__name__)
b.init_app(app)
d = datetime(2010, 4, 12, 13, 46)
with app.test_request_context():
assert babel.format_datetime(d) == 'Apr 12, 2010, 1:46:00 PM'
assert babel.format_date(d) == 'Apr 12, 2010'
assert babel.format_time(d) == '1:46:00 PM'
with app.test_request_context():
app.config['BABEL_DEFAULT_TIMEZONE'] = 'Europe/Vienna'
assert babel.format_datetime(d) == 'Apr 12, 2010, 3:46:00 PM'
assert babel.format_date(d) == 'Apr 12, 2010'
assert babel.format_time(d) == '3:46:00 PM'
with app.test_request_context():
app.config['BABEL_DEFAULT_LOCALE'] = 'de_DE'
assert babel.format_datetime(d, 'long') == '12. April 2010 um 15:46:00 MESZ'
def test_custom_formats(self):
app = flask.Flask(__name__)
app.config.update(
BABEL_DEFAULT_LOCALE='en_US', BABEL_DEFAULT_TIMEZONE='Pacific/Johnston'
)
b = babel.Babel(app)
b.date_formats['datetime'] = 'long'
b.date_formats['datetime.long'] = 'MMMM d, yyyy h:mm:ss a'
d = datetime(2010, 4, 12, 13, 46)
with app.test_request_context():
assert babel.format_datetime(d) == 'April 12, 2010 3:46:00 AM'
def test_custom_locale_selector(self):
app = flask.Flask(__name__)
b = babel.Babel(app)
d = datetime(2010, 4, 12, 13, 46)
the_timezone = 'UTC'
the_locale = 'en_US'
@b.localeselector
def select_locale():
return the_locale
@b.timezoneselector
def select_timezone():
return the_timezone
with app.test_request_context():
assert babel.format_datetime(d) == 'Apr 12, 2010, 1:46:00 PM'
the_locale = 'de_DE'
the_timezone = 'Europe/Vienna'
with app.test_request_context():
assert babel.format_datetime(d) == '12.04.2010, 15:46:00'
def test_refreshing(self):
app = flask.Flask(__name__)
babel.Babel(app)
d = datetime(2010, 4, 12, 13, 46)
with app.test_request_context():
assert babel.format_datetime(d) == 'Apr 12, 2010, 1:46:00 PM'
app.config['BABEL_DEFAULT_TIMEZONE'] = 'Europe/Vienna'
babel.refresh()
assert babel.format_datetime(d) == 'Apr 12, 2010, 3:46:00 PM'
def test_force_locale(self):
app = flask.Flask(__name__)
b = babel.Babel(app)
@b.localeselector
def select_locale():
return 'de_DE'
with app.test_request_context():
assert str(babel.get_locale()) == 'de_DE'
with babel.force_locale('en_US'):
assert str(babel.get_locale()) == 'en_US'
assert str(babel.get_locale()) == 'de_DE'
def test_non_initialized(self):
app = flask.Flask(__name__)
d = datetime(2010, 4, 12, 13, 46)
with app.test_request_context():
assert babel.format_datetime(d) == 'Apr 12, 2010, 1:46:00 PM'
class NumberFormattingTestCase(unittest.TestCase):
def test_basics(self):
app = flask.Flask(__name__)
babel.Babel(app)
n = 1099
with app.test_request_context():
assert babel.format_decimal(n) == u'1,099'
assert babel.format_decimal(Decimal('1010.99')) == u'1,010.99'
assert babel.format_currency(n, 'USD') == '$1,099.00'
assert babel.format_percent(0.19) == '19%'
assert babel.format_scientific(10000) == u'1E4'
class GettextTestCase(unittest.TestCase):
def test_basics(self):
app = flask.Flask(__name__)
babel.Babel(app, default_locale='de_DE')
with app.test_request_context():
assert gettext(u'Hello %(name)s!', name='Peter') == 'Hallo Peter!'
assert ngettext(u'%(num)s Apple', u'%(num)s Apples', 3) == u'3 Äpfel'
assert ngettext(u'%(num)s Apple', u'%(num)s Apples', 1) == u'1 Apfel'
def test_template_basics(self):
app = flask.Flask(__name__)
babel.Babel(app, default_locale='de_DE')
def t(x):
return flask.render_template_string('{{ %s }}' % x)
with app.test_request_context():
assert t("gettext('Hello %(name)s!', name='Peter')") == u'Hallo Peter!'
assert t("ngettext('%(num)s Apple', '%(num)s Apples', 3)") == u'3 Äpfel'
assert t("ngettext('%(num)s Apple', '%(num)s Apples', 1)") == u'1 Apfel'
assert (
flask.render_template_string(
'''
{% trans %}Hello {{ name }}!{% endtrans %}
''',
name='Peter',
).strip()
== 'Hallo Peter!'
)
assert (
flask.render_template_string(
'''
{% trans num=3 %}{{ num }} Apple
{%- pluralize %}{{ num }} Apples{% endtrans %}
''',
name='Peter',
).strip()
== u'3 Äpfel'
)
def test_lazy_gettext(self):
app = flask.Flask(__name__)
babel.Babel(app, default_locale='de_DE')
yes = lazy_gettext(u'Yes')
with app.test_request_context():
assert text_type(yes) == 'Ja'
app.config['BABEL_DEFAULT_LOCALE'] = 'en_US'
with app.test_request_context():
assert text_type(yes) == 'Yes'
def test_lazy_ngettext(self):
app = flask.Flask(__name__)
babel.Babel(app, default_locale='de_DE')
one_apple = lazy_ngettext(u'%(num)s Apple', u'%(num)s Apples', 1)
with app.test_request_context():
assert text_type(one_apple) == '1 Apfel'
two_apples = lazy_ngettext(u'%(num)s Apple', u'%(num)s Apples', 2)
with app.test_request_context():
assert text_type(two_apples) == u'2 Äpfel'
def test_lazy_gettext_defaultdomain(self):
app = flask.Flask(__name__)
domain = babel.Domain(domain='test')
babel.Babel(app, default_locale='de_DE', default_domain=domain)
first = lazy_gettext('first')
with app.test_request_context():
assert text_type(first) == 'erste'
app.config['BABEL_DEFAULT_LOCALE'] = 'en_US'
with app.test_request_context():
assert text_type(first) == 'first'
def test_no_formatting(self):
"""
Ensure we don't format strings unless a variable is passed.
"""
app = flask.Flask(__name__)
babel.Babel(app)
with app.test_request_context():
assert gettext(u'Test %s') == u'Test %s'
assert gettext(u'Test %(name)s', name=u'test') == u'Test test'
assert gettext(u'Test %s') % 'test' == u'Test test'
def test_domain(self):
app = flask.Flask(__name__)
babel.Babel(app, default_locale='de_DE')
domain = babel.Domain(domain='test')
with app.test_request_context():
assert domain.gettext('first') == 'erste'
assert babel.gettext('first') == 'first'
def test_as_default(self):
app = flask.Flask(__name__)
babel.Babel(app, default_locale='de_DE')
domain = babel.Domain(domain='test')
with app.test_request_context():
assert babel.gettext('first') == 'first'
domain.as_default()
assert babel.gettext('first') == 'erste'
def test_default_domain(self):
app = flask.Flask(__name__)
domain = babel.Domain(domain='test')
babel.Babel(app, default_locale='de_DE', default_domain=domain)
with app.test_request_context():
assert babel.gettext('first') == 'erste'
def test_non_initialized(self):
app = flask.Flask(__name__)
with app.test_request_context():
assert babel.gettext('first') == 'first'
def test_multiple_apps(self):
app1 = flask.Flask(__name__)
b1 = babel.Babel(app1, default_locale='de_DE')
app2 = flask.Flask(__name__)
b2 = babel.Babel(app2, default_locale='de_DE')
with app1.test_request_context():
assert babel.gettext('Yes') == 'Ja'
assert 'de_DE' in b1._default_domain.cache
with app2.test_request_context():
assert 'de_DE' not in b2._default_domain.cache
if __name__ == '__main__':
unittest.main()
|
the-stack_106_31456 | # -*- coding:utf-8 -*-
'''
Created on January 9, 2017
@author: AppleWang
CPU-related utilities
'''
import common.utils as utils
class CpuUtils():
def __init__(self, serial, pid):
self.pCpu =self.o_pCpu = 0.0
self.aCpu =self.o_aCpu = 0.0
self.serial = serial
self.pid=pid
    # Get CPU stats for the given pid (reads /proc/<pid>/stat)
def getProcessCpuAction(self):
pid=str(self.pid)
#cmd='adb shell cat proc/'
#cmd=cmd+pid
#cmd=cmd+'/stat'
cmd = 'adb -s %s shell cat proc/%s/stat' % (self.serial, pid)
try:
cmdlogs=utils.execmd(cmd).readline()
cmdlogs=utils.formallog(cmdlogs)
result=[]
result.append(cmdlogs[1])
result.append(cmdlogs[13])
result.append(cmdlogs[14])
except Exception as err:
result=[]
print(err)
return result
    # Get overall device CPU stats (reads /proc/stat)
def getCpuAction(self):
#cmd='adb shell cat proc/stat'
cmd = 'adb -s %s shell cat proc/stat' % self.serial
try:
cmdlogs=utils.execmd(cmd).readline()
cmdlogs=utils.formallog(cmdlogs)
except Exception as err:
cmdlogs=[]
print(err)
return cmdlogs
    # Compute the CPU usage percentage of the pid since the previous call
def getProcessCpuValue(self):
# adb shell dumpsys cpuinfo |grep pid
result1 = self.getProcessCpuAction()
if result1:
self.pCpu=float(result1[1])+float(result1[2])
# adb shell cat proc/stat
result2 = self.getCpuAction();
if result2:
self.aCpu = 0.0
for i in range(2,len(result2)):
self.aCpu += float(result2[i])
usage = 0.0
if self.aCpu-self.o_aCpu!=0:
usage=float("%.2f" %((self.pCpu - self.o_pCpu) * 100.00/(self.aCpu - self.o_aCpu)))
if usage<0:
usage = 0
elif usage > 100:
usage = 100
self.o_pCpu = self.pCpu
self.o_aCpu = self.aCpu
# result = str(usage) + "%"
result=usage
self.p_jif = self.pCpu
print("CPU:",result)
return result
    # Get the pid's CPU info via `dumpsys cpuinfo`
    def getCpuInfpByCMD(self, pid):
        #cmd='adb shell dumpsys cpuinfo |grep '
        #cmd=cmd+pid
        cmd = 'adb -s %s shell dumpsys cpuinfo | grep %s' % (self.serial, str(pid))
        cpuInfo = None
        try:
            cpuInfo = utils.execmd(cmd)
except Exception as err:
print(err)
return cpuInfo
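# Example usage (a minimal sketch; the serial and pid are placeholders):
#
#     import time
#     cpu = CpuUtils(serial="emulator-5554", pid=1234)
#     cpu.getProcessCpuValue()          # first call primes the baseline counters
#     time.sleep(1)
#     usage = cpu.getProcessCpuValue()  # usage (%) over the interval since the last call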
|
the-stack_106_31457 | from __future__ import absolute_import, print_function
import threading
from concurrent import futures
import logging
import multiprocessing
import os
import sys
import tempfile
import typing
import grpc
from grpc_health.v1.health import HealthServicer
from grpc_health.v1 import health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
import pygo_plugin
import pygo_plugin.proto.grpc_controller_pb2 as _controller_pb2
import pygo_plugin.proto.grpc_controller_pb2_grpc as _controller_pb2_grpc
import pygo_plugin.utils
__all__ = ['serve', 'Server', 'ServeConfig']
_GO_PLUGIN_PROTOCOL_VER = 1
def serve(cfg):
"""
Serve the plugin service and block until the
server chooses to stop.
If the server fails, then exit the process with
a non-zero status.
For more control over the Server, create a Server
instance and manage it directly.
Args:
cfg (ServeConfig):
"""
server = pygo_plugin.Server(cfg)
if not server.serve(wait=True):
if server.error_msg:
logging.error(server.error_msg)
else:
logging.error("plugin server exited with unknown error")
sys.exit(1)
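# Example usage (a minimal sketch; `MyPlugin` is a hypothetical pygo_plugin.Plugin
# implementation providing server_register()):
#
#     cfg = ServeConfig()
#     cfg.plugins = {"kv": MyPlugin()}
#     serve(cfg)  # prints the go-plugin handshake line and blocks until shutdown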
class ServeConfig(object):
"""
ServeConfig defines the configuration options for
staring a plugin server.
"""
def __init__(self):
self._handshake = None
self._plugins = {} # typing.Dict[str, plugin.Plugin]
@property
def handshake_config(self):
"""
handshake_config is the configuration that must match clients.
Returns:
pygo_plugin.HandshakeConfig
"""
if self._handshake is None:
self._handshake = pygo_plugin.HandshakeConfig()
return self._handshake
@handshake_config.setter
def handshake_config(self, cfg):
if cfg is not None and not isinstance(cfg, pygo_plugin.HandshakeConfig):
raise TypeError("type %r is not a HandshakeConfig" % type(cfg))
self._handshake = cfg
@property
def plugins(self):
"""
The plugins that are served.
The implied version of this PluginSet is the Handshake.ProtocolVersion.
Returns:
typing.Dict[str, plugin.Plugin]:
"""
if self._plugins is None:
self._plugins = {}
return self._plugins
@plugins.setter
def plugins(self, plugins):
self._plugins = plugins
class Server(object):
"""
Server provides the implementation of one or more plugins,
and serves it via grpc.
"""
GRPC_SERVICE_NAME = 'plugin'
def __init__(self, cfg):
"""
Args:
cfg (ServeConfig):
"""
self._cfg = cfg
self._server = None
self._error = ''
@property
def error_msg(self):
"""
Return the last error message generated by the server
Returns:
str
"""
return self._error
def server(self, **opts):
"""
Create an instance of a grpc server, passing extra
grpc options to the server constructor.
Implementation calls ``self.default_grpc_server()``
Args:
**opts: extra grpc.Server options
Returns:
``grpc.Server``
"""
return self.default_grpc_server(**opts)
def serve(self, wait=False):
"""
Start serving the plugin grpc services, and return control
to the caller. If ``wait=True``, block until the server is stopped.
If ``False`` is returned, caller can check `.error_msg` to read the
last error message.
Args:
wait (bool): Block until server stops
Returns:
bool: Return True on successful start
"""
self.stop()
self._error = ''
if not self.check_magic_key():
return False
self._server = server = self.server()
# We need to build a health service to work with go-plugin
health = HealthServicer()
health.set(self.GRPC_SERVICE_NAME, health_pb2.HealthCheckResponse.ServingStatus.Value('SERVING'))
health_pb2_grpc.add_HealthServicer_to_server(health, server)
# enable controller
_controller_pb2_grpc.add_GRPCControllerServicer_to_server(ServerController(server), server)
# instrument the server to capture the registration of the plugin
# services, so that we can automatically add them for reflection
_add_generic_rpc_handlers = server.add_generic_rpc_handlers
plugin_service_names = set()
def add_generic_rpc_handlers(self, handlers):
plugin_service_names.update({h.service_name() for h in handlers})
return _add_generic_rpc_handlers(handlers)
server.add_generic_rpc_handlers = add_generic_rpc_handlers.__get__(server, server.__class__)
# Register all plugins
plugins = self._cfg.plugins
for name in plugins:
plugin = plugins[name]
plugin.server_register(server)
# reset the handler and set up reflection
server.add_generic_rpc_handlers = _add_generic_rpc_handlers
if plugin_service_names:
names = list(plugin_service_names)
logging.info("plugin server installing grpc reflection for plugins: %s", names)
names.append(reflection.SERVICE_NAME)
reflection.enable_server_reflection(names, server)
# configure server endpoint
if os.name == 'posix':
fd, sock_path = tempfile.mkstemp(suffix=".sock", prefix="plugin_")
os.close(fd)
os.unlink(sock_path)
endpoint = os.path.abspath(sock_path)
server.add_insecure_port("unix:" + endpoint)
network = 'unix'
else:
port = 0
port_opts = {}
try:
port_opts['min_port'] = int(os.environ.get('PLUGIN_MIN_PORT', ''))
except ValueError:
pass
try:
port_opts['max_port'] = int(os.environ.get('PLUGIN_MAX_PORT', ''))
except ValueError:
pass
if port_opts:
port = pygo_plugin.utils.find_free_port(**port_opts)
port = server.add_insecure_port('127.0.0.1:{}'.format(port))
network = 'tcp'
endpoint = '127.0.0.1:%d' % port
server.start()
# Output information
handshake = "{proto_ver}|{app_proto_ver}|{network}|{endpoint}|{protocol}".format(
proto_ver=_GO_PLUGIN_PROTOCOL_VER,
app_proto_ver=self._cfg.handshake_config.protocol_version,
network=network,
endpoint=endpoint,
protocol='grpc',
)
# logging.info(handshake)
print(handshake)
sys.stdout.flush()
if wait:
server.wait_for_termination()
return True
def stop(self, grace=None): # type: (float) -> threading.Event
"""
Stop a running server.
A grace period in seconds can be given to wait for the
server to actually stop gracefully. Otherwise it will
be stopped immediately without waiting for in-flight
requests to complete.
Returns a ``threading.Event`` that will be set when this
Server has completely stopped, i.e. when running RPCs
either complete or are aborted and all handlers have
terminated.
Args:
grace (float): shutdown grace period in seconds
Returns:
threading.Event
"""
if self._server is None:
evt = threading.Event()
evt.set()
return evt
return self._server.stop(grace)
def check_magic_key(self):
"""
Checks if the handshake configuration was set in the current
environment, and if it matches the current server configuration.
If the check fails, ``.error_msg`` will be set and ``False`` will
be returned.
Returns:
bool: success
"""
# Check magic key/value
if self._cfg.handshake_config.magic_cookie_key:
env_key = self._cfg.handshake_config.magic_cookie_key
env_val = self._cfg.handshake_config.magic_cookie_value
if os.environ.get(env_key) != env_val:
self._error = (
"Misconfigured ServeConfig handshake given to serve this plugin:\n"
" no magic cookie key, or value was set incorrectly.\n"
"Please notify the plugin author and report this as a bug.\n")
return False
return True
@classmethod
def default_grpc_server(cls, **opts):
"""
Create a default grpc Server instance using a
concurrent.futures thread pool. The thread pool
will be set to a default worker count based on
the host cpu count.
Args:
**opts: ``grpc.server`` constructor options
Returns:
``grpc.Server``
"""
if 'thread_pool' not in opts:
# python 3.8+ concurrent.futures default
workers = min(32, multiprocessing.cpu_count() + 4)
opts['thread_pool'] = futures.ThreadPoolExecutor(max_workers=workers)
return grpc.server(**opts)
class ServerController(_controller_pb2_grpc.GRPCControllerServicer):
"""
ServerController implements controller requests in the server,
sent by the client.
"""
def __init__(self, server, grace=2):
"""
Args:
server (grpc.Server): the grpc server instance to control
grace (float): Graceful shutdown time in seconds
"""
self._server = server # type: grpc.Server
self._grace = float(grace)
def Shutdown(self, request, context):
"""
Shut down the server using the configured grace period
"""
event = self._server.stop(self._grace) # type: threading.Event
if not event.wait(self._grace):  # force an immediate stop if graceful shutdown does not finish in time
self._server.stop(0)
return _controller_pb2.Empty()
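# Rough usage sketch (illustrative only; ``MyPlugin`` and the handshake values below
# are assumptions, not part of this module -- a real plugin must implement
# ``server_register(server)`` so it can attach its grpc servicer, and the client
# process must export the matching magic cookie environment variable):
#
#   cfg = ServeConfig()
#   cfg.handshake_config.magic_cookie_key = "BASIC_PLUGIN"
#   cfg.handshake_config.magic_cookie_value = "hello"
#   cfg.plugins = {"kv": MyPlugin()}
#   serve(cfg)   # blocks until the client asks the server to shut down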
|
the-stack_106_31458 | # THE WINTER IS COMING! the old driver will be driving who was a man of the world!
# -*- coding: utf-8 -*- python 3.6.7, create time is 18-11-30 12:26 PM GMT+8
DEFAULT_LOG_FILENAME = '日志.log'  # '日志' means 'log'
# Enabled spider classes
SPIDERS = [
# 'spiders.baidu.BaiduSpider',
'spiders.douban.DoubanSpider'
]
# Enabled pipeline classes
PIPELINES = [
# 'pipelines.BaiduPipeline',
'pipelines.DoubanPipeline'
]
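# The SPIDERS / PIPELINES entries above are dotted import paths. The framework that
# reads this settings module is not shown here, but such paths are conventionally
# resolved with importlib; a minimal sketch of that convention (an assumption, not
# the framework's actual loader):
#
#   import importlib
#
#   def load_class(dotted_path):
#       module_path, _, class_name = dotted_path.rpartition('.')
#       return getattr(importlib.import_module(module_path), class_name)
#
#   spider_classes = [load_class(path) for path in SPIDERS]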
# Enabled spider middleware classes
# SPIDER_MIDDLEWARES = []
# Enabled downloader middleware classes
# DOWNLOADER_MIDDLEWARES = []
# ASYNC_TYPE = 'coroutine' |
the-stack_106_31460 | import queue
import logging
import traceback
import threading
import contextlib
import collections
import envi
import envi.bits as e_bits
import envi.memory as e_mem
import envi.pagelookup as e_page
import envi.codeflow as e_codeflow
import vstruct.cparse as vs_cparse
import vstruct.builder as vs_builder
import vstruct.constants as vs_const
import vivisect.const as viv_const
import vivisect.impapi as viv_impapi
import vivisect.analysis as viv_analysis
import vivisect.codegraph as viv_codegraph
from envi.threads import firethread
from vivisect.exc import *
from vivisect.const import *
logger = logging.getLogger(__name__)
"""
Mostly this is a place to scuttle away some of the inner workings
of a workspace, so the outer facing API is a little cleaner.
"""
class VivEventCore(object):
'''
A class to facilitate event monitoring in the viv workspace.
'''
def __init__(self, vw=None, **kwargs):
self._ve_vw = vw
self._ve_ehand = [None for x in range(VWE_MAX)]
self._ve_thand = [None for x in range(VTE_MAX)]
self._ve_lock = threading.Lock()
# Find and put handler functions into the list
for name in dir(self):
if name.startswith('VWE_'):
idx = getattr(viv_const, name, None)
self._ve_ehand[idx] = getattr(self, name)
if name.startswith('VTE_'):
idx = getattr(viv_const, name, None)
self._ve_thand[idx] = getattr(self, name)
def _ve_fireEvent(self, event, edata):
hlist = self._ve_ehand
if event & VTE_MASK:
event ^= VTE_MASK
hlist = self._ve_thand
h = hlist[event]
if h is not None:
try:
h(self._ve_vw, event, edata)
except Exception as e:
logger.error(traceback.format_exc())
@firethread
def _ve_fireListener(self):
chanid = self._ve_vw.createEventChannel()
try:
etup = self._ve_vw.waitForEvent(chanid)
while etup is not None:
self._ve_lock.acquire()
self._ve_lock.release()
self._ve_fireEvent(*etup)
etup = self._ve_vw.waitForEvent(chanid)
finally:
self._ve_vw.deleteEventChannel(chanid)
def _ve_freezeEvents(self):
self._ve_lock.acquire()
def _ve_thawEvents(self):
self._ve_lock.release()
vaset_xlate = {
int:VASET_ADDRESS,
str:VASET_STRING,
}
class VivEventDist(VivEventCore):
'''
Similar to an event core, but does optimized distribution
to a set of sub eventcore objects (think GUI windows...)
'''
def __init__(self, vw=None, **kwargs):
if vw is None:
raise Exception("VivEventDist requires a vw argument")
VivEventCore.__init__(self, vw)
self._ve_subs = [ [] for x in range(VWE_MAX) ]
self._ve_tsubs = [ [] for x in range(VTE_MAX) ]
self.addEventCore(self)
# event distributors pretty much always need a thread
self._ve_fireListener()
def addEventCore(self, core):
for i in range(VWE_MAX):
h = core._ve_ehand[i]
if h is not None:
self._ve_subs[i].append(h)
for i in range(VTE_MAX):
h = core._ve_thand[i]
if h is not None:
self._ve_tsubs[i].append(h)
def delEventCore(self, core):
for i in range(VWE_MAX):
h = core._ve_ehand[i]
if h is not None:
self._ve_subs[i].remove(h)
for i in range(VTE_MAX):
h = core._ve_thand[i]
if h is not None:
self._ve_tsubs[i].remove(h)
def _ve_fireEvent(self, event, edata):
'''
We don't have events of our own, we just hand them down.
'''
subs = self._ve_subs
if event & VTE_MASK:
event ^= VTE_MASK
subs = self._ve_tsubs
hlist = subs[event]
for h in hlist:
try:
h(self._ve_vw, event, edata)
except Exception:
logger.error(traceback.format_exc())
VivEventCore._ve_fireEvent(self, event, edata)
def ddict():
return collections.defaultdict(dict)
class VivWorkspaceCore(viv_impapi.ImportApi):
'''
A base class that the VivWorkspace inherits from that defines a lot of the event handlers
for things like the creation of the various location types.
'''
def __init__(self):
viv_impapi.ImportApi.__init__(self)
self.loclist = []
self.bigend = False
self.locmap = e_page.MapLookup()
self.blockmap = e_page.MapLookup()
self._mods_loaded = False
# Storage for function local symbols
self.localsyms = ddict()
self._call_graph = viv_codegraph.CallGraph()
# Just in case of the GUI... :)
self._call_graph.setMeta('bgcolor', '#000')
self._call_graph.setMeta('nodecolor', '#00ff00')
self._call_graph.setMeta('edgecolor', '#00802b')
self._event_list = []
self._event_saved = 0 # The index of the last "save" event...
# Give ourself a structure namespace!
self.vsbuilder = vs_builder.VStructBuilder()
self.vsconsts = vs_const.VSConstResolver()
def _snapInAnalysisModules(self):
'''
Snap in the analysis modules which are appropriate for the
format/architecture/platform of this workspace by calling
'''
if self._mods_loaded:
return
viv_analysis.addAnalysisModules(self)
self._mods_loaded = True
def _createSaveMark(self):
'''
Update the index of the most recent saved event to the current
length of the event list (called after successful save)..
'''
self._event_saved = len(self._event_list)
@contextlib.contextmanager
def getAdminRights(self):
self._supervisor = True
yield
self._supervisor = False
def _handleADDLOCATION(self, loc):
lva, lsize, ltype, linfo = loc
self.locmap.setMapLookup(lva, lsize, loc)
self.loclist.append(loc)
# A few special handling cases...
if ltype == LOC_IMPORT:
# Check if the import is registered in NoReturnApis
if self.getMeta('NoReturnApis', {}).get(linfo.lower()):
self.cfctx.addNoReturnAddr( lva )
def _handleDELLOCATION(self, loc):
# FIXME delete xrefs
lva, lsize, ltype, linfo = loc
self.locmap.setMapLookup(lva, lsize, None)
self.loclist.remove(loc)
def _handleADDSEGMENT(self, einfo):
self.segments.append(einfo)
def _handleADDRELOC(self, einfo):
if len(einfo) == 2: # FIXME: legacy: remove after 02/13/2020
rva, rtype = einfo
mmva, mmsz, mmperm, fname = self.getMemoryMap(rva) # FIXME: getFileByVa does not obey file defs
imgbase = self.getFileMeta(fname, 'imagebase')
data = None
einfo = fname, rva-imgbase, rtype, data
else:
fname, ptroff, rtype, data = einfo
imgbase = self.getFileMeta(fname, 'imagebase')
rva = imgbase + ptroff
self.reloc_by_va[rva] = rtype
self.relocations.append(einfo)
# RTYPE_BASERELOC assumes the memory is already accurate (eg. PE's unless rebased)
if rtype in REBASE_TYPES:
# add imgbase and offset to pointer in memory
# 'data' arg must be 'offset' number
ptr = imgbase + data
if ptr != (ptr & e_bits.u_maxes[self.psize]):
logger.warning('RTYPE_BASEOFF calculated a bad pointer: 0x%x (imgbase: 0x%x)', ptr, imgbase)
# writes are costly, especially on larger binaries
if ptr != self.readMemoryPtr(rva):
with self.getAdminRights():
self.writeMemoryPtr(rva, ptr)
if rtype == RTYPE_BASEPTR:
# make it like a pointer (but one that could move with each load)
# self.addXref(va, tova, REF_PTR)
# ploc = self.addLocation(va, psize, LOC_POINTER)
# don't follow. handle it later, once "known code" is analyzed
ptr, reftype, rflags = self.arch.archModifyXrefAddr(ptr, None, None)
self._handleADDXREF((rva, ptr, REF_PTR, 0))
self._handleADDLOCATION((rva, self.psize, LOC_POINTER, ptr))
def _handleADDMODULE(self, einfo):
logger.warning('DEPRECATED (ADDMODULE) ignored: %s', einfo)
def _handleDELMODULE(self, einfo):
logger.warning('DEPRECATED (DELMODULE) ignored: %s', einfo)
def _handleADDFMODULE(self, einfo):
logger.warning('DEPRECATED (ADDFMODULE) ignored: %s', einfo)
def _handleDELFMODULE(self, einfo):
logger.warning('DEPRECATED (DELFMODULE) ignored: %s', einfo)
def _handleADDFUNCTION(self, einfo):
va, meta = einfo
self._initFunction(va)
# node = self._call_graph.addNode( nid=va, repr=self.getName( va ) ) #, color='#00ff00' )
# node = self._call_graph.getFunctionNode(va, repr=self.getName( va ) )
node = self._call_graph.getFunctionNode(va)
self._call_graph.setNodeProp(node,'repr', self.getName(va))
# Tell the codeflow subsystem about this one!
calls_from = meta.get('CallsFrom')
self.cfctx.addFunctionDef(va, calls_from)
self.funcmeta[va] = meta
for name, value in meta.items():
mcbname = "_fmcb_%s" % name.split(':')[0]
mcb = getattr(self, mcbname, None)
if mcb is not None:
mcb(va, name, value)
def _handleDELFUNCTION(self, einfo):
# clear funcmeta, func_args, codeblocks_by_funcva, update codeblocks, blockgraph, locations, etc...
fva = einfo
# not every codeblock identifying as this function is stored in funcmeta
for cb in self.getCodeBlocks():
if cb[CB_FUNCVA] == fva:
self._handleDELCODEBLOCK(cb)
self.funcmeta.pop(fva)
self.func_args.pop(fva, None)
self.codeblocks_by_funcva.pop(fva)
node = self._call_graph.getNode(fva)
self._call_graph.delNode(node)
self.cfctx.flushFunction(fva)
# FIXME: do we want to now seek the function we *should* be in?
# if xrefs_to, look for non-PROC code xrefs and take their function
# if the previous instruction falls through, take its function
# run codeblock analysis on that function to reassociate the blocks
# with that function
def _handleSETFUNCMETA(self, einfo):
funcva, name, value = einfo
m = self.funcmeta.get(funcva)
if m is not None:
m[name] = value
mcbname = "_fmcb_%s" % name.split(':')[0]
mcb = getattr(self, mcbname, None)
if mcb is not None:
mcb(funcva, name, value)
def _handleADDCODEBLOCK(self, einfo):
va,size,funcva = einfo
self.blockmap.setMapLookup(va, size, einfo)
self.codeblocks_by_funcva.get(funcva).append(einfo)
self.codeblocks.append(einfo)
def _handleDELCODEBLOCK(self, cb):
va,size,funcva = cb
self.codeblocks.remove(cb)
self.codeblocks_by_funcva.get(cb[CB_FUNCVA]).remove(cb)
self.blockmap.setMapLookup(va, size, None)
def _handleADDXREF(self, einfo):
fromva, tova, reftype, rflags = einfo
xr_to = self.xrefs_by_to.get(tova, None)
xr_from = self.xrefs_by_from.get(fromva, None)
if xr_to is None:
xr_to = []
self.xrefs_by_to[tova] = xr_to
if xr_from is None:
xr_from = []
self.xrefs_by_from[fromva] = xr_from
if einfo not in xr_to: # Just check one for now
xr_to.append(einfo)
xr_from.append(einfo)
self.xrefs.append(einfo)
def _handleDELXREF(self, einfo):
fromva, tova, reftype, refflags = einfo
self.xrefs_by_to[tova].remove(einfo)
self.xrefs_by_from[fromva].remove(einfo)
def _handleSETNAME(self, einfo):
va, name = einfo
if name is None:
oldname = self.name_by_va.pop(va, None)
self.va_by_name.pop(oldname, None)
else:
curname = self.name_by_va.get(va)
if curname is not None:
logger.debug('replacing 0x%x: %r -> %r', va, curname, name)
self.va_by_name.pop(curname)
self.va_by_name[name] = va
self.name_by_va[va] = name
if self.isFunction(va):
fnode = self._call_graph.getFunctionNode(va)
if name is None:
self._call_graph.delNodeProp(fnode, 'repr')
else:
self._call_graph.setNodeProp(fnode, 'repr', name)
def _handleADDMMAP(self, einfo):
va, perms, fname, mbytes = einfo
e_mem.MemoryObject.addMemoryMap(self, va, perms, fname, mbytes)
blen = len(mbytes)
self.locmap.initMapLookup(va, blen)
self.blockmap.initMapLookup(va, blen)
# On loading a new memory map, we need to crush a few
# transmeta items...
self.transmeta.pop('findPointers',None)
def _handleADDEXPORT(self, einfo):
va, etype, name, filename = einfo
self.exports.append(einfo)
self.exports_by_va[va] = einfo
def _handleSETMETA(self, einfo):
name,value = einfo
# See if there's a callback handler for this meta set.
# For "meta namespaces" use the first part to find the
# callback name....
mcbname = "_mcb_%s" % name.split(':')[0]
mcb = getattr(self, mcbname, None)
if mcb is not None:
mcb(name, value)
self.metadata[name] = value
def _handleCOMMENT(self, einfo):
va,comment = einfo
if comment is None:
self.comments.pop(va, None)
else:
self.comments[va] = comment
def _handleADDFILE(self, einfo):
normname, imagebase, md5sum = einfo
self.filemeta[normname] = {"md5sum":md5sum,"imagebase":imagebase}
def _handleSETFILEMETA(self, einfo):
fname, key, value = einfo
self.filemeta.get(fname)[key] = value
def _handleADDCOLOR(self, coltup):
mapname, colmap = coltup
self.colormaps[mapname] = colmap
def _handleDELCOLOR(self, mapname):
self.colormaps.pop(mapname)
def _handleADDVASET(self, argtup):
name, defs, rows = argtup
# NOTE: legacy translation for vaset column types...
defs = [ (cname,vaset_xlate.get(ctype,ctype)) for (cname,ctype) in defs ]
self.vasetdefs[name] = defs
vals = {}
for row in rows:
vals[row[0]] = row
self.vasets[name] = vals
def _handleDELVASET(self, setname):
self.vasetdefs.pop(setname)
self.vasets.pop(setname)
def _handleADDFREF(self, frtup):
va, idx, val = frtup
self.frefs[(va,idx)] = val
def _handleDELFREF(self, frtup):
va, idx, val = frtup
self.frefs.pop((va,idx), None)
def _handleSETVASETROW(self, argtup):
name, row = argtup
self.vasets[name][row[0]] = row
def _handleDELVASETROW(self, argtup):
name, va = argtup
self.vasets[name].pop(va, None)
def _handleADDFSIG(self, einfo):
raise NotImplementedError("FSIG is deprecated and should not be used")
def _handleFOLLOWME(self, va):
pass
def _handleCHAT(self, msgtup):
# FIXME make a GUI window for this...
user, msg = msgtup
self.vprint('%s: %s' % (user, msg))
def _handleSYMHINT(self, msgtup):
va, idx, hint = msgtup
if hint is None:
self.symhints.pop((va,idx), None)
else:
self.symhints[(va,idx)] = hint
def _handleSETFUNCARGS(self, einfo):
fva, args = einfo
self.func_args[fva] = args
def _handleAUTOANALFIN(self, einfo):
'''
This event is more for the storage subsystem than anything else. It
marks the end of autoanalysis. Any event beyond this is due to the
end user or analysis modules they've executed.
'''
pass
def _initEventHandlers(self):
self.ehand = [None for x in range(VWE_MAX)]
self.ehand[VWE_ADDLOCATION] = self._handleADDLOCATION
self.ehand[VWE_DELLOCATION] = self._handleDELLOCATION
self.ehand[VWE_ADDSEGMENT] = self._handleADDSEGMENT
self.ehand[VWE_DELSEGMENT] = None
self.ehand[VWE_ADDRELOC] = self._handleADDRELOC
self.ehand[VWE_DELRELOC] = None
self.ehand[VWE_ADDMODULE] = self._handleADDMODULE
self.ehand[VWE_DELMODULE] = self._handleDELMODULE
self.ehand[VWE_ADDFMODULE] = self._handleADDFMODULE
self.ehand[VWE_DELFMODULE] = self._handleDELFMODULE
self.ehand[VWE_ADDFUNCTION] = self._handleADDFUNCTION
self.ehand[VWE_DELFUNCTION] = self._handleDELFUNCTION
self.ehand[VWE_SETFUNCARGS] = self._handleSETFUNCARGS
self.ehand[VWE_SETFUNCMETA] = self._handleSETFUNCMETA
self.ehand[VWE_ADDCODEBLOCK] = self._handleADDCODEBLOCK
self.ehand[VWE_DELCODEBLOCK] = self._handleDELCODEBLOCK
self.ehand[VWE_ADDXREF] = self._handleADDXREF
self.ehand[VWE_DELXREF] = self._handleDELXREF
self.ehand[VWE_SETNAME] = self._handleSETNAME
self.ehand[VWE_ADDMMAP] = self._handleADDMMAP
self.ehand[VWE_DELMMAP] = None
self.ehand[VWE_ADDEXPORT] = self._handleADDEXPORT
self.ehand[VWE_DELEXPORT] = None
self.ehand[VWE_SETMETA] = self._handleSETMETA
self.ehand[VWE_COMMENT] = self._handleCOMMENT
self.ehand[VWE_ADDFILE] = self._handleADDFILE
self.ehand[VWE_DELFILE] = None
self.ehand[VWE_SETFILEMETA] = self._handleSETFILEMETA
self.ehand[VWE_ADDCOLOR] = self._handleADDCOLOR
self.ehand[VWE_DELCOLOR] = self._handleDELCOLOR
self.ehand[VWE_ADDVASET] = self._handleADDVASET
self.ehand[VWE_DELVASET] = self._handleDELVASET
self.ehand[VWE_SETVASETROW] = self._handleSETVASETROW
self.ehand[VWE_DELVASETROW] = self._handleDELVASETROW
self.ehand[VWE_ADDFSIG] = self._handleADDFSIG
self.ehand[VWE_ADDFREF] = self._handleADDFREF
self.ehand[VWE_DELFREF] = self._handleDELFREF
self.ehand[VWE_FOLLOWME] = self._handleFOLLOWME
self.ehand[VWE_CHAT] = self._handleCHAT
self.ehand[VWE_SYMHINT] = self._handleSYMHINT
self.ehand[VWE_AUTOANALFIN] = self._handleAUTOANALFIN
self.thand = [None for x in range(VTE_MAX)]
self.thand[VTE_IAMLEADER] = self._handleIAMLEADER
self.thand[VTE_FOLLOWME] = self._handleFOLLOWME
def _handleIAMLEADER(self, event, einfo):
user,follow = einfo
self.vprint('*%s invites everyone to follow "%s"' % (user,follow))
def _handleFOLLOWME(self, event, einfo):
# workspace has nothing to do...
pass
def _fireEvent(self, event, einfo, local=False, skip=None):
'''
Fire an event down the hole. "local" specifies that this is
being called on a client (self.server is not None) but we got it
from the server in the first place so no need to send it back.
skip is used to tell the server to bypass our channelid when
putting the event into channel queues (we took care of our own).
'''
try:
if event & VTE_MASK:
return self._fireTransEvent(event, einfo)
# Do our main event processing
self.ehand[event](einfo)
# If we're supposed to call a server, do that.
if self.server is not None and local == False:
self.server._fireEvent(event, einfo, skip=self.rchan)
# FIXME perhaps we should only process events *via* our server
# if we have one? Just to confirm it works before we apply it...
self._event_list.append((event, einfo))
for id, q in self.chan_lookup.items():
if id == skip:
continue
try:
q.put_nowait((event, einfo))
except queue.Full as e:
logger.warning('Queue is full!')
except Exception as e:
logger.error(traceback.format_exc())
def _fireTransEvent(self, event, einfo):
for q in self.chan_lookup.values():
q.put((event, einfo))
return self.thand[event ^ VTE_MASK](event,einfo)
def _initFunction(self, funcva):
# Internal function to initialize all datastructures necessary for
# a function, but only if they haven't been done already.
if self.funcmeta.get(funcva) is None:
self.funcmeta[funcva] = {} # Its metadata
self.codeblocks_by_funcva[funcva] = [] # Init code block list
#def _loadImportApi(self, apidict):
#self._imp_api.update( apidict )
def getEndian(self):
return self.bigend
def setEndian(self, endian):
self.bigend = endian
for arch in self.imem_archs:
arch.setEndian(self.bigend)
if self.arch is not None:
self.arch.setEndian(self.bigend)
#################################################################
#
# setMeta key callbacks
#
def _mcb_Architecture(self, name, value):
# This is for legacy stuff...
self.arch = envi.getArchModule(value)
self.psize = self.arch.getPointerSize()
archid = envi.getArchByName(value)
self.setMemArchitecture(archid)
# Default calling convention for architecture
# This will be superseded by Platform and Parser settings
defcall = self.arch.getArchDefaultCall()
if defcall:
self.setMeta('DefaultCall', defcall)
def _mcb_bigend(self, name, value):
self.setEndian(bool(value))
def _mcb_Platform(self, name, value):
# Default calling convention for platform
# This supersedes Architecture's setting and should make
# parser settings obsolete
defcall = self.arch.getPlatDefaultCall(value)
if defcall:
self.setMeta('DefaultCall', defcall)
def _mcb_ustruct(self, name, ssrc):
# All meta values in the "ustruct" namespace are user defined
# structure definitions in C.
sname = name.split(':')[1]
ctor = vs_cparse.ctorFromCSource( ssrc )
self.vsbuilder.addVStructCtor( sname, ctor )
def _mcb_WorkspaceServer(self, name, wshost):
self.vprint('Workspace was Saved to Server: %s' % wshost)
self.vprint('(You must close this local copy and work from the server to stay in sync.)')
def _fmcb_Thunk(self, funcva, th, thunkname):
# If the function being made a thunk is registered
# in NoReturnApis, update codeflow...
if self.getMeta('NoReturnApis').get( thunkname.lower() ):
self.cfctx.addNoReturnAddr( funcva )
def _fmcb_CallsFrom(self, funcva, th, callsfrom):
for va in callsfrom:
f2va = self.getFunction( va )
if f2va is not None:
self._call_graph.getCallEdge( funcva, f2va )
def _fmcb_LocalSymbol(self, fva, mname, locsym):
fva,spdelta,symtype,syminfo = locsym
self.localsyms[fva][spdelta] = locsym
def trackDynBranches(cfctx, op, vw, bflags, branches):
'''
track dynamic branches
'''
# FIXME: do we want to filter anything out?
# jmp edx
# jmp dword [ebx + 68]
# call eax
# call dword [ebx + eax * 4 - 228]
# if we have any xrefs from here, we have already been analyzed. nevermind.
if len(vw.getXrefsFrom(op.va)):
return
vw.vprint("0x%x: Dynamic Branch found at (%s)" % (op.va, op))
vw.setVaSetRow('DynamicBranches', (op.va, repr(op), bflags))
class VivCodeFlowContext(e_codeflow.CodeFlowContext):
def __init__(self, mem, persist=False, exptable=True, recurse=True):
e_codeflow.CodeFlowContext.__init__(self, mem, persist=persist, exptable=exptable, recurse=recurse)
self.addDynamicBranchHandler(trackDynBranches)
def _cb_noflow(self, srcva, dstva):
vw = self._mem
loc = vw.getLocation( srcva )
if loc is None:
return
lva,lsize,ltype,linfo = loc
if ltype != LOC_OP:
return
# Update the location def for NOFALL bit
vw.delLocation(lva)
vw.addLocation(lva, lsize, ltype, linfo | envi.IF_NOFALL)
vw.setVaSetRow('NoReturnCalls', (lva,))
# NOTE: self._mem is the viv workspace...
def _cb_opcode(self, va, op, branches):
'''
callback for each OPCODE in codeflow analysis
must return list of branches, modified for our purposes
'''
loc = self._mem.getLocation(va)
if loc is None:
# dont code flow through import calls
branches = [br for br in branches if not self._mem.isLocType(br[0], LOC_IMPORT)]
self._mem.makeOpcode(op.va, op=op)
# TODO: future home of makeOpcode branch/xref analysis
return branches
elif loc[L_LTYPE] != LOC_OP:
locrepr = self._mem.reprLocation(loc)
logger.warning("_cb_opcode(0x%x): LOCATION ALREADY EXISTS: loc: %r", va, locrepr)
return ()
def _cb_function(self, fva, fmeta):
vw = self._mem
if vw.isFunction(fva):
return
# This may be possible if an export/symbol was mistaken for
# a function...
if not vw.isLocType(fva, LOC_OP):
return
# If the function doesn't have a name, make one
if vw.getName(fva) is None:
vw.makeName(fva, "sub_%.8x" % fva)
vw._fireEvent(VWE_ADDFUNCTION, (fva,fmeta))
# Go through the function analysis modules in order
vw.analyzeFunction(fva)
fname = vw.getName( fva )
if vw.getMeta('NoReturnApis').get( fname.lower() ):
self._cf_noret[ fva ] = True
if len( vw.getFunctionBlocks( fva )) == 1:
return
fmeta = vw.getFunctionMetaDict(fva)
for lva in vw.getVaSetRows('NoReturnCalls'):
va = lva[0]
ctup = vw.getCodeBlock(va)
if ctup and fva == ctup[2] and vw.getFunctionMeta(fva, 'BlockCount', default=0) == 1:
self._cf_noret[ fva ] = True
break
def _cb_branchtable(self, tablebase, tableva, destva):
if tablebase != tableva and self._mem.getXrefsTo(tableva):
return False
if self._mem.getLocation(tableva) is None:
self._mem.makePointer(tableva, tova=destva, follow=False)
return True
|
the-stack_106_31461 | # -*- coding: utf-8 -*-
import sys
import redis
from pickle import dumps, loads
from lumbermill.BaseThreadedModule import BaseThreadedModule
from lumbermill.utils.Buffers import Buffer
from lumbermill.utils.Decorators import ModuleDocstringParser
@ModuleDocstringParser
class Cache(BaseThreadedModule):
"""
A simple wrapper around the python simplekv module.
It can be used to store results of modules in all simplekv supported backends.
When set, the following options cause RedisStore to use a buffer for setting values.
Multiple values are set via the pipe command, which speeds up storage. Still this comes at a price.
Buffered values, that have not yet been send to redis, will be lost when LumberMill crashes.
backend: backends supported by [simplekv](http://pythonhosted.org//simplekv/)
store_interval_in_secs: Sending data to redis in x seconds intervals.
batch_size: Sending data to redis if count is above, even if store_interval_in_secs is not reached.
backlog_size: Maximum count of values waiting for transmission. Values above count will be dropped.
Configuration template:
- Cache:
backend: # <default: 'DictStore'; type: string; values:['DictStore', 'RedisStore', 'MemcacheStore']; is: optional>
server: # <default: None; type: None||string; is: required if backend in ['RedisStore', 'MemcacheStore'] and cluster is None else optional>
cluster: # <default: None; type: None||dictionary; is: required if backend == 'RedisStore' and server is None else optional>
port: # <default: 6379; type: integer; is: optional>
db: # <default: 0; type: integer; is: optional>
password: # <default: None; type: None||string; is: optional>
socket_timeout: # <default: 10; type: integer; is: optional>
charset: # <default: 'utf-8'; type: string; is: optional>
errors: # <default: 'strict'; type: string; is: optional>
decode_responses: # <default: False; type: boolean; is: optional>
unix_socket_path: # <default: None; type: None||string; is: optional>
batch_size: # <default: None; type: None||integer; is: optional>
store_interval_in_secs: # <default: None; type: None||integer; is: optional>
backlog_size: # <default: 5000; type: integer; is: optional>
"""
module_type = "stand_alone"
"""Set module type"""
def configure(self, configuration):
# Call parent configure method
BaseThreadedModule.configure(self, configuration)
self.backend = self.getConfigurationValue('backend')
self.backend_client = None
self.kv_store = None
self.set_buffer = None
if self.backend == 'DictStore':
import simplekv.memory
self.kv_store = simplekv.memory.DictStore()
elif self.backend == 'RedisStore':
import simplekv.memory.redisstore
self.backend_client = self._getRedisClient()
self.kv_store = simplekv.memory.redisstore.RedisStore(self.backend_client)
elif self.backend == 'MemcacheStore':
import simplekv.memory.memcachestore
self.backend_client = self._getMemcacheClient()
self.kv_store = simplekv.memory.memcachestore.MemcacheStore(self.backend_client)
else:
self.logger("Unknown backend type %s. Please check." % backend)
self.lumbermill.shutDown();
if self.getConfigurationValue('store_interval_in_secs') or self.getConfigurationValue('batch_size'):
if self.backend == 'RedisStore':
self.set_buffer = Buffer(self.getConfigurationValue('batch_size'), self._setRedisBufferedCallback, self.getConfigurationValue('store_interval_in_secs'), maxsize=self.getConfigurationValue('backlog_size'))
else:
self.set_buffer = Buffer(self.getConfigurationValue('batch_size'), self._setBufferedCallback, self.getConfigurationValue('store_interval_in_secs'), maxsize=self.getConfigurationValue('backlog_size'))
self._set = self.set
self.set = self._setBuffered
self._get = self.get
self.get = self._getBuffered
self._delete = self.delete
self.delete = self._deleteBuffered
self._pop = self.pop
self.pop = self._popBuffered
def _getRedisClient(self):
if not self.getConfigurationValue('cluster') or len(self.getConfigurationValue('cluster')) == 0:
redis_store = self.getConfigurationValue('server')
client = self._getSimpleRedisClient()
else:
redis_store = self.getConfigurationValue('cluster')
client = self._getClusterRedisClient()
try:
client.ping()
except:
etype, evalue, etb = sys.exc_info()
self.logger.error("Could not connect to redis store at %s. Exception: %s, Error: %s." % (redis_store, etype, evalue))
self.lumbermill.shutDown()
return client
def _getMemcacheClient(self):
client = None
# TODO: implement memcache client
return client
def _getSimpleRedisClient(self):
try:
client = redis.StrictRedis(host=self.getConfigurationValue('server'),
port=self.getConfigurationValue('port'),
db=self.getConfigurationValue('db'),
password=self.getConfigurationValue('password'),
socket_timeout=self.getConfigurationValue('socket_timeout'),
charset=self.getConfigurationValue('charset'),
errors=self.getConfigurationValue('errors'),
decode_responses=self.getConfigurationValue('decode_responses'),
unix_socket_path=self.getConfigurationValue('unix_socket_path'))
return client
except:
etype, evalue, etb = sys.exc_info()
self.logger.error("Could not connect to redis store at %s. Exception: %s, Error: %s." % (self.getConfigurationValue['server'], etype, evalue))
self.lumbermill.shutDown()
def _getClusterRedisClient(self):
try:
import rediscluster
except ImportError:
self.logger.error("Could not import rediscluster module. To install follow instructions @https://github.com/salimane/rediscluster-py")
self.lumbermill.shutDown()
# TODO: Implement a locking mechanism for the cluster client.
# Some modules like Facet depend on this.
cluster = {'nodes': {}, 'master_of': {}}
counter = 1
for master_node, slave_nodes in self.getConfigurationValue('cluster').items():
master_node_key = "node_%d" % counter
node_name_or_ip, node_port = self._parseRedisServerAddress(master_node)
cluster['nodes'].update({master_node_key: {'host': node_name_or_ip, 'port': node_port}})
if 'default_node' not in cluster:
cluster['default_node'] = master_node
if type(slave_nodes) is str:
slave_nodes = [slave_nodes]
for slave_node in slave_nodes:
counter += 1
slave_node_key = "node_%d" % counter
node_name_or_ip, node_port = self._parseRedisServerAddress(slave_node)
cluster['nodes'].update({slave_node_key: {'host':node_name_or_ip, 'port': node_port}})
cluster['master_of'].update({master_node_key: slave_node_key})
try:
client = rediscluster.StrictRedisCluster(cluster=cluster, db=self.getConfigurationValue('db'))
except:
etype, evalue, etb = sys.exc_info()
self.logger.error("Could not connect to redis store at %s. Exception: %s, Error: %s." % (self.getConfigurationValue['cluster'], etype, evalue))
self.lumbermill.shutDown()
return client
def _parseRedisServerAddress(self, node_address):
try:
node_name_or_ip, node_port = node_address.split(":")
except ValueError:
node_name_or_ip = node_address
node_port = self.getConfigurationValue('port')
return (node_name_or_ip, node_port)
def getBackendName(self):
return self.backend
def iterKeys(self):
for key in self.kv_store.iter_keys():
yield key
def getClient(self):
return self.backend_client
def getLock(self, name, timeout=None, sleep=0.1):
lock = False
try:
lock = self.backend_client.lock(name, timeout, sleep)
except AttributeError:
pass
return lock
def set(self, key, value, ttl=0, pickle=True):
if pickle is True:
try:
value = dumps(value)
except:
etype, evalue, etb = sys.exc_info()
self.logger.error("Could not store %s:%s in redis. Exception: %s, Error: %s." % (key, value, etype, evalue))
raise
# Only backend clients support ttl.
if self.backend_client and ttl:
self.kv_store.put(key, value, ttl_secs=ttl)
else:
self.kv_store.put(key, value)
def _setBuffered(self, key, value, ttl=0, pickle=True):
self.set_buffer.append({'key': key, 'value': value, 'ttl': ttl, 'pickle': pickle})
def _setBufferedCallback(self, values):
for value in values:
self._set(value['key'], value['value'], value['ttl'], value['pickle'])
def _setRedisBufferedCallback(self, values):
pipe = self.backend_client.pipeline()
for value in values:
if value['pickle'] is True:
try:
value['value'] = dumps(value['value'])
except:
etype, evalue, etb = sys.exc_info()
self.logger.error("Could not store %s:%s in redis. Exception: %s, Error: %s." % (value['key'], value['value'], etype, evalue))
raise
if(value['ttl'] == 0):
pipe.set(value['key'], value['value'])
else:
pipe.setex(value['key'], value['ttl'], value['value'])
try:
pipe.execute()
return True
except:
etype, evalue, etb = sys.exc_info()
self.logger.error("Could not flush buffer. Exception: %s, Error: %s." % (etype, evalue))
def get(self, key, unpickle=True):
value = self.kv_store.get(key)
if unpickle and value:
try:
value = loads(value)
except:
etype, evalue, etb = sys.exc_info()
self.logger.error("Could not unpickle %s:%s from redis. Exception: %s, Error: %s." % (key, value, etype, evalue))
raise
return value
def _getBuffered(self, key, unpickle=True):
try:
value_idx = next(index for (index, entry) in enumerate(self.set_buffer.buffer) if entry["key"] == key)
return self.set_buffer.buffer[value_idx]['value']
except:
return self._get(key, unpickle)
def delete(self, key):
self.kv_store.delete(key)
def _deleteBuffered(self, key):
try:
value_idx = next(index for (index, entry) in enumerate(self.set_buffer.buffer) if entry["key"] == key)
self.set_buffer.buffer.pop(value_idx)
return
except:
self._delete(key)
def pop(self, key, unpickle=True):
value = self.get(key, unpickle)
if value:
self.delete(key)
return value
def _popBuffered(self, key, unpickle=True):
try:
value_idx = next(index for (index, entry) in enumerate(self.set_buffer.buffer) if entry["key"] == key)
return self.set_buffer.buffer.pop(value_idx)['value']
except:
return self._pop(key, unpickle)
def shutDown(self):
try:
self.set_buffer.flush()
except:
pass
BaseThreadedModule.shutDown(self)
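# Rough usage sketch (hypothetical -- in practice LumberMill instantiates and
# configures this module from its YAML configuration, so the dict below only
# illustrates the options documented in the class docstring):
#
#   cache = Cache()
#   cache.configure({'backend': 'RedisStore', 'server': 'localhost'})
#   cache.set('some_key', {'answer': 42})      # pickled by default
#   value = cache.get('some_key')              # -> {'answer': 42}
#   cache.delete('some_key')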
|
the-stack_106_31462 | import insightconnect_plugin_runtime
from .schema import SetEncodingInput, SetEncodingOutput, Input, Output, Component
# Custom imports below
from insightconnect_plugin_runtime.exceptions import PluginException
class SetEncoding(insightconnect_plugin_runtime.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='set_encoding',
description=Component.DESCRIPTION,
input=SetEncodingInput(),
output=SetEncodingOutput())
def run(self, params={}):
string = params.get(Input.STRING)
encoding_val = params.get(Input.ENCODING).lower()
error_handler = params.get(Input.ERROR_HANDLING)
try:
output = string.encode(encoding_val, error_handler)
except UnicodeError:
raise PluginException(cause="Encoding failed.", assistance="Could not encode given string.")
output = output.decode(encoding_val, error_handler)
return {Output.ENCODED: output}
|
the-stack_106_31463 | import cv2
import numpy as np
# mouse callback function
def draw_circle(event,x,y,flags,param):
if event == cv2.EVENT_LBUTTONDBLCLK:
cv2.circle(img,(x,y),100,(255,0,0),-1)
# Create a black image, a window and bind the function to window
img = np.zeros((512,512,3), np.uint8)
cv2.namedWindow('image')
cv2.setMouseCallback('image',draw_circle)
while(1):
cv2.imshow('image',img)
if cv2.waitKey(20) & 0xFF == 27:
break
cv2.destroyAllWindows() |
the-stack_106_31464 | import numpy as np
from bayes_opt import BayesianOptimization
from bayes_opt.util import UtilityFunction, acq_max, load_logs, ensure_rng
from sklearn.gaussian_process.kernels import Matern
from sklearn.gaussian_process import GaussianProcessRegressor
def get_globals():
X = np.array([
[0.00, 0.00],
[0.99, 0.99],
[0.00, 0.99],
[0.99, 0.00],
[0.50, 0.50],
[0.25, 0.50],
[0.50, 0.25],
[0.75, 0.50],
[0.50, 0.75],
])
def get_y(X):
return -(X[:, 0] - 0.3) ** 2 - 0.5 * (X[:, 1] - 0.6)**2 + 2
y = get_y(X)
mesh = np.dstack(
np.meshgrid(np.arange(0, 1, 0.005), np.arange(0, 1, 0.005))
).reshape(-1, 2)
GP = GaussianProcessRegressor(
kernel=Matern(),
n_restarts_optimizer=25,
)
GP.fit(X, y)
return {'x': X, 'y': y, 'gp': GP, 'mesh': mesh}
def brute_force_maximum(MESH, GP, kind='ucb', kappa=1.0, xi=1.0):
uf = UtilityFunction(kind=kind, kappa=kappa, xi=xi)
mesh_vals = uf.utility(MESH, GP, 2)
max_val = mesh_vals.max()
max_arg_val = MESH[np.argmax(mesh_vals)]
return max_val, max_arg_val
GLOB = get_globals()
X, Y, GP, MESH = GLOB['x'], GLOB['y'], GLOB['gp'], GLOB['mesh']
def test_acq_with_ucb():
util = UtilityFunction(kind="ucb", kappa=1.0, xi=1.0)
epsilon = 1e-2
y_max = 2.0
max_arg = acq_max(
util.utility,
GP,
y_max,
bounds=np.array([[0, 1], [0, 1]]),
random_state=ensure_rng(0),
n_iter=20
)
_, brute_max_arg = brute_force_maximum(MESH, GP, kind='ucb', kappa=1.0, xi=1.0)
assert all(abs(brute_max_arg - max_arg) < epsilon)
def test_acq_with_ei():
util = UtilityFunction(kind="ei", kappa=1.0, xi=1e-6)
epsilon = 1e-2
y_max = 2.0
max_arg = acq_max(
util.utility,
GP,
y_max,
bounds=np.array([[0, 1], [0, 1]]),
random_state=ensure_rng(0),
n_iter=200,
)
_, brute_max_arg = brute_force_maximum(MESH, GP, kind='ei', kappa=1.0, xi=1e-6)
assert all(abs(brute_max_arg - max_arg) < epsilon)
def test_acq_with_poi():
util = UtilityFunction(kind="poi", kappa=1.0, xi=1e-4)
epsilon = 1e-2
y_max = 2.0
max_arg = acq_max(
util.utility,
GP,
y_max,
bounds=np.array([[0, 1], [0, 1]]),
random_state=ensure_rng(0),
n_iter=200,
)
_, brute_max_arg = brute_force_maximum(MESH, GP, kind='poi', kappa=1.0, xi=1e-4)
assert all(abs(brute_max_arg - max_arg) < epsilon)
def test_logs():
import pytest
def f(x, y):
return -x ** 2 - (y - 1) ** 2 + 1
optimizer = BayesianOptimization(
f=f,
pbounds={"x": (-2, 2), "y": (-2, 2)}
)
assert len(optimizer.space) == 0
load_logs(optimizer, "./tests/test_logs.json")
assert len(optimizer.space) == 5
load_logs(optimizer, ["./tests/test_logs.json"])
assert len(optimizer.space) == 5
other_optimizer = BayesianOptimization(
f=lambda x: -x ** 2,
pbounds={"x": (-2, 2)}
)
with pytest.raises(ValueError):
load_logs(other_optimizer, ["./tests/test_logs.json"])
if __name__ == '__main__':
r"""
CommandLine:
python tests/test_target_space.py
"""
import pytest
pytest.main([__file__])
|
the-stack_106_31466 | import json, os, re
import numpy as np
import pandas as pd
import plotnine as p9
import scipy.stats as stats
from Bio import SeqIO
from tqdm import tqdm
from statsmodels.stats.multitest import multipletests
from figure_3 import generate_input_for_figure
def check_annotations(ID, fastaFile):
for seqRecord in SeqIO.parse(fastaFile, format='fasta'):
if ID in seqRecord.id :
if '_MOUSE' in seqRecord.description:
description = re.findall(r'_MOUSE (.*) OS', seqRecord.description)[0]
if '_HETGA' in seqRecord.description:
description = re.findall(r'_HETGA (.*) OS', seqRecord.description)[0]
return description
def chaperone_clients_subset():
uniprot_mapping = pd.read_csv('../data/chaperone_clients/human_ensembl_to_uniprot.tab', sep='\t')
hs_mm_orthologs = pd.read_csv('../data/chaperone_clients/HS_MM_uni_ortholog_groups.csv', sep='\t')
hs_mm_orthologs = hs_mm_orthologs[['proteinID_x', 'proteinID_y']]
mm_chap_clt = hs_mm_orthologs[hs_mm_orthologs['proteinID_x'].isin(uniprot_mapping['Entry'])]['proteinID_y']
return mm_chap_clt
def build_gene_list(mm_chap_clt):
MM_fasta = '../data/ortholog_dataset/uni_MM_orthologs.faa'
main_path = os.path.dirname(os.getcwd())
dom_path = os.path.join(main_path, 'data/over-representation_analysis/domains/OUTPUTS/JSON/')
seq_path = os.path.join(main_path, 'data/over-representation_analysis/whole_sequence/OUTPUTS/JSON/')
all_gene_list = []
for json_path in [seq_path, dom_path]:
if 'domains' in json_path:
analysis = 'Domains'
elif 'whole_sequence' in json_path :
analysis = 'Proteins'
for file in os.listdir(json_path):
with open(os.path.join(json_path, file), 'r') as j:
contents = json.loads(j.read())
if 'high' in file :
phenotype = 'Higher aggregation propensity in NKM'
elif 'low' in file:
phenotype = 'Lower aggregation propensity in NKM'
if 'BP' in file :
GO_type = 'Biological Process'
elif 'CC' in file :
GO_type = 'Cellular Component'
elif 'MF' in file :
GO_type = 'Molecular Function'
if len(contents['overrepresentation']['group']) == 1:
try:
GO = contents['overrepresentation']['group']['result']
if GO['input_list']['number_in_list'] < 2 :
print(f'{GO["result"]["term"]["label"]} excluded, {GO_type}')
pass
else:
level = GO['term']['level']
go_id = GO['term']['id']
label = GO['term']['label']
fd = GO['input_list']['fold_enrichment']
pval = GO['input_list']['pValue']
protein_list = GO['input_list']['mapped_id_list']['mapped_id']
if not isinstance(protein_list, str):
for ID in list(protein_list):
all_gene_list.append([GO_type, level, go_id, label, ID, fd, pval, phenotype, analysis])
else:
all_gene_list.append([GO_type, level, go_id, label, protein_list, fd, pval, phenotype, analysis])
except:
for lst in GO:
level = lst['term']['level']
go_id = lst['term']['id']
label = lst['term']['label']
fd = lst['input_list']['fold_enrichment']
pval = lst['input_list']['pValue']
protein_list = lst['input_list']['mapped_id_list']['mapped_id']
if not isinstance(protein_list, str):
for ID in protein_list:
all_gene_list.append([GO_type, level, go_id, label, ID, fd, pval, phenotype, analysis])
else:
all_gene_list.append([GO_type, level, go_id, label, protein_list, fd, pval, phenotype, analysis])
for GO in contents['overrepresentation']['group']:
if type(GO) is not str:
try:
if GO['result']['input_list']['number_in_list'] < 2 :
print(f'{GO["result"]["term"]["label"]} excluded, {GO_type}')
pass
else:
level = GO['result']['term']['level']
go_id = GO['result']['term']['id']
label = GO['result']['term']['label']
fd = GO['result']['input_list']['fold_enrichment']
pval = GO['result']['input_list']['pValue']
protein_list = GO['result']['input_list']['mapped_id_list']['mapped_id']
if label in ['CCR chemokine receptor binding'] :
for ID in list(protein_list):
all_gene_list.append([GO_type, level, go_id, label, ID, fd, pval, phenotype, analysis])
if not isinstance(protein_list, str):
for ID in list(protein_list):
all_gene_list.append([GO_type, level, go_id, label, ID, fd, pval, phenotype, analysis])
else:
all_gene_list.append([GO_type, level, go_id, label, protein_list, fd, pval, phenotype, analysis])
except:
try:
for lst in GO['result']:
if lst['input_list']['number_in_list'] < 2 :
print(f'{lst["term"]["label"]} excluded, {GO_type}')
pass
else:
level = lst['term']['level']
go_id = lst['term']['id']
label = lst['term']['label']
fd = lst['input_list']['fold_enrichment']
pval = lst['input_list']['pValue']
protein_list = lst['input_list']['mapped_id_list']['mapped_id']
if not isinstance(protein_list, str):
for ID in protein_list:
all_gene_list.append([GO_type, level, go_id, label, ID, fd, pval, phenotype, analysis])
else:
all_gene_list.append([GO_type, level, go_id, label, protein_list, fd, pval, phenotype, analysis])
except:
for res in GO['result']:
if res['input_list']['number_in_list'] < 2 :
print(f'{res["term"]["label"]} excluded, {GO_type}')
pass
all_GO_gene_list = pd.DataFrame(all_gene_list, columns=['GO Type', 'Level', 'GO ID', 'GO Term', 'proteinID', 'Fold Enrichment', 'raw p-value', 'Phenotype', 'Analysis'])
all_GO_gene_list['description'] = all_GO_gene_list['proteinID'].progress_apply(check_annotations, args=(MM_fasta,))
all_GO_gene_list['Subset'] = [ 'chaperone clients' if ID in mm_chap_clt.values else 'other proteins' for ID in all_GO_gene_list['proteinID'].values ]
all_GO_gene_list['log2 Fold Enrichment'] = np.log2(all_GO_gene_list['Fold Enrichment'])
all_GO_gene_list['-log10 p-value'] = -np.log10(all_GO_gene_list['raw p-value'])
all_GO_gene_list.to_csv('../data/over-representation_analysis/stats/corrected_hypergeometric_tests/GO_gene_list.csv', index=False)
return all_GO_gene_list
def multiple_chisquare_tests(terms_with_all_genes):
all_tests = []
for GO in np.unique(terms_with_all_genes['GO Term']):
IN = terms_with_all_genes[terms_with_all_genes['GO Term'] == GO]
CHAP_IN = IN[IN['Subset'] == 'chaperone clients']
OTHERS_IN = IN[IN['Subset'] == 'other proteins']
OUT = terms_with_all_genes[terms_with_all_genes['GO Term'] != GO]
CHAP_OUT = OUT[OUT['Subset'] == 'chaperone clients']
OTHERS_OUT = OUT[OUT['Subset'] == 'other proteins']
OBSERVED = np.array([[len(CHAP_IN)/len(IN)*100, len(CHAP_OUT)/len(OUT)*100], [len(OTHERS_IN)/len(IN)*100, len(OTHERS_OUT)/len(OUT)*100]])
EXPECTED = stats.contingency.expected_freq(OBSERVED)
if (len(OBSERVED[OBSERVED >= 5]) > 0) & (len(EXPECTED[EXPECTED >= 5]) > 0):
test_type = 'chisquare'
chi2, pval, dof, expected = stats.chi2_contingency(OBSERVED)
all_tests.append([GO, test_type, chi2, pval])
else:
test_type = 'barnard'
barnard, pval = stats.barnard_exact(OBSERVED)
all_tests.append([GO, test_type, barnard, pval])
all_chisquare_pvals = pd.DataFrame(all_tests, columns=['GO Term', 'Test', 'statistic', 'p-value']).sort_values('p-value')
all_chisquare_pvals['FDR'] = multipletests(all_chisquare_pvals['p-value'], alpha=0.05, method='fdr_bh')[1]
all_chisquare_pvals.sort_values('FDR').to_csv('../data/over-representation_analysis/stats/corrected_chisquare/GO_distribution_chap_vs_others.csv', index=False)
return all_chisquare_pvals
if __name__ == "__main__":
tqdm.pandas()
if not os.path.isfile('../data/over-representation_analysis/stats/corrected_hypergeometric_tests/GO_gene_list.csv'):
mm_chap_clt = chaperone_clients_subset()
terms_with_all_genes = build_gene_list(mm_chap_clt)
print('Table S5 generated')
else:
print('Table S5 already generated!')
terms_with_all_genes = pd.read_csv('../data/over-representation_analysis/stats/corrected_hypergeometric_tests/GO_gene_list.csv')
GO = generate_input_for_figure()
terms_with_all_genes = terms_with_all_genes[terms_with_all_genes['GO Term'].isin(GO['GO Term'])]
hierarchical_order = list(pd.unique(terms_with_all_genes.sort_values(['GO Type', 'Fold Enrichment'], ascending=True)['GO Term']))[::-1]
analysis_list = terms_with_all_genes['Analysis'].value_counts().index.tolist()
analysis_cat = pd.Categorical(terms_with_all_genes['Analysis'], categories=analysis_list)
terms_with_all_genes = terms_with_all_genes.assign(Analysis=analysis_cat)
terms_with_all_genes['Analysis'] = terms_with_all_genes['Analysis'].cat.reorder_categories(['Domains', 'Proteins'])
all_chisquare_pvals = multiple_chisquare_tests(terms_with_all_genes)
print('GO Terms with significant chisquare pvalues ')
print(all_chisquare_pvals[all_chisquare_pvals['FDR'] <= 0.05])
fig = (p9.ggplot(
terms_with_all_genes,
p9.aes(x='GO Term', fill='Phenotype', alpha='Analysis')
)
+ p9.geom_bar()
+ p9.scale_x_discrete(limits=hierarchical_order)
+ p9.scale_fill_manual(values=('red', 'blue'))
+ p9.labs(y='Protein count')
+ p9.guides(
fill = p9.guide_legend(ncol=1),
alpha = p9.guide_legend(ncol=1)
)
+ p9.coord_flip()
+ p9.theme_classic()
+ p9.theme(figure_size=(6,15),
legend_background=p9.element_rect(size=2),
legend_box='horizontal',
legend_position='top')
+ p9.facet_wrap('Subset')
)
fig.save('../figures/FIGURE_S1.png', dpi=300)
# fig.save('../figures/FIGURE_S1.svg', dpi=300)
# fig.save('../figures/FIGURE_S1.pdf', dpi=300)
IN = terms_with_all_genes[terms_with_all_genes['GO Term'] == 'cytokine activity']
CHAP_IN = IN[IN['Subset'] == 'chaperone clients']
OTHERS_IN = IN[IN['Subset'] == 'other proteins']
OUT = terms_with_all_genes[terms_with_all_genes['GO Term'] != 'cytokine activity']
CHAP_OUT = OUT[OUT['Subset'] == 'chaperone clients']
OTHERS_OUT = OUT[OUT['Subset'] == 'other proteins']
OBSERVED = np.array([[len(CHAP_IN)/len(IN)*100, len(CHAP_OUT)/len(OUT)*100], [len(OTHERS_IN)/len(IN)*100, len(OTHERS_OUT)/len(OUT)*100]])
EXPECTED = stats.contingency.expected_freq(OBSERVED)
print('Observed')
print(pd.DataFrame(OBSERVED, columns=['Chap', 'Others']))
print('Expected')
print(pd.DataFrame(EXPECTED, columns=['Chap', 'Others']))
if (len(OBSERVED[OBSERVED >= 5]) > 0) & (len(EXPECTED[EXPECTED >= 5]) > 0):
test_type = 'chisquare'
chi2, pval, dof, expected = stats.chi2_contingency(OBSERVED)
print([GO, test_type, chi2, pval])
else:
test_type = 'barnard'
barnard, pval = stats.barnard_exact(OBSERVED)
print([GO, test_type, barnard, pval]) |
the-stack_106_31468 | from io import BytesIO
import fitz
import numpy as np
import pytest
import requests
from doctr import io
def test_convert_page_to_numpy(mock_pdf):
pdf = fitz.open(mock_pdf)
# Check correct read
rgb_page = io.pdf.convert_page_to_numpy(pdf[0], default_scales=(1, 1))
assert isinstance(rgb_page, np.ndarray)
assert rgb_page.shape == (842, 595, 3)
# Check channel order
bgr_page = io.pdf.convert_page_to_numpy(pdf[0], default_scales=(1, 1), bgr_output=True)
assert np.all(bgr_page == rgb_page[..., ::-1])
# Check resizing
resized_page = io.pdf.convert_page_to_numpy(pdf[0], output_size=(396, 306))
assert resized_page.shape == (396, 306, 3)
# Check rescaling
rgb_page = io.pdf.convert_page_to_numpy(pdf[0])
assert isinstance(rgb_page, np.ndarray)
assert rgb_page.shape == (1684, 1190, 3)
def _check_doc_content(doc_tensors, num_pages):
# 1 doc of 8 pages
assert len(doc_tensors) == num_pages
assert all(isinstance(page, np.ndarray) for page in doc_tensors)
assert all(page.dtype == np.uint8 for page in doc_tensors)
def test_read_pdf(mock_pdf):
doc = io.read_pdf(mock_pdf)
assert isinstance(doc, fitz.Document)
with open(mock_pdf, 'rb') as f:
doc = io.read_pdf(f.read())
assert isinstance(doc, fitz.Document)
# Wrong input type
with pytest.raises(TypeError):
_ = io.read_pdf(123)
# Wrong path
with pytest.raises(FileNotFoundError):
_ = io.read_pdf("my_imaginary_file.pdf")
def test_read_img_as_numpy(tmpdir_factory, mock_pdf):
# Wrong input type
with pytest.raises(TypeError):
_ = io.read_img_as_numpy(123)
# Non-existing file
with pytest.raises(FileNotFoundError):
io.read_img_as_numpy("my_imaginary_file.jpg")
# Invalid image
with pytest.raises(ValueError):
io.read_img_as_numpy(str(mock_pdf))
# From path
url = 'https://github.com/mindee/doctr/releases/download/v0.2.1/Grace_Hopper.jpg'
file = BytesIO(requests.get(url).content)
tmp_path = str(tmpdir_factory.mktemp("data").join("mock_img_file.jpg"))
with open(tmp_path, 'wb') as f:
f.write(file.getbuffer())
# Path & stream
with open(tmp_path, 'rb') as f:
page_stream = io.read_img_as_numpy(f.read())
for page in (io.read_img_as_numpy(tmp_path), page_stream):
# Data type
assert isinstance(page, np.ndarray)
assert page.dtype == np.uint8
# Shape
assert page.shape == (606, 517, 3)
# RGB
bgr_page = io.read_img_as_numpy(tmp_path, rgb_output=False)
assert np.all(page == bgr_page[..., ::-1])
# Resize
target_size = (200, 150)
resized_page = io.read_img_as_numpy(tmp_path, target_size)
assert resized_page.shape[:2] == target_size
def test_read_html():
url = "https://www.google.com"
pdf_stream = io.read_html(url)
assert isinstance(pdf_stream, bytes)
def test_document_file(mock_pdf, mock_image_stream):
pages = io.DocumentFile.from_images(mock_image_stream)
_check_doc_content(pages, 1)
assert isinstance(io.DocumentFile.from_pdf(mock_pdf).doc, fitz.Document)
assert isinstance(io.DocumentFile.from_url("https://www.google.com").doc, fitz.Document)
def test_pdf(mock_pdf):
doc = io.DocumentFile.from_pdf(mock_pdf)
# As images
pages = doc.as_images()
num_pages = 2
_check_doc_content(pages, num_pages)
# Get words
words = doc.get_words()
assert isinstance(words, list) and len(words) == num_pages
assert len([word for page_words in words for word in page_words]) == 9
assert all(isinstance(bbox, tuple) and isinstance(value, str)
for page_words in words for (bbox, value) in page_words)
assert all(all(isinstance(coord, float) for coord in bbox) for page_words in words for (bbox, value) in page_words)
# Get lines
lines = doc.get_lines()
assert isinstance(lines, list) and len(lines) == num_pages
assert len([line for page_lines in lines for line in page_lines]) == 2
assert all(isinstance(bbox, tuple) and isinstance(value, str)
for page_lines in lines for (bbox, value) in page_lines)
assert all(all(isinstance(coord, float) for coord in bbox) for page_lines in lines for (bbox, value) in page_lines)
# Get artefacts
artefacts = doc.get_artefacts()
assert isinstance(artefacts, list) and len(artefacts) == num_pages
assert len([art for page_art in artefacts for art in page_art]) == 0
assert all(isinstance(bbox, tuple) for page_artefacts in artefacts for bbox in page_artefacts)
assert all(all(isinstance(coord, float) for coord in bbox)
for page_artefacts in artefacts for bbox in page_artefacts)
|
the-stack_106_31469 | from typing import Any
from django.core.management.base import CommandParser
from zerver.lib.actions import bulk_add_subscriptions, ensure_stream
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """Add some or all users in a realm to a set of streams."""
def add_arguments(self, parser: CommandParser) -> None:
self.add_realm_args(parser, True)
self.add_user_list_args(parser, all_users_help="Add all users in realm to these streams.")
parser.add_argument(
'-s', '--streams',
dest='streams',
type=str,
required=True,
help='A comma-separated list of stream names.')
def handle(self, **options: Any) -> None:
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
user_profiles = self.get_users(options, realm)
stream_names = {stream.strip() for stream in options["streams"].split(",")}
for stream_name in set(stream_names):
for user_profile in user_profiles:
stream = ensure_stream(realm, stream_name)
_ignore, already_subscribed = bulk_add_subscriptions([stream], [user_profile])
was_there_already = user_profile.id in {tup[0].id for tup in already_subscribed}
print("{} {} to {}".format(
"Already subscribed" if was_there_already else "Subscribed",
user_profile.delivery_email, stream_name))
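# Example invocation (hypothetical values; the realm/user flags come from
# add_realm_args()/add_user_list_args(), whose exact option names live in
# ZulipBaseCommand rather than in this file -- only -s/--streams is defined here):
#
#   ./manage.py <this_command> -r zulip -s 'announce,social' --users=iago@zulip.com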
|
the-stack_106_31470 |
import numpy as np
from helpFun import funVar
def function(X):
# SOURCE:
# Theory and Analysis of Elastic Plates and Shells,
# J. N. Reddy (pag.222)
# Navier Solutions
[Nx, Ny, ne, E, h, a, b] = funVar()
x = X[0]
y = X[1]
s = b/a
alfa = 111e-6 # thermal diffusivity [m/s]
Q0 = 3000*a*b # point force [N]
m = -1
n = -1
## Rigidity matrix
Dconst = (E*h**3)/(12*(1-ne**2))
'''D = np.eye(6)
D[0,0] = D[1,1] = Dconst
D[0,1] = ne*Dconst
D[-1,-1] = (1-ne)*Dconst/2'''
k = 0 # spring coeff [N/m]
k_ = (k*b**4)/(Dconst*np.pi**4)
## Thermal stresses
T = 0 # Temperature [K]
del_T = (T*alfa*Dconst*(1+ne)*np.pi**2)/b**2
    qmn = 4*Q0/(a*b)  # Navier coefficient for a point load, q_mn = 4*Q0/(a*b); the original 4*Q0/a*b divided by a only
w0 = 0
w0_old = 0
cond = False
while cond == False:
m += 2
n += 2
Wmn = (b**4)/(Dconst*np.pi**4)*(qmn + del_T*(m**2 * s**2 + n**2))/ \
((m**2 * s**2 + n**2)**2 + k_)
w0 = w0 + Wmn*np.sin(m*np.pi*x/a)*np.sin(n*np.pi*y/b)
cond = np.nanmax((w0 - w0_old)/w0) < 1e-8
w0_old = w0.copy()
sigma_max = (6*qmn*2*b**2)/(np.pi**2*h**2*(s**2+1)**2)*(s**2+ne) # maximum tension [Pa]
return w0, sigma_max
def Encurvadura(x):
'''Encurvadura'''
l = x[0] # 0.4 # heigth [m]
e = x[1] # 0.001 # force eccentricity[m], 0 if none
P = 10 # force [N]
E = 117e9 # elastcity module ·[Pa] (copper)
r = 1/100 # [m]
I = (np.pi/4)*r**4
k = np.sqrt(np.abs(P)/(E*I))
flexa = e*(1-np.cos(k*l))/np.cos(k*l) # flexion [m]
Mf_max = P*(e+flexa) # maximum moment of flextion
W = l*(np.pi*r**2) # volume [mn]
tension_max = P/(np.pi*r**2) + Mf_max/W
return tension_max
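# Minimal usage sketch (added for illustration, not part of the original module):
# evaluates the Navier plate deflection on interior grid points of the plate
# defined by funVar() (edges are skipped to avoid 0/0 in the convergence check),
# plus the buckling helper at a sample (length, eccentricity) point.
if __name__ == "__main__":
    [_, _, _, _, _, a_, b_] = funVar()
    xx, yy = np.meshgrid(np.linspace(0, a_, 52)[1:-1], np.linspace(0, b_, 52)[1:-1])
    w0_grid, sigma_max = function([xx, yy])
    print("max deflection [m]:", np.nanmax(w0_grid), "| max tension [Pa]:", sigma_max)
    print("buckling tension [Pa]:", Encurvadura([0.4, 0.001]))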
|
the-stack_106_31472 | import os
import subprocess
from os.path import join
import numpy as np
import pandas as pd
import toml
import torch
from qmplot import manhattanplot
from scipy import stats
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from torch.utils.data import DataLoader
from tqdm import tqdm
from data.data_ukb import get_tfms, get_indiv_split, UKBRetina
from models.model_loading import GeneralModel
config = toml.load("paths.toml")
# need both: plink1's association computations are super slow; plink2 doesn't implement clumping
PLINK1 = config["PLINK1"]
PLINK2 = config["PLINK2"]
PRJ = config["CHECKPOINTS_BASE_PATH"]
BFILE = join(config["BASE_GEN"], "ukb_chr{chromo}_v2")
TEMPL = "gwas_results_chr{chromo}.PC{pc}.glm.linear"
# significance thresholds for clumping
P1 = 5e-8
P2 = 1e-7
SIZE = 448
COVARIATES = ["sex", "age"] + [f"genet_pc_{i}" for i in range(1, 16)]
WEIGHT_PATHS = {
# baselines
"barlow": join(PRJ, "barlow_r50_proj128/epoch_99-step_170399.ckpt"),
"byol": join(PRJ, "byol_r50_proj128/epoch_99-step_170399.ckpt"),
"nnclr": join(PRJ, "nnclr_r50_proj128/epoch_99-step_170399.ckpt"),
"simclr": join(PRJ, "simclr_r50_proj128/epoch_99-step_170399.ckpt"),
"simsiam": join(PRJ, "simsiam_r50_proj128/epoch_99-step_170399.ckpt"),
# ContIG
"rpb": join(PRJ, "cm_r50_raw_risks_burdens_outer_h1/checkpoints/last.ckpt"),
"rpb-inner": join(PRJ, "cm_r50_raw_risks_burdens_inner_h1/last.ckpt"),
"gen": join(PRJ, "cm_r50_raw_snps_h1/last.ckpt"),
"pgs": join(PRJ, "cm_r50_risk_scores_gen_h1/last.ckpt"),
"burdens": join(PRJ, "cm_r50_burden_scores_gen_h1/last.ckpt"),
}
def run_all_gwas(
split="test",
dev="cuda:0",
bs=10,
threads=20,
main_dir="gwas_results",
use_INT=True,
subset=None,
):
for key in WEIGHT_PATHS:
print("starting model", key)
run_transfer_gwas(
out_dir=join(main_dir, key),
weights_path=WEIGHT_PATHS[key],
split=split,
subset=subset,
dev=dev,
bs=bs,
threads=threads,
use_INT=use_INT,
)
def compare_models(main_dir="gwas_results"):
results = dict()
for key in WEIGHT_PATHS:
fn = join(main_dir, key, "final_clumps.csv")
if os.path.isfile(fn):
if os.path.getsize(fn) > 1:
res = pd.read_csv(fn)
results[key] = len(res)
else:
results[key] = 0
return results
def run_transfer_gwas(
out_dir="gwas_results",
weights_path=WEIGHT_PATHS["rpb"],
pheno_fn="transfer_embeddings.txt",
cov_fn="transfer_cov.txt",
size=SIZE,
split="valid",
comp=10,
dev="cuda:0",
threads=20,
seed=42,
use_INT=True,
bs=10,
subset=None,
):
os.makedirs(out_dir, exist_ok=True)
pheno_fn = join(out_dir, pheno_fn)
cov_fn = join(out_dir, cov_fn)
print("loading model & data...")
model = GeneralModel(checkpoint_path=weights_path, device=dev).eval()
tl, vl, ttl = get_gwas_data(
size=size,
batch_size=bs,
return_iid=True,
normalize_features=False,
seed=seed,
subset=subset,
)
loader = {"train": tl, "valid": vl, "test": ttl}[split]
print(f"computing {split} embeddings")
export_embeddings(
loader,
model,
pheno_fn=pheno_fn,
cov_fn=cov_fn,
comp=comp,
dev=dev,
use_INT=use_INT,
)
print(f"running GWAS")
run_plink(
pheno_fn=pheno_fn,
covar_fn=cov_fn,
out=join(out_dir, "gwas_results_chr{chromo}"),
threads=threads,
)
plot_gwas(
out_fn=join(out_dir, "mhat_plot"),
templ=join(out_dir, "gwas_results_chr{chromo}.PC{pc}.glm.linear"),
clip_min=1e-99,
)
clump_results(direc=out_dir)
def plot_gwas(
out_fn=join("gwas_results", "mhat_plot"),
templ=join("gwas_results", "gwas_results_chr{chromo}.PC{pc}.glm.linear"),
clip_min=1e-99,
):
df = get_plink_results(templ=templ)
df.P = df.P.clip(clip_min, 1)
manhattanplot(
data=df.dropna(how="any", axis=0),
figname=out_fn + ".png",
xticklabel_kws={"rotation": "vertical"},
)
manhattanplot(
data=df.dropna(how="any", axis=0),
figname=out_fn + ".pdf",
xticklabel_kws={"rotation": "vertical"},
)
def clump_results(
direc,
p1=P1,
p2=P2,
r2=0.1,
kb=150,
threads=20,
):
merged_fn = join(direc, "gwas_merged.txt")
merge_plink(
templ=join(direc, TEMPL),
out_fn=merged_fn,
)
for chromo in range(1, 23):
bfile = BFILE.format(chromo=chromo)
out_fn = join(direc, f"clumped_{chromo}")
cmd = f"{PLINK1} --bfile {bfile} --clump {merged_fn} --clump-p1 {p1} --clump-p2 {p2} --clump-r2 {r2} --clump-kb {kb} --threads {threads} --out {out_fn}"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print("errors:", error, flush=True)
print("output:", output, flush=True)
results = read_clumps(direc=direc)
results.to_csv(join(direc, "final_clumps.csv"), index=False)
n_clumps = len(results)
return results, n_clumps
def read_clumps(direc):
full_df = None
for chromo in range(1, 23):
clump_fn = join(direc, f"clumped_{chromo}.clumped")
if os.path.isfile(clump_fn):
print(f"reading file {clump_fn}")
            df = pd.read_csv(join(direc, f"clumped_{chromo}.clumped"), sep=r"\s+")
if full_df is None:
full_df = df
else:
full_df = pd.concat([full_df, df])
if full_df is None:
full_df = pd.DataFrame()
return full_df
def merge_plink(
out_fn=join("gwas_results", "gwas_merged.txt"), templ=join("gwas_results", TEMPL)
):
df = get_plink_results(templ=templ)
df["SNP"] = [line["ID"] for _, line in df.iterrows()]
df[["SNP", "P"]].to_csv(out_fn, header=True, index=False, sep=" ")
def get_plink_results(templ):
cols = ["#CHROM", "POS", "ID"]
for chromo in tqdm(range(1, 23)):
for pc in range(10):
sub_df = pd.read_csv(templ.format(chromo=chromo, pc=pc), sep="\t")
if pc == 0:
df = sub_df[cols]
df[f"P_PC{pc}"] = sub_df.P
if chromo == 1:
full_df = df
else:
full_df = pd.concat([full_df, df])
full_df["P"] = (10 * full_df.loc[:, [f"P_PC{i}" for i in range(10)]].min(1)).clip(
1e-320, 1
)
return full_df
def run_plink(
pheno_fn,
covar_fn,
out="gwas_results_chr{chromo}",
threads=20,
):
for chromo in range(1, 23):
bfile = BFILE.format(chromo=chromo)
out_fn = out.format(chromo=chromo)
print(f"running GWAS on chromo {chromo}", flush=True)
cmd = f"{PLINK2} --bfile {bfile} --linear hide-covar --covar {covar_fn} --pheno {pheno_fn} --threads {threads} --allow-no-sex --out {out_fn}"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
print("errors:", error, flush=True)
print("output:", output, flush=True)
@torch.no_grad()
def export_embeddings(
loader,
model, # ModelCLR
pheno_fn="tmp.txt",
cov_fn="cov.txt",
comp=10,
dev="cuda:0",
use_INT=False,
):
feats = []
covs = []
cov_inds = [loader.dataset.cov_columns.index(trait) for trait in COVARIATES]
iids = []
for imgs, cov, iid in tqdm(loader):
batch_embedding = model(imgs.to(dev)).cpu()
feats.append(batch_embedding)
covs.append(cov[:, cov_inds])
iids.append(iid)
covs = torch.cat(covs).double().numpy()
covs[:, COVARIATES.index("sex")] += 1
feats = torch.cat(feats).double().numpy()
iids = torch.cat(iids).numpy()
pca = PCA(n_components=comp)
feats = pca.fit_transform(feats)
cov = pd.DataFrame(
{
"FID": iids,
"IID": iids,
**dict((covariate, covs[:, i]) for i, covariate in enumerate(COVARIATES)),
}
)
cov.sex = cov.sex.astype(int)
cov.to_csv(cov_fn, header=True, index=False, sep="\t")
df = pd.DataFrame(
{
"FID": iids,
"IID": iids,
**dict((f"PC{i}", feats[:, i]) for i in range(comp)),
}
)
if use_INT:
df = inverse_rank_transform(df, cov, method="adjusted")
df.to_csv(pheno_fn, header=True, index=False, sep="\t")
return feats, iids
def get_gwas_data(
seed=42,
num_workers=8,
size=256,
normalize_features=True,
batch_size=50,
train_pct=0.7,
val_pct=0.2,
cov_fillna="mean",
return_iid=False,
eye="left",
subset=None,
):
t_iids, v_iids, tt_iids = get_indiv_split(
train_pct=train_pct, val_pct=val_pct, seed=seed
)
loaders = []
tfms = get_tfms(size=size, augmentation=False)
for iids, mode in [(t_iids, "train"), (v_iids, "valid"), (tt_iids, "test")]:
dset = UKBRetina(
eye=eye,
iid_selection=iids,
tfms=tfms,
normalize_features=normalize_features,
cov_fillna=cov_fillna,
return_iid=return_iid,
subset=subset,
)
dset = prune_iids(dset)
loader = DataLoader(
dset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True,
)
loaders.append(loader)
return loaders
def prune_iids(dset):
"""make sure each iid only occurs once"""
unique_iids = set(dset.iids)
used_iids = set()
paths = []
iids = []
for iid, path in tqdm(zip(dset.iids, dset.paths)):
if not iid in used_iids:
paths.append(path)
iids.append(iid)
used_iids.add(iid)
dset.iids = iids
dset.paths = paths
return dset
def inverse_rank_transform(df, cov, covars=None, qcovars=None, method="adjusted"):
pcs = range(df.shape[1] - 2)
if method == "adjusted":
cov.index = cov.IID
cov = cov.loc[df.IID]
cov = cov.drop(["IID", "FID"], 1)
df.index = df.IID
ind = np.intersect1d(cov.index, df.index)
cov = cov.loc[ind]
df = df.loc[ind]
df_adj = df.copy()
for pc in tqdm(pcs):
col = f"PC{pc}"
lr = LinearRegression()
df_adj[col] = df[col] - lr.fit(cov, df[col]).predict(cov)
df = df_adj
for pc in tqdm(pcs):
col = f"PC{pc}"
df[col] = INT(df[col])
return df
def INT(x, method="average", c=3.0 / 8):
"""perform rank-based inverse normal transform"""
r = stats.rankdata(x, method=method)
x = (r - c) / (len(x) - 2 * c + 1)
norm = stats.norm.ppf(x)
return norm
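# Sanity-check sketch (illustrative only, not part of the original pipeline):
# after the rank-based inverse normal transform, heavily skewed data should come
# out approximately standard normal.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    skewed = rng.exponential(scale=2.0, size=10_000)
    transformed = INT(skewed)
    print(f"mean ~ 0: {transformed.mean():.4f}, std ~ 1: {transformed.std():.4f}")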
|
the-stack_106_31473 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""utils"""
import os
import os.path as osp
import sys
import numpy as np
import mindspore.dataset as ds
def gen_idx(train_color_label, train_thermal_label):
"""
Generate
"""
color_pos = []
unique_label_color = np.unique(train_color_label)
for i in range(len(unique_label_color)):
tmp_pos = [k for k, v in enumerate(train_color_label) if v == unique_label_color[i]]
color_pos.append(tmp_pos)
thermal_pos = []
unique_label_thermal = np.unique(train_thermal_label)
for i in range(len(unique_label_thermal)):
tmp_pos = [k for k, v in enumerate(train_thermal_label) if v == unique_label_thermal[i]]
thermal_pos.append(tmp_pos)
return color_pos, thermal_pos
class IdentitySampler(ds.Sampler):
"""Sample person identities evenly in each batch.
Args:
train_color_label, train_thermal_label: labels of two modalities
color_pos, thermal_pos: positions of each identity
batchsize: batch size
"""
def __init__(self, train_color_label, train_thermal_label, color_pos, thermal_pos, num_pos, batchsize):
super(IdentitySampler, self).__init__()
# np.random.seed(0)
uni_label = np.unique(train_color_label)
self.n_classes = len(uni_label)
n = np.maximum(len(train_color_label), len(train_thermal_label))
index1 = index2 = None
for j in range(int(n / (batchsize * num_pos)) + 1):
batch_idx = np.random.choice(uni_label, batchsize, replace=False)
for i in range(batchsize):
sample_color = np.random.choice(color_pos[batch_idx[i]], num_pos)
sample_thermal = np.random.choice(thermal_pos[batch_idx[i]], num_pos)
if j == 0 and i == 0:
index1 = sample_color
index2 = sample_thermal
# else:
# index1 = np.hstack((index1, sample_color))
# index2 = np.hstack((index2, sample_thermal))
#
self.index1 = index1
self.index2 = index2
self.n = n
self.num_samples = n
def __iter__(self):
# return iter(np.arange(len(self.index1)))
for i in range(len(self.index1)):
yield i
def __len__(self):
return self.n
class AverageMeter:
"""Computers and stores the average & current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def mkdir_if_missing(directory):
"""mkdir_if_missing"""
if not osp.exists(directory):
try:
os.makedirs(directory)
except OSError as e:
print(e)
class Logger:
"""
Write console output to external text file.
Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py.
"""
def __init__(self, fpath=None):
self.console = sys.stdout
self.file = None
if fpath is not None:
mkdir_if_missing(osp.dirname(fpath))
self.file = open(fpath, 'w')
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, *args):
self.close()
def write(self, msg):
self.console.write(msg)
if self.file is not None:
self.file.write(msg)
def flush(self):
self.console.flush()
if self.file is not None:
self.file.flush()
os.fsync(self.file.fileno())
def close(self):
self.console.close()
if self.file is not None:
self.file.close()
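# Small self-test sketch (not part of the original utilities): tracks a running
# average with AverageMeter, the way a training loop would accumulate a loss.
if __name__ == "__main__":
    meter = AverageMeter()
    for step, loss in enumerate([0.9, 0.7, 0.6, 0.55], start=1):
        meter.update(loss)
        print("step {}: val={:.2f} avg={:.3f}".format(step, meter.val, meter.avg))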
|
the-stack_106_31475 | # -*- coding: utf-8 -*-
import unittest
from iemlav.lib.log_monitor.system_log.harmful_root_command import HarmfulCommands
from iemlav.logger import IemlAVLogger
try:
# if python 3.x.x
from unittest.mock import patch
except ImportError: # python 2.x.x
from mock import patch
class TestHarmfulCommands(unittest.TestCase):
"""
Test class for HarmfulCommands.
"""
def setUp(self):
"""
Setup class for HarmfulCommands.
"""
self.os = "debian"
@patch.object(IemlAVLogger, "log")
@patch.object(HarmfulCommands, "check_command")
@patch('iemlav.lib.log_monitor.system_log.harmful_root_command.utils')
def test_parse_log_file(self, mock_utils, mock_check, mock_log):
"""
Test parse_log_file.
"""
mock_utils.categorize_os.return_value = self.os
mock_utils.open_file.return_value = ["command"]
# Create HarmfulCommands object
self.harm_com_obj = HarmfulCommands()
mock_utils.open_file.return_value = ["COMMAND=command "]
mock_check.return_value = True
self.harm_com_obj.parse_log_file()
self.assertEqual(self.harm_com_obj.found_harmful,
["command"])
mock_log.assert_called_with('Possible harmful command found: command',
logtype='warning')
@patch('iemlav.lib.log_monitor.system_log.harmful_root_command.utils')
def test_check_command(self, mock_utils):
"""
Test check_command.
"""
mock_utils.categorize_os.return_value = self.os
mock_utils.open_file.return_value = ["command"]
# Create HarmfulCommands object
self.harm_com_obj = HarmfulCommands()
# Make the "command" as harmful
if "command" not in self.harm_com_obj.harmful_commands:
self.harm_com_obj.harmful_commands.append("command")
status = self.harm_com_obj.check_command("command")
self.assertTrue(status)
|
the-stack_106_31477 | # coding=utf-8
from setuptools import find_packages, setup
base_requires = [
'Click<7.0',
'ansible==3.0.0',
'backports.shutil_get_terminal_size',
'semver',
'junit_xml',
'structlog',
'boto3'
]
test_requires = base_requires + [
'mock',
'coverage',
'pep8<=1.7.0',
'yapf==0.14.0'
]
setup(
name='origin-ci-tool',
version='0.1.0',
url='https://www.github.com/openshift/origin-ci-tool',
maintainer='Steve Kuznetsov',
maintainer_email='[email protected]',
packages=find_packages(exclude=['tests']),
include_package_data=True,
dependency_links=[
'git+https://github.com/stevekuznetsov/ansible.git@skuznets/oct-release#egg=ansible-3.0.0'
],
install_requires=base_requires,
tests_require=test_requires,
extras_require={
'development': test_requires
},
entry_points='''
[console_scripts]
oct=oct.oct:oct_command
''',
)
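# Install sketch (comment only, not executed by setup.py): a development install
# that also pulls in the optional test/development dependencies declared in
# extras_require would typically be
#
#   pip install -e ".[development]"
#
# after which the `oct` console script declared in entry_points becomes available.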
|
the-stack_106_31479 | import torch
from chamfer_distance import ChamferDistance
import time
chamfer_dist = ChamferDistance()
p1 = torch.rand([80000, 25, 3])
p2 = torch.rand([80000, 15, 3])
s = time.time()
dist1, dist2, idx1, idx2 = chamfer_dist(p1, p2)
loss = (torch.mean(dist1)) + (torch.mean(dist2))
if torch.cuda.is_available():  # guard: synchronize() raises when no CUDA device is present, and p1/p2 are CPU tensors here
    torch.cuda.synchronize()
print(f"Time: {time.time() - s} seconds")
print(f"Loss: {loss}") |
the-stack_106_31480 | from auditlog.models import LogEntry
from django.contrib import admin
from polymorphic.admin import PolymorphicParentModelAdmin, PolymorphicChildModelAdmin
from dojo.models import Question, TextQuestion, ChoiceQuestion, Choice, \
Answer, TextAnswer, ChoiceAnswer, Engagement_Survey, Answered_Survey
admin.site.unregister(LogEntry)
# ==============================
# Defect Dojo Engaegment Surveys
# ==============================
class QuestionChildAdmin(PolymorphicChildModelAdmin):
"""
Base admin class for all child models of Question
"""
base_model = Question
class TextQuestionAdmin(QuestionChildAdmin):
"""
ModelAdmin for a TextQuestion
"""
class ChoiceQuestionAdmin(QuestionChildAdmin):
"""
ModelAdmin for a ChoiceQuestion
"""
class QuestionParentAdmin(PolymorphicParentModelAdmin):
"""
Question parent model admin
"""
base_model = Question
child_models = (
TextQuestion,
ChoiceQuestion
)
admin.site.register(TextQuestion, TextQuestionAdmin)
admin.site.register(ChoiceQuestion, ChoiceQuestionAdmin)
admin.site.register(Question, QuestionParentAdmin)
admin.site.register(Choice)
class AnswerChildAdmin(PolymorphicChildModelAdmin):
"""
Base admin class for all child Answer models
"""
base_model = Answer
class TextAnswerAdmin(AnswerChildAdmin):
"""
ModelAdmin for TextAnswer
"""
class ChoiceAnswerAdmin(AnswerChildAdmin):
"""
ModelAdmin for ChoiceAnswer
"""
class AnswerParentAdmin(PolymorphicParentModelAdmin):
"""
The parent model admin for answer
"""
list_display = (
'answered_survey',
'question',
)
base_model = Answer
child_models = (
TextAnswer,
ChoiceAnswer,
)
admin.site.register(TextAnswer, TextAnswerAdmin)
admin.site.register(ChoiceAnswer, ChoiceAnswerAdmin)
admin.site.register(Answer, AnswerParentAdmin)
admin.site.register(Engagement_Survey)
admin.site.register(Answered_Survey)
|
the-stack_106_31481 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''model'''
import numpy as np
from mindspore.nn import WithLossCell, TrainOneStepCell
from mindspore import context
from mindspore.train.callback import RunContext, ModelCheckpoint, CheckpointConfig, _InternalCallbackParam
import mindspore as ms
from mindspore import nn, Tensor
from mindspore.ops import operations as P
from mindspore.ops import functional as F
class MusicTaggerCNN(nn.Cell):
def __init__(self,
in_classes=[1, 128, 384, 768, 2048],
kernel_size=[3, 3, 3, 3, 3],
padding=[0] * 5,
maxpool=[(2, 4), (4, 5), (3, 8), (4, 8)],
has_bias=True):
super(MusicTaggerCNN, self).__init__()
self.in_classes = in_classes
self.kernel_size = kernel_size
self.maxpool = maxpool
self.padding = padding
self.has_bias = has_bias
# build model
self.conv1 = nn.Conv2d(self.in_classes[0], self.in_classes[1],
self.kernel_size[0])
self.conv2 = nn.Conv2d(self.in_classes[1], self.in_classes[2],
self.kernel_size[1])
self.conv3 = nn.Conv2d(self.in_classes[2], self.in_classes[3],
self.kernel_size[2])
self.conv4 = nn.Conv2d(self.in_classes[3], self.in_classes[4],
self.kernel_size[3])
self.bn1 = nn.BatchNorm2d(self.in_classes[1])
self.bn2 = nn.BatchNorm2d(self.in_classes[2])
self.bn3 = nn.BatchNorm2d(self.in_classes[3])
self.bn4 = nn.BatchNorm2d(self.in_classes[4])
self.pool1 = nn.MaxPool2d(maxpool[0], maxpool[0])
self.pool2 = nn.MaxPool2d(maxpool[1], maxpool[1])
self.pool3 = nn.MaxPool2d(maxpool[2], maxpool[2])
self.pool4 = nn.MaxPool2d(maxpool[3], maxpool[3])
self.poolreduce = P.ReduceMax(keep_dims=False)
self.Act = nn.ReLU()
self.flatten = nn.Flatten()
self.dense = nn.Dense(2048, 50, activation='sigmoid')
self.sigmoid = nn.Sigmoid()
def construct(self, input):
x = self.conv1(input)
x = self.bn1(x)
x = self.Act(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.Act(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.Act(x)
x = self.pool3(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.Act(x)
x = self.poolreduce(x, (2, 3))
x = self.flatten(x)
x = self.dense(x)
return x
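# Forward-pass sketch (illustrative): the (1, 1, 96, 1366) mel-spectrogram shape
# below is an assumption about the expected input and is not taken from this file;
# it survives the four pooling stages and yields the (batch, 50) tag predictions.
if __name__ == "__main__":
    net = MusicTaggerCNN()
    dummy = Tensor(np.random.rand(1, 1, 96, 1366).astype(np.float32))
    print(net(dummy).shape)  # expected: (1, 50)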
|
the-stack_106_31482 | import torch.optim as optim
import torch
import os
from net.resnet import *
from net.vgg import *
from net.lenet import LeNet
from net.googlenet import GoogLeNet
from net.mobilenet import *
from net.mobilenetv2 import *
from net.shufflenetv2 import *
from net.shufflenet import *
from net.densenet import *
from net.preact_resnet import *
from net.resnext import *
from net.wrn import *
from net.squeezenet import *
from net.senet import *
from net.efficientnet import *
from net.dpn import *
from dataloader import CIFAR10_DataLoader
class Trainer(object):
def __init__(self, args):
self.args = args
self.train_data = CIFAR10_DataLoader(train=True)
self.test_data = CIFAR10_DataLoader(train=False)
self.num_epochs = args.epoch
self.model = eval(args.model+str('()'))
if torch.cuda.is_available():
self.model = self.model.cuda()
self.criterion = nn.CrossEntropyLoss()
self.optimizer = optim.SGD(self.model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0001)
self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=100)
def train(self):
best_acc = 0.0
for epoch in range(self.num_epochs):
self.model.train()
sum_loss = 0.0
self.scheduler.step()
for i, data in enumerate(self.train_data):
inputs, labels = data
if torch.cuda.is_available():
inputs, labels = inputs.cuda(), labels.cuda()
self.optimizer.zero_grad()
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
loss.backward()
self.optimizer.step()
sum_loss += loss.item()
if i % 100 == 99:
print('[%d %d] loss:%.03f' %
(epoch + 1, i + 1, sum_loss / 100))
sum_loss = 0.0
acc = self.test()
if acc > best_acc:
best_acc = acc
state = {
'net': self.model.state_dict(),
'acc': best_acc,
'epoch': self.num_epochs,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(state, './checkpoint/{0}_{1}_{2}.pth'.format(self.args.model, best_acc, self.num_epochs))
with open('accuracy.txt', 'a') as f:
f.write('model={0}, acc={1}, epoch={2}\n'.format(self.args.model, best_acc, self.num_epochs))
def test(self):
self.model.eval()
correct = 0
total = 0
with torch.no_grad():
for i, data_test in enumerate(self.test_data):
images, labels = data_test
if torch.cuda.is_available():
                    images, labels = images.cuda(), labels.cuda()
outputs_test = self.model(images)
_, predicted = outputs_test.max(1)
correct += (predicted == labels).sum().item()
total += labels.size(0)
acc = correct / total
return acc
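# Usage sketch (hedged): the real entry point presumably parses these fields with
# argparse elsewhere; SimpleNamespace below only mimics the two attributes Trainer
# reads (`model`, `epoch`), and "ResNet18" assumes net.resnet exports a constructor
# with that name.
if __name__ == "__main__":
    from types import SimpleNamespace
    trainer = Trainer(SimpleNamespace(model="ResNet18", epoch=1))
    trainer.train()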
|
the-stack_106_31483 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Unit tests for the BackendComment and BackendCommentCollection classes."""
from datetime import datetime
from uuid import UUID
import pytz
from aiida import orm
from aiida.backends.testbase import AiidaTestCase
from aiida.common import exceptions, timezone
class TestBackendComment(AiidaTestCase):
"""Test BackendComment."""
@classmethod
def setUpClass(cls, *args, **kwargs):
super().setUpClass(*args, **kwargs)
cls.computer = cls.computer.backend_entity # Unwrap the `Computer` instance to `BackendComputer`
cls.user = cls.backend.users.create(email='tester@localhost').store()
def setUp(self):
super().setUp()
self.node = self.backend.nodes.create(
node_type='', user=self.user, computer=self.computer, label='label', description='description'
).store()
self.comment_content = 'comment content'
def create_comment(self, **kwargs):
"""Create BackendComment"""
node = kwargs['node'] if 'node' in kwargs else self.node
user = kwargs['user'] if 'user' in kwargs else self.user
ctime = kwargs['ctime'] if 'ctime' in kwargs else None
mtime = kwargs['mtime'] if 'mtime' in kwargs else None
return self.backend.comments.create(
node=node, user=user, content=self.comment_content, ctime=ctime, mtime=mtime
)
def test_creation(self):
"""Test creation of a BackendComment and all its properties."""
comment = self.backend.comments.create(node=self.node, user=self.user, content=self.comment_content)
# Before storing
self.assertIsNone(comment.id)
self.assertIsNone(comment.pk)
self.assertTrue(isinstance(comment.uuid, str))
self.assertTrue(comment.node, self.node)
self.assertTrue(isinstance(comment.ctime, datetime))
self.assertIsNone(comment.mtime)
self.assertTrue(comment.user, self.user)
self.assertEqual(comment.content, self.comment_content)
# Store the comment.ctime before the store as a reference
now = timezone.now()
comment_ctime_before_store = comment.ctime
self.assertTrue(now > comment.ctime, f'{comment.ctime} is not smaller than now {now}')
comment.store()
comment_ctime = comment.ctime
comment_mtime = comment.mtime
# The comment.ctime should have been unchanged, but the comment.mtime should have changed
self.assertEqual(comment.ctime, comment_ctime_before_store)
self.assertIsNotNone(comment.mtime)
self.assertTrue(now < comment.mtime, f'{comment.mtime} is not larger than now {now}')
# After storing
self.assertTrue(isinstance(comment.id, int))
self.assertTrue(isinstance(comment.pk, int))
self.assertTrue(isinstance(comment.uuid, str))
self.assertTrue(comment.node, self.node)
self.assertTrue(isinstance(comment.ctime, datetime))
self.assertTrue(isinstance(comment.mtime, datetime))
self.assertTrue(comment.user, self.user)
self.assertEqual(comment.content, self.comment_content)
# Try to construct a UUID from the UUID value to prove that it has a valid UUID
UUID(comment.uuid)
# Change a column, which should trigger the save, update the mtime but leave the ctime untouched
comment.set_content('test')
self.assertEqual(comment.ctime, comment_ctime)
self.assertTrue(comment.mtime > comment_mtime)
def test_creation_with_time(self):
"""
Test creation of a BackendComment when passing the mtime and the ctime. The passed ctime and mtime
should be respected since it is important for the correct import of nodes at the AiiDA import/export.
"""
ctime = datetime(2019, 2, 27, 16, 20, 12, 245738, pytz.utc)
mtime = datetime(2019, 2, 27, 16, 27, 14, 798838, pytz.utc)
comment = self.backend.comments.create(
node=self.node, user=self.user, content=self.comment_content, mtime=mtime, ctime=ctime
)
# Check that the ctime and mtime are the given ones
self.assertEqual(comment.ctime, ctime)
self.assertEqual(comment.mtime, mtime)
comment.store()
# Check that the given values remain even after storing
self.assertEqual(comment.ctime, ctime)
self.assertEqual(comment.mtime, mtime)
def test_delete(self):
"""Test `delete` method"""
# Create Comment, making sure it exists
comment = self.create_comment()
comment.store()
comment_uuid = str(comment.uuid)
builder = orm.QueryBuilder().append(orm.Comment, project='uuid')
no_of_comments = builder.count()
found_comments_uuid = [_[0] for _ in builder.all()]
self.assertIn(comment_uuid, found_comments_uuid)
# Delete Comment, making sure it was deleted
self.backend.comments.delete(comment.id)
builder = orm.QueryBuilder().append(orm.Comment, project='uuid')
self.assertEqual(builder.count(), no_of_comments - 1)
found_comments_uuid = [_[0] for _ in builder.all()]
self.assertNotIn(comment_uuid, found_comments_uuid)
def test_delete_all(self):
"""Test `delete_all` method"""
self.create_comment().store()
self.assertGreater(len(orm.Comment.objects.all()), 0, msg='There should be Comments in the database')
self.backend.comments.delete_all()
self.assertEqual(len(orm.Comment.objects.all()), 0, msg='All Comments should have been deleted')
def test_delete_many_no_filters(self):
"""Test `delete_many` method with empty filters"""
self.create_comment().store()
count = len(orm.Comment.objects.all())
self.assertGreater(count, 0)
# Pass empty filter to delete_many, making sure ValidationError is raised
with self.assertRaises(exceptions.ValidationError):
self.backend.comments.delete_many({})
self.assertEqual(
len(orm.Comment.objects.all()),
count,
msg='No Comments should have been deleted. There should still be {} Comment(s), '
'however {} Comment(s) was/were found.'.format(count, len(orm.Comment.objects.all()))
)
def test_delete_many_ids(self):
"""Test `delete_many` method filtering on both `id` and `uuid`"""
comment1 = self.create_comment()
comment2 = self.create_comment()
comment3 = self.create_comment()
comment_uuids = []
for comment in [comment1, comment2, comment3]:
comment.store()
comment_uuids.append(str(comment.uuid))
# Make sure they exist
count_comments_found = orm.QueryBuilder().append(orm.Comment, filters={'uuid': {'in': comment_uuids}}).count()
self.assertEqual(
count_comments_found,
len(comment_uuids),
msg='There should be {} Comments, instead {} Comment(s) was/were found'.format(
len(comment_uuids), count_comments_found
)
)
# Delete last two comments (comment2, comment3)
filters = {'or': [{'id': comment2.id}, {'uuid': str(comment3.uuid)}]}
self.backend.comments.delete_many(filters=filters)
# Check they were deleted
builder = orm.QueryBuilder().append(orm.Comment, filters={'uuid': {'in': comment_uuids}}, project='uuid').all()
found_comments_uuid = [_[0] for _ in builder]
self.assertEqual([comment_uuids[0]], found_comments_uuid)
def test_delete_many_dbnode_id(self):
"""Test `delete_many` method filtering on `dbnode_id`"""
# Create comments and separate node
calc = self.backend.nodes.create(
node_type='', user=self.user, computer=self.computer, label='label', description='description'
).store()
comment1 = self.create_comment(node=calc)
comment2 = self.create_comment()
comment3 = self.create_comment()
comment_uuids = []
for comment in [comment1, comment2, comment3]:
comment.store()
comment_uuids.append(str(comment.uuid))
# Make sure they exist
count_comments_found = orm.QueryBuilder().append(orm.Comment, filters={'uuid': {'in': comment_uuids}}).count()
self.assertEqual(
count_comments_found,
len(comment_uuids),
msg='There should be {} Comments, instead {} Comment(s) was/were found'.format(
len(comment_uuids), count_comments_found
)
)
# Delete comments for self.node
filters = {'dbnode_id': self.node.id}
self.backend.comments.delete_many(filters=filters)
# Check they were deleted
builder = orm.QueryBuilder().append(orm.Comment, filters={'uuid': {'in': comment_uuids}}, project='uuid').all()
found_comments_uuid = [_[0] for _ in builder]
self.assertEqual([comment_uuids[0]], found_comments_uuid)
# pylint: disable=too-many-locals
def test_delete_many_ctime_mtime(self):
"""Test `delete_many` method filtering on `ctime` and `mtime`"""
from datetime import timedelta
# Initialization
comment_uuids = []
found_comments_ctime = []
found_comments_mtime = []
found_comments_uuid = []
now = timezone.now()
two_days_ago = now - timedelta(days=2)
one_day_ago = now - timedelta(days=1)
comment_times = [now, one_day_ago, two_days_ago]
# Create comments
comment1 = self.create_comment(ctime=now, mtime=now)
comment2 = self.create_comment(ctime=one_day_ago, mtime=now)
comment3 = self.create_comment(ctime=two_days_ago, mtime=one_day_ago)
for comment in [comment1, comment2, comment3]:
comment.store()
comment_uuids.append(str(comment.uuid))
# Make sure they exist with the correct times
builder = orm.QueryBuilder().append(orm.Comment, project=['ctime', 'mtime', 'uuid'])
self.assertGreater(builder.count(), 0)
for comment in builder.all():
found_comments_ctime.append(comment[0])
found_comments_mtime.append(comment[1])
found_comments_uuid.append(comment[2])
for time, uuid in zip(comment_times, comment_uuids):
self.assertIn(time, found_comments_ctime)
self.assertIn(uuid, found_comments_uuid)
if time != two_days_ago:
self.assertIn(time, found_comments_mtime)
# Delete comments that are created more than 1 hour ago,
# unless they have been modified within 5 hours
ctime_turning_point = now - timedelta(seconds=60 * 60)
mtime_turning_point = now - timedelta(seconds=60 * 60 * 5)
filters = {'and': [{'ctime': {'<': ctime_turning_point}}, {'mtime': {'<': mtime_turning_point}}]}
self.backend.comments.delete_many(filters=filters)
# Check only the most stale comment (comment3) was deleted
builder = orm.QueryBuilder().append(orm.Comment, project='uuid')
self.assertGreater(builder.count(), 1) # There should still be at least 2
found_comments_uuid = [_[0] for _ in builder.all()]
self.assertNotIn(comment_uuids[2], found_comments_uuid)
# Make sure the other comments were not deleted
for comment_uuid in comment_uuids[:-1]:
self.assertIn(comment_uuid, found_comments_uuid)
def test_delete_many_user_id(self):
"""Test `delete_many` method filtering on `user_id`"""
# Create comments and separate user
user_two = self.backend.users.create(email='tester_two@localhost').store()
comment1 = self.create_comment(user=user_two)
comment2 = self.create_comment()
comment3 = self.create_comment()
comment_uuids = []
for comment in [comment1, comment2, comment3]:
comment.store()
comment_uuids.append(str(comment.uuid))
# Make sure they exist
builder = orm.QueryBuilder().append(orm.Comment, project='uuid')
self.assertGreater(builder.count(), 0)
found_comments_uuid = [_[0] for _ in builder.all()]
for comment_uuid in comment_uuids:
self.assertIn(comment_uuid, found_comments_uuid)
# Delete last comments for `self.user`
filters = {'user_id': self.user.id}
self.backend.comments.delete_many(filters=filters)
# Check they were deleted
builder = orm.QueryBuilder().append(orm.Comment, project='uuid')
found_comments_uuid = [_[0] for _ in builder.all()]
self.assertGreater(builder.count(), 0)
for comment_uuid in comment_uuids[1:]:
self.assertNotIn(comment_uuid, found_comments_uuid)
# Make sure the first comment (comment1) was not deleted
self.assertIn(comment_uuids[0], found_comments_uuid)
def test_deleting_non_existent_entities(self):
"""Test deleting non-existent Comments for different cases"""
comment = self.create_comment()
comment.store()
comment_id = comment.id
comment_uuid = comment.uuid
# Get a non-existent Comment
valid_comment_found = True
id_ = 0
while valid_comment_found:
id_ += 1
builder = orm.QueryBuilder().append(orm.Comment, filters={'id': id_})
if builder.count() == 0:
valid_comment_found = False
# Try to delete non-existing Comment - using delete_many
# delete_many should return an empty list
deleted_entities = self.backend.comments.delete_many(filters={'id': id_})
self.assertEqual(
deleted_entities, [], msg=f'No entities should have been deleted, since Comment id {id_} does not exist'
)
# Try to delete non-existing Comment - using delete
# NotExistent should be raised, since no entities are found
with self.assertRaises(exceptions.NotExistent) as exc:
self.backend.comments.delete(comment_id=id_)
self.assertIn(f"Comment with id '{id_}' not found", str(exc.exception))
# Try to delete existing and non-existing Comment - using delete_many
# delete_many should return a list that *only* includes the existing Comment
filters = {'id': {'in': [id_, comment_id]}}
deleted_entities = self.backend.comments.delete_many(filters=filters)
self.assertEqual([comment_id],
deleted_entities,
msg=f'Only Comment id {comment_id} should be returned from delete_many')
# Make sure the existing Comment was deleted
builder = orm.QueryBuilder().append(orm.Comment, filters={'uuid': comment_uuid})
self.assertEqual(builder.count(), 0)
# Get a non-existent Node
valid_node_found = True
id_ = 0
while valid_node_found:
id_ += 1
builder = orm.QueryBuilder().append(orm.Node, filters={'id': id_})
if builder.count() == 0:
valid_node_found = False
# Try to delete Comments filtering on non-existing dbnode_id
# NotExistent should NOT be raised nor should any Comments be deleted
comment_count_before = orm.QueryBuilder().append(orm.Comment).count()
filters = {'dbnode_id': id_}
self.backend.comments.delete_many(filters=filters)
comment_count_after = orm.QueryBuilder().append(orm.Comment).count()
self.assertEqual(
comment_count_after,
comment_count_before,
msg='The number of comments changed after performing `delete_many`, '
"while filtering for a non-existing 'dbnode_id'"
)
def test_delete_many_same_twice(self):
"""Test no exception is raised when entity is filtered by both `id` and `uuid`"""
# Create comment
comment = self.create_comment()
comment.store()
comment_id = comment.id
comment_uuid = comment.uuid
# Try to delete Comment by specifying both `id` and `uuid` for it - nothing should be raised
self.backend.comments.delete_many(filters={'id': comment_id, 'uuid': comment_uuid})
# Make sure comment is removed
builder = orm.QueryBuilder().append(orm.Comment, filters={'uuid': comment_uuid})
self.assertEqual(builder.count(), 0)
def test_delete_wrong_type(self):
"""Test TypeError is raised when `filters` is wrong type"""
with self.assertRaises(TypeError):
self.backend.comments.delete(comment_id=None)
def test_delete_many_wrong_type(self):
"""Test TypeError is raised when `filters` is wrong type"""
with self.assertRaises(TypeError):
self.backend.comments.delete_many(filters=None)
|
the-stack_106_31484 | #!/usr/bin/env python
"""
A small binary showcasing the search library
"""
import random
from typing import List
import numpy as np # type: ignore
from termcolor import colored
from search.algorithms.astar import AStar
from search.algorithms.bfs import BFS
from search.algorithms.dfs import DFS
from search.algorithms.dijkstra import Dijkstra
from search.algorithms.greedy import Greedy
from search.algorithms.search import HeuristicSearchAlgorithm, SearchAlgorithm
from search.problems.grid.board2d import Grid2DMetaProblem, Grid2DProblem
from search.problems.grid.bomb import Bombs2DMetaProblem, Bombs2DProblem
from search.problems.grid.sokoban import SokobanMetaProblem, SokobanProblem
from search.problems.nm_puzzle import NMPuzzleMetaProblem, NMPuzzleProblem
from search.space import Heuristic, Problem
def solve(algorithm_class, problem: Problem, heuristic_class):
"""Solves a problem with a given search algorithm.
Returns: Dictionary with a summary and key metrics.
"""
if issubclass(algorithm_class, HeuristicSearchAlgorithm):
search_algorithm = algorithm_class(problem, heuristic_class(problem))
else:
search_algorithm = algorithm_class(problem)
assert search_algorithm is not None
goal_node = search_algorithm.search()
assert search_algorithm.time_ns is not None
time_ms = (search_algorithm.time_ns) / 1_000_000.0
stats = {
"summary": "No solution found for this problem after {} expansions".format(
search_algorithm.expansions
),
"cost": float("inf"),
"length": float("inf"),
"expansions": search_algorithm.expansions,
"states_generated": search_algorithm.states_generated,
"states_reached": search_algorithm.states_reached,
"nodes_created": search_algorithm.nodes_created,
"nodes_updated": search_algorithm.nodes_updated,
"time_ms": time_ms,
}
if goal_node is None:
stats[
"summary"
] = "No solution found for this problem after {} expansions".format(
search_algorithm.expansions
)
return stats
path = goal_node.path(problem.space)
stats["summary"] = "Expanded {:-3} nodes in t:{:.2}ms to find: {}".format(
search_algorithm.expansions, time_ms, path
)
stats["path"] = path
stats["cost"] = path.cost()
stats["length"] = len(path)
stats["actions"] = path.actions()
return stats
def compare(algorithms: List[SearchAlgorithm], problem: Problem, heuristic_class):
"""Solves a problem with many search algorithms and compares the solutions.
Returns: Dictionary with a summary and key metrics.
"""
print(
"Solving this {} problem with the '{}' heuristic,".format(
problem.space.__class__.__name__, heuristic_class
)
)
print(problem.start_to_str())
solutions = dict()
best = {
"cost": float("inf"),
"length": float("inf"),
"expansions": float("inf"),
"states_generated": float("inf"),
"states_reached": float("inf"),
"nodes_created": float("inf"),
"nodes_updated": float("inf"),
"time_ms": float("inf"),
}
metrics = list(best.keys())
for algorithm in algorithms:
solutions[algorithm] = solve(algorithm, problem, heuristic_class)
for metric in metrics:
if solutions[algorithm][metric] < best[metric]:
best[metric] = solutions[algorithm][metric]
metrics.remove("cost")
for a, solution in solutions.items():
print(" * {:20}: {}".format(a.name(), solution["summary"]))
if solution["cost"] > best["cost"]:
print(
" -",
colored("Sub-optimal!!", "red", attrs=[]),
" {} ({} vs {})".format(
colored(
"{:.2%}".format(solution["cost"] / best["cost"]),
"red",
attrs=["bold"],
),
solution["cost"],
best["cost"],
),
)
for metric in metrics:
if solution[metric] > best[metric]:
if best[metric] > 0:
ratio = solution[metric] / best[metric]
else:
ratio = float("inf")
color = None
attrs = []
if ratio < 1.04:
continue
if ratio >= 1.5:
color = "red"
attrs = ["bold"]
elif ratio >= 1.2:
color = "yellow"
attrs = ["bold"]
elif ratio >= 1.1:
color = "white"
attrs = ["bold"]
print(
" - Not the best on {}!! {} ({} vs {})".format(
colored("{:12}".format(metric), color, attrs=attrs),
colored(
"{:.2%}".format(ratio),
color,
attrs=attrs,
),
solution[metric],
best[metric],
)
)
print("")
return solutions
def main():
"""A simple program solving an easy maze."""
metaproblems_dict = {
Grid2DMetaProblem: {
"problems": [
Grid2DMetaProblem(
[
" G ",
" ####",
" ",
"#### ",
" ",
"S ",
]
),
Grid2DMetaProblem(
[
"G ",
" ",
"########## ",
" ",
" G",
" ##########",
" ",
" ",
"########## ",
" ",
"S ",
]
),
# It can't get easier right?
Grid2DMetaProblem(
[
"G S{:60}G".format(" "),
]
),
# What if there's no goal?
Grid2DMetaProblem(
[
" S{:60} ".format(" "),
]
),
],
"heuristics": Grid2DProblem.all_heuristics(),
},
Bombs2DMetaProblem: {
"problems": [
Bombs2DMetaProblem(
[
" G",
"###",
"B S",
],
starting_bombs=0,
),
Bombs2DMetaProblem(
[
"G ",
"#####",
" B",
"S ",
"B ",
],
starting_bombs=0,
),
],
"heuristics": Bombs2DProblem.all_heuristics(),
},
NMPuzzleMetaProblem: {
"problems": [
# NOTE(ddaroch): The puzzles from these states to random goals might
# be unexpected.
NMPuzzleMetaProblem(
np.array(
[
[1, 2, 0],
]
)
),
NMPuzzleMetaProblem(
np.array(
[
[0, 1, 2],
[3, 4, 5],
[8, 6, 7],
]
)
),
NMPuzzleMetaProblem(
np.array(
[
[11, 1, 2],
[0, 4, 5],
[3, 7, 8],
[6, 9, 10],
]
)
),
],
"heuristics": NMPuzzleProblem.all_heuristics(),
},
SokobanMetaProblem: {
"problems": [
SokobanMetaProblem(
[
"S !G G",
"B B#!#B# ",
"G#G# #G# ",
]
),
],
"heuristics": SokobanProblem.all_heuristics(),
},
}
random_problems = 1
# pylint: disable=invalid-name
algorithms = [
DFS,
BFS,
Dijkstra,
# TODO: Add IDDFS
# IDDFS,
Greedy,
AStar,
]
for problem_class in metaproblems_dict:
metaproblems = metaproblems_dict[problem_class]["problems"]
heuristic_classes = metaproblems_dict[problem_class]["heuristics"]
if len(heuristic_classes) == 0:
heuristic_classes = [Heuristic]
# Generate all the problems for this problem class
problems = []
for mp in metaproblems:
# Add all the simple given problems
for p in mp.simple_given():
problems.append(p)
# Add all the multi-goal given problems
for p in mp.multi_goal_given():
problems.append(p)
random.seed(1)
for _ in range(random_problems):
problems.append(mp.simple_random())
for problem in problems:
for heuristic_class in heuristic_classes:
compare(algorithms, problem, heuristic_class)
if __name__ == "__main__":
main()
|
the-stack_106_31487 | from PIL import ImageOps
from utils.endpoint import SimpleFilter, setup
@setup
class VerticalMirror(SimpleFilter):
def use_filter(self, img, kwargs):
img = img.convert('RGBA')
half = round(img.height / 2)
        use_last_half = bool(kwargs.get('last_half', False))
if use_last_half:
first_half = img.crop((0, half, img.width, img.height))
second_half = img.crop((0, img.width - half, img.width, img.height))
second_half = ImageOps.flip(second_half)
second_half = second_half.crop((0, 0, img.width, img.height))
second_half.paste(first_half, (0, img.width - half), first_half)
return second_half
first_half = img.crop((0, 0, img.width, half))
second_half = img.crop((0, 0, img.width, img.height - half))
second_half = ImageOps.flip(second_half)
first_half = first_half.crop((0, 0, img.width, img.height))
first_half.paste(second_half, (0, half), second_half)
return first_half |
the-stack_106_31491 | # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import io
import pytest
from tests import create_session
from tests import mock
from tests import unittest
from tests import RawResponse
from dateutil.tz import tzutc, tzoffset
import datetime
import copy
import botocore
from botocore import xform_name
from botocore.compat import json
from botocore.compat import six
from botocore.awsrequest import AWSRequest, HeadersDict
from botocore.exceptions import InvalidExpressionError, ConfigNotFound
from botocore.exceptions import ClientError, ConnectionClosedError
from botocore.exceptions import InvalidDNSNameError, MetadataRetrievalError
from botocore.exceptions import InvalidIMDSEndpointError
from botocore.exceptions import InvalidIMDSEndpointModeError
from botocore.exceptions import ReadTimeoutError
from botocore.exceptions import ConnectTimeoutError
from botocore.exceptions import UnsupportedS3ArnError
from botocore.exceptions import UnsupportedS3AccesspointConfigurationError
from botocore.exceptions import UnsupportedOutpostResourceError
from botocore.model import ServiceModel
from botocore.model import OperationModel
from botocore.utils import ensure_boolean
from botocore.utils import resolve_imds_endpoint_mode
from botocore.utils import is_json_value_header
from botocore.utils import remove_dot_segments
from botocore.utils import normalize_url_path
from botocore.utils import validate_jmespath_for_set
from botocore.utils import set_value_from_jmespath
from botocore.utils import parse_key_val_file_contents
from botocore.utils import parse_key_val_file
from botocore.utils import parse_timestamp
from botocore.utils import parse_to_aware_datetime
from botocore.utils import datetime2timestamp
from botocore.utils import CachedProperty
from botocore.utils import ArgumentGenerator
from botocore.utils import calculate_tree_hash
from botocore.utils import calculate_sha256
from botocore.utils import is_valid_endpoint_url
from botocore.utils import fix_s3_host
from botocore.utils import switch_to_virtual_host_style
from botocore.utils import instance_cache
from botocore.utils import merge_dicts
from botocore.utils import lowercase_dict
from botocore.utils import get_service_module_name
from botocore.utils import percent_encode_sequence
from botocore.utils import percent_encode
from botocore.utils import switch_host_s3_accelerate
from botocore.utils import deep_merge
from botocore.utils import S3RegionRedirector
from botocore.utils import InvalidArnException
from botocore.utils import ArnParser
from botocore.utils import S3ArnParamHandler
from botocore.utils import S3EndpointSetter
from botocore.utils import ContainerMetadataFetcher
from botocore.utils import InstanceMetadataFetcher
from botocore.utils import InstanceMetadataRegionFetcher
from botocore.utils import IMDSRegionProvider
from botocore.utils import SSOTokenLoader
from botocore.utils import is_valid_uri, is_valid_ipv6_endpoint_url
from botocore.utils import has_header
from botocore.exceptions import SSOTokenLoadError
from botocore.model import DenormalizedStructureBuilder
from botocore.model import ShapeResolver
from botocore.config import Config
from botocore.session import Session
class TestEnsureBoolean(unittest.TestCase):
def test_boolean_true(self):
self.assertEqual(ensure_boolean(True), True)
def test_boolean_false(self):
self.assertEqual(ensure_boolean(False), False)
def test_string_true(self):
self.assertEqual(ensure_boolean('True'), True)
def test_string_false(self):
self.assertEqual(ensure_boolean('False'), False)
def test_string_lowercase_true(self):
self.assertEqual(ensure_boolean('true'), True)
def test_invalid_type_false(self):
self.assertEqual(ensure_boolean({'foo': 'bar'}), False)
class TestResolveIMDSEndpointMode(unittest.TestCase):
def create_session_with_config(self, endpoint_mode, imds_use_IPv6):
session = create_session()
session.set_config_variable('ec2_metadata_service_endpoint_mode',
endpoint_mode)
session.set_config_variable('imds_use_ipv6',
imds_use_IPv6)
return session
def test_resolve_endpoint_mode_no_config(self):
session = self.create_session_with_config(None, None)
self.assertEqual(resolve_imds_endpoint_mode(session), 'ipv4')
def test_resolve_endpoint_mode_IPv6(self):
session = self.create_session_with_config('IPv6', None)
self.assertEqual(resolve_imds_endpoint_mode(session), 'ipv6')
def test_resolve_endpoint_mode_IPv4(self):
session = self.create_session_with_config('IPv4', None)
self.assertEqual(resolve_imds_endpoint_mode(session), 'ipv4')
def test_resolve_endpoint_mode_none_use_IPv6_true(self):
session = self.create_session_with_config(None, True)
self.assertEqual(resolve_imds_endpoint_mode(session), 'ipv6')
def test_resolve_endpoint_mode_none_use_IPv6_false(self):
session = self.create_session_with_config(None, False)
self.assertEqual(resolve_imds_endpoint_mode(session), 'ipv4')
def test_resolve_endpoint_mode_IPv6_use_IPv6_false(self):
session = self.create_session_with_config('IPv6', False)
self.assertEqual(resolve_imds_endpoint_mode(session), 'ipv6')
def test_resolve_endpoint_mode_IPv4_use_IPv6_true(self):
session = self.create_session_with_config('IPv4', True)
self.assertEqual(resolve_imds_endpoint_mode(session), 'ipv4')
def test_resolve_endpoint_mode_IPv6_use_IPv6_true(self):
session = self.create_session_with_config('IPv6', True)
self.assertEqual(resolve_imds_endpoint_mode(session), 'ipv6')
def test_resolve_endpoint_mode_IPv6_mixed_casing_use_IPv6_true(self):
session = self.create_session_with_config('iPv6', None)
self.assertEqual(resolve_imds_endpoint_mode(session), 'ipv6')
def test_resolve_endpoint_mode_invalid_input(self):
session = self.create_session_with_config('IPv3', True)
with self.assertRaises(InvalidIMDSEndpointModeError):
resolve_imds_endpoint_mode(session)
class TestIsJSONValueHeader(unittest.TestCase):
def test_no_serialization_section(self):
shape = mock.Mock()
shape.type_name = 'string'
self.assertFalse(is_json_value_header(shape))
def test_non_jsonvalue_shape(self):
shape = mock.Mock()
shape.serialization = {
'location': 'header'
}
shape.type_name = 'string'
self.assertFalse(is_json_value_header(shape))
def test_non_header_jsonvalue_shape(self):
shape = mock.Mock()
shape.serialization = {
'jsonvalue': True
}
shape.type_name = 'string'
self.assertFalse(is_json_value_header(shape))
def test_non_string_jsonvalue_shape(self):
shape = mock.Mock()
shape.serialization = {
'location': 'header',
'jsonvalue': True
}
shape.type_name = 'integer'
self.assertFalse(is_json_value_header(shape))
def test_json_value_header(self):
shape = mock.Mock()
shape.serialization = {
'jsonvalue': True,
'location': 'header'
}
shape.type_name = 'string'
self.assertTrue(is_json_value_header(shape))
class TestURINormalization(unittest.TestCase):
def test_remove_dot_segments(self):
self.assertEqual(remove_dot_segments('../foo'), 'foo')
self.assertEqual(remove_dot_segments('../../foo'), 'foo')
self.assertEqual(remove_dot_segments('./foo'), 'foo')
self.assertEqual(remove_dot_segments('/./'), '/')
self.assertEqual(remove_dot_segments('/../'), '/')
self.assertEqual(remove_dot_segments('/foo/bar/baz/../qux'), '/foo/bar/qux')
self.assertEqual(remove_dot_segments('/foo/..'), '/')
self.assertEqual(remove_dot_segments('foo/bar/baz'), 'foo/bar/baz')
self.assertEqual(remove_dot_segments('..'), '')
self.assertEqual(remove_dot_segments('.'), '')
self.assertEqual(remove_dot_segments('/.'), '/')
self.assertEqual(remove_dot_segments('/.foo'), '/.foo')
self.assertEqual(remove_dot_segments('/..foo'), '/..foo')
self.assertEqual(remove_dot_segments(''), '')
self.assertEqual(remove_dot_segments('/a/b/c/./../../g'), '/a/g')
self.assertEqual(remove_dot_segments('mid/content=5/../6'), 'mid/6')
# I don't think this is RFC compliant...
self.assertEqual(remove_dot_segments('//foo//'), '/foo/')
def test_empty_url_normalization(self):
self.assertEqual(normalize_url_path(''), '/')
class TestTransformName(unittest.TestCase):
def test_upper_camel_case(self):
self.assertEqual(xform_name('UpperCamelCase'), 'upper_camel_case')
self.assertEqual(xform_name('UpperCamelCase', '-'), 'upper-camel-case')
def test_lower_camel_case(self):
self.assertEqual(xform_name('lowerCamelCase'), 'lower_camel_case')
self.assertEqual(xform_name('lowerCamelCase', '-'), 'lower-camel-case')
def test_consecutive_upper_case(self):
self.assertEqual(xform_name('HTTPHeaders'), 'http_headers')
self.assertEqual(xform_name('HTTPHeaders', '-'), 'http-headers')
def test_consecutive_upper_case_middle_string(self):
self.assertEqual(xform_name('MainHTTPHeaders'), 'main_http_headers')
self.assertEqual(xform_name('MainHTTPHeaders', '-'),
'main-http-headers')
def test_s3_prefix(self):
self.assertEqual(xform_name('S3BucketName'), 's3_bucket_name')
def test_already_snake_cased(self):
self.assertEqual(xform_name('leave_alone'), 'leave_alone')
self.assertEqual(xform_name('s3_bucket_name'), 's3_bucket_name')
self.assertEqual(xform_name('bucket_s3_name'), 'bucket_s3_name')
def test_special_cases(self):
# Some patterns don't actually match the rules we expect.
self.assertEqual(xform_name('SwapEnvironmentCNAMEs'),
'swap_environment_cnames')
self.assertEqual(xform_name('SwapEnvironmentCNAMEs', '-'),
'swap-environment-cnames')
self.assertEqual(xform_name('CreateCachediSCSIVolume', '-'),
'create-cached-iscsi-volume')
self.assertEqual(xform_name('DescribeCachediSCSIVolumes', '-'),
'describe-cached-iscsi-volumes')
self.assertEqual(xform_name('DescribeStorediSCSIVolumes', '-'),
'describe-stored-iscsi-volumes')
self.assertEqual(xform_name('CreateStorediSCSIVolume', '-'),
'create-stored-iscsi-volume')
self.assertEqual(xform_name('sourceServerIDs', '-'),
'source-server-ids')
def test_special_case_ends_with_s(self):
self.assertEqual(xform_name('GatewayARNs', '-'), 'gateway-arns')
def test_partial_rename(self):
transformed = xform_name('IPV6', '-')
self.assertEqual(transformed, 'ipv6')
transformed = xform_name('IPV6', '_')
self.assertEqual(transformed, 'ipv6')
def test_s3_partial_rename(self):
transformed = xform_name('s3Resources', '-')
self.assertEqual(transformed, 's3-resources')
transformed = xform_name('s3Resources', '_')
self.assertEqual(transformed, 's3_resources')
class TestValidateJMESPathForSet(unittest.TestCase):
def setUp(self):
super(TestValidateJMESPathForSet, self).setUp()
self.data = {
'Response': {
'Thing': {
'Id': 1,
'Name': 'Thing #1',
}
},
'Marker': 'some-token'
}
def test_invalid_exp(self):
with self.assertRaises(InvalidExpressionError):
validate_jmespath_for_set('Response.*.Name')
with self.assertRaises(InvalidExpressionError):
validate_jmespath_for_set('Response.Things[0]')
with self.assertRaises(InvalidExpressionError):
validate_jmespath_for_set('')
with self.assertRaises(InvalidExpressionError):
validate_jmespath_for_set('.')
class TestSetValueFromJMESPath(unittest.TestCase):
def setUp(self):
super(TestSetValueFromJMESPath, self).setUp()
self.data = {
'Response': {
'Thing': {
'Id': 1,
'Name': 'Thing #1',
}
},
'Marker': 'some-token'
}
def test_single_depth_existing(self):
set_value_from_jmespath(self.data, 'Marker', 'new-token')
self.assertEqual(self.data['Marker'], 'new-token')
def test_single_depth_new(self):
self.assertFalse('Limit' in self.data)
set_value_from_jmespath(self.data, 'Limit', 100)
self.assertEqual(self.data['Limit'], 100)
def test_multiple_depth_existing(self):
set_value_from_jmespath(self.data, 'Response.Thing.Name', 'New Name')
self.assertEqual(self.data['Response']['Thing']['Name'], 'New Name')
def test_multiple_depth_new(self):
self.assertFalse('Brand' in self.data)
set_value_from_jmespath(self.data, 'Brand.New', {'abc': 123})
self.assertEqual(self.data['Brand']['New']['abc'], 123)
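# A minimal sketch of the behavior tested above, assuming only simple dotted
# expressions (no wildcards or indexing, which validate_jmespath_for_set
# rejects). Illustrative only; this is not botocore's implementation.
def _set_value_by_dotted_path_sketch(data, expression, value):
    parts = expression.split('.')
    for key in parts[:-1]:
        # Create intermediate dictionaries as needed (see the
        # test_multiple_depth_new case above).
        data = data.setdefault(key, {})
    data[parts[-1]] = value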
class TestParseEC2CredentialsFile(unittest.TestCase):
def test_parse_ec2_content(self):
contents = "AWSAccessKeyId=a\nAWSSecretKey=b\n"
self.assertEqual(parse_key_val_file_contents(contents),
{'AWSAccessKeyId': 'a',
'AWSSecretKey': 'b'})
def test_parse_ec2_content_empty(self):
contents = ""
self.assertEqual(parse_key_val_file_contents(contents), {})
def test_key_val_pair_with_blank_lines(self):
        # The \n\n leaves a blank line between the access and secret keys.
contents = "AWSAccessKeyId=a\n\nAWSSecretKey=b\n"
self.assertEqual(parse_key_val_file_contents(contents),
{'AWSAccessKeyId': 'a',
'AWSSecretKey': 'b'})
def test_key_val_parser_lenient(self):
# Ignore any line that does not have a '=' char in it.
contents = "AWSAccessKeyId=a\nNOTKEYVALLINE\nAWSSecretKey=b\n"
self.assertEqual(parse_key_val_file_contents(contents),
{'AWSAccessKeyId': 'a',
'AWSSecretKey': 'b'})
def test_multiple_equals_on_line(self):
contents = "AWSAccessKeyId=a\nAWSSecretKey=secret_key_with_equals=b\n"
self.assertEqual(parse_key_val_file_contents(contents),
{'AWSAccessKeyId': 'a',
'AWSSecretKey': 'secret_key_with_equals=b'})
def test_os_error_raises_config_not_found(self):
mock_open = mock.Mock()
mock_open.side_effect = OSError()
with self.assertRaises(ConfigNotFound):
parse_key_val_file('badfile', _open=mock_open)
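# A rough sketch of the lenient key=value parsing described by the tests
# above (illustrative only, not botocore's parse_key_val_file_contents):
# lines without '=' are skipped, and only the first '=' splits key from value.
def _parse_key_val_sketch(contents):
    parsed = {}
    for line in contents.splitlines():
        if '=' not in line:
            continue
        key, _, value = line.partition('=')
        parsed[key] = value
    return parsed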
class TestParseTimestamps(unittest.TestCase):
def test_parse_iso8601(self):
self.assertEqual(
parse_timestamp('1970-01-01T00:10:00.000Z'),
datetime.datetime(1970, 1, 1, 0, 10, tzinfo=tzutc()))
def test_parse_epoch(self):
self.assertEqual(
parse_timestamp(1222172800),
datetime.datetime(2008, 9, 23, 12, 26, 40, tzinfo=tzutc()))
def test_parse_epoch_zero_time(self):
self.assertEqual(
parse_timestamp(0),
datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc()))
def test_parse_epoch_as_string(self):
self.assertEqual(
parse_timestamp('1222172800'),
datetime.datetime(2008, 9, 23, 12, 26, 40, tzinfo=tzutc()))
def test_parse_rfc822(self):
self.assertEqual(
parse_timestamp('Wed, 02 Oct 2002 13:00:00 GMT'),
datetime.datetime(2002, 10, 2, 13, 0, tzinfo=tzutc()))
def test_parse_gmt_in_uk_time(self):
        # In the UK the clocks switch between GMT and BST for daylight
        # saving time, so time.tzname reports both time zone names.
        # dateutil sees GMT as a local time zone and parses the value as
        # local time, which ends up being BST instead of GMT. To remedy
        # this we provide a time zone context that enforces GMT == UTC.
with mock.patch('time.tzname', ('GMT', 'BST')):
self.assertEqual(
parse_timestamp('Wed, 02 Oct 2002 13:00:00 GMT'),
datetime.datetime(2002, 10, 2, 13, 0, tzinfo=tzutc()))
def test_parse_invalid_timestamp(self):
with self.assertRaises(ValueError):
parse_timestamp('invalid date')
def test_parse_timestamp_fails_with_bad_tzinfo(self):
mock_tzinfo = mock.Mock()
mock_tzinfo.__name__ = 'tzinfo'
mock_tzinfo.side_effect = OSError()
mock_get_tzinfo_options = mock.MagicMock(return_value=(mock_tzinfo,))
with mock.patch('botocore.utils.get_tzinfo_options', mock_get_tzinfo_options):
with self.assertRaises(RuntimeError):
parse_timestamp(0)
class TestDatetime2Timestamp(unittest.TestCase):
def test_datetime2timestamp_naive(self):
self.assertEqual(
datetime2timestamp(datetime.datetime(1970, 1, 2)), 86400)
def test_datetime2timestamp_aware(self):
tzinfo = tzoffset("BRST", -10800)
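        # BRST is UTC-3, so 1970-01-02T00:00:00-03:00 is 1970-01-02T03:00:00Z,
        # i.e. 86400 + 3 * 3600 = 97200 seconds after the epoch.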
self.assertEqual(
datetime2timestamp(datetime.datetime(1970, 1, 2, tzinfo=tzinfo)),
97200)
class TestParseToUTCDatetime(unittest.TestCase):
def test_handles_utc_time(self):
original = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc())
self.assertEqual(parse_to_aware_datetime(original), original)
def test_handles_other_timezone(self):
tzinfo = tzoffset("BRST", -10800)
original = datetime.datetime(2014, 1, 1, 0, 0, 0, tzinfo=tzinfo)
self.assertEqual(parse_to_aware_datetime(original), original)
def test_handles_naive_datetime(self):
original = datetime.datetime(1970, 1, 1, 0, 0, 0)
expected = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc())
self.assertEqual(parse_to_aware_datetime(original), expected)
def test_handles_string_epoch(self):
expected = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc())
self.assertEqual(parse_to_aware_datetime('0'), expected)
def test_handles_int_epoch(self):
expected = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc())
self.assertEqual(parse_to_aware_datetime(0), expected)
def test_handles_full_iso_8601(self):
expected = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc())
self.assertEqual(
parse_to_aware_datetime('1970-01-01T00:00:00Z'),
expected)
def test_year_only_iso_8601(self):
expected = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc())
self.assertEqual(parse_to_aware_datetime('1970-01-01'), expected)
class TestCachedProperty(unittest.TestCase):
def test_cached_property_same_value(self):
class CacheMe(object):
@CachedProperty
def foo(self):
return 'foo'
c = CacheMe()
self.assertEqual(c.foo, 'foo')
self.assertEqual(c.foo, 'foo')
def test_cached_property_only_called_once(self):
# Note: you would normally never want to cache
# a property that returns a new value each time,
# but this is done to demonstrate the caching behavior.
class NoIncrement(object):
def __init__(self):
self.counter = 0
@CachedProperty
def current_value(self):
self.counter += 1
return self.counter
c = NoIncrement()
self.assertEqual(c.current_value, 1)
        # If the property weren't cached, the next value would be 2,
        # but because it is cached, we know the value will still be 1.
self.assertEqual(c.current_value, 1)
class TestArgumentGenerator(unittest.TestCase):
def setUp(self):
self.arg_generator = ArgumentGenerator()
def assert_skeleton_from_model_is(self, model, generated_skeleton):
shape = DenormalizedStructureBuilder().with_members(
model).build_model()
actual = self.arg_generator.generate_skeleton(shape)
self.assertEqual(actual, generated_skeleton)
def test_generate_string(self):
self.assert_skeleton_from_model_is(
model={
'A': {'type': 'string'}
},
generated_skeleton={
'A': ''
}
)
def test_generate_string_enum(self):
enum_values = ['A', 'B', 'C']
model = {
'A': {'type': 'string', 'enum': enum_values}
}
shape = DenormalizedStructureBuilder().with_members(
model).build_model()
actual = self.arg_generator.generate_skeleton(shape)
self.assertIn(actual['A'], enum_values)
def test_generate_scalars(self):
self.assert_skeleton_from_model_is(
model={
'A': {'type': 'string'},
'B': {'type': 'integer'},
'C': {'type': 'float'},
'D': {'type': 'boolean'},
'E': {'type': 'timestamp'},
'F': {'type': 'double'},
},
generated_skeleton={
'A': '',
'B': 0,
'C': 0.0,
'D': True,
'E': datetime.datetime(1970, 1, 1, 0, 0, 0),
'F': 0.0,
}
)
def test_will_use_member_names_for_string_values(self):
self.arg_generator = ArgumentGenerator(use_member_names=True)
self.assert_skeleton_from_model_is(
model={
'A': {'type': 'string'},
'B': {'type': 'integer'},
'C': {'type': 'float'},
'D': {'type': 'boolean'},
},
generated_skeleton={
'A': 'A',
'B': 0,
'C': 0.0,
'D': True,
}
)
def test_will_use_member_names_for_string_values_of_list(self):
self.arg_generator = ArgumentGenerator(use_member_names=True)
        # We're not using assert_skeleton_from_model_is
        # because we can't really control the names of the string shapes
        # created by the DenormalizedStructureBuilder. We can only
        # control the names of structure and list shapes.
shape_map = ShapeResolver({
'InputShape': {
'type': 'structure',
'members': {
'StringList': {'shape': 'StringList'},
}
},
'StringList': {
'type': 'list',
'member': {'shape': 'StringType'},
},
'StringType': {
'type': 'string',
}
})
shape = shape_map.get_shape_by_name('InputShape')
actual = self.arg_generator.generate_skeleton(shape)
expected = {'StringList': ['StringType']}
self.assertEqual(actual, expected)
def test_generate_nested_structure(self):
self.assert_skeleton_from_model_is(
model={
'A': {
'type': 'structure',
'members': {
'B': {'type': 'string'},
}
}
},
generated_skeleton={
'A': {'B': ''}
}
)
def test_generate_scalar_list(self):
self.assert_skeleton_from_model_is(
model={
'A': {
'type': 'list',
'member': {
'type': 'string'
}
},
},
generated_skeleton={
'A': [''],
}
)
def test_generate_scalar_map(self):
self.assert_skeleton_from_model_is(
model={
'A': {
'type': 'map',
'key': {'type': 'string'},
'value': {'type': 'string'},
}
},
generated_skeleton={
'A': {
'KeyName': '',
}
}
)
def test_handles_recursive_shapes(self):
# We're not using assert_skeleton_from_model_is
# because we can't use a DenormalizedStructureBuilder,
# we need a normalized model to represent recursive
# shapes.
shape_map = ShapeResolver({
'InputShape': {
'type': 'structure',
'members': {
'A': {'shape': 'RecursiveStruct'},
'B': {'shape': 'StringType'},
}
},
'RecursiveStruct': {
'type': 'structure',
'members': {
'C': {'shape': 'RecursiveStruct'},
'D': {'shape': 'StringType'},
}
},
'StringType': {
'type': 'string',
}
})
shape = shape_map.get_shape_by_name('InputShape')
actual = self.arg_generator.generate_skeleton(shape)
expected = {
'A': {
'C': {
                    # For recursive shapes, we'll just show
                    # an empty dict.
},
'D': ''
},
'B': ''
}
self.assertEqual(actual, expected)
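# A rough sketch of the skeleton generation described above, assuming the
# botocore.model shape interface (type_name, members, member, value). It is
# illustrative only and, unlike ArgumentGenerator, has no guard against
# recursive shapes.
_SCALAR_DEFAULTS_SKETCH = {
    'string': '', 'integer': 0, 'long': 0, 'float': 0.0, 'double': 0.0,
    'boolean': True, 'timestamp': datetime.datetime(1970, 1, 1),
}
def _skeleton_sketch(shape):
    if shape.type_name == 'structure':
        return dict((name, _skeleton_sketch(member))
                    for name, member in shape.members.items())
    if shape.type_name == 'list':
        return [_skeleton_sketch(shape.member)]
    if shape.type_name == 'map':
        return {'KeyName': _skeleton_sketch(shape.value)}
    return _SCALAR_DEFAULTS_SKETCH.get(shape.type_name, '')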
class TestChecksums(unittest.TestCase):
def test_empty_hash(self):
self.assertEqual(
calculate_sha256(six.BytesIO(b''), as_hex=True),
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
def test_as_hex(self):
self.assertEqual(
calculate_sha256(six.BytesIO(b'hello world'), as_hex=True),
'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9')
def test_as_binary(self):
self.assertEqual(
calculate_sha256(six.BytesIO(b'hello world'), as_hex=False),
(b"\xb9M'\xb9\x93M>\x08\xa5.R\xd7\xda}\xab\xfa\xc4\x84\xef"
b"\xe3zS\x80\xee\x90\x88\xf7\xac\xe2\xef\xcd\xe9"))
class TestTreeHash(unittest.TestCase):
    # Note that the expected tree hashes in these tests were
    # independently verified against other SDK implementations.
def test_empty_tree_hash(self):
self.assertEqual(
calculate_tree_hash(six.BytesIO(b'')),
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
def test_tree_hash_less_than_one_mb(self):
one_k = six.BytesIO(b'a' * 1024)
self.assertEqual(
calculate_tree_hash(one_k),
'2edc986847e209b4016e141a6dc8716d3207350f416969382d431539bf292e4a')
def test_tree_hash_exactly_one_mb(self):
one_meg_bytestring = b'a' * (1 * 1024 * 1024)
one_meg = six.BytesIO(one_meg_bytestring)
self.assertEqual(
calculate_tree_hash(one_meg),
'9bc1b2a288b26af7257a36277ae3816a7d4f16e89c1e7e77d0a5c48bad62b360')
def test_tree_hash_multiple_of_one_mb(self):
four_mb = six.BytesIO(b'a' * (4 * 1024 * 1024))
self.assertEqual(
calculate_tree_hash(four_mb),
'9491cb2ed1d4e7cd53215f4017c23ec4ad21d7050a1e6bb636c4f67e8cddb844')
def test_tree_hash_offset_of_one_mb_multiple(self):
offset_four_mb = six.BytesIO(b'a' * (4 * 1024 * 1024) + b'a' * 20)
self.assertEqual(
calculate_tree_hash(offset_four_mb),
'12f3cbd6101b981cde074039f6f728071da8879d6f632de8afc7cdf00661b08f')
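# A reference sketch of the tree hash asserted above, assuming the Glacier
# tree-hash scheme: SHA-256 over each 1 MB chunk, then pairwise-concatenate
# and re-hash the digests until a single root digest remains. Illustrative
# only; this is not botocore's calculate_tree_hash.
def _tree_hash_sketch(fileobj, chunk_size=1024 * 1024):
    import binascii
    import hashlib
    digests = []
    chunk = fileobj.read(chunk_size)
    while chunk:
        digests.append(hashlib.sha256(chunk).digest())
        chunk = fileobj.read(chunk_size)
    if not digests:
        # An empty stream hashes to SHA-256 of the empty string.
        digests = [hashlib.sha256(b'').digest()]
    while len(digests) > 1:
        paired = []
        for i in range(0, len(digests), 2):
            if i + 1 < len(digests):
                combined = digests[i] + digests[i + 1]
                paired.append(hashlib.sha256(combined).digest())
            else:
                # An odd digest is carried up to the next level unchanged.
                paired.append(digests[i])
        digests = paired
    return binascii.hexlify(digests[0]).decode('ascii')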
class TestIsValidEndpointURL(unittest.TestCase):
def test_dns_name_is_valid(self):
self.assertTrue(is_valid_endpoint_url('https://s3.amazonaws.com/'))
def test_ip_address_is_allowed(self):
self.assertTrue(is_valid_endpoint_url('https://10.10.10.10/'))
def test_path_component_ignored(self):
self.assertTrue(
is_valid_endpoint_url('https://foo.bar.com/other/path/'))
def test_can_have_port(self):
self.assertTrue(is_valid_endpoint_url('https://foo.bar.com:12345/'))
def test_ip_can_have_port(self):
self.assertTrue(is_valid_endpoint_url('https://10.10.10.10:12345/'))
def test_cannot_have_spaces(self):
self.assertFalse(is_valid_endpoint_url('https://my invalid name/'))
def test_missing_scheme(self):
self.assertFalse(is_valid_endpoint_url('foo.bar.com'))
def test_no_new_lines(self):
self.assertFalse(is_valid_endpoint_url('https://foo.bar.com\nbar/'))
def test_long_hostname(self):
        long_hostname = 'https://%s.com' % ('a' * 256)
self.assertFalse(is_valid_endpoint_url(long_hostname))
def test_hostname_can_end_with_dot(self):
self.assertTrue(is_valid_endpoint_url('https://foo.bar.com./'))
def test_hostname_no_dots(self):
self.assertTrue(is_valid_endpoint_url('https://foo/'))
class TestFixS3Host(unittest.TestCase):
def test_fix_s3_host_initial(self):
request = AWSRequest(
method='PUT', headers={},
url='https://s3-us-west-2.amazonaws.com/bucket/key.txt'
)
region_name = 'us-west-2'
signature_version = 's3'
fix_s3_host(
request=request, signature_version=signature_version,
region_name=region_name)
self.assertEqual(request.url,
'https://bucket.s3-us-west-2.amazonaws.com/key.txt')
self.assertEqual(request.auth_path, '/bucket/key.txt')
def test_fix_s3_host_only_applied_once(self):
request = AWSRequest(
method='PUT', headers={},
url='https://s3.us-west-2.amazonaws.com/bucket/key.txt'
)
region_name = 'us-west-2'
signature_version = 's3'
fix_s3_host(
request=request, signature_version=signature_version,
region_name=region_name)
# Calling the handler again should not affect the end result:
fix_s3_host(
request=request, signature_version=signature_version,
region_name=region_name)
self.assertEqual(request.url,
'https://bucket.s3.us-west-2.amazonaws.com/key.txt')
# This was a bug previously. We want to make sure that
# calling fix_s3_host() again does not alter the auth_path.
# Otherwise we'll get signature errors.
self.assertEqual(request.auth_path, '/bucket/key.txt')
def test_dns_style_not_used_for_get_bucket_location(self):
original_url = 'https://s3-us-west-2.amazonaws.com/bucket?location'
request = AWSRequest(
method='GET', headers={},
url=original_url,
)
signature_version = 's3'
region_name = 'us-west-2'
fix_s3_host(
request=request, signature_version=signature_version,
region_name=region_name)
# The request url should not have been modified because this is
# a request for GetBucketLocation.
self.assertEqual(request.url, original_url)
def test_can_provide_default_endpoint_url(self):
request = AWSRequest(
method='PUT', headers={},
url='https://s3-us-west-2.amazonaws.com/bucket/key.txt'
)
region_name = 'us-west-2'
signature_version = 's3'
fix_s3_host(
request=request, signature_version=signature_version,
region_name=region_name,
default_endpoint_url='foo.s3.amazonaws.com')
self.assertEqual(request.url,
'https://bucket.foo.s3.amazonaws.com/key.txt')
def test_no_endpoint_url_uses_request_url(self):
request = AWSRequest(
method='PUT', headers={},
url='https://s3-us-west-2.amazonaws.com/bucket/key.txt'
)
region_name = 'us-west-2'
signature_version = 's3'
fix_s3_host(
request=request, signature_version=signature_version,
region_name=region_name,
# A value of None means use the url in the current request.
default_endpoint_url=None,
)
self.assertEqual(request.url,
'https://bucket.s3-us-west-2.amazonaws.com/key.txt')
class TestSwitchToVirtualHostStyle(unittest.TestCase):
def test_switch_to_virtual_host_style(self):
request = AWSRequest(
method='PUT', headers={},
url='https://foo.amazonaws.com/bucket/key.txt'
)
region_name = 'us-west-2'
signature_version = 's3'
switch_to_virtual_host_style(
request=request, signature_version=signature_version,
region_name=region_name)
self.assertEqual(request.url,
'https://bucket.foo.amazonaws.com/key.txt')
self.assertEqual(request.auth_path, '/bucket/key.txt')
def test_uses_default_endpoint(self):
request = AWSRequest(
method='PUT', headers={},
url='https://foo.amazonaws.com/bucket/key.txt'
)
region_name = 'us-west-2'
signature_version = 's3'
switch_to_virtual_host_style(
request=request, signature_version=signature_version,
region_name=region_name, default_endpoint_url='s3.amazonaws.com')
self.assertEqual(request.url,
'https://bucket.s3.amazonaws.com/key.txt')
self.assertEqual(request.auth_path, '/bucket/key.txt')
def test_throws_invalid_dns_name_error(self):
request = AWSRequest(
method='PUT', headers={},
url='https://foo.amazonaws.com/mybucket.foo/key.txt'
)
region_name = 'us-west-2'
signature_version = 's3'
with self.assertRaises(InvalidDNSNameError):
switch_to_virtual_host_style(
request=request, signature_version=signature_version,
region_name=region_name)
    def test_switch_to_virtual_host_style_only_applied_once(self):
request = AWSRequest(
method='PUT', headers={},
url='https://foo.amazonaws.com/bucket/key.txt'
)
region_name = 'us-west-2'
signature_version = 's3'
switch_to_virtual_host_style(
request=request, signature_version=signature_version,
region_name=region_name)
# Calling the handler again should not affect the end result:
switch_to_virtual_host_style(
request=request, signature_version=signature_version,
region_name=region_name)
self.assertEqual(request.url,
'https://bucket.foo.amazonaws.com/key.txt')
        # This was a bug previously. We want to make sure that
        # calling switch_to_virtual_host_style() again does not alter
        # the auth_path. Otherwise we'll get signature errors.
self.assertEqual(request.auth_path, '/bucket/key.txt')
def test_virtual_host_style_for_make_bucket(self):
request = AWSRequest(
method='PUT', headers={},
url='https://foo.amazonaws.com/bucket'
)
region_name = 'us-west-2'
signature_version = 's3'
switch_to_virtual_host_style(
request=request, signature_version=signature_version,
region_name=region_name)
self.assertEqual(request.url,
'https://bucket.foo.amazonaws.com/')
def test_virtual_host_style_not_used_for_get_bucket_location(self):
original_url = 'https://foo.amazonaws.com/bucket?location'
request = AWSRequest(
method='GET', headers={},
url=original_url,
)
signature_version = 's3'
region_name = 'us-west-2'
switch_to_virtual_host_style(
request=request, signature_version=signature_version,
region_name=region_name)
# The request url should not have been modified because this is
# a request for GetBucketLocation.
self.assertEqual(request.url, original_url)
def test_virtual_host_style_not_used_for_list_buckets(self):
original_url = 'https://foo.amazonaws.com/'
request = AWSRequest(
method='GET', headers={},
url=original_url,
)
signature_version = 's3'
region_name = 'us-west-2'
switch_to_virtual_host_style(
request=request, signature_version=signature_version,
region_name=region_name)
        # The request url should not have been modified because this is
        # a request for ListBuckets.
self.assertEqual(request.url, original_url)
def test_is_unaffected_by_sigv4(self):
request = AWSRequest(
method='PUT', headers={},
url='https://foo.amazonaws.com/bucket/key.txt'
)
region_name = 'us-west-2'
signature_version = 's3v4'
switch_to_virtual_host_style(
request=request, signature_version=signature_version,
region_name=region_name, default_endpoint_url='s3.amazonaws.com')
self.assertEqual(request.url,
'https://bucket.s3.amazonaws.com/key.txt')
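# A minimal sketch of the path-style to virtual-host-style rewrite exercised
# by the two test classes above: the first path segment is treated as the
# bucket name and promoted into the hostname. Illustrative only; the real
# handlers also manage auth_path, GetBucketLocation, and DNS-name validation.
def _to_virtual_host_style_sketch(url):
    parts = six.moves.urllib.parse.urlsplit(url)
    bucket, _, key = parts.path.lstrip('/').partition('/')
    return '%s://%s.%s/%s' % (parts.scheme, bucket, parts.netloc, key)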
class TestSwitchToChunkedEncodingForNonSeekableObjects(unittest.TestCase):
    def test_switch_to_chunked_encoding_for_stream_like_object(self):
request = AWSRequest(
method='POST', headers={},
data=io.BufferedIOBase(b"some initial binary data"),
url='https://foo.amazonaws.com/bucket/key.txt'
)
prepared_request = request.prepare()
self.assertEqual(
prepared_request.headers, {'Transfer-Encoding': 'chunked'}
)
class TestInstanceCache(unittest.TestCase):
class DummyClass(object):
def __init__(self, cache):
self._instance_cache = cache
@instance_cache
def add(self, x, y):
return x + y
@instance_cache
def sub(self, x, y):
return x - y
def setUp(self):
self.cache = {}
def test_cache_single_method_call(self):
adder = self.DummyClass(self.cache)
self.assertEqual(adder.add(2, 1), 3)
# This should result in one entry in the cache.
self.assertEqual(len(self.cache), 1)
# When we call the method with the same args,
# we should reuse the same entry in the cache.
self.assertEqual(adder.add(2, 1), 3)
self.assertEqual(len(self.cache), 1)
def test_can_cache_multiple_methods(self):
adder = self.DummyClass(self.cache)
adder.add(2, 1)
# A different method results in a new cache entry,
# so now there should be two elements in the cache.
self.assertEqual(adder.sub(2, 1), 1)
self.assertEqual(len(self.cache), 2)
self.assertEqual(adder.sub(2, 1), 1)
def test_can_cache_kwargs(self):
adder = self.DummyClass(self.cache)
adder.add(x=2, y=1)
self.assertEqual(adder.add(x=2, y=1), 3)
self.assertEqual(len(self.cache), 1)
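# A minimal sketch of an instance-level memoizing decorator matching the
# behavior tested above (keyed on the method name plus positional and keyword
# arguments, stored on self._instance_cache). Illustrative only; this is not
# botocore's instance_cache.
import functools
def _instance_cache_sketch(func):
    @functools.wraps(func)
    def _wrapper(self, *args, **kwargs):
        key = (func.__name__, args, tuple(sorted(kwargs.items())))
        if key not in self._instance_cache:
            self._instance_cache[key] = func(self, *args, **kwargs)
        return self._instance_cache[key]
    return _wrapper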
class TestMergeDicts(unittest.TestCase):
def test_merge_dicts_overrides(self):
first = {
'foo': {'bar': {'baz': {'one': 'ORIGINAL', 'two': 'ORIGINAL'}}}}
second = {'foo': {'bar': {'baz': {'one': 'UPDATE'}}}}
merge_dicts(first, second)
# The value from the second dict wins.
self.assertEqual(first['foo']['bar']['baz']['one'], 'UPDATE')
# And we still preserve the other attributes.
self.assertEqual(first['foo']['bar']['baz']['two'], 'ORIGINAL')
def test_merge_dicts_new_keys(self):
first = {
'foo': {'bar': {'baz': {'one': 'ORIGINAL', 'two': 'ORIGINAL'}}}}
second = {'foo': {'bar': {'baz': {'three': 'UPDATE'}}}}
merge_dicts(first, second)
self.assertEqual(first['foo']['bar']['baz']['one'], 'ORIGINAL')
self.assertEqual(first['foo']['bar']['baz']['two'], 'ORIGINAL')
self.assertEqual(first['foo']['bar']['baz']['three'], 'UPDATE')
def test_merge_empty_dict_does_nothing(self):
first = {'foo': {'bar': 'baz'}}
merge_dicts(first, {})
self.assertEqual(first, {'foo': {'bar': 'baz'}})
def test_more_than_one_sub_dict(self):
first = {'one': {'inner': 'ORIGINAL', 'inner2': 'ORIGINAL'},
'two': {'inner': 'ORIGINAL', 'inner2': 'ORIGINAL'}}
second = {'one': {'inner': 'UPDATE'}, 'two': {'inner': 'UPDATE'}}
merge_dicts(first, second)
self.assertEqual(first['one']['inner'], 'UPDATE')
self.assertEqual(first['one']['inner2'], 'ORIGINAL')
self.assertEqual(first['two']['inner'], 'UPDATE')
self.assertEqual(first['two']['inner2'], 'ORIGINAL')
def test_new_keys(self):
first = {'one': {'inner': 'ORIGINAL'}, 'two': {'inner': 'ORIGINAL'}}
second = {'three': {'foo': {'bar': 'baz'}}}
# In this case, second has no keys in common, but we'd still expect
# this to get merged.
merge_dicts(first, second)
self.assertEqual(first['three']['foo']['bar'], 'baz')
def test_list_values_no_append(self):
dict1 = {'Foo': ['old_foo_value']}
dict2 = {'Foo': ['new_foo_value']}
merge_dicts(dict1, dict2)
self.assertEqual(
dict1, {'Foo': ['new_foo_value']})
def test_list_values_append(self):
dict1 = {'Foo': ['old_foo_value']}
dict2 = {'Foo': ['new_foo_value']}
merge_dicts(dict1, dict2, append_lists=True)
self.assertEqual(
dict1, {'Foo': ['old_foo_value', 'new_foo_value']})
def test_list_values_mismatching_types(self):
dict1 = {'Foo': 'old_foo_value'}
dict2 = {'Foo': ['new_foo_value']}
merge_dicts(dict1, dict2, append_lists=True)
self.assertEqual(
dict1, {'Foo': ['new_foo_value']})
def test_list_values_missing_key(self):
dict1 = {}
dict2 = {'Foo': ['foo_value']}
merge_dicts(dict1, dict2, append_lists=True)
self.assertEqual(
dict1, {'Foo': ['foo_value']})
class TestLowercaseDict(unittest.TestCase):
def test_lowercase_dict_empty(self):
original = {}
copy = lowercase_dict(original)
self.assertEqual(original, copy)
def test_lowercase_dict_original_keys_lower(self):
original = {
'lower_key1': 1,
'lower_key2': 2,
}
copy = lowercase_dict(original)
self.assertEqual(original, copy)
def test_lowercase_dict_original_keys_mixed(self):
original = {
'SOME_KEY': 'value',
'AnOTher_OnE': 'anothervalue',
}
copy = lowercase_dict(original)
expected = {
'some_key': 'value',
'another_one': 'anothervalue',
}
self.assertEqual(expected, copy)
class TestGetServiceModuleName(unittest.TestCase):
def setUp(self):
self.service_description = {
'metadata': {
'serviceFullName': 'AWS MyService',
'apiVersion': '2014-01-01',
'endpointPrefix': 'myservice',
'signatureVersion': 'v4',
'protocol': 'query'
},
'operations': {},
'shapes': {},
}
self.service_model = ServiceModel(
self.service_description, 'myservice')
def test_default(self):
self.assertEqual(
get_service_module_name(self.service_model),
'MyService'
)
def test_client_name_with_amazon(self):
self.service_description['metadata']['serviceFullName'] = (
'Amazon MyService')
self.assertEqual(
get_service_module_name(self.service_model),
'MyService'
)
    def test_client_name_using_abbreviation(self):
self.service_description['metadata']['serviceAbbreviation'] = (
'Abbreviation')
self.assertEqual(
get_service_module_name(self.service_model),
'Abbreviation'
)
def test_client_name_with_non_alphabet_characters(self):
self.service_description['metadata']['serviceFullName'] = (
'Amazon My-Service')
self.assertEqual(
get_service_module_name(self.service_model),
'MyService'
)
def test_client_name_with_no_full_name_or_abbreviation(self):
del self.service_description['metadata']['serviceFullName']
self.assertEqual(
get_service_module_name(self.service_model),
'myservice'
)
class TestPercentEncodeSequence(unittest.TestCase):
def test_percent_encode_empty(self):
self.assertEqual(percent_encode_sequence({}), '')
def test_percent_encode_special_chars(self):
self.assertEqual(
percent_encode_sequence({'k1': 'with spaces++/'}),
'k1=with%20spaces%2B%2B%2F')
def test_percent_encode_string_string_tuples(self):
self.assertEqual(
percent_encode_sequence([('k1', 'v1'), ('k2', 'v2')]),
'k1=v1&k2=v2'
)
def test_percent_encode_dict_single_pair(self):
self.assertEqual(percent_encode_sequence({'k1': 'v1'}), 'k1=v1')
def test_percent_encode_dict_string_string(self):
self.assertEqual(
percent_encode_sequence({'k1': 'v1', 'k2': 'v2'}),
'k1=v1&k2=v2'
)
def test_percent_encode_single_list_of_values(self):
self.assertEqual(
percent_encode_sequence({'k1': ['a', 'b', 'c']}),
'k1=a&k1=b&k1=c'
)
def test_percent_encode_list_values_of_string(self):
self.assertEqual(
percent_encode_sequence(
{'k1': ['a', 'list'], 'k2': ['another', 'list']}
),
'k1=a&k1=list&k2=another&k2=list'
)
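# A rough sketch of the query-string encoding described above (illustrative
# only, not botocore's percent_encode_sequence): list values expand into
# repeated key=value pairs, and values are quoted with no "safe" characters,
# so '+', '/', and spaces are all percent-encoded.
def _percent_encode_sequence_sketch(mapping):
    quote = six.moves.urllib.parse.quote
    pairs = mapping.items() if hasattr(mapping, 'items') else mapping
    encoded = []
    for key, value in pairs:
        values = value if isinstance(value, (list, tuple)) else [value]
        for item in values:
            encoded.append('%s=%s' % (quote(str(key), safe=''),
                                      quote(str(item), safe='')))
    return '&'.join(encoded)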
class TestPercentEncode(unittest.TestCase):
def test_percent_encode_obj(self):
self.assertEqual(percent_encode(1), '1')
def test_percent_encode_text(self):
self.assertEqual(percent_encode(u''), '')
self.assertEqual(percent_encode(u'a'), 'a')
self.assertEqual(percent_encode(u'\u0000'), '%00')
# Codepoint > 0x7f
self.assertEqual(percent_encode(u'\u2603'), '%E2%98%83')
# Codepoint > 0xffff
self.assertEqual(percent_encode(u'\U0001f32e'), '%F0%9F%8C%AE')
def test_percent_encode_bytes(self):
self.assertEqual(percent_encode(b''), '')
self.assertEqual(percent_encode(b'a'), u'a')
self.assertEqual(percent_encode(b'\x00'), u'%00')
# UTF-8 Snowman
self.assertEqual(percent_encode(b'\xe2\x98\x83'), '%E2%98%83')
# Arbitrary bytes (not valid UTF-8).
self.assertEqual(percent_encode(b'\x80\x00'), '%80%00')
class TestSwitchHostS3Accelerate(unittest.TestCase):
def setUp(self):
self.original_url = 'https://s3.amazonaws.com/foo/key.txt'
self.request = AWSRequest(
method='PUT', headers={},
url=self.original_url
)
self.client_config = Config()
self.request.context['client_config'] = self.client_config
def test_switch_host(self):
switch_host_s3_accelerate(self.request, 'PutObject')
self.assertEqual(
self.request.url,
'https://s3-accelerate.amazonaws.com/foo/key.txt')
def test_do_not_switch_black_listed_operations(self):
# It should not get switched for ListBuckets, DeleteBucket, and
# CreateBucket
blacklist_ops = [
'ListBuckets',
'DeleteBucket',
'CreateBucket'
]
for op_name in blacklist_ops:
switch_host_s3_accelerate(self.request, op_name)
self.assertEqual(self.request.url, self.original_url)
def test_uses_original_endpoint_scheme(self):
self.request.url = 'http://s3.amazonaws.com/foo/key.txt'
switch_host_s3_accelerate(self.request, 'PutObject')
self.assertEqual(
self.request.url,
'http://s3-accelerate.amazonaws.com/foo/key.txt')
def test_uses_dualstack(self):
self.client_config.s3 = {'use_dualstack_endpoint': True}
self.original_url = 'https://s3.dualstack.amazonaws.com/foo/key.txt'
self.request = AWSRequest(
method='PUT', headers={},
url=self.original_url
)
self.request.context['client_config'] = self.client_config
switch_host_s3_accelerate(self.request, 'PutObject')
self.assertEqual(
self.request.url,
'https://s3-accelerate.dualstack.amazonaws.com/foo/key.txt')
class TestDeepMerge(unittest.TestCase):
def test_simple_merge(self):
a = {'key': 'value'}
b = {'otherkey': 'othervalue'}
deep_merge(a, b)
expected = {'key': 'value', 'otherkey': 'othervalue'}
self.assertEqual(a, expected)
def test_merge_list(self):
# Lists are treated as opaque data and so no effort should be made to
# combine them.
a = {'key': ['original']}
b = {'key': ['new']}
deep_merge(a, b)
self.assertEqual(a, {'key': ['new']})
def test_merge_number(self):
# The value from b is always taken
a = {'key': 10}
b = {'key': 45}
deep_merge(a, b)
self.assertEqual(a, {'key': 45})
a = {'key': 45}
b = {'key': 10}
deep_merge(a, b)
self.assertEqual(a, {'key': 10})
def test_merge_boolean(self):
# The value from b is always taken
a = {'key': False}
b = {'key': True}
deep_merge(a, b)
self.assertEqual(a, {'key': True})
a = {'key': True}
b = {'key': False}
deep_merge(a, b)
self.assertEqual(a, {'key': False})
def test_merge_string(self):
a = {'key': 'value'}
b = {'key': 'othervalue'}
deep_merge(a, b)
self.assertEqual(a, {'key': 'othervalue'})
def test_merge_overrides_value(self):
# The value from b is always taken, even when it's a different type
a = {'key': 'original'}
b = {'key': {'newkey': 'newvalue'}}
deep_merge(a, b)
self.assertEqual(a, {'key': {'newkey': 'newvalue'}})
a = {'key': {'anotherkey': 'value'}}
b = {'key': 'newvalue'}
deep_merge(a, b)
self.assertEqual(a, {'key': 'newvalue'})
def test_deep_merge(self):
a = {
'first': {
'second': {
'key': 'value',
'otherkey': 'othervalue'
},
'key': 'value'
}
}
b = {
'first': {
'second': {
'otherkey': 'newvalue',
'yetanotherkey': 'yetanothervalue'
}
}
}
deep_merge(a, b)
expected = {
'first': {
'second': {
'key': 'value',
'otherkey': 'newvalue',
'yetanotherkey': 'yetanothervalue'
},
'key': 'value'
}
}
self.assertEqual(a, expected)
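# A minimal sketch of the merge semantics tested above (not botocore's
# deep_merge): values from the second mapping always win, nested dicts are
# merged recursively, and lists and scalars are replaced wholesale.
def _deep_merge_sketch(a, b):
    for key, value in b.items():
        if isinstance(value, dict) and isinstance(a.get(key), dict):
            _deep_merge_sketch(a[key], value)
        else:
            a[key] = value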
class TestS3RegionRedirector(unittest.TestCase):
def setUp(self):
self.endpoint_bridge = mock.Mock()
self.endpoint_bridge.resolve.return_value = {
'endpoint_url': 'https://eu-central-1.amazonaws.com'
}
self.client = mock.Mock()
self.cache = {}
self.redirector = S3RegionRedirector(self.endpoint_bridge, self.client)
self.set_client_response_headers({})
self.operation = mock.Mock()
self.operation.name = 'foo'
def set_client_response_headers(self, headers):
error_response = ClientError({
'Error': {
'Code': '',
'Message': ''
},
'ResponseMetadata': {
'HTTPHeaders': headers
}
}, 'HeadBucket')
success_response = {
'ResponseMetadata': {
'HTTPHeaders': headers
}
}
self.client.head_bucket.side_effect = [
error_response, success_response]
def test_set_request_url(self):
params = {'url': 'https://us-west-2.amazonaws.com/foo'}
context = {'signing': {
'endpoint': 'https://eu-central-1.amazonaws.com'
}}
self.redirector.set_request_url(params, context)
self.assertEqual(
params['url'], 'https://eu-central-1.amazonaws.com/foo')
def test_only_changes_request_url_if_endpoint_present(self):
params = {'url': 'https://us-west-2.amazonaws.com/foo'}
context = {}
self.redirector.set_request_url(params, context)
self.assertEqual(
params['url'], 'https://us-west-2.amazonaws.com/foo')
def test_set_request_url_keeps_old_scheme(self):
params = {'url': 'http://us-west-2.amazonaws.com/foo'}
context = {'signing': {
'endpoint': 'https://eu-central-1.amazonaws.com'
}}
self.redirector.set_request_url(params, context)
self.assertEqual(
params['url'], 'http://eu-central-1.amazonaws.com/foo')
def test_sets_signing_context_from_cache(self):
signing_context = {'endpoint': 'bar'}
self.cache['foo'] = signing_context
self.redirector = S3RegionRedirector(
self.endpoint_bridge, self.client, cache=self.cache)
params = {'Bucket': 'foo'}
context = {}
self.redirector.redirect_from_cache(params, context)
self.assertEqual(context.get('signing'), signing_context)
def test_only_changes_context_if_bucket_in_cache(self):
signing_context = {'endpoint': 'bar'}
self.cache['bar'] = signing_context
self.redirector = S3RegionRedirector(
self.endpoint_bridge, self.client, cache=self.cache)
params = {'Bucket': 'foo'}
context = {}
self.redirector.redirect_from_cache(params, context)
self.assertNotEqual(context.get('signing'), signing_context)
def test_redirect_from_error(self):
request_dict = {
'context': {'signing': {'bucket': 'foo'}},
'url': 'https://us-west-2.amazonaws.com/foo'
}
response = (None, {
'Error': {
'Code': 'PermanentRedirect',
'Endpoint': 'foo.eu-central-1.amazonaws.com',
'Bucket': 'foo'
},
'ResponseMetadata': {
'HTTPHeaders': {'x-amz-bucket-region': 'eu-central-1'}
}
})
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
        # The return value needs to be 0 so that the retry has no delay
self.assertEqual(redirect_response, 0)
self.assertEqual(
request_dict['url'], 'https://eu-central-1.amazonaws.com/foo')
expected_signing_context = {
'endpoint': 'https://eu-central-1.amazonaws.com',
'bucket': 'foo',
'region': 'eu-central-1'
}
signing_context = request_dict['context'].get('signing')
self.assertEqual(signing_context, expected_signing_context)
self.assertTrue(request_dict['context'].get('s3_redirected'))
def test_does_not_redirect_if_previously_redirected(self):
request_dict = {
'context': {
'signing': {'bucket': 'foo', 'region': 'us-west-2'},
's3_redirected': True,
},
'url': 'https://us-west-2.amazonaws.com/foo'
}
response = (None, {
'Error': {
'Code': '400',
'Message': 'Bad Request',
},
'ResponseMetadata': {
'HTTPHeaders': {'x-amz-bucket-region': 'us-west-2'}
}
})
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
self.assertIsNone(redirect_response)
    def test_does_not_redirect_unless_permanentredirect_received(self):
request_dict = {}
response = (None, {})
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
self.assertIsNone(redirect_response)
self.assertEqual(request_dict, {})
def test_does_not_redirect_if_region_cannot_be_found(self):
request_dict = {'url': 'https://us-west-2.amazonaws.com/foo',
'context': {'signing': {'bucket': 'foo'}}}
response = (None, {
'Error': {
'Code': 'PermanentRedirect',
'Endpoint': 'foo.eu-central-1.amazonaws.com',
'Bucket': 'foo'
},
'ResponseMetadata': {
'HTTPHeaders': {}
}
})
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
self.assertIsNone(redirect_response)
def test_redirects_301(self):
request_dict = {'url': 'https://us-west-2.amazonaws.com/foo',
'context': {'signing': {'bucket': 'foo'}}}
response = (None, {
'Error': {
'Code': '301',
'Message': 'Moved Permanently'
},
'ResponseMetadata': {
'HTTPHeaders': {'x-amz-bucket-region': 'eu-central-1'}
}
})
self.operation.name = 'HeadObject'
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
self.assertEqual(redirect_response, 0)
self.operation.name = 'ListObjects'
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
self.assertIsNone(redirect_response)
def test_redirects_400_head_bucket(self):
request_dict = {'url': 'https://us-west-2.amazonaws.com/foo',
'context': {'signing': {'bucket': 'foo'}}}
response = (None, {
'Error': {'Code': '400', 'Message': 'Bad Request'},
'ResponseMetadata': {
'HTTPHeaders': {'x-amz-bucket-region': 'eu-central-1'}
}
})
self.operation.name = 'HeadObject'
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
self.assertEqual(redirect_response, 0)
self.operation.name = 'ListObjects'
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
self.assertIsNone(redirect_response)
def test_does_not_redirect_400_head_bucket_no_region_header(self):
# We should not redirect a 400 Head* if the region header is not
# present as this will lead to infinitely calling HeadBucket.
request_dict = {'url': 'https://us-west-2.amazonaws.com/foo',
'context': {'signing': {'bucket': 'foo'}}}
response = (None, {
'Error': {'Code': '400', 'Message': 'Bad Request'},
'ResponseMetadata': {
'HTTPHeaders': {}
}
})
self.operation.name = 'HeadBucket'
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
head_bucket_calls = self.client.head_bucket.call_count
self.assertIsNone(redirect_response)
# We should not have made an additional head bucket call
self.assertEqual(head_bucket_calls, 0)
def test_does_not_redirect_if_None_response(self):
request_dict = {'url': 'https://us-west-2.amazonaws.com/foo',
'context': {'signing': {'bucket': 'foo'}}}
response = None
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
self.assertIsNone(redirect_response)
def test_get_region_from_response(self):
response = (None, {
'Error': {
'Code': 'PermanentRedirect',
'Endpoint': 'foo.eu-central-1.amazonaws.com',
'Bucket': 'foo'
},
'ResponseMetadata': {
'HTTPHeaders': {'x-amz-bucket-region': 'eu-central-1'}
}
})
region = self.redirector.get_bucket_region('foo', response)
self.assertEqual(region, 'eu-central-1')
def test_get_region_from_response_error_body(self):
response = (None, {
'Error': {
'Code': 'PermanentRedirect',
'Endpoint': 'foo.eu-central-1.amazonaws.com',
'Bucket': 'foo',
'Region': 'eu-central-1'
},
'ResponseMetadata': {
'HTTPHeaders': {}
}
})
region = self.redirector.get_bucket_region('foo', response)
self.assertEqual(region, 'eu-central-1')
def test_get_region_from_head_bucket_error(self):
self.set_client_response_headers(
{'x-amz-bucket-region': 'eu-central-1'})
response = (None, {
'Error': {
'Code': 'PermanentRedirect',
'Endpoint': 'foo.eu-central-1.amazonaws.com',
'Bucket': 'foo',
},
'ResponseMetadata': {
'HTTPHeaders': {}
}
})
region = self.redirector.get_bucket_region('foo', response)
self.assertEqual(region, 'eu-central-1')
def test_get_region_from_head_bucket_success(self):
success_response = {
'ResponseMetadata': {
'HTTPHeaders': {'x-amz-bucket-region': 'eu-central-1'}
}
}
self.client.head_bucket.side_effect = None
self.client.head_bucket.return_value = success_response
response = (None, {
'Error': {
'Code': 'PermanentRedirect',
'Endpoint': 'foo.eu-central-1.amazonaws.com',
'Bucket': 'foo',
},
'ResponseMetadata': {
'HTTPHeaders': {}
}
})
region = self.redirector.get_bucket_region('foo', response)
self.assertEqual(region, 'eu-central-1')
def test_no_redirect_from_error_for_accesspoint(self):
request_dict = {
'url': (
'https://myendpoint-123456789012.s3-accesspoint.'
'us-west-2.amazonaws.com/key'
),
'context': {
's3_accesspoint': {}
}
}
response = (None, {
'Error': {'Code': '400', 'Message': 'Bad Request'},
'ResponseMetadata': {
'HTTPHeaders': {'x-amz-bucket-region': 'eu-central-1'}
}
})
self.operation.name = 'HeadObject'
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
        self.assertIsNone(redirect_response)
def test_no_redirect_from_cache_for_accesspoint(self):
self.cache['foo'] = {'endpoint': 'foo-endpoint'}
self.redirector = S3RegionRedirector(
self.endpoint_bridge, self.client, cache=self.cache)
params = {'Bucket': 'foo'}
context = {'s3_accesspoint': {}}
self.redirector.redirect_from_cache(params, context)
self.assertNotIn('signing', context)
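# An illustrative outline (an assumption drawn from the tests above, not
# S3RegionRedirector itself) of how the bucket region is resolved when a
# redirect-style error is seen: the x-amz-bucket-region header first, then
# the error body's Region field, and finally a HeadBucket call whose
# response headers carry the same value.
def _resolve_bucket_region_sketch(parsed_error, head_bucket_headers):
    headers = parsed_error.get('ResponseMetadata', {}).get('HTTPHeaders', {})
    if 'x-amz-bucket-region' in headers:
        return headers['x-amz-bucket-region']
    if 'Region' in parsed_error.get('Error', {}):
        return parsed_error['Error']['Region']
    # head_bucket_headers is a hypothetical stand-in for the headers of a
    # follow-up HeadBucket call (successful or not).
    return head_bucket_headers.get('x-amz-bucket-region')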
class TestArnParser(unittest.TestCase):
def setUp(self):
self.parser = ArnParser()
def test_parse(self):
arn = 'arn:aws:s3:us-west-2:1023456789012:myresource'
self.assertEqual(
self.parser.parse_arn(arn),
{
'partition': 'aws',
'service': 's3',
'region': 'us-west-2',
'account': '1023456789012',
'resource': 'myresource',
}
)
def test_parse_invalid_arn(self):
with self.assertRaises(InvalidArnException):
self.parser.parse_arn('arn:aws:s3')
def test_parse_arn_with_resource_type(self):
arn = 'arn:aws:s3:us-west-2:1023456789012:bucket_name:mybucket'
self.assertEqual(
self.parser.parse_arn(arn),
{
'partition': 'aws',
'service': 's3',
'region': 'us-west-2',
'account': '1023456789012',
'resource': 'bucket_name:mybucket',
}
)
def test_parse_arn_with_empty_elements(self):
arn = 'arn:aws:s3:::mybucket'
self.assertEqual(
self.parser.parse_arn(arn),
{
'partition': 'aws',
'service': 's3',
'region': '',
'account': '',
'resource': 'mybucket',
}
)
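# A minimal sketch of the six-field ARN split tested above (illustrative,
# not botocore's ArnParser): the resource field may itself contain ':',
# hence maxsplit=5; a string with fewer fields is rejected (here with a
# plain ValueError rather than InvalidArnException).
def _parse_arn_sketch(arn):
    parts = arn.split(':', 5)
    if len(parts) < 6:
        raise ValueError('Not a valid ARN: %s' % arn)
    _, partition, service, region, account, resource = parts
    return {'partition': partition, 'service': service, 'region': region,
            'account': account, 'resource': resource}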
class TestS3ArnParamHandler(unittest.TestCase):
def setUp(self):
self.arn_handler = S3ArnParamHandler()
self.model = mock.Mock(OperationModel)
self.model.name = 'GetObject'
def test_register(self):
event_emitter = mock.Mock()
self.arn_handler.register(event_emitter)
event_emitter.register.assert_called_with(
'before-parameter-build.s3', self.arn_handler.handle_arn)
def test_accesspoint_arn(self):
params = {
'Bucket': 'arn:aws:s3:us-west-2:123456789012:accesspoint/endpoint'
}
context = {}
self.arn_handler.handle_arn(params, self.model, context)
self.assertEqual(params, {'Bucket': 'endpoint'})
self.assertEqual(
context,
{
's3_accesspoint': {
'name': 'endpoint',
'account': '123456789012',
'region': 'us-west-2',
'partition': 'aws',
'service': 's3',
}
}
)
def test_accesspoint_arn_with_colon(self):
params = {
'Bucket': 'arn:aws:s3:us-west-2:123456789012:accesspoint:endpoint'
}
context = {}
self.arn_handler.handle_arn(params, self.model, context)
self.assertEqual(params, {'Bucket': 'endpoint'})
self.assertEqual(
context,
{
's3_accesspoint': {
'name': 'endpoint',
'account': '123456789012',
'region': 'us-west-2',
'partition': 'aws',
'service': 's3',
}
}
)
def test_errors_for_non_accesspoint_arn(self):
params = {
'Bucket': 'arn:aws:s3:us-west-2:123456789012:unsupported:resource'
}
context = {}
with self.assertRaises(UnsupportedS3ArnError):
self.arn_handler.handle_arn(params, self.model, context)
def test_outpost_arn_with_colon(self):
params = {
'Bucket': (
'arn:aws:s3-outposts:us-west-2:123456789012:outpost:'
'op-01234567890123456:accesspoint:myaccesspoint'
)
}
context = {}
self.arn_handler.handle_arn(params, self.model, context)
self.assertEqual(params, {'Bucket': 'myaccesspoint'})
self.assertEqual(
context,
{
's3_accesspoint': {
'name': 'myaccesspoint',
'outpost_name': 'op-01234567890123456',
'account': '123456789012',
'region': 'us-west-2',
'partition': 'aws',
'service': 's3-outposts',
}
}
)
def test_outpost_arn_with_slash(self):
params = {
'Bucket': (
'arn:aws:s3-outposts:us-west-2:123456789012:outpost/'
'op-01234567890123456/accesspoint/myaccesspoint'
)
}
context = {}
self.arn_handler.handle_arn(params, self.model, context)
self.assertEqual(params, {'Bucket': 'myaccesspoint'})
self.assertEqual(
context,
{
's3_accesspoint': {
'name': 'myaccesspoint',
'outpost_name': 'op-01234567890123456',
'account': '123456789012',
'region': 'us-west-2',
'partition': 'aws',
'service': 's3-outposts',
}
}
)
def test_outpost_arn_errors_for_missing_fields(self):
params = {
'Bucket': 'arn:aws:s3-outposts:us-west-2:123456789012:outpost/'
'op-01234567890123456/accesspoint'
}
with self.assertRaises(UnsupportedOutpostResourceError):
self.arn_handler.handle_arn(params, self.model, {})
def test_outpost_arn_errors_for_empty_fields(self):
params = {
'Bucket': 'arn:aws:s3-outposts:us-west-2:123456789012:outpost/'
'/accesspoint/myaccesspoint'
}
with self.assertRaises(UnsupportedOutpostResourceError):
self.arn_handler.handle_arn(params, self.model, {})
def test_ignores_bucket_names(self):
params = {'Bucket': 'mybucket'}
context = {}
self.arn_handler.handle_arn(params, self.model, context)
self.assertEqual(params, {'Bucket': 'mybucket'})
self.assertEqual(context, {})
def test_ignores_create_bucket(self):
arn = 'arn:aws:s3:us-west-2:123456789012:accesspoint/endpoint'
params = {'Bucket': arn}
context = {}
self.model.name = 'CreateBucket'
self.arn_handler.handle_arn(params, self.model, context)
self.assertEqual(params, {'Bucket': arn})
self.assertEqual(context, {})
class TestS3EndpointSetter(unittest.TestCase):
def setUp(self):
self.operation_name = 'GetObject'
self.signature_version = 's3v4'
self.region_name = 'us-west-2'
self.service = 's3'
self.account = '123456789012'
self.bucket = 'mybucket'
self.key = 'key.txt'
self.accesspoint_name = 'myaccesspoint'
self.outpost_name = 'op-123456789012'
self.partition = 'aws'
self.endpoint_resolver = mock.Mock()
self.dns_suffix = 'amazonaws.com'
self.endpoint_resolver.construct_endpoint.return_value = {
'dnsSuffix': self.dns_suffix
}
self.endpoint_setter = self.get_endpoint_setter()
def get_endpoint_setter(self, **kwargs):
setter_kwargs = {
'endpoint_resolver': self.endpoint_resolver,
'region': self.region_name,
}
setter_kwargs.update(kwargs)
return S3EndpointSetter(**setter_kwargs)
def get_s3_request(self, bucket=None, key=None, scheme='https://',
querystring=None):
url = scheme + 's3.us-west-2.amazonaws.com/'
if bucket:
url += bucket
if key:
url += '/%s' % key
if querystring:
url += '?%s' % querystring
return AWSRequest(method='GET', headers={}, url=url)
def get_s3_outpost_request(self, **s3_request_kwargs):
request = self.get_s3_request(
self.accesspoint_name, **s3_request_kwargs)
accesspoint_context = self.get_s3_accesspoint_context(
name=self.accesspoint_name, outpost_name=self.outpost_name)
request.context['s3_accesspoint'] = accesspoint_context
return request
def get_s3_accesspoint_request(self, accesspoint_name=None,
accesspoint_context=None,
**s3_request_kwargs):
if not accesspoint_name:
accesspoint_name = self.accesspoint_name
request = self.get_s3_request(accesspoint_name, **s3_request_kwargs)
if accesspoint_context is None:
accesspoint_context = self.get_s3_accesspoint_context(
name=accesspoint_name)
request.context['s3_accesspoint'] = accesspoint_context
return request
def get_s3_accesspoint_context(self, **overrides):
accesspoint_context = {
'name': self.accesspoint_name,
'account': self.account,
'region': self.region_name,
'partition': self.partition,
'service': self.service,
}
accesspoint_context.update(overrides)
return accesspoint_context
def call_set_endpoint(self, endpoint_setter, request, **kwargs):
set_endpoint_kwargs = {
'request': request,
'operation_name': self.operation_name,
'signature_version': self.signature_version,
'region_name': self.region_name,
}
set_endpoint_kwargs.update(kwargs)
endpoint_setter.set_endpoint(**set_endpoint_kwargs)
def test_register(self):
event_emitter = mock.Mock()
self.endpoint_setter.register(event_emitter)
event_emitter.register.assert_has_calls([
mock.call('before-sign.s3', self.endpoint_setter.set_endpoint),
mock.call('choose-signer.s3', self.endpoint_setter.set_signer),
mock.call(
'before-call.s3.WriteGetObjectResponse',
self.endpoint_setter.update_endpoint_to_s3_object_lambda,
)
])
def test_outpost_endpoint(self):
request = self.get_s3_outpost_request()
self.call_set_endpoint(self.endpoint_setter, request=request)
expected_url = 'https://%s-%s.%s.s3-outposts.%s.amazonaws.com/' % (
self.accesspoint_name, self.account, self.outpost_name,
self.region_name,
)
self.assertEqual(request.url, expected_url)
def test_outpost_endpoint_preserves_key_in_path(self):
request = self.get_s3_outpost_request(key=self.key)
self.call_set_endpoint(self.endpoint_setter, request=request)
expected_url = 'https://%s-%s.%s.s3-outposts.%s.amazonaws.com/%s' % (
self.accesspoint_name, self.account, self.outpost_name,
self.region_name, self.key
)
self.assertEqual(request.url, expected_url)
def test_accesspoint_endpoint(self):
request = self.get_s3_accesspoint_request()
self.call_set_endpoint(self.endpoint_setter, request=request)
expected_url = 'https://%s-%s.s3-accesspoint.%s.amazonaws.com/' % (
self.accesspoint_name, self.account, self.region_name
)
self.assertEqual(request.url, expected_url)
def test_accesspoint_preserves_key_in_path(self):
request = self.get_s3_accesspoint_request(key=self.key)
self.call_set_endpoint(self.endpoint_setter, request=request)
expected_url = 'https://%s-%s.s3-accesspoint.%s.amazonaws.com/%s' % (
self.accesspoint_name, self.account, self.region_name,
self.key
)
self.assertEqual(request.url, expected_url)
def test_accesspoint_preserves_scheme(self):
request = self.get_s3_accesspoint_request(scheme='http://')
self.call_set_endpoint(self.endpoint_setter, request=request)
expected_url = 'http://%s-%s.s3-accesspoint.%s.amazonaws.com/' % (
self.accesspoint_name, self.account, self.region_name,
)
self.assertEqual(request.url, expected_url)
def test_accesspoint_preserves_query_string(self):
request = self.get_s3_accesspoint_request(querystring='acl')
self.call_set_endpoint(self.endpoint_setter, request=request)
expected_url = 'https://%s-%s.s3-accesspoint.%s.amazonaws.com/?acl' % (
self.accesspoint_name, self.account, self.region_name,
)
self.assertEqual(request.url, expected_url)
def test_uses_resolved_dns_suffix(self):
self.endpoint_resolver.construct_endpoint.return_value = {
'dnsSuffix': 'mysuffix.com'
}
request = self.get_s3_accesspoint_request()
self.call_set_endpoint(self.endpoint_setter, request=request)
expected_url = 'https://%s-%s.s3-accesspoint.%s.mysuffix.com/' % (
self.accesspoint_name, self.account, self.region_name,
)
self.assertEqual(request.url, expected_url)
def test_uses_region_of_client_if_use_arn_disabled(self):
client_region = 'client-region'
self.endpoint_setter = self.get_endpoint_setter(
region=client_region, s3_config={'use_arn_region': False})
request = self.get_s3_accesspoint_request()
self.call_set_endpoint(self.endpoint_setter, request=request)
expected_url = 'https://%s-%s.s3-accesspoint.%s.amazonaws.com/' % (
self.accesspoint_name, self.account, client_region,
)
self.assertEqual(request.url, expected_url)
def test_accesspoint_supports_custom_endpoint(self):
endpoint_setter = self.get_endpoint_setter(
endpoint_url='https://custom.com')
request = self.get_s3_accesspoint_request()
self.call_set_endpoint(endpoint_setter, request=request)
expected_url = 'https://%s-%s.custom.com/' % (
self.accesspoint_name, self.account,
)
self.assertEqual(request.url, expected_url)
def test_errors_for_mismatching_partition(self):
endpoint_setter = self.get_endpoint_setter(partition='aws-cn')
accesspoint_context = self.get_s3_accesspoint_context(partition='aws')
request = self.get_s3_accesspoint_request(
accesspoint_context=accesspoint_context)
with self.assertRaises(UnsupportedS3AccesspointConfigurationError):
self.call_set_endpoint(endpoint_setter, request=request)
def test_errors_for_mismatching_partition_when_using_client_region(self):
endpoint_setter = self.get_endpoint_setter(
s3_config={'use_arn_region': False}, partition='aws-cn'
)
accesspoint_context = self.get_s3_accesspoint_context(partition='aws')
request = self.get_s3_accesspoint_request(
accesspoint_context=accesspoint_context)
with self.assertRaises(UnsupportedS3AccesspointConfigurationError):
self.call_set_endpoint(endpoint_setter, request=request)
def test_set_endpoint_for_auto(self):
endpoint_setter = self.get_endpoint_setter(
s3_config={'addressing_style': 'auto'})
request = self.get_s3_request(self.bucket, self.key)
self.call_set_endpoint(endpoint_setter, request)
expected_url = 'https://%s.s3.us-west-2.amazonaws.com/%s' % (
self.bucket, self.key
)
self.assertEqual(request.url, expected_url)
def test_set_endpoint_for_virtual(self):
endpoint_setter = self.get_endpoint_setter(
s3_config={'addressing_style': 'virtual'})
request = self.get_s3_request(self.bucket, self.key)
self.call_set_endpoint(endpoint_setter, request)
expected_url = 'https://%s.s3.us-west-2.amazonaws.com/%s' % (
self.bucket, self.key
)
self.assertEqual(request.url, expected_url)
def test_set_endpoint_for_path(self):
endpoint_setter = self.get_endpoint_setter(
s3_config={'addressing_style': 'path'})
request = self.get_s3_request(self.bucket, self.key)
self.call_set_endpoint(endpoint_setter, request)
expected_url = 'https://s3.us-west-2.amazonaws.com/%s/%s' % (
self.bucket, self.key
)
self.assertEqual(request.url, expected_url)
def test_set_endpoint_for_accelerate(self):
endpoint_setter = self.get_endpoint_setter(
s3_config={'use_accelerate_endpoint': True})
request = self.get_s3_request(self.bucket, self.key)
self.call_set_endpoint(endpoint_setter, request)
expected_url = 'https://%s.s3-accelerate.amazonaws.com/%s' % (
self.bucket, self.key
)
self.assertEqual(request.url, expected_url)
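# The hostname layout the access point tests above expect, written out as a
# small helper for illustration (not part of S3EndpointSetter):
# '<accesspoint>-<account>.s3-accesspoint.<region>.<dns suffix>', with
# outposts inserting the outpost id and using an 's3-outposts' service label.
def _accesspoint_netloc_sketch(name, account, region,
                               dns_suffix='amazonaws.com', outpost_name=None):
    if outpost_name:
        return '%s-%s.%s.s3-outposts.%s.%s' % (
            name, account, outpost_name, region, dns_suffix)
    return '%s-%s.s3-accesspoint.%s.%s' % (name, account, region, dns_suffix)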
class TestContainerMetadataFetcher(unittest.TestCase):
def setUp(self):
self.responses = []
self.http = mock.Mock()
self.sleep = mock.Mock()
def create_fetcher(self):
return ContainerMetadataFetcher(self.http, sleep=self.sleep)
def fake_response(self, status_code, body):
response = mock.Mock()
response.status_code = status_code
response.content = body
return response
def set_http_responses_to(self, *responses):
http_responses = []
for response in responses:
if isinstance(response, Exception):
# Simulating an error condition.
http_response = response
elif hasattr(response, 'status_code'):
# It's a precreated fake_response.
http_response = response
else:
http_response = self.fake_response(
status_code=200, body=json.dumps(response).encode('utf-8'))
http_responses.append(http_response)
self.http.send.side_effect = http_responses
def assert_request(self, method, url, headers):
request = self.http.send.call_args[0][0]
self.assertEqual(request.method, method)
self.assertEqual(request.url, url)
self.assertEqual(request.headers, headers)
def assert_can_retrieve_metadata_from(self, full_uri):
response_body = {'foo': 'bar'}
self.set_http_responses_to(response_body)
fetcher = self.create_fetcher()
response = fetcher.retrieve_full_uri(full_uri)
self.assertEqual(response, response_body)
self.assert_request('GET', full_uri, {'Accept': 'application/json'})
def assert_host_is_not_allowed(self, full_uri):
response_body = {'foo': 'bar'}
self.set_http_responses_to(response_body)
fetcher = self.create_fetcher()
with self.assertRaisesRegex(ValueError, 'Unsupported host'):
fetcher.retrieve_full_uri(full_uri)
self.assertFalse(self.http.send.called)
def test_can_specify_extra_headers_are_merged(self):
headers = {
# The 'Accept' header will override the
# default Accept header of application/json.
'Accept': 'application/not-json',
'X-Other-Header': 'foo',
}
self.set_http_responses_to({'foo': 'bar'})
fetcher = self.create_fetcher()
fetcher.retrieve_full_uri('http://localhost', headers)
self.assert_request('GET', 'http://localhost', headers)
def test_can_retrieve_uri(self):
json_body = {
"AccessKeyId" : "a",
"SecretAccessKey" : "b",
"Token" : "c",
"Expiration" : "d"
}
self.set_http_responses_to(json_body)
fetcher = self.create_fetcher()
response = fetcher.retrieve_uri('/foo?id=1')
self.assertEqual(response, json_body)
# Ensure we made calls to the right endpoint.
headers = {'Accept': 'application/json'}
self.assert_request('GET', 'http://169.254.170.2/foo?id=1', headers)
def test_can_retry_requests(self):
success_response = {
"AccessKeyId" : "a",
"SecretAccessKey" : "b",
"Token" : "c",
"Expiration" : "d"
}
self.set_http_responses_to(
# First response is a connection error, should
# be retried.
ConnectionClosedError(endpoint_url=''),
# Second response is the successful JSON response
# with credentials.
success_response,
)
fetcher = self.create_fetcher()
response = fetcher.retrieve_uri('/foo?id=1')
self.assertEqual(response, success_response)
def test_propagates_credential_error_on_http_errors(self):
self.set_http_responses_to(
# In this scenario, we never get a successful response.
ConnectionClosedError(endpoint_url=''),
ConnectionClosedError(endpoint_url=''),
ConnectionClosedError(endpoint_url=''),
ConnectionClosedError(endpoint_url=''),
ConnectionClosedError(endpoint_url=''),
)
# As a result, we expect an appropriate error to be raised.
fetcher = self.create_fetcher()
with self.assertRaises(MetadataRetrievalError):
fetcher.retrieve_uri('/foo?id=1')
self.assertEqual(self.http.send.call_count, fetcher.RETRY_ATTEMPTS)
def test_error_raised_on_non_200_response(self):
self.set_http_responses_to(
self.fake_response(status_code=404, body=b'Error not found'),
self.fake_response(status_code=404, body=b'Error not found'),
self.fake_response(status_code=404, body=b'Error not found'),
)
fetcher = self.create_fetcher()
with self.assertRaises(MetadataRetrievalError):
fetcher.retrieve_uri('/foo?id=1')
# Should have tried up to RETRY_ATTEMPTS.
self.assertEqual(self.http.send.call_count, fetcher.RETRY_ATTEMPTS)
def test_error_raised_on_no_json_response(self):
        # If the service returns a success response but with a body that
# does not contain JSON, we should still retry up to RETRY_ATTEMPTS,
# but after exhausting retries we propagate the exception.
self.set_http_responses_to(
self.fake_response(status_code=200, body=b'Not JSON'),
self.fake_response(status_code=200, body=b'Not JSON'),
self.fake_response(status_code=200, body=b'Not JSON'),
)
fetcher = self.create_fetcher()
with self.assertRaises(MetadataRetrievalError) as e:
fetcher.retrieve_uri('/foo?id=1')
self.assertNotIn('Not JSON', str(e.exception))
# Should have tried up to RETRY_ATTEMPTS.
self.assertEqual(self.http.send.call_count, fetcher.RETRY_ATTEMPTS)
def test_can_retrieve_full_uri_with_fixed_ip(self):
self.assert_can_retrieve_metadata_from(
'http://%s/foo?id=1' % ContainerMetadataFetcher.IP_ADDRESS)
def test_localhost_http_is_allowed(self):
self.assert_can_retrieve_metadata_from('http://localhost/foo')
def test_localhost_with_port_http_is_allowed(self):
self.assert_can_retrieve_metadata_from('http://localhost:8000/foo')
def test_localhost_https_is_allowed(self):
self.assert_can_retrieve_metadata_from('https://localhost/foo')
def test_can_use_127_ip_addr(self):
self.assert_can_retrieve_metadata_from('https://127.0.0.1/foo')
def test_can_use_127_ip_addr_with_port(self):
self.assert_can_retrieve_metadata_from('https://127.0.0.1:8080/foo')
def test_link_local_http_is_not_allowed(self):
self.assert_host_is_not_allowed('http://169.254.0.1/foo')
def test_link_local_https_is_not_allowed(self):
self.assert_host_is_not_allowed('https://169.254.0.1/foo')
def test_non_link_local_nonallowed_url(self):
self.assert_host_is_not_allowed('http://169.1.2.3/foo')
def test_error_raised_on_nonallowed_url(self):
self.assert_host_is_not_allowed('http://somewhere.com/foo')
def test_external_host_not_allowed_if_https(self):
self.assert_host_is_not_allowed('https://somewhere.com/foo')
class TestUnsigned(unittest.TestCase):
def test_copy_returns_same_object(self):
self.assertIs(botocore.UNSIGNED, copy.copy(botocore.UNSIGNED))
def test_deepcopy_returns_same_object(self):
self.assertIs(botocore.UNSIGNED, copy.deepcopy(botocore.UNSIGNED))
class TestInstanceMetadataFetcher(unittest.TestCase):
def setUp(self):
urllib3_session_send = 'botocore.httpsession.URLLib3Session.send'
self._urllib3_patch = mock.patch(urllib3_session_send)
self._send = self._urllib3_patch.start()
self._imds_responses = []
self._send.side_effect = self.get_imds_response
self._role_name = 'role-name'
self._creds = {
'AccessKeyId': 'spam',
'SecretAccessKey': 'eggs',
'Token': 'spam-token',
'Expiration': 'something',
}
self._expected_creds = {
'access_key': self._creds['AccessKeyId'],
'secret_key': self._creds['SecretAccessKey'],
'token': self._creds['Token'],
'expiry_time': self._creds['Expiration'],
'role_name': self._role_name
}
def tearDown(self):
self._urllib3_patch.stop()
def add_imds_response(self, body, status_code=200):
response = botocore.awsrequest.AWSResponse(
url='http://169.254.169.254/',
status_code=status_code,
headers={},
raw=RawResponse(body)
)
self._imds_responses.append(response)
def add_get_role_name_imds_response(self, role_name=None):
if role_name is None:
role_name = self._role_name
self.add_imds_response(body=role_name.encode('utf-8'))
def add_get_credentials_imds_response(self, creds=None):
if creds is None:
creds = self._creds
self.add_imds_response(body=json.dumps(creds).encode('utf-8'))
def add_get_token_imds_response(self, token, status_code=200):
self.add_imds_response(body=token.encode('utf-8'),
status_code=status_code)
def add_metadata_token_not_supported_response(self):
self.add_imds_response(b'', status_code=404)
def add_imds_connection_error(self, exception):
self._imds_responses.append(exception)
def add_default_imds_responses(self):
self.add_get_token_imds_response(token='token')
self.add_get_role_name_imds_response()
self.add_get_credentials_imds_response()
def get_imds_response(self, request):
response = self._imds_responses.pop(0)
if isinstance(response, Exception):
raise response
return response
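    # Note: the add_*_imds_response helpers above build a FIFO queue of fake
    # IMDS responses, and get_imds_response is wired in (via setUp) as the
    # side effect of the mocked HTTP send.  Each request the fetcher makes
    # consumes the next queued item, and queued exceptions are raised instead
    # of returned, so tests queue responses in the order the fetcher is
    # expected to ask for them: token, role name, then credentials.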
def _test_imds_base_url(self, config, expected_url):
self.add_default_imds_responses()
fetcher = InstanceMetadataFetcher(config=config)
result = fetcher.retrieve_iam_role_credentials()
self.assertEqual(result, self._expected_creds)
self.assertEqual(fetcher.get_base_url(), expected_url)
def test_disabled_by_environment(self):
env = {'AWS_EC2_METADATA_DISABLED': 'true'}
fetcher = InstanceMetadataFetcher(env=env)
result = fetcher.retrieve_iam_role_credentials()
self.assertEqual(result, {})
self._send.assert_not_called()
def test_disabled_by_environment_mixed_case(self):
env = {'AWS_EC2_METADATA_DISABLED': 'tRuE'}
fetcher = InstanceMetadataFetcher(env=env)
result = fetcher.retrieve_iam_role_credentials()
self.assertEqual(result, {})
self._send.assert_not_called()
def test_disabling_env_var_not_true(self):
url = 'https://example.com/'
env = {'AWS_EC2_METADATA_DISABLED': 'false'}
self.add_default_imds_responses()
fetcher = InstanceMetadataFetcher(base_url=url, env=env)
result = fetcher.retrieve_iam_role_credentials()
self.assertEqual(result, self._expected_creds)
def test_ec2_metadata_endpoint_service_mode(self):
configs = [
({'ec2_metadata_service_endpoint_mode': 'ipv6'},
'http://[fd00:ec2::254]/'),
({'ec2_metadata_service_endpoint_mode': 'ipv4'},
'http://169.254.169.254/'),
({'ec2_metadata_service_endpoint_mode': 'foo'},
'http://169.254.169.254/'),
({'ec2_metadata_service_endpoint_mode': 'ipv6',
'ec2_metadata_service_endpoint': 'http://[fd00:ec2::010]/'},
'http://[fd00:ec2::010]/')
]
for config, expected_url in configs:
self._test_imds_base_url(config, expected_url)
def test_metadata_endpoint(self):
urls = ['http://fd00:ec2:0000:0000:0000:0000:0000:0000/',
'http://[fd00:ec2::010]/', 'http://192.168.1.1/']
for url in urls:
self.assertTrue(is_valid_uri(url))
def test_ipv6_endpoint_no_brackets_env_var_set(self):
url = 'http://fd00:ec2::010/'
self.assertFalse(is_valid_ipv6_endpoint_url(url))
def test_ipv6_invalid_endpoint(self):
url = 'not.a:valid:dom@in'
config = {'ec2_metadata_service_endpoint': url}
with self.assertRaises(InvalidIMDSEndpointError):
InstanceMetadataFetcher(config=config)
def test_ipv6_endpoint_env_var_set_and_args(self):
url = 'http://[fd00:ec2::254]/'
url_arg = 'http://fd00:ec2:0000:0000:0000:8a2e:0370:7334/'
config = {'ec2_metadata_service_endpoint': url}
self.add_default_imds_responses()
fetcher = InstanceMetadataFetcher(config=config, base_url=url_arg)
result = fetcher.retrieve_iam_role_credentials()
self.assertEqual(result, self._expected_creds)
self.assertEqual(fetcher.get_base_url(), url_arg)
def test_ipv6_imds_not_allocated(self):
url = 'http://fd00:ec2:0000:0000:0000:0000:0000:0000/'
config = {'ec2_metadata_service_endpoint': url}
self.add_imds_response(
status_code=400, body=b'{}')
fetcher = InstanceMetadataFetcher(config=config)
result = fetcher.retrieve_iam_role_credentials()
self.assertEqual(result, {})
def test_ipv6_imds_empty_config(self):
configs = [
({'ec2_metadata_service_endpoint': ''}, 'http://169.254.169.254/'),
({'ec2_metadata_service_endpoint_mode': ''}, 'http://169.254.169.254/'),
({}, 'http://169.254.169.254/'),
(None, 'http://169.254.169.254/')
]
for config, expected_url in configs:
self._test_imds_base_url(config, expected_url)
def test_includes_user_agent_header(self):
user_agent = 'my-user-agent'
self.add_default_imds_responses()
InstanceMetadataFetcher(
user_agent=user_agent).retrieve_iam_role_credentials()
self.assertEqual(self._send.call_count, 3)
        for call in self._send.call_args_list:
            self.assertEqual(call[0][0].headers['User-Agent'], user_agent)
def test_non_200_response_for_role_name_is_retried(self):
        # A role-name response with a non-200 status code should
        # be retried.
self.add_get_token_imds_response(token='token')
self.add_imds_response(
status_code=429, body=b'{"message": "Slow down"}')
self.add_get_role_name_imds_response()
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
self.assertEqual(result, self._expected_creds)
def test_http_connection_error_for_role_name_is_retried(self):
# Connection related errors should be retried
self.add_get_token_imds_response(token='token')
self.add_imds_connection_error(ConnectionClosedError(endpoint_url=''))
self.add_get_role_name_imds_response()
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
self.assertEqual(result, self._expected_creds)
def test_empty_response_for_role_name_is_retried(self):
        # An empty response body for the role name should
        # be retried.
self.add_get_token_imds_response(token='token')
self.add_imds_response(body=b'')
self.add_get_role_name_imds_response()
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
self.assertEqual(result, self._expected_creds)
def test_non_200_response_is_retried(self):
self.add_get_token_imds_response(token='token')
self.add_get_role_name_imds_response()
        # A credentials response with a non-200 status code should
        # be retried.
self.add_imds_response(
status_code=429, body=b'{"message": "Slow down"}')
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
self.assertEqual(result, self._expected_creds)
def test_http_connection_errors_is_retried(self):
self.add_get_token_imds_response(token='token')
self.add_get_role_name_imds_response()
# Connection related errors should be retried
self.add_imds_connection_error(ConnectionClosedError(endpoint_url=''))
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
self.assertEqual(result, self._expected_creds)
def test_empty_response_is_retried(self):
self.add_get_token_imds_response(token='token')
self.add_get_role_name_imds_response()
# Response for creds that has a 200 status code but is empty.
# This should be retried.
self.add_imds_response(body=b'')
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
self.assertEqual(result, self._expected_creds)
def test_invalid_json_is_retried(self):
self.add_get_token_imds_response(token='token')
self.add_get_role_name_imds_response()
# Response for creds that has a 200 status code but is invalid JSON.
# This should be retried.
self.add_imds_response(body=b'{"AccessKey":')
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
self.assertEqual(result, self._expected_creds)
def test_exhaust_retries_on_role_name_request(self):
self.add_get_token_imds_response(token='token')
self.add_imds_response(status_code=400, body=b'')
result = InstanceMetadataFetcher(
num_attempts=1).retrieve_iam_role_credentials()
self.assertEqual(result, {})
def test_exhaust_retries_on_credentials_request(self):
self.add_get_token_imds_response(token='token')
self.add_get_role_name_imds_response()
self.add_imds_response(status_code=400, body=b'')
result = InstanceMetadataFetcher(
num_attempts=1).retrieve_iam_role_credentials()
self.assertEqual(result, {})
def test_missing_fields_in_credentials_response(self):
self.add_get_token_imds_response(token='token')
self.add_get_role_name_imds_response()
# Response for creds that has a 200 status code and a JSON body
# representing an error. We do not necessarily want to retry this.
self.add_imds_response(
body=b'{"Code":"AssumeRoleUnauthorizedAccess","Message":"error"}')
result = InstanceMetadataFetcher().retrieve_iam_role_credentials()
self.assertEqual(result, {})
def test_token_is_included(self):
user_agent = 'my-user-agent'
self.add_default_imds_responses()
result = InstanceMetadataFetcher(
user_agent=user_agent).retrieve_iam_role_credentials()
# Check that subsequent calls after getting the token include the token.
self.assertEqual(self._send.call_count, 3)
for call in self._send.call_args_list[1:]:
self.assertEqual(call[0][0].headers['x-aws-ec2-metadata-token'], 'token')
self.assertEqual(result, self._expected_creds)
def test_metadata_token_not_supported_404(self):
user_agent = 'my-user-agent'
self.add_imds_response(b'', status_code=404)
self.add_get_role_name_imds_response()
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
user_agent=user_agent).retrieve_iam_role_credentials()
for call in self._send.call_args_list[1:]:
self.assertNotIn('x-aws-ec2-metadata-token', call[0][0].headers)
self.assertEqual(result, self._expected_creds)
def test_metadata_token_not_supported_403(self):
user_agent = 'my-user-agent'
self.add_imds_response(b'', status_code=403)
self.add_get_role_name_imds_response()
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
user_agent=user_agent).retrieve_iam_role_credentials()
for call in self._send.call_args_list[1:]:
self.assertNotIn('x-aws-ec2-metadata-token', call[0][0].headers)
self.assertEqual(result, self._expected_creds)
def test_metadata_token_not_supported_405(self):
user_agent = 'my-user-agent'
self.add_imds_response(b'', status_code=405)
self.add_get_role_name_imds_response()
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
user_agent=user_agent).retrieve_iam_role_credentials()
for call in self._send.call_args_list[1:]:
self.assertNotIn('x-aws-ec2-metadata-token', call[0][0].headers)
self.assertEqual(result, self._expected_creds)
def test_metadata_token_not_supported_timeout(self):
user_agent = 'my-user-agent'
self.add_imds_connection_error(ReadTimeoutError(endpoint_url='url'))
self.add_get_role_name_imds_response()
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
user_agent=user_agent).retrieve_iam_role_credentials()
for call in self._send.call_args_list[1:]:
self.assertNotIn('x-aws-ec2-metadata-token', call[0][0].headers)
self.assertEqual(result, self._expected_creds)
def test_token_not_supported_exhaust_retries(self):
user_agent = 'my-user-agent'
self.add_imds_connection_error(ConnectTimeoutError(endpoint_url='url'))
self.add_get_role_name_imds_response()
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
user_agent=user_agent).retrieve_iam_role_credentials()
for call in self._send.call_args_list[1:]:
self.assertNotIn('x-aws-ec2-metadata-token', call[0][0].headers)
self.assertEqual(result, self._expected_creds)
def test_metadata_token_bad_request_yields_no_credentials(self):
user_agent = 'my-user-agent'
self.add_imds_response(b'', status_code=400)
result = InstanceMetadataFetcher(
user_agent=user_agent).retrieve_iam_role_credentials()
self.assertEqual(result, {})
class TestIMDSRegionProvider(unittest.TestCase):
def setUp(self):
self.environ = {}
self.environ_patch = mock.patch('os.environ', self.environ)
self.environ_patch.start()
def tearDown(self):
self.environ_patch.stop()
def assert_does_provide_expected_value(self, fetcher_region=None,
expected_result=None,):
fake_session = mock.Mock(spec=Session)
fake_fetcher = mock.Mock(spec=InstanceMetadataRegionFetcher)
fake_fetcher.retrieve_region.return_value = fetcher_region
provider = IMDSRegionProvider(fake_session, fetcher=fake_fetcher)
value = provider.provide()
self.assertEqual(value, expected_result)
def test_does_provide_region_when_present(self):
self.assert_does_provide_expected_value(
fetcher_region='us-mars-2',
expected_result='us-mars-2',
)
def test_does_provide_none(self):
self.assert_does_provide_expected_value(
fetcher_region=None,
expected_result=None,
)
@mock.patch('botocore.httpsession.URLLib3Session.send')
def test_use_truncated_user_agent(self, send):
        session = Session()
session.user_agent_version = '3.0'
provider = IMDSRegionProvider(session)
provider.provide()
args, _ = send.call_args
self.assertIn('Botocore/3.0', args[0].headers['User-Agent'])
@mock.patch('botocore.httpsession.URLLib3Session.send')
def test_can_use_ipv6(self, send):
session = Session()
session.set_config_variable('imds_use_ipv6', True)
provider = IMDSRegionProvider(session)
provider.provide()
args, _ = send.call_args
self.assertIn('[fd00:ec2::254]', args[0].url)
@mock.patch('botocore.httpsession.URLLib3Session.send')
def test_use_ipv4_by_default(self, send):
session = Session()
provider = IMDSRegionProvider(session)
provider.provide()
args, _ = send.call_args
self.assertIn('169.254.169.254', args[0].url)
@mock.patch('botocore.httpsession.URLLib3Session.send')
def test_can_set_imds_endpoint_mode_to_ipv4(self, send):
session = Session()
session.set_config_variable(
'ec2_metadata_service_endpoint_mode', 'ipv4')
provider = IMDSRegionProvider(session)
provider.provide()
args, _ = send.call_args
self.assertIn('169.254.169.254', args[0].url)
@mock.patch('botocore.httpsession.URLLib3Session.send')
def test_can_set_imds_endpoint_mode_to_ipv6(self, send):
session = Session()
session.set_config_variable(
'ec2_metadata_service_endpoint_mode', 'ipv6')
provider = IMDSRegionProvider(session)
provider.provide()
args, _ = send.call_args
self.assertIn('[fd00:ec2::254]', args[0].url)
@mock.patch('botocore.httpsession.URLLib3Session.send')
def test_can_set_imds_service_endpoint(self, send):
session = Session()
session.set_config_variable(
'ec2_metadata_service_endpoint', 'http://myendpoint/')
provider = IMDSRegionProvider(session)
provider.provide()
args, _ = send.call_args
self.assertIn('http://myendpoint/', args[0].url)
@mock.patch('botocore.httpsession.URLLib3Session.send')
def test_can_set_imds_service_endpoint_custom(self, send):
session = Session()
session.set_config_variable(
'ec2_metadata_service_endpoint', 'http://myendpoint')
provider = IMDSRegionProvider(session)
provider.provide()
args, _ = send.call_args
self.assertIn('http://myendpoint/latest/meta-data', args[0].url)
@mock.patch('botocore.httpsession.URLLib3Session.send')
def test_imds_service_endpoint_overrides_ipv6_endpoint(self, send):
session = Session()
session.set_config_variable(
'ec2_metadata_service_endpoint_mode', 'ipv6')
session.set_config_variable(
'ec2_metadata_service_endpoint', 'http://myendpoint/')
provider = IMDSRegionProvider(session)
provider.provide()
args, _ = send.call_args
self.assertIn('http://myendpoint/', args[0].url)
class TestSSOTokenLoader(unittest.TestCase):
def setUp(self):
super(TestSSOTokenLoader, self).setUp()
self.start_url = 'https://d-abc123.awsapps.com/start'
self.cache_key = '40a89917e3175433e361b710a9d43528d7f1890a'
self.access_token = 'totally.a.token'
self.cached_token = {
'accessToken': self.access_token,
'expiresAt': '2002-10-18T03:52:38UTC'
}
self.cache = {}
self.loader = SSOTokenLoader(cache=self.cache)
def test_can_load_token_exists(self):
self.cache[self.cache_key] = self.cached_token
access_token = self.loader(self.start_url)
self.assertEqual(self.access_token, access_token)
def test_can_handle_does_not_exist(self):
with self.assertRaises(SSOTokenLoadError):
self.loader(self.start_url)
def test_can_handle_invalid_cache(self):
self.cache[self.cache_key] = {}
with self.assertRaises(SSOTokenLoadError):
self.loader(self.start_url)
@pytest.mark.parametrize(
'header_name, headers, expected',
(
('test_header', {'test_header': 'foo'}, True),
('Test_Header', {'test_header': 'foo'}, True),
('test_header', {'Test_Header': 'foo'}, True),
('missing_header', {'Test_Header': 'foo'}, False),
(None, {'Test_Header': 'foo'}, False),
('test_header', HeadersDict({'test_header': 'foo'}), True),
('Test_Header', HeadersDict({'test_header': 'foo'}), True),
('test_header', HeadersDict({'Test_Header': 'foo'}), True),
('missing_header', HeadersDict({'Test_Header': 'foo'}), False),
(None, HeadersDict({'Test_Header': 'foo'}), False),
)
)
def test_has_header(header_name, headers, expected):
assert has_header(header_name, headers) is expected
|
the-stack_106_31492 | # landmarks.py
#
# test creating landmarks
from arena import *
scene = Scene(host="arena.andrew.cmu.edu", realm="realm", scene="test")
object_id = "the_box"
@scene.run_once
def make_box():
scene.add_landmark(
Box(object_id=object_id, position=(0,2,-2)), label="The Box")
scene.run_tasks()
|
the-stack_106_31493 | from mpi4py import MPI
import numpy as np
import random as ra
import datetime
import time
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
s = MPI.Status()
def do_something_else(data, d, rank):
    if rank == 0:
        print("Core {} sent message to Core {} at time: {}".format(rank, d, data.time()))
    else:
        print("Core {} received message from Core {} at time: {}, with tag {}.".format(
            rank, d.Get_source(), data.time(), d.Get_tag()))
if rank == 0:
    any_tag = ra.randint(0, 10)
    data_1 = datetime.datetime.now()
    for i in range(1, size):
        comm.send(data_1, dest=i, tag=any_tag)
        do_something_else(data_1, i, rank)
else:
    comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=s)
    data_2 = datetime.datetime.now()
    do_something_else(data_2, s, rank)
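# To try this example, launch it under an MPI runtime with several processes,
# e.g. `mpiexec -n 4 python this_script.py` (the script name is illustrative;
# use whatever filename this file is saved as).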
|
the-stack_106_31494 | # Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto.common.resources import FileResource
class ReventFile(FileResource):
name = 'revent'
def __init__(self, owner, stage):
super(ReventFile, self).__init__(owner)
self.stage = stage
class JarFile(FileResource):
name = 'jar'
class ApkFile(FileResource):
name = 'apk'
def __init__(self, owner, platform=None, uiauto=False, package=None):
super(ApkFile, self).__init__(owner)
self.platform = platform
self.uiauto = uiauto
self.package = package
def __str__(self):
apk_type = 'uiautomator ' if self.uiauto else ''
return '<{}\'s {} {}APK>'.format(self.owner, self.platform, apk_type)
|
the-stack_106_31495 | import os
import unittest
from recipe_scrapers.allrecipes import AllRecipes
class TestAllRecipesScraper(unittest.TestCase):
def setUp(self):
# tests are run from tests.py
with open(os.path.join(
os.getcwd(),
'recipe_scrapers',
'tests',
'test_data',
'allrecipes.testhtml'
)) as file_opened:
self.harvester_class = AllRecipes(file_opened, test=True)
def test_host(self):
self.assertEqual(
'allrecipes.com',
self.harvester_class.host()
)
def test_title(self):
self.assertEqual(
self.harvester_class.title(),
'Four Cheese Margherita Pizza'
)
def test_total_time(self):
self.assertEqual(
40,
self.harvester_class.total_time()
)
def test_ingredients(self):
self.assertCountEqual(
[
'1/4 cup olive oil',
'1 tablespoon minced garlic',
'1/2 teaspoon sea salt',
'8 Roma tomatoes, sliced',
'2 (12 inch) pre-baked pizza crusts',
'8 ounces shredded Mozzarella cheese',
'4 ounces shredded Fontina cheese',
'10 fresh basil leaves, washed, dried',
'1/2 cup freshly grated Parmesan cheese',
'1/2 cup crumbled feta cheese'
],
self.harvester_class.ingredients()
)
def test_instructions(self):
return self.assertEqual(
'Stir together olive oil, garlic, and salt; toss with tomatoes, and allow to stand for 15 minutes. Preheat oven to 400 degrees F (200 degrees C).\nBrush each pizza crust with some of the tomato marinade. Sprinkle the pizzas evenly with Mozzarella and Fontina cheeses. Arrange tomatoes overtop, then sprinkle with shredded basil, Parmesan, and feta cheese.\nBake in preheated oven until the cheese is bubbly and golden brown, about 10 minutes.\n',
self.harvester_class.instructions()
)
def test_ratings(self):
self.assertEqual(
4.81,
self.harvester_class.ratings()
)
|
the-stack_106_31496 | #! /usr/bin/env python
# Last Change: Mon Aug 20 08:00 PM 2007 J
from __future__ import division, print_function, absolute_import
import re
import itertools
import datetime
from functools import partial
import numpy as np
from scipy.lib.six import next
"""A module to read arff files."""
__all__ = ['MetaData', 'loadarff', 'ArffError', 'ParseArffError']
# An Arff file is basically two parts:
# - header
# - data
#
# A header has each of its components starting by @META where META is one of
# the keyword (attribute of relation, for now).
# TODO:
# - both integer and reals are treated as numeric -> the integer info is lost !
# - Replace ValueError by ParseError or something
# We know can handle the following:
# - numeric and nominal attributes
# - missing values for numeric attributes
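# For reference, a minimal ARFF file of the kind this parser handles
# (attribute names and values below are made up) looks like:
#
#   @relation weather
#   @attribute temperature real
#   @attribute outlook {sunny, rainy}
#   @data
#   21.5, sunny
#   ?, rainy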
r_meta = re.compile('^\s*@')
# Match a comment
r_comment = re.compile(r'^%')
# Match an empty line
r_empty = re.compile(r'^\s+$')
# Match a header line, that is a line which starts by @ + a word
r_headerline = re.compile(r'^@\S*')
r_datameta = re.compile(r'^@[Dd][Aa][Tt][Aa]')
r_relation = re.compile(r'^@[Rr][Ee][Ll][Aa][Tt][Ii][Oo][Nn]\s*(\S*)')
r_attribute = re.compile(r'^@[Aa][Tt][Tt][Rr][Ii][Bb][Uu][Tt][Ee]\s*(..*$)')
# To get attribute names enclosed in ''
r_comattrval = re.compile(r"'(..+)'\s+(..+$)")
# To get attribute names enclosed in '', possibly spread across multiple lines
r_mcomattrval = re.compile(r"'([..\n]+)'\s+(..+$)")
# To get normal attributes
r_wcomattrval = re.compile(r"(\S+)\s+(..+$)")
#-------------------------
# Module defined exception
#-------------------------
class ArffError(IOError):
pass
class ParseArffError(ArffError):
pass
#------------------
# Various utilities
#------------------
# An attribute is defined as @attribute name value
def parse_type(attrtype):
"""Given an arff attribute value (meta data), returns its type.
Expect the value to be a name."""
uattribute = attrtype.lower().strip()
if uattribute[0] == '{':
return 'nominal'
elif uattribute[:len('real')] == 'real':
return 'numeric'
elif uattribute[:len('integer')] == 'integer':
return 'numeric'
elif uattribute[:len('numeric')] == 'numeric':
return 'numeric'
elif uattribute[:len('string')] == 'string':
return 'string'
elif uattribute[:len('relational')] == 'relational':
return 'relational'
elif uattribute[:len('date')] == 'date':
return 'date'
else:
raise ParseArffError("unknown attribute %s" % uattribute)
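# A few illustrative mappings implied by the rules above:
#   parse_type("real")            -> 'numeric'
#   parse_type("integer")         -> 'numeric'
#   parse_type("{sunny, rainy}")  -> 'nominal'
#   parse_type("date yyyy-MM-dd") -> 'date'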
def get_nominal(attribute):
"""If attribute is nominal, returns a list of the values"""
return attribute.split(',')
def read_data_list(ofile):
"""Read each line of the iterable and put it in a list."""
data = [next(ofile)]
if data[0].strip()[0] == '{':
raise ValueError("This looks like a sparse ARFF: not supported yet")
data.extend([i for i in ofile])
return data
def get_ndata(ofile):
"""Read the whole file to get number of data attributes."""
data = [next(ofile)]
loc = 1
if data[0].strip()[0] == '{':
raise ValueError("This looks like a sparse ARFF: not supported yet")
for i in ofile:
loc += 1
return loc
def maxnomlen(atrv):
"""Given a string containing a nominal type definition, returns the
string len of the biggest component.
    A nominal type is defined as something framed between braces ({}).
Parameters
----------
atrv : str
Nominal type definition
Returns
-------
slen : int
length of longest component
Examples
--------
maxnomlen("{floup, bouga, fl, ratata}") returns 6 (the size of
ratata, the longest nominal value).
>>> maxnomlen("{floup, bouga, fl, ratata}")
6
"""
nomtp = get_nom_val(atrv)
return max(len(i) for i in nomtp)
def get_nom_val(atrv):
"""Given a string containing a nominal type, returns a tuple of the
possible values.
A nominal type is defined as something framed between braces ({}).
Parameters
----------
atrv : str
Nominal type definition
Returns
-------
poss_vals : tuple
possible values
Examples
--------
>>> get_nom_val("{floup, bouga, fl, ratata}")
('floup', 'bouga', 'fl', 'ratata')
"""
r_nominal = re.compile('{(.+)}')
m = r_nominal.match(atrv)
if m:
return tuple(i.strip() for i in m.group(1).split(','))
else:
raise ValueError("This does not look like a nominal string")
def get_date_format(atrv):
r_date = re.compile(r"[Dd][Aa][Tt][Ee]\s+[\"']?(.+?)[\"']?$")
m = r_date.match(atrv)
if m:
pattern = m.group(1).strip()
# convert time pattern from Java's SimpleDateFormat to C's format
datetime_unit = None
if "yyyy" in pattern:
pattern = pattern.replace("yyyy", "%Y")
datetime_unit = "Y"
elif "yy":
pattern = pattern.replace("yy", "%y")
datetime_unit = "Y"
if "MM" in pattern:
pattern = pattern.replace("MM", "%m")
datetime_unit = "M"
if "dd" in pattern:
pattern = pattern.replace("dd", "%d")
datetime_unit = "D"
if "HH" in pattern:
pattern = pattern.replace("HH", "%H")
datetime_unit = "h"
if "mm" in pattern:
pattern = pattern.replace("mm", "%M")
datetime_unit = "m"
if "ss" in pattern:
pattern = pattern.replace("ss", "%S")
datetime_unit = "s"
if "z" in pattern or "Z" in pattern:
raise ValueError("Date type attributes with time zone not supported, yet")
if datetime_unit is None:
raise ValueError("Invalid or unsupported date format")
return pattern, datetime_unit
else:
raise ValueError("Invalid or no date format")
def go_data(ofile):
"""Skip header.
    The first next() call of the returned iterator will be the @data line."""
return itertools.dropwhile(lambda x: not r_datameta.match(x), ofile)
#----------------
# Parsing header
#----------------
def tokenize_attribute(iterable, attribute):
"""Parse a raw string in header (eg starts by @attribute).
Given a raw string attribute, try to get the name and type of the
attribute. Constraints:
* The first line must start with @attribute (case insensitive, and
space like characters before @attribute are allowed)
* Works also if the attribute is spread on multilines.
* Works if empty lines or comments are in between
Parameters
----------
attribute : str
the attribute string.
Returns
-------
name : str
name of the attribute
value : str
value of the attribute
next : str
next line to be parsed
Examples
--------
If attribute is a string defined in python as r"floupi real", will
return floupi as name, and real as value.
>>> iterable = iter([0] * 10) # dummy iterator
>>> tokenize_attribute(iterable, r"@attribute floupi real")
('floupi', 'real', 0)
If attribute is r"'floupi 2' real", will return 'floupi 2' as name,
and real as value.
>>> tokenize_attribute(iterable, r" @attribute 'floupi 2' real ")
('floupi 2', 'real', 0)
"""
sattr = attribute.strip()
mattr = r_attribute.match(sattr)
if mattr:
# atrv is everything after @attribute
atrv = mattr.group(1)
if r_comattrval.match(atrv):
name, type = tokenize_single_comma(atrv)
next_item = next(iterable)
elif r_wcomattrval.match(atrv):
name, type = tokenize_single_wcomma(atrv)
next_item = next(iterable)
else:
# Not sure we should support this, as it does not seem supported by
# weka.
raise ValueError("multi line not supported yet")
#name, type, next_item = tokenize_multilines(iterable, atrv)
else:
raise ValueError("First line unparsable: %s" % sattr)
if type == 'relational':
raise ValueError("relational attributes not supported yet")
return name, type, next_item
def tokenize_single_comma(val):
# XXX we match twice the same string (here and at the caller level). It is
# stupid, but it is easier for now...
m = r_comattrval.match(val)
if m:
try:
name = m.group(1).strip()
type = m.group(2).strip()
except IndexError:
raise ValueError("Error while tokenizing attribute")
else:
raise ValueError("Error while tokenizing single %s" % val)
return name, type
def tokenize_single_wcomma(val):
# XXX we match twice the same string (here and at the caller level). It is
# stupid, but it is easier for now...
m = r_wcomattrval.match(val)
if m:
try:
name = m.group(1).strip()
type = m.group(2).strip()
except IndexError:
raise ValueError("Error while tokenizing attribute")
else:
raise ValueError("Error while tokenizing single %s" % val)
return name, type
def read_header(ofile):
"""Read the header of the iterable ofile."""
i = next(ofile)
# Pass first comments
while r_comment.match(i):
i = next(ofile)
# Header is everything up to DATA attribute ?
relation = None
attributes = []
while not r_datameta.match(i):
m = r_headerline.match(i)
if m:
isattr = r_attribute.match(i)
if isattr:
name, type, i = tokenize_attribute(ofile, i)
attributes.append((name, type))
else:
isrel = r_relation.match(i)
if isrel:
relation = isrel.group(1)
else:
raise ValueError("Error parsing line %s" % i)
i = next(ofile)
else:
i = next(ofile)
return relation, attributes
#--------------------
# Parsing actual data
#--------------------
def safe_float(x):
"""given a string x, convert it to a float. If the stripped string is a ?,
return a Nan (missing value).
Parameters
----------
x : str
string to convert
Returns
-------
f : float
where float can be nan
Examples
--------
>>> safe_float('1')
1.0
>>> safe_float('1\\n')
1.0
>>> safe_float('?\\n')
nan
"""
if '?' in x:
return np.nan
else:
return np.float(x)
def safe_nominal(value, pvalue):
svalue = value.strip()
if svalue in pvalue:
return svalue
elif svalue == '?':
return svalue
else:
raise ValueError("%s value not in %s" % (str(svalue), str(pvalue)))
def safe_date(value, date_format, datetime_unit):
date_str = value.strip().strip("'").strip('"')
if date_str == '?':
return np.datetime64('NaT', datetime_unit)
else:
dt = datetime.datetime.strptime(date_str, date_format)
return np.datetime64(dt).astype("datetime64[%s]" % datetime_unit)
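# Example (a sketch): safe_date("'2004-12-01'", "%Y-%m-%d", "D") gives
# numpy.datetime64('2004-12-01'), while safe_date("?", "%Y-%m-%d", "D") gives
# numpy.datetime64('NaT'), the missing-value marker.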
def get_delim(line):
"""Given a string representing a line of data, check whether the
delimiter is ',' or space.
Parameters
----------
line : str
line of data
Returns
-------
delim : {',', ' '}
Examples
--------
>>> get_delim(',')
','
>>> get_delim(' ')
' '
>>> get_delim(', ')
','
>>> get_delim('x')
Traceback (most recent call last):
...
ValueError: delimiter not understood: x
"""
if ',' in line:
return ','
if ' ' in line:
return ' '
raise ValueError("delimiter not understood: " + line)
class MetaData(object):
"""Small container to keep useful informations on a ARFF dataset.
Knows about attributes names and types.
Examples
--------
data, meta = loadarff('iris.arff')
    # This will print the attribute names of the iris.arff dataset
    for i in meta:
        print(i)
# This works too
meta.names()
# Getting attribute type
types = meta.types()
Notes
-----
Also maintains the list of attributes in order, i.e. doing for i in
meta, where meta is an instance of MetaData, will return the
different attribute names in the order they were defined.
"""
def __init__(self, rel, attr):
self.name = rel
# We need the dictionary to be ordered
# XXX: may be better to implement an ordered dictionary
self._attributes = {}
self._attrnames = []
for name, value in attr:
tp = parse_type(value)
self._attrnames.append(name)
if tp == 'nominal':
self._attributes[name] = (tp, get_nom_val(value))
elif tp == 'date':
self._attributes[name] = (tp, get_date_format(value)[0])
else:
self._attributes[name] = (tp, None)
def __repr__(self):
msg = ""
msg += "Dataset: %s\n" % self.name
for i in self._attrnames:
msg += "\t%s's type is %s" % (i, self._attributes[i][0])
if self._attributes[i][1]:
msg += ", range is %s" % str(self._attributes[i][1])
msg += '\n'
return msg
def __iter__(self):
return iter(self._attrnames)
def __getitem__(self, key):
return self._attributes[key]
def names(self):
"""Return the list of attribute names."""
return self._attrnames
def types(self):
"""Return the list of attribute types."""
attr_types = [self._attributes[name][0] for name in self._attrnames]
return attr_types
def loadarff(f):
"""
Read an arff file.
The data is returned as a record array, which can be accessed much like
a dictionary of numpy arrays. For example, if one of the attributes is
called 'pressure', then its first 10 data points can be accessed from the
``data`` record array like so: ``data['pressure'][0:10]``
Parameters
----------
f : file-like or str
File-like object to read from, or filename to open.
Returns
-------
data : record array
The data of the arff file, accessible by attribute names.
meta : `MetaData`
Contains information about the arff file such as name and
type of attributes, the relation (name of the dataset), etc...
Raises
------
ParseArffError
This is raised if the given file is not ARFF-formatted.
NotImplementedError
The ARFF file has an attribute which is not supported yet.
Notes
-----
    This function should be able to read most arff files. The main piece of
    functionality that is not implemented yet is support for string type
    attributes.
    It can read files with numeric, nominal and date attributes. It cannot read
files with sparse data ({} in the file). However, this function can
read files with missing data (? in the file), representing the data
points as NaNs.
"""
if hasattr(f, 'read'):
ofile = f
else:
ofile = open(f, 'rt')
try:
return _loadarff(ofile)
finally:
if ofile is not f: # only close what we opened
ofile.close()
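# Minimal usage sketch (the file name and attribute name are illustrative):
#
#   data, meta = loadarff('iris.arff')
#   print(meta)               # summary of attribute names and types
#   print(data['class'][:5])  # access a column by attribute name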
def _loadarff(ofile):
# Parse the header file
try:
rel, attr = read_header(ofile)
except ValueError as e:
msg = "Error while parsing header, error was: " + str(e)
raise ParseArffError(msg)
# Check whether we have a string attribute (not supported yet)
hasstr = False
for name, value in attr:
type = parse_type(value)
if type == 'string':
hasstr = True
meta = MetaData(rel, attr)
# XXX The following code is not great
# Build the type descriptor descr and the list of convertors to convert
# each attribute to the suitable type (which should match the one in
# descr).
# This can be used once we want to support integer as integer values and
# not as numeric anymore (using masked arrays ?).
acls2dtype = {'real': np.float, 'integer': np.float, 'numeric': np.float}
acls2conv = {'real': safe_float, 'integer': safe_float, 'numeric': safe_float}
descr = []
convertors = []
if not hasstr:
for name, value in attr:
type = parse_type(value)
if type == 'date':
date_format, datetime_unit = get_date_format(value)
descr.append((name, "datetime64[%s]" % datetime_unit))
convertors.append(partial(safe_date, date_format=date_format, datetime_unit=datetime_unit))
elif type == 'nominal':
n = maxnomlen(value)
descr.append((name, 'S%d' % n))
pvalue = get_nom_val(value)
convertors.append(partial(safe_nominal, pvalue=pvalue))
else:
descr.append((name, acls2dtype[type]))
convertors.append(safe_float)
#dc.append(acls2conv[type])
#sdescr.append((name, acls2sdtype[type]))
else:
# How to support string efficiently ? Ideally, we should know the max
# size of the string before allocating the numpy array.
raise NotImplementedError("String attributes not supported yet, sorry")
ni = len(convertors)
# Get the delimiter from the first line of data:
def next_data_line(row_iter):
"""Assumes we are already in the data part (eg after @data)."""
raw = next(row_iter)
while r_empty.match(raw) or r_comment.match(raw):
raw = next(row_iter)
return raw
try:
try:
dtline = next_data_line(ofile)
delim = get_delim(dtline)
except ValueError as e:
raise ParseArffError("Error while parsing delimiter: " + str(e))
finally:
ofile.seek(0, 0)
ofile = go_data(ofile)
# skip the @data line
next(ofile)
def generator(row_iter, delim=','):
# TODO: this is where we are spending times (~80%). I think things
# could be made more efficiently:
# - We could for example "compile" the function, because some values
# do not change here.
# - The function to convert a line to dtyped values could also be
# generated on the fly from a string and be executed instead of
# looping.
# - The regex are overkill: for comments, checking that a line starts
# by % should be enough and faster, and for empty lines, same thing
# --> this does not seem to change anything.
# We do not abstract skipping comments and empty lines for performances
# reason.
raw = next(row_iter)
while r_empty.match(raw) or r_comment.match(raw):
raw = next(row_iter)
# 'compiling' the range since it does not change
# Note, I have already tried zipping the converters and
# row elements and got slightly worse performance.
elems = list(range(ni))
row = raw.split(delim)
yield tuple([convertors[i](row[i]) for i in elems])
for raw in row_iter:
while r_comment.match(raw) or r_empty.match(raw):
raw = next(row_iter)
row = raw.split(delim)
yield tuple([convertors[i](row[i]) for i in elems])
a = generator(ofile, delim=delim)
# No error should happen here: it is a bug otherwise
data = np.fromiter(a, descr)
return data, meta
#-----
# Misc
#-----
def basic_stats(data):
nbfac = data.size * 1. / (data.size - 1)
return np.nanmin(data), np.nanmax(data), np.mean(data), np.std(data) * nbfac
def print_attribute(name, tp, data):
type = tp[0]
if type == 'numeric' or type == 'real' or type == 'integer':
min, max, mean, std = basic_stats(data)
print("%s,%s,%f,%f,%f,%f" % (name, type, min, max, mean, std))
else:
msg = name + ",{"
for i in range(len(tp[1])-1):
msg += tp[1][i] + ","
msg += tp[1][-1]
msg += "}"
print(msg)
def test_weka(filename):
data, meta = loadarff(filename)
print(len(data.dtype))
print(data.size)
for i in meta:
print_attribute(i,meta[i],data[i])
# make sure nose does not find this as a test
test_weka.__test__ = False
if __name__ == '__main__':
import sys
filename = sys.argv[1]
test_weka(filename)
|
the-stack_106_31498 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import gym
import numpy as np
# This is heavily based on
# https://github.com/denisyarats/dmc2gym/blob/master/dmc2gym/wrappers.py
# but adapted to gym environments (instead of dmcontrol)
class MujocoGymPixelWrapper(gym.Wrapper):
def __init__(
self,
env: gym.Env,
image_width: int = 84,
image_height: int = 84,
frame_skip: int = 1,
camera_id: int = 0,
channels_first: bool = True,
):
super().__init__(env)
self._image_width = image_width
self._image_height = image_height
self._channels_first = channels_first
self._frame_skip = frame_skip
self._camera_id = camera_id
shape = (
[3, image_height, image_width]
if channels_first
else [image_height, image_width, 3]
)
self.observation_space = gym.spaces.Box(
low=0, high=255, shape=shape, dtype=np.uint8
)
self._true_action_space = env.action_space
self.action_space = gym.spaces.Box(
low=-1.0, high=1.0, shape=self._true_action_space.shape, dtype=np.float32
)
def _get_obs(self):
obs = self.render()
if self._channels_first:
obs = np.transpose(obs, (2, 0, 1))
return obs
def _convert_action(self, action):
action = action.astype(np.float64)
true_delta = self._true_action_space.high - self._true_action_space.low
norm_delta = self.action_space.high - self.action_space.low
action = (action - self.action_space.low) / norm_delta
action = action * true_delta + self._true_action_space.low
action = action.astype(np.float32)
return action
def reset(self):
self.env.reset()
return self._get_obs()
def step(self, action):
action = self._convert_action(action)
total_reward = 0.0
done = False
for _ in range(self._frame_skip):
_, reward, done, _ = self.env.step(action)
total_reward += reward
if done:
break
next_obs = self._get_obs()
return next_obs, total_reward, done, {}
def render(self, mode="rgb_array", height=None, width=None, camera_id=None):
height = height or self._image_height
width = width or self._image_width
camera_id = camera_id or self._camera_id
return self.env.render(
mode=mode, height=height, width=width, camera_id=camera_id
)
def seed(self, seed=None):
self._true_action_space.seed(seed)
self.action_space.seed(seed)
self.observation_space.seed(seed)
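# Minimal usage sketch (assumes a MuJoCo-backed gym environment is installed;
# the environment id below is illustrative):
#
#   env = MujocoGymPixelWrapper(gym.make("HalfCheetah-v2"),
#                               image_width=64, image_height=64, frame_skip=2)
#   obs = env.reset()                      # uint8 array of shape (3, 64, 64)
#   obs, reward, done, info = env.step(env.action_space.sample())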
|
the-stack_106_31499 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import sys
import traceback
from datetime import datetime
from http import HTTPStatus
from aiohttp import web
from aiohttp.web import Request, Response, json_response
from botbuilder.core import (
BotFrameworkAdapterSettings,
TurnContext,
BotFrameworkAdapter,
)
from botbuilder.core.integration import aiohttp_error_middleware
from botbuilder.schema import Activity, ActivityTypes
from bots import TeamsTaskModuleBot
from config import DefaultConfig
CONFIG = DefaultConfig()
# Create adapter.
# See https://aka.ms/about-bot-adapter to learn more about how bots work.
SETTINGS = BotFrameworkAdapterSettings(CONFIG.APP_ID, CONFIG.APP_PASSWORD)
ADAPTER = BotFrameworkAdapter(SETTINGS)
# Catch-all for errors.
async def on_error(context: TurnContext, error: Exception):
    # This check writes out errors to the console log vs. app insights.
# NOTE: In production environment, you should consider logging this to Azure
# application insights.
print(f"\n [on_turn_error] unhandled error: {error}", file=sys.stderr)
traceback.print_exc()
# Send a message to the user
await context.send_activity("The bot encountered an error or bug.")
await context.send_activity(
"To continue to run this bot, please fix the bot source code."
)
# Send a trace activity if we're talking to the Bot Framework Emulator
if context.activity.channel_id == "emulator":
# Create a trace activity that contains the error object
trace_activity = Activity(
label="TurnError",
name="on_turn_error Trace",
timestamp=datetime.utcnow(),
type=ActivityTypes.trace,
value=f"{error}",
value_type="https://www.botframework.com/schemas/error",
)
# Send a trace activity, which will be displayed in Bot Framework Emulator
await context.send_activity(trace_activity)
ADAPTER.on_turn_error = on_error
# Create the Bot
BOT = TeamsTaskModuleBot(CONFIG)
# Listen for incoming requests on /api/messages
async def messages(req: Request) -> Response:
# Main bot message handler.
if "application/json" in req.headers["Content-Type"]:
body = await req.json()
else:
return Response(status=HTTPStatus.UNSUPPORTED_MEDIA_TYPE)
activity = Activity().deserialize(body)
auth_header = req.headers["Authorization"] if "Authorization" in req.headers else ""
invoke_response = await ADAPTER.process_activity(activity, auth_header, BOT.on_turn)
if invoke_response:
return json_response(data=invoke_response.body, status=invoke_response.status)
return Response(status=HTTPStatus.OK)
APP = web.Application(middlewares=[aiohttp_error_middleware])
APP.router.add_post("/api/messages", messages)
APP.router.add_static("/", path="./pages/", name="pages")
if __name__ == "__main__":
try:
web.run_app(APP, host="localhost", port=CONFIG.PORT)
except Exception as error:
raise error
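# To try the bot locally you would typically run `python app.py` and point the
# Bot Framework Emulator at http://localhost:<CONFIG.PORT>/api/messages; the
# exact port and app credentials come from DefaultConfig.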
|
the-stack_106_31500 | import pytest
import jsonpickle
from urllib.parse import quote_plus
from namex.services.name_request.auto_analyse import AnalysisIssueCodes
from ..common import assert_issues_count_is_gt, assert_has_word_upper, save_words_list_classification, save_words_list_virtual_word_condition
from ..configuration import ENDPOINT_PATH
from ...common import token_header, claims
@pytest.mark.xfail(raises=ValueError)
def test_name_requires_consent_compound_word_request_response(client, jwt, app):
words_list_classification = [{'word': 'CANADIAN', 'classification': 'DIST'},
{'word': 'CANADIAN', 'classification': 'DESC'},
{'word': 'SUMMERS', 'classification': 'DIST'},
{'word': 'SUMMERS', 'classification': 'DESC'},
{'word': 'GAMES', 'classification': 'DIST'},
{'word': 'GAMES', 'classification': 'DESC'},
{'word': 'BLAKE', 'classification': 'DIST'},
{'word': 'BLAKE', 'classification': 'DESC'},
{'word': 'ENGINEERING', 'classification': 'DIST'},
{'word': 'ENGINEERING', 'classification': 'DESC'},
{'word': 'SAANICH', 'classification': 'DIST'},
{'word': 'FIRE', 'classification': 'DIST'},
{'word': 'FIRE', 'classification': 'DESC'},
{'word': 'PROTECTION', 'classification': 'DIST'},
{'word': 'PROTECTION', 'classification': 'DESC'},
]
save_words_list_classification(words_list_classification)
words_list_virtual_word_condition = [
{
'words': 'SUMMER GAMES, WINTER GAMES',
'consent_required': True, 'allow_use': True
},
{
'words': 'CONSULTING ENGINEER, ENGINEER, ENGINEERING, INGENIERE, INGENIEUR, INGENIEUR CONSIEL, P ENG, PROFESSIONAL ENGINEER',
'consent_required': True, 'allow_use': True
},
{
'words': 'Fire Fighter, Fire Fighters, Fire Protection, Fire Services, Firefighter, Firefighters, Fire Department, Fire Rescue, Fire Suppression',
'consent_required': True, 'allow_use': True
},
{
'words': 'FIRE, FIRE RESPONSE, FIRE DEPARTMENT, FIRE BRIGADE, FIRE SERVICES, FIRE SUPPRESSION, FIRE RESCUE, ROAD RESCUE, STRUCTURE FIRE',
'consent_required': True, 'allow_use': True
},
]
save_words_list_virtual_word_condition(words_list_virtual_word_condition)
# create JWT & setup header with a Bearer Token using the JWT
token = jwt.create_jwt(claims, token_header)
headers = {'Authorization': 'Bearer ' + token, 'content-type': 'application/json'}
test_params = [
# All words are identified as distinctive because none of them are in synonym table
{
'name': 'CANADIAN SUMMERS GAMES LIMITED',
'location': 'BC',
'entity_type_cd': 'CR',
'request_action_cd': 'NEW'
},
{
'name': 'SAANICH FIRE PROTECTION LTD.',
'location': 'BC',
'entity_type_cd': 'CR',
'request_action_cd': 'NEW'
}
]
for entry in test_params:
query = '&'.join("{!s}={}".format(k, quote_plus(v)) for (k, v) in entry.items())
path = ENDPOINT_PATH + '?' + query
print('\n' + 'request: ' + path + '\n')
response = client.get(path, headers=headers)
payload = jsonpickle.decode(response.data)
print("Assert that the payload contains issues")
if isinstance(payload.get('issues'), list):
assert_issues_count_is_gt(0, payload.get('issues'))
assert_has_word_upper(AnalysisIssueCodes.NAME_REQUIRES_CONSENT, payload.get('issues'))
|
the-stack_106_31501 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
sys.path.append(os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'PaddleNLP'
copyright = '2021, PaddleNLP'
author = 'PaddleNLP'
default_role = 'py:obj'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx_rtd_theme',
'recommonmark',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'sphinx_copybutton',
'sphinx_markdown_tables',
'sphinx.ext.viewcode',
'sphinx.ext.coverage',
'sphinx.ext.extlinks',
]
autodoc_default_options = {
'member-order': 'bysource',
'undoc-members': False,
}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
locale_dirs = ['locale/']
gettext_compact = False
language = 'zh_CN'
add_module_names = False
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# source_parsers = {
# '.md': recommonmark.parser.CommonMarkParser,
# }
source_suffix = ['.rst', '.md']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_options = {
'collapse_navigation': True,
'display_version': True,
'navigation_depth': 5,
'navigation_with_keys': True,
'body_max_width': '80%',
}
html_css_files = ['custom.css', ]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_logo = 'paddle.png'
|
the-stack_106_31503 | import itertools
import os.path
from collections import defaultdict
import sys
import pandas as pd
from ahocorasick import Automaton
from itertools import product
from typing import List, Any, Optional, Tuple, Union
NT_SUB = str.maketrans('acgtrymkswhbvdnxACGTRYMKSWHBVDNX',
'tgcayrkmswdvbhnxTGCAYRKMSWDVBHNX')
import time
import re
from multiprocessing import Pool, current_process
from os import fork, getpid
bases_dict = {
'A': ['A'],
'C': ['C'],
'G': ['G'],
'T': ['T'],
'R': ['A', 'G'],
'Y': ['C', 'T'],
'S': ['G', 'C'],
'W': ['A', 'T'],
'K': ['G', 'T'],
'M': ['A', 'C'],
'B': ['C', 'G', 'T'],
'D': ['A', 'G', 'T'],
'H': ['A', 'C', 'T'],
'V': ['A', 'C', 'G'],
'N': ['A', 'C', 'G', 'T'], }
def expand_degenerate_bases(seq):
"""List all possible kmers for a scheme given a degenerate base
Args:
Scheme_kmers from SNV scheme fasta file
Returns:
List of all possible kmers given a degenerate base or not
"""
return list(map("".join, product(*map(bases_dict.get, seq))))
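# Example (a sketch): expand_degenerate_bases("AR") -> ['AA', 'AG'], since R is
# the IUPAC code for A or G.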
def revcomp(s):
"""Reverse complement nucleotide sequence
Args:
s (str): nucleotide sequence
Returns:
str: reverse complement of `s` nucleotide sequence
"""
return s.translate(NT_SUB)[::-1]
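# Example (a sketch): revcomp("AACG") -> "CGTT" (complement each base, then
# reverse the string).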
def init_automaton(scheme_fasta):
"""Initialize Aho-Corasick Automaton with kmers from SNV scheme fasta
Args:
scheme_fasta: SNV scheme fasta file path
Returns:
Aho-Corasick Automaton with kmers loaded
"""
A = Automaton()
for header, sequence in parse_fasta(scheme_fasta):
kmer_list = expand_degenerate_bases(sequence)
for idx,seq in enumerate(kmer_list):
A.add_word(seq, (header, seq, False))
A.add_word(revcomp(seq), (header, seq, True))
A.make_automaton()
return A
def init_automaton_dict(seqs):
"""Initialize Aho-Corasick Automaton with kmers from SNV scheme fasta
Args:
scheme_fasta: SNV scheme fasta file path
Returns:
Aho-Corasick Automaton with kmers loaded
"""
stime = time.time()
A = Automaton()
for seq_id in seqs:
sequence = seqs[seq_id]
kmer_list = expand_degenerate_bases(sequence)
for idx,seq in enumerate(kmer_list):
A.add_word(seq, (seq_id, seq, False))
A.add_word(revcomp(seq), (seq_id, seq, True))
A.make_automaton()
return A
def SimpleFastaParser(handle):
"""Iterate over Fasta records as string tuples.
Arguments:
- handle - input stream opened in text mode
For each record a tuple of two strings is returned, the FASTA title
line (without the leading '>' character), and the sequence (with any
whitespace removed). The title line is not divided up into an
identifier (the first word) and comment or description.
>>> with open("Fasta/dups.fasta") as handle:
... for values in SimpleFastaParser(handle):
... print(values)
...
('alpha', 'ACGTA')
('beta', 'CGTC')
('gamma', 'CCGCC')
('alpha (again - this is a duplicate entry to test the indexing code)', 'ACGTA')
('delta', 'CGCGC')
"""
# Skip any text before the first record (e.g. blank lines, comments)
for line in handle:
if line[0] == ">":
title = line[1:].rstrip()
break
else:
# no break encountered - probably an empty file
return
# Main logic
# Note, remove trailing whitespace, and any internal spaces
# (and any embedded \r which are possible in mangled files
# when not opened in universal read lines mode)
lines = []
for line in handle:
if line[0] == ">":
yield title, "".join(lines).replace(" ", "").replace("\r", "").upper()
lines = []
title = line[1:].rstrip()
continue
lines.append(line.rstrip())
yield title, "".join(lines).replace(" ", "").replace("\r", "").upper()
REGEX_GZIPPED = re.compile(r'^.+\.gz$')
def parse_fasta(filepath):
"""Parse a FASTA/FASTA.GZ file returning a generator yielding tuples of fasta headers to sequences.
Args:
filepath (str): Fasta file path
Returns:
generator: yields tuples of (<fasta header>, <fasta sequence>)
"""
if REGEX_GZIPPED.match(filepath):
# assumes Linux os with zcat installed
import os
with os.popen('zcat < {}'.format(filepath)) as f:
yield from SimpleFastaParser(f)
else:
with open(filepath, 'r') as f:
yield from SimpleFastaParser(f)
def find_in_fasta(automaton: Automaton, fasta: str) -> pd.DataFrame:
"""Find scheme kmers in input fasta file
Args:
automaton: Aho-Corasick Automaton with scheme SNV target kmers loaded
fasta: Input fasta path
Returns:
Dataframe with any matches found in input fasta file
"""
stime = time.time()
res = []
for contig_header, sequence in parse_fasta(fasta):
for idx, (kmername, kmer_seq, is_revcomp) in automaton.iter(sequence):
res.append((kmername, kmer_seq, is_revcomp, contig_header, idx))
columns = ['kmername', 'seq', 'is_revcomp', 'contig_id', 'match_index']
print("Hi I am worker {} with PID {} at time {} and {} elapsed time".format(current_process(), getpid(), stime,time.time()-stime))
sys.stdin.flush()
return pd.DataFrame(res, columns=columns)
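# Typical pipeline sketch (file names are illustrative):
#
#   A = init_automaton("scheme.fasta")        # build once from the scheme kmers
#   hits = find_in_fasta(A, "genome.fasta")   # DataFrame of kmer hits per contig
#
# parallel_query_fasta_files below runs the same query over many genomes,
# optionally with a multiprocessing pool.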
def find_in_fasta_dict(automaton: Automaton, seqs: dict) -> pd.DataFrame:
"""Find scheme kmers in input fasta file
Args:
automaton: Aho-Corasick Automaton with scheme SNV target kmers loaded
fasta: Input fasta path
Returns:
Dataframe with any matches found in input fasta file
"""
res = []
iter_keys = seqs.keys()
kmer_freq = {}
for seq_id in iter_keys:
seq = seqs[seq_id].replace('-','')
for idx, (kmername, kmer_seq, is_revcomp) in automaton.iter(seq):
res.append((kmername, kmer_seq, is_revcomp, seq_id, idx))
columns = ['kmername', 'seq', 'is_revcomp', 'contig_id', 'match_index']
return pd.DataFrame(res, columns=columns)
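# Note: find_in_fasta_dict strips gap characters ('-') before matching, so the
# reported match_index refers to a position in the ungapped sequence rather
# than to an alignment column.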
def parallel_query_fasta_files(input_genomes,
automaton: Automaton,
n_threads: int = 1):
results = []
    sys.stdout.flush()
batch_size = 100
if n_threads == 1:
for i in range(0,len(input_genomes)):
stime = time.time()
df = find_in_fasta(automaton,input_genomes[i])
df['file_path'] = input_genomes[i]
sample = os.path.basename(input_genomes[i])
sample = re.sub(r"(\.fa$)|(\.fas$)|(\.fasta$)|(\.fna$)", "", sample)
df['sample'] = sample
df['is_pos_kmer'] = ~df.kmername.str.contains('negative')
refpositions = [x for x, y in df.kmername.str.split('-')]
df['refposition'] = [int(x.replace('negative', '')) for x in refpositions]
results.append(df)
print("{}\t{}".format(input_genomes[i],time.time()-stime))
else:
print("creating pool")
pool = Pool(processes=n_threads)
res = []
for i in range(0, len(input_genomes)):
print("submitting job {}".format(time.time()))
res.append(pool.apply_async(find_in_fasta, ( automaton, input_genomes[i] )))
            if i > 0 and i % batch_size == 0:
pool.close()
pool.join()
pool = Pool(processes=n_threads)
print("closing pool")
#cleanup
pool.close()
pool.join()
for i in range(0,len(res)):
df = res[i].get()
df['file_path'] = input_genomes[i]
sample = os.path.basename(input_genomes[i])
sample = re.sub(r"(\.fa$)|(\.fas$)|(\.fasta$)|(\.fna$)", "", sample)
df['sample'] = sample
df['is_pos_kmer'] = ~df.kmername.str.contains('negative')
refpositions = [x for x, y in df.kmername.str.split('-')]
df['refposition'] = [int(x.replace('negative', '')) for x in refpositions]
results.append(df)
return pd.concat(results)
def parallel_query_contigs(input_genomes,
automaton: Automaton,
n_threads: int = 1):
stime = time.time()
if n_threads == 1:
return find_in_fasta_dict(automaton,input_genomes)
else:
pool = Pool(processes=n_threads)
res = []
it = iter(input_genomes)
length = len(input_genomes)
        chunk_size = max(1, length // n_threads)
for i in range(0, length, chunk_size):
chunk = {}
sub = time.time()
for seq_id in itertools.islice(it,chunk_size):
seq = input_genomes[seq_id]
chunk[seq_id] = seq
res.append(pool.apply_async(find_in_fasta_dict, ( automaton, chunk )))
print("time taken to submit: {}".format(time.time() - sub))
#cleanup
pool.close()
pool.join()
return pd.concat([x.get() for x in res])
def parse_fastq(filepath):
"""Parse a FASTQ/FASTQ.GZ file returning a generator yielding tuples of FASTQ entry headers and sequences.
Args:
filepath (str): FASTQ/FASTQ.GZ file path
Returns:
generator: yields tuples of (<fastq header>, <fastq sequence>)
"""
if REGEX_GZIPPED.match(filepath):
# using os.popen with zcat since it is much faster than gzip.open or gzip.open(io.BufferedReader)
# http://aripollak.com/pythongzipbenchmarks/
# assumes Linux os with zcat installed
import os
with os.popen('zcat < {}'.format(filepath)) as f:
yield from _parse_fastq(f)
else:
with open(filepath, 'r') as f:
yield from _parse_fastq(f)
def _parse_fastq(f):
"""Simple FASTQ parser which yields the header and sequence ignoring the quality scores
Args:
f: file-like object
Yields:
Tuple of FASTQ entry header and sequence
"""
header = ''
seq = ''
skip = False
for line in f:
if skip:
skip = False
continue
line = line.strip()
if line == '':
continue
if line[0] == '@':
header = line.replace('@', '')
elif line[0] == '+':
yield header, seq
skip = True
else:
seq = line.upper()
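# _parse_fastq assumes the conventional four-line FASTQ layout (header,
# sequence, '+', quality) and does not support wrapped multi-line sequences.
# Minimal sketch of its behaviour on an in-memory record:
#
#     from io import StringIO
#     record = "@read1\nACGT\n+\nFFFF\n"
#     list(_parse_fastq(StringIO(record)))   # -> [('read1', 'ACGT')]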
def find_in_fastqs(automaton: Automaton, *fastqs):
"""Find scheme kmers in input fastq files
Args:
automaton: Aho-Corasick Automaton with scheme SNV target kmers loaded
fastqs: Input fastq file paths
Returns:
Dataframe with any matches found in input fastq files
"""
kmer_seq_counts = defaultdict(int)
for fastq in fastqs:
for _, sequence in parse_fastq(fastq):
for idx, (_, kmer_seq, _) in automaton.iter(sequence):
kmer_seq_counts[kmer_seq] += 1
    kmer_freq = {}
    for kmer_seq, freq in kmer_seq_counts.items():
        kmername, sequence, _ = automaton.get(kmer_seq)
        if kmername not in kmer_freq:
            kmer_freq[kmername] = 0
        kmer_freq[kmername] += freq
    res = [[kmername, freq] for kmername, freq in kmer_freq.items()]
    return pd.DataFrame(res, columns=['kmername', 'freq'])
def parallel_fastq_query(automaton: Automaton, fastqs, n_threads=1):
results = []
df = pd.DataFrame()
if n_threads == 1:
for file in fastqs:
print(file)
tmp = find_in_fastqs(automaton,file)
tmp['file'] = file
sample = os.path.basename(file)
sample = re.sub(r"(\_1.fq$)|(\_2.fq$)|(\_R1.fq$)|(\_R2.fq$)|(\_1.fastq$)|(\_2.fastq$)|(\_R1.fastq$)|(\_R2.fastq$)|(\.fq$)|(\.fastq$)", "", sample)
tmp['sample'] = sample
results.append(tmp)
else:
pool = Pool(processes=n_threads)
res = []
for file in fastqs:
res.append(pool.apply_async(find_in_fastqs, ( automaton,file )))
#cleanup
pool.close()
pool.join()
for i in range(0,len(res)):
tmp = res[i].get()
tmp['file'] = fastqs[i]
sample = os.path.basename(fastqs[i])
sample = re.sub(r"(\_1.fq$)|(\_2.fq$)|(\_R1.fq$)|(\_R2.fq$)|(\_1.fastq$)|(\_2.fastq$)|(\_R1.fastq$)|(\_R2.fastq$)|(\.fq$)|(\.fastq$)", "", sample)
tmp['sample'] = sample
results.append(tmp)
df = pd.concat(results)
refpositions = [x for x, y in df.kmername.str.split('-')]
df['refposition'] = [int(x.replace('negative', '')) for x in refpositions]
df['is_pos_kmer'] = ~df.kmername.str.contains('negative')
    return df
the-stack_106_31506 | # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
import time
import datetime
class JobLine(Model):
_name = "job.line"
_fields = {
"job_id": fields.Many2One("job", "Job", required=True, on_delete="cascade"),
"sequence": fields.Integer("Sequence"),
"type": fields.Selection([["labor", "Labor"], ["part", "Parts"], ["other", "Other"]], "Type"),
"description": fields.Text("Description"),
"product_id": fields.Many2One("product", "Product"),
"qty": fields.Decimal("Qty"),
"uom_id": fields.Many2One("uom", "UoM"),
"unit_price": fields.Decimal("Sale Unit Price"),
"amount": fields.Decimal("Sale Amount", function="get_amount"),
"payment_type": fields.Selection([["contract", "Included In Contract"], ["job", "Not Included In Contract"]], "Payment Type"),
}
_order = "sequence,id"
def get_amount(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
vals[obj.id] = (obj.qty or 0) * (obj.unit_price or 0)
return vals
JobLine.register()
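# get_amount() above is a computed (function) field: each job line reports
# qty * unit_price, treating missing values as zero, e.g. qty=3 at
# unit_price=150.00 gives amount=450.00.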
the-stack_106_31507 | import pytest
from wal_e import exception
from wal_e import worker
class FakeTarPartition(object):
"""Implements enough protocol to test concurrency semantics."""
def __init__(self, num_members, explosive=False):
self._explosive = explosive
self.num_members = num_members
def __len__(self):
return self.num_members
class FakeUploader(object):
"""A no-op uploader that makes affordance for fault injection."""
def __call__(self, tpart):
if tpart._explosive:
raise tpart._explosive
return tpart
class Explosion(Exception):
"""Marker type of injected faults."""
pass
def make_pool(max_concurrency, max_members):
"""Set up a pool with a FakeUploader"""
return worker.TarUploadPool(FakeUploader(),
max_concurrency, max_members)
def test_simple():
"""Simple case of uploading one partition."""
pool = make_pool(1, 1)
pool.put(FakeTarPartition(1))
pool.join()
def test_not_enough_resources():
"""Detect if a too-large segment can never complete."""
pool = make_pool(1, 1)
with pytest.raises(exception.UserCritical):
pool.put(FakeTarPartition(2))
pool.join()
def test_simple_concurrency():
"""Try a pool that cannot execute all submitted jobs at once."""
pool = make_pool(1, 1)
for i in range(3):
pool.put(FakeTarPartition(1))
pool.join()
def test_fault_midstream():
"""Test if a previous upload fault is detected in calling .put.
This case is seen while pipelining many uploads in excess of the
maximum concurrency.
NB: This test is critical as to prevent failed uploads from
failing to notify a caller that the entire backup is incomplete.
"""
pool = make_pool(1, 1)
# Set up upload doomed to fail.
tpart = FakeTarPartition(1, explosive=Explosion('Boom'))
pool.put(tpart)
# Try to receive the error through adding another upload.
tpart = FakeTarPartition(1)
with pytest.raises(Explosion):
pool.put(tpart)
def test_fault_join():
"""Test if a fault is detected when .join is used.
This case is seen at the end of a series of uploads.
NB: This test is critical as to prevent failed uploads from
failing to notify a caller that the entire backup is incomplete.
"""
pool = make_pool(1, 1)
# Set up upload doomed to fail.
tpart = FakeTarPartition(1, explosive=Explosion('Boom'))
pool.put(tpart)
# Try to receive the error while finishing up.
with pytest.raises(Explosion):
pool.join()
def test_put_after_join():
"""New jobs cannot be submitted after a .join
This is mostly a re-check to detect programming errors.
"""
pool = make_pool(1, 1)
pool.join()
with pytest.raises(exception.UserCritical):
pool.put(FakeTarPartition(1))
def test_pool_concurrent_success():
pool = make_pool(4, 4)
for i in range(30):
pool.put(FakeTarPartition(1))
pool.join()
def test_pool_concurrent_failure():
pool = make_pool(4, 4)
parts = [FakeTarPartition(1) for i in range(30)]
exc = Explosion('boom')
parts[27]._explosive = exc
with pytest.raises(Explosion) as e:
for part in parts:
pool.put(part)
pool.join()
assert e.value is exc
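# These tests exercise only TarUploadPool's concurrency bookkeeping; the
# uploader is faked, so no storage credentials are needed and the module can be
# run directly with a plain `pytest <this file>` invocation.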
the-stack_106_31509 | # -*- coding: utf-8 -*-
'''
gds.burp.models
~~~~~~~~~~~~~~~
This module contains the primary objects that make working with
Burp's IHttpRequestResponse object's more... Pythonic.
'''
from java.net import URL
try:
from burp import IHttpService, IScanIssue
except ImportError:
    class IHttpService(object):
        pass
    class IScanIssue(object):
        pass
from Cookie import SimpleCookie
from cStringIO import StringIO
from cgi import FieldStorage, parse_header, parse_qs
from urlparse import urlparse
from .decorators import reify
from .structures import CaseInsensitiveDict
import json
CRLF = '\r\n'
SP = chr(0x20)
class HttpRequest(object):
'''The :class:`HttpRequest <HttpRequest>` object. Pass Burp's
IHttpRequestResponse object to the constructor.
Optional init arguments:
:param _burp: IBurpExtender implementation
'''
def __init__(self, messageInfo=None, _burp=None):
self._messageInfo = messageInfo
self._burp = _burp
self._host = None
self._port = 80
self._protocol = 'http'
self._url = ''
self.method = None
self._uri = None
self.version = None
self._headers = {}
self.body = None
if messageInfo is not None and hasattr(messageInfo, 'request'):
if messageInfo.getRequest():
self.method, self._uri, self.version, self._headers, self.body = \
_parse_message(messageInfo.getRequest().tostring())
if hasattr(messageInfo, 'response'):
self.response = HttpResponse(getattr(messageInfo, 'response', None),
request=self)
else:
self.response = HttpResponse(None, request=self)
def __contains__(self, item):
return item in self.body if self.body else False
def __getstate__(self):
return {k: v if k not in ('_burp', '_messageInfo') else None
for k, v in self.__dict__.iteritems()}
def __len__(self):
return int(self.headers.get('content-length', len(self.body or '')))
def __nonzero__(self):
return self.raw is not None
def __repr__(self):
return '<HttpRequest [%s]>' % (getattr(self.url, 'path', ''), )
@property
def host(self):
'''
Returns the name of the application host.
'''
if self._messageInfo is not None and \
self._host != self._messageInfo.getHost():
self._host = self._messageInfo.getHost()
return self._host
@host.setter
def host(self, host):
'''
Sets the name of the application host to which the request
should be sent.
Note: This method generally can only be used before the
message has been forwarded to the application, and not in
read-only contexts.
:param host: The name of the application host to which the
request should be sent.
'''
if self._messageInfo is not None:
self._messageInfo.setHost(host)
return
@property
def port(self):
'''
Returns the port number used by the application.
'''
if self._messageInfo is not None and \
self._port != self._messageInfo.getPort():
self._port = self._messageInfo.getPort()
return self._port
@port.setter
def port(self, port):
'''
Sets the port number to which the request should be sent.
Note: This method generally can only be used before the
message has been forwarded to the application, and not in
read-only contexts.
:param port: The port number to which the request should be
sent.
'''
if self._messageInfo is not None:
self._messageInfo.setPort(port)
return
@property
def protocol(self):
'''
Returns the protocol used by the application.
'''
if self._messageInfo is not None and \
self._protocol != self._messageInfo.getProtocol():
self._protocol = self._messageInfo.getProtocol()
return self._protocol
@protocol.setter
def protocol(self, protocol):
'''
Sets the protocol which should be used by the request.
Note: This method generally can only be used before the
message has been forwarded to the application, and not in
read-only contexts.
:param protocol: The protocol which should be used by the
request. Valid values are "http" and "https".
'''
if self._messageInfo is not None:
self._messageInfo.setProtocol(protocol)
return
@reify
def url(self):
'''
The URL requested in this HTTP request.
Note: This is a **read-only** attribute.
:returns: :class:`~urlparse.ParseResult` object.
'''
if self._messageInfo is not None:
_url = self._messageInfo.getUrl()
if _url:
self._url = urlparse(_url.toString())
return self._url
@reify
def cookies(self):
'''
The HTTP Cookies sent in this request.
Note: This is a **read-only** attribute.
:returns: :class:`~Cookie.SimpleCookie` object.
'''
self._cookies = SimpleCookie(self.headers.get('cookie', ''))
return self._cookies
@reify
def headers(self):
'''
The HTTP headers sent in this request. Headers are accessible
by their header names (case insensitive).
Note: This is a **read-only** attribute.
'''
self._headers = CaseInsensitiveDict(self._headers)
return self._headers
@reify
def parameters(self):
'''
Parameters parsed into a dictionary based on type (i.e., query,
body, etc.)
Note: This is a **read-only** attribute.
'''
self._parameters = _parse_parameters(self)
return self._parameters
@property
def content_type(self):
'''
Content-Type of the HTTP request.
Note: This is a **read-only** attribute.
'''
return self.headers.get('content-type', '')
@property
def is_secure(self):
'''
True if the HTTP request was sent over HTTPS.
Note: This is a **read-only** attribute.
'''
return True if self.protocol == 'https' else False
@property
def is_xhr(self):
'''
True if the HTTP request originated on the client using
XMLHttpRequest.
Note: This is a **read-only** attribute.
'''
return True if 'x-requested-with' in self.headers else False
@property
def raw(self):
'''
Returns the full request contents.
'''
if self._messageInfo:
return self._messageInfo.getRequest().tostring()
return
@raw.setter
def raw(self, message):
'''
Sets the request contents which should be sent to the application.
:param message: The request contents which should be sent to the
application.
'''
if self._messageInfo:
self._messageInfo.setRequest(message)
return
@property
def comment(self):
'''
Returns the user-annotated comment for this item, if applicable.
'''
if self._messageInfo:
return self._messageInfo.getComment()
return
@comment.setter
def comment(self, comment):
'''
Sets the user-annotated comment for this item.
:param comment: The comment to be associated with this item.
'''
if self._messageInfo:
return self._messageInfo.setComment(comment)
return
@property
def highlight(self):
'''
Returns the user-annotated color for this item, if applicable.
'''
if self._messageInfo:
return self._messageInfo.getHighlight()
return
@highlight.setter
def highlight(self, color):
'''
Sets the user-annotated color for this item.
:param color: The color to be associated with this item.
Valid values are: red, orange, yellow, green, cyan, blue, pink,
magenta, gray.
'''
if self._messageInfo:
self._messageInfo.setHighlight(color)
return
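# Hedged usage sketch from inside a Burp extension (messageInfo would come from
# a Burp callback such as processHttpMessage; the attribute values shown are
# assumed, not produced by this module):
#
#     req = HttpRequest(messageInfo, _burp=self)
#     req.headers.get('user-agent')
#     req.parameters.get('query', {})
#     req.is_secure, req.is_xhr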
class HttpResponse(object):
def __init__(self, message=None, request=None):
self.request = request
self.version = None
self.status_code = None
self.reason = None
self.encoding = None
self._headers = {}
self.body = None
if message is not None:
self.version, self.status_code, self.reason, self._headers, self.body = \
_parse_message(message.tostring())
def __contains__(self, item):
return item in self.body if self.body else False
def __len__(self):
return int(self.headers.get('content-length', len(self.body or '')))
def __nonzero__(self):
return self.raw is not None
def __repr__(self):
return '<HttpResponse [%s]>' % (self.status_code, )
@reify
def cookies(self):
'''
The HTTP Cookies set in this response.
Note: This is a **read-only** attribute.
:returns: :class:`~Cookie.SimpleCookie` object.
'''
self._cookies = SimpleCookie(self.headers.get('set-cookie', ''))
return self._cookies
@reify
def headers(self):
'''
The HTTP headers received in this response. Headers are accessible
by their header names (case insensitive).
Note: This is a **read-only** attribute.
'''
self._headers = CaseInsensitiveDict(self._headers)
return self._headers
@property
def content_type(self):
'''
Content-Type of the HTTP response.
Note: This is a **read-only** attribute.
'''
return self.headers.get('content-type', '')
@property
def raw(self):
'''
Returns the full response contents.
'''
if self.request._messageInfo:
return self.request._messageInfo.getResponse().tostring()
return
@raw.setter
def raw(self, message):
'''
Sets the response contents which should be processed by the
invoking Burp tool.
:param message: The response contents which should be processed
by the invoking Burp tool.
'''
if self.request._messageInfo:
return self.request._messageInfo.setResponse(message)
return
class HttpService(IHttpService):
__slots__ = ['host', 'port', 'protocol', ]
def __init__(self, *args, **kwargs):
attrs = {}
if args and isinstance(args[0], IHttpService):
service = args[0]
attrs['host'] = service.getHost()
attrs['port'] = service.getPort()
attrs['protocol'] = service.getProtocol()
attrs.update(kwargs)
self.host = unicode(attrs.get('host', u'localhost'))
self.port = int(attrs.get('port', 80))
self.protocol = unicode(attrs.get('protocol', u'http'))
def __repr__(self):
        return '<HttpService [%s://%s:%d]>' % (
            self.getProtocol(), self.getHost(), self.getPort(), )
def getHost(self):
return unicode(self.host)
def getPort(self):
return int(self.port)
def getProtocol(self):
return unicode(self.protocol)
class ScanIssue(IScanIssue):
__slots__ = ['confidence', 'httpMessages', 'httpService',
'issueBackground', 'issueDetail', 'issueName', 'issueType',
'remediationBackground', 'remediationDetail', 'severity', 'url',
'host', 'port', 'protocol', ]
def __init__(self, *args, **kwargs):
attrs = {}
if args and isinstance(args[0], IScanIssue):
issue = args[0]
attrs['confidence'] = issue.getConfidence()
attrs['httpMessages'] = list(issue.getHttpMessages())
attrs['httpService'] = HttpService(issue.getHttpService(), **kwargs)
attrs['issueBackground'] = issue.getIssueBackground()
attrs['issueDetail'] = issue.getIssueDetail()
attrs['issueName'] = issue.getIssueName()
attrs['issueType'] = issue.getIssueType()
attrs['remediationBackground'] = issue.getRemediationBackground()
attrs['remediationDetail'] = issue.getRemediationDetail()
attrs['severity'] = issue.getSeverity()
attrs['url'] = urlparse(str(issue.getUrl()))
attrs['host'] = attrs['httpService'].getHost()
attrs['port'] = attrs['httpService'].getPort()
attrs['protocol'] = attrs['httpService'].getProtocol()
attrs.update(kwargs)
for key, value in attrs.iteritems():
setattr(self, key, value)
def __repr__(self):
return '<ScanIssue %s>' % (self.getIssueName(), )
def getConfidence(self):
return getattr(self, 'confidence', u'Tentative')
def getHttpMessages(self):
return getattr(self, 'httpMessages', None)
def getHttpService(self):
return getattr(self, 'httpService', HttpService())
def getIssueBackground(self):
return getattr(self, 'issueBackground', None)
def getIssueDetail(self):
return getattr(self, 'issueDetail', None)
def getIssueName(self):
return getattr(self, 'issueName', u'Default Issue Name')
def getIssueType(self):
return getattr(self, 'issueType', None)
def getRemediationBackground(self):
return getattr(self, 'remediationBackground', None)
def getRemediationDetail(self):
return getattr(self, 'remediationDetail', None)
def getSeverity(self):
return getattr(self, 'severity', u'Information')
def getUrl(self):
return URL(getattr(self, 'url', urlparse('http://')).geturl())
def getHost(self):
return getattr(self, 'host', u'localhost')
def getPort(self):
return int(getattr(self, 'port', 80))
def getProtocol(self):
return getattr(self, 'protocol', u'http')
def _parse_message(message):
is_response = False
pos = idx = 0
idx = message.find(CRLF, pos)
if idx != -1:
start_line = message[pos:idx]
if start_line.startswith('HTTP/'):
is_response = True
_idx = start_line.find(SP)
if _idx != -1:
if is_response:
version = start_line[0:_idx]
else:
method = start_line[0:_idx]
_pos = _idx + 1
if is_response:
_idx = start_line.find(SP, _pos)
status = start_line[_pos:_idx]
if not status.isdigit():
raise ValueError('status code %r is not a number' % (status, ))
status = int(status)
_pos = _idx + 1
reason = start_line[_pos:]
else:
# work out the http version by looking in reverse
_ridx = start_line.rfind(SP)
version = start_line[_ridx + 1:]
if not version.startswith('HTTP/'):
raise ValueError('Invalid HTTP version: %r' % (version, ))
# request-uri will be everything in-between.
# some clients might not encode space into a plus or %20
uri = start_line[_pos:_ridx]
if not uri or uri.isspace():
raise ValueError('Invalid URI: %r' % (uri, ))
pos = idx + 2
else:
raise ValueError('Could not parse start-line from message')
headers = CaseInsensitiveDict()
while (idx != -1):
idx = message.find(CRLF, pos)
if idx == pos:
# we've reached the end of the request headers
            # advance past the blank line's CRLF to the start of the body
pos = idx + 2
break
if idx != -1:
header = message[pos:idx]
_idx = header.find(':')
if _idx != -1:
name = header[:_idx].strip()
value = header[_idx + 1:].strip()
has_value = headers.get(name)
if has_value and has_value != value:
value = ', '.join([has_value, value])
headers[name] = value
else:
raise ValueError('Error parsing header: %r' % (header, ))
pos = idx + 2
else:
# looks like we reached the end of the message before EOL
break
if idx < len(message):
body = message[pos:]
else:
raise ValueError('Parsed past message body??')
if not is_response:
return method, uri, version, headers, body
else:
return version, status, reason, headers, body
def _parse_parameters(request):
parameters = {}
if request.url.query:
parameters['query'] = parse_qs(request.url.query,
keep_blank_values=True)
ctype, pdict = parse_header(request.headers.get('content-type', ''))
if ctype == 'application/x-www-form-urlencoded':
parameters['body'] = parse_qs(request.body, keep_blank_values=True)
elif ctype.startswith('multipart/'):
parameters['body'] = FieldStorage(
fp=StringIO(request.body),
headers=request.headers,
environ={
'REQUEST_METHOD': request.method,
'QUERY_STRING': request.url.query,
},
keep_blank_values=True)
elif ctype in ('application/json', ):
try:
parameters['body'] = json.loads(request.body)
        except (NameError, TypeError, ValueError):
pass
elif ctype == 'application/x-amf':
pass
elif ctype == 'text/x-gwt-rpc':
pass
elif ctype == 'application/xml':
pass
else:
pass
return parameters
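# Minimal sketch of how _parse_message splits a raw request (the input below is
# an assumed example, not produced by Burp):
#
#     raw = 'GET /index.html?q=1 HTTP/1.1\r\nHost: example.com\r\n\r\n'
#     method, uri, version, headers, body = _parse_message(raw)
#     # method == 'GET', uri == '/index.html?q=1', version == 'HTTP/1.1'
#     # headers['host'] == 'example.com', body == ''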
the-stack_106_31510 | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Btcavenue Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test getblockstats rpc call
#
from test_framework.test_framework import BtcavenueTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
import json
import os
TESTSDIR = os.path.dirname(os.path.realpath(__file__))
class GetblockstatsTest(BtcavenueTestFramework):
start_height = 101
max_stat_pos = 2
def add_options(self, parser):
parser.add_argument('--gen-test-data', dest='gen_test_data',
default=False, action='store_true',
help='Generate test data')
parser.add_argument('--test-data', dest='test_data',
default='data/rpc_getblockstats.json',
action='store', metavar='FILE',
help='Test data file')
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.supports_cli = False
def get_stats(self):
return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos+1)]
def generate_test_data(self, filename):
mocktime = 1525107225
self.nodes[0].setmocktime(mocktime)
self.nodes[0].generate(101)
address = self.nodes[0].get_deterministic_priv_key().address
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].generate(1)
self.sync_all()
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=False)
self.nodes[0].settxfee(amount=0.003)
self.nodes[0].sendtoaddress(address=address, amount=1, subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
self.expected_stats = self.get_stats()
blocks = []
tip = self.nodes[0].getbestblockhash()
blockhash = None
height = 0
while tip != blockhash:
blockhash = self.nodes[0].getblockhash(height)
blocks.append(self.nodes[0].getblock(blockhash, 0))
height += 1
to_dump = {
'blocks': blocks,
'mocktime': int(mocktime),
'stats': self.expected_stats,
}
with open(filename, 'w', encoding="utf8") as f:
json.dump(to_dump, f, sort_keys=True, indent=2)
def load_test_data(self, filename):
with open(filename, 'r', encoding="utf8") as f:
d = json.load(f)
blocks = d['blocks']
mocktime = d['mocktime']
self.expected_stats = d['stats']
# Set the timestamps from the file so that the nodes can get out of Initial Block Download
self.nodes[0].setmocktime(mocktime)
self.sync_all()
for b in blocks:
self.nodes[0].submitblock(b)
def run_test(self):
test_data = os.path.join(TESTSDIR, self.options.test_data)
if self.options.gen_test_data:
self.generate_test_data(test_data)
else:
self.load_test_data(test_data)
self.sync_all()
stats = self.get_stats()
# Make sure all valid statistics are included but nothing else is
expected_keys = self.expected_stats[0].keys()
assert_equal(set(stats[0].keys()), set(expected_keys))
assert_equal(stats[0]['height'], self.start_height)
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
self.log.info('Checking block %d\n' % (i))
assert_equal(stats[i], self.expected_stats[i])
# Check selecting block by hash too
blockhash = self.expected_stats[i]['blockhash']
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
assert_equal(stats_by_hash, self.expected_stats[i])
# Make sure each stat can be queried on its own
for stat in expected_keys:
for i in range(self.max_stat_pos+1):
result = self.nodes[0].getblockstats(hash_or_height=self.start_height + i, stats=[stat])
assert_equal(list(result.keys()), [stat])
if result[stat] != self.expected_stats[i][stat]:
self.log.info('result[%s] (%d) failed, %r != %r' % (
stat, i, result[stat], self.expected_stats[i][stat]))
assert_equal(result[stat], self.expected_stats[i][stat])
# Make sure only the selected statistics are included (more than one)
some_stats = {'minfee', 'maxfee'}
stats = self.nodes[0].getblockstats(hash_or_height=1, stats=list(some_stats))
assert_equal(set(stats.keys()), some_stats)
# Test invalid parameters raise the proper json exceptions
tip = self.start_height + self.max_stat_pos
assert_raises_rpc_error(-8, 'Target block height %d after current tip %d' % (tip+1, tip),
self.nodes[0].getblockstats, hash_or_height=tip+1)
assert_raises_rpc_error(-8, 'Target block height %d is negative' % (-1),
self.nodes[0].getblockstats, hash_or_height=-1)
# Make sure not valid stats aren't allowed
inv_sel_stat = 'asdfghjkl'
inv_stats = [
[inv_sel_stat],
['minfee' , inv_sel_stat],
[inv_sel_stat, 'minfee'],
['minfee', inv_sel_stat, 'maxfee'],
]
for inv_stat in inv_stats:
assert_raises_rpc_error(-8, 'Invalid selected statistic %s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat)
# Make sure we aren't always returning inv_sel_stat as the culprit stat
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
# Mainchain's genesis block shouldn't be found on regtest
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
# Invalid number of args
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats, '00', 1, 2)
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats)
if __name__ == '__main__':
GetblockstatsTest().main()
the-stack_106_31513 | from functools import reduce
from pathlib import Path
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
import SimpleITK as sitk
from bokeh.models import HoverTool
import panel as pn
import param
import holoviews as hv
from holoviews import opts
from holoviews.operation.datashader import datashade, shade, dynspread, rasterize
from bokeh.models import FuncTickFormatter
from skimage import color
from skimage.restoration import inpaint
from skimage.morphology import binary_dilation
# hv.extension('bokeh')
class PyPatient:
"""Class for holding image and metadata for a patient."""
def __init__(self, path=None, img_type='infer', overlay_path=None, from_xarray=False,
*args, **kwargs):
'''Initialize via reading the image and creating the xarray.'''
self.verbose = kwargs.get('verbose', False)
if from_xarray:
self.ds = kwargs.get('ds', None)
if self.ds is None:
raise TypeError('"from_xarray" is True, requires "ds" kwarg"')
return None
if path is None:
raise TypeError('Must specify "path" if "from_xarray" is False')
# Initial trimming of path arg
if type(path) is dict:
self.label = list(path.keys())
path = [path[key] for key in self.label]
if type(overlay_path) is dict:
self.overlay_label = list(overlay_path.keys())
overlay_path = [overlay_path[key] for key in self.overlay_label]
np_img_list, img_metadata = self.get_img_list(path)
if self.verbose:
print(img_metadata, len(np_img_list))
np_overlay_list, _ = self.get_overlay_list(overlay_path)
self.orient_images(np_img_list, img_metadata)
self.orient_overlays(np_overlay_list, img_metadata)
np_img_stack, np_overlay_stack = self.pad_and_stack_images(np_img_list, np_overlay_list)
self.n_img = np_img_stack.shape[0]
self.n_overlay = np_overlay_stack.shape[1] if np_overlay_stack is not None else 0
self.subject_id = kwargs.get('subject_id', 'NO_ID')
if not hasattr(self, 'label'):
self.label = kwargs.get('label', [f'image_{n}' for n in range(self.n_img)])
if type(self.label) is not list:
self.label = [self.label]
if np_overlay_list is None:
if self.verbose:
print(img_metadata)
print(img_metadata['spacing'])
print(img_metadata['origin'])
print(img_metadata['direction'])
self.ds = xr.Dataset({'image': (['subject_id', 'label', 'z', 'y', 'x'],
np_img_stack[np.newaxis, :]),
'spacing': (['subject_id', 'label', 'img_dims'],
[img_metadata['spacing']]),
'origin': (['subject_id', 'label', 'img_dims'],
[img_metadata['origin']]),
},
coords={'subject_id': [self.subject_id],
'label': self.label,
'z': range(np_img_stack.shape[1]),
'y': range(np_img_stack.shape[2]),
'x': range(np_img_stack.shape[3]),
'img_dims': range(3),
}
)
else:
if not hasattr(self, 'overlay_label'):
self.overlay_label = kwargs.get('overlay_label', [f'overlay_label_{n}' for n in
range(self.n_overlay)])
self.ds = xr.Dataset({'image': (['subject_id', 'label', 'z', 'y', 'x'],
np_img_stack[np.newaxis, :]),
'overlay': (['subject_id', 'label', 'overlay_label',
'z', 'y', 'x'],
np_overlay_stack[np.newaxis, :]),
'spacing': (['subject_id', 'label', 'img_dims'],
[img_metadata['spacing']]),
'origin': (['subject_id', 'label', 'img_dims'],
[img_metadata['origin']]),
},
coords={'subject_id': [self.subject_id],
'label': self.label,
'overlay_label': self.overlay_label,
'z': range(np_img_stack.shape[1]),
'y': range(np_img_stack.shape[2]),
'x': range(np_img_stack.shape[3]),
'img_dims': range(3),
}
)
def orient_images(self, np_img_list, img_metadata, bad=False):
if self.verbose:
print(img_metadata)
print('is this live?')
if np_img_list is None:
return None
for i in range(len(np_img_list)):
if type(np_img_list[i]) is list:
for j in range(len(np_img_list[i])):
if self.verbose:
print(img_metadata['direction'][i])
if img_metadata['direction'][i][0] < 0:
np_img_list[i][j] = np.flip(np_img_list[i][j], axis=2)
if img_metadata['direction'][i][1] > 0:
np_img_list[i][j] = np.flip(np_img_list[i][j], axis=1)
if img_metadata['direction'][i][2] > 0:
np_img_list[i][j] = np.flip(np_img_list[i][j], axis=0)
else:
if self.verbose:
print(img_metadata['direction'][i])
if img_metadata['direction'][i][0] < 0:
np_img_list[i] = np.flip(np_img_list[i], axis=2)
if bad:
if img_metadata['direction'][i][1] < 0:
np_img_list[i] = np.flip(np_img_list[i], axis=1)
else:
if img_metadata['direction'][i][1] > 0:
np_img_list[i] = np.flip(np_img_list[i], axis=1)
if img_metadata['direction'][i][2] > 0:
np_img_list[i] = np.flip(np_img_list[i], axis=0)
def orient_overlays(self, np_overlay_list, img_metadata, bad=False):
if self.verbose:
print(img_metadata)
if np_overlay_list is None:
return None
for i in range(len(np_overlay_list)):
# loop over overlays (overlay_labels)
if type(np_overlay_list[i]) is list:
# loop over images (labels)
for j in range(len(np_overlay_list[i])):
if self.verbose:
print(img_metadata['direction'][j])
if img_metadata['direction'][j][0] < 0:
np_overlay_list[i][j] = np.flip(np_overlay_list[i][j], axis=2)
if img_metadata['direction'][j][1] > 0:
np_overlay_list[i][j] = np.flip(np_overlay_list[i][j], axis=1)
if img_metadata['direction'][j][2] > 0:
np_overlay_list[i][j] = np.flip(np_overlay_list[i][j], axis=0)
else:
if self.verbose:
print(img_metadata['direction'][0])
if img_metadata['direction'][0][0] < 0:
np_overlay_list[i] = np.flip(np_overlay_list[i], axis=2)
if bad:
if img_metadata['direction'][0][1] < 0:
np_overlay_list[i] = np.flip(np_overlay_list[i], axis=1)
                else:
                    if img_metadata['direction'][0][1] > 0:
                        np_overlay_list[i] = np.flip(np_overlay_list[i], axis=1)
if img_metadata['direction'][0][2] > 0:
np_overlay_list[i] = np.flip(np_overlay_list[i], axis=0)
def get_file_type(self, path):
'''Automatically determine file type based on path name.'''
pass
def parse_path(self, path):
'''Return list of files that match path specification.'''
path = Path(path)
if path.is_file():
return str(path)
elif path.is_dir():
return [str(p) for p in sorted(path.glob('*'))]
elif '*' in path.name:
return [str(p) for p in sorted(path.parents[0].glob(path.name))]
else:
raise ValueError('Cannot parse path: {path}')
return path
def read_image(self, path, img_type=None):
'''Read image from path, and store image object'''
# Clean args
path = Path(path)
if img_type is None:
path = self.parse_path(path)
else:
img_type = img_type.lower()
if img_type is None:
image = sitk.ReadImage(path)
direction = image.GetDirection()
direction = [direction[0], direction[4], direction[-1]]
# direction = (np.asarray(image.GetOrigin()) -
# np.asarray(image.TransformIndexToPhysicalPoint(image.GetSize())))
origin = image.GetOrigin()
spacing = image.GetSpacing()
image = sitk.GetArrayFromImage(image)
if len(image.shape) == 4:
# image = color.rgb2gray(image)
# custom RGB converter for wave images
img_r = np.squeeze(image[:, :, :, 0]).astype(float)
img_g = np.squeeze(image[:, :, :, 1]).astype(float)
img_b = np.squeeze(image[:, :, :, 2]).astype(float)
img_gr = np.where(img_r == 0, 0, img_g)
img_gb = np.where(img_b == 0, 0, img_g)
img_gg = np.where((img_b == 255) & (img_r == 255) & (img_g == 255), 1, 0)
image = 0.001*(img_r+img_gr) - 0.001*(img_b+img_gb) + img_gg
# array = np.ma.masked_where(image == 1, image)
# for i in range(image.shape[0]):
# array[i].mask = binary_dilation(array[i].mask)
# array[i].mask = binary_dilation(array[i].mask)
# image[i] = inpaint.inpaint_biharmonic(image[i], array[i].mask)
elif img_type == 'nifti':
reader = sitk.ImageFileReader()
reader.SetFileName(str(path))
image = reader.Execute()
direction = image.GetDirection()
direction = [direction[0], direction[4], direction[-1]]
# direction = (np.asarray(image.GetOrigin()) -
# np.asarray(image.TransformIndexToPhysicalPoint(image.GetSize())))
origin = image.GetOrigin()
spacing = image.GetSpacing()
image = sitk.GetArrayFromImage(image)
if len(image.shape) == 4:
image = color.rgb2gray(image)
elif img_type == 'dicom':
reader = sitk.ImageSeriesReader()
if self.verbose:
print(str(path))
dicom_names = reader.GetGDCMSeriesFileNames(str(path))
if self.verbose:
print(dicom_names)
dicom_names = sorted(dicom_names, key=lambda a: Path(a).stem[2:].zfill(3))
reader.SetFileNames(dicom_names)
# reader.MetaDataDictionaryArrayUpdateOn() # Get DICOM Info
# reader.LoadPrivateTagsOn() # Get DICOM Info
image = reader.Execute()
direction = image.GetDirection()
direction = [direction[0], direction[4], direction[-1]]
# direction = (np.asarray(image.GetOrigin()) -
# np.asarray(image.TransformIndexToPhysicalPoint(image.GetSize())))
origin = image.GetOrigin()
spacing = image.GetSpacing()
image = sitk.GetArrayFromImage(image)
if len(image.shape) == 4:
image = color.rgb2gray(image)
elif img_type == 'png':
png_list = sorted(list(path.glob('*.png')))
for i, png in enumerate(png_list):
img_slice = sitk.GetArrayFromImage(sitk.ReadImage(str(png)))
if i == 0:
image = np.zeros((len(png_list), img_slice.shape[0],
img_slice.shape[1]), dtype=img_slice.dtype)
image[i, :, :] = img_slice
direction = None
origin = None
spacing = None
return image, {'spacing': spacing, 'origin': origin, 'direction': direction}
def get_img_list(self, path, get_metadata=True):
'''Return a list of images in numpy format (and metadata, optional).
Possible path configs:
'''
if path is None:
return None, None
np_img_list = []
meta_data_lists = {'direction': [], 'origin': [], 'spacing': []}
if type(path) is str:
img, meta_data = self.read_image(path)
np_img_list.append(img)
for key in meta_data.keys():
if self.verbose:
print(key)
meta_data_lists[key].append(meta_data[key])
elif type(path) is list:
for i in path:
img, meta_data = self.read_image(i)
np_img_list.append(img)
for key in meta_data.keys():
if self.verbose:
print(key)
print(meta_data_lists[key])
meta_data_lists[key].append(meta_data[key])
return np_img_list, meta_data_lists
def get_overlay_list(self, path, get_metadata=True):
'''Return a list of overlays in numpy format (and metadata, optional).
Possible configs: path, list(path), list(path, (lab1, lab2, ...))
'''
if path is None:
return None, None
np_overlay_list = []
meta_data_lists = {'direction': [], 'origin': [], 'spacing': []}
if type(path) is str:
img, meta_data = self.read_image(path)
np_overlay_list.append(img)
for key in meta_data.keys():
if self.verbose:
print(key)
meta_data_lists[key].append(meta_data[key])
elif type(path) is list:
for i in path:
if type(i) is tuple:
pass
elif type(i) is list:
np_overlay_list.append([])
for j in i:
img, meta_data = self.read_image(j)
np_overlay_list[-1].append(img)
# for key in meta_data.keys():
# if self.verbose:
# print(key)
# print(meta_data_lists[key])
# meta_data_lists[key].append(meta_data[key])
else:
img, meta_data = self.read_image(i)
np_overlay_list.append(img)
for key in meta_data.keys():
if self.verbose:
print(key)
print(meta_data_lists[key])
meta_data_lists[key].append(meta_data[key])
return np_overlay_list, meta_data_lists
def pad_and_stack_images(self, img_list, overlay_list=None):
n_imgs = len(img_list)
if self.verbose:
print('len img_list', n_imgs)
max_z = max_y = max_x = 0
for img in img_list:
max_z = max(img.shape[0], max_z)
max_y = max(img.shape[1], max_y)
max_x = max(img.shape[2], max_x)
print(img.shape)
if overlay_list is not None:
n_overlay_labels = self.get_n_overlay_labels(overlay_list)
if self.verbose:
print('n_overlay_labels', n_overlay_labels)
print(overlay_list)
for img in overlay_list:
if type(img) is list:
for subimg in img:
max_z = max(subimg.shape[0], max_z)
max_y = max(subimg.shape[1], max_y)
max_x = max(subimg.shape[2], max_x)
else:
if self.verbose:
print('img.shape', img.shape)
max_z = max(img.shape[0], max_z)
max_y = max(img.shape[1], max_y)
max_x = max(img.shape[2], max_x)
pad = np.zeros((max_z, max_y, max_x))
pad_rgb = np.zeros((max_z, max_y, max_x, 3))
for i, img in enumerate(img_list):
if len(img.shape) == 3:
pad_copy = pad.copy()
pad_copy[:img.shape[0], :img.shape[1], :img.shape[2]] = img
img_list[i] = pad_copy
elif len(img.shape) == 4:
pad_copy = pad_rgb.copy()
pad_copy[:img.shape[0], :img.shape[1], :img.shape[2], :img.shape[3]] = img
img_list[i] = pad_copy
img_list = np.stack(img_list, axis=0)
padded_overlay = None
if overlay_list is not None:
padded_overlay = np.zeros((n_imgs, n_overlay_labels, max_z, max_y, max_x))
# print(padded_overlay.shape)
# Loop over overlays
feat = 0
for i, overlay in enumerate(overlay_list):
if type(overlay) is list:
# Loop over images
for j, sub_overlay in enumerate(overlay):
if sub_overlay.ndim == 3:
# print(sub_overlay.shape)
# print(j, feat)
padded_overlay[j, feat, :sub_overlay.shape[0], :sub_overlay.shape[1],
:sub_overlay.shape[2]] = sub_overlay
elif sub_overlay.ndim == 4:
padded_overlay[j, feat:feat+sub_overlay.shape[0], :sub_overlay.shape[1],
:sub_overlay.shape[2],
:sub_overlay.shape[3]] = sub_overlay
if sub_overlay.ndim == 3:
feat += 1
elif sub_overlay.ndim == 4:
feat += sub_overlay.shape[0]
else:
if overlay.ndim == 3:
padded_overlay[0, feat, :overlay.shape[0], :overlay.shape[1],
:overlay.shape[2]] = overlay
feat += 1
elif overlay.ndim == 4:
padded_overlay[0, feat:feat+overlay.shape[0], :overlay.shape[1],
:overlay.shape[2], :overlay.shape[3]] = overlay
feat += overlay.shape[0]
return img_list, padded_overlay
def get_n_overlay_labels(self, overlay_list):
return len(overlay_list)
# n_overlay_labels = 0
# if type(overlay_list[0]) is list:
# overlay_iters = overlay_list[0]
# else:
# overlay_iters = overlay_list
# for i in overlay_iters:
# if len(i.shape) == 3:
# n_overlay_labels += 1
# elif len(i.shape) == 4:
# n_overlay_labels += i.shape[0]
# return n_overlay_labels
def view(self, plane='axial', three_planes=False, image_size=300, dynamic=True, cmap='gray'):
# imopts = {'tools': ['hover'], 'width': 400, 'height': 400, 'cmap': 'gray'}
# imopts = {'tools': ['hover'], 'cmap': 'gray'}
opts.defaults(
opts.GridSpace(shared_xaxis=False, shared_yaxis=False,
fontsize={'title': 16, 'labels': 16, 'xticks': 12,
'yticks': 12},
),
# opts.Image(cmap='gray', tools=['hover'], xaxis=None,
# yaxis=None, shared_axes=False),
# opts.Overlay(tools=['hover']),
# opts.NdOverlay(tools=['hover']),
opts.Image(cmap=cmap, xaxis=None,
yaxis=None, shared_axes=False),
)
self.is2d = False
if 'z' not in self.ds.dims:
self.is2d = True
self.set_size(image_size)
if self.is2d:
            plane = '2d'
a1, a2 = 'x', 'y'
pane_width = self.pane_width
pane_height = self.pane_height
else:
if plane == 'axial':
a1, a2, a3 = 'x', 'y', 'z'
# invert = True
pane_width = self.axial_width
pane_height = self.axial_height
elif plane == 'coronal':
a1, a2, a3 = 'x', 'z', 'y'
pane_width = self.coronal_width
pane_height = self.coronal_height
# invert = False
elif plane == 'sagittal':
a1, a2, a3 = 'y', 'z', 'x'
# invert = False
pane_width = self.sagittal_width
pane_height = self.sagittal_height
contrast_start_min = np.asscalar(self.ds.isel(subject_id=0,
).image.quantile(0.01).values)-1e-6
contrast_start_max = np.asscalar(self.ds.isel(subject_id=0,
).image.quantile(0.99).values)+1e-6
contrast_min = np.asscalar(self.ds.isel(subject_id=0).image.min().values)
contrast_max = np.asscalar(self.ds.isel(subject_id=0).image.max().values)
ctotal = contrast_max - contrast_min
contrast_min -= ctotal*0.1
contrast_max += ctotal*0.1
cslider = pn.widgets.RangeSlider(start=contrast_min, end=contrast_max,
value=(contrast_start_min, contrast_start_max),
name='contrast')
if 'overlay' in self.ds.data_vars:
hv_ds_image = hv.Dataset(self.ds[['image', 'overlay']])
# hv_ds_image = hv.Dataset(self.ds['image'])
if self.verbose:
print(hv_ds_image)
hv_ds_overlay = hv.Dataset(self.ds['overlay'])
if self.verbose:
print(hv_ds_overlay)
# tooltips = [
# ('(x,y)', '($x, $y)'),
# ('image', '@image'),
# ('overlay', '@overlay')
# ]
# hover = HoverTool(tooltips=tooltips)
if self.verbose:
print('overlay_max_calc')
if self.is2d:
first_subj_max = self.ds.isel(subject_id=0).overlay.max(dim=['x', 'y',
'label']).compute()
first_subj_min = self.ds.isel(subject_id=0).overlay.min(dim=['x', 'y',
'label']).compute()
else:
first_subj_max = self.ds.isel(subject_id=0).overlay.max(dim=['x', 'y', 'z',
'label']).compute()
first_subj_min = self.ds.isel(subject_id=0).overlay.min(dim=['x', 'y', 'z',
'label']).compute()
if self.verbose:
print('overlay_max_calc ready')
print(first_subj_max)
overlay_max = first_subj_max.max()
alpha_slider = pn.widgets.FloatSlider(start=0, end=1, value=0.7,
name='overlay transparency')
cmap_select = pn.widgets.Select(name='Overlay Colormap',
options=['Discrete', 'Continuous'])
if self.verbose:
print('max thresh calc')
print(first_subj_max.max())
max_thresholds = first_subj_max.values
if max_thresholds.size != 1:
max_thresholds = sorted(set(max_thresholds))
else:
max_thresholds = [np.asscalar(max_thresholds)]
# max_thresholds = sorted(list(set([first_subj.overlay.sel(overlay_label=i).values.max()
# for i in first_subj.overlay_label])))
if self.verbose:
print('min thresh calc')
min_thresholds = first_subj_min.values+1e-6
if min_thresholds.size != 1:
min_thresholds = sorted(set(min_thresholds))
else:
min_thresholds = [np.asscalar(min_thresholds)]
# min_thresholds = sorted(list(set(first_subj_min.min())))
# min_thresholds = sorted(list(set([first_subj.sel(overlay_label=i).min()+1e-6 for i in
# first_subj.overlay_label])))
# ocslider = pn.widgets.DiscreteSlider(name='overlay max threshold',
# options=max_thresholds,
# value=max_thresholds[-1])
if len(min_thresholds) == 1 and len(max_thresholds) == 1:
thresh_toggle = 0
oclim = (min_thresholds[0], max_thresholds[0])
elif len(min_thresholds) > 1 and len(max_thresholds) == 1:
thresh_toggle = 1
ocslider_min = pn.widgets.DiscreteSlider(name='overlay min threshold',
options=min_thresholds,
value=min_thresholds[-1])
@pn.depends(ocslider_min)
def oclim(value):
return (value, max_thresholds[0])
elif len(min_thresholds) == 1 and len(max_thresholds) > 1:
thresh_toggle = 2
ocslider_max = pn.widgets.DiscreteSlider(name='overlay max threshold',
options=max_thresholds,
value=max_thresholds[-1])
@pn.depends(ocslider_max)
def oclim(value):
return (min_thresholds[0], value)
else:
thresh_toggle = 3
ocslider_min = pn.widgets.DiscreteSlider(name='overlay min threshold',
options=min_thresholds,
value=min_thresholds[-1])
ocslider_max = pn.widgets.DiscreteSlider(name='overlay max threshold',
options=max_thresholds,
value=max_thresholds[-1])
@pn.depends(ocslider_min, ocslider_max)
def oclim(value_min, value_max):
return (value_min, value_max)
if self.verbose:
print(thresh_toggle)
@pn.depends(cmap_select)
def cmap_dict(value):
d = {'Discrete': 'glasbey_hv', 'Continuous': 'viridis'}
return d[value]
# subj_viewer = SubjectViewer(ds=self.ds,
# subject_id_sel=list(self.ds.subject_id.values))
if self.is2d:
gridspace = hv_ds_image.to(
hv.Image, [a1, a2], vdims=['image', 'overlay'],
dynamic=dynamic).opts(frame_width=pane_width, frame_height=pane_height,
tools=['hover'],
).apply.opts(clim=cslider.param.value)
if self.verbose:
print(gridspace)
gridspace *= hv_ds_overlay.to(
hv.Image, [a1, a2], vdims='overlay',
dynamic=dynamic
).opts(
cmap='glasbey_hv', clipping_colors={'min': 'transparent',
'NaN': 'transparent'},
).redim.range(overlay=(1e-6, overlay_max)).apply.opts(
alpha=alpha_slider.param.value, cmap=cmap_dict, clim=oclim)
# print(gridspace)
# print(gridspace)
# gridspace = hv.DynamicMap(subj_viewer.load_subject).grid('label')
gridspace = gridspace.layout('label')
elif three_planes:
# squish_height = int(max(image_size*(len(self.ds.z)/len(self.ds.x)), image_size/2))
# gridspace = hv.GridSpace(kdims=['plane', 'label'], label=f'{self.subject_id}')
gridspace = hv.GridSpace(kdims=['plane', 'label'])
for mod in self.ds.label.values:
gridspace['axial', mod] = hv_ds_image.select(label=mod).to(
hv.Image, ['x', 'y'], groupby=['subject_id', 'z'], vdims='image',
dynamic=dynamic).opts(frame_width=self.axial_width,
frame_height=self.axial_height
).apply.opts(clim=cslider.param.value)
gridspace['coronal', mod] = hv_ds_image.select(label=mod).to(
hv.Image, ['x', 'z'], groupby=['subject_id', 'y'], vdims='image',
dynamic=dynamic).opts(frame_width=self.coronal_width,
frame_height=self.coronal_height
).apply.opts(clim=cslider.param.value)
gridspace['sagittal', mod] = hv_ds_image.select(label=mod).to(
hv.Image, ['y', 'z'], groupby=['subject_id', 'x'], vdims='image',
dynamic=dynamic).opts(frame_width=self.sagittal_width,
frame_height=self.sagittal_height
).apply.opts(clim=cslider.param.value)
gridspace['axial', mod] *= hv_ds_overlay.select(label=mod).to(
hv.Image, ['x', 'y'], groupby=['subject_id', 'z', 'overlay_label'],
vdims='overlay', dynamic=dynamic
).opts(
cmap='glasbey_hv', clipping_colors={'min': 'transparent', 'NaN':
'transparent'},
).redim.range(overlay=(0.1, overlay_max)).apply.opts(
alpha=alpha_slider.param.value, cmap=cmap_dict, clim=oclim)
gridspace['coronal', mod] *= hv_ds_overlay.select(label=mod).to(
hv.Image, ['x', 'z'], groupby=['subject_id', 'y', 'overlay_label'],
vdims='overlay', dynamic=dynamic
).opts(
cmap='glasbey_hv', clipping_colors={'min': 'transparent', 'NaN':
'transparent'},
).redim.range(overlay=(0.1, overlay_max)).apply.opts(
alpha=alpha_slider.param.value, cmap=cmap_dict, clim=oclim)
gridspace['sagittal', mod] *= hv_ds_overlay.select(label=mod).to(
hv.Image, ['y', 'z'], groupby=['subject_id', 'x', 'overlay_label'],
vdims='overlay', dynamic=dynamic
).opts(
cmap='glasbey_hv', clipping_colors={'min': 'transparent', 'NaN':
'transparent'},
).redim.range(overlay=(0.1, overlay_max)).apply.opts(
alpha=alpha_slider.param.value, cmap=cmap_dict, clim=oclim)
else:
# squish_height = int(max(image_size*(len(self.ds.z)/len(self.ds.x)), image_size/2))
# gridspace = hv.GridSpace(kdims=['label'], label=f'{self.subject_id}')
if self.verbose:
print('init gridspace')
# gridspace = hv.GridSpace(kdims=['label'])
# for mod in self.ds.label:
# gridspace[mod] = hv_ds_image.select(label=mod).to(
# hv.Image, [a1, a2], groupby=[a3], vdims='image',
# dynamic=dynamic).opts(frame_width=image_size, frame_height=image_size,
# ).apply.opts(clim=cslider.param.value)
# gridspace[mod] *= hv_ds_overlay.select(label=mod).to(
# hv.Image, [a1, a2], groupby=[a3, 'overlay_label'], vdims='overlay',
# dynamic=dynamic).opts(
# cmap='glasbey_hv', clipping_colors={'min': 'transparent'},
# ).redim.range(overlay=(1e-6, overlay_max)).apply.opts(
# alpha=alpha_slider.param.value, cmap=cmap_dict, clim=oclim)
# gridspace[mod] = gridspace[mod].opts(tools=['hover'])
# print(gridspace[mod])
gridspace = hv_ds_image.to(
hv.Image, [a1, a2], vdims=['image', 'overlay'],
dynamic=dynamic).opts(frame_width=pane_width, frame_height=pane_height,
tools=['hover'],
).apply.opts(clim=cslider.param.value)
if self.verbose:
print(gridspace)
gridspace *= hv_ds_overlay.to(
hv.Image, [a1, a2], vdims='overlay',
dynamic=dynamic
).opts(
cmap='glasbey_hv', clipping_colors={'min': 'transparent',
'NaN': 'transparent'},
).redim.range(overlay=(1e-6, overlay_max)).apply.opts(
alpha=alpha_slider.param.value, cmap=cmap_dict, clim=oclim)
# print(gridspace)
# print(gridspace)
# gridspace = hv.DynamicMap(subj_viewer.load_subject).grid('label')
gridspace = gridspace.layout('label')
else:
tooltips = [
('(x,y)', '($x, $y)'),
('image', '@image'),
]
hover = HoverTool(tooltips=tooltips)
hv_ds = hv.Dataset(self.ds['image'])
if self.is2d:
gridspace = hv.GridSpace(kdims=['label'])
for mod in self.ds.label.values:
gridspace[mod] = hv_ds.select(label=mod).to(
hv.Image, [a1, a2], groupby=['subject_id'], vdims='image',
dynamic=dynamic).opts(frame_width=pane_width, frame_height=pane_height,
shared_axes=False, tools=[hover], axiswise=True,
# ).apply.opts(clim=cslider.param.value)
)
elif three_planes:
# squish_height = int(max(image_size*(len(self.ds.z)/len(self.ds.x)), image_size/2))
# gridspace = hv.GridSpace(kdims=['plane', 'label'], label=f'{self.subject_id}')
gridspace = hv.GridSpace(kdims=['plane', 'label'])
for mod in self.ds.label.values:
gridspace['axial', mod] = hv_ds.select(label=mod).to(
hv.Image, ['x', 'y'], groupby=['subject_id', 'z'], vdims='image',
dynamic=dynamic).opts(frame_width=self.axial_width,
frame_height=self.axial_height,
invert_yaxis=False).apply.opts(
clim=cslider.param.value)
gridspace['coronal', mod] = hv_ds.select(label=mod).to(
hv.Image, ['x', 'z'], groupby=['subject_id', 'y'], vdims='image',
dynamic=dynamic).opts(frame_width=self.coronal_width,
frame_height=self.coronal_height).apply.opts(
clim=cslider.param.value)
gridspace['sagittal', mod] = hv_ds.select(label=mod).to(
hv.Image, ['y', 'z'], groupby=['subject_id', 'x'], vdims='image',
dynamic=dynamic).opts(frame_width=self.sagittal_width,
frame_height=self.sagittal_height).apply.opts(
clim=cslider.param.value)
else:
# squish_height = int(max(image_size*(len(self.ds.z)/len(self.ds.x)), image_size/2))
# gridspace = hv.GridSpace(kdims=['label'], label=f'{self.subject_id}')
gridspace = hv.GridSpace(kdims=['label'])
for mod in self.ds.label.values:
gridspace[mod] = hv_ds.select(label=mod).to(
hv.Image, [a1, a2], groupby=['subject_id', a3], vdims='image',
dynamic=dynamic).opts(frame_width=pane_width, frame_height=pane_height,
shared_axes=False, tools=[hover],
).apply.opts(clim=cslider.param.value)
pn_layout = pn.pane.HoloViews(gridspace)
wb = pn_layout.widget_box
wb.append(cslider)
if 'overlay' in self.ds.data_vars:
wb.append(alpha_slider)
wb.append(cmap_select)
if thresh_toggle in [2, 3]:
wb.append(ocslider_max)
if thresh_toggle in [1, 3]:
wb.append(ocslider_min)
return pn.Row(wb, pn_layout)
def set_size(self, size):
init_spacing = self.ds.spacing.isel(subject_id=0, label=0).values
x_spacing = init_spacing[0]
y_spacing = init_spacing[1]
if self.is2d:
self.pane_width = int(size)
pane_scale = self.pane_width/(len(self.ds.x)*x_spacing)
self.pane_height = int(y_spacing*len(self.ds.y)*pane_scale)
else:
z_spacing = init_spacing[2]
self.axial_width = int(size)
axial_scale = self.axial_width/(len(self.ds.x)*x_spacing)
self.axial_height = int(y_spacing*len(self.ds.y)*axial_scale)
self.coronal_width = int(size)
coronal_scale = self.coronal_width/(len(self.ds.x)*x_spacing)
self.coronal_height = int(z_spacing*len(self.ds.z)*coronal_scale)
self.sagittal_width = int(size)
sagittal_scale = self.sagittal_width/(len(self.ds.y)*y_spacing)
self.sagittal_height = int(z_spacing*len(self.ds.z)*sagittal_scale)
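# Hedged usage sketch for PyPatient (paths, labels and subject id are
# illustrative only):
#
#     pt = PyPatient({'T1': 't1.nii.gz', 'T2': 't2.nii.gz'},
#                    overlay_path={'seg': ['t1_seg.nii.gz', 't2_seg.nii.gz']},
#                    subject_id='subj001')
#     pt.view(plane='axial', image_size=300)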
class PyCohort(PyPatient):
"""Class for veiwing image and metadata for a cohort of patients."""
def __init__(self, path, **kwargs):
'''Initialize via reading the image and creating the xarray.'''
self.ds = self.load_files(path)
self.verbose = kwargs.get('verbose', False)
def load_files(self, file_names):
if type(file_names) is not list:
file_names = str(file_names)
else:
file_names = [f for f in file_names if Path(f).exists()]
if '*' in file_names or type(file_names) is list:
# ds = xr.open_mfdataset(file_names, combine='nested', concat_dim='subject_id')
ds = xr.open_mfdataset(file_names, concat_dim='subject_id', combine='nested').persist()
else:
ds = xr.open_dataset(file_names)
return ds
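# Illustrative usage of PyCohort.load_files (paths are assumptions, not files
# shipped with this module):
#   PyCohort('data/subject_*.nc')        # glob pattern -> xr.open_mfdataset
#   PyCohort(['sub01.nc', 'sub02.nc'])   # explicit list; missing paths are dropped
#   PyCohort('single_subject.nc')        # single file  -> xr.open_dataset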
|
the-stack_106_31514 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=9
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.Y.on(input_qubit[0])) # number=5
c.append(cirq.Y.on(input_qubit[0])) # number=6
c.append(cirq.Y.on(input_qubit[3])) # number=7
c.append(cirq.Y.on(input_qubit[3])) # number=8
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
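# Note on make_circuit above: the paired Y gates on qubits 0 and 3 cancel
# (Y·Y = I), so the measured distribution is the uniform one produced by the
# four Hadamard gates alone.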
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq32.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() |
the-stack_106_31515 | from sortedcontainers import SortedList
class NoSolutionError(Exception):
"""An error indicating that no solution was found.
Attributes
----------
iteration_limited
Whether the search failed because the maximum number
of iterations was reached.
"""
def __init__(self, iteration_limited=False):
self.iteration_limited = iteration_limited
class _Node:
def __init__(self, state, path_cost, parent=None, action=None):
self.state = state
self.path_cost = path_cost
self.parent = parent
self.action = action
def get_child(self, problem, action):
state = problem.result(self.state, action)
path_cost = self.path_cost + problem.step_cost(self.state, action, state)
return _Node(state, path_cost, parent=self, action=action)
def solution(self):
if self.parent is None:
return []
return self.parent.solution() + [self.action]
def a_star_graph(problem, heuristic, iteration_limit=10000):
"""Do an A* search to find the solution to a given problem.
Parameters
----------
problem
The problem to solve. It must have the following methods:
- initial_state(), which returns the initial state.
- actions(state), which returns the possible actions in `state`.
- result(state, action), which returns the resulting state after
performing `action` in `state`.
- goal_test(state), which checks whether `state` is a goal state.
- step_cost(state, action, result), which returns the step cost for
the given state transition.
States should be hashable, but can otherwise have any type. Actions
can have any type.
heuristic
A heuristic function that takes a state and returns an estimate of
the path cost from that state to a goal state. To get an optimal
solution, it should never overestimate the path cost.
iteration_limit
A limit on the number of iterations before raising a NoSolutionError.
This is to prevent freezing if the problem is too difficult.
Returns
-------
list of action
The sequence of actions that solves the problem.
Raises
------
NoSolutionError
Raised if there is no solution, or if the max number of iterations
was reached.
"""
starting_node = _Node(problem.initial_state(), 0)
if problem.goal_test(starting_node.state):
return starting_node.solution()
frontier = SortedList(
[starting_node], key=lambda n: -(n.path_cost + heuristic(n.state))
)
explored = set()
iterations = 0
while frontier:
node = frontier.pop()
if problem.goal_test(node.state):
return node.solution()
explored.add(node.state)
for action in problem.actions(node.state):
child = node.get_child(problem, action)
child_in_frontier = child.state in [n.state for n in frontier]
if child.state not in explored and not child_in_frontier:
frontier.add(child)
elif child_in_frontier:
index = [n.state for n in frontier].index(child.state)
other_node = frontier[index]
if child.path_cost < other_node.path_cost:
frontier.remove(other_node)
frontier.add(child)
iterations += 1
if iterations > iteration_limit:
raise NoSolutionError(iteration_limited=True)
raise NoSolutionError()
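# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original module): a minimal problem on a
# number line that satisfies the interface documented in a_star_graph's
# docstring. The class name and goal value are assumptions chosen purely for
# demonstration.
class _NumberLineProblem:
    """Reach `goal` from state 0 using +1 / -1 moves of unit cost."""

    def __init__(self, goal=5):
        self.goal = goal

    def initial_state(self):
        return 0

    def actions(self, state):
        return [+1, -1]

    def result(self, state, action):
        return state + action

    def goal_test(self, state):
        return state == self.goal

    def step_cost(self, state, action, result):
        return 1


if __name__ == "__main__":
    problem = _NumberLineProblem(goal=5)
    # abs(goal - state) never overestimates the remaining cost, so the
    # returned plan is optimal: five +1 moves.
    print(a_star_graph(problem, lambda s: abs(problem.goal - s)))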
|
the-stack_106_31516 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for file_upload_hook."""
from absl.testing import absltest
import mock
from multitest_transport.models import ndb_models
from multitest_transport.plugins import base as plugins
from multitest_transport.plugins import file_upload_hook
from multitest_transport.util import file_util
class AbstractFileUploadHookTest(absltest.TestCase):
def testInit_noFilePattern(self):
"""Tests that a file pattern is required."""
with self.assertRaises(ValueError):
file_upload_hook.AbstractFileUploadHook(file_pattern=None)
@mock.patch.object(file_upload_hook.AbstractFileUploadHook, 'UploadFile')
@mock.patch.object(file_util, 'GetOutputFileUrl')
@mock.patch.object(file_util, 'GetOutputFilenames')
def testExecute_filePattern(
self, mock_get_filenames, mock_get_output_url, mock_upload_file):
"""Tests that only files matching the pattern are uploaded."""
hook_context = plugins.TestRunHookContext(
test_run=mock.MagicMock(), latest_attempt=mock.MagicMock(),
phase=ndb_models.TestRunPhase.ON_SUCCESS)
mock_get_filenames.return_value = ['hello.txt', 'world.xml']
mock_get_output_url.side_effect = lambda tr, a, filename: filename
# Hook will only upload XML files
hook = file_upload_hook.AbstractFileUploadHook(file_pattern=r'.*\.xml')
hook.Execute(hook_context)
mock_upload_file.assert_called_once_with(mock.ANY, 'world.xml', 'world.xml')
@mock.patch.object(file_upload_hook.AbstractFileUploadHook, 'UploadFile')
@mock.patch.object(file_util, 'GetOutputFileUrl')
@mock.patch.object(file_util, 'GetOutputFilenames')
def testExecute_uploadPrefix(
self, mock_get_filenames, mock_get_output_url, mock_upload_file):
"""Tests that a prefix can be applied to the destination file path."""
hook_context = plugins.TestRunHookContext(
test_run=mock.MagicMock(), latest_attempt=mock.MagicMock(),
phase=ndb_models.TestRunPhase.ON_SUCCESS)
mock_get_filenames.return_value = ['hello.txt', 'world.xml']
mock_get_output_url.side_effect = lambda tr, a, filename: filename
# Hook will apply a dir/ prefix to all destinations
hook = file_upload_hook.AbstractFileUploadHook(
file_pattern='.*', upload_prefix='dir/')
hook.Execute(hook_context)
mock_upload_file.assert_has_calls([
mock.call(mock.ANY, 'hello.txt', 'dir/hello.txt'),
mock.call(mock.ANY, 'world.xml', 'dir/world.xml'),
])
def testExecute_noAttempt(self):
"""Tests that an attempt is required during execution."""
hook_context = plugins.TestRunHookContext(
test_run=mock.MagicMock(), latest_attempt=None,
phase=ndb_models.TestRunPhase.ON_SUCCESS)
hook = file_upload_hook.AbstractFileUploadHook(file_pattern='.*')
with self.assertRaises(ValueError):
hook.Execute(hook_context)
if __name__ == '__main__':
absltest.main()
|
the-stack_106_31517 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from graph_nets import modules
from graph_nets import utils_tf
import sonnet as snt
OBJ_HIDDEN = 8
REL_HIDDEN = 8
class SegmentClassifier(snt.AbstractModule):
def __init__(self, name="SegmentClassifier"):
super(SegmentClassifier, self).__init__(name=name)
self._obj_mlp = snt.Sequential([
snt.nets.MLP([OBJ_HIDDEN, OBJ_HIDDEN, 3],
activation=tf.nn.relu,
activate_final=False),
])
self._rel_mlp = snt.Sequential([
snt.nets.MLP([REL_HIDDEN, REL_HIDDEN, REL_HIDDEN, 1],
activation=tf.nn.relu,
activate_final=False),
tf.nn.sigmoid
])
self._first = modules.InteractionNetwork(
edge_model_fn=lambda: self._rel_mlp,
node_model_fn=lambda: self._obj_mlp,
reducer=tf.unsorted_segment_sum
)
def _build(self, input_op, num_processing_steps):
output_ops = []
latent = self._first(input_op)
latent = self._first(latent)
# Transforms the outputs into appropriate shapes.
output_ops.append(latent)
return output_ops
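# Note on SegmentClassifier above: _build applies the same InteractionNetwork
# twice, i.e. two message-passing steps with shared weights; the
# num_processing_steps argument is currently not used to control that count.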
|
the-stack_106_31518 | # Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from testing_support.fixtures import (code_coverage_fixture,
collector_agent_registration_fixture, collector_available_fixture)
_coverage_source = [
'newrelic.hooks.framework_bottle',
]
code_coverage = code_coverage_fixture(source=_coverage_source)
_default_settings = {
'transaction_tracer.explain_threshold': 0.0,
'transaction_tracer.transaction_threshold': 0.0,
'transaction_tracer.stack_trace_threshold': 0.0,
'debug.log_data_collector_payloads': True,
'debug.record_transaction_failure': True,
}
collector_agent_registration = collector_agent_registration_fixture(
app_name='Python Agent Test (framework_bottle)',
default_settings=_default_settings)
@pytest.fixture(scope='function')
def target_application():
import _target_application
return _target_application.target_application
|
the-stack_106_31520 | '''
Generative adversarial attacks - influencing the output of the network
(e.g. making it predict a different class than the true one), either through noise spread over the whole image that is invisible to the human eye,
or by placing patches (small images overlaid in the foreground) which then force
e.g. a different classification.
untargeted - without a specific "forced" target class
'''
import tensorflow as tf
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
import numpy as np
import matplotlib.pyplot as plt
from mnistCnn import *
from mnistData import *
from plotting import *
mnist_data = MNIST()
x_train, y_train = mnist_data.get_train_set()
x_test, y_test = mnist_data.get_test_set()
def adversarial_noise(model, image, label):
with tf.GradientTape() as tape:
tape.watch(image)
prediction = model(image, training=False)[0]
loss = tf.keras.losses.categorical_crossentropy(label, prediction)
# Get the gradients of the loss w.r.t. the input image.
gradient = tape.gradient(loss, image)
# Get the sign of the gradients to create the noise
signed_grad = tf.sign(gradient)
return signed_grad
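# The __main__ block below uses this helper for an iterative, untargeted
# FGSM-style attack:
#   image_adv <- clip(image_adv + eps * sign(d loss / d image), 0, 1)
# repeated until the network's top prediction no longer matches the true class.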
if __name__ == "__main__":
cnn = build_cnn()
lr = 0.0001
optimizer = Adam(lr=lr)
cnn.compile(
loss="categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
#cnn.fit(x_train, y_train, verbose=1,
# batch_size=256, epochs=10,
# validation_data=(x_test, y_test))
path = "/home/felix/Desktop/DeepLearning/7_DeepLearning-GANs/04_Generative_Adversarial_Attacks/weights/mnist_cnn.h5"
#cnn.save_weights(path)
cnn.load_weights(path)
#score = cnn.evaluate(x_test, y_test, verbose=0)
#print("Test accuracy: ", score[1])
sample_idx = np.random.randint(low=0, high=x_test.shape[0])
image = np.array([x_test[sample_idx]])
true_label = y_test[sample_idx]
true_label_idx = np.argmax(true_label)
y_pred = cnn.predict(image)[0]
print("Right class: ", true_label_idx)
print("Prob. right class: ", y_pred[true_label_idx])
eps = 0.005 # strength of the noise added per step
image_adv = tf.convert_to_tensor(image, dtype=tf.float32) # convert the image to a tensor
while (np.argmax(y_pred) == true_label_idx):
# image_adv = image_adv + eps * noise
noise = adversarial_noise(cnn, image_adv, true_label)
if np.sum(noise) == 0.0:
break
image_adv = image_adv + eps * noise
image_adv = tf.clip_by_value(image_adv, 0, 1)
y_pred = cnn.predict(image_adv)[0]
print("Prob. right class: ", y_pred[true_label_idx])
print("Highest Prob.: ", np.max(y_pred), "\n")
plot_img(image_adv.numpy(), cmap="gray")
plot_img(noise.numpy(), cmap="gray")
|
the-stack_106_31521 | from multiprocessing.pool import ThreadPool as Pool
import abc
import logging
from rtcclient import requests
import xmltodict
from rtcclient import urlunquote, OrderedDict
from rtcclient import exception
from rtcclient.utils import token_expire_handler
class RTCBase(object):
__metaclass__ = abc.ABCMeta
log = logging.getLogger("base.RTCBase")
CONTENT_XML = "text/xml"
CONTENT_URL_ENCODED = "application/x-www-form-urlencoded"
OSLC_CR_XML = "application/x-oslc-cm-change-request+xml"
OSLC_CR_JSON = "application/x-oslc-cm-change-request+json"
def __init__(self, url):
self.url = self.validate_url(url)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, str(self))
@abc.abstractmethod
def __str__(self):
pass
def __eq__(self, other):
"""
identify whether the other one represents a connection to the server
"""
if not isinstance(other, self.__class__):
return False
if not other.url == self.url:
return False
return True
def getattr(self, attr):
try:
return self.__getattribute__(attr)
except Exception:
return None
def __getitem__(self, key):
return self.__getattribute__(key)
@abc.abstractmethod
def get_rtc_obj(self):
pass
@token_expire_handler
def get(self,
url,
verify=False,
headers=None,
proxies=None,
timeout=60,
**kwargs):
"""Sends a GET request. Refactor from requests module
:param url: URL for the new :class:`Request` object.
:param verify: (optional) if ``True``, the SSL cert will be verified.
A CA_BUNDLE path can also be provided.
:param headers: (optional) Dictionary of HTTP Headers to send with
the :class:`Request`.
:param proxies: (optional) Dictionary mapping protocol to the URL of
the proxy.
:param timeout: (optional) How long to wait for the server to send data
before giving up, as a float, or a :ref:`(connect timeout, read
timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
self.log.debug("Get response from %s", url)
response = requests.get(url,
verify=verify,
headers=headers,
proxies=proxies,
timeout=timeout,
**kwargs)
if response.status_code != 200:
self.log.error("Failed GET request at <%s> with response: %s", url,
response.content)
response.raise_for_status()
return response
@token_expire_handler
def post(self,
url,
data=None,
json=None,
verify=False,
headers=None,
proxies=None,
timeout=60,
**kwargs):
"""Sends a POST request. Refactor from requests module
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to
send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of
the :class:`Request`.
:param verify: (optional) if ``True``, the SSL cert will be verified.
A CA_BUNDLE path can also be provided.
:param headers: (optional) Dictionary of HTTP Headers to send with
the :class:`Request`.
:param proxies: (optional) Dictionary mapping protocol to the URL of
the proxy.
:param timeout: (optional) How long to wait for the server to send data
before giving up, as a float, or a :ref:`(connect timeout, read
timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
self.log.debug("Post a request to %s with data: %s and json: %s", url,
data, json)
response = requests.post(url,
data=data,
json=json,
verify=verify,
headers=headers,
proxies=proxies,
timeout=timeout,
**kwargs)
if response.status_code not in [200, 201]:
self.log.error("Failed POST request at <%s> with response: %s", url,
response.content)
self.log.info(response.status_code)
response.raise_for_status()
return response
@token_expire_handler
def put(self,
url,
data=None,
verify=False,
headers=None,
proxies=None,
timeout=60,
**kwargs):
"""Sends a PUT request. Refactor from requests module
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to
send in the body of the :class:`Request`.
:param verify: (optional) if ``True``, the SSL cert will be verified.
A CA_BUNDLE path can also be provided.
:param headers: (optional) Dictionary of HTTP Headers to send with
the :class:`Request`.
:param proxies: (optional) Dictionary mapping protocol to the URL of
the proxy.
:param timeout: (optional) How long to wait for the server to send data
before giving up, as a float, or a :ref:`(connect timeout, read
timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
self.log.debug("Put a request to %s with data: %s", url, data)
response = requests.put(url,
data=data,
verify=verify,
headers=headers,
proxies=proxies,
timeout=timeout,
**kwargs)
if response.status_code not in [200, 201]:
self.log.error("Failed PUT request at <%s> with response: %s", url,
response.content)
response.raise_for_status()
return response
@token_expire_handler
def delete(self,
url,
headers=None,
verify=False,
proxies=None,
timeout=60,
**kwargs):
"""Sends a DELETE request. Refactor from requests module
:param url: URL for the new :class:`Request` object.
:param headers: (optional) Dictionary of HTTP Headers to send with
the :class:`Request`.
:param verify: (optional) if ``True``, the SSL cert will be verified.
A CA_BUNDLE path can also be provided.
:param proxies: (optional) Dictionary mapping protocol to the URL of
the proxy.
:param timeout: (optional) How long to wait for the server to send data
before giving up, as a float, or a :ref:`(connect timeout, read
timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
self.log.debug("Delete a request to %s", url)
response = requests.delete(url,
headers=headers,
verify=verify,
proxies=proxies,
timeout=timeout,
**kwargs)
if response.status_code not in [200, 201]:
self.log.error("Failed DELETE request at <%s> with response: %s",
url, response.content)
response.raise_for_status()
return response
@classmethod
def validate_url(cls, url):
"""Strip and trailing slash to validate a url
:param url: the url address
:return: the valid url address
:rtype: string
"""
if url is None:
return None
url = url.strip()
while url.endswith("/"):
url = url[:-1]
return url
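# Illustrative behaviour of RTCBase.validate_url (values are made up):
#   RTCBase.validate_url("https://host/ccm/")  -> "https://host/ccm"
#   RTCBase.validate_url(None)                 -> None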
class FieldBase(RTCBase):
__metaclass__ = abc.ABCMeta
log = logging.getLogger("base.FieldBase")
def __init__(self, url, rtc_obj, raw_data=None):
RTCBase.__init__(self, url)
self.field_alias = dict()
self.rtc_obj = rtc_obj
self.raw_data = raw_data
if raw_data is not None:
self.__initializeFromRaw()
elif self.url:
self._initialize()
@abc.abstractmethod
def __str__(self):
pass
def get_rtc_obj(self):
return self.rtc_obj
def _initialize(self):
"""Initialize the object from the request"""
self.log.debug("Start initializing data from %s", self.url)
resp = self.get(
self.url,
verify=False,
proxies=self.rtc_obj.proxies,
headers=self.rtc_obj.headers,
)
self.__initialize(resp)
self.log.info("Finish the initialization for <%s %s>",
self.__class__.__name__, self)
def __initialize(self, resp):
"""Initialize from the response"""
raw_data = xmltodict.parse(resp.content)
root_key = list(raw_data.keys())[0]
self.raw_data = raw_data.get(root_key)
self.__initializeFromRaw()
def __initializeFromRaw(self):
"""Initialze from raw data (OrderedDict)"""
with Pool() as pool:
for processed in pool.map(self.__process_items,
self.raw_data.items()):
if processed is None:
continue
key, attr, value = processed
self.field_alias[attr] = key
self.setattr(attr, value)
def __process_items(self, item):
"""Process a single work item element"""
key, value = item
if key.startswith("@"):
# be compatible with IncludedInBuild
if "@oslc_cm:label" != key:
return None
attr = key.split(":")[-1].replace("-", "_")
attr_list = attr.split(".")
# ignore long attributes
if len(attr_list) > 1:
# attr = "_".join([attr_list[-2],
# attr_list[-1]])
return None
if isinstance(value, OrderedDict):
value_text = value.get("#text")
if value_text is not None:
value = value_text
else:
# request detailed info using rdf:resource
value = list(value.values())[0]
try:
value = self.__get_rdf_resource_title(value)
except (exception.RTCException, Exception):
self.log.error("Unable to handle %s", value)
return key, attr, value
def __get_rdf_resource_title(self, rdf_url):
# handle for /jts/users
if "/jts/users" in rdf_url:
return urlunquote(rdf_url.split("/")[-1])
# keep query result url
if rdf_url.endswith("rtc_cm:results"):
return rdf_url
# keep attachment url
if "/resource/content/" in rdf_url:
return rdf_url
resp = self.get(
rdf_url,
verify=False,
proxies=self.rtc_obj.proxies,
headers=self.rtc_obj.headers,
)
raw_data = xmltodict.parse(resp.content)
root_key = list(raw_data.keys())[0]
total_count = raw_data[root_key].get("@oslc_cm:totalCount")
if total_count is None:
# no total count
# only single resource
# compatible with IncludedInBuild
return raw_data[root_key].get("dc:title") or raw_data[root_key].get(
"foaf:nick")
else:
# multiple resource
result_list = list()
entry_keys = [
entry_key for entry_key in raw_data[root_key].keys()
if not entry_key.startswith("@")
]
for entry_key in entry_keys:
entries = raw_data[root_key][entry_key]
if isinstance(entries, OrderedDict):
entry_result = self.__handle_rdf_entry(entries)
result_list.append(entry_result)
else:
for entry in entries:
entry_result = self.__handle_rdf_entry(entry)
result_list.append(entry_result)
if not result_list:
return None
return result_list
def __handle_rdf_entry(self, entry):
# only return useful info instead of the whole object
return_fields = ["rtc_cm:userId", "dc:title", "dc:description"]
subkeys = entry.keys()
for return_field in return_fields:
if return_field in subkeys:
return entry.get(return_field)
raise exception.RTCException()
def setattr(self, attr, value):
self.__setattr__(attr, value)
|
the-stack_106_31522 | from __future__ import absolute_import, division, print_function
import numpy as np
import torch
from torch.autograd import Variable
import pyro.distributions as dist
import pyro.poutine as poutine
import pyro.util as util
def _eq(x, y):
"""
Equality comparison for nested data structures with tensors.
"""
if type(x) is not type(y):
return False
elif isinstance(x, dict):
if set(x.keys()) != set(y.keys()):
return False
return all(_eq(x_val, y[key]) for key, x_val in x.items())
elif isinstance(x, (np.ndarray, torch.Tensor)):
return (x == y).all()
elif isinstance(x, torch.autograd.Variable):
return (x.data == y.data).all()
else:
return x == y
def _index(seq, value):
"""
Find position of ``value`` in ``seq`` using ``_eq`` to test equality.
Returns ``-1`` if ``value`` is not in ``seq``.
"""
for i, x in enumerate(seq):
if _eq(x, value):
return i
return -1
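# Illustrative behaviour of the helpers above (values are made up):
#   _index([{"a": 1}, torch.Tensor([2.0])], torch.Tensor([2.0]))  -> 1
#   _index([1, 2, 3], 4)                                          -> -1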
class Histogram(dist.Distribution):
"""
Abstract Histogram distribution of equality-comparable values.
Should only be used inside Marginal.
"""
enumerable = True
@util.memoize
def _dist_and_values(self, *args, **kwargs):
# XXX currently this whole object is very inefficient
values, logits = [], []
for value, logit in self._gen_weighted_samples(*args, **kwargs):
ix = _index(values, value)
if ix == -1:
# Value is new.
values.append(value)
logits.append(logit)
else:
# Value has already been seen.
logits[ix] = util.log_sum_exp(torch.stack([logits[ix], logit]).squeeze())
logits = torch.stack(logits).squeeze()
logits -= util.log_sum_exp(logits)
if not isinstance(logits, torch.autograd.Variable):
logits = Variable(logits)
logits = logits - util.log_sum_exp(logits)
d = dist.Categorical(logits=logits, one_hot=False)
return d, values
def _gen_weighted_samples(self, *args, **kwargs):
raise NotImplementedError("_gen_weighted_samples is abstract method")
def sample(self, *args, **kwargs):
d, values = self._dist_and_values(*args, **kwargs)
ix = d.sample().data[0]
return values[ix]
def log_pdf(self, val, *args, **kwargs):
d, values = self._dist_and_values(*args, **kwargs)
ix = _index(values, val)
return d.log_pdf(Variable(torch.Tensor([ix])))
def batch_log_pdf(self, val, *args, **kwargs):
d, values = self._dist_and_values(*args, **kwargs)
ix = _index(values, val)
return d.batch_log_pdf(Variable(torch.Tensor([ix])))
def enumerate_support(self, *args, **kwargs):
d, values = self._dist_and_values(*args, **kwargs)
return values[:]
class Marginal(Histogram):
"""
:param trace_dist: a TracePosterior instance representing a Monte Carlo posterior
Marginal histogram distribution.
Turns a TracePosterior object into a Distribution
over the return values of the TracePosterior's model.
"""
def __init__(self, trace_dist, sites=None):
assert isinstance(trace_dist, TracePosterior), \
"trace_dist must be trace posterior distribution object"
if sites is None:
sites = "_RETURN"
assert isinstance(sites, (str, list)), \
"sites must be either '_RETURN' or list"
if isinstance(sites, str):
assert sites in ("_RETURN",), \
"sites string must be '_RETURN'"
self.sites = sites
super(Marginal, self).__init__()
self.trace_dist = trace_dist
def _gen_weighted_samples(self, *args, **kwargs):
for tr, log_w in poutine.block(self.trace_dist._traces)(*args, **kwargs):
if self.sites == "_RETURN":
val = tr.nodes["_RETURN"]["value"]
else:
val = {name: tr.nodes[name]["value"]
for name in self.sites}
yield (val, log_w)
def batch_log_pdf(self, val, *args, **kwargs):
raise NotImplementedError("batch_log_pdf not well defined for Marginal")
class TracePosterior(object):
"""
Abstract TracePosterior object from which posterior inference algorithms inherit.
Holds a generator over Traces sampled from the approximate posterior.
Not actually a distribution object - no sample or score methods.
"""
def __init__(self):
pass
def _traces(self, *args, **kwargs):
"""
Abstract method.
Get unnormalized weighted list of posterior traces
"""
raise NotImplementedError("inference algorithm must implement _traces")
def __call__(self, *args, **kwargs):
traces, logits = [], []
for tr, logit in poutine.block(self._traces)(*args, **kwargs):
traces.append(tr)
logits.append(logit)
logits = torch.stack(logits).squeeze()
logits -= util.log_sum_exp(logits)
if not isinstance(logits, torch.autograd.Variable):
logits = Variable(logits)
ix = dist.categorical(logits=logits, one_hot=False)
return traces[ix.data[0]]
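# Note on TracePosterior.__call__ above: it normalizes the per-trace log-weights
# with log_sum_exp and then samples a single trace index from the resulting
# categorical distribution, i.e. it performs importance resampling of one
# posterior trace.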
|
the-stack_106_31525 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from azure.core.exceptions import HttpResponseError
class DetectedLanguage(Model):
"""DetectedLanguage.
All required parameters must be populated in order to send to Azure.
:param name: Required. Long name of a detected language (e.g. English,
French).
:type name: str
:param iso6391_name: Required. A two letter representation of the detected
language according to the ISO 639-1 standard (e.g. en, fr).
:type iso6391_name: str
:param score: Required. A confidence score between 0 and 1. Scores close
to 1 indicate 100% certainty that the identified language is true.
:type score: float
"""
_validation = {
'name': {'required': True},
'iso6391_name': {'required': True},
'score': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'iso6391_name': {'key': 'iso6391Name', 'type': 'str'},
'score': {'key': 'score', 'type': 'float'},
}
def __init__(self, **kwargs):
super(DetectedLanguage, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.iso6391_name = kwargs.get('iso6391_name', None)
self.score = kwargs.get('score', None)
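# Illustrative construction (values are made up); the same keyword-argument
# pattern applies to every model class in this module:
#   DetectedLanguage(name="English", iso6391_name="en", score=0.99)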
class DocumentEntities(Model):
"""DocumentEntities.
All required parameters must be populated in order to send to Azure.
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param entities: Required. Recognized entities in the document.
:type entities: list[~azure.ai.textanalytics.models.Entity]
:param statistics: if showStats=true was specified in the request this
field will contain information about the document payload.
:type statistics: ~azure.ai.textanalytics.models.DocumentStatistics
"""
_validation = {
'id': {'required': True},
'entities': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'entities': {'key': 'entities', 'type': '[Entity]'},
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
}
def __init__(self, **kwargs):
super(DocumentEntities, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.entities = kwargs.get('entities', None)
self.statistics = kwargs.get('statistics', None)
class DocumentError(Model):
"""DocumentError.
All required parameters must be populated in order to send to Azure.
:param id: Required. Document Id.
:type id: str
:param error: Required. Document Error.
:type error: ~azure.ai.textanalytics.models.TextAnalyticsError
"""
_validation = {
'id': {'required': True},
'error': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'error': {'key': 'error', 'type': 'TextAnalyticsError'},
}
def __init__(self, **kwargs):
super(DocumentError, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.error = kwargs.get('error', None)
class DocumentKeyPhrases(Model):
"""DocumentKeyPhrases.
All required parameters must be populated in order to send to Azure.
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param key_phrases: Required. A list of representative words or phrases.
The number of key phrases returned is proportional to the number of words
in the input document.
:type key_phrases: list[str]
:param statistics: if showStats=true was specified in the request this
field will contain information about the document payload.
:type statistics: ~azure.ai.textanalytics.models.DocumentStatistics
"""
_validation = {
'id': {'required': True},
'key_phrases': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'key_phrases': {'key': 'keyPhrases', 'type': '[str]'},
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
}
def __init__(self, **kwargs):
super(DocumentKeyPhrases, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.key_phrases = kwargs.get('key_phrases', None)
self.statistics = kwargs.get('statistics', None)
class DocumentLanguage(Model):
"""DocumentLanguage.
All required parameters must be populated in order to send to Azure.
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param detected_languages: Required. A list of extracted languages.
:type detected_languages:
list[~azure.ai.textanalytics.models.DetectedLanguage]
:param statistics: if showStats=true was specified in the request this
field will contain information about the document payload.
:type statistics: ~azure.ai.textanalytics.models.DocumentStatistics
"""
_validation = {
'id': {'required': True},
'detected_languages': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'detected_languages': {'key': 'detectedLanguages', 'type': '[DetectedLanguage]'},
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
}
def __init__(self, **kwargs):
super(DocumentLanguage, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.detected_languages = kwargs.get('detected_languages', None)
self.statistics = kwargs.get('statistics', None)
class DocumentLinkedEntities(Model):
"""DocumentLinkedEntities.
All required parameters must be populated in order to send to Azure.
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param entities: Required. Recognized well-known entities in the document.
:type entities: list[~azure.ai.textanalytics.models.LinkedEntity]
:param statistics: if showStats=true was specified in the request this
field will contain information about the document payload.
:type statistics: ~azure.ai.textanalytics.models.DocumentStatistics
"""
_validation = {
'id': {'required': True},
'entities': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'entities': {'key': 'entities', 'type': '[LinkedEntity]'},
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
}
def __init__(self, **kwargs):
super(DocumentLinkedEntities, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.entities = kwargs.get('entities', None)
self.statistics = kwargs.get('statistics', None)
class DocumentSentiment(Model):
"""DocumentSentiment.
All required parameters must be populated in order to send to Azure.
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param sentiment: Required. Predicted sentiment for document (Negative,
Neutral, Positive, or Mixed). Possible values include: 'positive',
'neutral', 'negative', 'mixed'
:type sentiment: str or
~azure.ai.textanalytics.models.DocumentSentimentValue
:param statistics:
:type statistics: ~azure.ai.textanalytics.models.DocumentStatistics
:param document_scores: Required. Document level sentiment confidence
scores between 0 and 1 for each sentiment class.
:type document_scores:
~azure.ai.textanalytics.models.SentimentConfidenceScorePerLabel
:param sentences: Required. Sentence level sentiment analysis.
:type sentences: list[~azure.ai.textanalytics.models.SentenceSentiment]
"""
_validation = {
'id': {'required': True},
'sentiment': {'required': True},
'document_scores': {'required': True},
'sentences': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'sentiment': {'key': 'sentiment', 'type': 'DocumentSentimentValue'},
'statistics': {'key': 'statistics', 'type': 'DocumentStatistics'},
'document_scores': {'key': 'documentScores', 'type': 'SentimentConfidenceScorePerLabel'},
'sentences': {'key': 'sentences', 'type': '[SentenceSentiment]'},
}
def __init__(self, **kwargs):
super(DocumentSentiment, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.sentiment = kwargs.get('sentiment', None)
self.statistics = kwargs.get('statistics', None)
self.document_scores = kwargs.get('document_scores', None)
self.sentences = kwargs.get('sentences', None)
class DocumentStatistics(Model):
"""if showStats=true was specified in the request this field will contain
information about the document payload.
All required parameters must be populated in order to send to Azure.
:param characters_count: Required. Number of text elements recognized in
the document.
:type characters_count: int
:param transactions_count: Required. Number of transactions for the
document.
:type transactions_count: int
"""
_validation = {
'characters_count': {'required': True},
'transactions_count': {'required': True},
}
_attribute_map = {
'characters_count': {'key': 'charactersCount', 'type': 'int'},
'transactions_count': {'key': 'transactionsCount', 'type': 'int'},
}
def __init__(self, **kwargs):
super(DocumentStatistics, self).__init__(**kwargs)
self.characters_count = kwargs.get('characters_count', None)
self.transactions_count = kwargs.get('transactions_count', None)
class EntitiesResult(Model):
"""EntitiesResult.
All required parameters must be populated in order to send to Azure.
:param documents: Required. Response by document
:type documents: list[~azure.ai.textanalytics.models.DocumentEntities]
:param errors: Required. Errors by document id.
:type errors: list[~azure.ai.textanalytics.models.DocumentError]
:param statistics:
:type statistics: ~azure.ai.textanalytics.models.RequestStatistics
:param model_version: Required. This field indicates which model is used
for scoring.
:type model_version: str
"""
_validation = {
'documents': {'required': True},
'errors': {'required': True},
'model_version': {'required': True},
}
_attribute_map = {
'documents': {'key': 'documents', 'type': '[DocumentEntities]'},
'errors': {'key': 'errors', 'type': '[DocumentError]'},
'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
def __init__(self, **kwargs):
super(EntitiesResult, self).__init__(**kwargs)
self.documents = kwargs.get('documents', None)
self.errors = kwargs.get('errors', None)
self.statistics = kwargs.get('statistics', None)
self.model_version = kwargs.get('model_version', None)
class Entity(Model):
"""Entity.
All required parameters must be populated in order to send to Azure.
:param text: Required. Entity text as appears in the request.
:type text: str
:param type: Required. Entity type, such as Person/Location/Org/SSN etc
:type type: str
:param subtype: Entity sub type, such as Age/Year/TimeRange etc
:type subtype: str
:param offset: Required. Start position (in Unicode characters) for the
entity text.
:type offset: int
:param length: Required. Length (in Unicode characters) for the entity
text.
:type length: int
:param score: Required. Confidence score between 0 and 1 of the extracted
entity.
:type score: float
"""
_validation = {
'text': {'required': True},
'type': {'required': True},
'offset': {'required': True},
'length': {'required': True},
'score': {'required': True},
}
_attribute_map = {
'text': {'key': 'text', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'subtype': {'key': 'subtype', 'type': 'str'},
'offset': {'key': 'offset', 'type': 'int'},
'length': {'key': 'length', 'type': 'int'},
'score': {'key': 'score', 'type': 'float'},
}
def __init__(self, **kwargs):
super(Entity, self).__init__(**kwargs)
self.text = kwargs.get('text', None)
self.type = kwargs.get('type', None)
self.subtype = kwargs.get('subtype', None)
self.offset = kwargs.get('offset', None)
self.length = kwargs.get('length', None)
self.score = kwargs.get('score', None)
class EntityLinkingResult(Model):
"""EntityLinkingResult.
All required parameters must be populated in order to send to Azure.
:param documents: Required. Response by document
:type documents:
list[~azure.ai.textanalytics.models.DocumentLinkedEntities]
:param errors: Required. Errors by document id.
:type errors: list[~azure.ai.textanalytics.models.DocumentError]
:param statistics:
:type statistics: ~azure.ai.textanalytics.models.RequestStatistics
:param model_version: Required. This field indicates which model is used
for scoring.
:type model_version: str
"""
_validation = {
'documents': {'required': True},
'errors': {'required': True},
'model_version': {'required': True},
}
_attribute_map = {
'documents': {'key': 'documents', 'type': '[DocumentLinkedEntities]'},
'errors': {'key': 'errors', 'type': '[DocumentError]'},
'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
def __init__(self, **kwargs):
super(EntityLinkingResult, self).__init__(**kwargs)
self.documents = kwargs.get('documents', None)
self.errors = kwargs.get('errors', None)
self.statistics = kwargs.get('statistics', None)
self.model_version = kwargs.get('model_version', None)
class InnerError(Model):
"""InnerError.
All required parameters must be populated in order to send to Azure.
:param code: Required. Error code. Possible values include:
'invalidParameterValue', 'invalidRequestBodyFormat', 'emptyRequest',
'missingInputRecords', 'invalidDocument', 'modelVersionIncorrect',
'invalidDocumentBatch', 'unsupportedLanguageCode', 'invalidCountryHint'
:type code: str or ~azure.ai.textanalytics.models.InnerErrorCodeValue
:param message: Required. Error message.
:type message: str
:param details: Error details.
:type details: dict[str, str]
:param target: Error target.
:type target: str
:param inner_error: Inner error contains more specific information.
:type inner_error: ~azure.ai.textanalytics.models.InnerError
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'InnerErrorCodeValue'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '{str}'},
'target': {'key': 'target', 'type': 'str'},
'inner_error': {'key': 'innerError', 'type': 'InnerError'},
}
def __init__(self, **kwargs):
super(InnerError, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
self.details = kwargs.get('details', None)
self.target = kwargs.get('target', None)
self.inner_error = kwargs.get('inner_error', None)
class KeyPhraseResult(Model):
"""KeyPhraseResult.
All required parameters must be populated in order to send to Azure.
:param documents: Required. Response by document
:type documents: list[~azure.ai.textanalytics.models.DocumentKeyPhrases]
:param errors: Required. Errors by document id.
:type errors: list[~azure.ai.textanalytics.models.DocumentError]
:param statistics:
:type statistics: ~azure.ai.textanalytics.models.RequestStatistics
:param model_version: Required. This field indicates which model is used
for scoring.
:type model_version: str
"""
_validation = {
'documents': {'required': True},
'errors': {'required': True},
'model_version': {'required': True},
}
_attribute_map = {
'documents': {'key': 'documents', 'type': '[DocumentKeyPhrases]'},
'errors': {'key': 'errors', 'type': '[DocumentError]'},
'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
def __init__(self, **kwargs):
super(KeyPhraseResult, self).__init__(**kwargs)
self.documents = kwargs.get('documents', None)
self.errors = kwargs.get('errors', None)
self.statistics = kwargs.get('statistics', None)
self.model_version = kwargs.get('model_version', None)
class LanguageBatchInput(Model):
"""LanguageBatchInput.
All required parameters must be populated in order to send to Azure.
:param documents: Required.
:type documents: list[~azure.ai.textanalytics.models.LanguageInput]
"""
_validation = {
'documents': {'required': True},
}
_attribute_map = {
'documents': {'key': 'documents', 'type': '[LanguageInput]'},
}
def __init__(self, **kwargs):
super(LanguageBatchInput, self).__init__(**kwargs)
self.documents = kwargs.get('documents', None)
class LanguageInput(Model):
"""LanguageInput.
All required parameters must be populated in order to send to Azure.
:param id: Required. Unique, non-empty document identifier.
:type id: str
:param text: Required.
:type text: str
:param country_hint:
:type country_hint: str
"""
_validation = {
'id': {'required': True},
'text': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'text': {'key': 'text', 'type': 'str'},
'country_hint': {'key': 'countryHint', 'type': 'str'},
}
def __init__(self, **kwargs):
super(LanguageInput, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.text = kwargs.get('text', None)
self.country_hint = kwargs.get('country_hint', None)
class LanguageResult(Model):
"""LanguageResult.
All required parameters must be populated in order to send to Azure.
:param documents: Required. Response by document
:type documents: list[~azure.ai.textanalytics.models.DocumentLanguage]
:param errors: Required. Errors by document id.
:type errors: list[~azure.ai.textanalytics.models.DocumentError]
:param statistics:
:type statistics: ~azure.ai.textanalytics.models.RequestStatistics
:param model_version: Required. This field indicates which model is used
for scoring.
:type model_version: str
"""
_validation = {
'documents': {'required': True},
'errors': {'required': True},
'model_version': {'required': True},
}
_attribute_map = {
'documents': {'key': 'documents', 'type': '[DocumentLanguage]'},
'errors': {'key': 'errors', 'type': '[DocumentError]'},
'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
def __init__(self, **kwargs):
super(LanguageResult, self).__init__(**kwargs)
self.documents = kwargs.get('documents', None)
self.errors = kwargs.get('errors', None)
self.statistics = kwargs.get('statistics', None)
self.model_version = kwargs.get('model_version', None)
class LinkedEntity(Model):
"""LinkedEntity.
All required parameters must be populated in order to send to Azure.
:param name: Required. Entity Linking formal name.
:type name: str
:param matches: Required. List of instances this entity appears in the
text.
:type matches: list[~azure.ai.textanalytics.models.Match]
:param language: Required. Language used in the data source.
:type language: str
:param id: Unique identifier of the recognized entity from the data
source.
:type id: str
:param url: Required. URL for the entity's page from the data source.
:type url: str
:param data_source: Required. Data source used to extract entity linking,
such as Wiki/Bing etc.
:type data_source: str
"""
_validation = {
'name': {'required': True},
'matches': {'required': True},
'language': {'required': True},
'url': {'required': True},
'data_source': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'matches': {'key': 'matches', 'type': '[Match]'},
'language': {'key': 'language', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'data_source': {'key': 'dataSource', 'type': 'str'},
}
def __init__(self, **kwargs):
super(LinkedEntity, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.matches = kwargs.get('matches', None)
self.language = kwargs.get('language', None)
self.id = kwargs.get('id', None)
self.url = kwargs.get('url', None)
self.data_source = kwargs.get('data_source', None)
class Match(Model):
"""Match.
All required parameters must be populated in order to send to Azure.
:param score: Required. If a well-known item is recognized, a decimal
number denoting the confidence level between 0 and 1 will be returned.
:type score: float
:param text: Required. Entity text as appears in the request.
:type text: str
:param offset: Required. Start position (in Unicode characters) for the
entity match text.
:type offset: int
:param length: Required. Length (in Unicode characters) for the entity
match text.
:type length: int
"""
_validation = {
'score': {'required': True},
'text': {'required': True},
'offset': {'required': True},
'length': {'required': True},
}
_attribute_map = {
'score': {'key': 'score', 'type': 'float'},
'text': {'key': 'text', 'type': 'str'},
'offset': {'key': 'offset', 'type': 'int'},
'length': {'key': 'length', 'type': 'int'},
}
def __init__(self, **kwargs):
super(Match, self).__init__(**kwargs)
self.score = kwargs.get('score', None)
self.text = kwargs.get('text', None)
self.offset = kwargs.get('offset', None)
self.length = kwargs.get('length', None)
class MultiLanguageBatchInput(Model):
"""Contains a set of input documents to be analyzed by the service.
All required parameters must be populated in order to send to Azure.
:param documents: Required. The set of documents to process as part of
this batch.
:type documents: list[~azure.ai.textanalytics.models.MultiLanguageInput]
"""
_validation = {
'documents': {'required': True},
}
_attribute_map = {
'documents': {'key': 'documents', 'type': '[MultiLanguageInput]'},
}
def __init__(self, **kwargs):
super(MultiLanguageBatchInput, self).__init__(**kwargs)
self.documents = kwargs.get('documents', None)
class MultiLanguageInput(Model):
"""Contains an input document to be analyzed by the service.
All required parameters must be populated in order to send to Azure.
:param id: Required. A unique, non-empty document identifier.
:type id: str
:param text: Required. The input text to process.
:type text: str
:param language: (Optional) This is the 2 letter ISO 639-1 representation
of a language. For example, use "en" for English; "es" for Spanish etc. If
not set, use "en" for English as default.
:type language: str
"""
_validation = {
'id': {'required': True},
'text': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'text': {'key': 'text', 'type': 'str'},
'language': {'key': 'language', 'type': 'str'},
}
def __init__(self, **kwargs):
super(MultiLanguageInput, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.text = kwargs.get('text', None)
self.language = kwargs.get('language', None)
class RequestStatistics(Model):
"""if showStats=true was specified in the request this field will contain
information about the request payload.
All required parameters must be populated in order to send to Azure.
:param documents_count: Required. Number of documents submitted in the
request.
:type documents_count: int
:param valid_documents_count: Required. Number of valid documents. This
excludes empty, over-size limit or non-supported languages documents.
:type valid_documents_count: int
:param erroneous_documents_count: Required. Number of invalid documents.
This includes empty, over-size limit or non-supported languages documents.
:type erroneous_documents_count: int
:param transactions_count: Required. Number of transactions for the
request.
:type transactions_count: long
"""
_validation = {
'documents_count': {'required': True},
'valid_documents_count': {'required': True},
'erroneous_documents_count': {'required': True},
'transactions_count': {'required': True},
}
_attribute_map = {
'documents_count': {'key': 'documentsCount', 'type': 'int'},
'valid_documents_count': {'key': 'validDocumentsCount', 'type': 'int'},
'erroneous_documents_count': {'key': 'erroneousDocumentsCount', 'type': 'int'},
'transactions_count': {'key': 'transactionsCount', 'type': 'long'},
}
def __init__(self, **kwargs):
super(RequestStatistics, self).__init__(**kwargs)
self.documents_count = kwargs.get('documents_count', None)
self.valid_documents_count = kwargs.get('valid_documents_count', None)
self.erroneous_documents_count = kwargs.get('erroneous_documents_count', None)
self.transactions_count = kwargs.get('transactions_count', None)
class SentenceSentiment(Model):
"""SentenceSentiment.
All required parameters must be populated in order to send to Azure.
:param sentiment: Required. The predicted Sentiment for the sentence.
Possible values include: 'positive', 'neutral', 'negative'
:type sentiment: str or
~azure.ai.textanalytics.models.SentenceSentimentValue
:param sentence_scores: Required. The sentiment confidence score between 0
and 1 for the sentence for all classes.
:type sentence_scores:
~azure.ai.textanalytics.models.SentimentConfidenceScorePerLabel
:param offset: Required. The sentence offset from the start of the
document.
:type offset: int
:param length: Required. The length of the sentence by Unicode standard.
:type length: int
:param warnings: The warnings generated for the sentence.
:type warnings: list[str]
"""
_validation = {
'sentiment': {'required': True},
'sentence_scores': {'required': True},
'offset': {'required': True},
'length': {'required': True},
}
_attribute_map = {
'sentiment': {'key': 'sentiment', 'type': 'SentenceSentimentValue'},
'sentence_scores': {'key': 'sentenceScores', 'type': 'SentimentConfidenceScorePerLabel'},
'offset': {'key': 'offset', 'type': 'int'},
'length': {'key': 'length', 'type': 'int'},
'warnings': {'key': 'warnings', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(SentenceSentiment, self).__init__(**kwargs)
self.sentiment = kwargs.get('sentiment', None)
self.sentence_scores = kwargs.get('sentence_scores', None)
self.offset = kwargs.get('offset', None)
self.length = kwargs.get('length', None)
self.warnings = kwargs.get('warnings', None)
class SentimentConfidenceScorePerLabel(Model):
"""Represents the confidence scores between 0 and 1 across all sentiment
classes: positive, neutral, negative.
All required parameters must be populated in order to send to Azure.
:param positive: Required.
:type positive: float
:param neutral: Required.
:type neutral: float
:param negative: Required.
:type negative: float
"""
_validation = {
'positive': {'required': True},
'neutral': {'required': True},
'negative': {'required': True},
}
_attribute_map = {
'positive': {'key': 'positive', 'type': 'float'},
'neutral': {'key': 'neutral', 'type': 'float'},
'negative': {'key': 'negative', 'type': 'float'},
}
def __init__(self, **kwargs):
super(SentimentConfidenceScorePerLabel, self).__init__(**kwargs)
self.positive = kwargs.get('positive', None)
self.neutral = kwargs.get('neutral', None)
self.negative = kwargs.get('negative', None)
class SentimentResponse(Model):
"""SentimentResponse.
All required parameters must be populated in order to send to Azure.
:param documents: Required. Sentiment analysis per document.
:type documents: list[~azure.ai.textanalytics.models.DocumentSentiment]
:param errors: Required. Errors by document id.
:type errors: list[~azure.ai.textanalytics.models.DocumentError]
:param statistics:
:type statistics: ~azure.ai.textanalytics.models.RequestStatistics
:param model_version: Required. This field indicates which model is used
for scoring.
:type model_version: str
"""
_validation = {
'documents': {'required': True},
'errors': {'required': True},
'model_version': {'required': True},
}
_attribute_map = {
'documents': {'key': 'documents', 'type': '[DocumentSentiment]'},
'errors': {'key': 'errors', 'type': '[DocumentError]'},
'statistics': {'key': 'statistics', 'type': 'RequestStatistics'},
'model_version': {'key': 'modelVersion', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SentimentResponse, self).__init__(**kwargs)
self.documents = kwargs.get('documents', None)
self.errors = kwargs.get('errors', None)
self.statistics = kwargs.get('statistics', None)
self.model_version = kwargs.get('model_version', None)
class TextAnalyticsError(Model):
"""TextAnalyticsError.
All required parameters must be populated in order to send to Azure.
:param code: Required. Error code. Possible values include:
'invalidRequest', 'invalidArgument', 'internalServerError',
'serviceUnavailable'
:type code: str or ~azure.ai.textanalytics.models.ErrorCodeValue
:param message: Required. Error message.
:type message: str
:param target: Error target.
:type target: str
:param inner_error: Inner error contains more specific information.
:type inner_error: ~azure.ai.textanalytics.models.InnerError
:param details: Details about specific errors that led to this reported
error.
:type details: list[~azure.ai.textanalytics.models.TextAnalyticsError]
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'ErrorCodeValue'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'inner_error': {'key': 'innerError', 'type': 'InnerError'},
'details': {'key': 'details', 'type': '[TextAnalyticsError]'},
}
def __init__(self, **kwargs):
super(TextAnalyticsError, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
self.target = kwargs.get('target', None)
self.inner_error = kwargs.get('inner_error', None)
self.details = kwargs.get('details', None)
class TextAnalyticsErrorException(HttpResponseError):
"""Server responsed with exception of type: 'TextAnalyticsError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, response, deserialize, *args):
model_name = 'TextAnalyticsError'
self.error = deserialize(model_name, response)
if self.error is None:
self.error = deserialize.dependencies[model_name]()
super(TextAnalyticsErrorException, self).__init__(response=response)
|
the-stack_106_31526 | from django.template.loader import render_to_string
from django.utils.text import slugify
from crispy_forms import layout as crispy_forms_layout
from crispy_forms.utils import TEMPLATE_PACK, flatatt, render_field
from crispy_forms_gds.layout import Size
class Div(crispy_forms_layout.Div):
"""
A layout object for displaying a general-purpose Div. This is not
a Design System component but is included as it's a basic part of
``django-crispy-forms``.
Although there is the Fieldset component for grouping fields together
a Div is quite useful when you just need to add some spacing between
elements.
Examples::
Div("name", "email", "phone", css_class="govuk-!-margin-bottom-5")
Div("street", "city", "post_code")
Arguments:
        css_id (str, optional): a unique identifier for the <div>. Generally
you will need to set this if you need to add some javascript or
very specific styling.
css_class (str, optional): the names of one or more CSS classes that
will be added to the <div>. This parameter is for any styling you
want to apply. Nothing is added by default.
template (str, optional): the path to a template that overrides the
one normally used.
*fields: a list of layout objects - fields, buttons, HTML,
            etc. that are displayed inside the <div> element.
**kwargs: any additional attributes you want to add to the parent
<div>.
"""
pass
class Accordion(Div):
"""
.. _Accordion: https://design-system.service.gov.uk/components/accordion/
A layout object for displaying an `Accordion`_ component.
Accordion is the parent object to which you add an ``AccordionSection`` for
each of the panels you want to display.
Examples::
Accordion(
AccordionSection("title_1", "form_field_1", "form_field_2"),
AccordionSection("title_2", "form_field_3")
)
Accordion(
AccordionSection("title", "form_field_1", "form_field_2"),
css_id="accordion-1"
)
Arguments:
        css_id (str, optional): a unique identifier for the accordion. The
default is "accordion". You will need to set this if you have more
than one accordion on a page.
css_class (str, optional): the names of one or more CSS classes that
will be added to the parent <div>. The basic Design System CSS
classes will be added automatically. This parameter is for any
extra styling you want to apply.
template (str, optional): the path to a template that overrides the
            one normally used for the accordion.
*fields: a list of AccordionSection objects that are the panels
that make up this accordion.
**kwargs: any additional attributes you want to add to the parent
<div>.
"""
template = "%s/accordion.html"
def __init__(self, *fields, **kwargs):
super().__init__(*fields, **kwargs)
if not self.css_id:
self.css_id = "accordion"
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
content = ""
for index, group in enumerate(self.fields, start=1):
context["index"] = index
context["parent"] = self.css_id
content += render_field(
group, form, form_style, context, template_pack=template_pack, **kwargs
)
template = self.get_template_name(template_pack)
context.update({"accordion": self, "content": content})
return render_to_string(template, context.flatten())
class AccordionSection(Div):
"""
.. _Accordion: https://design-system.service.gov.uk/components/accordion/
    A layout object for displaying a section in an `Accordion`_ component.
Examples::
AccordionSection("title", "form_field_1", "form_field_2")
AccordionSection(
"title",
"form_field_1",
summary="A short description of the contents"
)
Arguments:
name (str): the title of the section.
summary (str, optional): a short description of the section's contents.
        css_id (str, optional): a unique identifier for the section. This is
            included because an AccordionSection is just a specialised Div. It is
            a basic LayoutObject param and you should never have to set it.
css_class (str, optional): the names of one or more CSS classes that
will be added to the section <div>. The basic Design System CSS
classes will be added automatically. This parameter is for any
extra styling you want to apply.
template (str, optional): the path to a template that overrides the
one used to render a section.
        *fields: a list of layout objects that make up the section
contents.
**kwargs: any additional attributes you want to add to the section
<div>.
"""
template = "%s/accordion-group.html"
def __init__(self, name, *fields, summary=None, **kwargs):
super().__init__(*fields, **kwargs)
self.name = name
self.summary = summary
self.index = None
self.parent = None
def __contains__(self, field_name):
"""
check if field_name is contained within tab.
"""
return field_name in map(lambda pointer: pointer[1], self.get_field_names())
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
self.index = context.get("index", None)
self.parent = context.get("parent")
return super().render(form, form_style, context, template_pack)
class Fieldset(crispy_forms_layout.LayoutObject):
"""
A layout object for displaying groups of fields.
    The contents of a Fieldset are one or more LayoutObjects: fields, buttons,
composed layouts, etc. You can give the <fieldset> a <legend> title, set the size
of the font used and wrap the <legend> in a heading tag if necessary.
Examples::
Fieldset('form_field_1', 'form_field_2')
Fieldset('form_field_1', 'form_field_2', legend="title")
Fieldset('form_field_1', 'form_field_2', legend="title", legend_tag="h1")
Fieldset('form_field_1', 'form_field_2', legend="title", legend_size="xl")
Arguments:
legend (str, optional): the title displayed in a <legend>.
legend_size (str, optional): the size of the title: 's', 'm', 'l' or
            'xl'. It's more readable if you use the constants on the ``Size`` class.
legend_tag (str, optional): an HTML tag that wraps the <legend>. Typically
this is 'h1' so the <legend> also acts as the page title.
        css_id (str, optional): a unique identifier for the fieldset.
css_class (str, optional): the names of one or more CSS classes that
will be added to the <fieldset>. The basic Design System CSS
classes will be added automatically. This parameter is for any
extra styling you want to apply.
template (str, optional): the path to a template that overrides the
one provided by the template pack.
        *fields: a list of LayoutObjects that make up the Fieldset contents.
**kwargs: any additional attributes you want to add to the <fieldset>.
"""
css_class = "govuk-fieldset"
template = "%s/layout/fieldset.html"
def __init__(
self, *fields, legend=None, legend_size=None, legend_tag=None, **kwargs
):
self.fields = list(fields)
self.context = {}
if legend:
self.context["legend"] = legend
if legend_size:
self.context["legend_size"] = Size.for_legend(legend_size)
if legend_tag:
self.context["legend_tag"] = legend_tag
if hasattr(self, "css_class") and "css_class" in kwargs:
self.css_class += " %s" % kwargs.pop("css_class")
if not hasattr(self, "css_class"):
self.css_class = kwargs.pop("css_class", None)
self.css_id = kwargs.pop("css_id", None)
self.template = kwargs.pop("template", self.template)
self.flat_attrs = flatatt(kwargs)
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
fields = self.get_rendered_fields(
form, form_style, context, template_pack, **kwargs
)
context = {
"fieldset": self,
"fields": fields,
"form_style": form_style,
}
context.update(self.context)
template = self.get_template_name(template_pack)
return render_to_string(template, context)
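# Illustrative note: legend_size is passed through Size.for_legend(), which maps the
# string shorthands in the docstring onto the Design System legend classes. Assuming
# the Size class also exposes named constants (e.g. Size.EXTRA_LARGE), the last
# docstring example could equivalently be written as:
#
#     Fieldset('form_field_1', 'form_field_2', legend="title",
#              legend_size=Size.EXTRA_LARGE)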
class Tabs(Div):
"""
A layout object for displaying a set of tabs.
    ``Tabs`` contains a list of ``TabPanels`` which in turn contain the
list of fields and other layout objects which make up the content of
each tab.
Examples::
Tabs(
            TabPanel('tab_name_1', 'form_field_1', 'form_field_2'),
            TabPanel('tab_name_2', 'form_field_3')
)
Arguments:
        css_id (str, optional): a unique identifier for the parent <div>.
css_class (str, optional): the names of one or more CSS classes that
will be added to the parent <div>. The basic Design System CSS
classes will be added automatically. This parameter is for any
extra styling you want to apply.
template (str, optional): the path to a template that overrides the
one provided by the template pack.
*fields: a list of TabPanel objects that make up the set of tabs.
**kwargs: any additional attributes you want to add to the parent
<div>.
"""
template = "%s/layout/tabs.html"
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
content = self.get_rendered_fields(form, form_style, context, template_pack)
links = "".join(tab.render_link(template_pack) for tab in self.fields)
context.update({"tabs": self, "links": links, "content": content})
template = self.get_template_name(template_pack)
return render_to_string(template, context.flatten())
class TabPanel(Div):
"""
A layout object that displays the contents of each pane in a set of tabs.
Examples::
TabPanel('tab_name', 'form_field_1', 'form_field_2', 'form_field_3')
Arguments:
name (str): the title of the panel.
        css_id (str, optional): a unique identifier for the parent <div>.
If you don't set this then the slugified title is used for the
id attribute. You must set this if you have more than one set
of tabs on a page with the same set of titles.
css_class (str, optional): the names of one or more CSS classes that
will be added to the parent <div>. The basic Design System CSS
classes will be added automatically. This parameter is for any
extra styling you want to apply.
template (str, optional): the path to a template that overrides the
one provided by the template pack.
*fields: a list of layout objects that make up the contents of the panel.
**kwargs: any additional attributes you want to add to the <div>
            element used to create the tab panel.
"""
css_class = "govuk-tabs__panel"
link_template = "%s/layout/tab-link.html"
def __init__(self, name, *fields, **kwargs):
super().__init__(*fields, **kwargs)
self.name = name
if "css_id" in kwargs:
self.css_id = kwargs["css_id"]
if not self.css_id:
self.css_id = slugify(self.name)
def render_link(self, template_pack=TEMPLATE_PACK):
link_template = self.link_template % template_pack
return render_to_string(link_template, {"link": self})
|
the-stack_106_31527 | import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as la
from floq.helpers.index import n_to_i, i_to_n
from floq.helpers.numpy_replacements import numba_outer, numba_zeros
import floq.helpers.blockmatrix as bm
import floq.helpers.matrix as mm
import floq.errors as errors
import itertools
import cmath
from numba import autojit
def get_u(hf, params):
"""
Calculate the time evolution operator U,
given a Fourier transformed Hamiltonian Hf
and the parameters of the problem
"""
return get_u_and_eigensystem(hf, params)[0]
def get_u_and_du(hf, dhf, params):
"""
Calculate the time evolution operator U
given a Fourier transformed Hamiltonian Hf,
as well as its derivative dU given dHf,
and the parameters of the problem
"""
u, vals, vecs, phi, psi = get_u_and_eigensystem(hf, params)
du = get_du_from_eigensystem(dhf, psi, vals, vecs, params)
return [u, du]
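# Illustrative usage sketch (assumptions flagged): hf is expected to hold the nc
# Fourier components of the Hamiltonian, indexed via n_to_i, and params is the
# problem-parameter object whose attributes (dim, k_dim, nz, nc, omega, t, ...) are
# read throughout this module. The parameter class itself is not defined here, so
# this is only a sketch:
#
#     # hf.shape == (params.nc, params.dim, params.dim)
#     u = get_u(hf, params)                  # evolution operator at time params.t
#     u, du = get_u_and_du(hf, dhf, params)  # plus its derivative dU, given dHf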
def get_u_and_eigensystem(hf, params):
"""
Calculate the time evolution operator U,
given a Fourier transformed Hamiltonian Hf
and the parameters of the problem, and return
it as well as the intermediary results
"""
k = assemble_k(hf, params)
vals, vecs = find_eigensystem(k, params)
phi = calculate_phi(vecs)
psi = calculate_psi(vecs, params)
return [calculate_u(phi, psi, vals, params), vals, vecs, phi, psi]
def get_du_from_eigensystem(dhf, psi, vals, vecs, params):
dk = assemble_dk(dhf, params)
du = calculate_du(dk, psi, vals, vecs, params)
return du
def assemble_k(hf, p):
# assemble the Floquet Hamiltonian K from
# the components of the Fourier-transformed Hamiltonian
return numba_assemble_k(hf, p.dim, p.k_dim, p.nz, p.nc, p.omega)
@autojit(nopython=True)
def numba_assemble_k(hf, dim, k_dim, nz, nc, omega):
hf_max = (nc-1)/2
k = numba_zeros((k_dim, k_dim))
# Assemble K by placing each component of Hf in turn, which
# for a fixed Fourier index lie on diagonals, with 0 on the
# main diagonal, positive numbers on the right and negative on the left
#
# The first row is therefore essentially Hf(0) Hf(-1) ... Hf(-hf_max) 0 0 0 ...
# The last row is then ... 0 0 0 Hf(+hf_max) ... Hf(0)
# Note that the main diagonal acquires a factor of omega*identity*(row/column number)
for n in range(-hf_max, hf_max+1):
start_row = max(0, n) # if n < 0, start at row 0
start_col = max(0, -n) # if n > 0, start at col 0
stop_row = min((nz-1)+n, nz-1)
stop_col = min((nz-1)-n, nz-1)
row = start_row
col = start_col
current_component = hf[n_to_i(n, nc)]
while row <= stop_row and col <= stop_col:
if n == 0:
block = current_component + np.identity(dim)*omega*i_to_n(row, nz)
bm.set_block_in_matrix(block, k, dim, nz, row, col)
else:
bm.set_block_in_matrix(current_component, k, dim, nz, row, col)
row += 1
col += 1
return k
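# Worked sketch of the layout built above (illustrative only; every entry is a
# dim x dim block, shown for nz = 3 block rows/columns and nc = 3, i.e. hf_max = 1):
#
#     K = [ Hf(0)+n0*w*I   Hf(-1)          0             ]
#         [ Hf(+1)         Hf(0)+n1*w*I    Hf(-1)        ]
#         [ 0              Hf(+1)          Hf(0)+n2*w*I  ]
#
# where w = omega, I is the dim x dim identity and n_r = i_to_n(r, nz) is the Fourier
# index attached to block row r.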
def assemble_dk(dhf, p):
# assemble the derivative of the Floquet Hamiltonian K from
# the components of the derivative of the Fourier-transformed Hamiltonian
# This is equivalent to K, with Hf -> d HF and omega -> 0.
return numba_assemble_dk(dhf, p.np, p.dim, p.k_dim, p.nz, p.nc)
@autojit(nopython=True)
def numba_assemble_dk(dhf, npm, dim, k_dim, nz, nc):
dk = np.empty((npm, k_dim, k_dim), dtype=np.complex128)
for c in range(npm):
dk[c, :, :] = numba_assemble_k(dhf[c], dim, k_dim, nz, nc, 0.0)
return dk
def find_eigensystem(k, p):
# Find unique eigenvalues and -vectors,
# return them as segments
unique_vals, unique_vecs = get_basis(k, p)
unique_vecs = np.array([np.split(unique_vecs[i], p.nz) for i in xrange(p.dim)])
return [unique_vals, unique_vecs]
def get_basis(k, p):
# Compute the eigensystem of K,
# then separate out the dim relevant parts,
# orthogonalising degenerate subspaces.
vals, vecs = compute_eigensystem(k, p)
start = find_first_above_value(vals, -p.omega/2.)
picked_vals = vals[start:start+p.dim]
picked_vecs = np.array([vecs[:, i] for i in xrange(start, start+p.dim)])
degenerate_indices = find_duplicates(picked_vals, p.decimals)
if degenerate_indices:
to_orthogonalize = picked_vecs[degenerate_indices]
orthogonalized = mm.gram_schmidt(to_orthogonalize)
picked_vecs[degenerate_indices, :] = orthogonalized
return [picked_vals, picked_vecs]
def compute_eigensystem(k, p):
# Find eigenvalues and eigenvectors of k,
# using the method specified in the parameters
# (sparse is almost always faster, and is the default)
if p.sparse:
k = sp.csc_matrix(k)
number_of_eigs = min(2*p.dim, p.k_dim)
# find number_of_eigs eigenvectors/-values around 0.0
# -> trimming/sorting the eigensystem is NOT necessary
vals, vecs = la.eigs(k, k=number_of_eigs, sigma=0.0)
else:
vals, vecs = np.linalg.eig(k)
vals, vecs = trim_eigensystem(vals, vecs, p)
vals = vals.real.astype(np.float64, copy=False)
# sort eigenvalues / eigenvectors
idx = vals.argsort()
vals = vals[idx]
vecs = vecs[:, idx]
return vals, vecs
def trim_eigensystem(vals, vecs, p):
# Trim eigenvalues and eigenvectors to only 2*dim ones
# clustered around zero
# Sort eigenvalues and -vectors in increasing order
idx = vals.argsort()
vals = vals[idx]
vecs = vecs[:, idx]
# Only keep values around 0
middle = p.k_dim/2
cutoff_left = max(0, middle - p.dim)
cutoff_right = min(p.k_dim, cutoff_left + 2*p.dim)
cut_vals = vals[cutoff_left:cutoff_right]
cut_vecs = vecs[:, cutoff_left:cutoff_right]
return cut_vals, cut_vecs
@autojit(nopython=True)
def find_first_above_value(array, value):
"""Find the index of the first array entry > value."""
for i in xrange(len(array)):
if array[i] > value:
return i
return None
def find_duplicates(array, decimals):
indices = np.arange(array.shape[0])
a = np.round(array, decimals=decimals)
vals, idx_start, count = np.unique(a, return_counts=True,
return_index=True)
res = np.split(indices, idx_start[1:])
res = filter(lambda x: x.size > 1, res)
return res
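# Illustrative example of the grouping above: for array = [1.0, 1.00001, 2.0] and
# decimals = 3 the first two entries coincide after rounding, so the function
# returns [array([0, 1])]; the singleton group belonging to 2.0 is filtered out.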
@autojit(nopython=True)
def calculate_phi(vecs):
# Given an array of eigenvectors vecs,
# sum over Fourier components in each
dim = vecs.shape[0]
phi = np.empty((dim, dim), dtype=np.complex128)
for i in range(dim):
phi[i] = numba_sum_components(vecs[i], dim)
return phi
@autojit(nopython=True)
def numba_sum_components(vec, dim):
n = vec.shape[0]
result = numba_zeros(dim)
for i in range(n):
result += vec[i]
return result
def calculate_psi(vecs, p):
# Given an array of eigenvectors vecs,
# sum over all Fourier components in each,
# weighted by exp(- i omega t n), with n
# being the Fourier index of the component
return numba_calculate_psi(vecs, p.dim, p.nz, p.omega, p.t)
@autojit(nopython=True)
def numba_calculate_psi(vecs, dim, nz, omega, t):
psi = numba_zeros((dim, dim))
for k in range(0, dim):
partial = numba_zeros(dim)
for i in range(0, nz):
num = i_to_n(i, nz)
partial += np.exp(1j*omega*t*num)*vecs[k][i]
psi[k, :] = partial
return psi
def calculate_u(phi, psi, energies, p):
u = np.zeros([p.dim, p.dim], dtype='complex128')
t = p.t
for k in xrange(0, p.dim):
u += np.exp(-1j*t*energies[k])*np.outer(psi[k], np.conj(phi[k]))
return u
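# The loop above implements the Floquet form of the evolution operator,
#     U(t) = sum_k exp(-i*e_k*t) |psi_k><phi_k| ,
# with e_k the quasi-energies, psi_k the time-dependent Floquet modes assembled in
# calculate_psi and phi_k the same modes at t = 0, assembled in calculate_phi.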
def calculate_du(dk, psi, vals, vecs, p):
# Given the eigensystem of K, and its derivative,
# perform the computations to get dU.
#
# This routine is optimised and quite hard to read, I recommend
# taking a look in the museum, which contains functionally equivalent,
# but much more readable versions.
dim = p.dim
nz_max = p.nz_max
nz = p.nz
npm = p.np
omega = p.omega
t = p.t
vecsstar = np.conj(vecs)
factors = calculate_factors(dk, nz, nz_max, dim, npm, vals, vecs, vecsstar, omega, t)
return assemble_du(nz, nz_max, dim, npm, factors, psi, vecsstar)
def calculate_factors(dk, nz, nz_max, dim, npm, vals, vecs, vecsstar, omega, t):
# Factors in the sum for dU that only depend on dn=n1-n2, and therefore
# can be computed more efficiently outside the "full" loop
factors = np.empty([npm, 2*nz+1, dim, dim], dtype=np.complex128)
for dn in xrange(-nz_max*2, 2*nz_max+1):
idn = n_to_i(dn, 2*nz)
for i1 in xrange(0, dim):
for i2 in xrange(0, dim):
v1 = np.roll(vecsstar[i1], dn, axis=0) # not supported by numba!
for c in xrange(0, npm):
factors[c, idn, i1, i2] = (integral_factors(vals[i1], vals[i2], dn, omega, t) *
expectation_value(dk[c], v1, vecs[i2]))
return factors
@autojit(nopython=True)
def assemble_du(nz, nz_max, dim, npm, alphas, psi, vecsstar):
# Execute the sum defining dU, taking pre-computed factors into account
du = numba_zeros((npm, dim, dim))
for n2 in range(-nz_max, nz_max+1):
for i1 in range(0, dim):
for i2 in range(0, dim):
product = numba_outer(psi[i1], vecsstar[i2, n_to_i(-n2, nz)])
for n1 in range(-nz_max, nz_max+1):
idn = n_to_i(n1-n2, 2*nz)
for c in xrange(0, npm):
du[c] += alphas[c, idn, i1, i2]*product
return du
@autojit(nopython=True)
def integral_factors(e1, e2, dn, omega, t):
if e1 == e2 and dn == 0:
return -1.0j*cmath.exp(-1j*t*e1)*t
else:
return (cmath.exp(-1j*t*e1)-cmath.exp(-1j*t*(e2-omega*dn)))/((e1-e2+omega*dn))
@autojit(nopython=True)
def expectation_value(dk, v1, v2):
# Computes <v1|dk|v2>, assuming v1 is already conjugated
# v1 and v2 are split into Fourier components,
# we undo that here
a = v1.flatten()
b = v2.flatten()
return np.dot(np.dot(a, dk), b)
|
the-stack_106_31528 | import logging
from typing import Dict, List, Optional, Tuple
import aiosqlite
from spare.consensus.block_record import BlockRecord
from spare.types.blockchain_format.sized_bytes import bytes32
from spare.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from spare.types.full_block import FullBlock
from spare.types.weight_proof import SubEpochChallengeSegment, SubEpochSegments
from spare.util.db_wrapper import DBWrapper
from spare.util.ints import uint32
from spare.util.lru_cache import LRUCache
log = logging.getLogger(__name__)
class BlockStore:
db: aiosqlite.Connection
block_cache: LRUCache
db_wrapper: DBWrapper
ses_challenge_cache: LRUCache
@classmethod
async def create(cls, db_wrapper: DBWrapper):
self = cls()
# All full blocks which have been added to the blockchain. Header_hash -> block
self.db_wrapper = db_wrapper
self.db = db_wrapper.db
await self.db.execute("pragma journal_mode=wal")
await self.db.execute("pragma synchronous=2")
await self.db.execute(
"CREATE TABLE IF NOT EXISTS full_blocks(header_hash text PRIMARY KEY, height bigint,"
" is_block tinyint, is_fully_compactified tinyint, block blob)"
)
# Block records
await self.db.execute(
"CREATE TABLE IF NOT EXISTS block_records(header_hash "
"text PRIMARY KEY, prev_hash text, height bigint,"
"block blob, sub_epoch_summary blob, is_peak tinyint, is_block tinyint)"
)
# todo remove in v1.2
await self.db.execute("DROP TABLE IF EXISTS sub_epoch_segments_v2")
# Sub epoch segments for weight proofs
await self.db.execute(
"CREATE TABLE IF NOT EXISTS sub_epoch_segments_v3(ses_block_hash text PRIMARY KEY, challenge_segments blob)"
)
# Height index so we can look up in order of height for sync purposes
await self.db.execute("CREATE INDEX IF NOT EXISTS full_block_height on full_blocks(height)")
await self.db.execute("CREATE INDEX IF NOT EXISTS is_block on full_blocks(is_block)")
await self.db.execute("CREATE INDEX IF NOT EXISTS is_fully_compactified on full_blocks(is_fully_compactified)")
await self.db.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")
await self.db.execute("CREATE INDEX IF NOT EXISTS hh on block_records(header_hash)")
await self.db.execute("CREATE INDEX IF NOT EXISTS peak on block_records(is_peak)")
await self.db.execute("CREATE INDEX IF NOT EXISTS is_block on block_records(is_block)")
await self.db.commit()
self.block_cache = LRUCache(1000)
self.ses_challenge_cache = LRUCache(50)
return self
async def add_full_block(self, header_hash: bytes32, block: FullBlock, block_record: BlockRecord) -> None:
self.block_cache.put(header_hash, block)
cursor_1 = await self.db.execute(
"INSERT OR REPLACE INTO full_blocks VALUES(?, ?, ?, ?, ?)",
(
header_hash.hex(),
block.height,
int(block.is_transaction_block()),
int(block.is_fully_compactified()),
bytes(block),
),
)
await cursor_1.close()
cursor_2 = await self.db.execute(
"INSERT OR REPLACE INTO block_records VALUES(?, ?, ?, ?,?, ?, ?)",
(
header_hash.hex(),
block.prev_header_hash.hex(),
block.height,
bytes(block_record),
None
if block_record.sub_epoch_summary_included is None
else bytes(block_record.sub_epoch_summary_included),
False,
block.is_transaction_block(),
),
)
await cursor_2.close()
async def persist_sub_epoch_challenge_segments(
self, ses_block_hash: bytes32, segments: List[SubEpochChallengeSegment]
) -> None:
async with self.db_wrapper.lock:
cursor_1 = await self.db.execute(
"INSERT OR REPLACE INTO sub_epoch_segments_v3 VALUES(?, ?)",
(ses_block_hash.hex(), bytes(SubEpochSegments(segments))),
)
await cursor_1.close()
await self.db.commit()
async def get_sub_epoch_challenge_segments(
self,
ses_block_hash: bytes32,
) -> Optional[List[SubEpochChallengeSegment]]:
cached = self.ses_challenge_cache.get(ses_block_hash)
if cached is not None:
return cached
cursor = await self.db.execute(
"SELECT challenge_segments from sub_epoch_segments_v3 WHERE ses_block_hash=?", (ses_block_hash.hex(),)
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
challenge_segments = SubEpochSegments.from_bytes(row[0]).challenge_segments
self.ses_challenge_cache.put(ses_block_hash, challenge_segments)
return challenge_segments
return None
def rollback_cache_block(self, header_hash: bytes32):
try:
self.block_cache.remove(header_hash)
except KeyError:
# this is best effort. When rolling back, we may not have added the
# block to the cache yet
pass
async def get_full_block(self, header_hash: bytes32) -> Optional[FullBlock]:
cached = self.block_cache.get(header_hash)
if cached is not None:
log.debug(f"cache hit for block {header_hash.hex()}")
return cached
log.debug(f"cache miss for block {header_hash.hex()}")
cursor = await self.db.execute("SELECT block from full_blocks WHERE header_hash=?", (header_hash.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
block = FullBlock.from_bytes(row[0])
self.block_cache.put(header_hash, block)
return block
return None
async def get_full_block_bytes(self, header_hash: bytes32) -> Optional[bytes]:
cached = self.block_cache.get(header_hash)
if cached is not None:
log.debug(f"cache hit for block {header_hash.hex()}")
return bytes(cached)
log.debug(f"cache miss for block {header_hash.hex()}")
cursor = await self.db.execute("SELECT block from full_blocks WHERE header_hash=?", (header_hash.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
return row[0]
return None
async def get_full_blocks_at(self, heights: List[uint32]) -> List[FullBlock]:
if len(heights) == 0:
return []
heights_db = tuple(heights)
formatted_str = f'SELECT block from full_blocks WHERE height in ({"?," * (len(heights_db) - 1)}?)'
cursor = await self.db.execute(formatted_str, heights_db)
rows = await cursor.fetchall()
await cursor.close()
return [FullBlock.from_bytes(row[0]) for row in rows]
async def get_block_records_by_hash(self, header_hashes: List[bytes32]):
"""
Returns a list of Block Records, ordered by the same order in which header_hashes are passed in.
Throws an exception if the blocks are not present
"""
if len(header_hashes) == 0:
return []
header_hashes_db = tuple([hh.hex() for hh in header_hashes])
formatted_str = f'SELECT block from block_records WHERE header_hash in ({"?," * (len(header_hashes_db) - 1)}?)'
cursor = await self.db.execute(formatted_str, header_hashes_db)
rows = await cursor.fetchall()
await cursor.close()
all_blocks: Dict[bytes32, BlockRecord] = {}
for row in rows:
block_rec: BlockRecord = BlockRecord.from_bytes(row[0])
all_blocks[block_rec.header_hash] = block_rec
ret: List[BlockRecord] = []
for hh in header_hashes:
if hh not in all_blocks:
raise ValueError(f"Header hash {hh} not in the blockchain")
ret.append(all_blocks[hh])
return ret
async def get_blocks_by_hash(self, header_hashes: List[bytes32]) -> List[FullBlock]:
"""
        Returns a list of full blocks, ordered by the same order in which header_hashes are passed in.
Throws an exception if the blocks are not present
"""
if len(header_hashes) == 0:
return []
header_hashes_db = tuple([hh.hex() for hh in header_hashes])
formatted_str = (
f'SELECT header_hash, block from full_blocks WHERE header_hash in ({"?," * (len(header_hashes_db) - 1)}?)'
)
cursor = await self.db.execute(formatted_str, header_hashes_db)
rows = await cursor.fetchall()
await cursor.close()
all_blocks: Dict[bytes32, FullBlock] = {}
for row in rows:
header_hash = bytes.fromhex(row[0])
full_block: FullBlock = FullBlock.from_bytes(row[1])
all_blocks[header_hash] = full_block
self.block_cache.put(header_hash, full_block)
ret: List[FullBlock] = []
for hh in header_hashes:
if hh not in all_blocks:
raise ValueError(f"Header hash {hh} not in the blockchain")
ret.append(all_blocks[hh])
return ret
async def get_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]:
cursor = await self.db.execute(
"SELECT block from block_records WHERE header_hash=?",
(header_hash.hex(),),
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
return BlockRecord.from_bytes(row[0])
return None
async def get_block_records_in_range(
self,
start: int,
stop: int,
) -> Dict[bytes32, BlockRecord]:
"""
Returns a dictionary with all blocks in range between start and stop
if present.
"""
formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {start} and height <= {stop}"
cursor = await self.db.execute(formatted_str)
rows = await cursor.fetchall()
await cursor.close()
ret: Dict[bytes32, BlockRecord] = {}
for row in rows:
header_hash = bytes.fromhex(row[0])
ret[header_hash] = BlockRecord.from_bytes(row[1])
return ret
async def get_block_records_close_to_peak(
self, blocks_n: int
) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
"""
Returns a dictionary with all blocks that have height >= peak height - blocks_n, as well as the
peak header hash.
"""
res = await self.db.execute("SELECT * from block_records WHERE is_peak = 1")
peak_row = await res.fetchone()
await res.close()
if peak_row is None:
return {}, None
formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {peak_row[2] - blocks_n}"
cursor = await self.db.execute(formatted_str)
rows = await cursor.fetchall()
await cursor.close()
ret: Dict[bytes32, BlockRecord] = {}
for row in rows:
header_hash = bytes.fromhex(row[0])
ret[header_hash] = BlockRecord.from_bytes(row[1])
return ret, bytes.fromhex(peak_row[0])
async def get_peak_height_dicts(self) -> Tuple[Dict[uint32, bytes32], Dict[uint32, SubEpochSummary]]:
"""
        Returns two dictionaries for the chain ending at the current peak:
        height -> header hash, and height -> sub-epoch summary.
"""
res = await self.db.execute("SELECT * from block_records WHERE is_peak = 1")
row = await res.fetchone()
await res.close()
if row is None:
return {}, {}
peak: bytes32 = bytes.fromhex(row[0])
cursor = await self.db.execute("SELECT header_hash,prev_hash,height,sub_epoch_summary from block_records")
rows = await cursor.fetchall()
await cursor.close()
hash_to_prev_hash: Dict[bytes32, bytes32] = {}
hash_to_height: Dict[bytes32, uint32] = {}
hash_to_summary: Dict[bytes32, SubEpochSummary] = {}
for row in rows:
hash_to_prev_hash[bytes.fromhex(row[0])] = bytes.fromhex(row[1])
hash_to_height[bytes.fromhex(row[0])] = row[2]
if row[3] is not None:
hash_to_summary[bytes.fromhex(row[0])] = SubEpochSummary.from_bytes(row[3])
height_to_hash: Dict[uint32, bytes32] = {}
sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}
curr_header_hash = peak
curr_height = hash_to_height[curr_header_hash]
while True:
height_to_hash[curr_height] = curr_header_hash
if curr_header_hash in hash_to_summary:
sub_epoch_summaries[curr_height] = hash_to_summary[curr_header_hash]
if curr_height == 0:
break
curr_header_hash = hash_to_prev_hash[curr_header_hash]
curr_height = hash_to_height[curr_header_hash]
return height_to_hash, sub_epoch_summaries
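    # Illustrative walk performed above: starting from the peak header hash the loop
    # follows prev_hash links back to genesis, e.g. with a peak at height 3 it visits
    # heights 3 -> 2 -> 1 -> 0, filling height_to_hash at each step and recording any
    # sub-epoch summaries encountered along the way.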
async def set_peak(self, header_hash: bytes32) -> None:
# We need to be in a sqlite transaction here.
# Note: we do not commit this to the database yet, as we need to also change the coin store
cursor_1 = await self.db.execute("UPDATE block_records SET is_peak=0 WHERE is_peak=1")
await cursor_1.close()
cursor_2 = await self.db.execute(
"UPDATE block_records SET is_peak=1 WHERE header_hash=?",
(header_hash.hex(),),
)
await cursor_2.close()
async def is_fully_compactified(self, header_hash: bytes32) -> Optional[bool]:
cursor = await self.db.execute(
"SELECT is_fully_compactified from full_blocks WHERE header_hash=?", (header_hash.hex(),)
)
row = await cursor.fetchone()
await cursor.close()
if row is None:
return None
return bool(row[0])
async def get_first_not_compactified(self, min_height: int) -> Optional[int]:
cursor = await self.db.execute(
"SELECT MIN(height) from full_blocks WHERE is_fully_compactified=0 AND height>=?", (min_height,)
)
row = await cursor.fetchone()
await cursor.close()
if row is None:
return None
return int(row[0])
|
the-stack_106_31530 | import matplotlib
from matplotlib.collections import PatchCollection
import matplotlib.cm
from matplotlib.axes import Axes
import matplotlib.transforms as transforms
from matplotlib import pyplot as plt
from matplotlib import rc
from shapely.geometry import Polygon
from collections import OrderedDict, defaultdict
import numpy as np
from shapely.ops import cascaded_union
from descartes import PolygonPatch
import matplotlib.patheffects as PathEffects
from matplotlib.gridspec import SubplotSpec, GridSpec, GridSpecFromSubplotSpec
import pymc3 as pm
import seaborn as sns
from itertools import product
import re
plt.style.use('ggplot')
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
matplotlib.rcParams["font.family"] = "Bitstream Charter"
matplotlib.rcParams['xtick.labelsize'] = 16
matplotlib.rcParams['ytick.labelsize'] = 16
matplotlib.rcParams['axes.labelsize'] = 22
matplotlib.rcParams['axes.titlesize'] = 22
def plot_counties(
ax,
counties,
values=None,
edgecolors=None,
contourcolor="white",
hatch_surround=None,
xlim=None,
ylim=None,
background=True,
xticks=True,
yticks=True,
grid=True,
frame=True,
xlabel="Longitude [in dec. degrees]",
ylabel="Latitude [in dec. degrees]",
lw=1):
polygons = [r["shape"] for r in counties.values()]
# extend german borders :S and then shrink them again after unifying
# gets rid of many holes at the county boundaries
contour = cascaded_union([pol.buffer(0.01)
for pol in polygons]).buffer(-0.01)
xmin, ymin, xmax, ymax = contour.bounds
if xlim is None:
xlim = [xmin, xmax]
if ylim is None:
ylim = [ymin, ymax]
surround = PolygonPatch(Polygon([(xlim[0], ylim[0]), (xlim[0], ylim[1]), (
xlim[1], ylim[1]), (xlim[1], ylim[0])]).difference(contour))
contour = PolygonPatch(contour, lw=lw)
pc = PatchCollection([PolygonPatch(p, lw=lw)
for p in polygons], cmap=matplotlib.cm.magma, alpha=1.0)
if values is not None:
if isinstance(values, (dict, OrderedDict)):
values = np.array([values.setdefault(r, np.nan)
for r in counties.keys()])
elif isinstance(values, str):
values = np.array([r.setdefault(values, np.nan)
for r in counties.values()])
else:
assert np.size(values) == len(counties), "Number of values ({}) doesn't match number of counties ({})!".format(
np.size(values), len(counties))
pc.set_clim(0, 10)
nans = np.isnan(values)
values[nans] = 0
values = np.ma.MaskedArray(values, mask=nans)
pc.set(array=values, cmap='magma')
else:
pc.set_facecolors("none")
if edgecolors is not None:
if isinstance(edgecolors, (dict, OrderedDict)):
edgecolors = np.array([edgecolors.setdefault(r, "none")
for r in counties.keys()])
elif isinstance(edgecolors, str):
edgecolors = np.array([r.setdefault(edgecolors, "none")
for r in counties.values()])
pc.set_edgecolors(edgecolors)
else:
pc.set_edgecolors("none")
if hatch_surround is not None:
surround.set_hatch(hatch_surround)
surround.set_facecolor("none")
ax.add_patch(surround)
cb = plt.colorbar(pc, shrink=0.6)
cb.set_ticks([0,5,10])
#cb.set_yticks([0.00004])
ax.add_collection(pc)
if contourcolor is not None:
contour.set_edgecolor(contourcolor)
contour.set_facecolor("none")
ax.add_patch(contour)
if isinstance(background, bool):
ax.patch.set_visible(background)
else:
ax.patch.set_color(background)
ax.grid(grid)
ax.set_frame_on(frame)
ax.set_xlim(*xlim)
ax.set_ylim(*ylim)
ax.set_aspect(1.43)
if xlabel:
ax.set_xlabel(xlabel, fontsize=14)
if ylabel:
ax.set_ylabel(ylabel, fontsize=14)
ax.tick_params(axis="x", which="both", bottom=xticks, labelbottom=xticks)
ax.tick_params(axis="y", which="both", left=yticks, labelleft=yticks)
#plt.colorbar()
return pc, contour, surround
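# Illustrative usage sketch (assumptions flagged): `counties` is expected to map a
# county id to a dict with at least a "shape" entry holding a shapely geometry, as
# read via r["shape"] above. The id, value and figure below are made up for the
# example:
#
#     fig, ax = plt.subplots()
#     counties = {"09162": {"shape": some_polygon}}        # hypothetical input
#     plot_counties(ax, counties, values={"09162": 4.2})   # colour counties by value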
def pairplot(
df,
labels={},
diagonal_kind="kde",
lower_kind="kde",
upper_kind="empty",
spec=GridSpec(
1,
1)[0],
xlabelrotation=0,
ylabelrotation=90,
ylabels=True,
xlabels=True,
xtickrotation=60,
fig=plt.gcf(),
lower_kwargs={},
diagonal_kwargs={},
upper_kwargs={},
rasterized=False,
tick_args={}):
N = len(df.columns)
axes = np.empty((N, N), dtype=object)
g = GridSpecFromSubplotSpec(N, N, subplot_spec=spec)
fake_axes = {}
for y in range(N):
fake_axes[(y, 0)] = plt.Subplot(fig, g[y, 0])
fake_axes[(y, 0)].set_visible(False)
for x in range(1, N):
fake_axes[(0, x)] = plt.Subplot(fig, g[0, x])
fake_axes[(0, x)].set_visible(False)
for y, v2 in enumerate(df.columns):
for x, v1 in enumerate(df.columns):
if np.all(np.isnan(df[v1])) or np.all(np.isnan(df[v2])):
                # share_args is not defined yet for an all-NaN column, so use a plain subplot
                axes[y, x] = plt.Subplot(fig, g[y, x])
kind = "noframe"
else:
if y < x: # upper triangle
kind = upper_kind
kwargs = upper_kwargs
elif y == x: # diagonal
kind = diagonal_kind
kwargs = diagonal_kwargs
else: # lower triangle
kind = lower_kind
kwargs = lower_kwargs
if x == y and kind == "kde":
share_args = {"sharex": fake_axes[(0, x)]}
tick_args_default = {
"left": False,
"labelleft": False,
"bottom": (
y == N - 1),
"labelbottom": (
y == N - 1),
"labelsize": 18,
"length": 6}
else:
share_args = {"sharex": fake_axes[(
0, x)], "sharey": fake_axes[(y, 0)]}
tick_args_default = {
"labelleft": (
x == 0),
"labelright": (
x == N - 1),
"labelbottom": (
y == N - 1),
"left": (
x == 0),
"right": (
x == N - 1),
"bottom": (
y == N - 1),
"labelsize": 18,
"length": 6}
tick_args_default.update(tick_args)
tick_args = tick_args_default
axes[y, x] = plt.Subplot(fig, g[y, x], **share_args)
axes[y, x].tick_params(
axis="x", labelrotation=xtickrotation, **tick_args)
if kind == "noframe":
axes[y, x].set_frame_on(False)
axes[y, x].set_xticks([])
axes[y, x].set_yticks([])
elif kind == "empty":
axes[y, x].set_visible(False)
elif kind == "scatter":
axes[y, x].scatter(df[v1], df[v2], **kwargs)
elif kind == "reg":
sns.regplot(df[v1], df[v2], ax=axes[y, x], **kwargs)
elif kind == "kde":
if x == y:
sns.kdeplot(df[v1], shade=True,
ax=axes[y, x], legend=False, **kwargs)
axes[y, x].grid(False)
else:
sns.kdeplot(df[v1], df[v2], shade=True, shade_lowest=False,
ax=axes[y, x], legend=False, **kwargs)
# kde
else:
raise NotImplementedError(
"Subplot kind must be 'empty', 'scatter', 'reg' or 'kde'.")
axes[y, x].set_rasterized(rasterized)
if x == 0 and ylabels:
axes[y, x].set_ylabel(labels.setdefault(
v2, v2), rotation=ylabelrotation, ha='right', va="center", fontsize=18)
axes[y, x].tick_params(**tick_args)
else:
axes[y, x].set_ylabel("")
axes[y, x].tick_params(**tick_args)
if y == N - 1 and xlabels:
axes[y, x].set_xlabel(labels.setdefault(
v1, v1), rotation=xlabelrotation, ha='center', va="top", fontsize=18)
else:
axes[y, x].set_xlabel("")
fig.add_subplot(axes[y, x])
positive = np.all(df.values >= 0)
for y in range(N):
if np.all(np.isnan(df.iloc[:, y])):
continue
μ = df.iloc[:, y].mean()
σ = df.iloc[:, y].std()
if positive:
fake_axes[(y, 0)].set_yticks((0, μ, μ + 3 * σ))
fake_axes[(y, 0)].set_ylim((0, μ + 4 * σ))
else:
fake_axes[(y, 0)].set_yticks((μ - 3 * σ, μ, μ + 3 * σ))
fake_axes[(y, 0)].set_ylim((μ - 4 * σ, μ + 4 * σ))
fake_axes[(y, 0)].yaxis.set_major_formatter(
matplotlib.ticker.FormatStrFormatter('%.1f'))
for x in range(N):
        if np.all(np.isnan(df.iloc[:, x])):
continue
μ = df.iloc[:, x].mean()
σ = df.iloc[:, x].std()
if positive:
fake_axes[(0, x)].set_xticks((0, μ, μ + 3 * σ))
fake_axes[(0, x)].set_xlim((0, μ + 4 * σ))
else:
fake_axes[(0, x)].set_xticks((μ - 3 * σ, μ, μ + 3 * σ))
fake_axes[(0, x)].set_xlim((μ - 4 * σ, μ + 4 * σ))
fake_axes[(0, x)].xaxis.set_major_formatter(
matplotlib.ticker.FormatStrFormatter('%.1f'))
return np.array(axes)
def rhatplot(trace,
var_names=None,
var_args={},
fig=plt.gcf(),
sp=GridSpec(1,
1)[:,
:],
bound=None,
ylabels=True,
yticks=True,
yticklabels=True,
title=r"$\hat R$",
labelsize=22):
if var_names is None:
var_names = trace.varnames
var_args = defaultdict(
lambda: {"color": "C1", "label": None, "markersize": 1}, **var_args)
num_groups = len(var_names)
tp = trace.point(0)
rhat = pm.gelman_rubin(trace, varnames=var_names)
minval = np.min([np.min(rhat[name])
for name in var_names if len(rhat[name]) > 0])
maxval = np.max([np.max(rhat[name])
for name in var_names if len(rhat[name]) > 0])
if bound is None:
bound = maxval
bound_label = str(bound)
gl, gz, gt = re.match(r"([0-9]+\.)(0*)(.*)", bound_label).groups()
gt = str(round(int(gt) / 10**(len(gt) - 1)))[0]
bound_label = gl + gz + gt
grid = GridSpecFromSubplotSpec(
num_groups,
1,
sp,
height_ratios=[
np.prod(
tp[name].shape) +
2 for name in var_names])
axes = []
for j, name in enumerate(var_names):
if len(tp[name]) == 0:
continue
ax = fig.add_subplot(grid[j], sharex=axes[0]
if len(axes) > 0 else None)
args = var_args[name]
yticks_ = []
yticklabels_ = []
for i, idx in enumerate(product(*(range(s) for s in tp[name].shape))):
yticks_.append(-i)
yticklabels_.append("{}".format(np.squeeze(idx)))
if name in rhat:
ax.plot(rhat[name], yticks_, "o", markersize=args["markersize"])
ax.set_ylim([yticks_[-1] - 1, 1])
if not yticklabels:
ax.set_yticklabels([])
        elif yticklabels is True:
ax.set_yticklabels(yticklabels_)
else:
ax.set_yticklabels(yticklabels)
if not yticks:
ax.set_yticks([])
        elif yticks is True:
ax.set_yticks(yticks_)
else:
ax.set_yticks(yticks)
if ylabels:
bbox = ax.get_position()
            if ylabels is True:
label = args["label"]
else:
label = ylabels[j]
if label is None:
label = name
fig.text(bbox.x0 - 0.01, bbox.y0 + bbox.height / 2, label,
ha="right", va="center", fontsize=labelsize)
# ax.set_ylabel(label, rotation=0)
axes.append(ax)
axes[-1].set_xticks([1.0, bound])
axes[-1].set_xticklabels(["1.0", bound_label])
axes[-1].set_xlim([min(minval, 1.0) - 0.01, max(bound, maxval) + 0.01])
for ax in axes[:-1]:
for tick in ax.get_xticklabels():
tick.set_visible(False)
axes[0].set_title(title)
return axes, grid
# because the trace loading doesnt load energy stats properly...
def energyplot(
energies, fill_color=(
"C0", "C1"), fill_alpha=(
1, 0.5), fig=plt.gcf(), sp=GridSpec(
1, 1)[
:, :]):
for i, energy in enumerate(energies):
mean_energy, trans_energy = energy - energy.mean(), np.diff(energy)
ax = fig.add_subplot(sp)
pm.kdeplot(mean_energy, label="Marginal Energy", ax=ax,
shade=fill_alpha[0], kwargs_shade={"color": fill_color[0]})
pm.kdeplot(trans_energy, label="Energy Transition", ax=ax,
shade=fill_alpha[1], kwargs_shade={"color": fill_color[1]})
ax.plot([], label="chain {:>2} BFMI = {:.2f}".format(
i, pm.bfmi({"energy": energy})), alpha=0)
ax.legend()
ax.set_xticks([])
ax.set_yticks([])
# because the default forest plot is not flexible enough #sad
def forestplot(trace, var_labels=None, var_args={}, fig=plt.gcf(),
sp=GridSpec(1, 1)[:, :], combine=False, credible_interval=0.95):
if var_labels is None:
var_labels = trace.varnames
    var_args = defaultdict(
        lambda: {
            "color": "C1",
            "label": None,
            "markersize": 3,  # plot_var_trace reads this key, so provide a default
            "interquartile_linewidth": 2,
            "credible_linewidth": 1},
        **var_args)
num_groups = len(var_labels)
tp = trace.point(0)
# create indices
for i, var_label in enumerate(var_labels):
name = var_label if isinstance(var_label, str) else var_label[0]
        cart = list(product(*(range(s) for s in tp[name].shape)))  # materialise so it can be iterated more than once
if isinstance(var_label, str):
var_labels[i] = (var_label, map(np.squeeze, cart), cart)
else:
var_labels[i] = tuple(var_label) + (cart,)
def plot_var_trace(ax, y, var_trace, credible_interval=0.95, **args):
endpoint = (1 - credible_interval) / 2
qs = np.quantile(var_trace, [endpoint, 1.0 - endpoint, 0.25, 0.75])
ax.plot(qs[:2], [y, y], color=args["color"],
linewidth=args["credible_linewidth"])
ax.plot(qs[2:], [y, y], color=args["color"],
linewidth=args["interquartile_linewidth"])
ax.plot([np.mean(var_trace)], [y], "o",
color=args["color"], markersize=args["markersize"])
grid = GridSpecFromSubplotSpec(
num_groups,
1,
sp,
height_ratios=[
np.prod(
tp[name].shape) +
2 for (
name,
idxs,
carts) in var_labels])
axes = []
for j, (name, idxs, carts) in enumerate(var_labels):
if len(tp[name]) == 0:
continue
ax = fig.add_subplot(grid[j])
args = var_args[name]
yticks = []
yticklabels = []
# plot label
# plot variable stats
for i, (idx, cart) in enumerate(zip(idxs, carts)):
yticks.append(-i)
yticklabels.append("{}".format(idx))
if combine:
var_trace = trace[name][(slice(-1),) + cart]
plot_var_trace(ax, -i, var_trace,
credible_interval=credible_interval, **args)
else:
for c, chain in enumerate(trace.chains):
var_trace = trace.get_values(name, chains=chain)[
(slice(-1),) + cart]
plot_var_trace(ax, -i + 0.25 - c / (trace.nchains - 1) * 0.5,
var_trace, credible_interval=credible_interval, **args)
ax.set_yticks(yticks)
ax.set_ylim([yticks[-1] - 1, 1])
ax.set_yticklabels(yticklabels)
label = args["label"]
if label is None:
label = name
ax.set_ylabel(label)
# ax.set_frame_on(False)
axes.append(ax)
return axes, grid
|
the-stack_106_31532 | import pytest
from pytest_cases.tests.example_code import super_function_i_want_to_test
from pytest_cases.tests.utils import nb_pytest_parameters, get_pytest_param
from pytest_cases import cases_data, CaseDataGetter, THIS_MODULE, cases_generator
try:
from pytest_cases import CaseData
except ImportError:
pass
def case_simple(version # type: str
):
# type: (...) -> CaseData
print("using version " + version)
ins = dict(a=1, b=2)
outs = 2, 3
return ins, outs, None
def case_simple2(version # type: str
):
# type: (...) -> CaseData
print("using version " + version)
ins = dict(a=1, b=2)
outs = 2, 3
return ins, outs, None
@cases_generator(i=range(2), j=range(2))
def case_gen(version, # type: str,
i, j):
# type: (...) -> CaseData
print("using version " + version)
ins = dict(a=i, b=j)
outs = i+1, j+1
return ins, outs, None
# the order of the loops will be [for version] > [for case]
@cases_data(module=THIS_MODULE)
@pytest.mark.parametrize("version", ["1.0.0", "2.0.0"])
def test_with_parameters(case_data, # type: CaseDataGetter
version # type: str
):
""" This test checks that you can blend with your own pytest fixtures/parameters """
# 1- Grab the test case data
i, expected_o, expected_e = case_data.get(version)
# 2- Use it: nominal test only
assert expected_e is None
outs = super_function_i_want_to_test(**i)
assert outs == expected_o
def test_assert_parametrized():
"""Asserts that all tests are parametrized with the correct number of cases"""
assert nb_pytest_parameters(test_with_parameters) == 2
param_args = get_pytest_param(test_with_parameters, 0)
assert len(param_args) == 2
assert param_args[0] == 'version'
assert len(param_args[1]) == 2
param_args = get_pytest_param(test_with_parameters, 1)
assert len(param_args) == 2
assert param_args[0] == 'case_data'
assert len(param_args[1]) == 1 + 1 + 2 * 2
|
the-stack_106_31533 | # Copyright (c) 2020, Huawei Technologies.
# Copyright (c) 2019, NVIDIA CORPORATION.
# All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ._initialize import _initialize
from ._amp_state import _amp_state, warn_or_err, maybe_print
from collections import OrderedDict
class Properties(object):
"""
This class has two purposes: to establish a set of default properties,
and to route setting of these attributes through __setattr__ so that (in theory)
they can be checked for consistency with other existing args.
"""
def __init__(self):
self.options = {
"enabled" : False,
"opt_level" : None,
"cast_model_type" : None,
"patch_torch_functions" : False,
"keep_batchnorm_fp32" : None,
"master_weights" : None,
"loss_scale" : 1.0,
"combine_grad": None
# Reserved for future functionality
# "fused_optimizer" : False,
# "enable_ddp_interop" : False,
}
"""
This function allows updating several options at a time without routing through
__setattr__ checks, to avoid "you can't get there from here" scenarios.
Currently not intended to be exposed; users are expected to select an opt_level
and apply consistent modifications.
"""
def _update_options_dict(self, new_options):
for k, v in new_options:
if k in self.options:
self.options[k] = v
else:
raise ValueError("Tried to set unexpected option {}".format(k))
"""
The members of "options" are not direct attributes of self, so access attempts
will roll down to __getattr__. This borrows from the logic in torch.nn.Module.
"""
def __getattr__(self, name):
if "options" in self.__dict__:
options = self.__dict__["options"]
if name in options:
return options[name]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, name))
def __setattr__(self, name, value):
if "options" in self.__dict__:
if name in self.options:
# print("setting {} {}".format(name, value))
if name == "cast_model_type":
if self.opt_level == "O1" and value is not None:
if value is not False:
if value is not torch.float32:
warn_or_err("O1 inserts casts around Torch functions rather than "
"model weights, so with O1, the model weights themselves "
"should remain FP32. If you wish to cast the model to a "
"different type, use opt_level='O2' or 'O3'. " +
"cast_model_type was {}".format(value))
self.options[name] = value
elif name == "patch_torch_functions":
if self.opt_level != "O1" and value:
warn_or_err("Currently, patch_torch_functions=True should only be set by "
"selecting opt_level='O1'.")
self.options[name] = value
elif name == "keep_batchnorm_fp32":
if self.opt_level == "O1" and value is not None:
warn_or_err("With opt_level O1, batchnorm functions are automatically patched "
"to run in FP32, so keep_batchnorm_fp32 should be None." +
" keep_batchnorm_fp32 was {}".format(value))
if value == "False":
self.options[name] = False
elif value == "True":
self.options[name] = True
else:
assert (value is True or value is False or value is None),\
"keep_batchnorm_fp32 must be a boolean, the string 'True' or 'False', "\
"or None, found keep_batchnorm_fp32={}".format(value)
self.options[name] = value
elif name == "master_weights":
if self.opt_level == "O1" and value is not None:
warn_or_err("It doesn't make sense to use master_weights with O1. "
"With O1, your model weights themselves should be FP32.")
self.options[name] = value
elif name == "loss_scale":
if value == "dynamic":
self.options[name] = value
else:
self.options[name] = float(value)
elif name == "combine_grad":
if self.opt_level not in ["O1", "O2"] and value:
warn_or_err("Currently, combine_grad=True should only be set by "
"selecting opt_level='O1' or opt_level='O2'.")
self.options[name] = value
else:
self.options[name] = value
else:
super(Properties, self).__setattr__(name, value)
""" O0-O3 are convenience wrappers to establish defaults for typically used mixed precision options. """
class O3:
brief = "O3: Pure FP16 training."
more = "Calls .half() on your model, converting the entire model to FP16.\n"\
"A casting operation is also inserted to cast incoming Tensors to FP16,\n"\
"so you don't need to change your data pipeline.\n"\
"This mode is useful for establishing a performance ceiling.\n"\
"It's also possible training may 'just work' in this mode.\n"\
"If not, try other optimization levels."
def __call__(self, properties):
properties.enabled = True
properties.opt_level = "O3"
properties.cast_model_type = torch.float16
properties.patch_torch_functions = False
properties.keep_batchnorm_fp32 = False
properties.master_weights = False
properties.loss_scale = 1.0
# properties.fused_optimizer = False
# properties.enable_ddp_interop = False
return properties # modified in place so this isn't really necessary
class O2:
brief = "O2: FP16 training with FP32 batchnorm and FP32 master weights.\n"
more = "Calls .half() on your model, converting the entire model (except for batchnorms)\n"\
"to FP16. Batchnorms are retained in FP32 for additional stability.\n"\
"The forward pass is patched to cast incoming Tensors to FP16, so you don't need to change\n"\
"your data pipeline.\n"\
"O2 creates FP32 master weights outside the model and patches any optimizers to update\n"\
"these master weights, then copy the master weights into the FP16 model weights.\n"\
"Master weights can also improve convergence and stability."
def __call__(self, properties):
properties.enabled = True
properties.opt_level = "O2"
properties.cast_model_type = torch.float16
properties.patch_torch_functions = False
properties.keep_batchnorm_fp32 = True
properties.master_weights = True
properties.loss_scale = "dynamic"
# properties.fused_optimizer = False
# properties.enable_ddp_interop = False
return properties # modified in place so this isn't really necessary
class O1:
brief = "O1: Insert automatic casts around Pytorch functions and Tensor methods.\n"
more = "The type of your model's weights is not altered. However, internally,\n"\
"Pytorch functions are patched to cast any Tensor Core-friendly ops to FP16 for speed,\n"\
"while operations that might benefit from the additional stability of FP32 are patched\n"\
"to cast their inputs to fp32.\n"\
"O1 is the safest way to try mixed precision training, and is recommended when\n"\
"trying mixed precision training for the first time."
def __call__(self, properties):
properties.enabled = True
properties.opt_level = "O1"
properties.cast_model_type = None
properties.patch_torch_functions = True
properties.keep_batchnorm_fp32 = None
properties.master_weights = None
properties.loss_scale = "dynamic"
properties.combine_grad = None
# properties.fused_optimizer = False
# properties.enable_ddp_interop = False
return properties # modified in place so this isn't really necessary
class O0:
brief = "O0: Pure FP32 training.\n"
more = "Your models are checked to make sure parameters are FP32, but otherwise the\n"\
"types of weights and internal Pytorch operations are not altered. This mode disables any\n"\
"FP16 arithmetic, although other optimizations like DDP interop may still be requested.\n"
def __call__(self, properties):
properties.enabled = True
properties.opt_level = "O0"
properties.cast_model_type = torch.float32
properties.patch_torch_functions = False
properties.keep_batchnorm_fp32 = None
properties.master_weights = False
properties.loss_scale = 1.0
# properties.fused_optimizer = False
# properties.enable_ddp_interop = False
return properties # modified in place so this isn't really necessary
opt_levels = {"O3": O3(),
"O2": O2(),
"O1": O1(),
"O0": O0()}
# allow user to directly pass Properties struct as well?
def initialize(
models,
optimizers=None,
enabled=True,
opt_level="O1",
cast_model_type=None,
patch_torch_functions=None,
keep_batchnorm_fp32=None,
master_weights=None,
loss_scale=None,
cast_model_outputs=None,
num_losses=1,
verbosity=1,
min_loss_scale=None,
max_loss_scale=2.**24,
combine_grad=None
):
"""
Initialize your models, optimizers, and the Torch tensor and functional namespace according to the
chosen ``opt_level`` and overridden properties, if any.
``amp.initialize`` should be called **after** you have finished
constructing your model(s) and
optimizer(s), but **before** you send your model through any DistributedDataParallel wrapper.
See `Distributed training`_ in the Imagenet example.
Currently, ``amp.initialize`` should only be called **once**,
although it can process an arbitrary number of
models and optimizers (see the corresponding `Advanced Amp Usage topic`_).
If you think your use case requires ``amp.initialize`` to be called more than once,
`let us know`_.
Any property keyword argument that is not ``None`` will be interpreted as a manual override.
To prevent having to rewrite anything else in your script, name the returned models/optimizers
to replace the passed models/optimizers, as in the code sample below.
Args:
models (torch.nn.Module or list of torch.nn.Modules): Models to modify/cast.
optimizers (optional, torch.optim.Optimizer or list of torch.optim.Optimizers): Optimizers to modify/cast.
REQUIRED for training, optional for inference.
enabled (bool, optional, default=True): If False, renders all Amp calls no-ops, so your script
should run as if Amp were not present.
opt_level (str, optional, default="O1"): Pure or mixed precision optimization level. Accepted values are
"O0", "O1", "O2", and "O3", explained in detail above.
cast_model_type (``torch.dtype``, optional, default=None): Optional property override, see
above.
patch_torch_functions (bool, optional, default=None): Optional property override.
keep_batchnorm_fp32 (bool or str, optional, default=None): Optional property override. If
passed as a string, must be the string "True" or "False".
master_weights (bool, optional, default=None): Optional property override.
loss_scale (float or str, optional, default=None): Optional property override. If passed as a string,
must be a string representing a number, e.g., "128.0", or the string "dynamic".
cast_model_outputs (torch.dtype, optional, default=None): Option to ensure that the outputs
of your model(s) are always cast to a particular type regardless of ``opt_level``.
num_losses (int, optional, default=1): Option to tell Amp in advance how many losses/backward
passes you plan to use. When used in conjunction with the ``loss_id`` argument to
``amp.scale_loss``, enables Amp to use a different loss scale per loss/backward pass,
which can improve stability. See "Multiple models/optimizers/losses"
under `Advanced Amp Usage`_ for examples. If ``num_losses`` is left to 1, Amp will still
support multiple losses/backward passes, but use a single global loss scale
for all of them.
verbosity (int, default=1): Set to 0 to suppress Amp-related output.
min_loss_scale (float, default=None): Sets a floor for the loss scale values that can be chosen by dynamic
loss scaling. The default value of None means that no floor is imposed.
If dynamic loss scaling is not used, `min_loss_scale` is ignored.
max_loss_scale (float, default=2.**24): Sets a ceiling for the loss scale values that can be chosen by
dynamic loss scaling. If dynamic loss scaling is not used, `max_loss_scale` is ignored.
combine_grad (bool, optional, default=None): If True, make gradients fused for unscale.
Returns:
Model(s) and optimizer(s) modified according to the ``opt_level``.
If either the ``models`` or ``optimizers`` args were lists, the corresponding return value will
also be a list.
Permissible invocations::
model, optim = amp.initialize(model, optim,...)
model, [optim1, optim2] = amp.initialize(model, [optim1, optim2],...)
[model1, model2], optim = amp.initialize([model1, model2], optim,...)
[model1, model2], [optim1, optim2] = amp.initialize([model1, model2], [optim1, optim2],...)
# This is not an exhaustive list of the cross product of options that are possible,
# just a set of examples.
model, optim = amp.initialize(model, optim, opt_level="O0")
model, optim = amp.initialize(model, optim, opt_level="O0", loss_scale="dynamic"|128.0|"128.0")
        model, optim = amp.initialize(model, optim, opt_level="O1") # uses loss_scale="dynamic" default
model, optim = amp.initialize(model, optim, opt_level="O1", loss_scale=128.0|"128.0")
        model, optim = amp.initialize(model, optim, opt_level="O2") # uses loss_scale="dynamic" default
model, optim = amp.initialize(model, optim, opt_level="O2", loss_scale=128.0|"128.0")
model, optim = amp.initialize(model, optim, opt_level="O2", keep_batchnorm_fp32=True|False|"True"|"False")
model, optim = amp.initialize(model, optim, opt_level="O3") # uses loss_scale=1.0 default
model, optim = amp.initialize(model, optim, opt_level="O3", loss_scale="dynamic"|128.0|"128.0")
model, optim = amp.initialize(model, optim, opt_level="O3", keep_batchnorm_fp32=True|False|"True"|"False")
The `Imagenet example`_ demonstrates live use of various opt_levels and overrides.
.. _`Distributed training`:
https://github.com/NVIDIA/apex/tree/master/examples/imagenet#distributed-training
.. _`Imagenet example`:
https://github.com/NVIDIA/apex/tree/master/examples/imagenet
.. _`Advanced Amp Usage`:
https://nvidia.github.io/apex/advanced.html
.. _`Advanced Amp Usage topic`:
https://nvidia.github.io/apex/advanced.html#multiple-models-optimizers-losses
.. _`let us know`:
https://github.com/NVIDIA/apex/issues
"""
_amp_state.opt_properties = Properties()
    # Note: a combine_grad switch is supported so gradients can be fused ("combined") for
    # unscaling; it is processed together with the other keyword overrides below.
_amp_state.verbosity = verbosity
if not enabled:
if optimizers is None:
return models
else:
return models, optimizers
if not torch.backends.cudnn.enabled:
raise RuntimeError(
"Amp requires torch.backends.cudnn.enabled = True")
if opt_level not in opt_levels:
raise RuntimeError(
"Unexpected optimization level {}. ".format(opt_level) +
"Options are 'O0', 'O1', 'O2', 'O3'. Note that in `O0`, `O1`, etc., the prefix O is the letter O, " +
"not the number zero.")
else:
_amp_state.opt_properties = opt_levels[opt_level](_amp_state.opt_properties)
maybe_print("Selected optimization level {}".format(opt_levels[opt_level].brief), True)
maybe_print("Defaults for this optimization level are:", True)
for k, v in _amp_state.opt_properties.options.items():
maybe_print("{:22} : {}".format(k, v), True)
_amp_state.min_loss_scale = min_loss_scale
_amp_state.max_loss_scale = max_loss_scale
maybe_print("Processing user overrides (additional kwargs that are not None)...", True)
# I chose to have the keyword arguments listed directly in the argument list,
# instead of **kwargs, so I can't use kwargs.items() here.
if enabled is not None:
_amp_state.opt_properties.enabled = enabled
if opt_level is not None:
_amp_state.opt_properties.opt_level = opt_level
if cast_model_type is not None:
_amp_state.opt_properties.cast_model_type = cast_model_type
if patch_torch_functions is not None:
_amp_state.opt_properties.patch_torch_functions = patch_torch_functions
if keep_batchnorm_fp32 is not None:
_amp_state.opt_properties.keep_batchnorm_fp32 = keep_batchnorm_fp32
if master_weights is not None:
_amp_state.opt_properties.master_weights = master_weights
if loss_scale is not None:
_amp_state.opt_properties.loss_scale = loss_scale
if combine_grad is not None:
_amp_state.opt_properties.combine_grad = combine_grad
maybe_print("After processing overrides, optimization options are:", True)
for k, v in _amp_state.opt_properties.options.items():
maybe_print("{:22} : {}".format(k, v), True)
return _initialize(models, optimizers, _amp_state.opt_properties, num_losses, cast_model_outputs)
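# A minimal usage sketch (assumes `model`, `optimizer`, `loss_fn`, and a data `loader` are
# already built, and that the script otherwise follows the standard Apex Amp workflow):
#
#     model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
#     for inputs, targets in loader:
#         optimizer.zero_grad()
#         loss = loss_fn(model(inputs), targets)
#         with amp.scale_loss(loss, optimizer) as scaled_loss:
#             scaled_loss.backward()
#         optimizer.step()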
def state_dict(destination=None):
if destination is None:
destination = OrderedDict()
for idx, loss_scaler in enumerate(_amp_state.loss_scalers):
destination['loss_scaler%d' % idx] = {
'loss_scale': loss_scaler.loss_scale(),
'unskipped': loss_scaler._unskipped,
}
return destination
def load_state_dict(state_dict):
    # Check whether state_dict contains the same number of loss_scalers as the current setup
if len(state_dict) != len(_amp_state.loss_scalers):
print('Warning: state_dict contains {} entries, while {} loss_scalers are used'.format(
len(state_dict), len(_amp_state.loss_scalers)))
state_dict = state_dict.copy()
nb_loss_scalers = len(_amp_state.loss_scalers)
unexpected_keys = []
    # Track idx manually; with enumerate, unexpected keys would also advance the index
idx = 0
for key in state_dict:
if 'loss_scaler' not in key:
unexpected_keys.append(key)
else:
if idx > (nb_loss_scalers - 1):
print('Skipping loss_scaler[{}], since num_losses was set to {}'.format(
idx, nb_loss_scalers))
break
_amp_state.loss_scalers[idx]._loss_scale = state_dict[key]['loss_scale']
_amp_state.loss_scalers[idx]._unskipped = state_dict[key]['unskipped']
idx += 1
if len(unexpected_keys) > 0:
raise RuntimeError(
'Error(s) in loading state_dict. Unexpected key(s) in state_dict: {}. '.format(
', '.join('"{}"'.format(k) for k in unexpected_keys)))
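# Checkpointing sketch (assumes `model` and `optimizer` were already passed through
# ``amp.initialize``); the loss-scaler state is saved and restored alongside the usual
# model/optimizer state dicts:
#
#     checkpoint = {
#         "model": model.state_dict(),
#         "optimizer": optimizer.state_dict(),
#         "amp": amp.state_dict(),
#     }
#     torch.save(checkpoint, "checkpoint.pt")
#     # ... later, to resume:
#     checkpoint = torch.load("checkpoint.pt")
#     model.load_state_dict(checkpoint["model"])
#     optimizer.load_state_dict(checkpoint["optimizer"])
#     amp.load_state_dict(checkpoint["amp"])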
# TODO: is this necessary/useful?
# def check_option_consistency(enabled=True,
# opt_level=None,
# cast_model_type=None,
# patch_torch_functions=None,
# keep_batchnorm_fp32=None,
# master_weights=None,
# loss_scale=None,
# enable_ddp_interop=None,
# hard_override=False):
# """
# Utility function that enables users to quickly check if the option combination they intend
# to use is permitted. ``check_option_consistency`` does not require models or optimizers
# to be constructed, and can be called at any point in the script. ``check_option_consistency``
# is totally self-contained; it does not set any amp global state or affect anything outside
# of itself.
# """
#
# if not enabled:
# return
#
# if opt_level not in opt_levels:
# raise RuntimeError("Unexpected optimization level. Options are 'O0', 'O1', 'O2', 'O3'.")
# else:
# opt_properties = opt_levels[opt_level](Properties())
# print("Selected optimization level {}", opt_levels[opt_level].brief)
# print("Defaults for this optimization level are:")
# for k, v in opt_properties.options:
# print("{:22} : {}".format(k, v))
#
# print("Processing user overrides (additional kwargs that are not None)...")
# for k, v in kwargs:
# if k not in _amp_state.opt_properties.options:
# raise RuntimeError("Unexpected kwarg {}".format(k))
# if v is not None:
# setattr(opt_properties, k, v)
#
# print("After processing overrides, optimization options are:")
# for k, v in opt_properties.options:
# print("{:22} : {}".format(k, v))
|
the-stack_106_31534 | import _plotly_utils.basevalidators
class ShowtickprefixValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="showtickprefix",
parent_name="densitymapbox.colorbar",
**kwargs
):
super(ShowtickprefixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["all", "first", "last", "none"]),
**kwargs
)
|
the-stack_106_31537 | import torch
import torch.nn as nn
import numpy as np
from torch.nn import functional as F
import math
class Mish(nn.Module):
def forward(self, x):
return x * torch.tanh(F.softplus(x))
class DiffusionEmbedding(nn.Module):
""" Diffusion Step Embedding """
def __init__(self, d_denoiser):
super(DiffusionEmbedding, self).__init__()
self.dim = d_denoiser
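    # Shape sketch (assumed sizes): for a batch of diffusion-step indices of shape (batch,),
    # forward() returns a (batch, self.dim) sinusoidal embedding, e.g.
    #     emb = DiffusionEmbedding(256)
    #     out = emb(torch.randint(0, 1000, (8,)))   # -> torch.Size([8, 256])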
def forward(self, x):
device = x.device
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
emb = x[:, None] * emb[None, :]
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb
class ResidualBlock(nn.Module):
""" Residual Block """
def __init__(self, d_encoder, residual_channels, dropout):
super(ResidualBlock, self).__init__()
self.conv_layer = ConvNorm(
residual_channels,
2 * residual_channels,
kernel_size=3,
stride=1,
padding=int((3 - 1) / 2),
dilation=1,
)
self.diffusion_projection = LinearNorm(residual_channels, residual_channels)
self.conditioner_projection = ConvNorm(
d_encoder, 2 * residual_channels, kernel_size=1
)
self.output_projection = ConvNorm(
residual_channels, 2 * residual_channels, kernel_size=1
)
def forward(self, x, conditioner, diffusion_step, mask=None):
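        # WaveNet-style gated residual block: add the diffusion-step embedding to the input,
        # add the conditioner after the 3-tap convolution, apply a sigmoid/tanh gate, and
        # split the projected result into a residual path (scaled by 1/sqrt(2)) and a skip
        # connection.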
diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1)
conditioner = self.conditioner_projection(conditioner)
y = x + diffusion_step
y = self.conv_layer(y) + conditioner
gate, filter = torch.chunk(y, 2, dim=1)
y = torch.sigmoid(gate) * torch.tanh(filter)
y = self.output_projection(y)
residual, skip = torch.chunk(y, 2, dim=1)
return (x + residual) / math.sqrt(2.0), skip
class LinearNorm(nn.Module):
""" LinearNorm Projection """
def __init__(self, in_features, out_features, bias=False):
super(LinearNorm, self).__init__()
self.linear = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(self.linear.weight)
if bias:
nn.init.constant_(self.linear.bias, 0.0)
def forward(self, x):
x = self.linear(x)
return x
class ConvBlock(nn.Module):
""" Convolutional Block """
def __init__(self, in_channels, out_channels, kernel_size, dropout, activation=nn.ReLU()):
super(ConvBlock, self).__init__()
self.conv_layer = nn.Sequential(
ConvNorm(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
padding=int((kernel_size - 1) / 2),
dilation=1,
w_init_gain="tanh",
),
nn.BatchNorm1d(out_channels),
activation
)
self.dropout = dropout
self.layer_norm = nn.LayerNorm(out_channels)
def forward(self, enc_input, mask=None):
enc_output = enc_input.contiguous().transpose(1, 2)
enc_output = F.dropout(self.conv_layer(enc_output), self.dropout, self.training)
enc_output = self.layer_norm(enc_output.contiguous().transpose(1, 2))
if mask is not None:
enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0)
return enc_output
class ConvNorm(nn.Module):
""" 1D Convolution """
def __init__(
self,
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=None,
dilation=1,
bias=True,
w_init_gain="linear",
):
super(ConvNorm, self).__init__()
if padding is None:
assert kernel_size % 2 == 1
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = nn.Conv1d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
)
nn.init.kaiming_normal_(self.conv.weight)
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class FFTBlock(nn.Module):
""" FFT Block """
def __init__(self, d_model, n_head, d_k, d_v, d_inner, kernel_size, dropout=0.1):
super(FFTBlock, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(
d_model, d_inner, kernel_size, dropout=dropout
)
def forward(self, enc_input, mask=None, slf_attn_mask=None):
enc_output, enc_slf_attn = self.slf_attn(
enc_input, enc_input, enc_input, mask=slf_attn_mask
)
if mask is not None:
enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0)
enc_output = self.pos_ffn(enc_output)
if mask is not None:
enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0)
return enc_output, enc_slf_attn
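# Usage sketch for FFTBlock (assumed FastSpeech-style sizes; kernel_size is a 2-element
# list, one kernel per position-wise conv):
#     block = FFTBlock(d_model=256, n_head=2, d_k=128, d_v=128, d_inner=1024, kernel_size=[9, 1])
#     x = torch.randn(8, 100, 256)   # (batch, seq_len, d_model)
#     out, attn = block(x)           # out: (8, 100, 256); attn: (n_head * batch, 100, 100)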
class MultiHeadAttention(nn.Module):
""" Multi-Head Attention """
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super(MultiHeadAttention, self).__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = LinearNorm(d_model, n_head * d_k)
self.w_ks = LinearNorm(d_model, n_head * d_k)
self.w_vs = LinearNorm(d_model, n_head * d_v)
self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
self.layer_norm = nn.LayerNorm(d_model)
self.fc = LinearNorm(n_head * d_v, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
sz_b, len_v, _ = v.size()
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
        if mask is not None:
            mask = mask.repeat(n_head, 1, 1)  # (n*b) x .. x ..
output, attn = self.attention(q, k, v, mask=mask)
output = output.view(n_head, sz_b, len_q, d_v)
output = (
output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
) # b x lq x (n*dv)
output = self.dropout(self.fc(output))
output = self.layer_norm(output + residual)
return output, attn
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature):
super(ScaledDotProductAttention, self).__init__()
self.temperature = temperature
self.softmax = nn.Softmax(dim=2)
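    # forward() computes softmax(Q K^T / temperature) V; with temperature = sqrt(d_k) this is
    # standard scaled dot-product attention, and masked positions are filled with -inf before
    # the softmax so they receive zero attention weight.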
def forward(self, q, k, v, mask=None):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
if mask is not None:
attn = attn.masked_fill(mask, -np.inf)
attn = self.softmax(attn)
output = torch.bmm(attn, v)
return output, attn
class PositionwiseFeedForward(nn.Module):
    """ A two-layer, position-wise feed-forward network """
def __init__(self, d_in, d_hid, kernel_size, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
# Use Conv1D
# position-wise
self.w_1 = nn.Conv1d(
d_in,
d_hid,
kernel_size=kernel_size[0],
padding=(kernel_size[0] - 1) // 2,
)
# position-wise
self.w_2 = nn.Conv1d(
d_hid,
d_in,
kernel_size=kernel_size[1],
padding=(kernel_size[1] - 1) // 2,
)
self.layer_norm = nn.LayerNorm(d_in)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
output = x.transpose(1, 2)
output = self.w_2(F.relu(self.w_1(output)))
output = output.transpose(1, 2)
output = self.dropout(output)
output = self.layer_norm(output + residual)
return output
|
the-stack_106_31538 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import gc
import numpy as np
import os
import threading
import weakref
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.client import session
from tensorflow.python.compat import compat as forward_compat
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.gradients # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
ops._set_call_cpp_shape_fn(common_shapes.call_cpp_shape_fn)
class ResourceTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBuildGraph(self):
with self.cached_session():
pt = test_ops.stub_resource_handle_op(container="a", shared_name="b")
test_ops.resource_create_op(pt).run()
@test_util.run_deprecated_v1
def testInitialize(self):
with self.cached_session():
handle = test_ops.stub_resource_handle_op(container="a", shared_name="b")
resources.register_resource(
handle=handle,
create_op=test_ops.resource_create_op(handle),
is_initialized_op=test_ops.resource_initialized_op(handle))
self.assertEquals(
len(
resources.report_uninitialized_resources(
resources.shared_resources()).eval()), 1)
resources.initialize_resources(resources.shared_resources()).run()
self.assertEquals(
len(
resources.report_uninitialized_resources(
resources.shared_resources()).eval()), 0)
class TensorAndShapeTest(test_util.TensorFlowTestCase):
def testShape(self):
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
self.assertEqual(tensor_shape.unknown_shape(), t.get_shape())
t.set_shape([1, 2, 3])
self.assertEqual([1, 2, 3], t.get_shape())
def testIterable(self):
if not context.executing_eagerly():
self.skipTest("Eager-mode test")
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
with self.assertRaisesRegexp(TypeError, "Cannot iterate"):
next(iter(t))
def testIterableGraph(self):
if context.executing_eagerly():
self.skipTest("Graph-mode test")
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
with self.assertRaisesRegexp(TypeError, "iterating.*not allowed in Graph"):
next(iter(t))
with self.assertRaisesRegexp(
TypeError, "iterating.*AutoGraph did not convert"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.ENABLED):
next(iter(t))
with self.assertRaisesRegexp(
TypeError, "iterating.*AutoGraph is disabled"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.DISABLED):
next(iter(t))
def testImplicitBool(self):
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.bool])
t = op.outputs[0]
with self.assertRaisesRegexp(
TypeError, "using.*as a.*bool.*not allowed in Graph"):
bool(t)
with self.assertRaisesRegexp(
TypeError, "using.*as a.*bool.*AutoGraph did not convert"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.ENABLED):
bool(t)
with self.assertRaisesRegexp(
TypeError, "using.*as a.*bool.*AutoGraph is disabled"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.DISABLED):
bool(t)
def testAddShape(self):
with self.cached_session():
a = array_ops.zeros([2, 3])
b = array_ops.ones([1, 3])
c = a + b
self.assertEqual([2, 3], c.shape)
@test_util.run_deprecated_v1
def testUnknownDim(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None, 3])
b = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None, 3])
c = a + b
self.assertEqual([2, None, 3], c.shape.as_list())
@test_util.run_deprecated_v1
def testUnknownShape(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=None)
b = array_ops.ones([1, 3])
c = a + b
self.assertEqual(tensor_shape.unknown_shape(), c.shape)
@test_util.run_deprecated_v1
def testScalarShape(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=[])
b = array_ops.ones([])
c = a + b
self.assertEqual(tensor_shape.TensorShape([]), c.shape)
@test_util.run_deprecated_v1
def testShapeFunctionError(self):
with self.cached_session():
a = array_ops.ones([1, 2, 3])
b = array_ops.ones([4, 5, 6])
with self.assertRaisesRegexp(
ValueError, r"Dimensions must be equal, but are 2 and 5 for 'add' "
r"\(op: 'Add(V2)?'\) with input shapes: \[1,2,3\], \[4,5,6\]."):
_ = a + b
def testNumpyArray(self):
with ops.Graph().as_default():
x = array_ops.ones((3, 4), name="test_ones")
with self.assertRaisesRegexp(NotImplementedError,
r"Cannot convert a symbolic.+test_ones"):
np.array(x)
with self.assertRaisesRegexp(TypeError, "not well defined.+test_ones"):
len(x)
# EagerTensors should still behave as numpy arrays.
with context.eager_mode():
x = array_ops.ones((3, 4))
self.assertAllEqual(x, np.ones((3, 4)))
self.assertAllEqual(np.array(x), np.ones((3, 4)))
self.assertEqual(len(x), 3)
def testRef(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertEqual(x1.experimental_ref(), x1.experimental_ref())
self.assertEqual(x2.experimental_ref(), x2.experimental_ref())
self.assertEqual(x1.experimental_ref(), x2.experimental_ref())
self.assertEqual(y.experimental_ref(), y.experimental_ref())
self.assertEqual(z.experimental_ref(), z.experimental_ref())
self.assertEqual(w.experimental_ref(), w.experimental_ref())
self.assertNotEqual(x1.experimental_ref(), y.experimental_ref())
self.assertNotEqual(x1.experimental_ref(), z.experimental_ref())
self.assertNotEqual(x1.experimental_ref(), w.experimental_ref())
self.assertNotEqual(y.experimental_ref(), z.experimental_ref())
self.assertNotEqual(y.experimental_ref(), w.experimental_ref())
self.assertNotEqual(z.experimental_ref(), w.experimental_ref())
def testRefDeref(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertIs(x1, x1.experimental_ref().deref())
self.assertIs(x2, x2.experimental_ref().deref())
self.assertIs(x1, x2.experimental_ref().deref())
self.assertIs(x2, x1.experimental_ref().deref())
self.assertIs(y, y.experimental_ref().deref())
self.assertIs(z, z.experimental_ref().deref())
self.assertIsNot(x1, y.experimental_ref().deref())
self.assertIsNot(x1, z.experimental_ref().deref())
self.assertIsNot(x1, w.experimental_ref().deref())
self.assertIsNot(y, z.experimental_ref().deref())
self.assertIsNot(y, w.experimental_ref().deref())
self.assertIsNot(z, w.experimental_ref().deref())
def testRefInSet(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertEqual(x1.experimental_ref(), x2.experimental_ref())
tensor_set = {
x1.experimental_ref(),
x2.experimental_ref(),
y.experimental_ref(),
z.experimental_ref(),
w.experimental_ref(),
}
self.assertEqual(len(tensor_set), 4)
self.assertIn(x1.experimental_ref(), tensor_set)
self.assertIn(x2.experimental_ref(), tensor_set)
self.assertIn(y.experimental_ref(), tensor_set)
self.assertIn(z.experimental_ref(), tensor_set)
self.assertIn(w.experimental_ref(), tensor_set)
def testRefInDict(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertEqual(x1.experimental_ref(), x2.experimental_ref())
tensor_dict = {
x1.experimental_ref(): "x1",
y.experimental_ref(): "y",
z.experimental_ref(): "z",
w.experimental_ref(): "w",
}
self.assertEqual(len(tensor_dict), 4)
# Overwriting x1
tensor_dict[x2.experimental_ref()] = "x2"
self.assertEqual(len(tensor_dict), 4)
self.assertEqual(tensor_dict[x1.experimental_ref()], "x2")
self.assertEqual(tensor_dict[x2.experimental_ref()], "x2")
self.assertEqual(tensor_dict[y.experimental_ref()], "y")
self.assertEqual(tensor_dict[z.experimental_ref()], "z")
self.assertEqual(tensor_dict[w.experimental_ref()], "w")
def testTensorRefStrong(self):
x = constant_op.constant(1.)
x_ref = x.experimental_ref()
del x
self.assertIsNotNone(x_ref.deref())
def testVariableRefStrong(self):
x = variables.Variable(1.)
x_ref = x.experimental_ref()
del x
self.assertIsNotNone(x_ref.deref())
class IndexedSlicesTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testToTensor(self):
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
dense_shape = constant_op.constant([3, 2])
x = ops.IndexedSlices(values, indices, dense_shape)
tensor = ops.convert_to_tensor(x, name="tensor")
self.assertAllEqual(self.evaluate(tensor), [[2, 3], [0, 0], [5, 7]])
@test_util.run_gpu_only
def testEagerCopy(self):
with context.eager_mode():
var = variables.Variable([[0.0], [0.0], [0.0], [0.0]], name="tensor")
with backprop.GradientTape() as tape:
a = array_ops.gather(array_ops.gather(var, [0, 1]), [0, 1])
b = array_ops.gather(array_ops.gather(var, [2, 3]), [0, 1])
r = special_math_ops.einsum("ij,ij->i", a, b)
g = tape.gradient(r, [var])[0]
values = g.values if isinstance(g, ops.IndexedSlices) else g
self.assertAllEqual(values.get_shape(), [4, 1])
@test_util.run_deprecated_v1
def testNegation(self):
with self.cached_session():
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = -ops.IndexedSlices(values, indices)
self.assertAllEqual(x.values.eval(), [[-2, -3], [-5, -7]])
self.assertAllEqual(x.indices.eval(), [0, 2])
@test_util.run_deprecated_v1
def testScalarMul(self):
with self.cached_session():
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = math_ops.scalar_mul(-2, ops.IndexedSlices(values, indices))
self.assertAllEqual(x.values.eval(), [[-4, -6], [-10, -14]])
self.assertAllEqual(x.indices.eval(), [0, 2])
@test_util.run_all_in_graph_and_eager_modes
class IndexedSlicesSpecTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def assertAllTensorsEqual(self, list1, list2):
self.assertLen(list1, len(list2))
for (t1, t2) in zip(list1, list2):
self.assertAllEqual(t1, t2)
def testConstruction(self):
spec1 = indexed_slices.IndexedSlicesSpec()
self.assertEqual(spec1._shape.rank, None)
self.assertEqual(spec1._values_dtype, dtypes.float32)
self.assertEqual(spec1._indices_dtype, dtypes.int64)
self.assertEqual(spec1._dense_shape_dtype, None)
self.assertEqual(spec1._indices_shape.as_list(), [None])
spec2 = indexed_slices.IndexedSlicesSpec([None, None], dtypes.string,
dtypes.int32, dtypes.int64, [10])
self.assertEqual(spec2._shape.as_list(), [None, None])
self.assertEqual(spec2._values_dtype, dtypes.string)
self.assertEqual(spec2._indices_dtype, dtypes.int32)
self.assertEqual(spec2._dense_shape_dtype, dtypes.int64)
self.assertEqual(spec2._indices_shape.as_list(), [10])
def testValueType(self):
spec1 = indexed_slices.IndexedSlicesSpec()
self.assertEqual(spec1.value_type, ops.IndexedSlices)
@parameterized.parameters([
(indexed_slices.IndexedSlicesSpec(),
(tensor_shape.TensorShape(None), dtypes.float32, dtypes.int64, None,
tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(shape=[5, None, None]),
(tensor_shape.TensorShape([5, None, None]), dtypes.float32,
dtypes.int64, None, tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(
dtype=dtypes.int32, dense_shape_dtype=dtypes.int64),
(tensor_shape.TensorShape(None), dtypes.int32, dtypes.int64,
dtypes.int64, tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(indices_shape=[100]),
(tensor_shape.TensorShape(None), dtypes.float32, dtypes.int64, None,
tensor_shape.TensorShape([100]))),
]) # pyformat: disable
def testSerialize(self, spec, expected):
serialization = spec._serialize()
# TensorShape has an unconventional definition of equality, so we can't use
# assertEqual directly here. But repr() is deterministic and lossless for
# the expected values, so we can use that instead.
self.assertEqual(repr(serialization), repr(expected))
@parameterized.parameters([
(indexed_slices.IndexedSlicesSpec(dtype=dtypes.string), (
tensor_spec.TensorSpec(None, dtypes.string),
tensor_spec.TensorSpec([None], dtypes.int64),
)),
(indexed_slices.IndexedSlicesSpec(
dtype=dtypes.string, dense_shape_dtype=dtypes.int32), (
tensor_spec.TensorSpec(None, dtypes.string),
tensor_spec.TensorSpec([None], dtypes.int64),
tensor_spec.TensorSpec([None], dtypes.int32),
)),
(indexed_slices.IndexedSlicesSpec(
shape=[5, 10, 15], dense_shape_dtype=dtypes.int32), (
tensor_spec.TensorSpec([None, 10, 15], dtypes.float32),
tensor_spec.TensorSpec([None], dtypes.int64),
tensor_spec.TensorSpec([3], dtypes.int32),
)),
(indexed_slices.IndexedSlicesSpec(
shape=[5, 10, 15], dense_shape_dtype=dtypes.int32,
indices_shape=[20]), (
tensor_spec.TensorSpec([20, 10, 15], dtypes.float32),
tensor_spec.TensorSpec([20], dtypes.int64),
tensor_spec.TensorSpec([3], dtypes.int32),
)),
])
def testComponentSpecs(self, spec, expected):
self.assertEqual(spec._component_specs, expected)
@parameterized.parameters([
{
"spec": indexed_slices.IndexedSlicesSpec(),
"values": [3.0, 5.0],
"indices": [5, 10]
},
{
"spec":
indexed_slices.IndexedSlicesSpec(dense_shape_dtype=dtypes.int32),
"values": [3.0, 5.0],
"indices": [5, 10],
"dense_shape": [100]
},
])
def testToFromComponents(self, spec, indices, values, dense_shape=None):
x = ops.IndexedSlices(indices, values, dense_shape)
actual_components = spec._to_components(x)
if dense_shape is None:
self.assertAllTensorsEqual(actual_components, [indices, values])
else:
self.assertAllTensorsEqual(actual_components,
[indices, values, dense_shape])
st_reconstructed = spec._from_components(actual_components)
self.assertAllEqual(x.indices, st_reconstructed.indices)
self.assertAllEqual(x.values, st_reconstructed.values)
if dense_shape is None:
self.assertIs(st_reconstructed.dense_shape, None)
else:
self.assertAllEqual(x.dense_shape, st_reconstructed.dense_shape)
@test_util.run_v1_only("IndexedSlicesValue is deprecated in v2")
def testFromNumpyComponents(self):
indices = np.array([3, 8])
values = np.array([1.0, 9.0])
dense_shape = np.array([100])
spec1 = indexed_slices.IndexedSlicesSpec(dense_shape_dtype=dtypes.int32)
st1 = spec1._from_components((values, indices, dense_shape))
self.assertIsInstance(st1, indexed_slices.IndexedSlicesValue)
self.assertAllEqual(st1.indices, indices)
self.assertAllEqual(st1.values, values)
self.assertAllEqual(st1.dense_shape, dense_shape)
spec2 = indexed_slices.IndexedSlicesSpec()
st2 = spec2._from_components((values, indices))
self.assertIsInstance(st2, indexed_slices.IndexedSlicesValue)
self.assertAllEqual(st2.indices, indices)
self.assertAllEqual(st2.values, values)
self.assertIs(st2.dense_shape, None)
class NodeDefConstructorTest(test_util.TensorFlowTestCase):
def testNoArgs(self):
nodedef = ops._NodeDef("None", "bar")
self.assertProtoEquals("op: 'None' name: 'bar'", nodedef)
def testArgs(self):
nodedef = ops._NodeDef("foo", "bar", device="/device:baz:*")
self.assertProtoEquals("op:'foo' name:'bar' device:'/device:baz:*'",
nodedef)
nodedef = ops._NodeDef("foo", "bar", device=pydev.DeviceSpec(job="j"))
self.assertProtoEquals("op:'foo' name:'bar' device:'/job:j'", nodedef)
def _apply_op(g, *args, **kwargs):
op = g.create_op(*args, **kwargs)
if len(op.outputs) == 1:
return op.outputs[0]
else:
return op.outputs
class OperationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testNoInputs(self):
op = test_ops.float_output_string_output(name="myop").a.op
self.assertEqual(2, len(op.values()))
self.assertEqual(0, len(op.inputs))
self.assertEqual("myop", op.name)
float_t, label_str_t = op.values()
self.assertEqual(dtypes.float32, float_t.dtype)
self.assertEqual(op, float_t.op)
self.assertEqual(0, float_t._value_index)
self.assertEqual(0, len(float_t.consumers()))
self.assertEqual("myop", float_t._as_node_def_input())
self.assertEqual(dtypes.string, label_str_t.dtype)
self.assertEqual(op, label_str_t.op)
self.assertEqual(1, label_str_t._value_index)
self.assertEqual(0, len(label_str_t.consumers()))
self.assertEqual("myop:1", label_str_t._as_node_def_input())
self.assertProtoEquals("op:'FloatOutputStringOutput' name:'myop'",
op.node_def)
@test_util.run_deprecated_v1
def testNoOutputs(self):
op1 = test_ops.float_output(name="myop1").op
float_t, = op1.values()
op2 = test_ops.float_input(float_t, name="myop2")
self.assertEqual(0, len(op2.values()))
self.assertEqual(1, len(op2.inputs))
self.assertIs(float_t, op2.inputs[0])
self.assertEqual(1, len(float_t.consumers()))
self.assertEqual(op2, float_t.consumers()[0])
self.assertProtoEquals("op:'FloatOutput' name:'myop1'", op1.node_def)
self.assertProtoEquals("op:'FloatInput' name:'myop2' input:'myop1'",
op2.node_def)
@test_util.run_deprecated_v1
def testInputsAndOutputs(self):
op1 = test_ops.float_output(name="myop1").op
self.assertEqual(1, len(op1.values()))
float1_t, = op1.values()
op2 = test_ops.float_output_string_output(name="myop2").a.op
self.assertEqual(2, len(op2.values()))
float2_t, label2_str_t = op2.values()
# Note that we consume label2_str_t twice here.
op3 = test_ops.foo2(float1_t, label2_str_t, label2_str_t, name="myop3").d.op
self.assertEqual(2, len(op3.values()))
self.assertEqual(1, len(float1_t.consumers()))
self.assertEqual(op3, float1_t.consumers()[0])
self.assertEqual(0, len(float2_t.consumers()))
self.assertEqual(2, len(label2_str_t.consumers()))
self.assertEqual(op3, label2_str_t.consumers()[0])
self.assertEqual(op3, label2_str_t.consumers()[1])
self.assertProtoEquals("""
op:'Foo2' name:'myop3'
input:'myop1' input:'myop2:1' input:'myop2:1'
""", op3.node_def)
def testDeviceFromNodeDef(self):
op = ops.Operation(
ops._NodeDef("None", "myop", device="/job:goo/device:GPU:0"),
ops.Graph(), [], [])
self.assertEqual("/job:goo/device:GPU:0", op.device)
def testDeviceObject(self):
op = ops.Operation(ops._NodeDef("None", "myop"), ops.Graph(), [], [])
op._set_device("/job:goo/device:GPU:0")
self.assertProtoEquals(
"op:'None' name:'myop' device:'/job:goo/device:GPU:0' ", op.node_def)
op = ops.Operation(ops._NodeDef("None", "op2"), ops.Graph(), [], [])
op._set_device(
pydev.DeviceSpec(
job="muu", device_type="CPU", device_index=0))
self.assertProtoEquals(
"op:'None' name:'op2' device:'/job:muu/device:CPU:0'", op.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = ops.Operation(
ops._NodeDef("RefOutputFloatOutput", "op1"), g, [],
[dtypes.float32_ref, dtypes.float32])
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
self.assertEquals([], list(op1.inputs))
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = ops.Operation(
ops._NodeDef("RefInputFloatInput", "op2"),
g, [ref_t, nonref_t], [],
input_types=[dtypes.float32_ref, dtypes.float32])
self.assertProtoEquals(
"op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
self.assertEquals([ref_t, nonref_t], list(op2.inputs))
op3 = ops.Operation(
ops._NodeDef("TwoFloatInputs", "op3"), g, [ref_t, nonref_t], [])
self.assertProtoEquals(
"op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testInvalidNames(self):
g = ops.Graph()
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", ""), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "_invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "-invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "/invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "invalid:0"), g)
@test_util.run_deprecated_v1
def testNoShapeFunction(self):
op = test_ops.a()
self.assertEqual(tensor_shape.unknown_shape(), op.get_shape())
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedArray(self):
values = [[2], [3], [5], [7]]
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
def testShapeTuple(self):
with self.cached_session():
c = constant_op.constant(1)
self.assertEqual(c._shape_tuple(), ()) # pylint: disable=protected-access
def testConvertToTensorEager(self):
with context.eager_mode():
t = constant_op.constant(1)
self.assertTrue(isinstance(t, ops.EagerTensor))
converted = ops.convert_to_tensor(t)
self.assertTrue(isinstance(converted, ops.EagerTensor))
converted = ops.convert_to_tensor(1)
self.assertTrue(isinstance(converted, ops.EagerTensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedTuple(self):
values = ((2,), (3,), (5,), (7,))
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(ops.convert_to_tensor(values)))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedTensors(self):
values = ((2,), (3,), (5,), (7,))
tensor = ops.convert_to_tensor(
[constant_op.constant(row) for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
tensor = ops.convert_to_tensor(
[[constant_op.constant(v) for v in row] for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedMix(self):
values = ([2], (3,), [constant_op.constant(5)], constant_op.constant([7]))
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(((2,), (3,), (5,), (7,)), self.evaluate(tensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorPreferred(self):
values = [2, 3, 5, 7]
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.float32)
self.assertEqual(dtypes.float32, tensor.dtype)
# Convert empty tensor to anything.
values = []
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
self.assertEqual(dtypes.int64, tensor.dtype)
    # The preferred dtype cannot represent this value (a type mismatch), so the
    # conversion falls back to float32 instead.
values = [1.23]
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
self.assertEqual(dtypes.float32, tensor.dtype)
@test_util.run_in_graph_and_eager_modes
def testConvertToInvalidTensorType(self):
with self.assertRaises(TypeError):
# Forcing an invalid dtype should fail with a type error.
values = [1.23]
ops.convert_to_tensor(values, dtype=dtypes.int64)
@test_util.run_in_graph_and_eager_modes
def testConvertToLongLongTensorType(self):
tensor = ops.convert_to_tensor(
# Get a numpy array of dtype NPY_LONGLONG
np.prod(constant_op.constant([1])._shape_tuple()))
self.assertEqual(dtypes.int64, tensor.dtype)
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorFromInvalidTensor(self):
tensor = constant_op.constant(42.0, dtype=dtypes.float32)
with self.assertRaises(ValueError):
ops.convert_to_tensor(tensor, dtype=dtypes.int32)
@test_util.run_deprecated_v1
def testNoConvert(self):
# Operation cannot be converted to Tensor.
op = control_flow_ops.no_op()
with self.assertRaisesRegexp(TypeError,
r"Can't convert Operation '.*' to Tensor"):
ops.convert_to_tensor(op)
def testStr(self):
node_def = ops._NodeDef("None", "op1")
op = ops.Operation(node_def, ops.Graph(), [], [dtypes.float32])
self.assertEqual(str(node_def), str(op))
def testRepr(self):
op = ops.Operation(
ops._NodeDef("None", "op1"), ops.Graph(), [], [dtypes.float32])
self.assertEqual("<tf.Operation 'op1' type=None>", repr(op))
@test_util.run_deprecated_v1
def testGetAttr(self):
op = test_ops.default_attrs()
self.assertEqual(op.get_attr("string_val"), b"abc")
self.assertEqual(op.get_attr("string_list_val"), [b"abc", b""])
self.assertEqual(op.get_attr("int_val"), 123)
self.assertEqual(op.get_attr("int_list_val"), [1, 2, 3])
self.assertEqual(op.get_attr("float_val"), 10.0)
self.assertEqual(op.get_attr("float_list_val"), [10.0])
self.assertEqual(op.get_attr("bool_val"), True)
self.assertEqual(op.get_attr("bool_list_val"), [True, False])
self.assertEqual(op.get_attr("shape_val"),
tensor_shape.as_shape([2, 1]).as_proto())
self.assertEqual(op.get_attr("shape_list_val"),
[tensor_shape.as_shape([]).as_proto(),
tensor_shape.as_shape([1]).as_proto()])
self.assertEqual(op.get_attr("tensor_val"),
tensor_util.make_tensor_proto(1, dtypes.int32))
self.assertEqual(op.get_attr("tensor_list_val"),
[tensor_util.make_tensor_proto(1, dtypes.int32)])
type_val = op.get_attr("type_val")
# First check that type_val is a DType, because the assertEquals will work
# no matter what since DType overrides __eq__
self.assertIsInstance(type_val, dtypes.DType)
self.assertEqual(type_val, dtypes.int32)
type_list_val = op.get_attr("type_list_val")
self.assertTrue(all(isinstance(x, dtypes.DType) for x in type_list_val))
self.assertEqual(type_list_val, [dtypes.int32, dtypes.float32])
@function.Defun(dtypes.float32, func_name="MyFunc")
def func(x):
return x
op = test_ops.func_attr(func)
self.assertEqual(op.get_attr("f"),
attr_value_pb2.NameAttrList(name="MyFunc"))
# Try fetching missing attr
with self.assertRaisesRegexp(
ValueError, "Operation 'FuncAttr' has no attr named 'FakeAttr'."):
op.get_attr("FakeAttr")
# TODO(b/65162920): remove this test when users who are directly mutating the
# node_def have been updated to proper usage.
@test_util.run_deprecated_v1
def testSetAttr(self):
op = test_ops.int_attr().op
op._set_attr("foo", attr_value_pb2.AttrValue(i=2))
# TODO(skyewm): add node_def check
self.assertEqual(op.get_attr("foo"), 2)
# TODO(nolivia): test all error cases
def testAddControlInput(self):
with ops.Graph().as_default():
x = constant_op.constant(1).op
y = constant_op.constant(2).op
z = constant_op.constant(3).op
z._add_control_input(x) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x])
z._add_control_input(x) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x])
z._add_control_inputs([x, y, y]) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x, y])
self.assertEqual(x._control_outputs, [z])
@test_util.run_deprecated_v1
def testRemoveAllControlInputs(self):
a = constant_op.constant(1)
with ops.control_dependencies([a]):
b = constant_op.constant(2)
c = constant_op.constant(3)
d = constant_op.constant(4)
e = constant_op.constant(5)
with ops.control_dependencies([a, c]):
f = d + e
self.assertEqual(a.op.control_inputs, [])
self.assertEqual(b.op.control_inputs, [a.op])
self.assertEqual(f.op.control_inputs, [a.op, c.op])
a.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(a.op.control_inputs, [])
b.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(b.op.control_inputs, [])
f.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(f.op.control_inputs, [])
self.assertEqual(list(f.op.inputs), [d, e])
@test_util.run_deprecated_v1
def testControlInputCycle(self):
graph = ops.Graph()
with graph.as_default():
z = constant_op.constant(0)
x = constant_op.constant(1)
y = constant_op.constant(2)
y.op._add_control_input(z.op) # pylint: disable=protected-access
y.op._add_control_input(x.op) # pylint: disable=protected-access
x.op._add_control_input(y.op) # pylint: disable=protected-access
with self.session(graph=graph) as sess:
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Graph is invalid, contains a cycle with 2 nodes"):
self.evaluate(x)
def testUpdateInput(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = x + y
z.op._update_input(0, y) # pylint: disable=protected-access
self.assertEquals(list(z.op.inputs), [y, y])
self.assertEquals(x.consumers(), [])
self.assertEquals(y.consumers(), [z.op, z.op])
with session.Session(graph=g) as sess:
self.assertEquals(self.evaluate(z), 4)
z.op._update_input(0, x) # pylint: disable=protected-access
self.assertEquals(list(z.op.inputs), [x, y])
self.assertEquals(x.consumers(), [z.op])
self.assertEquals(y.consumers(), [z.op])
with session.Session(graph=g) as sess:
self.assertEquals(self.evaluate(z), 3)
z.op._update_input(1, y) # pylint: disable=protected-access
self.assertEquals(list(z.op.inputs), [x, y])
self.assertEquals(x.consumers(), [z.op])
self.assertEquals(y.consumers(), [z.op])
with session.Session(graph=g) as sess:
self.assertEquals(self.evaluate(z), 3)
def testUpdateInputGraphError(self):
g_0 = ops.Graph()
g_1 = ops.Graph()
with g_0.as_default():
x = constant_op.constant(1)
with g_1.as_default():
y = constant_op.constant(2)
z = y * 2
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
z.op._update_input(0, x) # pylint: disable=protected-access
def testUpdateInputTypeError(self):
g = ops.Graph()
with g.as_default():
w = constant_op.constant(0)
x = constant_op.constant("")
y = constant_op.constant(1)
z = y + w
z.op._update_input(0, x) # pylint: disable=protected-access
with session.Session(graph=g) as sess:
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Input 0 of node add was passed string from Const_1:0 incompatible "
"with expected int32"):
self.evaluate(z)
def testUpdateInputShapeError(self):
g = ops.Graph()
with g.as_default():
w = constant_op.constant(2, shape=[3, 1])
x = constant_op.constant(0, shape=[3, 1])
y = constant_op.constant(1, shape=[2, 2])
z = w + x
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"Cannot update edge, incompatible shapes: \[2,2\] and \[3,1\]"):
z.op._update_input(0, y) # pylint: disable=protected-access
def testUpdateInputOutOfRange(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
with self.assertRaisesRegexp(
errors.OutOfRangeError,
r"Cannot update edge. Input index \[1\] is greater than the number of "
r"total inputs \[0\]."
):
x.op._update_input(1, x) # pylint: disable=protected-access
@test_util.enable_control_flow_v2
@test_util.run_v1_only("b/120545219")
def testAddWhileInput(self):
if forward_compat.forward_compatible(2019, 8, 23):
@eager_function.defun
def test():
output = control_flow_ops.while_loop(lambda x: x < 3, lambda x: x + 1,
[1])
while_op = output.op
self.assertEqual(while_op.type, "StatelessWhile")
orig_num_inputs = len(while_op.inputs)
# Make sure we can handle the while op having a control input.
while_op._add_control_input(constant_op.constant(0).op)
new_input1 = constant_op.constant(1.0)
new_input2 = constant_op.constant(True)
# Clear output shapes to bypass shape checking.
while_op._set_shape_list_attr("output_shapes", [])
while_op._set_type_list_attr("T",
[t.dtype for t in while_op.inputs] +
[new_input1.dtype, new_input2.dtype])
while_op._add_while_inputs([new_input1, new_input2])
# Can't add an edge beyond what's specified by "T"
with self.assertRaises(errors.OutOfRangeError):
while_op._add_while_inputs([new_input2])
self.assertEqual(len(while_op.inputs), orig_num_inputs + 2) # pylint: disable=g-deprecated-assert
test()
@test_util.run_deprecated_v1
def testOpDef(self):
x = constant_op.constant(0)
y = constant_op.constant(1)
z = x + y
self.assertEqual(x.op.op_def.name, "Const")
self.assertEqual(len(x.op.op_def.input_arg), 0)
self.assertEqual(len(x.op.op_def.output_arg), 1)
self.assertRegexpMatches(z.op.op_def.name, "Add(V2)?")
self.assertEqual(len(z.op.op_def.input_arg), 2)
self.assertEqual(len(z.op.op_def.output_arg), 1)
def testInputFromDifferentGraphError(self):
g_0 = ops.Graph()
g_1 = ops.Graph()
with g_0.as_default():
x = constant_op.constant(1)
with g_1.as_default():
y = constant_op.constant(2)
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
y * x # pylint: disable=pointless-statement
def testInputsAreImmutable(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
op = test_ops.int_input_int_output(x, name="myop").op
with self.assertRaisesRegexp(
AttributeError, "'_InputList' object has no attribute 'append'"):
op.inputs.append(None)
class CreateOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
op1 = g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
with g.device("/device:GPU:0"):
op2 = g.create_op(
"FloatOutputStringOutput", [], [dtypes.float32, dtypes.string], None,
name="myop2")
op3 = g.create_op(
"Foo3",
[list(op1.values())[0], list(op2.values())[1], list(op2.values())[0]],
[dtypes.float32, dtypes.int32],
None,
name="myop3")
self.assertDeviceEqual(None, op1.device)
self.assertDeviceEqual("/device:GPU:0", op2.device)
self.assertDeviceEqual(None, op3.device)
self.assertProtoEquals("name:'myop1' op:'FloatOutput'", op1.node_def)
self.assertProtoEquals(
"name:'myop2' op:'FloatOutputStringOutput' device:'/device:GPU:0'",
op2.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'Foo3'",
op3.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = g.create_op(
"RefOutputFloatOutput", [], [dtypes.float32_ref, dtypes.float32],
name="op1")
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = g.create_op(
"RefInputFloatInput", [ref_t, nonref_t], [],
input_types=[dtypes.float32_ref, dtypes.float32],
name="op2")
self.assertProtoEquals(
"op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
op3 = g.create_op("TwoFloatInputs", [ref_t, nonref_t], [], name="op3")
self.assertProtoEquals(
"op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testFinalized(self):
g = ops.Graph()
g.finalize()
with self.assertRaises(RuntimeError):
g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
# Test unfinalize.
g._unsafe_unfinalize()
g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
# NOTE(skyewm): these cases test the private Graph._create_op_from_tf_operation
# method. Arguably we should only test the public APIs that depend on this
# method. However, this logic is complex and tricky, and it can be difficult to
# ascertain if we have adequate coverage (e.g. a graph may run successfully if
# the control flow context isn't set properly, but a more complicated use case
# that might not be obvious to test will fail). Thus we instead explicitly test
# the low-level behavior.
class CreateOpFromTFOperationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
c_op = ops._create_c_op(
g, ops._NodeDef("IntInputIntOutput", "myop"), [x], [])
op = g._create_op_from_tf_operation(c_op)
self.assertEqual(op.name, "myop")
self.assertEqual(op.type, "IntInputIntOutput")
self.assertEqual(len(op.outputs), 1)
self.assertEqual(op.outputs[0].shape, tensor_shape.unknown_shape())
self.assertEqual(list(op.inputs), [x])
self.assertEqual(op.control_inputs, [])
self.assertEqual(op.graph, g)
self.assertEqual(x.consumers(), [op])
self.assertIsNotNone(op.traceback)
self.assertEqual(g.get_operation_by_name("myop"), op)
self.assertEqual(g.get_tensor_by_name("myop:0"), op.outputs[0])
def testShape(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
c_op = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), [x], [])
op = g._create_op_from_tf_operation(c_op)
self.assertEqual(op.name, "myop")
self.assertEqual(op.type, "Identity")
self.assertEqual(len(op.outputs), 1)
self.assertEqual(op.outputs[0].shape, tensor_shape.TensorShape([2, 3]))
def testUniqueName(self):
g = ops.Graph()
with g.as_default():
c_op = ops._create_c_op(g, ops._NodeDef("IntOutput", "myop"), [], [])
c_op2 = ops._create_c_op(g, ops._NodeDef("IntOutput", "myop_1"), [], [])
op = g._create_op_from_tf_operation(c_op)
op2 = g._create_op_from_tf_operation(c_op2)
# Create ops with same names as op1 and op2. We expect the new names to be
# uniquified.
op3 = test_ops.int_output(name="myop").op
op4 = test_ops.int_output(name="myop_1").op
self.assertEqual(op.name, "myop")
self.assertEqual(op2.name, "myop_1")
self.assertEqual(op3.name, "myop_2")
self.assertEqual(op4.name, "myop_1_1")
@test_util.run_v1_only("b/120545219")
def testCond(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def true_fn():
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "cond/myop"), [x], [])
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return x
control_flow_ops.cond(x < 10, true_fn, lambda: x)
op = g.get_operation_by_name("cond/myop")
self.assertIsNotNone(op)
self.assertEqual(op.name, "cond/myop")
self.assertEqual(op.type, "IntInput")
self.assertEqual(op.outputs, [])
op_input = op.inputs[0].op
self.assertEqual(op_input.type, "Switch")
self.assertEqual(op_input.inputs[0], x)
self.assertEqual(op.graph, g)
# pylint: disable=protected-access
self.assertIsNotNone(op._get_control_flow_context())
self.assertEqual(op._get_control_flow_context().name,
"cond/cond_text")
# pylint: enable=protected-access
@test_util.run_v1_only("b/120545219")
def testWhileLoop(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def body(i):
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
self.assertEqual(op.name, "myloop/myop")
self.assertEqual(op.type, "IntInput")
self.assertEqual(op.outputs, [])
op_input = op.inputs[0].op
self.assertEqual(op_input.type, "Enter")
self.assertEqual(list(op_input.inputs), [x])
self.assertEqual(op.graph, g)
# pylint: disable=protected-access
self.assertIsNotNone(op._get_control_flow_context())
self.assertEqual(op._get_control_flow_context().name,
"myloop/while_context")
# pylint: enable=protected-access
@test_util.run_v1_only("b/120545219")
def testWhileLoopWithInternalControlDep(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def body(i):
c = constant_op.constant(1.0, name="c")
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
with ops.control_dependencies([c]):
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
c = g.get_operation_by_name("myloop/c")
self.assertIsNotNone(c)
# Internal control dep is preserved
self.assertEqual(op.control_inputs, [c])
@test_util.run_v1_only("b/120545219")
def testWhileLoopWithExternalControlDep(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
c = constant_op.constant(1.0)
def body(i):
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
with ops.control_dependencies([c]):
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
# External control dep is removed and replaced with internal control dep
self.assertNotEqual(op.control_inputs[0], c.op)
self.assertIsNotNone(op.control_inputs[0]._get_control_flow_context())
class ApplyOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
t1 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop1")
with g.device("/device:GPU:0"):
t2 = _apply_op(
g, "TwoIntOutputs", [], [dtypes.int32, dtypes.int32], name="myop2")
t3 = _apply_op(
g,
"Foo1", [t1, t2[1], t2[0]], [dtypes.float32, dtypes.int32],
name="myop3")
self.assertTrue(isinstance(t1, ops.Tensor))
self.assertTrue(isinstance(t2, list))
self.assertTrue(isinstance(t3, list))
self.assertTrue(isinstance(t3[0], ops.Tensor))
self.assertEqual("myop1", t1._as_node_def_input())
self.assertEqual("myop2", t2[0]._as_node_def_input())
self.assertEqual("myop2:1", t2[1]._as_node_def_input())
self.assertEqual("myop3", t3[0]._as_node_def_input())
# Validate that we got the right ops as well
self.assertProtoEquals("name:'myop1' op:'FloatOutput'", t1.op.node_def)
self.assertProtoEquals(
"name:'myop2' op:'TwoIntOutputs' device:'/device:GPU:0'",
t2[0].op.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'Foo1'",
t3[0].op.node_def)
def testReferenceInput(self):
g = ops.Graph()
ref_t, nonref_t = _apply_op(
g, "RefOutputFloatOutput", [], [dtypes.float32_ref, dtypes.float32],
name="op1")
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'",
ref_t.op.node_def)
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
out_2 = _apply_op(
g,
"RefInputFloatInputIntOutput", [ref_t, nonref_t], [dtypes.int32],
input_types=[dtypes.float32_ref, dtypes.float32],
name="op2")
self.assertProtoEquals(
"op:'RefInputFloatInputIntOutput' name:'op2' input:'op1' input:'op1:1'",
out_2.op.node_def)
out_3 = _apply_op(
g, "TwoFloatInputsIntOutput", [ref_t, nonref_t], [dtypes.int32],
name="op3")
self.assertProtoEquals(
"op:'TwoFloatInputsIntOutput' name:'op3' input:'op1' input:'op1:1'",
out_3.op.node_def)
class NameStackTest(test_util.TensorFlowTestCase):
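  """Tests for Graph.unique_name and the name scope stack."""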
def testBasics(self):
g = ops.Graph()
self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("foo_1", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_1", g.unique_name("foo"))
self.assertEqual("foo_2", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_2", g.unique_name("foo"))
self.assertEqual("foo_1_1", g.unique_name("foo_1", mark_as_used=False))
self.assertEqual("foo_1_1", g.unique_name("foo_1"))
self.assertEqual("foo_1_2", g.unique_name("foo_1", mark_as_used=False))
self.assertEqual("foo_1_2", g.unique_name("foo_1"))
self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2", mark_as_used=False))
self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2"))
with g.name_scope("bar"):
self.assertEqual("bar/foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("bar/foo", g.unique_name("foo"))
self.assertEqual("bar/foo_1", g.unique_name("foo", mark_as_used=False))
self.assertEqual("bar/foo_1", g.unique_name("foo"))
with g.name_scope(None):
self.assertEqual("foo_3", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_3", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEqual(
"bar/baz/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz/foo", g.unique_name("foo"))
self.assertEqual(
"bar/baz/foo_1", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz/foo_1", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEqual(
"bar/baz_1/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz_1/foo", g.unique_name("foo"))
self.assertEqual(
"bar/baz_1/foo_1", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz_1/foo_1", g.unique_name("foo"))
with g.name_scope("quux"):
self.assertEqual("quux/foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("quux/foo", g.unique_name("foo"))
with g.name_scope("bar"):
with g.name_scope("baz"):
self.assertEqual(
"bar_1/baz/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar_1/baz/foo", g.unique_name("foo"))
self.assertEqual("foo_4", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_4", g.unique_name("foo"))
self.assertEqual("bar_2", g.unique_name("bar", mark_as_used=False))
self.assertEqual("bar_2", g.unique_name("bar"))
@test_util.run_deprecated_v1
def testNameAndVariableScope(self):
with self.cached_session() as sess:
with sess.graph.name_scope("l0"):
with variable_scope.variable_scope("l1"):
with sess.graph.name_scope("l1") as scope:
self.assertEqual("l0/l1/l1/", scope)
self.assertEqual(
"l0/l1/l1/foo",
sess.graph.unique_name(
"foo", mark_as_used=False))
self.assertEqual("l0/l1/l1/foo", sess.graph.unique_name("foo"))
with sess.graph.name_scope("l2") as scope:
self.assertEqual("l0/l1/l2/", scope)
self.assertEqual(
"l0/l1/l2/foo",
sess.graph.unique_name(
"foo", mark_as_used=False))
self.assertEqual("l0/l1/l2/foo", sess.graph.unique_name("foo"))
def testOutOfOrderUniqueName(self):
g = ops.Graph()
self.assertEqual("foo_2", g.unique_name("foo_2"))
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("foo_1", g.unique_name("foo"))
self.assertEqual("foo_3", g.unique_name("foo"))
def testUniqueNameCaseInsensitivity(self):
g = ops.Graph()
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("Foo_1", g.unique_name("Foo"))
with g.name_scope("bar"):
self.assertEqual("bar/foo", g.unique_name("foo"))
with g.name_scope("Bar"):
self.assertEqual("Bar_1/foo", g.unique_name("foo"))
def testInvalidNameRaisesError(self):
g = ops.Graph()
with g.name_scope(""): # Should not raise
pass
with g.name_scope("foo/"): # Should not raise
with g.name_scope("_bar"): # Should not raise
pass
with self.assertRaises(ValueError):
with g.name_scope("foo:0"):
pass
with self.assertRaises(ValueError):
with g.name_scope("_bar"):
pass
class NameTest(test_util.TensorFlowTestCase):
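  """Tests for generated op names and Graph.name_scope."""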
def testGenerateName(self):
g = ops.Graph()
op0 = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32])
self.assertEqual("TwoFloatOutputs", op0.name)
self.assertEqual("TwoFloatOutputs:0", op0.outputs[0].name)
self.assertEqual("TwoFloatOutputs:1", op0.outputs[1].name)
op1 = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertEqual("FloatOutput", op1.name)
self.assertEqual("FloatOutput:0", op1.outputs[0].name)
op2 = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertEqual("FloatOutput_1", op2.name)
self.assertEqual("FloatOutput_1:0", op2.outputs[0].name)
op3 = g.create_op("FloatOutput", [], [dtypes.float32], name="my_op")
self.assertEqual("my_op", op3.name)
self.assertEqual("my_op:0", op3.outputs[0].name)
def testNameScope(self):
g = ops.Graph()
with g.name_scope("foo") as foo:
self.assertEqual("foo/", foo)
with g.name_scope("foo2") as foo2:
self.assertEqual("foo/foo2/", foo2)
with g.name_scope(None) as empty1:
self.assertEqual("", empty1)
with g.name_scope("foo3") as foo3:
self.assertEqual("foo3/", foo3)
with g.name_scope("") as empty2:
self.assertEqual("", empty2)
self.assertEqual("FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
with g.name_scope("bar") as scope:
self.assertEqual("bar/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
self.assertEqual("bar/FloatOutput_1",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
      # If you use the value from "with .. as", that value is used as-is.
self.assertEqual(
"bar", g.create_op(
"FloatOutput", [], [dtypes.float32], name=scope).name)
with g.name_scope("baz") as scope:
with g.name_scope("quux"):
self.assertEqual("baz/quux/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
# If you use the value from the enclosing "with .. as", nothing is pushed.
with g.name_scope(scope):
self.assertEqual("baz/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
self.assertEqual(
"baz", g.create_op(
"FloatOutput", [], [dtypes.float32], name=scope).name)
self.assertEqual(
"trailing",
g.create_op(
"FloatOutput", [], [dtypes.float32], name="trailing/").name)
with g.name_scope("bar"):
self.assertEqual("bar_1/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
with g.name_scope("bar/"):
self.assertEqual("bar/FloatOutput_2",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
class DeviceTest(test_util.TensorFlowTestCase):
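  """Tests for device placement via Graph.device and device functions."""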
def testNoDevice(self):
g = ops.Graph()
op = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertDeviceEqual(None, op.device)
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput" }
""", gd)
def testEagerBackingDevice(self):
with context.eager_mode():
with ops.device("/device:CPU:0"):
t = constant_op.constant(1.0)
self.assertRegexpMatches(t.device, "/device:CPU:0")
self.assertRegexpMatches(t.backing_device, "/device:CPU:0")
def testDevicePartialString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testDeviceFull(self):
g = ops.Graph()
with g.device(
pydev.DeviceSpec(
job="worker", replica=2, task=0, device_type="CPU",
device_index=3)):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/task:0/device:CPU:3" }
""", gd)
def testNesting(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:3/task:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testNestingString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:3/task:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testNestingOverrideGpuCpu(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:2/device:GPU:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:2/device:GPU:2" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def testNestingWithMergeDeviceFunction(self):
g = ops.Graph()
with g.device(pydev.merge_device("/device:GPU:0")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:worker")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/device:CPU:0")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:ps")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device(None)):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/device:GPU:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/device:CPU:0" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
""", gd)
def testNestingWithDeviceStrings(self):
g = ops.Graph()
with g.device("/device:GPU:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:ps"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(""):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/device:GPU:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/device:CPU:0" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
""", gd)
def testNestingWithDeviceStringWildcard(self):
g = ops.Graph()
with g.device("/device:GPU:7"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:GPU:*"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:*"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:5"):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:7" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/device:GPU:7" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/device:CPU:*" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/device:CPU:5" }
""", gd)
def testNestingErrorGraph(self):
g = ops.Graph()
scope = g.device("/device:GPU:8")
scope.__enter__()
with g.device("/device:GPU:9"):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
def testNestingErrorEager(self):
with context.eager_mode():
scope = ops.device("/device:CPU:0")
scope.__enter__()
with ops.device(None):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
def testNoneClearsDefault(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def testNoneIgnoresOuterDeviceFunction(self):
g = ops.Graph()
with g.device(lambda op: "/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def _overwritingDeviceFunction(self, unused_op):
# This device function unconditionally overwrites the device of ops.
#
# NOTE(mrry): Writing device functions like this is not
# recommended. Instead, in most cases you should use
# `pydev.merge_device("/job:ps")` or simply `"/job:ps"` as the
# argument to `tf.device()` and the device component will be merged in.
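    #
    # For illustration only (hedged sketch, not executed by these tests), the
    # recommended merging pattern looks like:
    #   with g.device(pydev.merge_device("/job:ps")):
    #     g.create_op("FloatOutput", [], [dtypes.float32])
    # which preserves any replica/task/device fields already in scope rather
    # than overwriting them.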
return "/job:overwrite"
def testOverwritingBehavior(self):
g = ops.Graph()
with g.device(self._overwritingDeviceFunction):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:ps"): # Will be overwritten.
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:ps")): # Will be overwritten.
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None): # Disables overwriting device function
with g.device("/job:ps"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None): # Disables overwriting device function
with g.device(pydev.merge_device("/job:ps")):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps" }
""", gd)
class MultithreadedGraphStateTest(test_util.TensorFlowTestCase):
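  """Tests that thread-local graph state does not leak across threads."""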
class TestThread(threading.Thread):
def __init__(self, graph, replica_id):
super(MultithreadedGraphStateTest.TestThread, self).__init__()
self._graph = graph
self._replica_id = replica_id
      # This thread sets this event once it has mutated the graph. The caller
      # can wait for that.
self.has_mutated_graph = threading.Event()
      # This thread waits until it should continue. The caller sets this
      # event.
self.should_continue = threading.Event()
def run(self):
# Mutate a graph's stack, then set `has_mutated_graph`, then wait for
# `should_continue`, then add an op to the graph affected by the graph's
# stack.
raise NotImplementedError("must be implemented in descendants")
def testDeviceFunctionStack(self):
class DeviceSettingThread(self.TestThread):
def run(self):
with g.device("/job:worker/replica:{}".format(self._replica_id)):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
    # If `switch_to_thread_local()` isn't called, then the device placement of
    # the ops below is not deterministic.
g.switch_to_thread_local()
threads = [DeviceSettingThread(g, i) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput_0" op: "FloatOutput"
device: "/job:worker/replica:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:1" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testColocateWith(self):
class ColocatingThread(self.TestThread):
def __init__(self, graph, replica_id, op_to_colocate_with):
super(ColocatingThread, self).__init__(graph, replica_id)
self._op_to_colocate_with = op_to_colocate_with
def run(self):
with g.colocate_with(self._op_to_colocate_with):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
ops_to_colocate_with = []
for i in range(3):
with g.device("/job:worker/replica:{}".format(i)):
ops_to_colocate_with.append(
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="ColocateWithMe_{}".format(i)))
    # If `switch_to_thread_local()` isn't called, then the `device` and `attr`
    # values for the ops below are not deterministic.
g.switch_to_thread_local()
threads = [
ColocatingThread(g, i, ops_to_colocate_with[i]) for i in range(3)
]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "ColocateWithMe_0" op: "FloatOutput"
device: "/job:worker/replica:0" }
node { name: "ColocateWithMe_1" op: "FloatOutput"
device: "/job:worker/replica:1" }
node { name: "ColocateWithMe_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_0" op: "FloatOutput"
device: "/job:worker/replica:0"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_0"}}}}
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:1"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_1"}}}}
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_2"}}}}
""", gd)
def testControlDependencies(self):
class DependingThread(self.TestThread):
def __init__(self, graph, replica_id, dependency_op):
super(DependingThread, self).__init__(graph, replica_id)
self._dependency_op = dependency_op
def run(self):
with g.control_dependencies([self._dependency_op]):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
dependency_ops = []
for i in range(3):
dependency_ops.append(
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="ColocateWithMe_{}".format(i)))
    # If `switch_to_thread_local()` isn't called, then the `input` values for
    # the ops below are not deterministic.
g.switch_to_thread_local()
threads = [DependingThread(g, i, dependency_ops[i]) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "ColocateWithMe_0" op: "FloatOutput" }
node { name: "ColocateWithMe_1" op: "FloatOutput" }
node { name: "ColocateWithMe_2" op: "FloatOutput" }
node { name: "FloatOutput_0" op: "FloatOutput"
input: "^ColocateWithMe_0" }
node { name: "FloatOutput_1" op: "FloatOutput"
input: "^ColocateWithMe_1" }
node { name: "FloatOutput_2" op: "FloatOutput"
input: "^ColocateWithMe_2" }
""", gd)
def testNameStack(self):
class NameSettingThread(self.TestThread):
def run(self):
with g.name_scope("foo"):
op1 = g.create_op("FloatOutput", [], [dtypes.float32])
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
op2 = g.create_op("FloatOutput", [], [dtypes.float32])
self.result = (op1, op2)
g = ops.Graph()
threads = [NameSettingThread(g, i) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
suffixes = ["", "_1", "_2"]
for t, s in zip(threads, suffixes):
      self.assertEqual("foo" + s + "/FloatOutput", t.result[0].name)
      self.assertEqual("foo" + s + "/FloatOutput_1", t.result[1].name)
class ObjectWithName(object):
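  """Simple object exposing a name property, used in collection scoping tests."""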
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
class CollectionTest(test_util.TensorFlowTestCase):
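  """Tests for graph collections and collection references."""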
def test_get_collections(self):
g = ops.Graph()
self.assertSequenceEqual(g.collections, [])
g.add_to_collection("key", 12)
g.add_to_collection("key", 15)
self.assertSequenceEqual(g.collections, ["key"])
g.add_to_collection("other", "foo")
self.assertSequenceEqual(sorted(g.collections), ["key", "other"])
self.assertSequenceEqual(
sorted(g.get_all_collection_keys()), ["key", "other"])
def test_add_to_collection(self):
g = ops.Graph()
g.add_to_collection("key", 12)
g.add_to_collection("other", "foo")
g.add_to_collection("key", 34)
    # Note that only blank1 is returned by the scoped get_collection()
    # lookups below.
g.add_to_collection("blah", 27)
blank1 = ObjectWithName("prefix/foo")
g.add_to_collection("blah", blank1)
blank2 = ObjectWithName("junk/foo")
g.add_to_collection("blah", blank2)
self.assertEqual([12, 34], g.get_collection("key"))
self.assertEqual([], g.get_collection("nothing"))
self.assertEqual([27, blank1, blank2], g.get_collection("blah"))
self.assertEqual([blank1], g.get_collection("blah", "prefix"))
self.assertEqual([blank1], g.get_collection("blah", ".*x"))
# Make sure that get_collection() returns a first-level
# copy of the collection, while get_collection_ref() returns
# the original list.
other_collection_snapshot = g.get_collection("other")
other_collection_ref = g.get_collection_ref("other")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo"], other_collection_ref)
g.add_to_collection("other", "bar")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo", "bar"], other_collection_ref)
self.assertEqual(["foo", "bar"], g.get_collection("other"))
self.assertTrue(other_collection_ref is g.get_collection_ref("other"))
# Verify that getting an empty collection ref returns a modifiable list.
empty_coll_ref = g.get_collection_ref("empty")
self.assertEqual([], empty_coll_ref)
empty_coll = g.get_collection("empty")
self.assertEqual([], empty_coll)
self.assertFalse(empty_coll is empty_coll_ref)
empty_coll_ref2 = g.get_collection_ref("empty")
self.assertTrue(empty_coll_ref2 is empty_coll_ref)
# Add to the collection.
empty_coll_ref.append("something")
self.assertEqual(["something"], empty_coll_ref)
self.assertEqual(["something"], empty_coll_ref2)
self.assertEqual([], empty_coll)
self.assertEqual(["something"], g.get_collection("empty"))
empty_coll_ref3 = g.get_collection_ref("empty")
self.assertTrue(empty_coll_ref3 is empty_coll_ref)
def test_add_to_collections_uniquify(self):
g = ops.Graph()
g.add_to_collections([1, 2, 1], "key")
# Make sure "key" is not added twice
self.assertEqual(["key"], g.get_collection(1))
def test_add_to_collections_from_list(self):
g = ops.Graph()
g.add_to_collections(["abc", "123"], "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_tuple(self):
g = ops.Graph()
g.add_to_collections(("abc", "123"), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_generator(self):
g = ops.Graph()
def generator():
yield "abc"
yield "123"
g.add_to_collections(generator(), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_set(self):
g = ops.Graph()
g.add_to_collections(set(["abc", "123"]), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_string(self):
g = ops.Graph()
g.add_to_collections("abc", "key")
self.assertEqual(["key"], g.get_collection("abc"))
def test_default_graph(self):
with ops.Graph().as_default():
ops.add_to_collection("key", 90)
ops.add_to_collection("key", 100)
# Collections are ordered.
self.assertEqual([90, 100], ops.get_collection("key"))
def test_defun(self):
with context.eager_mode():
@eager_function.defun
def defun():
ops.add_to_collection("int", 1)
ops.add_to_collection("tensor", constant_op.constant(2))
@eager_function.defun
def inner_defun():
self.assertEqual(ops.get_collection("int"), [1])
three = ops.get_collection("tensor")[0] + ops.get_collection("int")[0]
ops.add_to_collection("int", 2)
self.assertEqual(ops.get_collection("int"), [1, 2])
ops.add_to_collection("foo", "bar")
self.assertEqual(ops.get_collection("foo"), ["bar"])
return three
self.assertEqual(ops.get_collection("int"), [1])
three = inner_defun()
self.assertEqual(ops.get_collection("int"), [1])
self.assertEqual(ops.get_collection("foo"), [])
return three
three = defun()
self.assertEqual(three.numpy(), 3)
ops.NotDifferentiable("FloatOutput")
@ops.RegisterGradient("CopyOp")
def _CopyGrad(op, x_grad): # pylint: disable=invalid-name
_ = op
return x_grad
@ops.RegisterGradient("copy_override")
def _CopyOverrideGrad(op, x_grad): # pylint: disable=invalid-name
_ = op
return x_grad
class RegistrationTest(test_util.TensorFlowTestCase):
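  """Tests for gradient function registration and overrides."""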
@test_util.run_deprecated_v1
def testRegisterGradients(self):
x = test_ops.float_output()
y = test_ops.copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEqual(_CopyGrad, fn)
def testOverrideGradients(self):
g = ops.Graph()
with g.as_default():
x = test_ops.float_output()
with g.gradient_override_map({"CopyOp": "copy_override"}):
y = test_ops.copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEqual(_CopyOverrideGrad, fn)
def testNonExistentOverride(self):
g = ops.Graph()
with g.as_default():
x = test_ops.float_output()
with g.gradient_override_map({"CopyOp": "unknown_override"}):
y = test_ops.copy_op(x)
with self.assertRaisesRegexp(LookupError, "unknown_override"):
ops.get_gradient_function(y.op)
class ComparisonTest(test_util.TensorFlowTestCase):
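  """Tests that tensors support membership (in) comparisons."""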
def testMembershipAllowed(self):
g = ops.Graph()
t1 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop1")
t2 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop2")
self.assertTrue(isinstance(t1, ops.Tensor))
self.assertTrue(isinstance(t2, ops.Tensor))
self.assertTrue(t1 in [t1])
self.assertTrue(t1 not in [t2])
class ControlDependenciesTest(test_util.TensorFlowTestCase):
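  """Tests for Graph.control_dependencies in graph and eager modes."""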
@test_util.run_deprecated_v1
def testBasic(self):
g = ops.Graph()
with g.as_default():
# Creating unregistered ops with _apply_op() doesn't work with the C API
# TODO(skyewm): address this more consistently. Possible solutions are
# to use registered ops in all tests, create a way to register ops in
# Python tests, or conditionally disable the op registration check in
# the C API.
a = constant_op.constant(1.0)
b = constant_op.constant(1.0)
with g.control_dependencies([a]):
c = constant_op.constant(1.0)
d = array_ops.identity(b)
e = array_ops.identity(c)
self.assertEqual(c.op.control_inputs, [a.op])
self.assertEqual(d.op.control_inputs, [a.op])
      # e's data input c already carries the control dependency on a, so no
      # redundant control input is added to e.
self.assertEqual(e.op.control_inputs, [])
@test_util.run_in_graph_and_eager_modes
def testEager(self):
def future():
future.calls += 1
return constant_op.constant(2.0)
future.calls = 0
if context.executing_eagerly():
a = constant_op.constant(1.0)
b = future
with ops.control_dependencies([a, b]):
c = constant_op.constant(3.0)
self.assertEqual(future.calls, 1)
else:
g = ops.Graph()
with g.as_default():
a = constant_op.constant(1.0)
b = future()
with g.control_dependencies([a, b]):
c = constant_op.constant(3.0)
self.assertEqual(c.op.control_inputs, [a.op, b.op])
self.assertEqual(future.calls, 1)
def testBasicWithConversion(self):
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
class ConvertibleObj(object):
def _as_graph_element(self):
return a
with g.control_dependencies([ConvertibleObj()]):
c = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertEqual(c.op.control_inputs, [a.op])
def testNested(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1, a_2, a_3, a_4]):
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
b_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_1.op, a_2.op, a_3.op, a_4.op],
b_1.op.control_inputs)
self.assertItemsEqual(b_1.op.control_inputs, b_2.op.control_inputs)
def testClear(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with g.control_dependencies(None):
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
# deps [a_3, a_4]
b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps = [a_3]
b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to None
b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1, a_2]
b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1]
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies(None):
# deps are None again
b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
self.assertItemsEqual([], b_none.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([], b_none2.op.control_inputs)
def testComplex(self):
g = ops.Graph()
# Usage pattern:
# * Nodes a_i are constants defined at the outermost scope, and are used
# as control inputs for the ith nested scope.
    # * Nodes b_i are two-input ops on (a_3, a_4) at each scope.
    # * Nodes c_i are two-input ops on (a_1, b_1) at each scope.
    # * Nodes d_i are two-input ops on (b_i, c_i) at each scope.
    # * Nodes e_i are two-input ops on (e_i-1, e_i-1) at each scope i > 1.
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
b_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_1, c_1],
[dtypes.float32])
e_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_2]):
b_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_2, c_2],
[dtypes.float32])
e_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_1, e_1],
[dtypes.float32])
with g.control_dependencies([a_3]):
b_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_3, c_3],
[dtypes.float32])
e_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_2, e_2],
[dtypes.float32])
with g.control_dependencies([a_4]):
b_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_4, c_4],
[dtypes.float32])
e_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_3, e_3],
[dtypes.float32])
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_2.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_3.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_4.op.control_inputs)
self.assertItemsEqual([], c_1.op.control_inputs)
self.assertItemsEqual([a_2.op], c_2.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op], c_3.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op, a_4.op], c_4.op.control_inputs)
self.assertItemsEqual([], d_1.op.control_inputs)
self.assertItemsEqual([], d_2.op.control_inputs)
self.assertItemsEqual([], d_3.op.control_inputs)
self.assertItemsEqual([], d_4.op.control_inputs)
self.assertItemsEqual([a_1.op], e_1.op.control_inputs)
self.assertItemsEqual([a_2.op], e_2.op.control_inputs)
self.assertItemsEqual([a_3.op], e_3.op.control_inputs)
self.assertItemsEqual([a_4.op], e_4.op.control_inputs)
def testRepeatedDependency(self):
g = ops.Graph()
a = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32])
a_0, a_1 = a.outputs
with g.control_dependencies([a_0]):
b = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
c = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertEqual(b.op.control_inputs, [a])
self.assertEqual(c.op.control_inputs, [a])
def testNoControlDependencyWithDataDependency(self):
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a]):
b = _apply_op(g, "Identity", [a], [dtypes.float32])
self.assertEqual(b.op.control_inputs, [])
class OpScopeTest(test_util.TensorFlowTestCase):
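  """Tests for ops.name_scope with default names and graph-element values."""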
@test_util.run_in_graph_and_eager_modes
def testNames(self):
with ops.name_scope("foo") as foo:
self.assertEqual("foo/", foo)
with ops.name_scope("foo2") as foo2:
self.assertEqual("foo/foo2/", foo2)
with ops.name_scope(None) as empty1:
self.assertEqual("", empty1)
with ops.name_scope("foo3") as foo3:
self.assertEqual("foo3/", foo3)
with ops.name_scope("") as empty2:
self.assertEqual("", empty2)
with ops.name_scope("foo/") as outer_foo:
self.assertEqual("foo/", outer_foo)
with ops.name_scope("") as empty3:
self.assertEqual("", empty3)
with ops.name_scope("foo4") as foo4:
self.assertEqual("foo/foo4/", foo4)
with ops.name_scope("foo5//") as foo5:
self.assertEqual("foo5//", foo5)
with ops.name_scope("foo6") as foo6:
self.assertEqual("foo5//foo6/", foo6)
with ops.name_scope("/") as foo7:
self.assertEqual("/", foo7)
with ops.name_scope("//") as foo8:
self.assertEqual("//", foo8)
with ops.name_scope("a//b/c") as foo9:
self.assertEqual("foo/a//b/c/", foo9)
with ops.name_scope("a//b/c") as foo10:
self.assertEqual("a//b/c/", foo10)
@test_util.run_in_graph_and_eager_modes
def testEagerDefaultScopeName(self):
with ops.name_scope(None, "default") as scope:
self.assertEqual(scope, "default/")
with ops.name_scope(None, "default2") as scope2:
self.assertEqual(scope2, "default/default2/")
@test_util.run_in_graph_and_eager_modes
def testNameScopeV2IsReEntrant(self):
foo = ops.name_scope_v2("foo")
bar = ops.name_scope_v2("bar")
with foo as scope_name:
self.assertEqual("foo/", scope_name)
with foo as scope_name:
self.assertEqual("foo/foo/", scope_name)
with bar as scope_name:
self.assertEqual("foo/bar/", scope_name)
with foo as scope_name:
self.assertEqual("foo/bar/foo/", scope_name)
with bar as scope_name:
self.assertEqual("bar/", scope_name)
@test_util.run_deprecated_v1
def testNoScopeName(self):
g0 = ops.Graph()
values = [
g0.create_op("A", [], [dtypes.float32]),
g0.create_op("B", [], [dtypes.float32])
]
with self.assertRaises(ValueError):
with ops.name_scope(None, values=values):
pass
with self.assertRaises(ValueError):
with ops.name_scope(None, None, values):
pass
@test_util.run_deprecated_v1
def testEmptyScopeName(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
with ops.name_scope("", values=[a, b]) as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.name_scope("", "my_default_scope", [a, b]) as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
@test_util.run_deprecated_v1
def testDefaultScopeName(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
scope_name = "my_scope"
default_scope_name = "my_default_scope"
with ops.name_scope(scope_name, default_scope_name, [a, b]) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.name_scope(None, default_scope_name, [a, b]) as scope:
self.assertEqual("%s/" % default_scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
with self.assertRaises(TypeError):
with ops.name_scope(scope_name, [a, b]):
pass
def _testGraphElements(self, graph_elements):
scope_name = "my_scope"
with ops.name_scope(scope_name, values=graph_elements) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(graph_elements[0].graph, ops.get_default_graph())
g1 = ops.Graph()
a = g1.create_op("A", [], [dtypes.float32])
with self.assertRaises(ValueError):
with ops.name_scope(scope_name, values=graph_elements + [a]):
pass
@test_util.run_deprecated_v1
def testTensor(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
self._testGraphElements([a, b])
@test_util.run_deprecated_v1
def testSparseTensor(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
sparse = sparse_tensor.SparseTensor(
_apply_op(g0, "Int64Output", [], [dtypes.int64]),
_apply_op(g0, "FloatOutput", [], [dtypes.float32]),
_apply_op(g0, "Int64Output", [], [dtypes.int64]))
self._testGraphElements([a, sparse, b])
@test_util.run_deprecated_v1
def testVariable(self):
g0 = ops.Graph()
with g0.as_default():
variable = variables.Variable([1.0])
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
self._testGraphElements([a, variable, b])
class InitScopeTest(test_util.TensorFlowTestCase):
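  """Tests for ops.init_scope lifting ops out of function-building graphs."""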
def testClearsControlDependencies(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.as_default():
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with ops.init_scope():
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
# deps [a_3, a_4]
b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps = [a_3]
b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to None
b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1, a_2]
b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1]
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with ops.init_scope():
# deps are None again
b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
self.assertItemsEqual([], b_none.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([], b_none2.op.control_inputs)
def testLiftsOpsFromFunctions(self):
g0 = ops.Graph()
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
g2 = ops.Graph()
g2._building_function = True # pylint: disable=protected-access
with g0.as_default():
with g1.as_default():
with g2.as_default():
with ops.init_scope():
_ = constant_op.constant(1.0)
self.assertEqual(len(g2.get_operations()), 0)
self.assertEqual(len(g1.get_operations()), 0)
self.assertEqual(len(g0.get_operations()), 1)
def testPreservesDevices(self):
g0 = ops.Graph()
with g0.as_default(), ops.device("CPU:0"):
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
with g1.as_default():
with ops.device("GPU:0"):
with ops.init_scope():
# init_scope should preserve device set under `g1`.
on_gpu = constant_op.constant(1.0)
self.assertEqual(on_gpu.device, "/device:GPU:0")
still_on_gpu = constant_op.constant(1.0)
self.assertEqual(still_on_gpu.device, "/device:GPU:0")
blank = constant_op.constant(1.0)
self.assertEqual(blank.device, "")
with ops.init_scope():
now_on_cpu = constant_op.constant(1.0)
self.assertEqual(now_on_cpu.device, "/device:CPU:0")
on_cpu = constant_op.constant(1.0)
self.assertEqual(on_cpu.device, "/device:CPU:0")
def testComposes(self):
g0 = ops.Graph()
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
g2 = ops.Graph()
g2._building_function = True # pylint: disable=protected-access
g3 = ops.Graph()
g3._building_function = False # pylint: disable=protected-access
with g0.as_default():
with g1.as_default():
with ops.init_scope():
# This op should be lifted into g0.
_ = constant_op.constant(1.0)
self.assertIs(g0, ops.get_default_graph())
self.assertEqual(len(g2.get_operations()), 0)
self.assertEqual(len(g1.get_operations()), 0)
self.assertEqual(len(g0.get_operations()), 1)
with g2.as_default():
with ops.init_scope():
# This op should be lifted into g0.
_ = constant_op.constant(1.0)
self.assertIs(g0, ops.get_default_graph())
with g3.as_default():
with ops.init_scope():
# This op should be lifted into g3, because g3 is not building a
# function.
_ = constant_op.constant(1.0)
self.assertIs(g3, ops.get_default_graph())
self.assertEqual(len(g3.get_operations()), 1)
self.assertEqual(len(g2.get_operations()), 0)
self.assertEqual(len(g1.get_operations()), 0)
self.assertEqual(len(g0.get_operations()), 2)
def testEscapesToEagerContext(self):
g = ops.Graph()
g._building_function = True # pylint: disable=protected-access
with context.eager_mode():
with context.graph_mode():
with g.as_default():
with ops.init_scope():
# Because g is building a function, init_scope should
# escape out to the eager context.
self.assertTrue(context.executing_eagerly())
# g should be reinstated as the default graph, and the
# graph context should be re-entered.
self.assertIs(g, ops.get_default_graph())
self.assertFalse(context.executing_eagerly())
def testStaysInEagerWhenOnlyEagerContextActive(self):
with context.eager_mode():
with ops.init_scope():
self.assertTrue(context.eager_mode())
self.assertTrue(context.eager_mode())
def testEscapesDefunWhenInEagerMode(self):
def function_with_variables():
with ops.init_scope():
self.v = resource_variable_ops.ResourceVariable(3)
return self.v.assign_add(1)
with context.eager_mode():
# Each invocation of function_with_variables recreates a variable.
self.assertEqual(4, int(function_with_variables()))
self.assertEqual(4, int(function_with_variables()))
compiled = eager_function.defun(function_with_variables)
# The init_scope in function_with_variables lifts the variable out
# of the graph function constructed by defun; hence,
# compiled now appears to be stateful.
self.assertEqual(4, int(compiled()))
self.assertEqual(5, int(compiled()))
def testEscapesDefunWhenInGraphMode(self):
def function_with_variables(name):
with ops.init_scope():
_ = variable_scope.get_variable(name, shape=(1,))
g = ops.Graph()
with g.as_default():
with self.cached_session():
# First ensure that graphs that are not building functions are
# not escaped.
function_with_variables("foo")
with self.assertRaisesRegexp(ValueError,
r"Variable foo already exists.*"):
# This will fail because reuse is not set to True.
function_with_variables("foo")
compiled = eager_function.defun(function_with_variables)
compiled("bar")
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 2)
# The second call to `compiled` should not create variables: the
# init_scope has lifted the variable creation code out of the defun.
compiled("bar")
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 2)
def testEscapesNestedDefun(self):
def inner_function():
with ops.init_scope():
self.v = resource_variable_ops.ResourceVariable(1)
return self.v.assign_add(2)
def outer_function(inner=None):
with ops.init_scope():
self.v0 = resource_variable_ops.ResourceVariable(0)
return self.v0.assign_add(1) + inner()
with context.eager_mode():
# Each invocation of outer_function recreates variables.
self.assertEqual(4, int(outer_function(inner=inner_function)))
self.assertEqual(4, int(outer_function(inner=inner_function)))
compiled_inner = eager_function.defun(inner_function)
compiled_outer = eager_function.defun(outer_function)
# The init_scope lifts variables out of the graph functions
# constructed by defun; hence, compiled_outer should now appear to be
# stateful.
self.assertEqual(4, int(compiled_outer(inner=compiled_inner)))
self.assertEqual(7, int(compiled_outer(inner=compiled_inner)))
@test_util.run_v1_only("b/120545219")
def testFallsBackToGlobalGraphWhenAllGraphsAreBuildingFunctions(self):
with context.graph_mode():
ops.reset_default_graph()
# This doesn't push anything onto the graph stack, but it does
# set the stack's global graph.
global_graph = ops.get_default_graph()
fn_graph = ops.Graph()
# pylint: disable=protected-access
fn_graph._building_function = True
self.assertEqual(len(ops._default_graph_stack.stack), 0)
with fn_graph.as_default():
self.assertEqual(len(ops._default_graph_stack.stack), 1)
with ops.init_scope():
self.assertGreater(len(ops._default_graph_stack.stack), 1)
dummy = constant_op.constant(1.0)
self.assertEqual(len(ops._default_graph_stack.stack), 1)
# Note that the global graph is _not_ on the graph stack.
self.assertEqual(len(ops._default_graph_stack.stack), 0)
# Ensure that `dummy` was added to the global graph.
self.assertEqual(global_graph, dummy.graph)
# pylint: enable=protected-access
def testInstallsDefaultGraphWhenGraphStackIsEmptyInGraphMode(self):
with context.graph_mode():
# pylint: disable=protected-access
self.assertEqual(len(ops._default_graph_stack.stack), 0)
with ops.init_scope():
self.assertGreater(len(ops._default_graph_stack.stack), 0)
self.assertEqual(len(ops._default_graph_stack.stack), 0)
# pylint: enable=protected-access
def testPreservesNameScopeInGraphConstruction(self):
with ops.Graph().as_default():
function_graph = ops.Graph()
with function_graph.as_default():
with ops.name_scope("inner"), ops.init_scope():
self.assertEqual(ops.get_name_scope(), "inner")
self.assertEqual(ops.get_name_scope(), "")
def testEnteringGraphFromEagerIsSticky(self):
with context.eager_mode():
g = ops.Graph()
with g.as_default():
with ops.init_scope():
self.assertFalse(context.executing_eagerly())
self.assertEqual(g, ops.get_default_graph())
def testMixGraphEager(self):
with context.eager_mode():
c = constant_op.constant(1.0)
with ops.Graph().as_default():
with self.assertRaisesRegexp(
RuntimeError, "Attempting to capture an EagerTensor"):
math_ops.add(c, c)
c2 = constant_op.constant(2.0)
with self.assertRaisesRegexp(
TypeError, "Graph tensors"):
math_ops.add(c2, c2)
def testPreservesNameScopeInEagerExecution(self):
with context.eager_mode():
def foo():
with ops.name_scope("inner"), ops.init_scope():
if context.executing_eagerly():
# A trailing slash is always appended when eager execution is
# enabled.
self.assertEqual(context.context().scope_name, "inner/")
else:
self.assertEqual(ops.get_name_scope(), "inner")
foo()
self.assertEqual(ops.get_name_scope(), "")
foo_compiled = eager_function.defun(foo)
foo_compiled()
self.assertEqual(ops.get_name_scope(), "")
def testExecutingEagerlyOutsideFunctions(self):
@def_function.function
def f():
return ops.executing_eagerly_outside_functions()
with context.graph_mode():
self.assertFalse(ops.executing_eagerly_outside_functions())
with session.Session():
        # Need self.evaluate for these because the functions return tensors
        # rather than Python booleans.
self.assertFalse(self.evaluate(f()))
with context.eager_mode():
self.assertTrue(ops.executing_eagerly_outside_functions())
self.assertTrue(f())
with ops.Graph().as_default():
self.assertFalse(ops.executing_eagerly_outside_functions())
with session.Session():
self.assertFalse(self.evaluate(f()))
class GraphTest(test_util.TensorFlowTestCase):
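  """Tests for default graph management and miscellaneous Graph behavior."""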
def setUp(self):
ops.reset_default_graph()
def _AssertDefault(self, expected):
self.assertIs(expected, ops.get_default_graph())
def testResetDefaultGraphNesting(self):
g0 = ops.Graph()
with self.assertRaises(AssertionError):
with g0.as_default():
ops.reset_default_graph()
def testGraphContextManagerCancelsEager(self):
with context.eager_mode():
with ops.Graph().as_default():
self.assertFalse(context.executing_eagerly())
def testGraphContextManager(self):
g0 = ops.Graph()
with g0.as_default() as g1:
self.assertIs(g0, g1)
def testDefaultGraph(self):
orig = ops.get_default_graph()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
g0 = ops.Graph()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
context_manager_0 = g0.as_default()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
with context_manager_0 as g0:
self._AssertDefault(g0)
with ops.Graph().as_default() as g1:
self.assertTrue(ops.has_default_graph())
self._AssertDefault(g1)
self._AssertDefault(g0)
self._AssertDefault(orig)
self.assertFalse(ops.has_default_graph())
def testPreventFeeding(self):
g = ops.Graph()
a = constant_op.constant(2.0)
self.assertTrue(g.is_feedable(a))
g.prevent_feeding(a)
self.assertFalse(g.is_feedable(a))
@test_util.run_deprecated_v1
def testPreventFetching(self):
g = ops.Graph()
a = constant_op.constant(2.0)
self.assertTrue(g.is_fetchable(a))
g.prevent_fetching(a.op)
self.assertFalse(g.is_fetchable(a))
def testAsGraphElementConversions(self):
class ConvertibleObj(object):
def _as_graph_element(self):
return "FloatOutput:0"
class NonConvertibleObj(object):
pass
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertEqual(a, g.as_graph_element(ConvertibleObj()))
with self.assertRaises(TypeError):
g.as_graph_element(NonConvertibleObj())
# Regression test against creating custom __del__ functions in classes
# involved in cyclic references, e.g. Graph and Operation. (Python won't gc
# cycles that require calling a __del__ method, because the __del__ method can
# theoretically increase the object's refcount to "save" it from gc, and any
  # already-deleted objects in the cycle would have to be restored.)
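  #
  # For illustration only (hypothetical sketch, not part of the test below),
  # the anti-pattern being guarded against looks like:
  #   class Leaky(object):
  #     def __del__(self):
  #       some_registry.append(self)  # resurrects the object during gc
  # with Leaky instances participating in a reference cycle.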
def testGarbageCollected(self):
# Create a graph we can delete and a weak reference to monitor if it's gc'd
g = ops.Graph()
g_ref = weakref.ref(g)
# Create some ops
with g.as_default():
a = constant_op.constant(2.0)
b = constant_op.constant(3.0)
c = math_ops.add(a, b)
# Create a session we can delete
with session.Session(graph=g) as sess:
self.evaluate(c)
# Delete all references and trigger gc
del g
del a
del b
del c
del sess
gc.collect()
self.assertIsNone(g_ref())
def testRunnableAfterInvalidShape(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError):
math_ops.add([1, 2], [1, 2, 3])
a = constant_op.constant(1)
with session.Session() as sess:
self.evaluate(a)
def testRunnableAfterInvalidShapeWithKernelLabelMap(self):
g = ops.Graph()
with g.as_default():
with g._kernel_label_map({"KernelLabelRequired": "overload_1"}):
with self.assertRaises(ValueError):
test_ops.kernel_label_required(1)
a = constant_op.constant(1)
with session.Session() as sess:
self.evaluate(a)
class AttrScopeTest(test_util.TensorFlowTestCase):
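  """Tests for the internal Graph._attr_scope attribute overrides."""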
def _get_test_attrs(self):
x = control_flow_ops.no_op()
try:
a = compat.as_text(x.get_attr("_A"))
except ValueError:
a = None
try:
b = compat.as_text(x.get_attr("_B"))
except ValueError:
b = None
return (a, b)
@test_util.run_deprecated_v1
def testNoLabel(self):
with self.cached_session():
self.assertAllEqual((None, None), self._get_test_attrs())
@test_util.run_deprecated_v1
def testLabelMap(self):
with self.cached_session() as sess:
a1 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": attr_value_pb2.AttrValue(s=compat.as_bytes("foo"))
}):
a2 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": None,
"_B": attr_value_pb2.AttrValue(s=compat.as_bytes("bar"))
}):
a3 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": attr_value_pb2.AttrValue(s=compat.as_bytes("baz"))
}):
a4 = self._get_test_attrs()
a5 = self._get_test_attrs()
a6 = self._get_test_attrs()
a7 = self._get_test_attrs()
self.assertAllEqual((None, None), a1)
self.assertAllEqual(("foo", None), a2)
self.assertAllEqual((None, "bar"), a3)
self.assertAllEqual(("baz", "bar"), a4)
self.assertAllEqual((None, "bar"), a5)
self.assertAllEqual(("foo", None), a6)
self.assertAllEqual((None, None), a7)
ops.RegisterShape("KernelLabel")(common_shapes.scalar_shape)
class KernelLabelTest(test_util.TensorFlowTestCase):
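  """Tests for kernel selection via the internal Graph._kernel_label_map."""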
@test_util.run_deprecated_v1
def testNoLabel(self):
with self.cached_session():
self.assertAllEqual(b"My label is: default",
test_ops.kernel_label().eval())
@test_util.run_deprecated_v1
def testLabelMap(self):
with self.cached_session() as sess:
default_1 = test_ops.kernel_label()
# pylint: disable=protected-access
with sess.graph._kernel_label_map({"KernelLabel": "overload_1"}):
overload_1_1 = test_ops.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": "overload_2"}):
overload_2 = test_ops.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": ""}):
default_2 = test_ops.kernel_label()
overload_1_2 = test_ops.kernel_label()
# pylint: enable=protected-access
default_3 = test_ops.kernel_label()
self.assertAllEqual(b"My label is: default", self.evaluate(default_1))
self.assertAllEqual(b"My label is: default", self.evaluate(default_2))
self.assertAllEqual(b"My label is: default", self.evaluate(default_3))
self.assertAllEqual(b"My label is: overload_1",
self.evaluate(overload_1_1))
self.assertAllEqual(b"My label is: overload_1",
self.evaluate(overload_1_2))
self.assertAllEqual(b"My label is: overload_2", self.evaluate(overload_2))
class AsGraphDefTest(test_util.TensorFlowTestCase):
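  """Tests for Graph.as_graph_def, including the add_shapes option."""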
def testGraphDefVersion(self):
"""Test that the graphdef version is plumbed through to kernels."""
with ops.Graph().as_default() as g:
version = g.graph_def_versions.producer
with self.session(graph=g):
v = test_ops.graph_def_version().eval()
self.assertEqual(version, v)
def testAddShapes(self):
with ops.Graph().as_default() as g:
t1, t2, t3, t4, t5 = _apply_op(g, "FiveFloatOutputs", [],
[dtypes.float32] * 5)
t1.set_shape(None)
t2.set_shape([])
t3.set_shape([None])
t4.set_shape([43, 37])
t5.set_shape([43, None])
b = constant_op.constant(1.0) # pylint: disable=unused-variable
gd = g.as_graph_def(add_shapes=True)
self.assertProtoEqualsVersion("""
node { name: "FiveFloatOutputs" op: "FiveFloatOutputs"
attr {
key: "_output_shapes"
value {
list {
shape { unknown_rank: true }
shape { }
shape { dim { size: -1 } }
shape { dim { size: 43 } dim { size: 37 } }
shape { dim { size: 43 } dim { size: -1 } }
}
}
}
}
node { name: "Const" op: "Const"
attr {
key: "_output_shapes"
value {
list {
shape { }
}
}
}
attr {
key: "dtype"
value { type: DT_FLOAT }
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape { }
float_val: 1.0 } } } }
""", gd)
@ops.RegisterStatistics("a", "flops")
def _calc_a_forward_flops(unused_graph, unused_node):
return ops.OpStats("flops", 20)
class StatisticsTest(test_util.TensorFlowTestCase):
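  """Tests for op statistics registration and accumulation (OpStats)."""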
def testRegisteredNode(self):
graph = ops.Graph()
node = ops._NodeDef("a", "an_a")
flops = ops.get_stats_for_node_def(graph, node, "flops")
self.assertEqual(20, flops.value)
missing_stat = ops.get_stats_for_node_def(graph, node, "missing_stat")
self.assertEqual(None, missing_stat.value)
def testUnregisteredNode(self):
graph = ops.Graph()
node = ops._NodeDef("b", "a_b")
weight_params = ops.get_stats_for_node_def(graph, node, "weight_params")
self.assertEqual(None, weight_params.value)
def testAccumulateStatistics(self):
flops_total = ops.OpStats("flops")
self.assertEqual(None, flops_total.value)
second_flops = ops.OpStats("flops", 3)
flops_total += second_flops
self.assertEqual(3, flops_total.value)
class DeviceStackTest(test_util.TensorFlowTestCase):
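  """Tests for the device assignment metadata recorded on ops."""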
@test_util.run_deprecated_v1
def testBasicDeviceAssignmentMetadata(self):
def device_func(unused_op):
return "/cpu:*"
const_zero = constant_op.constant([0.0], name="zero")
with ops.device("/cpu"):
const_one = constant_op.constant([1.0], name="one")
with ops.device("/cpu:0"):
const_two = constant_op.constant([2.0], name="two")
with ops.device(device_func):
const_three = constant_op.constant(3.0, name="three")
self.assertEqual(0, len(const_zero.op._device_assignments))
one_list = const_one.op._device_assignments
self.assertEqual(1, len(one_list))
self.assertEqual("/cpu", one_list[0].obj)
self.assertEqual("ops_test.py", os.path.basename(one_list[0].filename))
two_list = const_two.op._device_assignments
self.assertEqual(2, len(two_list))
devices = [t.obj for t in two_list]
self.assertEqual(set(["/cpu", "/cpu:0"]), set(devices))
three_list = const_three.op._device_assignments
self.assertEqual(1, len(three_list))
func_description = three_list[0].obj
expected_regex = r"device_func<.*ops_test.py, [0-9]+"
self.assertRegexpMatches(func_description, expected_regex)
@test_util.run_deprecated_v1
def testDeviceAssignmentMetadataForGraphDeviceAndTfDeviceFunctions(self):
with ops.device("/cpu"):
const_one = constant_op.constant([1.0], name="one")
with ops.get_default_graph().device("/cpu"):
const_two = constant_op.constant([2.0], name="two")
one_metadata = const_one.op._device_assignments[0]
two_metadata = const_two.op._device_assignments[0]
# Verify both types of device assignment return the right stack info.
self.assertRegexpMatches("ops_test.py",
os.path.basename(one_metadata.filename))
self.assertEqual(one_metadata.filename, two_metadata.filename)
self.assertEqual(one_metadata.lineno + 2, two_metadata.lineno)
class ColocationGroupTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], a.op.colocation_groups())
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
with self.assertRaises(ValueError):
c.op.get_attr("_class")
@test_util.run_deprecated_v1
def testBasicColocationMetadata(self):
const_two = constant_op.constant([2.0], name="two")
with ops.colocate_with(const_two.op):
const_three = constant_op.constant(3.0, name="three")
locations_dict = const_three.op._colocation_dict
self.assertIn("two", locations_dict)
metadata = locations_dict["two"]
self.assertIsNone(metadata.obj)
# Check that this test's filename is recorded as the file containing the
# colocation statement.
self.assertEqual("ops_test.py", os.path.basename(metadata.filename))
@test_util.run_deprecated_v1
def testColocationDeviceInteraction(self):
with ops.device("/cpu:0"):
with ops.device("/device:GPU:0"):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
# 'b' is created in the scope of /cpu:0, but it is
# colocated with 'a', which is on '/device:GPU:0'. colocate_with
# overrides devices because it is a stronger constraint.
b = constant_op.constant(3.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual(a.op.device, b.op.device)
@test_util.run_deprecated_v1
def testColocationCanonicalization(self):
with ops.device("/device:GPU:0"):
_ = constant_op.constant(2.0)
with ops.device(lambda op: "/device:GPU:0"):
b = constant_op.constant(3.0)
with ops.get_default_graph().colocate_with(b):
with ops.device("/device:GPU:0"):
c = constant_op.constant(4.0)
# A's device will be /device:GPU:0
# B's device will be /device:GPU:0
# C's device will be /device:GPU:0 because it
# inherits B's device name, after canonicalizing the names.
self.assertEqual(b.op.device, c.op.device)
@test_util.run_deprecated_v1
def testLocationOverrides(self):
with ops.device("/cpu:0"):
with ops.device("/device:GPU:0"):
a = constant_op.constant([2.0], name="a")
# Note that this colocation is "redundant", since we are
# within the scope of "/device:GPU:0". However, we would like to
# preserve in the GraphDef that these two ops should be
# colocated in a portable way.
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
d = constant_op.constant(5.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual("/device:GPU:0", a.op.device)
self.assertEqual(a.op.device, b.op.device)
# Test that device function stack is restored.
self.assertEqual("/device:GPU:0", c.op.device)
self.assertEqual("/device:CPU:0", d.op.device)
@test_util.run_deprecated_v1
def testNestedColocateWith(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual([b"loc:@a"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testMultiColocationGroups(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@a", b"loc:@b"]), set(c.op.colocation_groups()))
@test_util.run_deprecated_v1
def testColocationIgnoreStack(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op, ignore_existing=True):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@b"]), set(c.op.colocation_groups()))
@test_util.run_deprecated_v1
def testColocateWithReset(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(None, ignore_existing=True):
c = constant_op.constant(4.0, name="c")
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual([b"loc:@c"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testColocateWithInitialNoneThenNested(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
with ops.colocate_with(None, ignore_existing=True):
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(b.op):
c = constant_op.constant(4.0, name="c")
self.assertEqual([b"loc:@b"], b.op.colocation_groups())
self.assertEqual([b"loc:@b"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testColocateVariables(self):
a = variables.Variable([2.0], name="a")
with ops.colocate_with(a.op):
b = variables.Variable([3.0], name="b")
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
def testColocateWithVariableInFunction(self):
v = variables.Variable(1.)
@def_function.function
def f():
with ops.colocate_with(v):
return array_ops.ones([], name="output")
f()
graph_def = f.get_concrete_function().graph.as_graph_def()
wrap_function.function_from_graph_def(graph_def, [], ["output"])
class DeprecatedTest(test_util.TensorFlowTestCase):
def testSuccess(self):
with ops.Graph().as_default() as g:
test_util.set_producer_version(g, 7)
old = test_ops.old()
with self.session(graph=g):
old.run()
def _error(self):
return ((r"Op Old is not available in GraphDef version %d\. "
r"It has been removed in version 8\. For reasons\.") %
versions.GRAPH_DEF_VERSION)
def testGraphConstructionFail(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(NotImplementedError, self._error()):
test_ops.old()
class DenseTensorLikeTypeTest(test_util.TensorFlowTestCase):
def testSuccess(self):
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
self.assertTrue(ops.is_dense_tensor_like(t))
v = variables.Variable([17])
self.assertTrue(ops.is_dense_tensor_like(v))
class BadClassNoName(object):
pass
class BadClassBadName(object):
def name(self):
pass
class BadClassNoDtype(object):
@property
def name(self):
pass
class BadClassBadDtype(object):
@property
def name(self):
pass
def dtype(self):
pass
def testBadClass(self):
with self.assertRaisesRegexp(TypeError, "`name`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassNoName)
with self.assertRaisesRegexp(TypeError, "`name`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassBadName)
with self.assertRaisesRegexp(TypeError, "`dtype`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassNoDtype)
with self.assertRaisesRegexp(TypeError, "`dtype`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassBadDtype)
class NameScopeTest(test_util.TensorFlowTestCase):
def testStripAndPrependScope(self):
strs = [
"hidden1/hidden1/weights", # Same prefix. Should strip.
"hidden1///hidden1/weights", # Extra "/". Should strip.
"^hidden1/hidden1/weights", # Same prefix. Should strip.
"loc:@hidden1/hidden1/weights", # Same prefix. Should strip.
"hhidden1/hidden1/weights", # Different prefix. Should keep.
"hidden1"
] # Not a prefix. Should keep.
expected_striped = [
"hidden1/weights", "hidden1/weights", "^hidden1/weights",
"loc:@hidden1/weights", "hhidden1/hidden1/weights", "hidden1"
]
expected_prepended = [
"hidden2/hidden1/weights", "hidden2/hidden1/weights",
"^hidden2/hidden1/weights", "loc:@hidden2/hidden1/weights",
"hidden2/hhidden1/hidden1/weights", "hidden2/hidden1"
]
name_scope_to_strip = "hidden1"
name_scope_to_add = "hidden2"
    for es, ep, s in zip(expected_stripped, expected_prepended, strs):
      stripped = ops.strip_name_scope(s, name_scope_to_strip)
      self.assertEqual(es, stripped)
      self.assertEqual(ep, ops.prepend_name_scope(stripped, name_scope_to_add))
def testGetNameScope(self):
with ops.Graph().as_default() as g:
with ops.name_scope("scope1"):
with ops.name_scope("scope2"):
with ops.name_scope("scope3"):
self.assertEqual("scope1/scope2/scope3", g.get_name_scope())
self.assertEqual("scope1/scope2", g.get_name_scope())
self.assertEqual("scope1", g.get_name_scope())
self.assertEqual("", g.get_name_scope())
def testTwoGraphs(self):
def f():
g1 = ops.Graph()
g2 = ops.Graph()
with g1.as_default():
with g2.as_default():
with ops.name_scope("_"):
pass
self.assertRaisesRegexp(ValueError, "'_' is not a valid scope name", f)
class TracebackTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testTracebackWithStartLines(self):
with self.cached_session() as sess:
a = constant_op.constant(2.0)
sess.run(
a,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
self.assertTrue(sess.graph.get_operations())
# Tests that traceback_with_start_lines is the same as traceback
# but includes one more element at the end.
for op in sess.graph.get_operations():
        self.assertEqual(len(op.traceback), len(op.traceback_with_start_lines))
for frame, frame_with_start_line in zip(
op.traceback, op.traceback_with_start_lines):
          self.assertEqual(5, len(frame_with_start_line))
          self.assertEqual(frame, frame_with_start_line[:-1])
class EnableEagerExecutionTest(test_util.TensorFlowTestCase):
@test_util.run_v1_only("b/120545219")
def testBadArgumentsToEnableEagerExecution(self):
with self.assertRaisesRegexp(TypeError, "config must be a tf.ConfigProto"):
ops.enable_eager_execution(context.DEVICE_PLACEMENT_SILENT)
with self.assertRaisesRegexp(ValueError, "device_policy must be one of"):
c = config_pb2.ConfigProto()
ops.enable_eager_execution(c, c)
with self.assertRaisesRegexp(ValueError, "execution_mode must be one of"):
c = config_pb2.ConfigProto()
ops.enable_eager_execution(c, execution_mode=c)
class _TupleTensor(composite_tensor.CompositeTensor):
"""`Tensor`-like `tuple`-like for custom `Tensor` conversion masquerading."""
def __init__(self, components):
super(_TupleTensor, self).__init__()
self._components = tuple(ops.convert_to_tensor(c) for c in components)
@property
def _type_spec(self):
return _TupleTensorSpec(type_spec.from_value(c) for c in self._components)
def __getitem__(self, key):
return self._components[key]
def __len__(self):
return len(self._components)
def __iter__(self):
return iter(self._components)
class _TupleTensorSpec(type_spec.TypeSpec):
def __init__(self, specs):
self._specs = specs
value_type = property(lambda self: _TupleTensor)
_component_specs = property(lambda self: self._specs)
def _to_components(self, value):
return value._components
def _from_components(self, components):
return _TupleTensor(*components)
def _serialize(self):
return (self._specs,)
class _MyTuple(object):
"""Pretend user-side class for `ConvertToCompositeTensorTest ."""
def __init__(self, components):
super(_MyTuple, self).__init__()
self._components = tuple(components)
def __getitem__(self, key):
return self._components[key]
def __len__(self):
return len(self._components)
def __iter__(self):
return iter(self._components)
ops.register_tensor_conversion_function(
_MyTuple, conversion_func=lambda x, *_, **__: _TupleTensor(x))
class CustomConvertToCompositeTensorTest(test_util.TensorFlowTestCase):
def testCompositeTensorConversion(self):
"""Tests that a user can register a CompositeTensor converter."""
x = _MyTuple((1, [2., 3.], [[4, 5], [6, 7]]))
y = ops.convert_to_tensor_or_composite(x)
self.assertFalse(tensor_util.is_tensor(y))
self.assertIsInstance(y, _TupleTensor)
self.assertLen(y, len(x))
for x_, y_ in zip(x, y):
self.assertIsInstance(y_, ops.Tensor)
self.assertTrue(tensor_util.is_tensor(y_))
self.assertAllEqual(x_, tensor_util.constant_value(y_))
if __name__ == "__main__":
googletest.main()
|
the-stack_106_31540 | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from platform import python_implementation
from sys import version_info
from os.path import abspath, dirname, join
from setuptools import setup, find_packages
__author__ = 'Leif Johansson'
__version__ = '1.1.2dev0'
here = abspath(dirname(__file__))
README = open(join(here, 'README.rst')).read()
NEWS = open(join(here, 'NEWS.txt')).read()
python_requires = '>=3.5'
install_requires = [
'mako',
'lxml >=4.1.1',
'pyyaml >=3.10',
'pyXMLSecurity >=0.15',
'cherrypy',
'iso8601 >=0.1.4',
'simplejson >=2.6.2',
'jinja2',
'httplib2 >=0.7.7',
'six>=1.11.0',
'ipaddr',
'publicsuffix2',
'redis',
'requests',
'requests_cache',
'requests_file',
'pyconfig',
'pyyaml',
'multiprocess',
'minify',
'whoosh',
'pyramid',
'accept_types >=0.4.1',
'apscheduler',
'redis-collections',
'cachetools',
'xmldiff',
'gunicorn'
]
python_implementation_str = python_implementation()
setup(name='pyFF',
version=__version__,
description="Federation Feeder",
long_description=README + '\n\n' + NEWS,
classifiers=[
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='identity federation saml metadata',
author=__author__,
author_email='[email protected]',
url='http://blogs.mnt.se',
license='BSD',
setup_requires=['nose>=1.0'],
tests_require=['pbr', 'fakeredis>=1.0.5', 'coverage', 'nose>=1.0', 'mock', 'mako', 'testfixtures', 'wsgi_intercept'],
test_suite="nose.collector",
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
package_data={
'pyff': ['xslt/*.xsl',
'site/static/js/*.js',
'site/static/js/select2/*',
'site/static/fonts/*',
'site/static/css/*.css',
'site/templates/*',
'site/icons/*',
'site/static/bootstrap/fonts/*',
'site/static/bootstrap/js/*',
'site/static/bootstrap/css/*',
'site/static/bootstrap/img/*',
'schema/*.xsd']
},
zip_safe=False,
      python_requires=python_requires,
      install_requires=install_requires,
scripts=['scripts/mirror-mdq.sh'],
entry_points={
'console_scripts': ['pyff=pyff.md:main', 'pyffd=pyff.mdx:main', 'samldiff=pyff.tools:difftool'],
'paste.app_factory': [
'pyffapp=pyff.wsgi:app_factory'
],
'paste.server_runner': [
'pyffs=pyff.wsgi:server_runner'
],
},
message_extractors={'src': [
('**.py', 'python', None),
('**/templates/**.html', 'mako', None),
]},
)
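# Usage sketch (hypothetical, shown only to illustrate the metadata above; the
# exact commands and their behaviour are defined by pip and pyFF, not by this
# file): a development install from a source checkout registers the console
# scripts declared in entry_points, i.e. 'pyff', 'pyffd' and 'samldiff'.
#
#   pip install -e .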
|
the-stack_106_31543 | import importlib
import logging
def load(name):
logging.info(f"Loading transformer {name}")
    # Split "module.ClassName" into module and class name; fall back to the
    # class named "default" when no class is given.
    parts = name.rsplit(".", 1)
    module_name = parts[0]
    if len(parts) == 1:
        class_name = "default"
    else:
        class_name = parts[1]
try:
mdl = importlib.import_module("." + module_name, "fastmri.transforms")
classobj = getattr(mdl, class_name)
    except AttributeError as e:
        raise Exception(f"{class_name} in transformer module {module_name} doesn't exist") from e
    except ModuleNotFoundError as e:
        raise Exception(f"{module_name} transformer module file not found") from e
return classobj
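# Usage sketch (hypothetical): assumes a module fastmri/transforms/normalize.py
# that defines classes named "ZScore" and "default"; both the module and class
# names here are illustrative only, not files shipped with this package.
#
#   transform_cls = load("normalize.ZScore")  # explicit class in the module
#   fallback_cls = load("normalize")          # no class given -> uses "default"
#   transform = transform_cls()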
|