# Copyright 2013-2021 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from .. import mesonlib, mlog
from .disabler import Disabler
from .exceptions import InterpreterException, InvalidArguments
from ._unholder import _unholder
from dataclasses import dataclass
from functools import wraps
import abc
import itertools
import copy
import typing as T
if T.TYPE_CHECKING:
from typing_extensions import Protocol
from .. import mparser
from .baseobjects import InterpreterObject, TV_func, TYPE_var, TYPE_kwargs
from .interpreterbase import SubProject
from .operator import MesonOperator
_TV_IntegerObject = T.TypeVar('_TV_IntegerObject', bound=InterpreterObject, contravariant=True)
_TV_ARG1 = T.TypeVar('_TV_ARG1', bound=TYPE_var, contravariant=True)
class FN_Operator(Protocol[_TV_IntegerObject, _TV_ARG1]):
def __call__(s, self: _TV_IntegerObject, other: _TV_ARG1) -> TYPE_var: ...
_TV_FN_Operator = T.TypeVar('_TV_FN_Operator', bound=FN_Operator)
def get_callee_args(wrapped_args: T.Sequence[T.Any]) -> T.Tuple['mparser.BaseNode', T.List['TYPE_var'], 'TYPE_kwargs', 'SubProject']:
# First argument could be InterpreterBase, InterpreterObject or ModuleObject.
# In the case of a ModuleObject it is the 2nd argument (ModuleState) that
# contains the needed information.
s = wrapped_args[0]
if not hasattr(s, 'current_node'):
s = wrapped_args[1]
node = s.current_node
subproject = s.subproject
args = kwargs = None
if len(wrapped_args) >= 3:
args = wrapped_args[-2]
kwargs = wrapped_args[-1]
return node, args, kwargs, subproject
def noPosargs(f: TV_func) -> TV_func:
@wraps(f)
def wrapped(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
args = get_callee_args(wrapped_args)[1]
if args:
raise InvalidArguments('Function does not take positional arguments.')
return f(*wrapped_args, **wrapped_kwargs)
return T.cast('TV_func', wrapped)
def noKwargs(f: TV_func) -> TV_func:
@wraps(f)
def wrapped(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
kwargs = get_callee_args(wrapped_args)[2]
if kwargs:
raise InvalidArguments('Function does not take keyword arguments.')
return f(*wrapped_args, **wrapped_kwargs)
return T.cast('TV_func', wrapped)
def stringArgs(f: TV_func) -> TV_func:
@wraps(f)
def wrapped(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
args = get_callee_args(wrapped_args)[1]
if not isinstance(args, list):
mlog.debug('Not a list:', str(args))
raise InvalidArguments('Argument not a list.')
if not all(isinstance(s, str) for s in args):
mlog.debug('Element not a string:', str(args))
raise InvalidArguments('Arguments must be strings.')
return f(*wrapped_args, **wrapped_kwargs)
return T.cast('TV_func', wrapped)
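# A minimal hypothetical sketch of how these simple decorators are stacked on an
# interpreter method (the method name below is made up; only the decorators are
# from this module):
#
#     @noKwargs
#     @stringArgs
#     def join_paths_method(self, args, kwargs):
#         return os.path.join(*args)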
def noArgsFlattening(f: TV_func) -> TV_func:
setattr(f, 'no-args-flattening', True) # noqa: B010
return f
def noSecondLevelHolderResolving(f: TV_func) -> TV_func:
setattr(f, 'no-second-level-holder-flattening', True) # noqa: B010
return f
def unholder_return(f: TV_func) -> T.Callable[..., TYPE_var]:
@wraps(f)
def wrapped(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
res = f(*wrapped_args, **wrapped_kwargs)
return _unholder(res)
return T.cast('T.Callable[..., TYPE_var]', wrapped)
def disablerIfNotFound(f: TV_func) -> TV_func:
@wraps(f)
def wrapped(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
kwargs = get_callee_args(wrapped_args)[2]
disabler = kwargs.pop('disabler', False)
ret = f(*wrapped_args, **wrapped_kwargs)
if disabler and not ret.found():
return Disabler()
return ret
return T.cast('TV_func', wrapped)
@dataclass(repr=False, eq=False)
class permittedKwargs:
permitted: T.Set[str]
def __call__(self, f: TV_func) -> TV_func:
@wraps(f)
def wrapped(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
kwargs = get_callee_args(wrapped_args)[2]
unknowns = set(kwargs).difference(self.permitted)
if unknowns:
ustr = ', '.join([f'"{u}"' for u in sorted(unknowns)])
raise InvalidArguments(f'Got unknown keyword arguments {ustr}')
return f(*wrapped_args, **wrapped_kwargs)
return T.cast('TV_func', wrapped)
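# Hypothetical usage sketch for permittedKwargs: it only rejects unknown keyword
# names and does no type checking (the function name below is illustrative):
#
#     @permittedKwargs({'required', 'native'})
#     def some_function(self, node, args, kwargs):
#         ...  # kwargs is guaranteed to contain no keys outside the permitted set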
def typed_operator(operator: MesonOperator,
types: T.Union[T.Type, T.Tuple[T.Type, ...]]) -> T.Callable[['_TV_FN_Operator'], '_TV_FN_Operator']:
"""Decorator that does type checking for operator calls.
The principle here is similar to typed_pos_args, but it is much simpler
since only one other object is ever passed.
"""
def inner(f: '_TV_FN_Operator') -> '_TV_FN_Operator':
@wraps(f)
def wrapper(self: 'InterpreterObject', other: TYPE_var) -> TYPE_var:
if not isinstance(other, types):
raise InvalidArguments(f'The `{operator.value}` of {self.display_name()} does not accept objects of type {type(other).__name__} ({other})')
return f(self, other)
return T.cast('_TV_FN_Operator', wrapper)
return inner
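# Hypothetical usage sketch for typed_operator, assuming an operator member such
# as MesonOperator.PLUS and an integer-holding object (names are illustrative):
#
#     @typed_operator(MesonOperator.PLUS, int)
#     def op_plus(self, other: int) -> int:
#         return self.held_object + other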
def unary_operator(operator: MesonOperator) -> T.Callable[['_TV_FN_Operator'], '_TV_FN_Operator']:
"""Decorator that does type checking for unary operator calls.
This decorator is for unary operators that do not take any other objects.
It should be impossible for a user to accidentally break this. Triggering
this check always indicates a bug in the Meson interpreter.
"""
def inner(f: '_TV_FN_Operator') -> '_TV_FN_Operator':
@wraps(f)
def wrapper(self: 'InterpreterObject', other: TYPE_var) -> TYPE_var:
if other is not None:
raise mesonlib.MesonBugException(f'The unary operator `{operator.value}` of {self.display_name()} was passed the object {other} of type {type(other).__name__}')
return f(self, other)
return T.cast('_TV_FN_Operator', wrapper)
return inner
def typed_pos_args(name: str, *types: T.Union[T.Type, T.Tuple[T.Type, ...]],
varargs: T.Optional[T.Union[T.Type, T.Tuple[T.Type, ...]]] = None,
optargs: T.Optional[T.List[T.Union[T.Type, T.Tuple[T.Type, ...]]]] = None,
min_varargs: int = 0, max_varargs: int = 0) -> T.Callable[..., T.Any]:
"""Decorator that types type checking of positional arguments.
This supports two different models of optional arguments, the first is the
variadic argument model. Variadic arguments are a possibly bounded,
possibly unbounded number of arguments of the same type (unions are
supported). The second is the standard default value model, in this case
a number of optional arguments may be provided, but they are still
ordered, and they may have different types.
This function does not support mixing variadic and default arguments.
:name: The name of the decorated function (as displayed in error messages)
:varargs: The type(s) of any variadic arguments the function takes. If
None the function takes no variadic args
:min_varargs: the minimum number of variadic arguments taken
:max_varargs: the maximum number of variadic arguments taken. 0 means unlimited
:optargs: The types of any optional positional arguments taken. If None
then no optional arguments are taken.
Some examples of usage are given below:
>>> @typed_pos_args('mod.func', str, (str, int))
... def func(self, state: ModuleState, args: T.Tuple[str, T.Union[str, int]], kwargs: T.Dict[str, T.Any]) -> T.Any:
... pass
>>> @typed_pos_args('method', str, varargs=str)
... def method(self, node: BaseNode, args: T.Tuple[str, T.List[str]], kwargs: T.Dict[str, T.Any]) -> T.Any:
... pass
>>> @typed_pos_args('method', varargs=str, min_varargs=1)
... def method(self, node: BaseNode, args: T.Tuple[T.List[str]], kwargs: T.Dict[str, T.Any]) -> T.Any:
... pass
>>> @typed_pos_args('method', str, optargs=[(str, int), str])
... def method(self, node: BaseNode, args: T.Tuple[str, T.Optional[T.Union[str, int]], T.Optional[str]], kwargs: T.Dict[str, T.Any]) -> T.Any:
... pass
When should you choose `typed_pos_args('name', varargs=str,
min_varargs=1)` vs `typed_pos_args('name', str, varargs=str)`?
The answer has to do with the semantics of the function: if all of the
inputs are the same type (such as with `files()`) then the former is
correct, as all of the arguments are string names of files. If the first
argument is something else then it should be separated.
"""
def inner(f: TV_func) -> TV_func:
@wraps(f)
def wrapper(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
args = get_callee_args(wrapped_args)[1]
# These are implementation programming errors, end users should never see them.
assert isinstance(args, list), args
assert max_varargs >= 0, 'max_varargs cannot be negative'
assert min_varargs >= 0, 'min_varargs cannot be negative'
assert optargs is None or varargs is None, \
'varargs and optargs not supported together as this would be ambiguous'
num_args = len(args)
num_types = len(types)
a_types = types
if varargs:
min_args = num_types + min_varargs
max_args = num_types + max_varargs
if max_varargs == 0 and num_args < min_args:
raise InvalidArguments(f'{name} takes at least {min_args} arguments, but got {num_args}.')
elif max_varargs != 0 and (num_args < min_args or num_args > max_args):
raise InvalidArguments(f'{name} takes between {min_args} and {max_args} arguments, but got {num_args}.')
elif optargs:
if num_args < num_types:
raise InvalidArguments(f'{name} takes at least {num_types} arguments, but got {num_args}.')
elif num_args > num_types + len(optargs):
raise InvalidArguments(f'{name} takes at most {num_types + len(optargs)} arguments, but got {num_args}.')
# Add the number of positional arguments required
if num_args > num_types:
diff = num_args - num_types
a_types = tuple(list(types) + list(optargs[:diff]))
elif num_args != num_types:
raise InvalidArguments(f'{name} takes exactly {num_types} arguments, but got {num_args}.')
for i, (arg, type_) in enumerate(itertools.zip_longest(args, a_types, fillvalue=varargs), start=1):
if not isinstance(arg, type_):
if isinstance(type_, tuple):
shouldbe = 'one of: {}'.format(", ".join(f'"{t.__name__}"' for t in type_))
else:
shouldbe = f'"{type_.__name__}"'
raise InvalidArguments(f'{name} argument {i} was of type "{type(arg).__name__}" but should have been {shouldbe}')
# Ensure that we're actually passing a tuple.
# Depending on what kind of function we're calling the length of
# wrapped_args can vary.
nargs = list(wrapped_args)
i = nargs.index(args)
if varargs:
# if we have varargs we need to split them into a separate
# tuple, as python's typing doesn't understand tuples with
# fixed elements and variadic elements, only one or the other.
# so in that case we need T.Tuple[int, str, float, T.Tuple[str, ...]]
pos = args[:len(types)]
var = list(args[len(types):])
pos.append(var)
nargs[i] = tuple(pos)
elif optargs:
if num_args < num_types + len(optargs):
diff = num_types + len(optargs) - num_args
nargs[i] = tuple(list(args) + [None] * diff)
else:
nargs[i] = args
else:
nargs[i] = tuple(args)
return f(*nargs, **wrapped_kwargs)
return T.cast('TV_func', wrapper)
return inner
class ContainerTypeInfo:
"""Container information for keyword arguments.
For keyword arguments that are containers (list or dict), this class encodes
that information.
:param container: the type of container
:param contains: the types the container holds
:param pairs: if the container is supposed to be of even length.
This is mainly used for interfaces that predate the addition of dictionaries, and use
`[key, value, key2, value2]` format.
:param allow_empty: Whether this container is allowed to be empty
There are some cases where containers not only must be passed, but must
not be empty, and other cases where an empty container is allowed.
"""
def __init__(self, container: T.Type, contains: T.Union[T.Type, T.Tuple[T.Type, ...]], *,
pairs: bool = False, allow_empty: bool = True):
self.container = container
self.contains = contains
self.pairs = pairs
self.allow_empty = allow_empty
def check(self, value: T.Any) -> bool:
"""Check that a value is valid.
:param value: A value to check
:return: True if it is valid, False otherwise
"""
if not isinstance(value, self.container):
return False
iter_ = iter(value.values()) if isinstance(value, dict) else iter(value)
for each in iter_:
if not isinstance(each, self.contains):
return False
if self.pairs and len(value) % 2 != 0:
return False
if not value and not self.allow_empty:
return False
return True
def description(self) -> str:
"""Human readable description of this container type.
:return: string to be printed
"""
container = 'dict' if self.container is dict else 'array'
if isinstance(self.contains, tuple):
contains = ' | '.join([t.__name__ for t in self.contains])
else:
contains = self.contains.__name__
s = f'{container}[{contains}]'
if self.pairs:
s += ' that has even size'
if not self.allow_empty:
s += ' that cannot be empty'
return s
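# A minimal sketch of how ContainerTypeInfo describes and checks values
# (illustrative only, not executed as part of this module):
#
#     info = ContainerTypeInfo(list, str, allow_empty=False)
#     info.check(['a', 'b'])   # True
#     info.check([])           # False: empty containers are rejected
#     info.check(['a', 1])     # False: 1 is not a str
#     info.description()       # 'array[str] that cannot be empty'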
_T = T.TypeVar('_T')
class _NULL_T:
"""Special null type for evolution, this is an implementation detail."""
_NULL = _NULL_T()
class KwargInfo(T.Generic[_T]):
"""A description of a keyword argument to a meson function
This is used to describe a value to the :func:typed_kwargs function.
:param name: the name of the parameter
:param types: A type or tuple of types that are allowed, or a :class:ContainerType
:param required: Whether this is a required keyword argument. defaults to False
:param listify: If true, then the argument will be listified before being
checked. This is useful for cases where the Meson DSL allows a scalar or
a container, but internally we only want to work with containers
:param default: A default value to use if this isn't set. defaults to None,
this may be safely set to a mutable type, as long as that type does not
itself contain mutable types, typed_kwargs will copy the default
:param since: Meson version in which this argument has been added. defaults to None
:param since_message: An extra message to pass to FeatureNew when since is triggered
:param deprecated: Meson version in which this argument has been deprecated. defaults to None
:param deprecated_message: An extra message to pass to FeatureDeprecated
when deprecated is triggered
:param validator: A callable that does additional validation. This is mainly
intended for cases where a string is expected, but only a few specific
values are accepted. Must return None if the input is valid, or a
message if the input is invalid
:param convertor: A callable that converts the raw input value into a
different type. This is intended for cases such as the meson DSL using a
string, but the implementation using an Enum. This should not do
validation, just conversion.
:param deprecated_values: a dictionary mapping a value to the version of
meson it was deprecated in. The value may be any valid value for this
argument.
:param since_values: a dictionary mapping a value to the version of meson it was
added in.
:param not_set_warning: A warning message that is logged if the kwarg is not
set by the user.
"""
def __init__(self, name: str,
types: T.Union[T.Type[_T], T.Tuple[T.Union[T.Type[_T], ContainerTypeInfo], ...], ContainerTypeInfo],
*, required: bool = False, listify: bool = False,
default: T.Optional[_T] = None,
since: T.Optional[str] = None,
since_message: T.Optional[str] = None,
since_values: T.Optional[T.Dict[_T, T.Union[str, T.Tuple[str, str]]]] = None,
deprecated: T.Optional[str] = None,
deprecated_message: T.Optional[str] = None,
deprecated_values: T.Optional[T.Dict[_T, T.Union[str, T.Tuple[str, str]]]] = None,
validator: T.Optional[T.Callable[[T.Any], T.Optional[str]]] = None,
convertor: T.Optional[T.Callable[[_T], object]] = None,
not_set_warning: T.Optional[str] = None):
self.name = name
self.types = types
self.required = required
self.listify = listify
self.default = default
self.since = since
self.since_message = since_message
self.since_values = since_values
self.deprecated = deprecated
self.deprecated_message = deprecated_message
self.deprecated_values = deprecated_values
self.validator = validator
self.convertor = convertor
self.not_set_warning = not_set_warning
def evolve(self, *,
name: T.Union[str, _NULL_T] = _NULL,
required: T.Union[bool, _NULL_T] = _NULL,
listify: T.Union[bool, _NULL_T] = _NULL,
default: T.Union[_T, None, _NULL_T] = _NULL,
since: T.Union[str, None, _NULL_T] = _NULL,
since_message: T.Union[str, None, _NULL_T] = _NULL,
since_values: T.Union[T.Dict[_T, T.Union[str, T.Tuple[str, str]]], None, _NULL_T] = _NULL,
deprecated: T.Union[str, None, _NULL_T] = _NULL,
deprecated_message: T.Union[str, None, _NULL_T] = _NULL,
deprecated_values: T.Union[T.Dict[_T, T.Union[str, T.Tuple[str, str]]], None, _NULL_T] = _NULL,
validator: T.Union[T.Callable[[_T], T.Optional[str]], None, _NULL_T] = _NULL,
convertor: T.Union[T.Callable[[_T], TYPE_var], None, _NULL_T] = _NULL) -> 'KwargInfo':
"""Create a shallow copy of this KwargInfo, with modifications.
This allows us to create a new copy of a KwargInfo with modifications.
This allows us to use a shared kwarg that implements complex logic, but
has slight differences in usage, such as being added to different
functions in different versions of Meson.
The use the _NULL special value here allows us to pass None, which has
meaning in many of these cases. _NULL itself is never stored, always
being replaced by either the copy in self, or the provided new version.
"""
return type(self)(
name if not isinstance(name, _NULL_T) else self.name,
self.types,
listify=listify if not isinstance(listify, _NULL_T) else self.listify,
required=required if not isinstance(required, _NULL_T) else self.required,
default=default if not isinstance(default, _NULL_T) else self.default,
since=since if not isinstance(since, _NULL_T) else self.since,
since_message=since_message if not isinstance(since_message, _NULL_T) else self.since_message,
since_values=since_values if not isinstance(since_values, _NULL_T) else self.since_values,
deprecated=deprecated if not isinstance(deprecated, _NULL_T) else self.deprecated,
deprecated_message=deprecated_message if not isinstance(deprecated_message, _NULL_T) else self.deprecated_message,
deprecated_values=deprecated_values if not isinstance(deprecated_values, _NULL_T) else self.deprecated_values,
validator=validator if not isinstance(validator, _NULL_T) else self.validator,
convertor=convertor if not isinstance(convertor, _NULL_T) else self.convertor,
)
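# A minimal sketch of evolve(): a shared KwargInfo can be reused with small
# per-function tweaks without mutating the original (names are illustrative):
#
#     _BASE_KW = KwargInfo('install', bool, default=False)
#     _NEW_KW = _BASE_KW.evolve(since='0.60.0')
#     # _BASE_KW.since is still None; _NEW_KW carries the new version string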
def typed_kwargs(name: str, *types: KwargInfo) -> T.Callable[..., T.Any]:
"""Decorator for type checking keyword arguments.
Used to wrap a meson DSL implementation function, where it checks various
things about keyword arguments, including the type, and various other
information. For non-required values it sets the value to a default, which
means the value will always be provided.
If the type is a :class:ContainerTypeInfo, then the default value will be
passed as an argument to the container initializer, making a shallow copy
:param name: the name of the function, including the object it's attached to
(if applicable)
:param *types: KwargInfo entries for each keyword argument.
"""
def inner(f: TV_func) -> TV_func:
def types_description(types_tuple: T.Tuple[T.Union[T.Type, ContainerTypeInfo], ...]) -> str:
candidates = []
for t in types_tuple:
if isinstance(t, ContainerTypeInfo):
candidates.append(t.description())
else:
candidates.append(t.__name__)
shouldbe = 'one of: ' if len(candidates) > 1 else ''
shouldbe += ', '.join(candidates)
return shouldbe
def raw_description(t: object) -> str:
"""describe a raw type (ie, one that is not a ContainerTypeInfo)."""
if isinstance(t, list):
if t:
return f"array[{' | '.join(sorted(mesonlib.OrderedSet(type(v).__name__ for v in t)))}]"
return 'array[]'
elif isinstance(t, dict):
if t:
return f"dict[{' | '.join(sorted(mesonlib.OrderedSet(type(v).__name__ for v in t.values())))}]"
return 'dict[]'
return type(t).__name__
def check_value_type(types_tuple: T.Tuple[T.Union[T.Type, ContainerTypeInfo], ...],
value: T.Any) -> bool:
for t in types_tuple:
if isinstance(t, ContainerTypeInfo):
if t.check(value):
return True
elif isinstance(value, t):
return True
return False
@wraps(f)
def wrapper(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
def emit_feature_change(values: T.Dict[str, T.Union[str, T.Tuple[str, str]]], feature: T.Union[T.Type['FeatureDeprecated'], T.Type['FeatureNew']]) -> None:
for n, version in values.items():
if isinstance(value, (dict, list)):
warn = n in value
else:
warn = n == value
if warn:
if isinstance(version, tuple):
version, msg = version
else:
msg = None
feature.single_use(f'"{name}" keyword argument "{info.name}" value "{n}"', version, subproject, msg, location=node)
node, _, _kwargs, subproject = get_callee_args(wrapped_args)
# Cast here, as the convertor function may place something other than a TYPE_var in the kwargs
kwargs = T.cast('T.Dict[str, object]', _kwargs)
all_names = {t.name for t in types}
unknowns = set(kwargs).difference(all_names)
if unknowns:
ustr = ', '.join([f'"{u}"' for u in sorted(unknowns)])
raise InvalidArguments(f'{name} got unknown keyword arguments {ustr}')
for info in types:
types_tuple = info.types if isinstance(info.types, tuple) else (info.types,)
value = kwargs.get(info.name)
if value is not None:
if info.since:
feature_name = info.name + ' arg in ' + name
FeatureNew.single_use(feature_name, info.since, subproject, info.since_message, location=node)
if info.deprecated:
feature_name = info.name + ' arg in ' + name
FeatureDeprecated.single_use(feature_name, info.deprecated, subproject, info.deprecated_message, location=node)
if info.listify:
kwargs[info.name] = value = mesonlib.listify(value)
if not check_value_type(types_tuple, value):
shouldbe = types_description(types_tuple)
raise InvalidArguments(f'{name} keyword argument {info.name!r} was of type {raw_description(value)} but should have been {shouldbe}')
if info.validator is not None:
msg = info.validator(value)
if msg is not None:
raise InvalidArguments(f'{name} keyword argument "{info.name}" {msg}')
if info.deprecated_values is not None:
emit_feature_change(info.deprecated_values, FeatureDeprecated)
if info.since_values is not None:
emit_feature_change(info.since_values, FeatureNew)
elif info.required:
raise InvalidArguments(f'{name} is missing required keyword argument "{info.name}"')
else:
# Set the value to the default, thus ensuring all kwargs are present.
# This simplifies both the type checking and the usage.
assert check_value_type(types_tuple, info.default), f'In function {name} the default value of {info.name} is not a valid type, got {type(info.default)} expected {types_description(types_tuple)}'
# Create a shallow copy of the container. This allows mutable
# types to be used safely as default values
kwargs[info.name] = copy.copy(info.default)
if info.not_set_warning:
mlog.warning(info.not_set_warning)
if info.convertor:
kwargs[info.name] = info.convertor(kwargs[info.name])
return f(*wrapped_args, **wrapped_kwargs)
return T.cast('TV_func', wrapper)
return inner
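# Hypothetical usage sketch combining typed_kwargs, KwargInfo and
# ContainerTypeInfo (the function and keyword names are made up; only the
# decorator machinery above is real):
#
#     @typed_kwargs(
#         'mod.func',
#         KwargInfo('required', bool, default=False),
#         KwargInfo('sources', ContainerTypeInfo(list, str), listify=True, default=[]),
#     )
#     def func(self, node, args, kwargs):
#         # 'required' and 'sources' are always present here, with defaults
#         # filled in and types already validated.
#         ...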
# This cannot be a dataclass due to https://github.com/python/mypy/issues/5374
class FeatureCheckBase(metaclass=abc.ABCMeta):
"Base class for feature version checks"
feature_registry: T.ClassVar[T.Dict[str, T.Dict[str, T.Set[T.Tuple[str, T.Optional['mparser.BaseNode']]]]]]
emit_notice = False
def __init__(self, feature_name: str, feature_version: str, extra_message: str = ''):
self.feature_name = feature_name # type: str
self.feature_version = feature_version # type: str
self.extra_message = extra_message # type: str
@staticmethod
def get_target_version(subproject: str) -> str:
# Don't do any checks if project() has not been parsed yet
if subproject not in mesonlib.project_meson_versions:
return ''
return mesonlib.project_meson_versions[subproject]
@staticmethod
@abc.abstractmethod
def check_version(target_version: str, feature_version: str) -> bool:
pass
def use(self, subproject: 'SubProject', location: T.Optional['mparser.BaseNode'] = None) -> None:
tv = self.get_target_version(subproject)
# No target version
if tv == '':
return
# Target version is new enough, don't warn
if self.check_version(tv, self.feature_version) and not self.emit_notice:
return
# Feature is too new for target version or we want to emit notices, register it
if subproject not in self.feature_registry:
self.feature_registry[subproject] = {self.feature_version: set()}
register = self.feature_registry[subproject]
if self.feature_version not in register:
register[self.feature_version] = set()
feature_key = (self.feature_name, location)
if feature_key in register[self.feature_version]:
# Don't warn about the same feature multiple times
# FIXME: This is needed to prevent duplicate warnings, but also
# means we won't warn about a feature used in multiple places.
return
register[self.feature_version].add(feature_key)
# Target version is new enough, don't warn even if it is registered for notice
if self.check_version(tv, self.feature_version):
return
self.log_usage_warning(tv, location)
@classmethod
def report(cls, subproject: str) -> None:
if subproject not in cls.feature_registry:
return
warning_str = cls.get_warning_str_prefix(cls.get_target_version(subproject))
notice_str = cls.get_notice_str_prefix(cls.get_target_version(subproject))
fv = cls.feature_registry[subproject]
tv = cls.get_target_version(subproject)
for version in sorted(fv.keys()):
if cls.check_version(tv, version):
notice_str += '\n * {}: {}'.format(version, {i[0] for i in fv[version]})
else:
warning_str += '\n * {}: {}'.format(version, {i[0] for i in fv[version]})
if '\n' in notice_str:
mlog.notice(notice_str, fatal=False)
if '\n' in warning_str:
mlog.warning(warning_str)
def log_usage_warning(self, tv: str, location: T.Optional['mparser.BaseNode']) -> None:
raise InterpreterException('log_usage_warning not implemented')
@staticmethod
def get_warning_str_prefix(tv: str) -> str:
raise InterpreterException('get_warning_str_prefix not implemented')
@staticmethod
def get_notice_str_prefix(tv: str) -> str:
raise InterpreterException('get_notice_str_prefix not implemented')
def __call__(self, f: TV_func) -> TV_func:
@wraps(f)
def wrapped(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
node, _, _, subproject = get_callee_args(wrapped_args)
if subproject is None:
raise AssertionError(f'{wrapped_args!r}')
self.use(subproject, node)
return f(*wrapped_args, **wrapped_kwargs)
return T.cast('TV_func', wrapped)
@classmethod
def single_use(cls, feature_name: str, version: str, subproject: 'SubProject',
extra_message: str = '', location: T.Optional['mparser.BaseNode'] = None) -> None:
"""Oneline version that instantiates and calls use()."""
cls(feature_name, version, extra_message).use(subproject, location)
class FeatureNew(FeatureCheckBase):
"""Checks for new features"""
# Class variable, shared across all instances
#
# Format: {subproject: {feature_version: set(feature_names)}}
feature_registry = {} # type: T.ClassVar[T.Dict[str, T.Dict[str, T.Set[T.Tuple[str, T.Optional[mparser.BaseNode]]]]]]
@staticmethod
def check_version(target_version: str, feature_version: str) -> bool:
return mesonlib.version_compare_condition_with_min(target_version, feature_version)
@staticmethod
def get_warning_str_prefix(tv: str) -> str:
return f'Project specifies a minimum meson_version \'{tv}\' but uses features which were added in newer versions:'
@staticmethod
def get_notice_str_prefix(tv: str) -> str:
return ''
def log_usage_warning(self, tv: str, location: T.Optional['mparser.BaseNode']) -> None:
args = [
'Project targeting', f"'{tv}'",
'but tried to use feature introduced in',
f"'{self.feature_version}':",
f'{self.feature_name}.',
]
if self.extra_message:
args.append(self.extra_message)
mlog.warning(*args, location=location)
class FeatureDeprecated(FeatureCheckBase):
"""Checks for deprecated features"""
# Class variable, shared across all instances
#
# Format: {subproject: {feature_version: set(feature_names)}}
feature_registry = {} # type: T.ClassVar[T.Dict[str, T.Dict[str, T.Set[T.Tuple[str, T.Optional[mparser.BaseNode]]]]]]
emit_notice = True
@staticmethod
def check_version(target_version: str, feature_version: str) -> bool:
# For deprecation checks we need to return the inverse of FeatureNew checks
return not mesonlib.version_compare_condition_with_min(target_version, feature_version)
@staticmethod
def get_warning_str_prefix(tv: str) -> str:
return 'Deprecated features used:'
@staticmethod
def get_notice_str_prefix(tv: str) -> str:
return 'Future-deprecated features used:'
def log_usage_warning(self, tv: str, location: T.Optional['mparser.BaseNode']) -> None:
args = [
'Project targeting', f"'{tv}'",
'but tried to use feature deprecated since',
f"'{self.feature_version}':",
f'{self.feature_name}.',
]
if self.extra_message:
args.append(self.extra_message)
mlog.warning(*args, location=location)
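# A minimal sketch of how these checks are typically applied (the decorated
# function and version strings below are illustrative): either as a decorator
# on an interpreter function, or imperatively via single_use() as typed_kwargs
# does above.
#
#     @FeatureNew('somefunc', '0.50.0')
#     def func_somefunc(self, node, args, kwargs):
#         ...
#
#     FeatureDeprecated.single_use('old kwarg', '0.55.0', subproject, location=node)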
# This cannot be a dataclass due to https://github.com/python/mypy/issues/5374
class FeatureCheckKwargsBase(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def feature_check_class(self) -> T.Type[FeatureCheckBase]:
pass
def __init__(self, feature_name: str, feature_version: str,
kwargs: T.List[str], extra_message: T.Optional[str] = None):
self.feature_name = feature_name
self.feature_version = feature_version
self.kwargs = kwargs
self.extra_message = extra_message
def __call__(self, f: TV_func) -> TV_func:
@wraps(f)
def wrapped(*wrapped_args: T.Any, **wrapped_kwargs: T.Any) -> T.Any:
node, _, kwargs, subproject = get_callee_args(wrapped_args)
if subproject is None:
raise AssertionError(f'{wrapped_args!r}')
for arg in self.kwargs:
if arg not in kwargs:
continue
name = arg + ' arg in ' + self.feature_name
self.feature_check_class.single_use(
name, self.feature_version, subproject, self.extra_message, node)
return f(*wrapped_args, **wrapped_kwargs)
return T.cast('TV_func', wrapped)
class FeatureNewKwargs(FeatureCheckKwargsBase):
feature_check_class = FeatureNew
class FeatureDeprecatedKwargs(FeatureCheckKwargsBase):
feature_check_class = FeatureDeprecated
#!/usr/bin/env python3
import unittest
import numpy as np
from typing import Optional
from panda import Panda
from panda.tests.safety import libpandasafety_py
import panda.tests.safety.common as common
from panda.tests.safety.common import CANPackerPanda, make_msg, MAX_WRONG_COUNTERS, UNSAFE_MODE
class Btn:
CANCEL = 2
SET = 3
RESUME = 4
HONDA_NIDEC = 0
HONDA_BOSCH = 1
# Honda safety has several different configurations tested here:
# * Nidec
# * normal
# * alt SCM messages
# * interceptor
# * interceptor with alt SCM messages
# * Bosch
# * Bosch with Longitudinal Support
class HondaButtonEnableBase(common.PandaSafetyTest):
# pylint: disable=no-member,abstract-method
# override these inherited tests since we're using button enable
def test_disable_control_allowed_from_cruise(self):
pass
def test_enable_control_allowed_from_cruise(self):
pass
def test_cruise_engaged_prev(self):
pass
def test_buttons_with_main_off(self):
for btn in (Btn.SET, Btn.RESUME, Btn.CANCEL):
self.safety.set_controls_allowed(1)
self._rx(self._acc_state_msg(False))
self._rx(self._button_msg(btn, main_on=False))
self.assertFalse(self.safety.get_controls_allowed())
def test_resume_button(self):
self._rx(self._acc_state_msg(True))
self.safety.set_controls_allowed(0)
self._rx(self._button_msg(Btn.RESUME, main_on=True))
self.assertTrue(self.safety.get_controls_allowed())
def test_set_button(self):
self._rx(self._acc_state_msg(True))
self.safety.set_controls_allowed(0)
self._rx(self._button_msg(Btn.SET, main_on=True))
self.assertTrue(self.safety.get_controls_allowed())
def test_cancel_button(self):
self.safety.set_controls_allowed(1)
self._rx(self._button_msg(Btn.CANCEL, main_on=True))
self.assertFalse(self.safety.get_controls_allowed())
def test_disengage_on_main(self):
self.safety.set_controls_allowed(1)
self._rx(self._acc_state_msg(True))
self.assertTrue(self.safety.get_controls_allowed())
self._rx(self._acc_state_msg(False))
self.assertFalse(self.safety.get_controls_allowed())
def test_rx_hook(self):
# TODO: move this test to common
# checksum checks
for msg in ["btn", "gas", "speed"]:
self.safety.set_controls_allowed(1)
if msg == "btn":
to_push = self._button_msg(Btn.SET)
if msg == "gas":
to_push = self._gas_msg(0)
if msg == "speed":
to_push = self._speed_msg(0)
self.assertTrue(self._rx(to_push))
if msg != "btn":
to_push[0].data[4] = 0 # invalidate checksum
to_push[0].data[5] = 0
to_push[0].data[6] = 0
to_push[0].data[7] = 0
self.assertFalse(self._rx(to_push))
self.assertFalse(self.safety.get_controls_allowed())
# counter
# reset wrong_counters to zero by sending valid messages
for i in range(MAX_WRONG_COUNTERS + 1):
self.__class__.cnt_speed += 1
self.__class__.cnt_button += 1
self.__class__.cnt_powertrain_data += 1
if i < MAX_WRONG_COUNTERS:
self.safety.set_controls_allowed(1)
self._rx(self._button_msg(Btn.SET))
self._rx(self._speed_msg(0))
self._rx(self._gas_msg(0))
else:
self.assertFalse(self._rx(self._button_msg(Btn.SET)))
self.assertFalse(self._rx(self._speed_msg(0)))
self.assertFalse(self._rx(self._gas_msg(0)))
self.assertFalse(self.safety.get_controls_allowed())
# restore counters for future tests with a couple of good messages
for i in range(2):
self.safety.set_controls_allowed(1)
self._rx(self._button_msg(Btn.SET))
self._rx(self._speed_msg(0))
self._rx(self._gas_msg(0))
self._rx(self._button_msg(Btn.SET, main_on=True))
self.assertTrue(self.safety.get_controls_allowed())
def test_tx_hook_on_pedal_pressed(self):
for mode in [UNSAFE_MODE.DEFAULT, UNSAFE_MODE.DISABLE_DISENGAGE_ON_GAS]:
for pedal in ['brake', 'gas']:
self.safety.set_unsafe_mode(mode)
allow_ctrl = False
if pedal == 'brake':
# brake_pressed_prev and vehicle_moving
self._rx(self._speed_msg(100))
self._rx(self._brake_msg(1))
elif pedal == 'gas':
# gas_pressed_prev
self._rx(self._gas_msg(1))
allow_ctrl = mode == UNSAFE_MODE.DISABLE_DISENGAGE_ON_GAS
self.safety.set_controls_allowed(1)
hw = self.safety.get_honda_hw()
if hw == HONDA_NIDEC:
self.safety.set_honda_fwd_brake(False)
self.assertEqual(allow_ctrl, self._tx(self._send_brake_msg(self.MAX_BRAKE)))
self.assertEqual(allow_ctrl, self._tx(self._send_steer_msg(0x1000)))
# reset status
self.safety.set_controls_allowed(0)
self.safety.set_unsafe_mode(UNSAFE_MODE.DEFAULT)
if hw == HONDA_NIDEC:
self._tx(self._send_brake_msg(0))
self._tx(self._send_steer_msg(0))
if pedal == 'brake':
self._rx(self._speed_msg(0))
self._rx(self._brake_msg(0))
elif pedal == 'gas':
self._rx(self._gas_msg(0))
class HondaPcmEnableBase(common.PandaSafetyTest):
# pylint: disable=no-member,abstract-method
def test_buttons(self):
"""
Buttons shouldn't do anything in this configuration,
since our state is tied to the PCM's cruise state.
"""
for controls_allowed in (True, False):
for main_on in (True, False):
# not a valid state
if controls_allowed and not main_on:
continue
for btn in (Btn.SET, Btn.RESUME, Btn.CANCEL):
self.safety.set_controls_allowed(controls_allowed)
self._rx(self._acc_state_msg(main_on))
self._rx(self._button_msg(btn, main_on=main_on))
self.assertEqual(controls_allowed, self.safety.get_controls_allowed())
class HondaBase(common.PandaSafetyTest):
MAX_BRAKE: float = 255
PT_BUS: Optional[int] = None # must be set when inherited
STEER_BUS: Optional[int] = None # must be set when inherited
STANDSTILL_THRESHOLD = 0
RELAY_MALFUNCTION_ADDR = 0xE4
RELAY_MALFUNCTION_BUS = 0
FWD_BUS_LOOKUP = {0: 2, 2: 0}
cnt_speed = 0
cnt_button = 0
cnt_brake = 0
cnt_powertrain_data = 0
cnt_acc_state = 0
@classmethod
def setUpClass(cls):
if cls.__name__ == "HondaBase":
cls.packer = None
cls.safety = None
raise unittest.SkipTest
def _powertrain_data_msg(self, cruise_on=None, brake_pressed=None, gas_pressed=None):
# preserve the state
if cruise_on is None:
# or'd with controls allowed since the tests use it to "enable" cruise
cruise_on = self.safety.get_cruise_engaged_prev() or self.safety.get_controls_allowed()
if brake_pressed is None:
brake_pressed = self.safety.get_brake_pressed_prev()
if gas_pressed is None:
gas_pressed = self.safety.get_gas_pressed_prev()
values = {
"ACC_STATUS": cruise_on,
"BRAKE_PRESSED": brake_pressed,
"PEDAL_GAS": gas_pressed,
"COUNTER": self.cnt_powertrain_data % 4
}
self.__class__.cnt_powertrain_data += 1
return self.packer.make_can_msg_panda("POWERTRAIN_DATA", self.PT_BUS, values)
def _pcm_status_msg(self, enable):
return self._powertrain_data_msg(cruise_on=enable)
def _speed_msg(self, speed):
values = {"XMISSION_SPEED": speed, "COUNTER": self.cnt_speed % 4}
self.__class__.cnt_speed += 1
return self.packer.make_can_msg_panda("ENGINE_DATA", self.PT_BUS, values)
def _acc_state_msg(self, main_on):
values = {"MAIN_ON": main_on, "COUNTER": self.cnt_acc_state % 4}
self.__class__.cnt_acc_state += 1
return self.packer.make_can_msg_panda("SCM_FEEDBACK", self.PT_BUS, values)
def _button_msg(self, buttons, main_on=False):
values = {"CRUISE_BUTTONS": buttons, "COUNTER": self.cnt_button % 4}
self.__class__.cnt_button += 1
return self.packer.make_can_msg_panda("SCM_BUTTONS", self.PT_BUS, values)
def _brake_msg(self, brake):
return self._powertrain_data_msg(brake_pressed=brake)
def _gas_msg(self, gas):
return self._powertrain_data_msg(gas_pressed=gas)
def _send_steer_msg(self, steer):
values = {"STEER_TORQUE": steer}
return self.packer.make_can_msg_panda("STEERING_CONTROL", self.STEER_BUS, values)
def _send_brake_msg(self, brake):
# must be implemented when inherited
raise NotImplementedError
def test_disengage_on_brake(self):
self.safety.set_controls_allowed(1)
self._rx(self._brake_msg(1))
self.assertFalse(self.safety.get_controls_allowed())
def test_steer_safety_check(self):
self.safety.set_controls_allowed(0)
self.assertTrue(self._tx(self._send_steer_msg(0x0000)))
self.assertFalse(self._tx(self._send_steer_msg(0x1000)))
# ********************* Honda Nidec **********************
class TestHondaNidecSafetyBase(HondaBase):
TX_MSGS = [[0xE4, 0], [0x194, 0], [0x1FA, 0], [0x200, 0], [0x30C, 0], [0x33D, 0]]
FWD_BLACKLISTED_ADDRS = {2: [0xE4, 0x194, 0x33D, 0x30C]}
PT_BUS = 0
STEER_BUS = 0
INTERCEPTOR_THRESHOLD = 344
@classmethod
def setUpClass(cls):
if cls.__name__ == "TestHondaNidecSafetyBase":
cls.safety = None
raise unittest.SkipTest
def setUp(self):
self.packer = CANPackerPanda("honda_civic_touring_2016_can_generated")
self.safety = libpandasafety_py.libpandasafety
self.safety.set_safety_hooks(Panda.SAFETY_HONDA_NIDEC, 0)
self.safety.init_tests_honda()
# Honda gas gains are different
def _interceptor_msg(self, gas, addr):
to_send = make_msg(0, addr, 6)
gas2 = gas * 2
to_send[0].data[0] = (gas & 0xFF00) >> 8
to_send[0].data[1] = gas & 0xFF
to_send[0].data[2] = (gas2 & 0xFF00) >> 8
to_send[0].data[3] = gas2 & 0xFF
return to_send
def _send_brake_msg(self, brake):
values = {"COMPUTER_BRAKE": brake}
return self.packer.make_can_msg_panda("BRAKE_COMMAND", 0, values)
def test_fwd_hook(self):
# normal operation, not forwarding AEB
self.FWD_BLACKLISTED_ADDRS[2].append(0x1FA)
self.safety.set_honda_fwd_brake(False)
super().test_fwd_hook()
# TODO: test latching until AEB event is over?
# forwarding AEB brake signal
self.FWD_BLACKLISTED_ADDRS = {2: [0xE4, 0x194, 0x33D, 0x30C]}
self.safety.set_honda_fwd_brake(True)
super().test_fwd_hook()
def test_brake_safety_check(self):
for fwd_brake in [False, True]:
self.safety.set_honda_fwd_brake(fwd_brake)
for brake in np.arange(0, self.MAX_BRAKE + 10, 1):
for controls_allowed in [True, False]:
self.safety.set_controls_allowed(controls_allowed)
if fwd_brake:
send = False # block openpilot brake msg when fwd'ing stock msg
elif controls_allowed:
send = self.MAX_BRAKE >= brake >= 0
else:
send = brake == 0
self.assertEqual(send, self._tx(self._send_brake_msg(brake)))
self.safety.set_honda_fwd_brake(False)
def test_tx_hook_on_interceptor_pressed(self):
for mode in [UNSAFE_MODE.DEFAULT, UNSAFE_MODE.DISABLE_DISENGAGE_ON_GAS]:
self.safety.set_unsafe_mode(mode)
# gas_interceptor_prev > INTERCEPTOR_THRESHOLD
self._rx(self._interceptor_msg(self.INTERCEPTOR_THRESHOLD + 1, 0x201))
self._rx(self._interceptor_msg(self.INTERCEPTOR_THRESHOLD + 1, 0x201))
allow_ctrl = mode == UNSAFE_MODE.DISABLE_DISENGAGE_ON_GAS
self.safety.set_controls_allowed(1)
self.safety.set_honda_fwd_brake(False)
self.assertEqual(allow_ctrl, self._tx(self._send_brake_msg(self.MAX_BRAKE)))
self.assertEqual(allow_ctrl, self._tx(self._interceptor_msg(self.INTERCEPTOR_THRESHOLD, 0x200)))
self.assertEqual(allow_ctrl, self._tx(self._send_steer_msg(0x1000)))
# reset status
self.safety.set_controls_allowed(0)
self.safety.set_unsafe_mode(UNSAFE_MODE.DEFAULT)
self._tx(self._send_brake_msg(0))
self._tx(self._send_steer_msg(0))
self._tx(self._interceptor_msg(0, 0x200))
self.safety.set_gas_interceptor_detected(False)
class TestHondaNidecSafety(HondaButtonEnableBase, TestHondaNidecSafetyBase):
"""
Covers the Honda Nidec safety mode
"""
class TestHondaNidecInterceptorSafety(TestHondaNidecSafety, common.InterceptorSafetyTest):
"""
Covers the Honda Nidec safety mode with a gas interceptor
"""
def setUp(self):
TestHondaNidecSafety.setUpClass()
common.InterceptorSafetyTest.setUpClass()
class TestHondaNidecAltSafety(HondaButtonEnableBase, TestHondaNidecSafetyBase):
"""
Covers the Honda Nidec safety mode with alt SCM messages
"""
def setUp(self):
self.packer = CANPackerPanda("acura_ilx_2016_can_generated")
self.safety = libpandasafety_py.libpandasafety
self.safety.set_safety_hooks(Panda.SAFETY_HONDA_NIDEC, Panda.FLAG_HONDA_NIDEC_ALT)
self.safety.init_tests_honda()
def _acc_state_msg(self, main_on):
values = {"MAIN_ON": main_on, "COUNTER": self.cnt_acc_state % 4}
self.__class__.cnt_acc_state += 1
return self.packer.make_can_msg_panda("SCM_BUTTONS", self.PT_BUS, values)
def _button_msg(self, buttons, main_on=False):
values = {"CRUISE_BUTTONS": buttons, "MAIN_ON": main_on, "COUNTER": self.cnt_button % 4}
self.__class__.cnt_button += 1
return self.packer.make_can_msg_panda("SCM_BUTTONS", self.PT_BUS, values)
class TestHondaNidecAltInterceptorSafety(TestHondaNidecSafety, common.InterceptorSafetyTest):
"""
Covers the Honda Nidec safety mode with alt SCM messages and gas interceptor
"""
def setUp(self):
self.packer = CANPackerPanda("acura_ilx_2016_can_generated")
self.safety = libpandasafety_py.libpandasafety
self.safety.set_safety_hooks(Panda.SAFETY_HONDA_NIDEC, Panda.FLAG_HONDA_NIDEC_ALT)
self.safety.init_tests_honda()
common.InterceptorSafetyTest.setUpClass()
def _acc_state_msg(self, main_on):
values = {"MAIN_ON": main_on, "COUNTER": self.cnt_acc_state % 4}
self.__class__.cnt_acc_state += 1
return self.packer.make_can_msg_panda("SCM_BUTTONS", self.PT_BUS, values)
def _button_msg(self, buttons, main_on=False):
values = {"CRUISE_BUTTONS": buttons, "MAIN_ON": main_on, "COUNTER": self.cnt_button % 4}
self.__class__.cnt_button += 1
return self.packer.make_can_msg_panda("SCM_BUTTONS", self.PT_BUS, values)
# ********************* Honda Bosch **********************
class TestHondaBoschSafetyBase(HondaBase):
PT_BUS = 1
STEER_BUS = 0
TX_MSGS = [[0xE4, 0], [0xE5, 0], [0x296, 1], [0x33D, 0], [0x33DA, 0], [0x33DB, 0]]
FWD_BLACKLISTED_ADDRS = {2: [0xE4, 0xE5, 0x33D, 0x33DA, 0x33DB]}
@classmethod
def setUpClass(cls):
if cls.__name__ == "TestHondaBoschSafetyBase":
cls.packer = None
cls.safety = None
raise unittest.SkipTest
def setUp(self):
self.packer = CANPackerPanda("honda_accord_2018_can_generated")
self.safety = libpandasafety_py.libpandasafety
def _alt_brake_msg(self, brake):
values = {"BRAKE_PRESSED": brake, "COUNTER": self.cnt_brake % 4}
self.__class__.cnt_brake += 1
return self.packer.make_can_msg_panda("BRAKE_MODULE", self.PT_BUS, values)
def _send_brake_msg(self, brake):
pass
# TODO: add back in once alternative brake checksum/counter validation is added
# def test_alt_brake_rx_hook(self):
# self.safety.set_honda_alt_brake_msg(1)
# self.safety.set_controls_allowed(1)
# to_push = self._alt_brake_msg(0)
# self.assertTrue(self._rx(to_push))
# to_push[0].RDLR = to_push[0].RDLR & 0xFFF0FFFF # invalidate checksum
# self.assertFalse(self._rx(to_push))
# self.assertFalse(self.safety.get_controls_allowed())
def test_alt_disengage_on_brake(self):
self.safety.set_honda_alt_brake_msg(1)
self.safety.set_controls_allowed(1)
self._rx(self._alt_brake_msg(1))
self.assertFalse(self.safety.get_controls_allowed())
self.safety.set_honda_alt_brake_msg(0)
self.safety.set_controls_allowed(1)
self._rx(self._alt_brake_msg(1))
self.assertTrue(self.safety.get_controls_allowed())
class TestHondaBoschSafety(HondaPcmEnableBase, TestHondaBoschSafetyBase):
"""
Covers the Honda Bosch safety mode with stock longitudinal
"""
def setUp(self):
super().setUp()
self.safety.set_safety_hooks(Panda.SAFETY_HONDA_BOSCH, 0)
self.safety.init_tests_honda()
def test_spam_cancel_safety_check(self):
self.safety.set_controls_allowed(0)
self.assertTrue(self._tx(self._button_msg(Btn.CANCEL)))
self.assertFalse(self._tx(self._button_msg(Btn.RESUME)))
self.assertFalse(self._tx(self._button_msg(Btn.SET)))
# do not block resume if we are engaged already
self.safety.set_controls_allowed(1)
self.assertTrue(self._tx(self._button_msg(Btn.RESUME)))
class TestHondaBoschLongSafety(HondaButtonEnableBase, TestHondaBoschSafetyBase):
"""
Covers the Honda Bosch safety mode with longitudinal control
"""
NO_GAS = -30000
MAX_GAS = 2000
MAX_BRAKE = -3.5
STEER_BUS = 1
TX_MSGS = [[0xE4, 1], [0x1DF, 1], [0x1EF, 1], [0x1FA, 1], [0x30C, 1], [0x33D, 1], [0x33DA, 1], [0x33DB, 1], [0x39F, 1], [0x18DAB0F1, 1]]
FWD_BLACKLISTED_ADDRS = {2: [0xE4, 0xE5, 0x33D, 0x33DA, 0x33DB]}
def setUp(self):
super().setUp()
self.safety.set_safety_hooks(Panda.SAFETY_HONDA_BOSCH, Panda.FLAG_HONDA_BOSCH_LONG)
self.safety.init_tests_honda()
def _send_gas_brake_msg(self, gas, accel):
values = {
"GAS_COMMAND": gas,
"ACCEL_COMMAND": accel,
"BRAKE_REQUEST": accel < 0,
}
return self.packer.make_can_msg_panda("ACC_CONTROL", self.PT_BUS, values)
def test_diagnostics(self):
tester_present = common.package_can_msg((0x18DAB0F1, 0, b"\x02\x3E\x80\x00\x00\x00\x00\x00", self.PT_BUS))
self.assertTrue(self.safety.safety_tx_hook(tester_present))
not_tester_present = common.package_can_msg((0x18DAB0F1, 0, b"\x03\xAA\xAA\x00\x00\x00\x00\x00", self.PT_BUS))
self.assertFalse(self.safety.safety_tx_hook(not_tester_present))
def test_radar_alive(self):
# If the radar knockout failed, make sure the relay malfunction is shown
self.assertFalse(self.safety.get_relay_malfunction())
self._rx(make_msg(self.PT_BUS, 0x1DF, 8))
self.assertTrue(self.safety.get_relay_malfunction())
def test_gas_safety_check(self):
for controls_allowed in [True, False]:
for gas in np.arange(self.NO_GAS, self.MAX_GAS + 2000, 100):
accel = 0 if gas < 0 else gas / 1000
self.safety.set_controls_allowed(controls_allowed)
send = gas <= self.MAX_GAS if controls_allowed else gas == self.NO_GAS
self.assertEqual(send, self.safety.safety_tx_hook(self._send_gas_brake_msg(gas, accel)), gas)
def test_brake_safety_check(self):
for controls_allowed in [True, False]:
for accel in np.arange(0, self.MAX_BRAKE - 1, -0.01):
accel = round(accel, 2) # floats might not hit exact boundary conditions without rounding
self.safety.set_controls_allowed(controls_allowed)
send = self.MAX_BRAKE <= accel <= 0 if controls_allowed else accel == 0
self.assertEqual(send, self._tx(self._send_gas_brake_msg(self.NO_GAS, accel)), (controls_allowed, accel))
if __name__ == "__main__":
unittest.main()
#!/usr/bin/env python
import roslib
roslib.load_manifest('crazyflie_control')
import rospy
import sys
from geometry_msgs.msg import Vector3
from nav_msgs.msg import Odometry
from crazyflie_driver.msg import RPYT
import dynamic_reconfigure.server
from crazyflie_control.cfg import CrazyflieControlConfig
from math import *
import numpy as np
class CrazyflieControlNode(object):
mass = 1.0
gravity = 9.801
kpz = 1.0
kdz = 1.0
kpx = 1.0
kpy = 1.0
kdx = 1.0
kdy = 1.0
xd = 0.0
yd = 0.0
zd = 0.0
xp = 0.0
yp = 0.0
zp = 0.0
x = 0.0
y = 0.0
z = 0.0
q0 = 1.0
q1 = 0.0
q2 = 0.0
q3 = 0.0
last_odometry_update = rospy.Time()
def __init__(self, default_name='apollo', default_update_rate=100):
self.default_name = default_name
self.default_update_rate = default_update_rate
rospy.init_node('crazyflie_control')
self._init_params()
self._init_pubsub()
dynamic_reconfigure.server.Server(CrazyflieControlConfig, self.reconfigure)
self.last_odometry_update = rospy.get_rostime()
def _init_params(self):
self.name = rospy.get_param('~name', self.default_name)
self.update_rate = rospy.get_param('~update_rate', self.default_update_rate)
def _init_pubsub(self):
self.vicon_sub = rospy.Subscriber('/' + self.name + '/odom', Odometry, self.set_odometry)
self.rotation_desired_pub = rospy.Publisher('/' + self.name + '/rotation_desired', RPYT)
self.rotation_actual_pub = rospy.Publisher('/' + self.name + '/rotation_actual', Vector3)
def set_odometry(self, msg):
now = rospy.get_rostime()
dt = now - self.last_odometry_update  # elapsed time since the previous odometry update
x_old = self.x
y_old = self.y
z_old = self.z
self.x = msg.pose.pose.position.x * 0.001
self.y = msg.pose.pose.position.y * 0.001
self.z = msg.pose.pose.position.z * 0.001
self.q1 = msg.pose.pose.orientation.x
self.q2 = msg.pose.pose.orientation.y
self.q3 = msg.pose.pose.orientation.z
self.q0 = msg.pose.pose.orientation.w
self.xd = (2.0/dt.to_sec())*(self.x - x_old) - self.xd
self.yd = (2.0/dt.to_sec())*(self.y - y_old) - self.yd
self.zd = (2.0/dt.to_sec())*(self.z - z_old) - self.zd
self.last_odometry_update = now
def reconfigure(self, config, level):
self.kpx = config['kpx']
self.kpy = config['kpy']
self.kpz = config['kpz']
self.kdx = config['kdx']
self.kdy = config['kdy']
self.kdz = config['kdz']
self.xd = config['xd']
self.yd = config['yd']
self.zd = config['zd']
self.power = config['power']
return config
def spin(self):
rospy.loginfo("Spinning")
r = rospy.Rate(self.update_rate)
while not rospy.is_shutdown():
gx = 2 * (self.q1*self.q3 - self.q0*self.q2)
gy = 2 * (self.q0*self.q1 + self.q2*self.q3)
gz = self.q0*self.q0 - self.q1*self.q1 - self.q2*self.q2 + self.q3*self.q3
yaw = atan2(2*self.q1*self.q2 - 2*self.q0*self.q3, 2*self.q0*self.q0 + 2*self.q1*self.q1 - 1) * 180 / pi
pitch = atan(gx / sqrt(gy*gy + gz*gz)) * 180 / pi
roll = atan(gy / sqrt(gx*gx + gz*gz)) * 180 / pi
msg_actual = Vector3()
msg_actual.x = roll
msg_actual.y = pitch
msg_actual.z = yaw
self.rotation_actual_pub.publish(msg_actual)
# Standard quaternion (q0 = w) to rotation matrix; build the rows as
# distinct lists so the element assignments below do not alias each other.
R = [[0.0]*3 for _ in range(3)]
R[0][0] = pow(self.q0,2) + pow(self.q1,2) - pow(self.q2,2) - pow(self.q3,2)
R[0][1] = 2*self.q1*self.q2 - 2*self.q0*self.q3
R[0][2] = 2*self.q1*self.q3 + 2*self.q0*self.q2
R[1][0] = 2*self.q1*self.q2 + 2*self.q0*self.q3
R[1][1] = pow(self.q0,2) - pow(self.q1,2) + pow(self.q2,2) - pow(self.q3,2)
R[1][2] = 2*self.q2*self.q3 - 2*self.q0*self.q1
R[2][0] = 2*self.q1*self.q3 - 2*self.q0*self.q2
R[2][1] = 2*self.q2*self.q3 + 2*self.q0*self.q1
R[2][2] = pow(self.q0,2) - pow(self.q1,2) - pow(self.q2,2) + pow(self.q3,2)
r_matrix = np.matrix(R)
# This is the thrust; it should also be placed in the function below...
f = self.mass / R[2][2] * ( self.gravity - self.kpz*(self.z-self.zd) - self.kdz*self.zp )
r13d = self.mass / f * ( -self.kpx*(self.x-self.xd) - self.kdx*self.xp )
r23d = self.mass / f * ( -self.kpy*(self.y-self.yd) - self.kdy*self.yp )
r33d = sqrt(1-pow(r13d,2)-pow(r23d,2))
v = [0]*3
v[0] = -r23d
v[1] = r13d
v[2] = 0.0
angle = acos(r33d)
ca = cos(angle)
sa = sin(angle)
A = [[0.0]*3 for _ in range(3)]  # distinct rows so the assignments below do not alias
A[0][0] = ca + pow(v[0],2)*(1-ca)
A[0][1] = v[0]*v[1]*(1-ca) - v[2]*sa
A[0][2] = v[0]*v[2]*(1-ca) + v[1]*sa
A[1][0] = v[0]*v[1]*(1-ca) + v[2]*sa
A[1][1] = ca + pow(v[1],2)*(1-ca)
A[1][2] = v[1]*v[2]*(1-ca) - v[0]*sa
A[2][0] = v[0]*v[2]*(1-ca) + v[1]*sa
A[2][1] = v[1]*v[2]*(1-ca) + v[0]*sa
A[2][2] = ca + pow(v[2],2)*(1-ca)
a_matrix = np.matrix(A)
rd = [0]*3
rd[0] = r13d
rd[1] = r23d
rd[2] = r33d
rd_matrix = np.matrix(rd)
gd = np.transpose(r_matrix)*a_matrix*np.transpose(rd_matrix)
eulerRollDesired = atan2(gd[1],sqrt(pow(gd[1],2)+pow(gd[2],2))) * 180 / pi
eulerPitchDesired = -atan(gd[0]/sqrt(pow(gd[1],2)+pow(gd[2],2))) * 180 / pi
eulerYawDesired = 0.0
msg_desired = RPYT()
msg_desired.roll = eulerRollDesired
msg_desired.pitch = eulerPitchDesired
msg_desired.yaw = eulerYawDesired
if self.power:
msg_desired.thrust = f
else:
msg_desired.thrust = 0.0
self.rotation_desired_pub.publish(msg_desired)
r.sleep()
def crazyflie_control_main(argv):
c = CrazyflieControlNode()
c.spin()
if __name__ == '__main__':
crazyflie_control_main(sys.argv)
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from oslo_utils import excutils
from oslo_utils import importutils
import six
from neutron.api.v2 import attributes
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.i18n import _LE, _LW
LOG = logging.getLogger(__name__)
_ENFORCER = None
ADMIN_CTX_POLICY = 'context_is_admin'
ADVSVC_CTX_POLICY = 'context_is_advsvc'
def reset():
global _ENFORCER
if _ENFORCER:
_ENFORCER.clear()
_ENFORCER = None
def init(conf=cfg.CONF, policy_file=None):
"""Init an instance of the Enforcer class."""
global _ENFORCER
if not _ENFORCER:
_ENFORCER = policy.Enforcer(conf, policy_file=policy_file)
_ENFORCER.load_rules(True)
def refresh(policy_file=None):
"""Reset policy and init a new instance of Enforcer."""
reset()
init(policy_file=policy_file)
def get_resource_and_action(action, pluralized=None):
"""Extract resource and action (write, read) from api operation."""
data = action.split(':', 1)[0].split('_', 1)
resource = pluralized or ("%ss" % data[-1])
return (resource, data[0] != 'get')
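# Illustrative examples of the mapping performed above (not executed here):
#   get_resource_and_action('create_network')  -> ('networks', True)
#   get_resource_and_action('get_port')        -> ('ports', False)
#   get_resource_and_action('update_router:distributed', 'routers')
#                                               -> ('routers', True)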
def set_rules(policies, overwrite=True):
"""Set rules based on the provided dict of rules.
:param policies: New policies to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
"""
LOG.debug("Loading policies from file: %s", _ENFORCER.policy_path)
init()
_ENFORCER.set_rules(policies, overwrite)
def _is_attribute_explicitly_set(attribute_name, resource, target, action):
"""Verify that an attribute is present and is explicitly set."""
if 'update' in action:
# In the case of update, the function should not pay attention to a
# default value of an attribute, but check whether it was explicitly
# marked as being updated instead.
return (attribute_name in target[const.ATTRIBUTES_TO_UPDATE] and
target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED)
return ('default' in resource[attribute_name] and
attribute_name in target and
target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED and
target[attribute_name] != resource[attribute_name]['default'])
def _should_validate_sub_attributes(attribute, sub_attr):
"""Verify that sub-attributes are iterable and should be validated."""
validate = attribute.get('validate')
return (validate and isinstance(sub_attr, collections.Iterable) and
any([k.startswith('type:dict') and
v for (k, v) in six.iteritems(validate)]))
def _build_subattr_match_rule(attr_name, attr, action, target):
"""Create the rule to match for sub-attribute policy checks."""
# TODO(salv-orlando): Instead of relying on validator info, introduce
# typing for API attributes
# Expect a dict as type descriptor
validate = attr['validate']
key = list(filter(lambda k: k.startswith('type:dict'), validate.keys()))
if not key:
LOG.warn(_LW("Unable to find data type descriptor for attribute %s"),
attr_name)
return
data = validate[key[0]]
if not isinstance(data, dict):
LOG.debug("Attribute type descriptor is not a dict. Unable to "
"generate any sub-attr policy rule for %s.",
attr_name)
return
sub_attr_rules = [policy.RuleCheck('rule', '%s:%s:%s' %
(action, attr_name,
sub_attr_name)) for
sub_attr_name in data if sub_attr_name in
target[attr_name]]
return policy.AndCheck(sub_attr_rules)
def _process_rules_list(rules, match_rule):
"""Recursively walk a policy rule to extract a list of match entries."""
if isinstance(match_rule, policy.RuleCheck):
rules.append(match_rule.match)
elif isinstance(match_rule, policy.AndCheck):
for rule in match_rule.rules:
_process_rules_list(rules, rule)
return rules
def _build_match_rule(action, target, pluralized):
"""Create the rule to match for a given action.
The policy rule to be matched is built in the following way:
1) add entries for matching permission on objects
2) add an entry for the specific action (e.g.: create_network)
3) add an entry for attributes of a resource for which the action
is being executed (e.g.: create_network:shared)
4) add an entry for sub-attributes of a resource for which the
action is being executed
(e.g.: create_router:external_gateway_info:network_id)
"""
match_rule = policy.RuleCheck('rule', action)
resource, is_write = get_resource_and_action(action, pluralized)
# Attribute-based checks shall not be enforced on GETs
if is_write:
# assigning to variable with short name for improving readability
res_map = attributes.RESOURCE_ATTRIBUTE_MAP
if resource in res_map:
for attribute_name in res_map[resource]:
if _is_attribute_explicitly_set(attribute_name,
res_map[resource],
target, action):
attribute = res_map[resource][attribute_name]
if 'enforce_policy' in attribute:
attr_rule = policy.RuleCheck('rule', '%s:%s' %
(action, attribute_name))
# Build match entries for sub-attributes
if _should_validate_sub_attributes(
attribute, target[attribute_name]):
attr_rule = policy.AndCheck(
[attr_rule, _build_subattr_match_rule(
attribute_name, attribute,
action, target)])
match_rule = policy.AndCheck([match_rule, attr_rule])
return match_rule
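# A hedged sketch of the rule this builds: for action 'create_network' with a
# target that explicitly sets the policy-enforced attribute 'shared', the
# resulting match rule is roughly equivalent to the policy expression
#   rule:create_network and rule:create_network:shared
# For 'get_*' actions only the plain 'rule:<action>' entry is produced, since
# attribute-based checks are skipped for reads.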
# This check is registered as 'tenant_id' so that it can override
# GenericCheck which was used for validating parent resource ownership.
# This will prevent us from having to handle backward compatibility
# for policy.json
# TODO(salv-orlando): Reinstate GenericCheck for simple tenant_id checks
@policy.register('tenant_id')
class OwnerCheck(policy.Check):
"""Resource ownership check.
This check verifies the owner of the current resource, or of another
resource referenced by the one under analysis.
In the former case it falls back to a regular GenericCheck, whereas
in the latter case it leverages the plugin to load the referenced
resource and perform the check.
"""
def __init__(self, kind, match):
# Process the match
try:
self.target_field = re.findall(r'^\%\((.*)\)s$',
match)[0]
except IndexError:
err_reason = (_("Unable to identify a target field from:%s. "
"Match should be in the form %%(<field_name>)s") %
match)
LOG.exception(err_reason)
raise exceptions.PolicyInitError(
policy="%s:%s" % (kind, match),
reason=err_reason)
super(OwnerCheck, self).__init__(kind, match)
def __call__(self, target, creds, enforcer):
if self.target_field not in target:
# policy needs a plugin check
# target field is in the form resource:field
# however if they're not separated by a colon, use an underscore
# as a separator for backward compatibility
def do_split(separator):
parent_res, parent_field = self.target_field.split(
separator, 1)
return parent_res, parent_field
for separator in (':', '_'):
try:
parent_res, parent_field = do_split(separator)
break
except ValueError:
LOG.debug("Unable to find ':' as separator in %s.",
self.target_field)
else:
# If we are here split failed with both separators
err_reason = (_("Unable to find resource name in %s") %
self.target_field)
LOG.error(err_reason)
raise exceptions.PolicyCheckError(
policy="%s:%s" % (self.kind, self.match),
reason=err_reason)
parent_foreign_key = attributes.RESOURCE_FOREIGN_KEYS.get(
"%ss" % parent_res, None)
if not parent_foreign_key:
err_reason = (_("Unable to verify match:%(match)s as the "
"parent resource: %(res)s was not found") %
{'match': self.match, 'res': parent_res})
LOG.error(err_reason)
raise exceptions.PolicyCheckError(
policy="%s:%s" % (self.kind, self.match),
reason=err_reason)
# NOTE(salv-orlando): This check currently assumes the parent
# resource is handled by the core plugin. It might be worth
# having a way to map resources to plugins so to make this
# check more general
# NOTE(ihrachys): if import is put in global, circular
# import failure occurs
manager = importutils.import_module('neutron.manager')
f = getattr(manager.NeutronManager.get_instance().plugin,
'get_%s' % parent_res)
# f *must* exist, if not found it is better to let neutron
# explode. Check will be performed with admin context
context = importutils.import_module('neutron.context')
try:
data = f(context.get_admin_context(),
target[parent_foreign_key],
fields=[parent_field])
target[self.target_field] = data[parent_field]
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Policy check error while calling %s!'),
f)
match = self.match % target
if self.kind in creds:
return match == six.text_type(creds[self.kind])
return False
@policy.register('field')
class FieldCheck(policy.Check):
def __init__(self, kind, match):
# Process the match
resource, field_value = match.split(':', 1)
field, value = field_value.split('=', 1)
super(FieldCheck, self).__init__(kind, '%s:%s:%s' %
(resource, field, value))
# Value might need conversion - we need help from the attribute map
try:
attr = attributes.RESOURCE_ATTRIBUTE_MAP[resource][field]
conv_func = attr['convert_to']
except KeyError:
conv_func = lambda x: x
self.field = field
self.value = conv_func(value)
self.regex = re.compile(value[1:]) if value.startswith('~') else None
def __call__(self, target_dict, cred_dict, enforcer):
target_value = target_dict.get(self.field)
# target_value might be a boolean, explicitly compare with None
if target_value is None:
LOG.debug("Unable to find requested field: %(field)s in target: "
"%(target_dict)s",
{'field': self.field, 'target_dict': target_dict})
return False
if self.regex:
return bool(self.regex.match(target_value))
return target_value == self.value
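# A hedged example of how FieldCheck is interpreted: a policy rule written as
#   "field:networks:shared=True"
# is parsed into FieldCheck(kind='field', match='networks:shared=True'); the
# textual value 'True' is run through the attribute map's convert_to function
# when one is defined (for 'shared' this is assumed to be a boolean
# conversion), and the check passes when target_dict['shared'] equals the
# converted value.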
def _prepare_check(context, action, target, pluralized):
"""Prepare rule, target, and credentials for the policy engine."""
# Compare with None to distinguish case in which target is {}
if target is None:
target = {}
match_rule = _build_match_rule(action, target, pluralized)
credentials = context.to_dict()
return match_rule, target, credentials
def log_rule_list(match_rule):
if LOG.isEnabledFor(logging.DEBUG):
rules = _process_rules_list([], match_rule)
LOG.debug("Enforcing rules: %s", rules)
def check(context, action, target, plugin=None, might_not_exist=False,
pluralized=None):
"""Verifies that the action is valid on the target in this context.
:param context: neutron context
:param action: string representing the action to be checked
this should be colon separated for clarity.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. ``{'project_id': context.project_id}``
:param plugin: currently unused and deprecated.
Kept for backward compatibility.
:param might_not_exist: If True the policy check is skipped (and the
function returns True) if the specified policy does not exist.
Defaults to false.
:param pluralized: pluralized case of resource
e.g. firewall_policy -> pluralized = "firewall_policies"
:return: Returns True if access is permitted else False.
"""
# If we already know the context has admin rights do not perform an
# additional check and authorize the operation
if context.is_admin:
return True
if might_not_exist and not (_ENFORCER.rules and action in _ENFORCER.rules):
return True
match_rule, target, credentials = _prepare_check(context,
action,
target,
pluralized)
result = _ENFORCER.enforce(match_rule,
target,
credentials,
pluralized=pluralized)
# logging applied rules in case of failure
if not result:
log_rule_list(match_rule)
return result
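# A hedged usage sketch (resource and field names are illustrative): API
# controllers typically call
#   policy.check(context, 'get_network',
#                {'tenant_id': net['tenant_id'], 'shared': net['shared']})
# and branch on the boolean result, whereas enforce() below raises
# PolicyNotAuthorized on failure instead of returning False.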
def enforce(context, action, target, plugin=None, pluralized=None):
"""Verifies that the action is valid on the target in this context.
:param context: neutron context
:param action: string representing the action to be checked
this should be colon separated for clarity.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. ``{'project_id': context.project_id}``
:param plugin: currently unused and deprecated.
Kept for backward compatibility.
:param pluralized: pluralized case of resource
e.g. firewall_policy -> pluralized = "firewall_policies"
:raises oslo_policy.policy.PolicyNotAuthorized:
if verification fails.
"""
# If we already know the context has admin rights do not perform an
# additional check and authorize the operation
if context.is_admin:
return True
rule, target, credentials = _prepare_check(context,
action,
target,
pluralized)
try:
result = _ENFORCER.enforce(rule, target, credentials, action=action,
do_raise=True)
except policy.PolicyNotAuthorized:
with excutils.save_and_reraise_exception():
log_rule_list(rule)
LOG.debug("Failed policy check for '%s'", action)
return result
def check_is_admin(context):
"""Verify context has admin rights according to policy settings."""
init()
# the target is user-self
credentials = context.to_dict()
if ADMIN_CTX_POLICY not in _ENFORCER.rules:
return False
return _ENFORCER.enforce(ADMIN_CTX_POLICY, credentials, credentials)
def check_is_advsvc(context):
"""Verify context has advsvc rights according to policy settings."""
init()
# the target is user-self
credentials = context.to_dict()
if ADVSVC_CTX_POLICY not in _ENFORCER.rules:
return False
return _ENFORCER.enforce(ADVSVC_CTX_POLICY, credentials, credentials)
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.code import If
from hwt.interfaces.std import HandshakeSync, VectSignal
from hwt.interfaces.utils import addClkRstn, propagateClkRstn
from hwt.synthesizer.param import Param
from hwtLib.abstract.busBridge import BusBridge
from hwtLib.amba.axi4 import Axi4, Axi4_addr
from hwtLib.amba.axi4Lite import Axi4Lite, Axi4Lite_addr
from hwtLib.amba.axi_comp.buff import AxiBuff
from hwtLib.amba.constants import PROT_DEFAULT
from hwtLib.handshaked.fifo import HandshakedFifo
from hwtLib.handshaked.streamNode import StreamNode
from typing import Optional
class HandshakedIdAndLen(HandshakeSync):
"""
.. hwt-autodoc::
"""
def _config(self):
self.ID_WIDTH = Param(4)
self.LEN_WIDTH = Param(8)
def _declr(self):
if self.ID_WIDTH > 0:
self.id = VectSignal(self.ID_WIDTH)
self.len = VectSignal(self.LEN_WIDTH)
super(HandshakedIdAndLen, self)._declr()
class Axi_to_AxiLite(BusBridge):
"""
AXI3/4 -> Axi4Lite bridge
    :attention: The AXI interface works in read-first mode; overlapping transactions
        are not checked to complete in proper r/w order
    :attention: Only the last response code on the AxiLite side of a transaction is used
        as the response code for Axi4. That means that if an error appears in a middle
        beat of the transaction, it is ignored.
    :ivar ~.MAX_TRANS_OVERLAP: depth of the internal FIFO which is used to allow
        transactions to overlap each other in order to pipeline their execution
.. hwt-autodoc::
"""
def __init__(self, intfCls=Axi4, hdl_name_override:Optional[str]=None):
self.intfCls = intfCls
super(Axi_to_AxiLite, self).__init__(hdl_name_override=hdl_name_override)
def _config(self):
self.intfCls._config(self)
self.MAX_TRANS_OVERLAP = Param(4)
def _declr(self):
addClkRstn(self)
with self._paramsShared():
self.s = self.intfCls()
self.m = Axi4Lite()._m()
        # *_req_fifo are used to avoid blocking while waiting for addr/data/confirmation on the AXI channels
r_f = self.r_req_fifo = HandshakedFifo(HandshakedIdAndLen)
w_f = self.w_req_fifo = HandshakedFifo(HandshakedIdAndLen)
for f in [w_f, r_f]:
f.ID_WIDTH = self.ID_WIDTH
f.LEN_WIDTH = self.intfCls.LEN_WIDTH
f.DEPTH = self.MAX_TRANS_OVERLAP
with self._paramsShared():
self.out_reg = AxiBuff(Axi4Lite)
self.in_reg = AxiBuff(self.intfCls)
self.in_reg.DATA_BUFF_DEPTH = \
self.in_reg.ADDR_BUFF_DEPTH = \
self.out_reg.DATA_BUFF_DEPTH = \
self.out_reg.ADDR_BUFF_DEPTH = 1
def gen_addr_logic(self, addr_ch_in: Axi4_addr,
addr_ch_out: Axi4Lite_addr,
req_fifo_inp: HandshakedIdAndLen):
"""
        Instantiate logic which splits the transactions into beats on the AxiLite interface
        and propagates information about the transactions to req_fifo_inp for later use
"""
name_prefix = addr_ch_in._name + "_"
len_rem = self._reg(name_prefix + "len_rem", addr_ch_in.len._dtype, def_val=0)
        # len_rem_vld is set only if len > 0, otherwise the transaction is processed
        # without len_rem being involved
len_rem_vld = self._reg(name_prefix + "len_rem_vld", def_val=0)
addr_step = self.DATA_WIDTH // 8
actual_addr = self._reg(name_prefix + "actual_addr", addr_ch_in.addr._dtype)
If(len_rem_vld,
addr_ch_out.addr(actual_addr),
addr_ch_in.ready(0), # because we need to process pending req. first
addr_ch_out.valid(1),
If(addr_ch_out.ready,
# move on next beat
actual_addr(actual_addr + addr_step),
If(len_rem._eq(0),
len_rem_vld(0),
),
len_rem(len_rem - 1),
),
).Else(
addr_ch_out.addr(addr_ch_in.addr),
# have to store request to register if it is longer than
# a single beat
actual_addr(addr_ch_out.addr + addr_step),
len_rem(addr_ch_in.len - 1),
len_rem_vld(addr_ch_in.valid & (addr_ch_in.len != 0) & addr_ch_out.ready & req_fifo_inp.rd),
# directly pass this first beat
StreamNode([addr_ch_in], [addr_ch_out]).sync(req_fifo_inp.rd),
)
addr_ch_out.prot(PROT_DEFAULT)
        # push a new request to req_fifo only at the start of a new request
req_fifo_inp.vld(~len_rem_vld & addr_ch_in.valid & addr_ch_out.ready)
if self.ID_WIDTH:
req_fifo_inp.id(addr_ch_in.id)
req_fifo_inp.len(addr_ch_in.len)
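    # Worked example (illustrative, assuming DATA_WIDTH=64, i.e. addr_step=8):
    # an input burst with addr=0x100 and len=3 (4 beats) is emitted on the
    # AxiLite address channel as 0x100, 0x108, 0x110, 0x118; the first beat is
    # forwarded directly, the remaining ones come from the actual_addr/len_rem
    # registers until len_rem reaches 0.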
def gen_w_logic(self, w_in, w_out):
"""
        Directly connect the w channels, ignoring extra signals
        (the data should already be synchronized by the order of beats on the channel)
"""
ignored = {w_in.last}
if hasattr(w_in, "id"):
ignored.add(w_in.id)
w_out(w_in, exclude=ignored)
def gen_b_or_r_logic(self, inp, outp, fifo_out, propagete_only_on_last):
"""
        Use a counter to skip intermediate generated transactions
        and pass only the confirmation from the last beat of the original transaction
"""
name_prefix = outp._name
rem = self._reg(name_prefix + "rem", self.s.aw.len._dtype)
if self.ID_WIDTH:
id_tmp = self._reg(name_prefix + "id_tmp", outp.id._dtype)
rem_vld = self._reg(name_prefix + "rem_vld", def_val=0)
StreamNode(
[inp],
[outp],
extraConds={
outp: rem_vld & rem._eq(0) if propagete_only_on_last else rem_vld,
inp: rem_vld,
}
).sync()
If(rem_vld,
fifo_out.rd(inp.valid & outp.ready & rem._eq(0)),
If(inp.valid & outp.ready,
# now processing next beat
If(rem != 0,
# this was not the last beat
rem(rem - 1)
).Elif(fifo_out.vld,
# this was the last beat and we can directly start new one
rem(fifo_out.len),
id_tmp(fifo_out.id) if self.ID_WIDTH else [],
).Else(
# this was the last beat and there is no next transaction
rem_vld(0),
)
),
).Else(
            # when idle, store the information from fifo_out
rem(fifo_out.len),
id_tmp(fifo_out.id) if self.ID_WIDTH else [],
rem_vld(fifo_out.vld),
fifo_out.rd(1),
)
already_connected = {outp.valid, outp.ready}
if self.ID_WIDTH:
outp.id(id_tmp)
already_connected.add(outp.id)
if hasattr(outp, "last"):
outp.last(rem._eq(0) & rem_vld)
already_connected.add(outp.last)
outp(inp, exclude=already_connected)
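    # Note (informal): for the r channel this method forwards every AxiLite
    # beat and asserts 'last' on the final one (propagete_only_on_last=False),
    # while for the b channel only the response of the last beat is passed to
    # the Axi4 master (propagete_only_on_last=True), matching the single write
    # response expected per burst.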
def _impl(self):
m, s = self.in_reg.m, self.out_reg.s
w_fifo, r_fifo = self.w_req_fifo, self.r_req_fifo
propagateClkRstn(self)
self.in_reg.s(self.s)
self.gen_addr_logic(m.ar, s.ar, r_fifo.dataIn)
self.gen_addr_logic(m.aw, s.aw, w_fifo.dataIn)
self.gen_w_logic(m.w, s.w)
self.gen_b_or_r_logic(s.r, m.r, r_fifo.dataOut, False)
self.gen_b_or_r_logic(s.b, m.b, w_fifo.dataOut, True)
self.m(self.out_reg.m)
if __name__ == "__main__":
from hwt.synthesizer.utils import to_rtl_str
u = Axi_to_AxiLite()
print(to_rtl_str(u))
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
import paddle.fluid as fluid
def conv3d_forward_naive(input,
filter,
group,
conv_param,
padding_algorithm='EXPLICIT',
data_format="NCDHW"):
if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
raise ValueError("Unknown Attr(padding_algorithm): '%s'. "
"It can only be 'SAME' or 'VALID'." %
str(padding_algorithm))
if data_format not in ["NCDHW", "NDHWC"]:
raise ValueError("Unknown Attr(data_format): '%s' ."
"It can only be 'NCDHW' or 'NDHWC'." %
str(data_format))
channel_last = (data_format == "NDHWC")
if channel_last:
input = np.transpose(input, [0, 4, 1, 2, 3])
in_n, in_c, in_d, in_h, in_w = input.shape
f_n, f_c, f_d, f_h, f_w = filter.shape
out_n = in_n
out_c = f_n
assert f_c * group == in_c
assert np.mod(out_c, group) == 0
sub_out_c = out_c // group
sub_f_n = f_n // group
stride, pad, dilation = conv_param['stride'], conv_param['pad'], conv_param[
'dilations']
# update pad and dilation
def _get_padding_with_SAME(input_shape, pool_size, pool_stride):
padding = []
for input_size, filter_size, stride_size in zip(input_shape, pool_size,
pool_stride):
out_size = int((input_size + stride_size - 1) / stride_size)
pad_sum = np.max((
(out_size - 1) * stride_size + filter_size - input_size, 0))
pad_0 = int(pad_sum / 2)
pad_1 = int(pad_sum - pad_0)
padding.append(pad_0)
padding.append(pad_1)
return padding
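    # Worked example of the SAME-padding helper above (illustrative numbers):
    # for input_size=5, filter_size=3, stride=2 the output size is
    # ceil(5/2) = 3, so pad_sum = max((3-1)*2 + 3 - 5, 0) = 2 and the padding
    # is split as (pad_0, pad_1) = (1, 1).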
ksize = filter.shape[2:5]
if padding_algorithm == "VALID":
pad = [0, 0, 0, 0, 0, 0]
elif padding_algorithm == "SAME":
dilation = [1, 1, 1]
input_data_shape = []
if data_format == "NCDHW":
input_data_shape = input.shape[2:5]
elif data_format == "NDHWC":
input_data_shape = input.shape[1:4]
pad = _get_padding_with_SAME(input_data_shape, ksize, stride)
pad_d_0, pad_d_1 = pad[0], pad[0]
pad_h_0, pad_h_1 = pad[1], pad[1]
pad_w_0, pad_w_1 = pad[2], pad[2]
if len(pad) == 6:
pad_d_0, pad_d_1 = pad[0], pad[1]
pad_h_0, pad_h_1 = pad[2], pad[3]
pad_w_0, pad_w_1 = pad[4], pad[5]
out_d = 1 + (in_d + pad_d_0 + pad_d_1 - (dilation[0] *
(f_d - 1) + 1)) // stride[0]
out_h = 1 + (in_h + pad_h_0 + pad_h_1 - (dilation[1] *
(f_h - 1) + 1)) // stride[1]
out_w = 1 + (in_w + pad_w_0 + pad_w_1 - (dilation[2] *
(f_w - 1) + 1)) // stride[2]
out = np.zeros((in_n, out_c, out_d, out_h, out_w))
d_bolck_d = (dilation[0] * (f_d - 1) + 1)
d_bolck_h = (dilation[1] * (f_h - 1) + 1)
d_bolck_w = (dilation[2] * (f_w - 1) + 1)
input_pad = np.pad(input, ((0, 0), (0, 0), (pad_d_0, pad_d_1),
(pad_h_0, pad_h_1), (pad_w_0, pad_w_1)),
mode='constant',
constant_values=0)
filter_dilation = np.zeros((f_n, f_c, d_bolck_d, d_bolck_h, d_bolck_w))
filter_dilation[:, :, 0:d_bolck_d:dilation[0], 0:d_bolck_h:dilation[1], 0:
d_bolck_w:dilation[2]] = filter
for d in range(out_d):
for i in range(out_h):
for j in range(out_w):
for g in range(group):
input_pad_masked = \
input_pad[:, g * f_c:(g + 1) * f_c,
d * stride[0]:d * stride[0] + d_bolck_d,
i * stride[1]:i * stride[1] + d_bolck_h,
j * stride[2]:j * stride[2] + d_bolck_w]
f_sub = filter_dilation[g * sub_f_n:(g + 1) *
sub_f_n, :, :, :, :]
for k in range(sub_out_c):
out[:, g * sub_out_c + k, d, i, j] = \
np.sum(input_pad_masked * f_sub[k, :, :, :, :],
axis=(1, 2, 3, 4))
if channel_last:
out = np.transpose(out, [0, 2, 3, 4, 1])
return out
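# Shape sanity check (informal): with the default TestConv3dOp settings below
# (input [2, 3, 4, 4, 4], filter [6, 3, 3, 3, 3], pad 0, stride 1, dilation 1)
# each spatial dimension becomes 1 + (4 - 3) // 1 = 2, so the naive forward
# pass returns an output of shape [2, 6, 2, 2, 2].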
def create_test_cudnn_class(parent):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestCUDNNCase(parent):
def init_kernel_type(self):
self.use_cudnn = True
cls_name = "{0}_{1}".format(parent.__name__, "CUDNN")
TestCUDNNCase.__name__ = cls_name
globals()[cls_name] = TestCUDNNCase
def create_test_padding_SAME_class(parent):
class TestPaddingSMAECase(parent):
def init_paddings(self):
self.pad = [0, 0, 0]
self.padding_algorithm = "SAME"
cls_name = "{0}_{1}".format(parent.__name__, "PaddingSAMEOp")
TestPaddingSMAECase.__name__ = cls_name
globals()[cls_name] = TestPaddingSMAECase
def create_test_padding_VALID_class(parent):
class TestPaddingVALIDCase(parent):
def init_paddings(self):
self.pad = [1, 1, 1]
self.padding_algorithm = "VALID"
cls_name = "{0}_{1}".format(parent.__name__, "PaddingVALIDOp")
TestPaddingVALIDCase.__name__ = cls_name
globals()[cls_name] = TestPaddingVALIDCase
def create_test_cudnn_padding_SAME_class(parent):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestCUDNNPaddingSMAECase(parent):
def init_kernel_type(self):
self.use_cudnn = True
def init_paddings(self):
self.pad = [1, 1, 1]
self.padding_algorithm = "SAME"
cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingSAMEOp")
TestCUDNNPaddingSMAECase.__name__ = cls_name
globals()[cls_name] = TestCUDNNPaddingSMAECase
def create_test_cudnn_padding_VALID_class(parent):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestCUDNNPaddingVALIDCase(parent):
def init_kernel_type(self):
self.use_cudnn = True
def init_paddings(self):
self.pad = [1, 1, 1]
self.padding_algorithm = "VALID"
cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingVALIDOp")
TestCUDNNPaddingVALIDCase.__name__ = cls_name
globals()[cls_name] = TestCUDNNPaddingVALIDCase
def create_test_channel_last_class(parent):
class TestChannelLastCase(parent):
def init_data_format(self):
self.data_format = "NDHWC"
def init_test_case_2(self):
N, C, D, H, W = self.input_size
self.input_size = [N, D, H, W, C]
cls_name = "{0}_{1}".format(parent.__name__, "ChannelLast")
TestChannelLastCase.__name__ = cls_name
globals()[cls_name] = TestChannelLastCase
def create_test_cudnn_channel_last_class(parent):
class TestCudnnChannelLastCase(parent):
def init_kernel_type(self):
self.use_cudnn = True
def init_data_format(self):
self.data_format = "NDHWC"
def init_test_case_2(self):
N, C, D, H, W = self.input_size
self.input_size = [N, D, H, W, C]
cls_name = "{0}_{1}".format(parent.__name__, "CudnnChannelLast")
TestCudnnChannelLastCase.__name__ = cls_name
globals()[cls_name] = TestCudnnChannelLastCase
class TestConv3dOp(OpTest):
def setUp(self):
self.op_type = "conv3d"
self.use_cudnn = False
self.use_mkldnn = False
self.data_format = "AnyLayout"
self.dtype = np.float32
self.init_kernel_type()
self.init_group()
self.init_dilation()
self.init_test_case()
conv3d_param = {
'stride': self.stride,
'pad': self.pad,
'dilations': self.dilations
}
input = np.random.random(self.input_size).astype(self.dtype)
filter = np.random.random(self.filter_size).astype(self.dtype)
output = conv3d_forward_naive(
input,
filter,
self.groups,
conv3d_param, ).astype(self.dtype)
self.inputs = {
'Input': OpTest.np_dtype_to_fluid_dtype(input),
'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
}
self.attrs = {
'strides': self.stride,
'paddings': self.pad,
'groups': self.groups,
'dilations': self.dilations,
'use_cudnn': self.use_cudnn,
'use_mkldnn': self.use_mkldnn,
'data_format': self.data_format
}
self.outputs = {'Output': output}
def has_cudnn(self):
return core.is_compiled_with_cuda() and self.use_cudnn
def test_check_output(self):
place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_output_with_place(place, atol=1e-5)
def test_check_grad(self):
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_grad_with_place(
place, {'Input', 'Filter'}, 'Output', max_relative_error=0.03)
def test_check_grad_no_filter(self):
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_grad_with_place(
place, ['Input'],
'Output',
max_relative_error=0.03,
no_grad_set=set(['Filter']))
def test_check_grad_no_input(self):
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_grad_with_place(
place, ['Input'],
'Output',
max_relative_error=0.03,
no_grad_set=set(['Input']))
def init_test_case(self):
self.pad = [0, 0, 0]
self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3, 3]
def init_test_case_2(self):
pass
def init_dilation(self):
self.dilations = [1, 1, 1]
def init_group(self):
self.groups = 1
def init_kernel_type(self):
pass
class TestCase1(TestConv3dOp):
def init_test_case(self):
self.pad = [1, 1, 1]
self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3, 3]
class TestWithGroup1(TestConv3dOp):
def init_group(self):
self.groups = 3
class TestWithGroup2(TestCase1):
def init_group(self):
self.groups = 3
class TestWith1x1(TestConv3dOp):
def init_test_case(self):
self.pad = [0, 0, 0]
self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 1, 1, 1]
def init_dilation(self):
self.dilations = [1, 1, 1]
def init_group(self):
self.groups = 3
class TestWithInput1x1Filter1x1(TestConv3dOp):
def init_test_case(self):
self.pad = [0, 0, 0]
self.stride = [1, 1, 1]
self.input_size = [2, 3, 1, 1, 1]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 1, 1, 1]
def init_dilation(self):
self.dilations = [1, 1, 1]
def init_group(self):
self.groups = 3
class TestWithDilation(TestConv3dOp):
def init_test_case(self):
self.pad = [0, 0, 0]
self.stride = [1, 1, 1]
self.input_size = [2, 3, 6, 6, 6]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 2, 2, 2]
def init_dilation(self):
self.dilations = [2, 2, 2]
def init_group(self):
self.groups = 3
#---------------- Conv3dCUDNN ----------------
class TestCUDNN(TestConv3dOp):
def init_kernel_type(self):
self.use_cudnn = True
class TestFP16CUDNN(TestConv3dOp):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
class TestWithGroup1CUDNN(TestWithGroup1):
def init_kernel_type(self):
self.use_cudnn = True
class TestFP16WithGroup1CUDNN(TestWithGroup1):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
class TestWithGroup2CUDNN(TestWithGroup2):
def init_kernel_type(self):
self.use_cudnn = True
class TestFP16WithGroup2CUDNN(TestWithGroup2):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
class TestWith1x1CUDNN(TestWith1x1):
def init_kernel_type(self):
self.use_cudnn = True
class TestFP16With1x1CUDNN(TestWith1x1):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
class TestWithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1):
def init_kernel_type(self):
self.use_cudnn = True
class TestFP16WithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
class TestCUDNNExhaustiveSearch(TestCUDNN):
def init_kernel_type(self):
self.use_cudnn = True
self.exhaustive_search = True
# ---- test asymmetric padding ----
class TestConv3dOp_2(OpTest):
def setUp(self):
self.op_type = "conv3d"
self.use_cudnn = False
self.use_mkldnn = False
self.data_format = "NCDHW"
self.dtype = np.float32
self.init_kernel_type()
self.init_group()
self.init_dilation()
self.init_data_format()
self.init_test_case()
self.init_paddings()
self.init_test_case_2()
conv3d_param = {
'stride': self.stride,
'pad': self.pad,
'dilations': self.dilations
}
input = np.random.random(self.input_size).astype(self.dtype)
filter = np.random.random(self.filter_size).astype(self.dtype)
output = conv3d_forward_naive(input, filter, self.groups, conv3d_param,
self.padding_algorithm,
self.data_format).astype(self.dtype)
self.inputs = {
'Input': OpTest.np_dtype_to_fluid_dtype(input),
'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
}
self.attrs = {
'strides': self.stride,
'paddings': self.pad,
'padding_algorithm': self.padding_algorithm,
'groups': self.groups,
'dilations': self.dilations,
'use_cudnn': self.use_cudnn,
'use_mkldnn': self.use_mkldnn,
'data_format': self.data_format
}
self.outputs = {'Output': output}
def has_cudnn(self):
return core.is_compiled_with_cuda() and self.use_cudnn
def test_check_output(self):
place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_output_with_place(place, atol=1e-5)
def test_check_grad(self):
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_grad_with_place(
place, {'Input', 'Filter'}, 'Output', max_relative_error=0.03)
def test_check_grad_no_filter(self):
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_grad_with_place(
place, ['Input'],
'Output',
max_relative_error=0.03,
no_grad_set=set(['Filter']))
def test_check_grad_no_input(self):
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_grad_with_place(
place, ['Input'],
'Output',
max_relative_error=0.03,
no_grad_set=set(['Input']))
def init_test_case(self):
self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3, 3]
def init_test_case_2(self):
pass
def init_dilation(self):
self.dilations = [1, 1, 1]
def init_group(self):
self.groups = 1
def init_kernel_type(self):
pass
def init_paddings(self):
self.pad = [0, 0, 0]
self.padding_algorithm = "EXPLICIT"
def init_data_format(self):
self.data_format = "NCDHW"
class TestConv3dOp_AsyPadding(TestConv3dOp_2):
def init_paddings(self):
self.pad = [1, 0, 1, 0, 0, 2]
self.padding_algorithm = "EXPLICIT"
class TestCase1_AsyPadding(TestConv3dOp_2):
def init_test_case(self):
self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3, 3]
def init_paddings(self):
self.pad = [0, 0, 1, 0, 0, 2]
self.padding_algorithm = "EXPLICIT"
class TestWithGroup1_AsyPadding(TestConv3dOp_2):
def init_group(self):
self.groups = 3
def init_paddings(self):
self.pad = [1, 1, 1, 0, 0, 2]
self.padding_algorithm = "EXPLICIT"
class TestWithGroup2_AsyPadding(TestConv3dOp_2):
def init_test_case(self):
self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3, 3]
def init_group(self):
self.groups = 3
def init_paddings(self):
self.pad = [1, 1, 0, 1, 0, 2]
self.padding_algorithm = "EXPLICIT"
class TestWith1x1_AsyPadding(TestConv3dOp_2):
def init_test_case(self):
self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 1, 1, 1]
def init_dilation(self):
self.dilations = [1, 1, 1]
def init_group(self):
self.groups = 3
def init_paddings(self):
self.pad = [0, 0, 1, 0, 0, 2]
self.padding_algorithm = "EXPLICIT"
class TestWithDilation_AsyPadding(TestConv3dOp_2):
def init_test_case(self):
self.stride = [1, 1, 1]
self.input_size = [2, 3, 6, 6, 6]
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 2, 2, 2]
def init_dilation(self):
self.dilations = [2, 2, 2]
def init_group(self):
self.groups = 3
def init_paddings(self):
self.pad = [0, 0, 1, 0, 1, 0]
self.padding_algorithm = "EXPLICIT"
create_test_cudnn_class(TestConv3dOp_AsyPadding)
create_test_cudnn_class(TestWithGroup1_AsyPadding)
create_test_cudnn_class(TestWithGroup2_AsyPadding)
create_test_cudnn_class(TestWith1x1_AsyPadding)
create_test_cudnn_class(TestWithDilation_AsyPadding)
create_test_padding_SAME_class(TestConv3dOp_AsyPadding)
create_test_padding_SAME_class(TestWithGroup1_AsyPadding)
create_test_padding_SAME_class(TestWith1x1_AsyPadding)
create_test_padding_VALID_class(TestConv3dOp_AsyPadding)
create_test_padding_VALID_class(TestWithGroup1_AsyPadding)
create_test_padding_VALID_class(TestWith1x1_AsyPadding)
create_test_cudnn_padding_SAME_class(TestConv3dOp_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWithGroup1_AsyPadding)
create_test_cudnn_padding_SAME_class(TestWith1x1_AsyPadding)
create_test_cudnn_padding_VALID_class(TestConv3dOp_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWithGroup1_AsyPadding)
create_test_cudnn_padding_VALID_class(TestWith1x1_AsyPadding)
create_test_channel_last_class(TestConv3dOp_AsyPadding)
create_test_channel_last_class(TestWithGroup1_AsyPadding)
create_test_channel_last_class(TestWith1x1_AsyPadding)
create_test_cudnn_channel_last_class(TestConv3dOp_AsyPadding)
create_test_cudnn_channel_last_class(TestWithGroup1_AsyPadding)
create_test_cudnn_channel_last_class(TestWith1x1_AsyPadding)
# FIXME(typhoonzero): find a way to determine if
# using cudnn > 6 in python
# class TestWithDilationCUDNN(TestWithDilation):
# def init_op_type(self):
# self.op_type = "conv3d"
# --------- test python API ---------------
class TestConv3dAPI(OpTest):
def test_api(self):
input_NDHWC = fluid.layers.data(
name="input_NDHWC",
shape=[2, 5, 5, 5, 3],
append_batch_size=False,
dtype="float32")
input_NCDHW = fluid.layers.data(
name="input_NCDHW",
shape=[2, 3, 5, 5, 3],
append_batch_size=False,
dtype="float32")
fluid.layers.conv3d(
input=input_NDHWC,
num_filters=3,
filter_size=[3, 3, 3],
stride=[1, 1, 1],
padding=0,
dilation=[1, 1, 1],
groups=1,
data_format="NCDHW")
fluid.layers.conv3d(
input=input_NCDHW,
num_filters=3,
filter_size=[3, 3, 3],
stride=[1, 1, 1],
padding=[1, 2, 1, 0, 1, 0],
dilation=[1, 1, 1],
groups=1,
data_format="NCDHW")
fluid.layers.conv3d(
input=input_NCDHW,
num_filters=3,
filter_size=[3, 3, 3],
stride=[1, 1, 1],
padding=[[0, 0], [0, 0], [1, 1], [1, 1], [1, 1]],
dilation=[1, 1, 1],
groups=1,
data_format="NCDHW")
fluid.layers.conv3d(
input=input_NDHWC,
num_filters=3,
filter_size=[3, 3, 3],
stride=[1, 1, 1],
padding=[[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]],
dilation=[1, 1, 1],
groups=1,
data_format="NDHWC")
fluid.layers.conv3d(
input=input_NCDHW,
num_filters=3,
filter_size=[3, 3, 3],
stride=[1, 1, 1],
padding="SAME",
dilation=[1, 1, 1],
groups=1,
data_format="NCDHW")
fluid.layers.conv3d(
input=input_NCDHW,
num_filters=3,
filter_size=[3, 3, 3],
stride=[1, 1, 1],
padding="VALID",
dilation=[1, 1, 1],
groups=1,
data_format="NCDHW")
class TestConv3dAPI_Error(OpTest):
def test_api(self):
input = fluid.layers.data(
name="input",
shape=[2, 5, 5, 5, 4],
append_batch_size=False,
dtype="float32")
# ValueError: cudnn
def run_1():
fluid.layers.conv3d(
input=input,
num_filters=3,
filter_size=3,
stride=1,
padding=0,
dilation=1,
groups=1,
use_cudnn=[0],
data_format="NCDHW")
self.assertRaises(ValueError, run_1)
# ValueError: data_format
def run_2():
fluid.layers.conv3d(
input=input,
num_filters=3,
filter_size=[3, 3, 3],
stride=[1, 1, 1],
padding=0,
dilation=[1, 1, 1],
groups=1,
use_cudnn=False,
data_format="NCHWC")
self.assertRaises(ValueError, run_2)
# ValueError: padding
def run_3():
fluid.layers.conv3d(
input=input,
num_filters=3,
filter_size=3,
stride=1,
padding="SAMEE",
dilation=1,
groups=1,
use_cudnn=False,
data_format="NCDHW")
self.assertRaises(ValueError, run_3)
def run_4():
fluid.layers.conv3d(
input=input,
num_filters=3,
filter_size=3,
stride=1,
padding=[[0, 1], [0, 0], [0, 1], [0, 1], [0, 1]],
dilation=1,
groups=1,
use_cudnn=False,
data_format="NCDHW")
self.assertRaises(ValueError, run_4)
def run_5():
fluid.layers.conv3d(
input=input,
num_filters=3,
filter_size=0,
stride=0,
padding=[[0, 1], [0, 1], [0, 1], [0, 1], [0, 1]],
dilation=1,
groups=1,
use_cudnn=False,
data_format="NDHWC")
self.assertRaises(ValueError, run_5)
        # ValueError: channel dimension
x = fluid.layers.data(
name="x",
shape=[2, 5, 5, 5, -1],
append_batch_size=False,
dtype="float32")
def run_6():
fluid.layers.conv3d(
input=x,
num_filters=3,
filter_size=3,
stride=1,
padding=0,
dilation=1,
groups=1,
use_cudnn=False,
data_format="NDHWC")
self.assertRaises(ValueError, run_6)
# ValueError: groups
def run_7():
fluid.layers.conv3d(
input=input,
num_filters=3,
filter_size=3,
stride=1,
padding=0,
dilation=1,
groups=3,
use_cudnn=False,
data_format="NDHWC")
self.assertRaises(ValueError, run_7)
if __name__ == '__main__':
unittest.main()
|
|
from datetime import date
from itertools import accumulate
from calendar import monthrange
from collections import Counter
from kconfig import enablersBook, toolsBook
from kconfig import chaptersBook
from kernel.Backlog import Backlog, Issue
from kernel.IssuesModel import backlogIssuesModel
from kernel import calendar as FWcalendar
__author__ = "Manuel Escriche <[email protected]>"
class Reporter:
def __init__(self, backlog):
self.backlog = backlog
@property
def impeded(self):
return self.backlog.impeded
@property
def blockers(self):
return self.backlog.blockers
@property
def epics(self):
return self.backlog.epics
@property
def operativeIssues(self):
return self.backlog.operativeIssues
@property
def sortDict(self):
return Backlog._sortDict
@property
def issueType(self):
count = Counter([issue.issueType for issue in self.backlog])
return {issueType: count[issueType] for issueType in Backlog._issueTypes}
@property
def perspective(self):
count = Counter([issue.frame for issue in self.backlog])
return {frame: count[frame] for frame in Backlog._perspectives}
@property
def errors(self):
count = Counter([issue.test.gStatus for issue in self.backlog])
return {item: count[item] for item in ('OK', 'KO')}
@property
def status(self):
count = Counter([issue.status for issue in self.backlog if issue.frame == 'Working On'])
return {status:count[status] for status in count}
@property
def sprint_status(self):
count = Counter([issue.status for issue in self.backlog
if issue.frame == 'Working On' and issue.issueType in backlogIssuesModel.shortTermTypes])
return {status:count[status] for status in count}
@property
def implemented(self):
book = FWcalendar.monthBook
createdIssues = Counter(['{:02d}-{}'.format(issue.created.month, issue.created.year) for issue in self.backlog])
createdData = list(accumulate([createdIssues[book[month]] for month in FWcalendar.pastMonths]))
updatedIssues = Counter(['{:02d}-{}'.format(issue.updated.month, issue.updated.year) for issue in self.backlog])
updatedData = list(accumulate([updatedIssues[book[month]] for month in FWcalendar.pastMonths]))
closedIssues = [issue for issue in self.backlog if issue.status == 'Closed']
resolvedIssues = Counter(['{:02d}-{}'.format(issue.resolved.month, issue.resolved.year) for issue in closedIssues])
resolvedData = list(accumulate([resolvedIssues[book[month]] for month in FWcalendar.pastMonths]))
finishedIssues = [issue for issue in closedIssues if issue.frame in ('Working On','Implemented')]
releasedIssues = Counter(['{:02d}-{}'.format(issue.released.month, issue.released.year) for issue in finishedIssues])
progressData = [releasedIssues[book[month]] for month in FWcalendar.pastMonths]
releasedData = list(accumulate(progressData))
outdata = {}
outdata['categories'] = FWcalendar.timeline
outdata['created'] = createdData
outdata['resolved'] = resolvedData
outdata['updated'] = updatedData
outdata['released'] = releasedData
outdata['progress'] = progressData
return outdata
@property
def burndown(self):
issues = [issue for issue in self.backlog if issue.frame == 'Working On' \
and issue.issueType in backlogIssuesModel.shortTermTypes]
closedIssues = Counter([issue.updated.day for issue in issues if issue.status == 'Closed'])
#print(closedIssued)
NIssues = len(issues)
month_length = monthrange(date.today().year, date.today().month)[1]
data = [(day, closedIssues[day]) for day in range(1, date.today().day+1)]
#print(data)
data = zip([item[0] for item in data], accumulate([item[1] for item in data]))
data = {item[0]: NIssues-item[1] for item in data}
#print(data)
n = lambda x: NIssues/month_length if x > 0 else 0
ref_data = {day : n(day) for day in range(1, month_length+1)}
ref_data = dict(zip(ref_data.keys(), accumulate(ref_data.values())))
ref_data = {day : round(abs(NIssues-ref_data[day]), 1) for day in ref_data}
#print(ref_data)
cdata = lambda d: data[d] if d in data else 'null'
outdata = {}
outdata['categories'] = [day for day in range(1, month_length+1)]
outdata['reference'] = [ref_data[day] for day in range(1, month_length+1)]
outdata['actual'] = [cdata(day) for day in range(1, date.today().day+1)]
outdata['closed'] = [closedIssues[day] for day in range(1, date.today().day+1)]
return outdata
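    # Informal example of the reference line above: with 15 open short-term
    # issues and a 30-day month, NIssues/month_length = 0.5, so the reference
    # burndown is 14.5 after day 1, 14.0 after day 2, ... and 0.0 on day 30,
    # while 'actual' only covers days up to today.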
@property
def issueType_graph_data(self):
count = self.issueType
return [[issueType, count[issueType]] for issueType in Backlog._issueTypes ]
@property
def perspective_graph_data(self):
count = self.perspective
return [[frame, count[frame]] for frame in Backlog._perspectives]
@property
def sprint_status_graph_data(self):
count = self.sprint_status
return [[status, count[status]] for status in count]
@property
def errors_graph_data(self):
color = {'OK':'#bada55', 'KO':'#ff4040'}
count = Counter([issue.test.gStatus for issue in self.backlog])
return [{'name':_type, 'y': count[_type], 'color': color[_type]} for _type in count]
@property
def burndown_graph_data(self):
issues = [issue for issue in self.backlog if issue.frame == 'Working On' \
and issue.issueType in backlogIssuesModel.shortTermTypes]
closedIssues = Counter([issue.updated.day for issue in issues if issue.status == 'Closed'])
#print(closedIssued)
NIssues = len(issues)
month_length = monthrange(date.today().year, date.today().month)[1]
data = [(day, closedIssues[day]) for day in range(1, date.today().day+1)]
#print(data)
data = zip([item[0] for item in data], accumulate([item[1] for item in data]))
data = {item[0]: NIssues-item[1] for item in data}
#print(data)
n = lambda x: NIssues/month_length if x > 0 else 0
ref_data = {day : n(day) for day in range(1, month_length+1)}
ref_data = dict(zip(ref_data.keys(), accumulate(ref_data.values())))
ref_data = {day : round(abs(NIssues-ref_data[day]), 1) for day in ref_data}
#print(ref_data)
cdata = lambda d: data[d] if d in data else 'null'
outdata = {}
outdata['categories'] = [day for day in range(1, month_length+1)]
outdata['reference'] = dict()
outdata['reference']['type'] = 'spline'
outdata['reference']['name'] = 'Reference'
outdata['reference']['data'] = [ref_data[day] for day in range(1, month_length+1)]
outdata['reference']['marker'] = {'enabled': 'false'}
outdata['reference']['dashStyle'] = 'shortdot'
outdata['actual'] = dict()
outdata['actual']['type'] = 'spline'
outdata['actual']['name'] = 'Actual'
outdata['actual']['data'] = [cdata(day) for day in range(1, date.today().day+1)]
outdata['closed'] = dict()
outdata['closed']['type'] = 'column'
outdata['closed']['name'] = 'Closed'
outdata['closed']['data'] = [closedIssues[day] for day in range(1, date.today().day+1)]
return outdata
@property
def implemented_graph_data(self):
book = FWcalendar.monthBook
#issues = [issue for issue in self.backlog if issue.frame in ('Working On','Implemented') and issue.status == 'Closed']
createdIssues = Counter(['{:02d}-{}'.format(issue.created.month, issue.created.year) for issue in self.backlog])
createdData = list(accumulate([createdIssues[book[month]] for month in FWcalendar.pastMonths]))
updatedIssues = Counter(['{:02d}-{}'.format(issue.updated.month, issue.updated.year) for issue in self.backlog])
updatedData = list(accumulate([updatedIssues[book[month]] for month in FWcalendar.pastMonths]))
closedIssues = [issue for issue in self.backlog if issue.status == 'Closed']
resolvedIssues = Counter(['{:02d}-{}'.format(issue.resolved.month, issue.resolved.year) for issue in closedIssues])
resolvedData = list(accumulate([resolvedIssues[book[month]] for month in FWcalendar.pastMonths]))
finishedIssues = [issue for issue in closedIssues if issue.frame in ('Working On','Implemented')]
releasedIssues = Counter(['{:02d}-{}'.format(issue.released.month, issue.released.year) for issue in finishedIssues])
progressData = [releasedIssues[book[month]] for month in FWcalendar.pastMonths]
releasedData = list(accumulate(progressData))
outdata = {}
outdata['categories'] = FWcalendar.timeline
outdata['ncategories'] = len(FWcalendar.timeline) - 1
outdata['created'] = dict()
outdata['created']['type'] = 'spline'
outdata['created']['name'] = 'Created'
outdata['created']['data'] = createdData
outdata['resolved'] = dict()
outdata['resolved']['type'] = 'spline'
outdata['resolved']['name'] = 'Resolved'
outdata['resolved']['data'] = resolvedData
outdata['updated'] = dict()
outdata['updated']['type'] = 'spline'
outdata['updated']['name'] = 'Updated'
outdata['updated']['data'] = updatedData
outdata['released'] = dict()
outdata['released']['type'] = 'spline'
outdata['released']['name'] = 'Released'
outdata['released']['data'] = releasedData
outdata['progress'] = dict()
outdata['progress']['type'] = 'column'
outdata['progress']['name'] = 'Progress'
outdata['progress']['data'] = progressData
return outdata
@property
def length(self):
return len(self.backlog)
def __len__(self):
return len(self.backlog)
def frame_length(self, frame):
issues = [issue for issue in self.backlog if issue.frame == frame]
return len(issues)
def review(self):
self.backlog.review()
class CoordinationReporter(Reporter):
def __init__(self, chaptername, backlog):
super().__init__(backlog)
self.chaptername = chaptername
self.backlog.review()
@property
def chapter(self):
return chaptersBook[self.chaptername]
@property
def chaptersBook(self):
return chaptersBook
@property
def frame_status_graph_data(self):
frame='Working On'
issues = [issue for issue in self.backlog if issue.frame == frame]
#print('working on issues =', len(issues))
statuses = sorted(set([issue.status for issue in issues]))
#print('found statuses =', statuses)
enablerIssuesBook = {}
for enablername in enablersBook:
enabler = enablersBook[enablername]
enablerIssuesBook[enablername] = Counter([issue.status for issue in issues if enabler.key == issue.component])
#print(enabler, dict(enablerIssuesBook[key]))
_frame_status_graph_data = []
for status in statuses:
status_dict = {}
status_dict['name'] = status
status_dict['data'] = [enablerIssuesBook[enabler][status] for enabler in enablersBook]
_frame_status_graph_data.append(status_dict)
#print('frame_status_graph_data =', _frame_status_graph_data)
return _frame_status_graph_data
class ChapterReporter(Reporter):
def __init__(self, chaptername, backlog):
super().__init__( backlog)
self.chaptername = chaptername
self.chapter = chaptersBook[chaptername]
#start_time = time.time()
backlog.review()
#elapsed_time = time.time() - start_time
#print(elapsed_time)
@property
def chaptersBook(self):
return chaptersBook
@property
def enablers(self):
return list(self.chapter.enablers.keys())
@property
def tools(self):
return list(self.chapter.tools.keys())
@property
def coordination(self):
return self.chapter.coordination
@property
def frame_status_graph_data(self):
frame='Working On'
issues = [issue for issue in self.backlog if issue.frame == frame and issue.component]
statuses = sorted(set([issue.status for issue in issues]))
chapterIssuesBook = {}
for key in self.chapter.enablers:
enabler = self.chapter.enablers[key]
chapterIssuesBook[key] = Counter([issue.status for issue in issues if enabler.key == issue.component])
_frame_status_graph_data = []
for status in statuses:
status_dict = {}
status_dict['name'] = status
status_dict['data'] = [chapterIssuesBook[enabler][status] for enabler in self.chapter.enablers]
#print(status_dict)
_frame_status_graph_data.append(status_dict)
return _frame_status_graph_data
@property
def tools_frame_status_graph_data(self):
frame='Working On'
issues = [issue for issue in self.backlog if issue.frame == frame and issue.component]
statuses = sorted(set([issue.status for issue in issues]))
chapterIssuesBook = {}
for key in self.chapter.tools:
enabler = self.chapter.tools[key]
chapterIssuesBook[key] = Counter([issue.status for issue in issues if enabler.key == issue.component])
_frame_status_graph_data = []
for status in statuses:
status_dict = {}
status_dict['name'] = status
status_dict['data'] = [chapterIssuesBook[tool][status] for tool in self.chapter.tools]
#print(status_dict)
_frame_status_graph_data.append(status_dict)
return _frame_status_graph_data
@property
def enablers_execution_status(self):
frames = reversed(Issue._timeFrames)
chapterIssuesBook = {}
for key in self.chapter.enablers:
enabler = self.chapter.enablers[key]
chapterIssuesBook[key] = Counter([issue.frame for issue in self.backlog if enabler.key == issue.component])
_frame_graph_data = []
for frame in frames:
frame_dict = {}
frame_dict['name'] = frame
frame_dict['data'] = [chapterIssuesBook[enabler][frame] for enabler in self.chapter.enablers]
_frame_graph_data.append(frame_dict)
return _frame_graph_data
@property
def tools_execution_status(self):
frames = reversed(Issue._timeFrames)
chapterIssuesBook = {}
for key in self.chapter.tools:
tool = self.chapter.tools[key]
chapterIssuesBook[key] = Counter([issue.frame for issue in self.backlog if tool.key == issue.component])
_frame_graph_data = []
for frame in frames:
frame_dict = {}
frame_dict['name'] = frame
frame_dict['data'] = [chapterIssuesBook[tool][frame] for tool in self.chapter.tools]
_frame_graph_data.append(frame_dict)
return _frame_graph_data
@property
def no_component(self):
return [issue for issue in self.backlog if len(issue.components) == 0]
class ChaptersReporter(Reporter):
def __init__(self, backlog):
super().__init__(backlog)
#start_time = time.time()
backlog.review()
#elapsed_time = time.time() - start_time
#print(elapsed_time)
@property
def chapters_sprint_status_graph_data(self):
frame='Working On'
issues = [issue for issue in self.backlog
if issue.frame == frame and issue.issueType in backlogIssuesModel.shortTermTypes]
statuses = sorted(set([issue.status for issue in issues]))
chaptersIssuesBook = {}
for chaptername in chaptersBook:
chapter = chaptersBook[chaptername]
chaptersIssuesBook[chaptername] = Counter([issue.status for issue in issues if chapter.tracker == issue.tracker ])
_frame_status_graph_data = []
for status in statuses:
status_dict = {}
status_dict['name'] = status
status_dict['data'] = [chaptersIssuesBook[chapter][status] for chapter in chaptersBook]
#print(status_dict)
_frame_status_graph_data.append(status_dict)
#print(_frame_status_graph_data)
return _frame_status_graph_data
@property
def chapters_execution_status(self):
frames = reversed(Issue._timeFrames)
chaptersIssuesBook = {}
for chaptername in chaptersBook:
chapter = chaptersBook[chaptername]
chaptersIssuesBook[chaptername] = Counter([issue.frame for issue in self.backlog if chapter.tracker == issue.tracker ])
_frame_graph_data = []
for frame in frames:
frame_dict = {}
frame_dict['name'] = frame
frame_dict['data'] = [chaptersIssuesBook[chapter][frame] for chapter in chaptersBook]
_frame_graph_data.append(frame_dict)
return _frame_graph_data
@property
def chapters_errors_graph_data(self):
color = {'OK':'#bada55', 'KO':'#ff4040'}
values = ('OK', 'KO')
chaptersIssuesBook = {}
for key in chaptersBook:
chapter = chaptersBook[key]
#print(chapter)
chaptersIssuesBook[key] = Counter([issue.test.gStatus for issue in self.backlog if chapter.tracker == issue.tracker ])
#print(chaptersIssuesBook)
_frame_errors_graph_data = []
for value in values:
errors_dict = {}
errors_dict['name'] = value
errors_dict['data'] = [chaptersIssuesBook[chapter][value] for chapter in chaptersBook]
errors_dict['color'] = color[value]
#print(status_dict)
_frame_errors_graph_data.append(errors_dict)
return _frame_errors_graph_data
@property
def chapters(self):
return chaptersBook.chapters
@property
def enablers(self):
return list(enablersBook.keys())
@property
def chaptersBook(self):
return chaptersBook
@property
def nChapters(self):
return len(chaptersBook)
@property
def enablers_execution_status(self):
frames = reversed(Issue._timeFrames)
issuesBook = {}
for key in enablersBook:
enabler = enablersBook[key]
issuesBook[key] = Counter([issue.frame for issue in self.backlog if enabler.key == issue.component])
_frame_graph_data = []
for frame in frames:
frame_dict = {}
frame_dict['name'] = frame
frame_dict['data'] = [issuesBook[enabler][frame] for enabler in enablersBook]
_frame_graph_data.append(frame_dict)
return _frame_graph_data
class ToolReporter(Reporter):
def __init__(self, toolname, backlog):
super().__init__(backlog)
self.toolname = toolname
self.backlog.review()
@property
def toolsBook(self):
return toolsBook
@property
def tool(self):
return toolsBook[self.toolname]
class EnablerReporter(Reporter):
def __init__(self, enablername, backlog):
super().__init__(backlog)
self.enablername = enablername
self.backlog.review()
@property
def enablersBook(self):
return enablersBook
@property
def enabler(self):
return enablersBook[self.enablername]
class EnablersReporter(Reporter):
def __init__(self, backlog):
super().__init__(backlog)
#start_time = time.time()
backlog.review()
#elapsed_time = time.time() - start_time
#print(elapsed_time)
@property
def enablers_sprint_status_graph_data(self):
frame='Working On'
issues = [issue for issue in self.backlog
if issue.frame == frame and self.enablers and issue.issueType in backlogIssuesModel.shortTermTypes]
#print('working on issues =', len(issues))
statuses = sorted(set([issue.status for issue in issues]))
#print('found statuses =', statuses)
enablerIssuesBook = {}
for enablername in enablersBook:
enabler = enablersBook[enablername]
enablerIssuesBook[enablername] = Counter([issue.status for issue in issues if enabler.key == issue.component ])
#print(enabler, dict(enablerIssuesBook[key]))
_frame_status_graph_data = []
for status in statuses:
status_dict = {}
status_dict['name'] = status
status_dict['data'] = [enablerIssuesBook[enabler][status] for enabler in enablersBook]
_frame_status_graph_data.append(status_dict)
#print('frame_status_graph_data =', _frame_status_graph_data)
return _frame_status_graph_data
@property
def enablers_errors_graph_data(self):
color = {'OK':'#bada55', 'KO':'#ff4040'}
values = ('OK', 'KO')
enablerIssuesBook = {}
for enablername in enablersBook:
enabler = enablersBook[enablername]
#print(chapter)
enablerIssuesBook[enablername] = Counter([issue.test.gStatus for issue in self.backlog
if enabler.key == issue.component])
#print(chaptersIssuesBook)
_frame_errors_graph_data = []
for value in values:
errors_dict = {}
errors_dict['name'] = value
errors_dict['data'] = [enablerIssuesBook[enabler][value] for enabler in enablersBook]
errors_dict['color'] = color[value]
#print(status_dict)
_frame_errors_graph_data.append(errors_dict)
return _frame_errors_graph_data
@property
def enablers(self):
return list(enablersBook.keys())
@property
def enablersBook(self):
return enablersBook
if __name__ == "__main__":
pass
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
from tensorflow.contrib.eager.python import network
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import util as trackable_utils
# pylint: disable=not-callable
class MyNetwork(network.Network):
def __init__(self, name=None):
super(MyNetwork, self).__init__(name=name)
self.l1 = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.l1(x)
class RegularizedNetwork(network.Network):
def __init__(self):
super(RegularizedNetwork, self).__init__()
self.l1 = self.track_layer(core.Dense(
1,
bias_regularizer=regularizers.l1_regularizer(2.0),
kernel_regularizer=regularizers.l1_regularizer(2.0)))
self.l2 = self.track_layer(core.Dense(
1,
bias_regularizer=regularizers.l1_regularizer(2.0)))
def call(self, values):
return self.l2(self.l1(values))
class NetworkTest(test.TestCase):
def test_checkpointing_not_implemented(self):
checkpoint_directory = self.get_temp_dir()
checkpoint = trackable_utils.Checkpoint(net=MyNetwork())
with self.assertRaises(NotImplementedError):
checkpoint.save(checkpoint_directory)
def _save_modify_load_network_built(self, net, global_step=None):
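    # Saves a checkpoint for `net`, perturbs its variables, then restores from
    # both the checkpoint directory and the explicit path, checking that the
    # original output is recovered each time.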
checkpoint_directory = self.get_temp_dir()
checkpoint_path = network.save_network_checkpoint(
network=net, save_path=checkpoint_directory, global_step=global_step)
input_value = constant_op.constant([[42.0]])
original_output = self.evaluate(net(input_value))
for var in net.variables:
self.evaluate(var.assign(var + 1.))
self.assertGreater(
self.evaluate(net(input_value)),
original_output)
# Either the returned explicit checkpoint path or the directory should work.
network.restore_network_checkpoint(net, save_path=checkpoint_directory)
self.assertAllEqual(
original_output,
self.evaluate(net(input_value)))
for var in net.variables:
self.evaluate(var.assign(var + 2.))
network.restore_network_checkpoint(net, save_path=checkpoint_path)
self.assertAllEqual(
original_output,
self.evaluate(net(input_value)))
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testTrainableAttribute(self):
net = network.Network()
self.assertTrue(net.trainable)
with self.assertRaises(AttributeError):
net.trainable = False
self.assertTrue(net.trainable)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNetworkCall(self):
net = MyNetwork(name="abcd")
net(constant_op.constant([[2.0]])) # Force variables to be created.
self.assertEqual(1, len(net.trainable_variables))
self.evaluate(net.trainable_variables[0].assign([[17.0]]))
# TODO(josh11b): Support passing Python values to networks.
result = net(constant_op.constant([[2.0]]))
self.assertEqual(34.0, self.evaluate(result))
def testReplacingNetworkCallWithDefun(self):
net = MyNetwork(name="abcd")
net.call = function.defun(net.call)
x = constant_op.constant([[2.0]])
net(x) # Force variables to be created.
self.evaluate(net.trainable_variables[0].assign([[17.0]]))
result = net(x) # Build and execute the TensorFlow function
self.assertEqual(34.0, self.evaluate(result))
# Force the creation of another TensorFlow function by changing input shape
y = constant_op.constant([[1.0], [2.0]])
result = net(y)
self.assertAllEqual([[17.0], [34.0]], self.evaluate(result))
# TODO(allenl): This test creates garbage in some Python versions
@test_util.run_in_graph_and_eager_modes
def testNetworkSaveRestoreAlreadyBuilt(self):
net = MyNetwork(name="abcd")
with self.assertRaisesRegexp(
ValueError, "Attempt to save the Network before it was first called"):
network.save_network_checkpoint(net, self.get_temp_dir())
net(constant_op.constant([[2.0]]))
self.evaluate(net.trainable_variables[0].assign([[17.0]]))
self._save_modify_load_network_built(net, global_step=None)
self._save_modify_load_network_built(net, global_step=10)
# TODO(allenl): This test creates garbage in some Python versions
@test_util.run_in_graph_and_eager_modes
def testSaveRestoreDefaultGlobalStep(self):
net = MyNetwork(name="abcd")
net(constant_op.constant([[2.0]]))
self.evaluate(net.variables[0].assign([[3.]]))
default_global_step = training_util.get_or_create_global_step()
self.evaluate(default_global_step.assign(4242))
save_path = network.save_network_checkpoint(net, self.get_temp_dir())
self.assertIn("abcd-4242", save_path)
# TODO(allenl): This test creates garbage in some Python versions
@test_util.run_in_graph_and_eager_modes
def testNetworkSaveAndRestoreIntoUnbuilt(self):
save_dir = self.get_temp_dir()
net1 = MyNetwork()
test_input = constant_op.constant([[2.0]])
net1(test_input)
self.evaluate(net1.trainable_variables[0].assign([[17.0]]))
save_path = network.save_network_checkpoint(net1, save_dir)
# With a pre-build restore we should have the same value.
net2 = MyNetwork()
network.restore_network_checkpoint(net2, save_path)
self.assertAllEqual(self.evaluate(net1(test_input)),
self.evaluate(net2(test_input)))
self.assertIsNot(net1.variables[0], net2.variables[0])
self.assertAllEqual(self.evaluate(net1.variables[0]),
self.evaluate(net2.variables[0]))
@test_util.run_in_graph_and_eager_modes
def testNetworkMatchesLayerVariableNames(self):
zero = constant_op.constant([[0.]])
layer_one = core.Dense(1, use_bias=False)
layer_one(zero)
layer_two = core.Dense(1, use_bias=False)
layer_two(zero)
class TwoLayerNet(network.Network):
def __init__(self, name=None):
super(TwoLayerNet, self).__init__(name=name)
self.first = self.track_layer(core.Dense(
1, use_bias=False))
self.second = self.track_layer(core.Dense(
1, use_bias=False))
def call(self, x):
return self.second(self.first(x))
net = TwoLayerNet()
net(zero)
self.assertEqual("two_layer_net/" + layer_one.variables[0].name,
net.first.variables[0].name)
self.assertEqual("two_layer_net/" + layer_two.variables[0].name,
net.second.variables[0].name)
@test_util.run_in_graph_and_eager_modes
def testLoadIntoUnbuiltSharedLayer(self):
class Owner(network.Network):
def __init__(self, name=None):
super(Owner, self).__init__(name=name)
self.first = self.track_layer(core.Dense(
1, name="first_layer", use_bias=False))
def call(self, x):
return self.first(x)
first_owner = Owner()
class User(network.Network):
def __init__(self, use_layer, name=None):
super(User, self).__init__(name=name)
self.first = self.track_layer(use_layer)
self.second = self.track_layer(core.Dense(
1, name="second_layer", use_bias=False))
def call(self, x):
return self.second(self.first(x))
class LikeUserButNotSharing(network.Network):
def __init__(self, name=None):
super(LikeUserButNotSharing, self).__init__(name=name)
self.first = self.track_layer(core.Dense(
1, name="first_layer", use_bias=False))
self.second = self.track_layer(core.Dense(
1, name="second_layer", use_bias=False))
def call(self, x):
return self.second(self.first(x))
checkpoint_creator = LikeUserButNotSharing(name="checkpoint_creator")
one = constant_op.constant([[1.0]])
checkpoint_creator(one)
self.assertEqual(2, len(checkpoint_creator.variables))
self.evaluate(checkpoint_creator.variables[0].assign([[5.]]))
self.evaluate(checkpoint_creator.variables[1].assign([[6.]]))
# Re-map the variable names so that with default restore mapping we'll
# attempt to restore into the unbuilt Layer.
name_mapping = {
"checkpoint_creator/first_layer/kernel": "owner/first_layer/kernel",
"checkpoint_creator/second_layer/kernel": "second_layer/kernel",
}
save_path = network.save_network_checkpoint(
checkpoint_creator,
self.get_temp_dir(),
map_func=lambda full_name: name_mapping[full_name])
load_into = User(use_layer=first_owner.first)
network.restore_network_checkpoint(load_into, save_path)
self.assertEqual(0, len(first_owner.variables))
self.assertAllEqual(self.evaluate(checkpoint_creator(one)),
self.evaluate(load_into(one)))
self.assertEqual(1, len(first_owner.variables))
self.assertAllEqual([[5.]], self.evaluate(load_into.variables[0]))
self.assertAllEqual([[6.]], self.evaluate(load_into.variables[1]))
first_owner(one)
self.assertAllEqual([[5.]], self.evaluate(first_owner.variables[0]))
# Try again with a garbage collected parent.
first_owner = Owner()
load_into = User(use_layer=first_owner.first)
del first_owner
gc.collect()
def _restore_map_func(original_name):
if original_name.startswith("owner/"):
return original_name.replace("owner/", "owner_1/")
else:
return "user_1/" + original_name
with self.assertRaisesRegexp(ValueError, "garbage collected"):
network.restore_network_checkpoint(
load_into, save_path, map_func=_restore_map_func)
@test_util.run_in_graph_and_eager_modes
def testRestoreIntoSubNetwork(self):
class Parent(network.Network):
def __init__(self, name=None):
super(Parent, self).__init__(name=name)
self.first = self.track_layer(MyNetwork())
self.second = self.track_layer(MyNetwork())
def call(self, x):
return self.first(self.second(x))
one = constant_op.constant([[3.]])
whole_model_saver = Parent()
whole_model_saver(one)
self.evaluate(whole_model_saver.variables[0].assign([[15.]]))
self.evaluate(whole_model_saver.variables[1].assign([[16.]]))
whole_model_checkpoint = network.save_network_checkpoint(
whole_model_saver, self.get_temp_dir())
save_from = MyNetwork()
save_from(one)
self.evaluate(save_from.variables[0].assign([[5.]]))
checkpoint = network.save_network_checkpoint(save_from, self.get_temp_dir())
save_into_parent = Parent()
network.restore_network_checkpoint(save_into_parent, whole_model_checkpoint)
network.restore_network_checkpoint(save_into_parent.first, checkpoint)
# deferred loading multiple times is fine
network.restore_network_checkpoint(save_into_parent.first, checkpoint)
save_into_parent(one) # deferred loading
self.assertAllEqual([[5.]], self.evaluate(save_into_parent.variables[0]))
self.assertAllEqual([[16.]], self.evaluate(save_into_parent.variables[1]))
# Try again with the opposite ordering, and we should get different results
# (deferred restoration should happen the same way non-deferred happens,
# with later restorations overwriting older ones).
save_into_parent = Parent()
# deferred loading multiple times is fine
network.restore_network_checkpoint(save_into_parent.first, checkpoint)
network.restore_network_checkpoint(save_into_parent, whole_model_checkpoint)
save_into_parent(one) # deferred loading
# We've overwritten the sub-Network restore.
self.assertAllEqual([[15.]], self.evaluate(save_into_parent.variables[0]))
self.assertAllEqual([[16.]], self.evaluate(save_into_parent.variables[1]))
self.evaluate(save_into_parent.variables[0].assign([[3.]]))
self.evaluate(save_into_parent.variables[1].assign([[4.]]))
network.restore_network_checkpoint(save_into_parent.second, checkpoint)
self.assertAllEqual([[5.]], self.evaluate(save_into_parent.variables[1]))
with self.assertRaisesRegexp(errors_impl.NotFoundError,
"not found in checkpoint"):
# The checkpoint is incompatible.
network.restore_network_checkpoint(save_into_parent, checkpoint)
@test_util.run_in_graph_and_eager_modes
def testCustomMapCollisionErrors(self):
class Parent(network.Network):
def __init__(self, name=None):
super(Parent, self).__init__(name=name)
self.first = self.track_layer(MyNetwork())
self.second = self.track_layer(MyNetwork())
def call(self, x):
return self.first(self.second(x))
make_checkpoint = Parent()
one = constant_op.constant([[1.]])
make_checkpoint(one)
self.evaluate(make_checkpoint.variables[0].assign([[2.]]))
self.evaluate(make_checkpoint.variables[1].assign([[3.]]))
with self.assertRaisesRegexp(
ValueError,
"The map_func passed to save_network_checkpoint for the Network "
"'parent' resulted in two variables named 'foo'"):
network.save_network_checkpoint(
make_checkpoint, self.get_temp_dir(), map_func=lambda n: "foo")
checkpoint = network.save_network_checkpoint(
network=make_checkpoint.first,
save_path=self.get_temp_dir(),
map_func=lambda n: "foo")
loader = Parent()
network.restore_network_checkpoint(
loader, checkpoint, map_func=lambda n: "foo")
with self.assertRaisesRegexp(
ValueError,
("The map_func passed to restore_network_checkpoint for the Network"
" 'parent_1' resulted in two variables named 'foo'")):
loader(one)
loader = Parent()
loader(one)
with self.assertRaisesRegexp(
ValueError,
("The map_func passed to restore_network_checkpoint for the Network"
" 'parent_2' resulted in two variables named 'foo'")):
network.restore_network_checkpoint(
loader, checkpoint, map_func=lambda n: "foo")
@test_util.run_in_graph_and_eager_modes
def testDefaultMapCollisionErrors(self):
one = constant_op.constant([[1.]])
first = core.Dense(1, name="dense", use_bias=False)
first(one)
class Parent(network.Network):
def __init__(self, name=None):
super(Parent, self).__init__(name=name)
self.first = self.track_layer(first)
self.second = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.first(self.second(x))
make_checkpoint = Parent()
one = constant_op.constant([[1.]])
make_checkpoint(one)
self.evaluate(make_checkpoint.variables[0].assign([[2.]]))
self.evaluate(make_checkpoint.variables[1].assign([[3.]]))
with self.assertRaisesRegexp(
ValueError,
("The default checkpoint variable name mapping strategy for Network "
"'parent' resulted in a naming conflict.")):
network.save_network_checkpoint(make_checkpoint, self.get_temp_dir())
class Compatible(network.Network):
def __init__(self, name=None):
super(Compatible, self).__init__(name=name)
self.first = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.first(x)
successful_checkpoint = Compatible()
successful_checkpoint(one)
self.evaluate(successful_checkpoint.variables[0].assign([[-1.]]))
checkpoint_path = network.save_network_checkpoint(
successful_checkpoint, self.get_temp_dir())
load_checkpoint = Parent()
load_checkpoint(one)
with self.assertRaisesRegexp(
ValueError,
("The default checkpoint variable name mapping strategy for Network "
"'parent_1' resulted in a naming conflict.")):
network.restore_network_checkpoint(load_checkpoint, checkpoint_path)
def testNoReferenceCyclesAfterCall(self):
class ChildNetwork(network.Network):
def __init__(self, name=None):
super(ChildNetwork, self).__init__(name=name)
def call(self, x):
return x * 2.
class ParentNetwork(network.Network):
def __init__(self, name=None):
super(ParentNetwork, self).__init__(name=name)
self.l1 = self.track_layer(ChildNetwork())
def call(self, x):
return self.l1(x)
one = constant_op.constant([[1.0]])
gc.disable()
gc.collect()
previous_gc_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
preexisting = len(gc.garbage)
net = ParentNetwork()
net(one)
del net
gc.collect()
# There should be no additional garbage requiring collection.
self.assertEqual(preexisting, len(gc.garbage))
gc.set_debug(previous_gc_debug_flags)
gc.enable()
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testAnonymousNoNameInitially(self):
net = MyNetwork()
with self.assertRaisesRegexp(ValueError, "does not yet have a final name"):
net.name # pylint: disable=pointless-statement
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testExplicitHasNameInitially(self):
net = MyNetwork(name="abcd")
self.assertEqual("abcd", net.name)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testUsingResourceVariables(self):
net = MyNetwork()
net(constant_op.constant([[0.]]))
self.assertIsInstance(net.trainable_weights[0],
resource_variable_ops.ResourceVariable)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testVariableRegularizers(self):
net = RegularizedNetwork()
net(constant_op.constant([[1.]]))
self.evaluate(net.variables[0].assign([[2.]]))
self.evaluate(net.variables[1].assign([3.]))
self.evaluate(net.variables[2].assign([[-2.]]))
self.evaluate(net.variables[3].assign([4.]))
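    # l1_regularizer(2.0) applies to l1's kernel and bias and to l2's bias
    # (l2's kernel is unregularized): 2*|2|=4, 2*|3|=6, 2*|4|=8.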
self.assertAllEqual([4., 6., 8.], self.evaluate(net.losses))
self.evaluate(net.variables[3].assign([5.]))
self.assertAllEqual([4., 6., 10.], self.evaluate(net.losses))
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testDuplicateNameError(self):
one = constant_op.constant([[1.]])
net = MyNetwork(name="foo")
net(one)
with self.assertRaisesRegexp(
ValueError, "named 'foo' already exists"):
net1 = MyNetwork(name="foo")
net1(one)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testWrappingInVariableScope(self):
one = constant_op.constant([[1.]])
# Naming happens in the order of first build rather than the order of
# construction, but for clarity they're the same here and construction is
# annotated.
outside_net_before = MyNetwork() # name=my_network
outside_net_before(one)
captured_scope = variable_scope.get_variable_scope()
with variable_scope.variable_scope("outside_scope"):
net1 = MyNetwork() # name=outside_scope/my_network
net1(one)
name_conflict1 = MyNetwork(name="name_conflict") # fine, unique so far
name_conflict2 = MyNetwork(name="name_conflict") # error on build
with variable_scope.variable_scope("inside_scope"):
# No issue here since the name is unique within its scope.
name_conflict3 = MyNetwork(name="name_conflict")
net2 = MyNetwork() # name=outside_scope/my_network_2 to avoid the
# variable_scope my_network_1 below.
vs_name_conflict = MyNetwork(name="vs_name_conflict") # conflict below
with variable_scope.variable_scope("intervening_scope"):
with variable_scope.variable_scope(captured_scope):
with variable_scope.variable_scope("outside_scope"):
name_conflict4 = MyNetwork(name="name_conflict") # error on build
with variable_scope.variable_scope("my_network_1"):
pass
with variable_scope.variable_scope("vs_name_conflict"):
pass
          net3 = MyNetwork()  # name=outside_scope/my_network_3
name_conflict1(one)
with self.assertRaisesRegexp(
ValueError, "named 'name_conflict' already exists"):
name_conflict2(one)
name_conflict3(one)
net2(one)
with self.assertRaisesRegexp(
ValueError, "or a variable_scope was created with this name"):
vs_name_conflict(one)
with self.assertRaisesRegexp(
ValueError, "named 'name_conflict' already exists"):
name_conflict4(one)
self.assertEqual("outside_scope/name_conflict",
name_conflict1.name)
self.assertStartsWith(
expected_start="outside_scope/name_conflict/dense/",
actual=name_conflict1.variables[0].name)
self.assertEqual("outside_scope/inside_scope/name_conflict",
name_conflict3.name)
self.assertStartsWith(
expected_start="outside_scope/inside_scope/name_conflict/dense/",
actual=name_conflict3.variables[0].name)
self.assertEqual("outside_scope/my_network", net1.name)
self.assertStartsWith(
expected_start="outside_scope/my_network/dense/",
actual=net1.trainable_weights[0].name)
self.assertEqual("outside_scope/my_network_2", net2.name)
self.assertStartsWith(
expected_start="outside_scope/my_network_2/dense/",
actual=net2.trainable_weights[0].name)
net3(one)
self.assertEqual("outside_scope/my_network_3", net3.name)
self.assertStartsWith(
expected_start="outside_scope/my_network_3/dense/",
actual=net3.trainable_weights[0].name)
outside_net_after = MyNetwork()
outside_net_after(one)
self.assertEqual("my_network", outside_net_before.name)
self.assertStartsWith(
expected_start="my_network/dense/",
actual=outside_net_before.trainable_weights[0].name)
self.assertEqual("my_network_1", outside_net_after.name)
self.assertStartsWith(
expected_start="my_network_1/dense/",
actual=outside_net_after.trainable_weights[0].name)
@test_util.run_in_graph_and_eager_modes
def testVariableScopeStripping(self):
with variable_scope.variable_scope("scope1"):
with variable_scope.variable_scope("scope2"):
net = MyNetwork()
net(constant_op.constant([[2.0]]))
self.evaluate(net.variables[0].assign([[42.]]))
self.assertEqual(net.name, "scope1/scope2/my_network")
self.assertStartsWith(
expected_start="scope1/scope2/my_network/dense/",
actual=net.trainable_weights[0].name)
save_path = network.save_network_checkpoint(net, self.get_temp_dir())
self.assertIn("scope1_scope2_my_network", save_path)
restore_net = MyNetwork()
# Delayed restoration
network.restore_network_checkpoint(restore_net, save_path)
restore_net(constant_op.constant([[1.0]]))
self.assertAllEqual([[42.]],
self.evaluate(restore_net.variables[0]))
self.evaluate(restore_net.variables[0].assign([[-1.]]))
# Immediate restoration
network.restore_network_checkpoint(restore_net, save_path)
self.assertAllEqual([[42.]],
self.evaluate(restore_net.variables[0]))
@test_util.run_in_graph_and_eager_modes
def testLayerNamesRespected(self):
class ParentNetwork(network.Network):
def __init__(self):
super(ParentNetwork, self).__init__()
self.first = self.track_layer(
core.Dense(1, use_bias=False, name="explicit_name"))
def call(self, x):
return self.first(x)
one = constant_op.constant([[1.]])
net = ParentNetwork()
net(one)
self.assertStartsWith(expected_start="parent_network/explicit_name/",
actual=net.trainable_weights[0].name)
self.assertEqual("explicit_name", net.first.name)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testWrappingInAnonymousVariableScope(self):
# Named outside variable_scopes are not supported at the moment. However,
# blank-named top level variable scopes do not change variable names, and so
# can be used to set the properties of Network variables.
was_called = [False]
def _custom_getter(getter, *args, **kwargs):
was_called[0] = True
return getter(*args, **kwargs)
with variable_scope.variable_scope("", custom_getter=_custom_getter):
net = MyNetwork()
one = constant_op.constant([[1.]])
net(one)
self.assertTrue(was_called[0])
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testReasonableSlashError(self):
with self.assertRaisesRegexp(
ValueError, "not allowed in Network names"):
MyNetwork(name="slash/slash")
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNoVariableScopeNames(self):
with self.assertRaisesRegexp(
ValueError, "VariableScopes are not valid Network names"):
with variable_scope.variable_scope("some_scope") as vs:
MyNetwork(name=vs)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testVariableScopeNameCollision(self):
with variable_scope.variable_scope("abcd"):
pass
with self.assertRaisesRegexp(
ValueError, "or a variable_scope was created with this name"):
net = MyNetwork(name="abcd")
one = constant_op.constant([[1.]])
net(one)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNetworkVariablesDoNotInterfere(self):
core.Dense(1, use_bias=True) # Should not interfere with naming.
net1 = MyNetwork()
net2 = MyNetwork()
one = constant_op.constant([[1.]])
net1(one)
net2(one)
# Layer names typically are globally unique rather than being unique within
# the scope of their first use. However, within a Network they must be named
# locally so that previous Layer construction does not interfere with
# variable naming (e.g. add a Layer construction before the Network,
# suddenly your previously saved checkpoint is incompatible).
self.assertEqual("dense", net1.l1.name)
self.assertEqual("dense", net2.l1.name)
self.evaluate(net1.trainable_weights[0].assign([[1.]]))
self.evaluate(net2.trainable_weights[0].assign([[2.]]))
self.assertEqual(2., self.evaluate(net2.trainable_weights[0]))
self.assertEqual(1., self.evaluate(net1.trainable_weights[0]))
self.assertStartsWith(expected_start="my_network/dense/",
actual=net1.trainable_weights[0].name)
self.assertStartsWith(expected_start="my_network_1/dense/",
actual=net2.trainable_weights[0].name)
@test_util.run_in_graph_and_eager_modes
def testNestableAnonymous(self):
# The case where no explicit names are specified. We make up unique names,
# and these should match the variable names.
class ParentNetwork(network.Network):
def __init__(self):
super(ParentNetwork, self).__init__()
self.first = self.track_layer(MyNetwork())
self.second = self.track_layer(MyNetwork())
def call(self, x):
return self.second(self.first(x))
one = constant_op.constant([[1.]])
net = ParentNetwork()
net(one)
self.assertStartsWith(expected_start="parent_network/my_network/dense",
actual=net.trainable_weights[0].name)
self.assertStartsWith(expected_start="parent_network/my_network/dense",
actual=net.first.trainable_weights[0].name)
self.assertStartsWith(expected_start="parent_network/my_network_1/dense",
actual=net.trainable_weights[1].name)
self.assertStartsWith(expected_start="parent_network/my_network_1/dense",
actual=net.second.trainable_weights[0].name)
self.assertEqual("parent_network", net.name)
self.assertEqual("my_network", net.first.name)
self.assertEqual("my_network_1", net.second.name)
net2 = ParentNetwork()
net2(one)
self.assertStartsWith(expected_start="parent_network_1/my_network/dense",
actual=net2.trainable_weights[0].name)
self.assertStartsWith(expected_start="parent_network_1/my_network/dense",
actual=net2.first.trainable_weights[0].name)
self.assertStartsWith(expected_start="parent_network_1/my_network_1/dense",
actual=net2.trainable_weights[1].name)
self.assertStartsWith(expected_start="parent_network_1/my_network_1/dense",
actual=net2.second.trainable_weights[0].name)
self.assertEqual("parent_network_1", net2.name)
self.assertEqual("my_network", net2.first.name)
self.assertEqual("my_network_1", net2.second.name)
@test_util.run_in_graph_and_eager_modes
def testNestableExplicit(self):
# We have explicit network names and everything is globally unique.
class ParentNetwork(network.Network):
def __init__(self):
super(ParentNetwork, self).__init__(name="unique_parent_name")
self.first = self.track_layer(
MyNetwork(name="first_unique_child_name"))
self.second = self.track_layer(
MyNetwork(name="second_unique_child_name"))
def call(self, x):
return self.second(self.first(x))
one = constant_op.constant([[1.]])
net = ParentNetwork()
net(one)
self.assertStartsWith(
expected_start="unique_parent_name/first_unique_child_name/dense",
actual=net.trainable_weights[0].name)
self.assertStartsWith(
expected_start="unique_parent_name/second_unique_child_name/dense",
actual=net.trainable_weights[1].name)
self.assertEqual("unique_parent_name", net.name)
self.assertEqual("first_unique_child_name", net.first.name)
self.assertEqual("second_unique_child_name", net.second.name)
@test_util.run_in_graph_and_eager_modes
def testLayerNetworkNameInteractions(self):
# Same base name as core.Dense; Networks and non-Network Layers with the
# same base name should use the same numbering system.
class Dense(network.Network):
def __init__(self):
super(Dense, self).__init__()
self.first = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.first(x)
class MixedLayerNetwork(network.Network):
def __init__(self):
super(MixedLayerNetwork, self).__init__()
self.first = self.track_layer(core.Dense(1, use_bias=False))
self.second = self.track_layer(core.Dense(1, use_bias=False))
self.third = self.track_layer(Dense())
self.fourth = self.track_layer(core.Dense(1, use_bias=False))
self.fifth = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.fifth(self.fourth(self.third(self.second(self.first(x)))))
one = constant_op.constant([[1.]])
net = MixedLayerNetwork()
net(one)
self.assertEqual("dense", net.first.name)
self.assertEqual("dense_1", net.second.name)
self.assertEqual("dense_2", net.third.name)
self.assertEqual("dense_3", net.fourth.name)
self.assertEqual("dense_4", net.fifth.name)
# Note that this is _not_ the default naming behavior for Layers. Layers
# which are added to Networks follow Network variable naming conventions
# (i.e. variable names = network name unless variable sharing). Nested
# Layers revert to Layer behavior.
self.assertStartsWith(expected_start="mixed_layer_network/dense/",
actual=net.trainable_weights[0].name)
self.assertStartsWith(expected_start="mixed_layer_network/dense_1/",
actual=net.trainable_weights[1].name)
self.assertStartsWith(expected_start="mixed_layer_network/dense_2/",
actual=net.trainable_weights[2].name)
self.assertStartsWith(expected_start="mixed_layer_network/dense_3/",
actual=net.trainable_weights[3].name)
self.assertStartsWith(expected_start="mixed_layer_network/dense_4/",
actual=net.trainable_weights[4].name)
self.assertEqual("mixed_layer_network", net.name)
@test_util.run_in_graph_and_eager_modes
def testNestableExplicitCollisions(self):
# We have explicit network names and they are unique within the layer
# they're added to.
class ParentNetwork(network.Network):
def __init__(self):
super(ParentNetwork, self).__init__(name="nonunique_name")
self.first = self.track_layer(
MyNetwork(name="nonunique_name"))
self.second = self.track_layer(
MyNetwork(name="second_unique_child_name"))
def call(self, x):
return self.second(self.first(x))
one = constant_op.constant([[1.]])
net = ParentNetwork()
net(one)
self.assertStartsWith(
expected_start="nonunique_name/nonunique_name/dense",
actual=net.trainable_weights[0].name)
self.assertStartsWith(
expected_start="nonunique_name/second_unique_child_name/dense",
actual=net.trainable_weights[1].name)
self.assertEqual("nonunique_name", net.name)
self.assertEqual("nonunique_name", net.first.name)
self.assertEqual("second_unique_child_name", net.second.name)
@test_util.run_in_graph_and_eager_modes
def testNestableExplicitWithAnonymousParent(self):
# A parent network is instantiated multiple times with explicitly named
# children. We shouldn't throw any name errors.
class ParentNetwork(network.Network):
def __init__(self):
super(ParentNetwork, self).__init__()
self.first = self.track_layer(
MyNetwork(name="first_unique_child_name"))
self.second = self.track_layer(
MyNetwork(name="second_unique_child_name"))
def call(self, x):
return self.second(self.first(x))
one = constant_op.constant([[1.]])
net = ParentNetwork()
net(one)
self.assertStartsWith(
expected_start="parent_network/first_unique_child_name/dense/",
actual=net.trainable_weights[0].name)
self.assertStartsWith(
expected_start="parent_network/second_unique_child_name/dense/",
actual=net.trainable_weights[1].name)
self.assertEqual("parent_network", net.name)
self.assertEqual("first_unique_child_name", net.first.name)
self.assertEqual("second_unique_child_name", net.second.name)
net2 = ParentNetwork()
net2(one)
self.assertStartsWith(
expected_start="parent_network_1/first_unique_child_name/dense",
actual=net2.trainable_weights[0].name)
self.assertStartsWith(
expected_start="parent_network_1/second_unique_child_name/dense",
actual=net2.trainable_weights[1].name)
self.assertEqual("parent_network_1", net2.name)
self.assertEqual("first_unique_child_name", net2.first.name)
self.assertEqual("second_unique_child_name", net2.second.name)
@test_util.run_in_graph_and_eager_modes
def testNestableExplicitSameLayerCollisions(self):
# We have explicit network names and they are _not_ unique within the layer
# they're added to. Error.
class ParentNetwork(network.Network):
def __init__(self):
super(ParentNetwork, self).__init__(name="unique_parent_name")
self.first = self.track_layer(MyNetwork(name="nonunique_name"))
self.second = self.track_layer(MyNetwork(name="nonunique_name"))
def call(self, x):
return self.second(self.first(x))
with self.assertRaisesRegexp(ValueError, "nonunique_name"):
ParentNetwork()
@test_util.run_in_graph_and_eager_modes
def testAnonymousVariableSharing(self):
# Two "owned" Networks
class FirstParentNetwork(network.Network):
def __init__(self):
super(FirstParentNetwork, self).__init__()
self.first = self.track_layer(MyNetwork())
self.second = self.track_layer(MyNetwork())
def call(self, x):
return self.second(self.first(x))
one = constant_op.constant([[1.]])
net = FirstParentNetwork()
net(one)
# One Network shared with FirstParentNetwork, one owned Network. Same name,
# but this is OK because only one is owned. This name collision is
# avoidable; we could have looked at the base_name of the non-owned Network
# and incremented our naming based on that.
class SecondParentNetwork(network.Network):
def __init__(self):
super(SecondParentNetwork, self).__init__()
self.first = self.track_layer(net.first)
self.second = self.track_layer(MyNetwork())
def call(self, x):
return self.second(self.first(x))
net2 = SecondParentNetwork()
net2(one)
self.assertStartsWith(
expected_start="first_parent_network/my_network/dense/",
actual=net2.trainable_weights[0].name)
self.assertStartsWith(
expected_start="second_parent_network/my_network/dense/",
actual=net2.trainable_weights[1].name)
self.assertEqual("second_parent_network", net2.name)
self.assertTrue(net2.first is net.first)
self.assertEqual("my_network", net2.first.name)
self.assertEqual("my_network", net2.second.name)
# No name collision; the owned Network is added first and has a different
# name than the shared Network.
class ThirdParentNetwork(network.Network):
def __init__(self):
super(ThirdParentNetwork, self).__init__()
self.first = self.track_layer(MyNetwork())
self.second = self.track_layer(net.second)
def call(self, x):
return self.second(self.first(x))
net3 = ThirdParentNetwork()
net3(one)
self.assertStartsWith(
expected_start="third_parent_network/my_network/dense",
actual=net3.trainable_weights[0].name)
self.assertStartsWith(
expected_start="first_parent_network/my_network_1/dense",
actual=net3.trainable_weights[1].name)
self.assertEqual("third_parent_network", net3.name)
self.assertTrue(net3.second is net.second)
self.assertEqual("my_network", net3.first.name)
self.assertEqual("my_network_1", net3.second.name)
# "Unavoidable" same-name Layer. The owned name is added first (fixed), then
# a shared Network is added with the same name.
class FourthParentNetwork(network.Network):
def __init__(self):
super(FourthParentNetwork, self).__init__()
self.first = self.track_layer(MyNetwork())
self.second = self.track_layer(net.first)
def call(self, x):
return self.second(self.first(x))
net4 = FourthParentNetwork()
net4(one)
self.assertStartsWith(
expected_start="fourth_parent_network/my_network/dense/",
actual=net4.trainable_weights[0].name)
self.assertStartsWith(
expected_start="first_parent_network/my_network/dense/",
actual=net4.trainable_weights[1].name)
self.assertEqual("fourth_parent_network", net4.name)
self.assertTrue(net4.second is net.first)
self.assertEqual("my_network", net4.first.name)
self.assertEqual("my_network", net4.second.name)
@test_util.run_in_graph_and_eager_modes
def testRecursiveLayerRenaming(self):
core.Dense(1) # Under default Layer naming, would change subsequent names.
class NetworkWithLayerChildren(network.Network):
def __init__(self):
super(NetworkWithLayerChildren, self).__init__()
self.first = self.track_layer(core.Dense(1, use_bias=False))
self.second = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.second(self.first(x))
class ParentNetwork(network.Network):
def __init__(self):
super(ParentNetwork, self).__init__()
self.first = self.track_layer(NetworkWithLayerChildren())
self.second = self.track_layer(NetworkWithLayerChildren())
def call(self, x):
return self.second(self.first(x))
net = ParentNetwork()
one = constant_op.constant([[1.]])
net(one)
self.assertStartsWith(
expected_start=("parent_network/network_with_layer_children/"
"dense/"),
actual=net.trainable_weights[0].name)
self.assertStartsWith(
expected_start=("parent_network/network_with_layer_children/"
"dense_1/"),
actual=net.trainable_weights[1].name)
self.assertStartsWith(
expected_start=("parent_network/network_with_layer_children_1/"
"dense/"),
actual=net.trainable_weights[2].name)
self.assertStartsWith(
expected_start=("parent_network/network_with_layer_children_1/"
"dense_1/"),
actual=net.trainable_weights[3].name)
self.assertEqual("parent_network", net.name)
self.assertEqual("network_with_layer_children", net.first.name)
self.assertEqual("network_with_layer_children_1", net.second.name)
self.assertEqual("dense", net.first.first.name)
self.assertEqual("dense_1", net.first.second.name)
self.assertEqual("dense", net.second.first.name)
self.assertEqual("dense_1", net.second.second.name)
@test_util.run_in_graph_and_eager_modes
def testCallInDifferentOrderThanConstruct(self):
shared_network = MyNetwork()
class FirstNetwork(network.Network):
def __init__(self):
super(FirstNetwork, self).__init__()
self.first = self.track_layer(shared_network)
self.second = self.track_layer(MyNetwork())
def call(self, x):
return self.second(self.first(x))
class SecondNetwork(network.Network):
def __init__(self):
super(SecondNetwork, self).__init__()
self.first = self.track_layer(shared_network)
self.second = self.track_layer(MyNetwork())
def call(self, x):
return self.second(self.first(x))
net1 = FirstNetwork()
net2 = SecondNetwork()
one = constant_op.constant([[1.]])
net2(one)
net1(one)
self.assertStartsWith(
expected_start="first_network/my_network/dense/",
actual=net1.trainable_weights[0].name)
self.assertStartsWith(
expected_start="first_network/my_network_1/dense/",
actual=net1.trainable_weights[1].name)
self.assertStartsWith(
expected_start="first_network/my_network/dense/",
actual=net2.trainable_weights[0].name)
self.assertStartsWith(
expected_start="second_network/my_network/dense/",
actual=net2.trainable_weights[1].name)
self.assertTrue(net1.trainable_weights[0] is net2.trainable_weights[0])
self.assertEqual("first_network", net1.name)
self.assertEqual("my_network", net1.first.name)
self.assertEqual("my_network_1", net1.second.name)
self.assertTrue(net2.first is net1.first)
self.assertEqual("my_network", net2.second.name)
@test_util.run_in_graph_and_eager_modes
def testLayerCallInDifferentOrderThanConstruct(self):
# Same idea as testCallInDifferentOrderThanConstruct, but this time with a
# non-Network Layer shared between two Networks rather than a
# Network. Naming should follow the same rules.
shared_layer = core.Dense(1, use_bias=False)
class FirstNetwork(network.Network):
def __init__(self):
super(FirstNetwork, self).__init__()
self.first = self.track_layer(shared_layer)
self.second = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.second(self.first(x))
class SecondNetwork(network.Network):
def __init__(self):
super(SecondNetwork, self).__init__()
self.first = self.track_layer(shared_layer)
self.second = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.second(self.first(x))
net1 = FirstNetwork()
net2 = SecondNetwork()
one = constant_op.constant([[1.]])
net2(one)
net1(one)
self.assertStartsWith(
expected_start="first_network/dense/",
actual=net1.trainable_weights[0].name)
self.assertStartsWith(
expected_start="first_network/dense_1/",
actual=net1.trainable_weights[1].name)
self.assertStartsWith(
expected_start="first_network/dense/",
actual=net2.trainable_weights[0].name)
self.assertStartsWith(
expected_start="second_network/dense/",
actual=net2.trainable_weights[1].name)
self.assertTrue(net1.trainable_weights[0] is net2.trainable_weights[0])
self.assertEqual("first_network", net1.name)
self.assertEqual("dense", net1.first.name)
self.assertEqual("dense_1", net1.second.name)
self.assertTrue(net2.first is net1.first)
self.assertEqual("dense", net2.second.name)
@test_util.run_in_graph_and_eager_modes
def testLayerAlreadyBuilt(self):
one = constant_op.constant([[1.]])
core.Dense(1, use_bias=False) # pre-built layers use global naming
one = constant_op.constant([[1.]])
core.Dense(1, use_bias=False)(one)
shared_layer = core.Dense(1, use_bias=False)
shared_layer(one)
class FirstNetwork(network.Network):
def __init__(self):
super(FirstNetwork, self).__init__()
self.first = self.track_layer(shared_layer)
self.second = self.track_layer(core.Dense(1, use_bias=False))
def call(self, x):
return self.second(self.first(x))
net = FirstNetwork()
net(one)
self.assertStartsWith(
expected_start="dense_1/", # Pre-built layers have variable names which
# do not match their layer names.
actual=net.trainable_weights[0].name)
self.assertStartsWith(
expected_start="first_network/dense/",
actual=net.trainable_weights[1].name)
self.assertTrue(
net.trainable_weights[0] is shared_layer.trainable_weights[0])
self.assertEqual("first_network", net.name)
self.assertEqual("dense_3", net.first.name)
self.assertEqual("dense", net.second.name)
class SequentialTest(test.TestCase):
@test_util.assert_no_garbage_created
def testTwoLayers(self):
# Create a sequential network with one layer.
net = network.Sequential([core.Dense(1, use_bias=False)])
# Set that layer's weights so it multiplies by 3
l1 = net.get_layer(index=0)
net(constant_op.constant([[2.0]])) # Create l1's variables
self.assertEqual(1, len(l1.trainable_variables))
l1.trainable_variables[0].assign([[3.0]])
self.assertEqual(21.0, net(constant_op.constant([[7.0]])).numpy())
# Add a second layer to the network.
l2 = core.Dense(1, use_bias=False)
net.add(l2)
# Set the second layer's weights so it multiplies by 11
net(constant_op.constant([[2.0]])) # Create l2's variables
self.assertEqual(1, len(l2.trainable_variables))
l2.trainable_variables[0].assign([[11.0]])
self.assertEqual(231.0, net(constant_op.constant([[7.0]])).numpy())
@test_util.assert_no_garbage_created
def testFunctions(self):
# Create a sequential network with one function.
net = network.Sequential([nn_ops.relu])
two = constant_op.constant(2.0)
self.assertEqual(2.0, net(two).numpy())
self.assertEqual(0.0, net(-two).numpy())
# Add a second function.
net.add(math_ops.negative)
self.assertEqual(-2.0, net(two).numpy())
@test_util.assert_no_garbage_created
def testTrainingLayer(self):
net = network.Sequential([core.Dropout(0.99999)])
two = constant_op.constant(2.0)
self.assertEqual(2.0, net(two).numpy())
self.assertEqual(2.0, net(two, training=False).numpy())
for _ in range(20):
with_dropout = net(two, training=True).numpy()
self.assertIn(with_dropout, [0.0, 2.0])
if with_dropout == 0.0:
return
# Should only fail spuriously 1 in 10^100 runs.
self.fail("Didn't see dropout happen after 20 tries.")
@test_util.assert_no_garbage_created
def testTrainingFunction(self):
# Output depends on value of "training".
def add_training(input_value, training=None):
if training is None:
return input_value
elif training:
return input_value + 1
return input_value - 1
# Passing a "training" argument to double would cause an error.
def double(input_value):
return 2 * input_value
net = network.Sequential([add_training, double])
two = constant_op.constant(2)
self.assertEqual(4, net(two).numpy())
self.assertEqual(2, net(two, training=False).numpy())
self.assertEqual(6, net(two, training=True).numpy())
if __name__ == "__main__":
test.main()
|
|
import argparse
import json
import datetime
import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.framework.ops import disable_eager_execution
#from tensorflow.python import keras
import keras
#from donkeycar.parts.keras import KerasVAE
#from vae_model import KerasVAE
from malpi.dkwm.vae import KerasVAE
from keras.datasets import cifar10
import donkeycar as dk
#from donkeycar.train.tub_generator import generator
from vae_generator import vae_generator
from donkeycar.templates.train import collate_records, preprocessFileList
from donkeycar.utils import gather_records
from donkeycar.parts.keras import KerasLinear, KerasIMU,\
KerasCategorical, KerasBehavioral, Keras3D_CNN,\
KerasRNN_LSTM, KerasLatent
from malpi.notify import notify, read_email_config
from malpi import Experiment
def plot_results( history, path=None ):
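    # `history` is expected to be a Keras History.history dict; the keys probed
    # below cover both the plain VAE case (vae_r_loss/vae_kl_loss) and the
    # pilot case (steering/throttle output losses).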
plt.figure(1)
# loss - vae_r_loss - vae_kl_loss - val_loss - val_vae_r_loss - val_vae_kl_loss
plt.subplot(211)
# summarize history for loss
plt.plot(history['loss'])
plt.plot(history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validate'], loc='upper right')
# summarize history for r_loss and kl_loss (validation only)
r_loss = 'val_vae_r_loss'
legend1 = 'R Loss'
if r_loss not in history:
r_loss = 'main_output_vae_r_loss'
if 'val_steering_output_loss' in history:
r_loss = 'val_steering_output_loss'
legend1 = 'Steering Loss'
kl_loss = 'val_vae_kl_loss'
legend2 = 'KL Loss'
if kl_loss not in history:
kl_loss = 'main_output_vae_kl_loss'
if 'val_throttle_output_loss' in history:
kl_loss = 'val_throttle_output_loss'
legend2 = 'Throttle Loss'
plt.subplot(212)
plt.plot(history[r_loss])
plt.plot(history[kl_loss])
plt.title('R and KL Losses')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend([legend1, legend2], loc='upper right')
if path is not None:
plt.savefig(path + '.png')
plt.show()
def load_cifar10_data():
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255.0
x_test /= 255.0
print( "Train: {}".format( x_train.shape ) )
return (x_train, x_test )
def load_tubs(cfg, tub_names, kl, aux_name=None, pilot=False):
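    # Gathers tub records, wraps them in train/validation generators and
    # returns (train_gen, val_gen, steps_per_epoch, validation_steps) sized
    # from cfg.BATCH_SIZE.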
opts = { 'cfg' : cfg}
opts['categorical'] = False
opts['keras_pilot'] = kl # This will be needed for generator
opts['continuous'] = False
gen_records = {}
records = gather_records(cfg, tub_names, verbose=True)
collate_records(records, gen_records, opts)
# These options should be part of the KerasPilot class
if type(kl.model.output) is list:
opts['model_out_shape'] = (2, 1)
else:
opts['model_out_shape'] = kl.model.output.shape
if type(kl.model.input) is list:
opts['model_in_shape'] = (2, 1)
else:
opts['model_in_shape'] = kl.model.input.shape
opts['has_imu'] = type(kl) is KerasIMU
opts['has_bvh'] = type(kl) is KerasBehavioral
opts['img_out'] = type(kl) is KerasLatent
opts['vae_out'] = type(kl) is KerasVAE
train_gen = vae_generator(cfg, gen_records, cfg.BATCH_SIZE, isTrainSet=True, aug=False, aux=aux_name, pilot=pilot)
val_gen = vae_generator(cfg, gen_records, cfg.BATCH_SIZE, isTrainSet=False, aug=False, aux=aux_name, pilot=pilot)
num_train = 0
num_val = 0
    for _record in gen_records.values():
        if _record['train']:
num_train += 1
else:
num_val += 1
steps = num_train // cfg.BATCH_SIZE
val_steps = num_val // cfg.BATCH_SIZE
print( "Num/Steps: {} {}".format( num_train, steps ) )
print( "Val/Steps: {} {}".format( num_val, val_steps ) )
return train_gen, val_gen, steps, val_steps
def train( kl, train_gen, val_gen, train_steps, val_steps, z_dim, beta, optim, lr=None, decay=None, momentum=None, dropout=None, epochs=40, batch_size=64, aux=None, loss_weights=None ):
    if loss_weights is None:  # avoid a shared mutable default argument
        loss_weights = {}
    optim_args = {}
if lr is not None:
optim_args["lr"] = lr
if decay is not None:
optim_args["decay"] = decay
if (optim == "sgd") and (momentum is not None):
optim_args["momentum"] = momentum
if optim == "adam":
optim = tf.keras.optimizers.Adam(**optim_args)
elif optim == "sgd":
optim = tf.keras.optimizers.SGD(**optim_args)
elif optim == "rmsprop":
optim = tf.keras.optimizers.RMSprop(**optim_args)
kl.set_optimizer(optim)
kl.compile(**loss_weights)
kl.model.summary()
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
min_delta=0.00001,
patience=5,
verbose=True,
mode='auto')
workers_count = 1
use_multiprocessing = False
hist = kl.model.fit_generator(
train_gen,
steps_per_epoch=train_steps,
epochs=epochs,
verbose=cfg.VEBOSE_TRAIN,
validation_data=val_gen,
callbacks=[early_stop],
validation_steps=val_steps,
workers=workers_count,
use_multiprocessing=use_multiprocessing)
return hist
def check_model( z_dim, beta ):
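    # Smoke test: build KerasVAE models at several input resolutions (and once
    # with auxiliary outputs) to verify that they construct and compile.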
shapes = [(16, 16, 3), (32, 32, 3), (64, 64, 3), (128, 128, 3)]
for input_shape in shapes:
print( "Checking input shape: {}".format( input_shape ) )
kl = KerasVAE(input_shape=input_shape, z_dim=z_dim, beta=beta)
if input_shape[0] == 128:
kl.model.summary()
kl2 = KerasVAE(input_shape=input_shape, z_dim=z_dim, beta=beta, aux=7)
print( "Model with 7 auxiliary outputs" )
kl2.compile()
kl2.model.summary()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Train a VAE on Cifar10.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--z_dim', type=int, default=128, help='Size of the latent space.')
parser.add_argument('--beta', type=float, default=0.001, help='VAE beta hyperparameter.')
    parser.add_argument('--aux', default=None, help='Name of the auxiliary data to use.')
parser.add_argument('--pilot', action="store_true", default=False, help='Train a pilot (steering/throttle)')
parser.add_argument('--check', action="store_true", default=False, help='Check model then exit')
parser.add_argument('--plot', action="store_true", default=False, help='Plot history in exp/name then exit')
parser.add_argument('-o', '--optimizer', default='adam', choices=["adam", "sgd", "rmsprop"], help='Optimizer')
parser.add_argument('--lr', type=float, default=None, help='Initial learning rate. None = optimizer default')
parser.add_argument('--decay', type=float, default=None, help='Learning rate decay. None = optimizer default')
parser.add_argument('--momentum', type=float, default=None, help='SGD momentum. None = optimizer default')
parser.add_argument('--dropout', type=float, default=None, help='Dropout amount to use.')
parser.add_argument('--epochs', type=int, default=40, help='Maximum number of epoch to train.')
parser.add_argument('--pre', default=None, help='Path to pre-trained weights to load.')
parser.add_argument('--email', default=None, help='Email address to send finished notification.')
parser.add_argument('--name', default=None, help='Name for this experiment run.')
parser.add_argument('--exp', default="experiments", help='Directory where experiments are saved.')
group = parser.add_mutually_exclusive_group()
group.add_argument('--cifar10', action="store_true", default=False, help='Train on Cifar10 data.')
group.add_argument('--file', nargs='*', help='Text file with a list of tubs to train on.')
args = parser.parse_args()
if args.check:
check_model( args.z_dim, args.beta )
exit()
if args.plot and (args.name is not None):
histname = os.path.join( args.exp, args.name, 'histories.pickle' )
with open( histname, 'rb' ) as f:
hist = pickle.load( f )
plot_results( hist )
exit()
disable_eager_execution()
if args.cifar10:
x_train, x_val = load_cifar10_data()
input_shape = x_train.shape[1:]
kl = KerasVAE(input_shape=input_shape, z_dim=args.z_dim, beta=args.beta, dropout=args.dropout)
else:
try:
cfg = dk.load_config()
except FileNotFoundError:
cfg = dk.load_config("config.py") # retry in the current directory
dirs = preprocessFileList( args.file )
input_shape = (cfg.IMAGE_W, cfg.IMAGE_H, cfg.IMAGE_DEPTH)
# Code for multiple inputs: http://digital-thinking.de/deep-learning-combining-numerical-and-text-features-in-deep-neural-networks/
aux_out = 0
if args.aux is not None:
aux_out = 7 # need to get number of aux outputs from data
kl = KerasVAE(input_shape=input_shape, z_dim=args.z_dim, beta=args.beta, dropout=args.dropout, aux=aux_out, pilot=args.pilot)
train_gen, val_gen, train_steps, val_steps = load_tubs(cfg, dirs, kl, aux_name=args.aux, pilot=args.pilot)
if args.pre is not None:
kl.load_weights( args.pre, by_name=True )
loss_weights = {"main_weight": 0.1, "steering_weight":100.0, "throttle_weight":100.0}
exp = None
if args.name is not None:
hparams = {**cfg.__dict__, **loss_weights}
exp = Experiment( args.name, args, exp_dir=args.exp, num_samples=train_steps+val_steps, input_dim=(cfg.TARGET_H,cfg.TARGET_W,cfg.TARGET_D), hparams=hparams, modules=[np, tf, dk] )
hist = train( kl, train_gen, val_gen, train_steps, val_steps, args.z_dim, args.beta, args.optimizer, args.lr, args.decay, args.momentum, args.dropout, args.epochs, aux=args.aux, loss_weights=loss_weights )
# loss: 5.2231 - main_output_loss: 2.9757 - steering_output_loss: 0.0160 - throttle_output_loss: 0.0050 - main_output_vae_r_loss: 2.3089 - main_output_vae_kl_loss: 0.6668 - val_loss: 9.9828 - val_main_output_loss: 3.0794 - val_steering_output_loss: 0.0621 - val_throttle_output_loss: 0.0056 - val_main_output_vae_r_loss: 2.4030 - val_main_output_vae_kl_loss: 0.6764
loss = hist.history['val_loss'][-1]
if exp is not None:
exp.writeAfter( model=kl.model, histories=hist.history, saveModel=True, results={"loss": loss} )
print( "python3 scripts/sample.py --tub=data/20190921_4.tub {}_weights.h5".format( os.path.splitext(exp.filename)[0] ) )
print( "python3 scripts/tubplot.py" ) # TODO: Add arguments to this once tubplot is finished
try:
notifications = read_email_config()
notify( "Training Finished", subTitle='', message='Validation Loss {:.6f}'.format( loss ), email_to=args.email, mac=True, sound=True, email_config=notifications )
except Exception as ex:
print( "Failed to send notifications: {}".format( ex ) )
if cfg.SHOW_PLOT:
fname = os.path.splitext(exp.filename)[0]
print( "Training loss plot: {}.png".format( os.path.splitext(exp.filename)[0] ) )
plot_results( hist.history, fname )
|
|
# Copyright 2010 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import re
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
import stevedore
import webob
from webob import exc
from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import servers as schema_servers
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova.compute import flavors
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova.image import glance
from nova import objects
from nova import utils
ALIAS = 'servers'
CONF = cfg.CONF
CONF.import_opt('enable_instance_password',
'nova.api.openstack.compute.legacy_v2.servers')
CONF.import_opt('network_api_class', 'nova.network')
CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')
CONF.import_opt('extensions_blacklist', 'nova.api.openstack', group='osapi_v3')
CONF.import_opt('extensions_whitelist', 'nova.api.openstack', group='osapi_v3')
LOG = logging.getLogger(__name__)
authorize = extensions.os_compute_authorizer(ALIAS)
class ServersController(wsgi.Controller):
"""The Server API base controller class for the OpenStack API."""
EXTENSION_CREATE_NAMESPACE = 'nova.api.v3.extensions.server.create'
EXTENSION_DESERIALIZE_EXTRACT_SERVER_NAMESPACE = (
'nova.api.v3.extensions.server.create.deserialize')
EXTENSION_REBUILD_NAMESPACE = 'nova.api.v3.extensions.server.rebuild'
EXTENSION_DESERIALIZE_EXTRACT_REBUILD_NAMESPACE = (
'nova.api.v3.extensions.server.rebuild.deserialize')
EXTENSION_UPDATE_NAMESPACE = 'nova.api.v3.extensions.server.update'
EXTENSION_RESIZE_NAMESPACE = 'nova.api.v3.extensions.server.resize'
_view_builder_class = views_servers.ViewBuilderV3
schema_server_create = schema_servers.base_create
schema_server_update = schema_servers.base_update
schema_server_rebuild = schema_servers.base_rebuild
schema_server_resize = schema_servers.base_resize
@staticmethod
def _add_location(robj):
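        # Copies the server's 'self' link (when present) into the response's
        # HTTP Location header.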
# Just in case...
if 'server' not in robj.obj:
return robj
        link = [l for l in robj.obj['server']['links']
                if l['rel'] == 'self']
        if link:
robj['Location'] = utils.utf8(link[0]['href'])
# Convenience return
return robj
def __init__(self, **kwargs):
def _check_load_extension(required_function):
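            # Returns a stevedore check_func that only loads extensions which
            # implement `required_function` and pass the white/blacklist checks.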
def check_whiteblack_lists(ext):
                # Check that the whitelist is either empty or that the
                # extension is in the whitelist
if (not CONF.osapi_v3.extensions_whitelist or
ext.obj.alias in CONF.osapi_v3.extensions_whitelist):
# Check the extension is not in the blacklist
if ext.obj.alias not in CONF.osapi_v3.extensions_blacklist:
return True
else:
LOG.warning(_LW("Not loading %s because it is "
"in the blacklist"), ext.obj.alias)
return False
else:
LOG.warning(
_LW("Not loading %s because it is not in the "
"whitelist"), ext.obj.alias)
return False
def check_load_extension(ext):
if isinstance(ext.obj, extensions.V3APIExtensionBase):
# Filter out for the existence of the required
# function here rather than on every request. We
# don't have a new abstract base class to reduce
# duplication in the extensions as they may want
                    # to implement multiple server (and other) entry
                    # points (e.g. hasattr(ext.obj, 'server_create')).
if hasattr(ext.obj, required_function):
LOG.debug('extension %(ext_alias)s detected by '
'servers extension for function %(func)s',
{'ext_alias': ext.obj.alias,
'func': required_function})
return check_whiteblack_lists(ext)
else:
LOG.debug(
'extension %(ext_alias)s is missing %(func)s',
{'ext_alias': ext.obj.alias,
'func': required_function})
return False
else:
return False
return check_load_extension
self.extension_info = kwargs.pop('extension_info')
super(ServersController, self).__init__(**kwargs)
self.compute_api = compute.API(skip_policy_check=True)
# Look for implementation of extension point of server creation
self.create_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_CREATE_NAMESPACE,
check_func=_check_load_extension('server_create'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.create_extension_manager):
LOG.debug("Did not find any server create extensions")
# Look for implementation of extension point of server rebuild
self.rebuild_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_REBUILD_NAMESPACE,
check_func=_check_load_extension('server_rebuild'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.rebuild_extension_manager):
LOG.debug("Did not find any server rebuild extensions")
# Look for implementation of extension point of server update
self.update_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_UPDATE_NAMESPACE,
check_func=_check_load_extension('server_update'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.update_extension_manager):
LOG.debug("Did not find any server update extensions")
# Look for implementation of extension point of server resize
self.resize_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_RESIZE_NAMESPACE,
check_func=_check_load_extension('server_resize'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.resize_extension_manager):
LOG.debug("Did not find any server resize extensions")
# Look for API schema of server create extension
self.create_schema_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_CREATE_NAMESPACE,
check_func=_check_load_extension('get_server_create_schema'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if list(self.create_schema_manager):
self.create_schema_manager.map(self._create_extension_schema,
self.schema_server_create)
else:
LOG.debug("Did not find any server create schemas")
# Look for API schema of server update extension
self.update_schema_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_UPDATE_NAMESPACE,
check_func=_check_load_extension('get_server_update_schema'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if list(self.update_schema_manager):
self.update_schema_manager.map(self._update_extension_schema,
self.schema_server_update)
else:
LOG.debug("Did not find any server update schemas")
# Look for API schema of server rebuild extension
self.rebuild_schema_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_REBUILD_NAMESPACE,
check_func=_check_load_extension('get_server_rebuild_schema'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if list(self.rebuild_schema_manager):
self.rebuild_schema_manager.map(self._rebuild_extension_schema,
self.schema_server_rebuild)
else:
LOG.debug("Did not find any server rebuild schemas")
# Look for API schema of server resize extension
self.resize_schema_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_RESIZE_NAMESPACE,
check_func=_check_load_extension('get_server_resize_schema'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if list(self.resize_schema_manager):
self.resize_schema_manager.map(self._resize_extension_schema,
self.schema_server_resize)
else:
LOG.debug("Did not find any server resize schemas")
@extensions.expected_errors((400, 403))
def index(self, req):
"""Returns a list of server names and ids for a given user."""
context = req.environ['nova.context']
authorize(context, action="index")
try:
servers = self._get_servers(req, is_detail=False)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
@extensions.expected_errors((400, 403))
def detail(self, req):
"""Returns a list of server details for a given user."""
context = req.environ['nova.context']
authorize(context, action="detail")
try:
servers = self._get_servers(req, is_detail=True)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
def _get_servers(self, req, is_detail):
"""Returns a list of servers, based on any search options specified."""
search_opts = {}
search_opts.update(req.GET)
context = req.environ['nova.context']
remove_invalid_options(context, search_opts,
self._get_server_search_options(req))
# Verify search by 'status' contains a valid status.
# Convert it to filter by vm_state or task_state for compute_api.
search_opts.pop('status', None)
if 'status' in req.GET.keys():
statuses = req.GET.getall('status')
states = common.task_and_vm_state_from_status(statuses)
vm_state, task_state = states
if not vm_state and not task_state:
return {'servers': []}
search_opts['vm_state'] = vm_state
# When we search by vm state, task state will return 'default'.
# So we don't need task_state search_opt.
if 'default' not in task_state:
search_opts['task_state'] = task_state
if 'changes-since' in search_opts:
try:
parsed = timeutils.parse_isotime(search_opts['changes-since'])
except ValueError:
msg = _('Invalid changes-since value')
raise exc.HTTPBadRequest(explanation=msg)
search_opts['changes-since'] = parsed
# By default, compute's get_all() will return deleted instances.
# If an admin hasn't specified a 'deleted' search option, we need
# to filter out deleted instances by setting the filter ourselves.
# ... Unless 'changes-since' is specified, because 'changes-since'
        # should return recently deleted instances according to the API spec.
if 'deleted' not in search_opts:
if 'changes-since' not in search_opts:
# No 'changes-since', so we only want non-deleted servers
search_opts['deleted'] = False
else:
# Convert deleted filter value to a valid boolean.
# Return non-deleted servers if an invalid value
# is passed with deleted filter.
search_opts['deleted'] = strutils.bool_from_string(
search_opts['deleted'], default=False)
if search_opts.get("vm_state") == ['deleted']:
if context.is_admin:
search_opts['deleted'] = True
else:
msg = _("Only administrators may list deleted instances")
raise exc.HTTPForbidden(explanation=msg)
# If tenant_id is passed as a search parameter this should
# imply that all_tenants is also enabled unless explicitly
# disabled. Note that the tenant_id parameter is filtered out
# by remove_invalid_options above unless the requestor is an
# admin.
# TODO(gmann): 'all_tenants' flag should not be required while
# searching with 'tenant_id'. Ref bug# 1185290
# +microversions to achieve above mentioned behavior by
# uncommenting below code.
# if 'tenant_id' in search_opts and 'all_tenants' not in search_opts:
# We do not need to add the all_tenants flag if the tenant
# id associated with the token is the tenant id
# specified. This is done so a request that does not need
# the all_tenants flag does not fail because of lack of
# policy permission for compute:get_all_tenants when it
# doesn't actually need it.
# if context.project_id != search_opts.get('tenant_id'):
# search_opts['all_tenants'] = 1
# If all tenants is passed with 0 or false as the value
# then remove it from the search options. Nothing passed as
# the value for all_tenants is considered to enable the feature
all_tenants = search_opts.get('all_tenants')
if all_tenants:
try:
if not strutils.bool_from_string(all_tenants, True):
del search_opts['all_tenants']
except ValueError as err:
raise exception.InvalidInput(six.text_type(err))
elevated = None
if 'all_tenants' in search_opts:
if is_detail:
authorize(context, action="detail:get_all_tenants")
else:
authorize(context, action="index:get_all_tenants")
del search_opts['all_tenants']
elevated = context.elevated()
else:
if context.project_id:
search_opts['project_id'] = context.project_id
else:
search_opts['user_id'] = context.user_id
limit, marker = common.get_limit_and_marker(req)
sort_keys, sort_dirs = common.get_sort_params(req.params)
try:
instance_list = self.compute_api.get_all(elevated or context,
search_opts=search_opts, limit=limit, marker=marker,
want_objects=True, expected_attrs=['pci_devices'],
sort_keys=sort_keys, sort_dirs=sort_dirs)
except exception.MarkerNotFound:
msg = _('marker [%s] not found') % marker
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
LOG.debug("Flavor '%s' could not be found ",
search_opts['flavor'])
instance_list = objects.InstanceList()
if is_detail:
instance_list.fill_faults()
response = self._view_builder.detail(req, instance_list)
else:
response = self._view_builder.index(req, instance_list)
req.cache_db_instances(instance_list)
return response
def _get_server(self, context, req, instance_uuid):
"""Utility function for looking up an instance by uuid."""
instance = common.get_instance(self.compute_api, context,
instance_uuid,
expected_attrs=['pci_devices',
'flavor'])
req.cache_db_instance(instance)
return instance
def _check_string_length(self, value, name, max_length=None):
try:
if isinstance(value, six.string_types):
value = value.strip()
utils.check_string_length(value, name, min_length=1,
max_length=max_length)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
def _get_requested_networks(self, requested_networks):
"""Create a list of requested networks from the networks attribute."""
networks = []
network_uuids = []
for network in requested_networks:
request = objects.NetworkRequest()
try:
# fixed IP address is optional
# if the fixed IP address is not provided then
# it will use one of the available IP address from the network
request.address = network.get('fixed_ip', None)
request.port_id = network.get('port', None)
if request.port_id:
request.network_id = None
if not utils.is_neutron():
# port parameter is only for neutron v2.0
msg = _("Unknown argument: port")
raise exc.HTTPBadRequest(explanation=msg)
if request.address is not None:
msg = _("Specified Fixed IP '%(addr)s' cannot be used "
"with port '%(port)s': port already has "
"a Fixed IP allocated.") % {
"addr": request.address,
"port": request.port_id}
raise exc.HTTPBadRequest(explanation=msg)
else:
request.network_id = network['uuid']
if (not request.port_id and
not uuidutils.is_uuid_like(request.network_id)):
br_uuid = request.network_id.split('-', 1)[-1]
if not uuidutils.is_uuid_like(br_uuid):
msg = _("Bad networks format: network uuid is "
"not in proper format "
"(%s)") % request.network_id
raise exc.HTTPBadRequest(explanation=msg)
# duplicate networks are allowed only for neutron v2.0
if (not utils.is_neutron() and request.network_id and
request.network_id in network_uuids):
expl = (_("Duplicate networks"
" (%s) are not allowed") %
request.network_id)
raise exc.HTTPBadRequest(explanation=expl)
network_uuids.append(request.network_id)
networks.append(request)
except KeyError as key:
expl = _('Bad network format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
return objects.NetworkRequestList(objects=networks)
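    # Hedged example (illustration only): the 'networks' attribute handled above is
    # expected to look roughly like
    #     [{'uuid': '<network uuid>', 'fixed_ip': '10.0.0.5'}, {'port': '<port uuid>'}]
    # and each entry becomes an objects.NetworkRequest in the returned
    # objects.NetworkRequestList.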
# NOTE(vish): Without this regex, b64decode will happily
# ignore illegal bytes in the base64 encoded
# data.
B64_REGEX = re.compile('^(?:[A-Za-z0-9+\/]{4})*'
'(?:[A-Za-z0-9+\/]{2}=='
'|[A-Za-z0-9+\/]{3}=)?$')
def _decode_base64(self, data):
data = re.sub(r'\s', '', data)
if not self.B64_REGEX.match(data):
return None
try:
return base64.b64decode(data)
except TypeError:
return None
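    # Hedged example (illustration only): B64_REGEX rejects data whose length or
    # padding is not valid base64 before b64decode is attempted, e.g.
    #     self._decode_base64('dGVzdA==')  # -> 'test'
    #     self._decode_base64('dGVzdA=')   # -> None (bad padding, regex rejects it)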
@extensions.expected_errors(404)
def show(self, req, id):
"""Returns server details by server id."""
context = req.environ['nova.context']
instance = common.get_instance(self.compute_api, context, id,
expected_attrs=['pci_devices',
'flavor'])
authorize(context, action="show")
req.cache_db_instance(instance)
return self._view_builder.show(req, instance)
@wsgi.response(202)
@extensions.expected_errors((400, 403, 409, 413))
@validation.schema(schema_server_create)
def create(self, req, body):
"""Creates a new server for a given user."""
context = req.environ['nova.context']
server_dict = body['server']
password = self._get_server_admin_password(server_dict)
name = server_dict['name']
# Arguments to be passed to instance create function
create_kwargs = {}
# Query extensions which want to manipulate the keyword
# arguments.
# NOTE(cyeoh): This is the hook that extensions use
# to replace the extension specific code below.
# When the extensions are ported this will also result
# in some convenience function from this class being
# moved to the extension
if list(self.create_extension_manager):
self.create_extension_manager.map(self._create_extension_point,
server_dict, create_kwargs, body)
availability_zone = create_kwargs.get("availability_zone")
target = {
'project_id': context.project_id,
'user_id': context.user_id,
'availability_zone': availability_zone}
authorize(context, target, 'create')
        # TODO(Shao He, Feng) move this policy check to os-availability-zone
# extension after refactor it.
if availability_zone:
_dummy, host, node = self.compute_api._handle_availability_zone(
context, availability_zone)
if host or node:
authorize(context, {}, 'create:forced_host')
block_device_mapping = create_kwargs.get("block_device_mapping")
# TODO(Shao He, Feng) move this policy check to os-block-device-mapping
# extension after refactor it.
if block_device_mapping:
authorize(context, target, 'create:attach_volume')
image_uuid = self._image_from_req_data(server_dict, create_kwargs)
# NOTE(cyeoh): Although an extension can set
# return_reservation_id in order to request that a reservation
# id be returned to the client instead of the newly created
# instance information we do not want to pass this parameter
# to the compute create call which always returns both. We use
# this flag after the instance create call to determine what
# to return to the client
return_reservation_id = create_kwargs.pop('return_reservation_id',
False)
requested_networks = None
if ('os-networks' in self.extension_info.get_extensions()
or utils.is_neutron()):
requested_networks = server_dict.get('networks')
if requested_networks is not None:
requested_networks = self._get_requested_networks(
requested_networks)
if requested_networks and len(requested_networks):
authorize(context, target, 'create:attach_network')
try:
flavor_id = self._flavor_id_from_req_data(body)
except ValueError:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
try:
inst_type = flavors.get_flavor_by_flavor_id(
flavor_id, ctxt=context, read_deleted="no")
(instances, resv_id) = self.compute_api.create(context,
inst_type,
image_uuid,
display_name=name,
display_description=name,
metadata=server_dict.get('metadata', {}),
admin_password=password,
requested_networks=requested_networks,
check_server_group_quota=True,
**create_kwargs)
except (exception.QuotaError,
exception.PortLimitExceeded) as error:
raise exc.HTTPForbidden(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.ImageNotFound:
msg = _("Can not find requested image")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.KeypairNotFound:
msg = _("Invalid key_name provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.ConfigDriveInvalidValue:
msg = _("Invalid config_drive provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.ExternalNetworkAttachForbidden as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except messaging.RemoteError as err:
msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type,
'err_msg': err.value}
raise exc.HTTPBadRequest(explanation=msg)
except UnicodeDecodeError as error:
msg = "UnicodeError: %s" % error
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata,
exception.InvalidRequest,
exception.InvalidVolume,
exception.MultiplePortsNotApplicable,
exception.InvalidFixedIpAndMaxCountRequest,
exception.InstanceUserDataMalformed,
exception.InstanceUserDataTooLarge,
exception.PortNotFound,
exception.FixedIpAlreadyInUse,
exception.SecurityGroupNotFound,
exception.PortRequiresFixedIP,
exception.NetworkRequiresSubnet,
exception.NetworkNotFound,
exception.NetworkDuplicated,
exception.InvalidBDMSnapshot,
exception.InvalidBDMVolume,
exception.InvalidBDMImage,
exception.InvalidBDMBootSequence,
exception.InvalidBDMLocalsLimit,
exception.InvalidBDMVolumeNotBootable,
exception.AutoDiskConfigDisabledByImage,
exception.ImageNUMATopologyIncomplete,
exception.ImageNUMATopologyForbidden,
exception.ImageNUMATopologyAsymmetric,
exception.ImageNUMATopologyCPUOutOfRange,
exception.ImageNUMATopologyCPUDuplicates,
exception.ImageNUMATopologyCPUsUnassigned,
exception.ImageNUMATopologyMemoryOutOfRange) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except (exception.PortInUse,
exception.InstanceExists,
exception.NetworkAmbiguous,
exception.NoUniqueMatch) as error:
raise exc.HTTPConflict(explanation=error.format_message())
# If the caller wanted a reservation_id, return it
if return_reservation_id:
# NOTE(cyeoh): In v3 reservation_id was wrapped in
# servers_reservation but this is reverted for V2 API
# compatibility. In the long term with the tasks API we
# will probably just drop the concept of reservation_id
return wsgi.ResponseObject({'reservation_id': resv_id})
req.cache_db_instances(instances)
server = self._view_builder.create(req, instances[0])
if CONF.enable_instance_password:
server['server']['adminPass'] = password
robj = wsgi.ResponseObject(server)
return self._add_location(robj)
# NOTE(gmann): Parameter 'req_body' is placed to handle scheduler_hint
# extension for V2.1. No other extension supposed to use this as
# it will be removed soon.
def _create_extension_point(self, ext, server_dict,
create_kwargs, req_body):
handler = ext.obj
LOG.debug("Running _create_extension_point for %s", ext.obj)
handler.server_create(server_dict, create_kwargs, req_body)
def _rebuild_extension_point(self, ext, rebuild_dict, rebuild_kwargs):
handler = ext.obj
LOG.debug("Running _rebuild_extension_point for %s", ext.obj)
handler.server_rebuild(rebuild_dict, rebuild_kwargs)
def _resize_extension_point(self, ext, resize_dict, resize_kwargs):
handler = ext.obj
LOG.debug("Running _resize_extension_point for %s", ext.obj)
handler.server_resize(resize_dict, resize_kwargs)
def _update_extension_point(self, ext, update_dict, update_kwargs):
handler = ext.obj
LOG.debug("Running _update_extension_point for %s", ext.obj)
handler.server_update(update_dict, update_kwargs)
def _create_extension_schema(self, ext, create_schema):
handler = ext.obj
LOG.debug("Running _create_extension_schema for %s", ext.obj)
schema = handler.get_server_create_schema()
if ext.obj.name == 'SchedulerHints':
# NOTE(oomichi): The request parameter position of scheduler-hint
# extension is different from the other extensions, so here handles
# the difference.
create_schema['properties'].update(schema)
else:
create_schema['properties']['server']['properties'].update(schema)
def _update_extension_schema(self, ext, update_schema):
handler = ext.obj
LOG.debug("Running _update_extension_schema for %s", ext.obj)
schema = handler.get_server_update_schema()
update_schema['properties']['server']['properties'].update(schema)
def _rebuild_extension_schema(self, ext, rebuild_schema):
handler = ext.obj
LOG.debug("Running _rebuild_extension_schema for %s", ext.obj)
schema = handler.get_server_rebuild_schema()
rebuild_schema['properties']['rebuild']['properties'].update(schema)
def _resize_extension_schema(self, ext, resize_schema):
handler = ext.obj
LOG.debug("Running _resize_extension_schema for %s", ext.obj)
schema = handler.get_server_resize_schema()
resize_schema['properties']['resize']['properties'].update(schema)
def _delete(self, context, req, instance_uuid):
authorize(context, action='delete')
instance = self._get_server(context, req, instance_uuid)
if CONF.reclaim_instance_interval:
try:
self.compute_api.soft_delete(context, instance)
except exception.InstanceInvalidState:
# Note(yufang521247): instance which has never been active
# is not allowed to be soft_deleted. Thus we have to call
# delete() to clean up the instance.
self.compute_api.delete(context, instance)
else:
self.compute_api.delete(context, instance)
@extensions.expected_errors((400, 404))
@validation.schema(schema_server_update)
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
ctxt = req.environ['nova.context']
update_dict = {}
authorize(ctxt, action='update')
if 'name' in body['server']:
update_dict['display_name'] = body['server']['name']
if list(self.update_extension_manager):
self.update_extension_manager.map(self._update_extension_point,
body['server'], update_dict)
instance = common.get_instance(self.compute_api, ctxt, id,
expected_attrs=['pci_devices'])
try:
# NOTE(mikal): this try block needs to stay because save() still
# might throw an exception.
req.cache_db_instance(instance)
instance.update(update_dict)
instance.save()
return self._view_builder.show(req, instance,
extend_address=False)
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
# NOTE(gmann): Returns 204 for backwards compatibility but should be 202
# for representing async API as this API just accepts the request and
# request hypervisor driver to complete the same in async mode.
@wsgi.response(204)
@extensions.expected_errors((400, 404, 409))
@wsgi.action('confirmResize')
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
authorize(context, action='confirm_resize')
instance = self._get_server(context, req, id)
try:
self.compute_api.confirm_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'confirmResize', id)
@wsgi.response(202)
@extensions.expected_errors((400, 404, 409))
@wsgi.action('revertResize')
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
authorize(context, action='revert_resize')
instance = self._get_server(context, req, id)
try:
self.compute_api.revert_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _("Flavor used by the instance could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'revertResize', id)
@wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('reboot')
@validation.schema(schema_servers.reboot)
def _action_reboot(self, req, id, body):
reboot_type = body['reboot']['type'].upper()
context = req.environ['nova.context']
authorize(context, action='reboot')
instance = self._get_server(context, req, id)
try:
self.compute_api.reboot(context, instance, reboot_type)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'reboot', id)
def _resize(self, req, instance_id, flavor_id, **kwargs):
"""Begin the resize process with given instance/flavor."""
context = req.environ["nova.context"]
authorize(context, action='resize')
instance = self._get_server(context, req, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.QuotaError as error:
raise exc.HTTPForbidden(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.FlavorNotFound:
msg = _("Unable to locate requested flavor.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeToSameFlavor:
msg = _("Resize requires a flavor change.")
raise exc.HTTPBadRequest(explanation=msg)
except (exception.CannotResizeDisk,
exception.AutoDiskConfigDisabledByImage) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resize', instance_id)
except exception.ImageNotAuthorized:
msg = _("You are not authorized to access the image "
"the instance was started with.")
raise exc.HTTPUnauthorized(explanation=msg)
except exception.ImageNotFound:
msg = _("Image that the instance was started "
"with could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except (exception.NoValidHost,
exception.AutoDiskConfigDisabledByImage) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.Invalid:
msg = _("Invalid instance image.")
raise exc.HTTPBadRequest(explanation=msg)
@wsgi.response(204)
@extensions.expected_errors((404, 409))
def delete(self, req, id):
"""Destroys a server."""
try:
self._delete(req.environ['nova.context'], req, id)
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete', id)
def _image_uuid_from_href(self, image_href):
# If the image href was generated by nova api, strip image_href
# down to an id and use the default glance connection params
image_uuid = image_href.split('/').pop()
if not uuidutils.is_uuid_like(image_uuid):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
return image_uuid
def _image_from_req_data(self, server_dict, create_kwargs):
"""Get image data from the request or raise appropriate
exceptions.
The field imageRef is mandatory when no block devices have been
defined and must be a proper uuid when present.
"""
image_href = server_dict.get('imageRef')
if not image_href and create_kwargs.get('block_device_mapping'):
return ''
elif image_href:
return self._image_uuid_from_href(six.text_type(image_href))
else:
msg = _("Missing imageRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
def _flavor_id_from_req_data(self, data):
flavor_ref = data['server']['flavorRef']
return common.get_id_from_href(flavor_ref)
@wsgi.response(202)
@extensions.expected_errors((400, 401, 403, 404, 409))
@wsgi.action('resize')
@validation.schema(schema_server_resize)
def _action_resize(self, req, id, body):
"""Resizes a given instance to the flavor size requested."""
resize_dict = body['resize']
flavor_ref = str(resize_dict["flavorRef"])
resize_kwargs = {}
if list(self.resize_extension_manager):
self.resize_extension_manager.map(self._resize_extension_point,
resize_dict, resize_kwargs)
self._resize(req, id, flavor_ref, **resize_kwargs)
@wsgi.response(202)
@extensions.expected_errors((400, 403, 404, 409, 413))
@wsgi.action('rebuild')
@validation.schema(schema_server_rebuild)
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
rebuild_dict = body['rebuild']
image_href = rebuild_dict["imageRef"]
image_href = self._image_uuid_from_href(image_href)
password = self._get_server_admin_password(rebuild_dict)
context = req.environ['nova.context']
authorize(context, action='rebuild')
instance = self._get_server(context, req, id)
attr_map = {
'name': 'display_name',
'metadata': 'metadata',
}
rebuild_kwargs = {}
if list(self.rebuild_extension_manager):
self.rebuild_extension_manager.map(self._rebuild_extension_point,
rebuild_dict, rebuild_kwargs)
for request_attribute, instance_attribute in attr_map.items():
try:
rebuild_kwargs[instance_attribute] = rebuild_dict[
request_attribute]
except (KeyError, TypeError):
pass
try:
self.compute_api.rebuild(context,
instance,
image_href,
password,
**rebuild_kwargs)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'rebuild', id)
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.ImageNotFound:
msg = _("Cannot find image for rebuild")
raise exc.HTTPBadRequest(explanation=msg)
except exception.QuotaError as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata,
exception.AutoDiskConfigDisabledByImage) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
instance = self._get_server(context, req, id)
view = self._view_builder.show(req, instance, extend_address=False)
# Add on the admin_password attribute since the view doesn't do it
# unless instance passwords are disabled
if CONF.enable_instance_password:
view['server']['adminPass'] = password
robj = wsgi.ResponseObject(view)
return self._add_location(robj)
@wsgi.response(202)
@extensions.expected_errors((400, 403, 404, 409))
@wsgi.action('createImage')
@common.check_snapshots_enabled
@validation.schema(schema_servers.create_image)
def _action_create_image(self, req, id, body):
"""Snapshot a server instance."""
context = req.environ['nova.context']
authorize(context, action='create_image')
entity = body["createImage"]
image_name = entity["name"]
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
instance = self._get_server(context, req, id)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
try:
if self.compute_api.is_volume_backed_instance(context, instance,
bdms):
authorize(context, action="create_image:allow_volume_backed")
image = self.compute_api.snapshot_volume_backed(
context,
instance,
image_name,
extra_properties=
metadata)
else:
image = self.compute_api.snapshot(context,
instance,
image_name,
extra_properties=metadata)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'createImage', id)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
# build location of newly-created image entity
image_id = str(image['id'])
image_ref = glance.generate_image_url(image_id)
resp = webob.Response(status_int=202)
resp.headers['Location'] = image_ref
return resp
def _get_server_admin_password(self, server):
"""Determine the admin password for a server on creation."""
try:
password = server['adminPass']
except KeyError:
password = utils.generate_password()
return password
def _get_server_search_options(self, req):
"""Return server search options allowed by non-admin."""
opt_list = ('reservation_id', 'name', 'status', 'image', 'flavor',
'ip', 'changes-since', 'all_tenants')
req_ver = req.api_version_request
if req_ver > api_version_request.APIVersionRequest("2.4"):
opt_list += ('ip6',)
return opt_list
def _get_instance(self, context, instance_uuid):
try:
attrs = ['system_metadata', 'metadata']
return objects.Instance.get_by_uuid(context, instance_uuid,
expected_attrs=attrs)
except exception.InstanceNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
@wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('os-start')
def _start_server(self, req, id, body):
"""Start an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
authorize(context, instance, 'start')
LOG.debug('start instance', instance=instance)
try:
self.compute_api.start(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'start', id)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
@wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('os-stop')
def _stop_server(self, req, id, body):
"""Stop an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
authorize(context, instance, 'stop')
LOG.debug('stop instance', instance=instance)
try:
self.compute_api.stop(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'stop', id)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
LOG.debug("Removing options '%s' from query",
", ".join(unknown_options))
for opt in unknown_options:
search_options.pop(opt, None)
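# Hedged usage sketch (not part of the original module): for a non-admin context,
#     opts = {'name': 'vm1', 'host': 'node1'}
#     remove_invalid_options(ctxt, opts, ('name', 'status'))
# leaves opts == {'name': 'vm1'}, because 'host' is not an allowed search option.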
class Servers(extensions.V3APIExtensionBase):
"""Servers."""
name = "Servers"
alias = ALIAS
version = 1
def get_resources(self):
member_actions = {'action': 'POST'}
collection_actions = {'detail': 'GET'}
resources = [
extensions.ResourceExtension(
ALIAS,
ServersController(extension_info=self.extension_info),
member_name='server', collection_actions=collection_actions,
member_actions=member_actions)]
return resources
def get_controller_extensions(self):
return []
|
|
import re
from collections import Counter, defaultdict
from datetime import datetime
from hamper.interfaces import ChatCommandPlugin, Command
from hamper.utils import ude, uen
import pytz
from pytz import timezone
from pytz.exceptions import UnknownTimeZoneError
from sqlalchemy import Column, DateTime, Integer, String, func
from sqlalchemy.ext.declarative import declarative_base
SQLAlchemyBase = declarative_base()
class KarmaAdv(ChatCommandPlugin):
'''Give, take, and scoreboard Internet Points'''
"""
Hamper will look for lines that end in ++ or -- and modify that user's
karma value accordingly as well as track a few other stats about users
NOTE: The user is just a string, this really could be anything...like
potatoes or the infamous cookie clicker....
"""
name = 'karma_adv'
priority = -2
short_desc = ("karma - Give positive or negative karma. Where you see"
" !karma, !score will work as well")
long_desc = ("username++ - Give karma\n"
"username-- - Take karma\n"
"!karma --top - Show the top 5 karma earners\n"
"!karma --bottom - Show the bottom 5 karma earners\n"
"!karma --giver or --taker - Show who's given the most"
" positive or negative karma\n"
"!karma --when-positive or --when-negative "
" - Show when people are the most positive or negative\n"
"!karma username - Show the user's karma count\n")
gotta_catch_em_all = r"""# 3 or statement
(
# Starting with a (, look for anything within
# parens that end with 2 or more + or -
(?=\()[^\)]+\)(\+\++|--+) |
# Looking from the start of the line until 2 or
# more - or + are found. No whitespace in this
# grouping
^[^\s]+(\+\++|--+) |
# Finally group any non-whitespace groupings
# that end with 2 or more + or -
[^\s]+?(\+\++|--+)((?=\s)|(?=$))
)
"""
regstr = re.compile(gotta_catch_em_all, re.X)
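    # Hedged examples (illustration only) of tokens the pattern above catches:
    #     'alice++'            -> karma up for 'alice'
    #     'bob--'              -> karma down for 'bob'
    #     '(cookie clicker)++' -> karma up for the parenthesised phrase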
def setup(self, loader):
super(KarmaAdv, self).setup(loader)
self.db = loader.db
SQLAlchemyBase.metadata.create_all(self.db.engine)
# Config
config = loader.config.get("karma_adv", {})
self.timezone = config.get('timezone', 'UTC')
try:
self.tzinfo = timezone(self.timezone)
except UnknownTimeZoneError:
self.tzinfo = timezone('UTC')
self.timezone = 'UTC'
def message(self, bot, comm):
"""
Check for strings ending with 2 or more '-' or '+'
"""
super(KarmaAdv, self).message(bot, comm)
# No directed karma giving or taking
if not comm['directed'] and not comm['pm']:
msg = comm['message'].strip().lower()
# use the magic above
words = self.regstr.findall(msg)
# Do things to people
karmas = self.modify_karma(words)
# Notify the users they can't modify their own karma
if comm['user'] in karmas.keys():
if karmas[comm['user']] <= 0:
bot.reply(comm, "Don't be so hard on yourself.")
else:
bot.reply(comm, "Tisk, tisk, no up'ing your own karma.")
# Commit karma changes to the db
self.update_db(comm["user"], karmas)
def modify_karma(self, words):
"""
Given a regex object, look through the groups and modify karma
as necessary
"""
# 'user': karma
k = defaultdict(int)
if words:
# For loop through all of the group members
for word_tuple in words:
word = word_tuple[0]
ending = word[-1]
                # This will either end with a - or a +; if it ends with a -,
                # subtract 1 karma, if it ends with a +, add 1 karma
change = -1 if ending == '-' else 1
# Now strip the ++ or -- from the end
if '-' in ending:
word = word.rstrip('-')
elif '+' in ending:
word = word.rstrip('+')
# Check if surrounded by parens, if so, remove them
if word.startswith('(') and word.endswith(')'):
word = word[1:-1]
# Finally strip whitespace
word = word.strip()
# Add the user to the dict
if word:
k[word] += change
return k
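    # Hedged example (illustration only):
    #     self.modify_karma(self.regstr.findall('alice++ bob--'))
    # would return a mapping like {'alice': 1, 'bob': -1}.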
def update_db(self, giver, receiverkarma):
"""
        Record the giver of karma, the receiver of karma, and the karma
amount. Typically the count will be 1, but it can be any positive or
negative integer.
"""
for receiver in receiverkarma:
if receiver != giver:
urow = KarmaStatsTable(
ude(giver), ude(receiver), receiverkarma[receiver])
self.db.session.add(urow)
self.db.session.commit()
class KarmaList(Command):
"""
Return the highest or lowest 5 receivers of karma
"""
regex = r'^(?:score|karma) --(top|bottom)$'
LIST_MAX = 5
def command(self, bot, comm, groups):
# Let the database restrict the amount of rows we get back.
# We can then just deal with a few rows later on
session = bot.factory.loader.db.session
kcount = func.sum(KarmaStatsTable.kcount).label('kcount')
kts = session.query(KarmaStatsTable.receiver, kcount) \
.group_by(KarmaStatsTable.receiver)
# For legacy support
            classic = session.query(KarmaTable.user, KarmaTable.kcount)
# Counter for sorting and updating data
counter = Counter()
if kts.count() or classic.count():
# We should limit the list of users to at most self.LIST_MAX
if groups[0] == 'top':
                    classic_q = classic.order_by(
                        KarmaTable.kcount.desc()).limit(self.LIST_MAX).all()
query = kts.order_by(kcount.desc())\
.limit(self.LIST_MAX).all()
counter.update(dict(classic_q))
counter.update(dict(query))
snippet = counter.most_common(self.LIST_MAX)
elif groups[0] == 'bottom':
                    classic_q = classic.order_by(KarmaTable.kcount)\
                        .limit(self.LIST_MAX).all()
query = kts.order_by(kcount)\
.limit(self.LIST_MAX).all()
counter.update(dict(classic_q))
counter.update(dict(query))
snippet = reversed(counter.most_common(self.LIST_MAX))
else:
                    bot.reply(
                        comm, "Something went wrong with karma's regex"
                    )
return
for rec in snippet:
bot.reply(
comm, '%s\x0f: %d' % (uen(rec[0]), rec[1]),
encode=False
)
else:
bot.reply(comm, 'No one has any karma yet :-(')
class UserKarma(Command):
"""
Retrieve karma for a given user
"""
# !karma <username>
regex = r'^(?:score|karma)(?:\s+([^-].*))?$'
def command(self, bot, comm, groups):
# The receiver (or in old terms, user) of the karma being tallied
receiver = groups[0]
            if receiver is None:
                receiver = comm['user']
            receiver = ude(receiver.strip().lower())
# Manage both tables
sesh = bot.factory.loader.db.session
# Old Table
            kt = sesh.query(KarmaTable)
            user = kt.filter(KarmaTable.user == receiver).first()
# New Table
kst = sesh.query(KarmaStatsTable)
kst_list = kst.filter(KarmaStatsTable.receiver == receiver).all()
# The total amount of karma from both tables
total = 0
# Add karma from the old table
if user:
total += user.kcount
# Add karma from the new table
if kst_list:
for row in kst_list:
total += row.kcount
# Pluralization
points = "points"
if total == 1 or total == -1:
points = "point"
# Send the message
bot.reply(
comm, '%s has %d %s' % (uen(receiver), total, points),
encode=False
)
class KarmaGiver(Command):
"""
Identifies the person who gives the most karma
"""
regex = r'^(?:score|karma) --(giver|taker)$'
def command(self, bot, comm, groups):
kt = bot.factory.loader.db.session.query(KarmaStatsTable)
counter = Counter()
if groups[0] == 'giver':
positive_karma = kt.filter(KarmaStatsTable.kcount > 0)
for row in positive_karma:
counter[row.giver] += row.kcount
m = counter.most_common(1)
most = m[0] if m else None
if most:
bot.reply(
comm,
'%s has given the most karma (%d)' %
(uen(most[0]), most[1])
)
else:
bot.reply(
comm,
'No positive karma has been given yet :-('
)
elif groups[0] == 'taker':
negative_karma = kt.filter(KarmaStatsTable.kcount < 0)
for row in negative_karma:
counter[row.giver] += row.kcount
m = counter.most_common()
most = m[-1] if m else None
if most:
bot.reply(
comm,
'%s has given the most negative karma (%d)' %
(uen(most[0]), most[1])
)
else:
bot.reply(
comm,
'No negative karma has been given yet'
)
class MostActive(Command):
"""
Least/Most active hours of karma giving/taking
This will now look in the config for a timezone to use when displaying
the hour.
Example
            karma_adv:
timezone: America/Los_Angeles
If no timezone is given, or it's invalid, time will be reported in UTC
"""
regex = r'^(?:score|karma)\s+--when-(positive|negative)'
def command(self, bot, comm, groups):
kt = bot.factory.loader.db.session.query(KarmaStatsTable)
counter = Counter()
if groups[0] == "positive":
karma = kt.filter(KarmaStatsTable.kcount > 0)
elif groups[0] == "negative":
karma = kt.filter(KarmaStatsTable.kcount < 0)
for row in karma:
hour = row.datetime.hour
counter[hour] += row.kcount
common_hour = (counter.most_common(1)[0][0]
if counter.most_common(1) else None)
# Title case for when
title_case = groups[0][0].upper() + groups[0][1:]
            if common_hour is not None:
# Create a datetime object
current_time = datetime.now(pytz.utc)
# Give it the common_hour
current_time = current_time.replace(hour=int(common_hour))
# Get the localized common hour
hour = self.plugin.tzinfo.normalize(
current_time.astimezone(self.plugin.tzinfo)).hour
# Report to the channel
bot.reply(
comm,
'%s karma is usually given during the %d:00 hour (%s)' %
(title_case, hour, self.plugin.timezone)
)
else:
# Inform that no karma of that type has been awarded yet
bot.reply(
comm,
                'No %s karma has been given yet' % title_case
)
class KarmaTable(SQLAlchemyBase):
"""
Bringing back the classic table so data doesn't need to be dumped
"""
__tablename__ = 'karma'
# Karma Classic Table
user = Column(String, primary_key=True)
kcount = Column(Integer)
def __init__(self, user, kcount):
self.user = user
self.kcount = kcount
class KarmaStatsTable(SQLAlchemyBase):
"""
    Keep track of users' karma in a persistent manner
"""
__tablename__ = 'karmastats'
    # id is an auto-incrementing primary key; giver and receiver are just strings
id = Column(Integer, primary_key=True)
giver = Column(String)
receiver = Column(String)
kcount = Column(Integer)
    datetime = Column(DateTime, default=datetime.utcnow)
def __init__(self, giver, receiver, kcount):
self.giver = giver
self.receiver = receiver
self.kcount = kcount
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement)
# from builtins import (bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
from functools import partial
import re
from pathlib import Path, PurePosixPath, PureWindowsPath
import stat
from typing import (Dict, Set, List, Tuple, Sequence, Union, Pattern, Match, overload, Iterator)
from datetime import datetime as dt
from utils import futils
# from utils import dtutils
# MONTHS = dtutils.MONTHS
"""
Really raw WIP - this is a side thing that I've been doing on-demand, so it
has a lot of unused code, and basically is a mess.
Renames files in the provided directory using a date value in the file name,
or based on the attributes of the file.
Has only been tested in Windows, but the ultimate goal is for it to work across OS types.
"""
PATH_TYPE = Union[PurePosixPath, PureWindowsPath]
PATH_SEQ_TYPE = Sequence[PATH_TYPE]
PATTERN_TYPE = Pattern
MATCH_SEQ_TYPE = Sequence[Match]
STR_BOOL_TUPLE = Tuple[str, bool]
MATCH_STRING_TUPLE = Tuple[Match, str]
RE_IM = re.IGNORECASE + re.MULTILINE
INPUT_PATHS = [
Path('INPUT PATH')
]
EXCLUDE_LIST = ['~', '.cache', '.git', '.idea', '.project', '.iml', '.vscode', 'desktop.ini'] # type: List[str]
DELETE_LIST = ['.DS_Store', 'Thumbs.db'] # type: List[str]
# TODO: Combine all of these data templates and patterns into another module
YEAR_TEMPLATE = '(?:20|19)[0-9][0-9]' # type: str
SHORT_YEAR_TEMPLATE = '[0-9][0-9]' # type: str
LONG_OR_SHORT_YEAR_TEMPLATE = '{year_pattern}|{short_year_pattern}'.format(
year_pattern = YEAR_TEMPLATE,
short_year_pattern = SHORT_YEAR_TEMPLATE
) # type: str
MONTH_TEMPLATE = '[1-9]|0[0-9]|1[0-2]' # type: str
DAY_TEMPLATE = '0[0-9]|[1-2][0-9]|3[0-1]|[1-9]' # type: str
DAY_YEAR_MONTH_TEMPLATE = '\\b(?P<day>{day_pattern}) ?(?P<year>{year_pattern}) ?(?P<month>{month_pattern})'.format(
year_pattern = YEAR_TEMPLATE,
month_pattern = MONTH_TEMPLATE,
day_pattern = DAY_TEMPLATE
) # type: str
MONTH_AND_YEAR_TEMPLATE = '((?P<year1>{year_pattern})\\b\\s*(?P<month1>{month_pattern})|(?P<month2>{month_pattern})\\b\\s*(?P<year2>{year_pattern}))'.format(
year_pattern = LONG_OR_SHORT_YEAR_TEMPLATE,
month_pattern = MONTH_TEMPLATE
) # type: str
# Match month names to month numbers
MONTH_REPLACEMENT_TEMPLATES = {
'(?:january|jan|01)': '01',
'(?:february|feb|02)': '02',
'(?:march|mar|03)': '03',
'(?:april|apr|04)': '04',
'(?:may|05)': '05',
'(?:june|jun|06)': '06',
'(?:july|jul|07)': '07',
'(?:august|aug|08)': '08',
'(?:september|sept|sep|09)': '09',
'(?:october|oct|10)': '10',
'(?:november|nov|11)': '11',
'(?:december|dec|12)': '12'
} # type: Dict[str, str]
# August'16 / Aug'2016 / 08'16
M_YEAR_TEMPLATE = '\\b(?P<month>{month_pattern})\'(?P<year>{year_template})\\b' # type: str
# 2016 08 02
ISO_DATE_TEMPLATE = '\\b(?P<year>{year_pattern}) ?(?P<month>{month_pattern}) ?(?P<day>{day_pattern})\\b'.format(
year_pattern = LONG_OR_SHORT_YEAR_TEMPLATE,
month_pattern = MONTH_TEMPLATE,
day_pattern = DAY_TEMPLATE
) # type: str
# 08 02 2016
US_DATE_TEMPLATE = '\\b(?P<month>{month_pattern}) ?(?P<day>{day_pattern}) ?(?P<year>{year_pattern})\\b'.format(
month_pattern = MONTH_TEMPLATE,
day_pattern = DAY_TEMPLATE,
year_pattern = LONG_OR_SHORT_YEAR_TEMPLATE
) # type: str
# Patterns = compiled RegEx templates
MONTH_REPLACEMENT_PATTERNS = {
re.compile(pattern='\\b({month_pattern})\\b'.format(month_pattern=k), flags=RE_IM): v
for k, v in MONTH_REPLACEMENT_TEMPLATES.items()
} # type: Dict[PATTERN_TYPE, str]
# Apr'16
M_YEAR_PATTERNS = {
re.compile(
pattern=M_YEAR_TEMPLATE.format(
month_pattern=k,
year_template=LONG_OR_SHORT_YEAR_TEMPLATE
),
flags=RE_IM
): v
for k, v in MONTH_REPLACEMENT_TEMPLATES.items()
} # type: Dict[PATTERN_TYPE, str]
# MM dd yyyy
US_DATE_PATTERN = re.compile(
pattern=US_DATE_TEMPLATE,
flags=RE_IM
) # type: Pattern
# dd yyyy MM
DAY_YEAR_MONTH_PATTERN = re.compile(
pattern=DAY_YEAR_MONTH_TEMPLATE,
flags=RE_IM
) # type: Pattern
# yyyy MM dd
LONG_DATE_PATTERN = re.compile(
pattern=ISO_DATE_TEMPLATE,
flags=RE_IM
) # type: Pattern
# yyyy MM or MM yyyy
MONTH_YEAR_PATTERN = re.compile(
pattern=MONTH_AND_YEAR_TEMPLATE,
flags=RE_IM
) # type: Pattern
YEAR_PATTERN = re.compile(
pattern='(?:\'?\\b(?P<year>{year_pattern}))\\b'.format(
year_pattern = LONG_OR_SHORT_YEAR_TEMPLATE
),
flags=RE_IM
) # type:PATTERN_TYPE
MONTH_PATTERN = re.compile(
pattern='\\b(?P<month>{month_pattern})\\b'.format(
month_pattern = MONTH_TEMPLATE
),
flags=RE_IM
) # type: Pattern
WHITESPACE_PATTERN = re.compile(r'\s', RE_IM) # type: PATTERN_TYPE
SEPARATOR_PATTERN = re.compile(pattern='([ \\.\\,\\_\\-\\+])') # type: PATTERN_TYPE
BRACKET_PATTERN = re.compile(pattern='([\\(\\)\\[\\]\\{\\}])') # type: PATTERN_TYPE
format_year_string = lambda year_string: year_string if len(year_string.strip()) == 4 else '20{0}'.format(year_string.strip())
format_day_or_month_string = lambda day_or_month_string: day_or_month_string.strip().zfill(2)
def get_matches(input_string:str, search_pattern:Pattern) -> Iterator[Match]:
"""
    Moves from left to right in input_string, yielding each Match from search_pattern
    until there are no more matches.
"""
start_pos = 0 # type: int
search_result = search_pattern.search(input_string, start_pos) # type: Match
while search_result is not None:
yield search_result # type: Match
start_pos = search_result.span()[1]
search_result = search_pattern.search(input_string, start_pos)
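# Hedged usage sketch (illustration only):
#     [m.group(0) for m in get_matches('2016 03 and 2017 09', MONTH_YEAR_PATTERN)]
# should yield ['2016 03', '2017 09'], one Match per non-overlapping hit.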
def match_patterns(input_string:str, search_patterns:Union[Dict[Pattern, str], List[Pattern]]) -> Dict[Pattern, Union[str, None]]:
    """
    Returns a Dict of every pattern in search_patterns that matched input_string, mapped to
    its replacement value (None when search_patterns is a plain List). If none of the
    patterns matched, or if there was an error, an empty Dict is returned.
    """
    return {pattern: None if isinstance(search_patterns, List) else search_patterns[pattern] for pattern in search_patterns if pattern.search(str(input_string)) is not None} # type: Dict[Pattern, Union[str, None]]
@partial
def execute_on_matches(func:callable, input_string:str, search_patterns:Union[Dict[Pattern, str], List[Pattern]]) -> Tuple[str, bool]:
"""
For each matching pattern in search_patterns, passes input_string and the result to func
Returns Tuple[return_string, made_a_match] where return_string will be the result of func and True,
or input_string with no changes, and False, if no matches were found in search_patterns
"""
return_string = str(input_string) # type:str
made_a_match = False # type: bool
    matching_patterns = match_patterns(input_string, search_patterns) # type: Dict[Pattern, Union[str, None]]
if len(matching_patterns) > 0:
        for matching_pattern in matching_patterns: # type: Pattern
made_a_match = True
if isinstance(search_patterns, Dict):
str_value = search_patterns[matching_pattern] # type: Union[None, str]
else:
str_value = None
for match in get_matches(return_string, matching_pattern):
return_string = func(return_string, (matching_pattern, str_value))
return (return_string, made_a_match)
@partial
def execute_on_file_stem(func:callable, full_file_path:Union[str, Path], **kwargs) -> Tuple[Path, bool]:
"""
Calls func(provided_file_stem, **kwargs), which should return Tuple[str, made_a_change],
where str is the provided string, with any changes, and made_a_change is a boolean indicating
whether changes were made.
The returned string is returned as the stem of the provided full_file_path, as a Path object
"""
try:
file_obj, file_parent, filename, file_suffix = get_file_parts(full_file_path)
except AttributeError:
raise
return_string, made_a_change = func(filename, **kwargs) # type: str, bool
new_filename = '{0}{1}'.format(return_string, file_suffix) # type: str
return (Path.joinpath(file_parent, new_filename), made_a_change)
def format_m_year_execute(input_string:str, match_pattern:Tuple[Pattern, str]) -> str:
"""
Core of loop for format_m_year_strings
"""
return_string = str(input_string) # type:str
search_pattern, month_number = match_pattern # type: Pattern, str
search_result = search_pattern.search(return_string) # type: Match
string_to_replace, year = search_result.group(0), format_year_string(search_result.group('year')) # type: str, str
    return_string = replace_and_prepend(return_string, string_to_replace, '', '{0} {1} '.format(year, month_number))
return return_string
def format_m_year_strings(input_string: str) -> Tuple[str, bool]:
"""
Looks for a m'year value in the string. If it finds
one, then it moves it to the front of the string
Returns a tuple (return_string:str, made_a_match:bool)
"""
return execute_on_matches(format_m_year_execute, input_string, M_YEAR_PATTERNS)
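# Hedged example (illustration only): given "report Aug'16", this function is meant
# to normalise the matched month'year token to '2016 08' and, per its docstring,
# move it to the front, returning roughly ('2016 08 report', True).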
def format_month_string_execute(input_string:str, match_pattern:Tuple[Pattern, str]) -> str:
"""
Core of loop for format_month_strings_with_numbers function
"""
return_string = str(input_string) # type:str
search_pattern, month_number = match_pattern # type: Match, str
return search_pattern.sub(month_number, return_string)
def format_month_strings_with_numbers(input_string:str) -> Tuple[str, bool]:
"""
Replaces month names with their padded numeric equivalent
"""
return execute_on_matches(format_month_string_execute, input_string, MONTH_REPLACEMENT_PATTERNS)
def format_day_year_month_execute(input_string:str, match_pattern:Tuple[Pattern, None]) -> str:
"""
Core of loop for format_day_year_month_date_string
"""
return_string = str(input_string) # type:str
search_result = match_pattern[0].search(return_string) # type: Match
replacement_string = '{0} {1} {2}'.format(search_result.group('year'), search_result.group('month'), search_result.group('day')) # type: str
return input_string.replace(search_result.group(0), replacement_string)
def format_day_year_month_date_string(input_string:str) -> Tuple[str, bool]:
"""
Replaces dates with the format dd yyyy MM with yyyy MM dd format
"""
return execute_on_matches(format_day_year_month_execute, input_string, [DAY_YEAR_MONTH_PATTERN])
def format_us_date_strings_execute(input_string:str, match_pattern:Tuple[Pattern, None]) -> str:
"""
Core of loop for format_us_date_strings
"""
return_string = str(input_string) # type:str
search_result = match_pattern[0].search(return_string) # type: Match
replacement_string = '{0} {1} {2}'.format(
format_year_string(search_result.group('year')),
format_day_or_month_string(search_result.group('month')),
format_day_or_month_string(search_result.group('day'))
) # type: str
return return_string.replace(search_result.group(0), replacement_string)
def format_us_date_strings(input_string:str) -> Tuple[str, bool]:
"""
Re-arranges US-style date formats (MM-dd-yyyy) to yyyy-MM-dd style
Un-padded month and day values are also matched.
Years without a century value will be assumed to be after 2000.
"""
return execute_on_matches(format_us_date_strings_execute, input_string, [US_DATE_PATTERN])
def format_year_month_execute(input_string:str, match_pattern:Tuple[Pattern, None]) -> str:
"""
Core of loop for format_year_month_strings
"""
return_string = str(input_string) # type:str
search_result = match_pattern[0].search(return_string) # type: Match
replacement_string = '{0} {1}'.format(
format_year_string(search_result.group('year1') or search_result.group('year2')),
format_day_or_month_string(search_result.group('month1') or search_result.group('month2'))
) # type: str
return return_string.replace(search_result.group(0), replacement_string)
def format_year_month_strings(input_string:str) -> Tuple[str, bool]:
"""
Formats MM yyyy date strings as yyyy MM
"""
return execute_on_matches(format_year_month_execute, input_string, [MONTH_YEAR_PATTERN])
def remove_double_spaces(input_string:str) -> str:
"""
Replaces double spaces with single spaces, in the provided string
"""
return ' '.join(WHITESPACE_PATTERN.sub(' ', input_string).split())
def clean_up_name(input_string:str) -> Tuple[str, bool]:
"""
Replaces .,_-+%20 with spaces
Replaces unicode spaces with standard spaces
Replaces double spaces with single spaces
Removes trailing and leading spaces
Removes ([{}])
"""
filename = str(input_string).strip()
# Replace separators with spaces
new_filename = re.sub(SEPARATOR_PATTERN, ' ', filename)
# Replace %20 with space
new_filename = new_filename.replace('%20', ' ')
# Replaces double spaces
new_filename = remove_double_spaces(new_filename)
# Remove brackets
new_filename = re.sub(BRACKET_PATTERN, '', new_filename).strip()
    return (new_filename, new_filename != filename)
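# Hedged example (illustration only):
#     clean_up_name('my_report-2016.08.02 (final)')
# should return a cleaned name like 'my report 2016 08 02 final', plus a boolean
# flag indicating whether the name changed.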
def fix_date_strings(input_string:str) -> Tuple[str, bool]:
"""
Looks for several date formats in the provided string, and replaces
them with a date with the most complete format that can be found,
from the list below:
yyyy MM dd
yyyy MM
yyyy
Operational order
* Replace mmm'yy or mmm'yyyy with yyyy MM
* Replace dd yyyy MM with yyyy MM dd
* Replace MM dd yyyy with yyyy MM dd
Returns Tuple[return_string, made_a_match]
If no changes were made, the provided string is returned, without any changes.
"""
return_string = str(input_string).strip() # type:str
made_a_match = False # type: bool
date_funcs = (
format_m_year_strings,
format_month_strings_with_numbers,
format_day_year_month_date_string,
format_us_date_strings
)
# Only try these if we weren't able to find matches from date_funcs
additional_date_patterns = [
YEAR_PATTERN,
MONTH_PATTERN
]
for date_func in date_funcs:
return_string, matched = date_func(return_string) # type: str, bool
made_a_match = max(made_a_match, matched)
if made_a_match is True:
return (return_string, made_a_match)
else:
matching_patterns = match_patterns(return_string, additional_date_patterns)
for matching_pattern in matching_patterns:
if matching_pattern == YEAR_PATTERN:
format_func = format_year_string
group_name = 'year'
else:
format_func = format_day_or_month_string
group_name = 0
made_a_match = True
for date_match in get_matches(return_string, matching_pattern): # type: Match
return_string = return_string.replace(date_match.group(0), format_func(date_match.group(group_name)))
break
if made_a_match is False:
return (input_string, made_a_match)
else:
return (return_string, made_a_match)
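# A minimal usage sketch for fix_date_strings (illustrative; real results depend
# on the *_PATTERN regexes defined earlier in this module):
#   fix_date_strings("CLIENT Report Mar'14")  -> roughly ('CLIENT Report 2014 03', True)
#   A string with no recognizable date is returned unchanged, with False.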
def replace_and_prepend(input_string:str, search_string: str, replacement_string:str=None, prepend_string:str=None) -> str:
"""
If search_string is in input_string, it is replaced with replacement_string,
the string is then trimmed, prepended with prepend_string, and returned.
If search_string is not in input_string, the original string is returned.
"""
    return_string = input_string
    if prepend_string is None:
        prepend_string = ''
    if replacement_string is None:
        replacement_string = ''
if search_string in input_string:
return remove_double_spaces('{0}{1}'.format(prepend_string, re.sub(search_string, replacement_string, return_string).strip()))
else:
return input_string
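# Example of how replace_and_prepend is used further down (illustrative):
#   replace_and_prepend('Report 2014 03', search_string='2014 03', prepend_string='2014 03 ')
#   -> '2014 03 Report'  (the date is removed in place and prepended instead)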
def get_best_date_string(input_string:str, start_pos:int=0) -> 'Iterator[Match]':
    """
    Yields the most complete date string matches found in input_string,
    starting at start_pos.
    If no match is found, nothing is yielded.
    """
provided_string = str(input_string) # type: str
date_patterns = [
LONG_DATE_PATTERN,
MONTH_YEAR_PATTERN,
YEAR_PATTERN
]
for date_pattern in match_patterns(provided_string, date_patterns):
for search_result in get_matches(provided_string, date_pattern):
yield search_result
break
def add_file_date(file_name:str, full_file_path:Union[Path, str]) -> str:
"""
Looks for the first, most complete date string in the stem of the provided
file. If that date is missing a year and/or month value, then those
values will be retrieved from either the parent folder name, or the file's
modified timestamp. A day value will not be used unless it is already
in the filename.
Any date string retrieved from the filename will be moved to the
    beginning of the string, in the format yyyy MM dd or yyyy MM.
"""
file_path_obj = Path(full_file_path)
if file_path_obj.is_file() is False:
raise AttributeError('You must provide the file path to this function!')
input_string = str(file_name)
date_parts = ('year', 'month', 'day')
file_name_date = {k: None for k in date_parts}
string_to_replace = '' # type:str
    if YEAR_PATTERN.search(str(file_path_obj.parent)) is not None:
        file_name_date['year'] = YEAR_PATTERN.search(str(file_path_obj.parent)).group('year')
    else:
        file_name_date['year'] = str(dt.fromtimestamp(file_path_obj.stat().st_mtime).year)
    if MONTH_PATTERN.search(str(file_path_obj.parent)) is not None:
        file_name_date['month'] = MONTH_PATTERN.search(str(file_path_obj.parent)).group('month')
    else:
        file_name_date['month'] = str(dt.fromtimestamp(file_path_obj.stat().st_mtime).month)
# Get the best date we have
for date_match in get_date_strings(input_string):
string_to_replace = date_match.group(0)
found_date_parts = [k.strip().lower() for k in date_match.groupdict().keys() if k.strip().lower() in date_parts]
        for date_part in found_date_parts:
            if date_match.group(date_part) is not None:
                file_name_date[date_part] = date_match.group(date_part)
break
best_date_string = '{0} {1} '.format(format_year_string(file_name_date['year']), format_day_or_month_string(file_name_date['month']))
if file_name_date['day'] is not None:
best_date_string = '{0}{1} '.format(best_date_string, format_day_or_month_string(file_name_date['day']))
    return_string = replace_and_prepend(input_string=input_string, search_string=string_to_replace, prepend_string=best_date_string)
    return return_string
def move_date_to_start_of_string(input_string:str) -> str:
"""
    Finds the best date string, and moves it to the beginning of the string
"""
try:
        best_date_strings = list(get_best_date_string(input_string))
        date_start_pos = best_date_strings[0].span()[0]
        date_end_pos = best_date_strings[-1].span()[1]
        date_string = input_string[date_start_pos:date_end_pos]
    except Exception:
        # No usable date string was found; return the input unchanged
        return input_string
return replace_and_prepend(input_string=input_string, search_string=date_string, prepend_string=date_string)
def get_file_parts(file_obj:Union[str, Path]) -> Tuple[Path, Path, str, str]:
"""
Returns Tuple[file_path_obj, file_parent_obj, file_stem, file_suffix]
"""
source_file = futils.get_clean_path(file_obj)
    if str(source_file.parent) == '.' or source_file.is_file() is False:
raise AttributeError('You must provide a complete file path to this function!')
return (source_file, source_file.parent, source_file.stem, source_file.suffix)
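# Example (illustrative; assumes the file exists on disk and that
# futils.get_clean_path returns a pathlib.Path for it):
#   get_file_parts('/data/reports/2014 03 report.pdf')
#   -> (Path('/data/reports/2014 03 report.pdf'), Path('/data/reports'), '2014 03 report', '.pdf')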
def apply_renaming_rules(filename:Union[PATH_TYPE, str], **kwargs:Dict[str, bool]) -> PATH_TYPE:
"""
    Applies some basic renaming rules to the file, and renames it, if necessary
Available options:
* clean_names: Removes junk from the file name
* fix_dates: Re-formats dates in the file name to yyyy MM dd format
* add_file_date Adds the year and/or month to the file date, if it is not present
This is done by using dates from the parent folder name or the
file's modified_timestamp
    * move_date: Moves dates to the beginning of the filename.
TODO: Properly handle date ranges, for move_date
TODO: Properly handle all calls to execute_on_file_stem
"""
    source_file, source_file_parent, source_file_stem, source_file_suffix = get_file_parts(filename)
if len(kwargs) == 0:
return source_file
func_list = []
options = [o.strip().lower() for o in kwargs.keys()]
# We need to apply these in this order
if 'clean_names' in options:
func_list.append(clean_up_name)
if 'fix_dates' in options:
func_list.append(fix_date_strings)
if 'add_file_date' in options:
func_list.append(add_file_date)
if 'move_date' in options:
func_list.append(move_date_to_start_of_string)
for func in func_list:
execute_on_file_stem(func, source_file_stem)
# Logic:
# * Clean up filename
# * Fix dates in the filename
# * Try renaming:
    # * If the filename contains a date range, then move it to the beginning of the file, and stop
    # * If the filename contains a full date, then move it to the beginning of the file, and stop
    # * If the filename contains year month only, then move it to the beginning of the file, and stop
    # * If the filename only contains a month, then
    # * Get the year from the parent folder name, or from the file's created timestamp
    # * Prepend the year, and move the month just after it, and stop
    # * If the filename only contains a year, then move it to the beginning of the file, and stop
    new_file_stem, _ = clean_up_name(source_file_stem)
# Try to fix dates
new_file_stem, found_match = fix_date_strings(new_file_stem)
date_parts = ('year', 'month', 'day')
file_name_date = {}
# Get the best date we have
    for date_match in get_date_strings(new_file_stem):
        for date_part in date_parts:
            if date_match.groupdict().get(date_part) is not None:
                file_name_date[date_part] = date_match.group(date_part)
        if 'year' not in file_name_date:
            # Fall back to the parent folder name, or the file's modified timestamp
            if YEAR_PATTERN.search(str(source_file_parent)) is not None:
                file_name_date['year'] = YEAR_PATTERN.search(str(source_file_parent)).group('year')
            else:
                file_name_date['year'] = str(dt.fromtimestamp(source_file.stat().st_mtime).year)
        break
    # We should have a good date now
    file_prefix = ' '.join(file_name_date[d] for d in date_parts if file_name_date.get(d))
new_file_stem, found_match = move_year_month_to_string_start(new_file_stem)
# In this case, we should use some other value for the year
if found_match is False:
new_file_stem, found_match = replace_month(new_file_stem)
if found_match:
            if YEAR_PATTERN.search(str(source_file_parent)) is not None:
                file_year = YEAR_PATTERN.search(str(source_file_parent)).group(0)
            else:
                file_year = dt.fromtimestamp(source_file.stat().st_mtime).year
new_file_stem = '{0} {1}'.format(file_year, new_file_stem)
if found_match is True and new_file_stem != source_file_stem:
        destination_file = futils.get_unique_filename(source_file.with_name('{0}{1}'.format(new_file_stem, source_file_suffix)))
destination_file = futils.safe_move(source_file, destination_file.name)
else:
destination_file = source_file
return destination_file
def get_files(directory:Union[PurePosixPath, PureWindowsPath]) -> Sequence[Path]:
"""
Returns a list of the full path for each file in the given directory
"""
# return_file_list = [Path(f) for f in directory.glob('**/*') if f.is_file() and not bool(f.stat().st_file_attributes & stat.FILE_ATTRIBUTE_HIDDEN) and len(f.suffix) > 0]
return_file_list = [Path(f) for f in directory.glob('**/*') if f.is_file()]
for exclude_pattern in EXCLUDE_LIST:
return_file_list = [f for f in return_file_list if exclude_pattern not in str(f)]
return return_file_list
def process_files(input_directory:PATH_TYPE) -> PATH_SEQ_TYPE:
"""
Processes files in the provided directory
"""
processed_files = []
for this_file in get_files(input_directory):
if this_file.name in DELETE_LIST:
this_file.unlink()
processed_files.append((this_file, 'Deleted'))
else:
processed_files.append((this_file, apply_renaming_rules(this_file)))
return processed_files
# for input_directory in INPUT_PATHS:
# processed_files = process_files(input_directory)
# for original_file, new_file in processed_files:
# if str(original_file) != str(new_file):
# print('Renamed {0}\nto\t{1}\n'.format(str(original_file), str(new_file)))
test_strings = [
'CLIENT Weekly Performance Report 6 9 14 to 6 15 14',
'2014 03 27 CLIENT Monthly Reporting Sample',
'Rev Share \'14'
]
for test_string in test_strings:
    fixed_string, matched = fix_date_strings(test_string)
    print('{0!r} -> {1!r} (matched: {2})'.format(test_string, fixed_string, matched))
    for date_match in get_date_strings(fixed_string):
        print('  found date: {0!r}'.format(date_match.group(0)))
|
|
import sqlite3
import datetime
import calendar
import time
import threading
import subprocess
import re
import sys
class SensorStore:
def __init__(self):
# reentrant lock used for all sqlite3 operations
self.lock = threading.RLock()
def __acquire_lock(self):
self.lock.acquire()
def __release_lock(self):
self.lock.release()
def get_conn(self):
return sqlite3.connect('test.db', check_same_thread=False)
    # returns the id of the newly created sensor
    def create_sensor(self, name, units):
        with self.lock:
            conn = self.get_conn()
            c = conn.cursor()
            c.execute("insert into sensor(name, unit) values ( ?, ? )", ( name, units ) )
            conn.commit()
            sensor_id = c.lastrowid
            conn.close()
            return sensor_id
def get_sensor(self, name):
with self.lock:
conn = self.get_conn()
c = conn.cursor()
c.execute("select id from sensor where name = ?", (name,))
result = c.fetchone()
conn.close()
if result is None:
return None
else:
return result[0]
    def put_float_data(self, sensor, value, timestamp):
        with self.lock:
            conn = self.get_conn()
            c = conn.cursor()
            c.execute("insert into sensor_float_data(val, timestamp, sensor_id) values( ?, ?, ? )", (value, calendar.timegm(timestamp.utctimetuple()), sensor))
            conn.commit()
            conn.close()
def get_latest_float_reading(self, sensor):
with self.lock:
conn = self.get_conn()
c = conn.cursor()
c.execute("select max(timestamp), val from sensor_float_data where sensor_id = ?", (sensor, ))
result = c.fetchone()
if result is not None and type(result) is tuple and result[0] is None:
result = None
conn.close()
return result
def init_sensor(self, name, units):
with self.lock:
sensor = self.get_sensor(name)
if sensor is None:
sensor = self.create_sensor(name, units)
return sensor
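# SensorStore assumes that test.db already contains the two tables used by the
# queries above. The helper below is a minimal schema sketch for convenience;
# it is not called anywhere in this module, and the column types are assumptions
# inferred from how the columns are used, not taken from an existing schema.
def create_schema(conn):
    c = conn.cursor()
    c.execute("create table if not exists sensor("
              "id integer primary key autoincrement, name text, unit text)")
    c.execute("create table if not exists sensor_float_data("
              "id integer primary key autoincrement, val real, "
              "timestamp integer, sensor_id integer)")
    conn.commit()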
temperature = "temp1"
humidity = "hum1"
class DHTSensorSource(threading.Thread):
def __init__(self, temp, hum, pin, sensor_store):
threading.Thread.__init__(self)
self.temperature = temp
self.humidity = hum
self.pin = pin
self.sensor_store = sensor_store
self.shutdown = False
def run ( self ):
while(not self.shutdown):
# Run the DHT program to get the humidity and temperature readings!
output = subprocess.check_output(["./Adafruit_DHT", "2302", self.pin]);
# print output
matches = re.search("Temp =\s+([0-9.]+)", output)
if (not matches):
time.sleep(3)
continue
temp = float(matches.group(1))
# search for humidity printout
matches = re.search("Hum =\s+([0-9.]+)", output)
if (not matches):
time.sleep(3)
continue
humidity = float(matches.group(1))
timestamp = datetime.datetime.utcnow()
self.sensor_store.put_float_data(self.temperature, float(temp), timestamp )
self.sensor_store.put_float_data(self.humidity, float(humidity), timestamp )
for i in range(0, 40):
if not self.shutdown:
time.sleep(1)
print "Shutting down DHT writer for pin {}".format(self.pin)
return None
class UpdateReader(threading.Thread):
def __init__(self, sensor, name, sleep_delay, sensor_store):
threading.Thread.__init__(self)
self.sensor = sensor
self.name = name
self.sleep_delay = sleep_delay
self.sensor_store = sensor_store
self.shutdown = False
def run ( self ):
while True:
try:
reading = self.sensor_store.get_latest_float_reading(self.sensor)
if reading is None:
print "No reading available"
else:
if reading[0] is not None:
timestamp=datetime.datetime.utcfromtimestamp(reading[0])
delta = datetime.datetime.utcnow() - timestamp
print "{} reading is {} at {} ({} ago)".format(self.name, reading[1], timestamp.strftime('%Y-%m-%d %H:%M:%S'), delta)
time.sleep(self.sleep_delay)
except sqlite3.OperationalError:
print "Databasetimeout, will try again"
time.sleep(self.sleep_delay)
if self.shutdown:
print "Shutting down updater thread!"
return None
class TemperatureControl(threading.Thread):
'''Very basic temperature control, controls a external source using a powerswitch tail'''
def __init__(self, gpio_pin, sensor_store):
threading.Thread.__init__(self)
self.sensor_store = sensor_store
self.state_sensor = self.sensor_store.init_sensor("temp_control_state", "integer")
self.gpio_pin = gpio_pin
self.shutdown = False
self.set_temp(False)
def set_temp(self, temp):
print "Setting temperature {}".format(temp)
self.temp = temp
# todo, add GPIO code here
timestamp = datetime.datetime.utcnow()
if temp:
logic_level = 1.0
else:
logic_level = 0.0
print "Updating sensor store..."
self.sensor_store.put_float_data(self.state_sensor, logic_level, timestamp )
def run ( self ):
state = False
while ( not self.shutdown ):
state = not state
self.set_temp(state)
for i in range(0, 60*10):
if not self.shutdown:
time.sleep(1)
print "Shut down temperature control"
store = SensorStore()
therm = store.init_sensor(temperature, "C")
hum = store.init_sensor(humidity, "%")
threads = []
try:
threads.append(DHTSensorSource(therm, hum, "4", store))
threads.append(UpdateReader(therm, hum, 4, store))
threads.append(TemperatureControl("8", store))
for thread in threads:
thread.start()
while True:
time.sleep(1)
except (KeyboardInterrupt, SystemExit):
print '\n! Received keyboard interrupt, quitting threads.\n'
for thread in threads:
thread.shutdown = True
for thread in threads:
thread.join()
print "Exit main"
|
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
r"""Code to validate and convert settings of the Microsoft build tools.
This file contains code to validate and convert settings of the Microsoft
build tools. The function ConvertToMSBuildSettings(), ValidateMSVSSettings(),
and ValidateMSBuildSettings() are the entry points.
This file was created by comparing the projects created by Visual Studio 2008
and Visual Studio 2010 for all available settings through the user interface.
The MSBuild schemas were also considered. They are typically found in the
MSBuild install directory, e.g. c:\Program Files (x86)\MSBuild
"""
import sys
import re
# Dictionaries of settings validators. The key is the tool name, the value is
# a dictionary mapping setting names to validation functions.
_msvs_validators = {}
_msbuild_validators = {}
# A dictionary of settings converters. The key is the tool name, the value is
# a dictionary mapping setting names to conversion functions.
_msvs_to_msbuild_converters = {}
# Tool name mapping from MSVS to MSBuild.
_msbuild_name_of_tool = {}
class _Tool(object):
"""Represents a tool used by MSVS or MSBuild.
Attributes:
msvs_name: The name of the tool in MSVS.
msbuild_name: The name of the tool in MSBuild.
"""
def __init__(self, msvs_name, msbuild_name):
self.msvs_name = msvs_name
self.msbuild_name = msbuild_name
def _AddTool(tool):
"""Adds a tool to the four dictionaries used to process settings.
This only defines the tool. Each setting also needs to be added.
Args:
tool: The _Tool object to be added.
"""
_msvs_validators[tool.msvs_name] = {}
_msbuild_validators[tool.msbuild_name] = {}
_msvs_to_msbuild_converters[tool.msvs_name] = {}
_msbuild_name_of_tool[tool.msvs_name] = tool.msbuild_name
def _GetMSBuildToolSettings(msbuild_settings, tool):
"""Returns an MSBuild tool dictionary. Creates it if needed."""
return msbuild_settings.setdefault(tool.msbuild_name, {})
class _Type(object):
"""Type of settings (Base class)."""
def ValidateMSVS(self, value):
"""Verifies that the value is legal for MSVS.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSVS.
"""
def ValidateMSBuild(self, value):
"""Verifies that the value is legal for MSBuild.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSBuild.
"""
def ConvertToMSBuild(self, value):
"""Returns the MSBuild equivalent of the MSVS value given.
Args:
value: the MSVS value to convert.
Returns:
the MSBuild equivalent.
Raises:
ValueError if value is not valid.
"""
return value
class _String(_Type):
"""A setting that's just a string."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
return ConvertVCMacrosToMSBuild(value)
class _StringList(_Type):
"""A settings that's a list of strings."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
if isinstance(value, list):
return [ConvertVCMacrosToMSBuild(i) for i in value]
else:
return ConvertVCMacrosToMSBuild(value)
class _Boolean(_Type):
"""Boolean settings, can have the values 'false' or 'true'."""
def _Validate(self, value):
if value != 'true' and value != 'false':
raise ValueError('expected bool; got %r' % value)
def ValidateMSVS(self, value):
self._Validate(value)
def ValidateMSBuild(self, value):
self._Validate(value)
def ConvertToMSBuild(self, value):
self._Validate(value)
return value
class _Integer(_Type):
"""Integer settings."""
def __init__(self, msbuild_base=10):
_Type.__init__(self)
self._msbuild_base = msbuild_base
def ValidateMSVS(self, value):
# Try to convert, this will raise ValueError if invalid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
# Try to convert, this will raise ValueError if invalid.
int(value, self._msbuild_base)
def ConvertToMSBuild(self, value):
msbuild_format = (self._msbuild_base == 10) and '%d' or '0x%04x'
return msbuild_format % int(value)
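# Illustrative note: _Integer(msbuild_base=16).ConvertToMSBuild('4105') yields
# '0x1009', while the default base-10 form yields '4105'. The hexadecimal form
# is what the resource compiler's Culture setting further below uses.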
class _Enumeration(_Type):
"""Type of settings that is an enumeration.
In MSVS, the values are indexes like '0', '1', and '2'.
MSBuild uses text labels that are more representative, like 'Win32'.
Constructor args:
label_list: an array of MSBuild labels that correspond to the MSVS index.
In the rare cases where MSVS has skipped an index value, None is
used in the array to indicate the unused spot.
new: an array of labels that are new to MSBuild.
"""
def __init__(self, label_list, new=None):
_Type.__init__(self)
self._label_list = label_list
self._msbuild_values = set(value for value in label_list
if value is not None)
if new is not None:
self._msbuild_values.update(new)
def ValidateMSVS(self, value):
# Try to convert. It will raise an exception if not valid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
if value not in self._msbuild_values:
raise ValueError('unrecognized enumerated value %s' % value)
def ConvertToMSBuild(self, value):
index = int(value)
if index < 0 or index >= len(self._label_list):
raise ValueError('index value (%d) not in expected range [0, %d)' %
(index, len(self._label_list)))
label = self._label_list[index]
if label is None:
raise ValueError('converted value for %s not specified.' % value)
return label
# Instantiate the various generic types.
_boolean = _Boolean()
_integer = _Integer()
# For now, we don't do any special validation on these types:
_string = _String()
_file_name = _String()
_folder_name = _String()
_file_list = _StringList()
_folder_list = _StringList()
_string_list = _StringList()
# Some boolean settings went from numerical values to boolean. The
# mapping is 0: default, 1: false, 2: true.
_newly_boolean = _Enumeration(['', 'false', 'true'])
def _Same(tool, name, setting_type):
"""Defines a setting that has the same name in MSVS and MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
_Renamed(tool, name, name, setting_type)
def _Renamed(tool, msvs_name, msbuild_name, setting_type):
"""Defines a setting for which the name has changed.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting.
msbuild_name: the name of the MSBuild setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
msbuild_tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
msbuild_tool_settings[msbuild_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_name] = setting_type.ValidateMSVS
_msbuild_validators[tool.msbuild_name][msbuild_name] = (
setting_type.ValidateMSBuild)
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _Moved(tool, settings_name, msbuild_tool_name, setting_type):
_MovedAndRenamed(tool, settings_name, msbuild_tool_name, settings_name,
setting_type)
def _MovedAndRenamed(tool, msvs_settings_name, msbuild_tool_name,
msbuild_settings_name, setting_type):
"""Defines a setting that may have moved to a new section.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_settings_name: the MSVS name of the setting.
msbuild_tool_name: the name of the MSBuild tool to place the setting under.
msbuild_settings_name: the MSBuild name of the setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
tool_settings = msbuild_settings.setdefault(msbuild_tool_name, {})
tool_settings[msbuild_settings_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_settings_name] = (
setting_type.ValidateMSVS)
validator = setting_type.ValidateMSBuild
_msbuild_validators[msbuild_tool_name][msbuild_settings_name] = validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_settings_name] = _Translate
def _MSVSOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSVS.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
def _Translate(unused_value, unused_msbuild_settings):
# Since this is for MSVS only settings, no translation will happen.
pass
_msvs_validators[tool.msvs_name][name] = setting_type.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _MSBuildOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
_msbuild_validators[tool.msbuild_name][name] = setting_type.ValidateMSBuild
def _ConvertedToAdditionalOption(tool, msvs_name, flag):
"""Defines a setting that's handled via a command line option in MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting that if 'true' becomes a flag
flag: the flag to insert at the end of the AdditionalOptions
"""
def _Translate(value, msbuild_settings):
if value == 'true':
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if 'AdditionalOptions' in tool_settings:
new_flags = '%s %s' % (tool_settings['AdditionalOptions'], flag)
else:
new_flags = flag
tool_settings['AdditionalOptions'] = new_flags
_msvs_validators[tool.msvs_name][msvs_name] = _boolean.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _CustomGeneratePreprocessedFile(tool, msvs_name):
def _Translate(value, msbuild_settings):
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if value == '0':
tool_settings['PreprocessToFile'] = 'false'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '1': # /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '2': # /EP /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'true'
else:
raise ValueError('value must be one of [0, 1, 2]; got %s' % value)
# Create a bogus validator that looks for '0', '1', or '2'
msvs_validator = _Enumeration(['a', 'b', 'c']).ValidateMSVS
_msvs_validators[tool.msvs_name][msvs_name] = msvs_validator
msbuild_validator = _boolean.ValidateMSBuild
msbuild_tool_validators = _msbuild_validators[tool.msbuild_name]
msbuild_tool_validators['PreprocessToFile'] = msbuild_validator
msbuild_tool_validators['PreprocessSuppressLineNumbers'] = msbuild_validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
fix_vc_macro_slashes_regex_list = ('IntDir', 'OutDir')
fix_vc_macro_slashes_regex = re.compile(
r'(\$\((?:%s)\))(?:[\\/]+)' % "|".join(fix_vc_macro_slashes_regex_list)
)
# Regular expression to detect keys that were generated by exclusion lists
_EXCLUDED_SUFFIX_RE = re.compile('^(.*)_excluded$')
def _ValidateExclusionSetting(setting, settings, error_msg, stderr=sys.stderr):
"""Verify that 'setting' is valid if it is generated from an exclusion list.
If the setting appears to be generated from an exclusion list, the root name
is checked.
Args:
setting: A string that is the setting name to validate
settings: A dictionary where the keys are valid settings
error_msg: The message to emit in the event of error
stderr: The stream receiving the error messages.
"""
# This may be unrecognized because it's an exclusion list. If the
# setting name has the _excluded suffix, then check the root name.
unrecognized = True
m = re.match(_EXCLUDED_SUFFIX_RE, setting)
if m:
root_setting = m.group(1)
unrecognized = root_setting not in settings
if unrecognized:
# We don't know this setting. Give a warning.
print >> stderr, error_msg
def FixVCMacroSlashes(s):
"""Replace macros which have excessive following slashes.
These macros are known to have a built-in trailing slash. Furthermore, many
scripts hiccup on processing paths with extra slashes in the middle.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
s = fix_vc_macro_slashes_regex.sub(r'\1', s)
return s
def ConvertVCMacrosToMSBuild(s):
"""Convert the the MSVS macros found in the string to the MSBuild equivalent.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
replace_map = {
'$(ConfigurationName)': '$(Configuration)',
'$(InputDir)': '%(RelativeDir)',
'$(InputExt)': '%(Extension)',
'$(InputFileName)': '%(Filename)%(Extension)',
'$(InputName)': '%(Filename)',
'$(InputPath)': '%(Identity)',
'$(ParentName)': '$(ProjectFileName)',
'$(PlatformName)': '$(Platform)',
'$(SafeInputName)': '%(Filename)',
}
for old, new in replace_map.iteritems():
s = s.replace(old, new)
s = FixVCMacroSlashes(s)
return s
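# Illustrative examples:
#   ConvertVCMacrosToMSBuild('$(InputDir)$(InputName).h') -> '%(RelativeDir)%(Filename).h'
#   ConvertVCMacrosToMSBuild('$(IntDir)\\foo') -> '$(IntDir)foo'
# (the second case is FixVCMacroSlashes dropping the redundant slash)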
def ConvertToMSBuildSettings(msvs_settings, stderr=sys.stderr):
"""Converts MSVS settings (VS2008 and earlier) to MSBuild settings (VS2010+).
Args:
msvs_settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
Returns:
A dictionary of MSBuild settings. The key is either the MSBuild tool name
or the empty string (for the global settings). The values are themselves
dictionaries of settings and their values.
"""
msbuild_settings = {}
for msvs_tool_name, msvs_tool_settings in msvs_settings.iteritems():
if msvs_tool_name in _msvs_to_msbuild_converters:
msvs_tool = _msvs_to_msbuild_converters[msvs_tool_name]
for msvs_setting, msvs_value in msvs_tool_settings.iteritems():
if msvs_setting in msvs_tool:
# Invoke the translation function.
try:
msvs_tool[msvs_setting](msvs_value, msbuild_settings)
except ValueError, e:
print >> stderr, ('Warning: while converting %s/%s to MSBuild, '
'%s' % (msvs_tool_name, msvs_setting, e))
else:
_ValidateExclusionSetting(msvs_setting,
msvs_tool,
('Warning: unrecognized setting %s/%s '
'while converting to MSBuild.' %
(msvs_tool_name, msvs_setting)),
stderr)
else:
print >> stderr, ('Warning: unrecognized tool %s while converting to '
'MSBuild.' % msvs_tool_name)
return msbuild_settings
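# A short usage sketch (illustrative; values are plain strings as in MSVS projects):
#   msvs = {'VCCLCompilerTool': {'WarnAsError': 'true',
#                                'AdditionalIncludeDirectories': 'dir1;dir2'}}
#   ConvertToMSBuildSettings(msvs)
#   -> {'ClCompile': {'TreatWarningAsError': 'true',
#                     'AdditionalIncludeDirectories': 'dir1;dir2'}}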
def ValidateMSVSSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSVS.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msvs_validators, settings, stderr)
def ValidateMSBuildSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSBuild.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msbuild_validators, settings, stderr)
def _ValidateSettings(validators, settings, stderr):
"""Validates that the settings are valid for MSBuild or MSVS.
We currently only validate the names of the settings, not their values.
Args:
validators: A dictionary of tools and their validators.
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
for tool_name in settings:
if tool_name in validators:
tool_validators = validators[tool_name]
for setting, value in settings[tool_name].iteritems():
if setting in tool_validators:
try:
tool_validators[setting](value)
except ValueError, e:
print >> stderr, ('Warning: for %s/%s, %s' %
(tool_name, setting, e))
else:
_ValidateExclusionSetting(setting,
tool_validators,
('Warning: unrecognized setting %s/%s' %
(tool_name, setting)),
stderr)
else:
print >> stderr, ('Warning: unrecognized tool %s' % tool_name)
# MSVS and MSBuild names of the tools.
_compile = _Tool('VCCLCompilerTool', 'ClCompile')
_link = _Tool('VCLinkerTool', 'Link')
_midl = _Tool('VCMIDLTool', 'Midl')
_rc = _Tool('VCResourceCompilerTool', 'ResourceCompile')
_lib = _Tool('VCLibrarianTool', 'Lib')
_manifest = _Tool('VCManifestTool', 'Manifest')
_AddTool(_compile)
_AddTool(_link)
_AddTool(_midl)
_AddTool(_rc)
_AddTool(_lib)
_AddTool(_manifest)
# Add sections only found in the MSBuild settings.
_msbuild_validators[''] = {}
_msbuild_validators['ProjectReference'] = {}
_msbuild_validators['ManifestResourceCompile'] = {}
# Descriptions of the compiler options, i.e. VCCLCompilerTool in MSVS and
# ClCompile in MSBuild.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\cl.xml" for
# the schema of the MSBuild ClCompile settings.
# Options that have the same name in MSVS and MSBuild
_Same(_compile, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_compile, 'AdditionalOptions', _string_list)
_Same(_compile, 'AdditionalUsingDirectories', _folder_list) # /AI
_Same(_compile, 'AssemblerListingLocation', _file_name) # /Fa
_Same(_compile, 'BrowseInformationFile', _file_name)
_Same(_compile, 'BufferSecurityCheck', _boolean) # /GS
_Same(_compile, 'DisableLanguageExtensions', _boolean) # /Za
_Same(_compile, 'DisableSpecificWarnings', _string_list) # /wd
_Same(_compile, 'EnableFiberSafeOptimizations', _boolean) # /GT
_Same(_compile, 'EnablePREfast', _boolean) # /analyze Visible='false'
_Same(_compile, 'ExpandAttributedSource', _boolean) # /Fx
_Same(_compile, 'FloatingPointExceptions', _boolean) # /fp:except
_Same(_compile, 'ForceConformanceInForLoopScope', _boolean) # /Zc:forScope
_Same(_compile, 'ForcedIncludeFiles', _file_list) # /FI
_Same(_compile, 'ForcedUsingFiles', _file_list) # /FU
_Same(_compile, 'GenerateXMLDocumentationFiles', _boolean) # /doc
_Same(_compile, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_compile, 'MinimalRebuild', _boolean) # /Gm
_Same(_compile, 'OmitDefaultLibName', _boolean) # /Zl
_Same(_compile, 'OmitFramePointers', _boolean) # /Oy
_Same(_compile, 'PreprocessorDefinitions', _string_list) # /D
_Same(_compile, 'ProgramDataBaseFileName', _file_name) # /Fd
_Same(_compile, 'RuntimeTypeInfo', _boolean) # /GR
_Same(_compile, 'ShowIncludes', _boolean) # /showIncludes
_Same(_compile, 'SmallerTypeCheck', _boolean) # /RTCc
_Same(_compile, 'StringPooling', _boolean) # /GF
_Same(_compile, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_compile, 'TreatWChar_tAsBuiltInType', _boolean) # /Zc:wchar_t
_Same(_compile, 'UndefineAllPreprocessorDefinitions', _boolean) # /u
_Same(_compile, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_compile, 'UseFullPaths', _boolean) # /FC
_Same(_compile, 'WholeProgramOptimization', _boolean) # /GL
_Same(_compile, 'XMLDocumentationFileName', _file_name)
_Same(_compile, 'AssemblerOutput',
_Enumeration(['NoListing',
'AssemblyCode', # /FA
'All', # /FAcs
'AssemblyAndMachineCode', # /FAc
'AssemblyAndSourceCode'])) # /FAs
_Same(_compile, 'BasicRuntimeChecks',
_Enumeration(['Default',
'StackFrameRuntimeCheck', # /RTCs
'UninitializedLocalUsageCheck', # /RTCu
'EnableFastChecks'])) # /RTC1
_Same(_compile, 'BrowseInformation',
_Enumeration(['false',
'true', # /FR
'true'])) # /Fr
_Same(_compile, 'CallingConvention',
_Enumeration(['Cdecl', # /Gd
'FastCall', # /Gr
'StdCall', # /Gz
'VectorCall'])) # /Gv
_Same(_compile, 'CompileAs',
_Enumeration(['Default',
'CompileAsC', # /TC
'CompileAsCpp'])) # /TP
_Same(_compile, 'DebugInformationFormat',
_Enumeration(['', # Disabled
'OldStyle', # /Z7
None,
'ProgramDatabase', # /Zi
'EditAndContinue'])) # /ZI
_Same(_compile, 'EnableEnhancedInstructionSet',
_Enumeration(['NotSet',
'StreamingSIMDExtensions', # /arch:SSE
'StreamingSIMDExtensions2', # /arch:SSE2
'AdvancedVectorExtensions', # /arch:AVX (vs2012+)
'NoExtensions',])) # /arch:IA32 (vs2012+)
_Same(_compile, 'ErrorReporting',
_Enumeration(['None', # /errorReport:none
'Prompt', # /errorReport:prompt
'Queue'], # /errorReport:queue
new=['Send'])) # /errorReport:send"
_Same(_compile, 'ExceptionHandling',
_Enumeration(['false',
'Sync', # /EHsc
'Async'], # /EHa
new=['SyncCThrow'])) # /EHs
_Same(_compile, 'FavorSizeOrSpeed',
_Enumeration(['Neither',
'Speed', # /Ot
'Size'])) # /Os
_Same(_compile, 'FloatingPointModel',
_Enumeration(['Precise', # /fp:precise
'Strict', # /fp:strict
'Fast'])) # /fp:fast
_Same(_compile, 'InlineFunctionExpansion',
_Enumeration(['Default',
'OnlyExplicitInline', # /Ob1
'AnySuitable'], # /Ob2
new=['Disabled'])) # /Ob0
_Same(_compile, 'Optimization',
_Enumeration(['Disabled', # /Od
'MinSpace', # /O1
'MaxSpeed', # /O2
'Full'])) # /Ox
_Same(_compile, 'RuntimeLibrary',
_Enumeration(['MultiThreaded', # /MT
'MultiThreadedDebug', # /MTd
'MultiThreadedDLL', # /MD
'MultiThreadedDebugDLL'])) # /MDd
_Same(_compile, 'StructMemberAlignment',
_Enumeration(['Default',
'1Byte', # /Zp1
'2Bytes', # /Zp2
'4Bytes', # /Zp4
'8Bytes', # /Zp8
'16Bytes'])) # /Zp16
_Same(_compile, 'WarningLevel',
_Enumeration(['TurnOffAllWarnings', # /W0
'Level1', # /W1
'Level2', # /W2
'Level3', # /W3
'Level4'], # /W4
new=['EnableAllWarnings'])) # /Wall
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_compile, 'EnableFunctionLevelLinking', 'FunctionLevelLinking',
_boolean) # /Gy
_Renamed(_compile, 'EnableIntrinsicFunctions', 'IntrinsicFunctions',
_boolean) # /Oi
_Renamed(_compile, 'KeepComments', 'PreprocessKeepComments', _boolean) # /C
_Renamed(_compile, 'ObjectFile', 'ObjectFileName', _file_name) # /Fo
_Renamed(_compile, 'OpenMP', 'OpenMPSupport', _boolean) # /openmp
_Renamed(_compile, 'PrecompiledHeaderThrough', 'PrecompiledHeaderFile',
_file_name) # Used with /Yc and /Yu
_Renamed(_compile, 'PrecompiledHeaderFile', 'PrecompiledHeaderOutputFile',
_file_name) # /Fp
_Renamed(_compile, 'UsePrecompiledHeader', 'PrecompiledHeader',
_Enumeration(['NotUsing', # VS recognized '' for this value too.
'Create', # /Yc
'Use'])) # /Yu
_Renamed(_compile, 'WarnAsError', 'TreatWarningAsError', _boolean) # /WX
_ConvertedToAdditionalOption(_compile, 'DefaultCharIsUnsigned', '/J')
# MSVS options not found in MSBuild.
_MSVSOnly(_compile, 'Detect64BitPortabilityProblems', _boolean)
_MSVSOnly(_compile, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_compile, 'BuildingInIDE', _boolean)
_MSBuildOnly(_compile, 'CompileAsManaged',
_Enumeration([], new=['false',
'true', # /clr
'Pure', # /clr:pure
'Safe', # /clr:safe
'OldSyntax'])) # /clr:oldSyntax
_MSBuildOnly(_compile, 'CreateHotpatchableImage', _boolean) # /hotpatch
_MSBuildOnly(_compile, 'MultiProcessorCompilation', _boolean) # /MP
_MSBuildOnly(_compile, 'PreprocessOutputPath', _string) # /Fi
_MSBuildOnly(_compile, 'ProcessorNumber', _integer) # the number of processors
_MSBuildOnly(_compile, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_compile, 'TreatSpecificWarningsAsErrors', _string_list) # /we
_MSBuildOnly(_compile, 'UseUnicodeForAssemblerListing', _boolean) # /FAu
# Defines a setting that needs very customized processing
_CustomGeneratePreprocessedFile(_compile, 'GeneratePreprocessedFile')
# Directives for converting MSVS VCLinkerTool to MSBuild Link.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\link.xml" for
# the schema of the MSBuild Link settings.
# Options that have the same name in MSVS and MSBuild
_Same(_link, 'AdditionalDependencies', _file_list)
_Same(_link, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
# /MANIFESTDEPENDENCY:
_Same(_link, 'AdditionalManifestDependencies', _file_list)
_Same(_link, 'AdditionalOptions', _string_list)
_Same(_link, 'AddModuleNamesToAssembly', _file_list) # /ASSEMBLYMODULE
_Same(_link, 'AllowIsolation', _boolean) # /ALLOWISOLATION
_Same(_link, 'AssemblyLinkResource', _file_list) # /ASSEMBLYLINKRESOURCE
_Same(_link, 'BaseAddress', _string) # /BASE
_Same(_link, 'CLRUnmanagedCodeCheck', _boolean) # /CLRUNMANAGEDCODECHECK
_Same(_link, 'DelayLoadDLLs', _file_list) # /DELAYLOAD
_Same(_link, 'DelaySign', _boolean) # /DELAYSIGN
_Same(_link, 'EmbedManagedResourceFile', _file_list) # /ASSEMBLYRESOURCE
_Same(_link, 'EnableUAC', _boolean) # /MANIFESTUAC
_Same(_link, 'EntryPointSymbol', _string) # /ENTRY
_Same(_link, 'ForceSymbolReferences', _file_list) # /INCLUDE
_Same(_link, 'FunctionOrder', _file_name) # /ORDER
_Same(_link, 'GenerateDebugInformation', _boolean) # /DEBUG
_Same(_link, 'GenerateMapFile', _boolean) # /MAP
_Same(_link, 'HeapCommitSize', _string)
_Same(_link, 'HeapReserveSize', _string) # /HEAP
_Same(_link, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_link, 'IgnoreEmbeddedIDL', _boolean) # /IGNOREIDL
_Same(_link, 'ImportLibrary', _file_name) # /IMPLIB
_Same(_link, 'KeyContainer', _file_name) # /KEYCONTAINER
_Same(_link, 'KeyFile', _file_name) # /KEYFILE
_Same(_link, 'ManifestFile', _file_name) # /ManifestFile
_Same(_link, 'MapExports', _boolean) # /MAPINFO:EXPORTS
_Same(_link, 'MapFileName', _file_name)
_Same(_link, 'MergedIDLBaseFileName', _file_name) # /IDLOUT
_Same(_link, 'MergeSections', _string) # /MERGE
_Same(_link, 'MidlCommandFile', _file_name) # /MIDL
_Same(_link, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_link, 'OutputFile', _file_name) # /OUT
_Same(_link, 'PerUserRedirection', _boolean)
_Same(_link, 'Profile', _boolean) # /PROFILE
_Same(_link, 'ProfileGuidedDatabase', _file_name) # /PGD
_Same(_link, 'ProgramDatabaseFile', _file_name) # /PDB
_Same(_link, 'RegisterOutput', _boolean)
_Same(_link, 'SetChecksum', _boolean) # /RELEASE
_Same(_link, 'StackCommitSize', _string)
_Same(_link, 'StackReserveSize', _string) # /STACK
_Same(_link, 'StripPrivateSymbols', _file_name) # /PDBSTRIPPED
_Same(_link, 'SupportUnloadOfDelayLoadedDLL', _boolean) # /DELAY:UNLOAD
_Same(_link, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_link, 'SwapRunFromCD', _boolean) # /SWAPRUN:CD
_Same(_link, 'TurnOffAssemblyGeneration', _boolean) # /NOASSEMBLY
_Same(_link, 'TypeLibraryFile', _file_name) # /TLBOUT
_Same(_link, 'TypeLibraryResourceID', _integer) # /TLBID
_Same(_link, 'UACUIAccess', _boolean) # /uiAccess='true'
_Same(_link, 'Version', _string) # /VERSION
_Same(_link, 'EnableCOMDATFolding', _newly_boolean) # /OPT:ICF
_Same(_link, 'FixedBaseAddress', _newly_boolean) # /FIXED
_Same(_link, 'LargeAddressAware', _newly_boolean) # /LARGEADDRESSAWARE
_Same(_link, 'OptimizeReferences', _newly_boolean) # /OPT:REF
_Same(_link, 'RandomizedBaseAddress', _newly_boolean) # /DYNAMICBASE
_Same(_link, 'TerminalServerAware', _newly_boolean) # /TSAWARE
_subsystem_enumeration = _Enumeration(
['NotSet',
'Console', # /SUBSYSTEM:CONSOLE
'Windows', # /SUBSYSTEM:WINDOWS
'Native', # /SUBSYSTEM:NATIVE
'EFI Application', # /SUBSYSTEM:EFI_APPLICATION
'EFI Boot Service Driver', # /SUBSYSTEM:EFI_BOOT_SERVICE_DRIVER
'EFI ROM', # /SUBSYSTEM:EFI_ROM
'EFI Runtime', # /SUBSYSTEM:EFI_RUNTIME_DRIVER
'WindowsCE'], # /SUBSYSTEM:WINDOWSCE
new=['POSIX']) # /SUBSYSTEM:POSIX
_target_machine_enumeration = _Enumeration(
['NotSet',
'MachineX86', # /MACHINE:X86
None,
'MachineARM', # /MACHINE:ARM
'MachineEBC', # /MACHINE:EBC
'MachineIA64', # /MACHINE:IA64
None,
'MachineMIPS', # /MACHINE:MIPS
'MachineMIPS16', # /MACHINE:MIPS16
'MachineMIPSFPU', # /MACHINE:MIPSFPU
'MachineMIPSFPU16', # /MACHINE:MIPSFPU16
None,
None,
None,
'MachineSH4', # /MACHINE:SH4
None,
'MachineTHUMB', # /MACHINE:THUMB
'MachineX64']) # /MACHINE:X64
_Same(_link, 'AssemblyDebug',
_Enumeration(['',
'true', # /ASSEMBLYDEBUG
'false'])) # /ASSEMBLYDEBUG:DISABLE
_Same(_link, 'CLRImageType',
_Enumeration(['Default',
'ForceIJWImage', # /CLRIMAGETYPE:IJW
'ForcePureILImage', # /Switch="CLRIMAGETYPE:PURE
'ForceSafeILImage'])) # /Switch="CLRIMAGETYPE:SAFE
_Same(_link, 'CLRThreadAttribute',
_Enumeration(['DefaultThreadingAttribute', # /CLRTHREADATTRIBUTE:NONE
'MTAThreadingAttribute', # /CLRTHREADATTRIBUTE:MTA
'STAThreadingAttribute'])) # /CLRTHREADATTRIBUTE:STA
_Same(_link, 'DataExecutionPrevention',
_Enumeration(['',
'false', # /NXCOMPAT:NO
'true'])) # /NXCOMPAT
_Same(_link, 'Driver',
_Enumeration(['NotSet',
'Driver', # /Driver
'UpOnly', # /DRIVER:UPONLY
'WDM'])) # /DRIVER:WDM
_Same(_link, 'LinkTimeCodeGeneration',
_Enumeration(['Default',
'UseLinkTimeCodeGeneration', # /LTCG
'PGInstrument', # /LTCG:PGInstrument
'PGOptimization', # /LTCG:PGOptimize
'PGUpdate'])) # /LTCG:PGUpdate
_Same(_link, 'ShowProgress',
_Enumeration(['NotSet',
'LinkVerbose', # /VERBOSE
'LinkVerboseLib'], # /VERBOSE:Lib
new=['LinkVerboseICF', # /VERBOSE:ICF
'LinkVerboseREF', # /VERBOSE:REF
'LinkVerboseSAFESEH', # /VERBOSE:SAFESEH
'LinkVerboseCLR'])) # /VERBOSE:CLR
_Same(_link, 'SubSystem', _subsystem_enumeration)
_Same(_link, 'TargetMachine', _target_machine_enumeration)
_Same(_link, 'UACExecutionLevel',
_Enumeration(['AsInvoker', # /level='asInvoker'
'HighestAvailable', # /level='highestAvailable'
'RequireAdministrator'])) # /level='requireAdministrator'
_Same(_link, 'MinimumRequiredVersion', _string)
_Same(_link, 'TreatLinkerWarningAsErrors', _boolean) # /WX
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_link, 'ErrorReporting', 'LinkErrorReporting',
_Enumeration(['NoErrorReport', # /ERRORREPORT:NONE
'PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin'], # /ERRORREPORT:QUEUE
new=['SendErrorReport'])) # /ERRORREPORT:SEND
_Renamed(_link, 'IgnoreDefaultLibraryNames', 'IgnoreSpecificDefaultLibraries',
_file_list) # /NODEFAULTLIB
_Renamed(_link, 'ResourceOnlyDLL', 'NoEntryPoint', _boolean) # /NOENTRY
_Renamed(_link, 'SwapRunFromNet', 'SwapRunFromNET', _boolean) # /SWAPRUN:NET
_Moved(_link, 'GenerateManifest', '', _boolean)
_Moved(_link, 'IgnoreImportLibrary', '', _boolean)
_Moved(_link, 'LinkIncremental', '', _newly_boolean)
_Moved(_link, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_Moved(_link, 'UseLibraryDependencyInputs', 'ProjectReference', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_link, 'OptimizeForWindows98', _newly_boolean)
_MSVSOnly(_link, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_link, 'BuildingInIDE', _boolean)
_MSBuildOnly(_link, 'ImageHasSafeExceptionHandlers', _boolean) # /SAFESEH
_MSBuildOnly(_link, 'LinkDLL', _boolean) # /DLL Visible='false'
_MSBuildOnly(_link, 'LinkStatus', _boolean) # /LTCG:STATUS
_MSBuildOnly(_link, 'PreventDllBinding', _boolean) # /ALLOWBIND
_MSBuildOnly(_link, 'SupportNobindOfDelayLoadedDLL', _boolean) # /DELAY:NOBIND
_MSBuildOnly(_link, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_link, 'MSDOSStubFileName', _file_name) # /STUB Visible='false'
_MSBuildOnly(_link, 'SectionAlignment', _integer) # /ALIGN
_MSBuildOnly(_link, 'SpecifySectionAttributes', _string) # /SECTION
_MSBuildOnly(_link, 'ForceFileOutput',
_Enumeration([], new=['Enabled', # /FORCE
# /FORCE:MULTIPLE
'MultiplyDefinedSymbolOnly',
'UndefinedSymbolOnly'])) # /FORCE:UNRESOLVED
_MSBuildOnly(_link, 'CreateHotPatchableImage',
_Enumeration([], new=['Enabled', # /FUNCTIONPADMIN
'X86Image', # /FUNCTIONPADMIN:5
'X64Image', # /FUNCTIONPADMIN:6
'ItaniumImage'])) # /FUNCTIONPADMIN:16
_MSBuildOnly(_link, 'CLRSupportLastError',
_Enumeration([], new=['Enabled', # /CLRSupportLastError
'Disabled', # /CLRSupportLastError:NO
# /CLRSupportLastError:SYSTEMDLL
'SystemDlls']))
# Directives for converting VCResourceCompilerTool to ResourceCompile.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\rc.xml" for
# the schema of the MSBuild ResourceCompile settings.
_Same(_rc, 'AdditionalOptions', _string_list)
_Same(_rc, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_rc, 'Culture', _Integer(msbuild_base=16))
_Same(_rc, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_rc, 'PreprocessorDefinitions', _string_list) # /D
_Same(_rc, 'ResourceOutputFileName', _string) # /fo
_Same(_rc, 'ShowProgress', _boolean) # /v
# There is no UI in VisualStudio 2008 to set the following properties.
# However they are found in CL and other tools. Include them here for
# completeness, as they are very likely to have the same usage pattern.
_Same(_rc, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_rc, 'UndefinePreprocessorDefinitions', _string_list) # /u
# MSBuild options not found in MSVS.
_MSBuildOnly(_rc, 'NullTerminateStrings', _boolean) # /n
_MSBuildOnly(_rc, 'TrackerLogDirectory', _folder_name)
# Directives for converting VCMIDLTool to Midl.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\midl.xml" for
# the schema of the MSBuild Midl settings.
_Same(_midl, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_midl, 'AdditionalOptions', _string_list)
_Same(_midl, 'CPreprocessOptions', _string) # /cpp_opt
_Same(_midl, 'ErrorCheckAllocations', _boolean) # /error allocation
_Same(_midl, 'ErrorCheckBounds', _boolean) # /error bounds_check
_Same(_midl, 'ErrorCheckEnumRange', _boolean) # /error enum
_Same(_midl, 'ErrorCheckRefPointers', _boolean) # /error ref
_Same(_midl, 'ErrorCheckStubData', _boolean) # /error stub_data
_Same(_midl, 'GenerateStublessProxies', _boolean) # /Oicf
_Same(_midl, 'GenerateTypeLibrary', _boolean)
_Same(_midl, 'HeaderFileName', _file_name) # /h
_Same(_midl, 'IgnoreStandardIncludePath', _boolean) # /no_def_idir
_Same(_midl, 'InterfaceIdentifierFileName', _file_name) # /iid
_Same(_midl, 'MkTypLibCompatible', _boolean) # /mktyplib203
_Same(_midl, 'OutputDirectory', _string) # /out
_Same(_midl, 'PreprocessorDefinitions', _string_list) # /D
_Same(_midl, 'ProxyFileName', _file_name) # /proxy
_Same(_midl, 'RedirectOutputAndErrors', _file_name) # /o
_Same(_midl, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_midl, 'TypeLibraryName', _file_name) # /tlb
_Same(_midl, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_midl, 'WarnAsError', _boolean) # /WX
_Same(_midl, 'DefaultCharType',
_Enumeration(['Unsigned', # /char unsigned
'Signed', # /char signed
'Ascii'])) # /char ascii7
_Same(_midl, 'TargetEnvironment',
_Enumeration(['NotSet',
'Win32', # /env win32
'Itanium', # /env ia64
'X64'])) # /env x64
_Same(_midl, 'EnableErrorChecks',
_Enumeration(['EnableCustom',
'None', # /error none
'All'])) # /error all
_Same(_midl, 'StructMemberAlignment',
_Enumeration(['NotSet',
'1', # Zp1
'2', # Zp2
'4', # Zp4
'8'])) # Zp8
_Same(_midl, 'WarningLevel',
_Enumeration(['0', # /W0
'1', # /W1
'2', # /W2
'3', # /W3
'4'])) # /W4
_Renamed(_midl, 'DLLDataFileName', 'DllDataFileName', _file_name) # /dlldata
_Renamed(_midl, 'ValidateParameters', 'ValidateAllParameters',
_boolean) # /robust
# MSBuild options not found in MSVS.
_MSBuildOnly(_midl, 'ApplicationConfigurationMode', _boolean) # /app_config
_MSBuildOnly(_midl, 'ClientStubFile', _file_name) # /cstub
_MSBuildOnly(_midl, 'GenerateClientFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'GenerateServerFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'LocaleID', _integer) # /lcid DECIMAL
_MSBuildOnly(_midl, 'ServerStubFile', _file_name) # /sstub
_MSBuildOnly(_midl, 'SuppressCompilerWarnings', _boolean) # /no_warn
_MSBuildOnly(_midl, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_midl, 'TypeLibFormat',
_Enumeration([], new=['NewFormat', # /newtlb
'OldFormat'])) # /oldtlb
# Directives for converting VCLibrarianTool to Lib.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\lib.xml" for
# the schema of the MSBuild Lib settings.
_Same(_lib, 'AdditionalDependencies', _file_list)
_Same(_lib, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
_Same(_lib, 'AdditionalOptions', _string_list)
_Same(_lib, 'ExportNamedFunctions', _string_list) # /EXPORT
_Same(_lib, 'ForceSymbolReferences', _string) # /INCLUDE
_Same(_lib, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_lib, 'IgnoreSpecificDefaultLibraries', _file_list) # /NODEFAULTLIB
_Same(_lib, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_lib, 'OutputFile', _file_name) # /OUT
_Same(_lib, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_lib, 'UseUnicodeResponseFiles', _boolean)
_Same(_lib, 'LinkTimeCodeGeneration', _boolean) # /LTCG
_Same(_lib, 'TargetMachine', _target_machine_enumeration)
# TODO(jeanluc) _link defines the same value that gets moved to
# ProjectReference. We may want to validate that they are consistent.
_Moved(_lib, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_MSBuildOnly(_lib, 'DisplayLibrary', _string) # /LIST Visible='false'
_MSBuildOnly(_lib, 'ErrorReporting',
_Enumeration([], new=['PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin', # /ERRORREPORT:QUEUE
'SendErrorReport', # /ERRORREPORT:SEND
'NoErrorReport'])) # /ERRORREPORT:NONE
_MSBuildOnly(_lib, 'MinimumRequiredVersion', _string)
_MSBuildOnly(_lib, 'Name', _file_name) # /NAME
_MSBuildOnly(_lib, 'RemoveObjects', _file_list) # /REMOVE
_MSBuildOnly(_lib, 'SubSystem', _subsystem_enumeration)
_MSBuildOnly(_lib, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_lib, 'TreatLibWarningAsErrors', _boolean) # /WX
_MSBuildOnly(_lib, 'Verbose', _boolean)
# Directives for converting VCManifestTool to Mt.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\mt.xml" for
# the schema of the MSBuild Lib settings.
# Options that have the same name in MSVS and MSBuild
_Same(_manifest, 'AdditionalManifestFiles', _file_list) # /manifest
_Same(_manifest, 'AdditionalOptions', _string_list)
_Same(_manifest, 'AssemblyIdentity', _string) # /identity:
_Same(_manifest, 'ComponentFileName', _file_name) # /dll
_Same(_manifest, 'GenerateCatalogFiles', _boolean) # /makecdfs
_Same(_manifest, 'InputResourceManifests', _string) # /inputresource
_Same(_manifest, 'OutputManifestFile', _file_name) # /out
_Same(_manifest, 'RegistrarScriptFile', _file_name) # /rgs
_Same(_manifest, 'ReplacementsFile', _file_name) # /replacements
_Same(_manifest, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_manifest, 'TypeLibraryFile', _file_name) # /tlb:
_Same(_manifest, 'UpdateFileHashes', _boolean) # /hashupdate
_Same(_manifest, 'UpdateFileHashesSearchPath', _file_name)
_Same(_manifest, 'VerboseOutput', _boolean) # /verbose
# Options that have moved location.
_MovedAndRenamed(_manifest, 'ManifestResourceFile',
'ManifestResourceCompile',
'ResourceOutputFileName',
_file_name)
_Moved(_manifest, 'EmbedManifest', '', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_manifest, 'DependencyInformationFile', _file_name)
_MSVSOnly(_manifest, 'UseFAT32Workaround', _boolean)
_MSVSOnly(_manifest, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_manifest, 'EnableDPIAwareness', _boolean)
_MSBuildOnly(_manifest, 'GenerateCategoryTags', _boolean) # /category
_MSBuildOnly(_manifest, 'ManifestFromManagedAssembly',
_file_name) # /managedassemblyname
_MSBuildOnly(_manifest, 'OutputResourceManifests', _string) # /outputresource
_MSBuildOnly(_manifest, 'SuppressDependencyElement', _boolean) # /nodependency
_MSBuildOnly(_manifest, 'TrackerLogDirectory', _folder_name)
|
|
#!/usr/bin/env python3
# Copyright (c) 2015 The Presidentielcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import ToHex, CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import *
from io import BytesIO
import time
'''
This test is meant to exercise activation of the first version bits soft fork
This soft fork will activate the following BIPs:
BIP 68 - nSequence relative lock times
BIP 112 - CHECKSEQUENCEVERIFY
BIP 113 - MedianTimePast semantics for nLockTime
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 82 blocks whose coinbases will be used to generate inputs for our tests
mine 61 blocks to transition from DEFINED to STARTED
mine 144 blocks only 100 of which are signaling readiness in order to fail to change state this period
mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN
mine 140 blocks and seed the block chain with the 82 inputs we will use for our tests at height 572
mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered
mine 1 block and test that enforcement has triggered (which triggers ACTIVE)
Test BIP 113 is enforced
Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height
Mine 1 block so next height is 581 and test BIP 68 now passes time but not height
Mine 1 block so next height is 582 and test BIP 68 now passes time and height
Test that BIP 112 is enforced
Various transactions will be used to test that the BIPs' rules are not enforced before the soft fork activates,
and that after the soft fork activates transactions pass and fail as they should according to the rules.
For each BIP, transactions of versions 1 and 2 will be tested.
----------------
BIP 113:
bip113tx - modify the nLocktime variable
BIP 68:
bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below
BIP 112:
bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
'''
base_relative_locktime = 10
seq_disable_flag = 1<<31
seq_random_high_bit = 1<<25
seq_type_flag = 1<<22
seq_random_low_bit = 1<<18
# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field
# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1
relative_locktimes = []
for b31 in range(2):
b25times = []
for b25 in range(2):
b22times = []
for b22 in range(2):
b18times = []
for b18 in range(2):
rlt = base_relative_locktime
if (b31):
rlt = rlt | seq_disable_flag
if (b25):
rlt = rlt | seq_random_high_bit
if (b22):
rlt = rlt | seq_type_flag
if (b18):
rlt = rlt | seq_random_low_bit
b18times.append(rlt)
b22times.append(b18times)
b25times.append(b22times)
relative_locktimes.append(b25times)
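# Illustrative sketch (not part of the original test): the same 2x2x2x2 table can be
# built with a nested comprehension over the flag constants defined above.
relative_locktimes_alt = [[[[base_relative_locktime
                             | (seq_disable_flag if b31 else 0)
                             | (seq_random_high_bit if b25 else 0)
                             | (seq_type_flag if b22 else 0)
                             | (seq_random_low_bit if b18 else 0)
                             for b18 in range(2)]
                            for b22 in range(2)]
                           for b25 in range(2)]
                          for b31 in range(2)]
assert relative_locktimes_alt == relative_locktimes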
def all_rlt_txs(txarray):
txs = []
for b31 in range(2):
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
txs.append(txarray[b31][b25][b22][b18])
return txs
class BIP68_112_113Test(ComparisonTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def setup_network(self):
# Must set the blockversion for this test
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=4']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def send_generic_input_tx(self, node, coinbases):
amount = Decimal("49.99")
return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount))))
def create_transaction(self, node, txid, to_address, amount):
inputs = [{ "txid" : txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
return tx
def sign_transaction(self, node, unsignedtx):
rawtx = ToHex(unsignedtx)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
    def generate_blocks(self, number, version, test_blocks=None):
        # Use None as the default to avoid sharing one mutable list across calls
        if test_blocks is None:
            test_blocks = []
for i in range(number):
block = self.create_test_block([], version)
test_blocks.append([block, True])
self.last_block_time += 600
self.tip = block.sha256
self.tipheight += 1
return test_blocks
def create_test_block(self, txs, version = 536870912):
block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600)
block.nVersion = version
block.vtx.extend(txs)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
return block
def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0):
txs = []
assert(len(bip68inputs) >= 16)
i = 0
for b31 in range(2):
b25txs = []
for b25 in range(2):
b22txs = []
for b22 in range(2):
b18txs = []
for b18 in range(2):
tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98"))
i += 1
tx.nVersion = txversion
tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
b18txs.append(self.sign_transaction(self.nodes[0], tx))
b22txs.append(b18txs)
b25txs.append(b22txs)
txs.append(b25txs)
return txs
def create_bip112special(self, input, txversion):
tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98"))
tx.nVersion = txversion
signtx = self.sign_transaction(self.nodes[0], tx)
signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
return signtx
def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0):
txs = []
assert(len(bip112inputs) >= 16)
i = 0
for b31 in range(2):
b25txs = []
for b25 in range(2):
b22txs = []
for b22 in range(2):
b18txs = []
for b18 in range(2):
tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98"))
i += 1
if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed
tx.vin[0].nSequence = base_relative_locktime + locktime_delta
else: # vary nSequence instead, OP_CSV is fixed
tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
tx.nVersion = txversion
signtx = self.sign_transaction(self.nodes[0], tx)
if (varyOP_CSV):
signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
else:
signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
b18txs.append(signtx)
b22txs.append(b18txs)
b25txs.append(b22txs)
txs.append(b25txs)
return txs
def get_tests(self):
long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs
self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
self.tipheight = 82 # height of the next block to build
self.last_block_time = long_past_time
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined')
test_blocks = self.generate_blocks(61, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 1
# Advanced from DEFINED to STARTED, height = 143
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 0
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False) # 2
# Failed to advance past STARTED, height = 287
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
# 108 out of 144 signal bit 0 to achieve lock-in
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False) # 3
# Advanced from STARTED to LOCKED_IN, height = 431
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# 140 more version 4 blocks
test_blocks = self.generate_blocks(140, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 4
### Inputs at height = 572
# Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block)
# Note we reuse inputs for v1 and v2 txs so must test these separately
# 16 normal inputs
bip68inputs = []
for i in range(16):
bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
# 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112basicinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
bip112basicinputs.append(inputs)
# 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112diverseinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
bip112diverseinputs.append(inputs)
# 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
# 1 normal input
bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
self.nodes[0].setmocktime(self.last_block_time + 600)
inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572
self.nodes[0].setmocktime(0)
self.tip = int("0x" + inputblockhash, 0)
self.tipheight += 1
self.last_block_time += 600
assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1)
# 2 more version 4 blocks
test_blocks = self.generate_blocks(2, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 5
# Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# Test both version 1 and version 2 transactions for all tests
# BIP113 test transaction will be modified before each use to put in appropriate block time
bip113tx_v1 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v2.nVersion = 2
# For BIP68 test all 16 relative sequence locktimes
bip68txs_v1 = self.create_bip68txs(bip68inputs, 1)
bip68txs_v2 = self.create_bip68txs(bip68inputs, 2)
# For BIP112 test:
# 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1)
bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2)
# 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1)
bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1)
# sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1)
bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2)
# sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1)
bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1)
# -1 OP_CSV OP_DROP input
bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1)
bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2)
### TESTING ###
##################################
### Before Soft Forks Activate ###
##################################
# All txs should pass
### Version 1 txs ###
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
success_txs.append(bip113signed1)
success_txs.append(bip112tx_special_v1)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v1))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 6
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Version 2 txs ###
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
success_txs.append(bip113signed2)
success_txs.append(bip112tx_special_v2)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v2))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 7
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# 1 more version 4 block to get us to height 575 so the fork should now be active for the next block
test_blocks = self.generate_blocks(1, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 8
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active')
#################################
### After Soft Forks Activate ###
#################################
### BIP 113 ###
# BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10
# BIP 113 tests should now pass if the locktime is < MTP
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Next block height = 580 after 4 blocks of random version
test_blocks = self.generate_blocks(4, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 13
### BIP 68 ###
### Version 1 txs ###
# All still pass
success_txs = []
success_txs.extend(all_rlt_txs(bip68txs_v1))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 14
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Version 2 txs ###
bip68success_txs = []
# All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
bip68success_txs.append(bip68txs_v2[1][b25][b22][b18])
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
bip68timetxs = []
for b25 in range(2):
for b18 in range(2):
bip68timetxs.append(bip68txs_v2[0][b25][1][b18])
for tx in bip68timetxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19
bip68heighttxs = []
for b25 in range(2):
for b18 in range(2):
bip68heighttxs.append(bip68txs_v2[0][b25][0][b18])
for tx in bip68heighttxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23
# Advance one block to 581
test_blocks = self.generate_blocks(1, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 24
# Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
bip68success_txs.extend(bip68timetxs)
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
for tx in bip68heighttxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29
# Advance one block to 582
test_blocks = self.generate_blocks(1, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 30
# All BIP 68 txs should pass
bip68success_txs.extend(bip68heighttxs)
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### BIP 112 ###
### Version 1 txs ###
# -1 OP_CSV tx should fail
yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
success_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18])
success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18])
yield TestInstance([[self.create_test_block(success_txs), True]]) # 33
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
fail_txs = []
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18])
fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18])
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81
### Version 2 txs ###
# -1 OP_CSV tx should fail
yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
success_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV
success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9
yield TestInstance([[self.create_test_block(success_txs), True]]) # 83
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
# All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
fail_txs = []
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
fail_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115
# If sequencelock types mismatch, tx should fail
fail_txs = []
for b25 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence
fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123
# Remaining txs should pass, just test masking works properly
success_txs = []
for b25 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence
success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV
yield TestInstance([[self.create_test_block(success_txs), True]]) # 124
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Additional test, of checking that comparison of two time types works properly
time_txs = []
for b25 in range(2):
for b18 in range(2):
tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18]
tx.vin[0].nSequence = base_relative_locktime | seq_type_flag
signtx = self.sign_transaction(self.nodes[0], tx)
time_txs.append(signtx)
yield TestInstance([[self.create_test_block(time_txs), True]]) # 125
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Missing aspects of test
## Testing empty stack fails
if __name__ == '__main__':
BIP68_112_113Test().main()
|
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import warnings
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import check_array, check_random_state
from collections import Counter
__author__ = 'stavrianos'
# Link to paper: bit.ly/22KgAnP
class ADASYN(object):
"""
Oversampling parent class with the main methods required by scikit-learn:
fit, transform and fit_transform
"""
def __init__(self,
ratio=0.5,
imb_threshold=0.5,
k=5,
random_state=None,
verbose=True):
"""
:ratio:
Growth percentage with respect to initial minority
            class size. For example, if ratio=0.65 then after
            resampling the minority class(es) will have 1.65 times
            their initial size
:imb_threshold:
The imbalance ratio threshold to allow/deny oversampling.
For example if imb_threshold=0.5 then minority class needs
to be at most half the size of the majority in order for
resampling to apply
:k:
Number of K-nearest-neighbors
:random_state:
seed for random number generation
:verbose:
Determines if messages will be printed to terminal or not
Extra Instance variables:
:self.X:
Feature matrix to be oversampled
:self.y:
Class labels for data
:self.clstats:
Class populations to determine minority/majority
:self.unique_classes_:
            Set of unique class labels
:self.maj_class_:
Label of majority class
:self.random_state_:
Seed
"""
self.ratio = ratio
self.imb_threshold = imb_threshold
self.k = k
self.random_state = random_state
self.verbose = verbose
self.clstats = {}
self.num_new = 0
self.index_new = []
def fit(self, X, y):
"""
Class method to define class populations and store them as instance
variables. Also stores majority class label
"""
self.X = check_array(X)
self.y = np.array(y).astype(np.int64)
self.random_state_ = check_random_state(self.random_state)
self.unique_classes_ = set(self.y)
# Initialize all class populations with zero
for element in self.unique_classes_:
self.clstats[element] = 0
        # Count occurrences of each class
for element in self.y:
self.clstats[element] += 1
# Find majority class
v = list(self.clstats.values())
k = list(self.clstats.keys())
self.maj_class_ = k[v.index(max(v))]
if self.verbose:
print(
'Majority class is %s and total number of classes is %s'
% (self.maj_class_, len(self.unique_classes_)))
def transform(self, X, y):
"""
        Applies the oversampling transformation to the data as proposed by
        the ADASYN algorithm. Returns the oversampled X, y
        """
        self.new_X, self.new_y = self.oversample()
        return self.new_X, self.new_y
def fit_transform(self, X, y):
"""
Fits the data and then returns the transformed version
"""
self.fit(X, y)
self.new_X, self.new_y = self.oversample()
self.new_X = np.concatenate((self.new_X, self.X), axis=0)
self.new_y = np.concatenate((self.new_y, self.y), axis=0)
return self.new_X, self.new_y
def generate_samples(self, x, knns, knnLabels, cl):
# List to store synthetically generated samples and their labels
new_data = []
new_labels = []
for ind, elem in enumerate(x):
            # calculate which k-neighbours belong to the minority class (their indices in x)
            # knn returns the example itself as its first neighbour, so that entry is
            # skipped; hence the slice that drops element 0 and the index + 1 offset
            # into knnLabels[ind]
            min_knns = [ele for index, ele in enumerate(knns[ind][1:-1])
                        if knnLabels[ind][index + 1] == cl]
if not min_knns:
continue
# generate gi synthetic examples for every minority example
for i in range(0, int(self.gi[ind])):
# randi holds an integer to choose a random minority kNNs
randi = self.random_state_.random_integers(
0, len(min_knns) - 1)
# l is a random number in [0,1)
l = self.random_state_.random_sample()
                # self.X[min_knns[randi]] is the x_zi in equation [5]
si = self.X[elem] + \
(self.X[min_knns[randi]] - self.X[elem]) * l
new_data.append(si)
new_labels.append(self.y[elem])
self.num_new += 1
return(np.asarray(new_data), np.asarray(new_labels))
def oversample(self):
"""
Preliminary calculations before generation of
        synthetic samples. Calculates and stores as instance
        variables: imb_degree (d), G, ri, gi as defined by equations
        [1], [2], [3], [4] in the original paper
"""
        try:
            # Checking if the variable exists, i.e. if fit() was called
            self.unique_classes_ = self.unique_classes_
        except AttributeError:
            raise RuntimeError("You need to fit() before applying transform(), "
                               "or simply use fit_transform()")
int_X = np.zeros([1, self.X.shape[1]])
int_y = np.zeros([1])
# Iterating through all minority classes to determine
# if they should be oversampled and to what extent
for cl in self.unique_classes_:
# Calculate imbalance degree and compare to threshold
imb_degree = float(self.clstats[cl]) / \
self.clstats[self.maj_class_]
if imb_degree > self.imb_threshold:
if self.verbose:
print('Class %s is within imbalance threshold' % cl)
else:
                # G is the number of synthetic examples to be
                # produced for the current minority class
                self.G = (self.clstats[self.maj_class_] - self.clstats[cl]) \
                         * self.ratio
                # ADASYN is built upon Euclidean distance, so the default p=2 is used
self.nearest_neighbors_ = NearestNeighbors(n_neighbors=self.k + 1)
self.nearest_neighbors_.fit(self.X)
# keeping indexes of minority examples
minx = [ind for ind, exam in enumerate(self.X) if self.y[ind] == cl]
# Computing kNearestNeighbors for every minority example
knn = self.nearest_neighbors_.kneighbors(
self.X[minx], return_distance=False)
# Getting labels of k-neighbors of each example to determine how many of them
# are of different class than the one being oversampled
knnLabels = self.y[knn.ravel()].reshape(knn.shape)
tempdi = [Counter(i) for i in knnLabels]
                # Calculating ri as defined in the ADASYN paper:
                # number of k-neighbours belonging to a class other than the minority,
                # divided by k, i.e. the fraction of "non-friendly" neighbours
self.ri = np.array(
[(sum(i.values())- i[cl]) / float(self.k) for i in tempdi])
# Normalizing so that ri is a density distribution (i.e.
# sum(ri)=1)
if np.sum(self.ri):
self.ri = self.ri / np.sum(self.ri)
                    # Calculating the number of synthetic examples that need to be
                    # generated for each minority instance and rounding to the nearest
                    # integer, since it can't produce e.g. 2.35 new examples.
self.gi = np.rint(self.ri * self.G)
# Generation of synthetic samples
inter_X, inter_y = self.generate_samples(
minx, knn, knnLabels, cl)
                    # in case no samples were generated at all, concatenation
                    # won't be attempted
if len(inter_X):
int_X = np.concatenate((int_X, inter_X), axis=0)
if len(inter_y):
int_y = np.concatenate((int_y, inter_y), axis=0)
        # New samples are concatenated at the beginning of the X, y arrays
        # index_new contains the indices of the artificial examples
        self.index_new = [i for i in range(0, self.num_new)]
        # Drop only the initial all-zero placeholder row; slicing with [1:-1]
        # would also discard the last generated sample
        return(int_X[1:], int_y[1:])
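# Minimal usage sketch (illustrative only, not part of the original module); assumes
# scikit-learn's make_classification is available to build a toy imbalanced dataset.
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=200, n_features=5, n_informative=3,
                                         n_redundant=0, weights=[0.9, 0.1],
                                         random_state=0)
    adasyn = ADASYN(ratio=0.5, k=5, random_state=0, verbose=False)
    X_res, y_res = adasyn.fit_transform(X_demo, y_demo)
    # The resampled arrays contain the synthetic samples followed by the originals
    print('class counts before:', Counter(y_demo), 'after:', Counter(y_res.astype(int)))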
|
|
# -*- coding: utf-8 -*-
#!/usr/bin/python2
'''
June 2017 by kyubyong park.
[email protected].
https://www.github.com/kyubyong/transformer
'''
from __future__ import print_function
import tensorflow as tf
def normalize(inputs,
epsilon = 1e-8,
scope="ln",
reuse=None):
'''Applies layer normalization.
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension has
`batch_size`.
      epsilon: A floating point number. A very small number for preventing ZeroDivisionError.
scope: Optional scope for `variable_scope`.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
      A tensor with the same shape and dtype as `inputs`.
'''
with tf.variable_scope(scope, reuse=reuse):
inputs_shape = inputs.get_shape()
params_shape = inputs_shape[-1:]
mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
# beta= tf.Variable(tf.zeros(params_shape))
# gamma = tf.Variable(tf.ones(params_shape))
beta = tf.get_variable('beta', params_shape, initializer=tf.zeros_initializer)
gamma = tf.get_variable('gamma', params_shape, initializer=tf.ones_initializer)
normalized = (inputs - mean) / ( (variance + epsilon) ** (.5) )
outputs = gamma * normalized + beta
return outputs
def embedding(inputs,
vocab_size,
num_units,
zero_pad=True,
scale=True,
scope="embedding",
reuse=None):
'''Embeds a given tensor.
Args:
inputs: A `Tensor` with type `int32` or `int64` containing the ids
to be looked up in `lookup table`.
vocab_size: An int. Vocabulary size.
num_units: An int. Number of embedding hidden units.
      zero_pad: A boolean. If True, all the values of the first row (id 0)
should be constant zeros.
      scale: A boolean. If True, the outputs are multiplied by sqrt(num_units).
scope: Optional scope for `variable_scope`.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
      A `Tensor` with one more rank than the input's. The last dimension
      should be `num_units`.
For example,
```
import tensorflow as tf
inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3)))
outputs = embedding(inputs, 6, 2, zero_pad=True)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
        print(sess.run(outputs))
>>
[[[ 0. 0. ]
[ 0.09754146 0.67385566]
[ 0.37864095 -0.35689294]]
[[-1.01329422 -1.09939694]
[ 0.7521342 0.38203377]
[-0.04973143 -0.06210355]]]
```
```
import tensorflow as tf
inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3)))
outputs = embedding(inputs, 6, 2, zero_pad=False)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
        print(sess.run(outputs))
>>
[[[-0.19172323 -0.39159766]
[-0.43212751 -0.66207761]
[ 1.03452027 -0.26704335]]
[[-0.11634696 -0.35983452]
[ 0.50208133 0.53509563]
[ 1.22204471 -0.96587461]]]
```
'''
with tf.variable_scope(scope, reuse=reuse):
lookup_table = tf.get_variable('lookup_table',
dtype=tf.float32,
shape=[vocab_size, num_units],
initializer=tf.contrib.layers.xavier_initializer())
if zero_pad:
lookup_table = tf.concat((tf.zeros(shape=[1, num_units]),
lookup_table[1:, :]), 0)
outputs = tf.nn.embedding_lookup(lookup_table, inputs)
if scale:
outputs = outputs * (num_units ** 0.5)
return outputs
def multihead_attention(queries,
keys,
num_units=None,
num_heads=8,
dropout_rate=0,
is_training=True,
causality=False,
scope="multihead_attention",
reuse=None):
'''Applies multihead attention.
Args:
queries: A 3d tensor with shape of [N, T_q, C_q].
keys: A 3d tensor with shape of [N, T_k, C_k].
num_units: A scalar. Attention size.
dropout_rate: A floating point number.
is_training: Boolean. Controller of mechanism for dropout.
causality: Boolean. If true, units that reference the future are masked.
num_heads: An int. Number of heads.
scope: Optional scope for `variable_scope`.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
    Returns:
A 3d tensor with shape of (N, T_q, C)
'''
with tf.variable_scope(scope, reuse=reuse):
# Set the fall back option for num_units
if num_units is None:
            num_units = queries.get_shape().as_list()[-1]
# Linear projections
Q = tf.layers.dense(queries, num_units, activation=tf.nn.relu) # (N, T_q, C)
K = tf.layers.dense(keys, num_units, activation=tf.nn.relu) # (N, T_k, C)
V = tf.layers.dense(keys, num_units, activation=tf.nn.relu) # (N, T_k, C)
# Split and concat
Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0) # (h*N, T_q, C/h)
K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0) # (h*N, T_k, C/h)
V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0) # (h*N, T_k, C/h)
# Multiplication
outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1])) # (h*N, T_q, T_k)
# Scale
outputs = outputs / (K_.get_shape().as_list()[-1] ** 0.5)
# Key Masking
key_masks = tf.sign(tf.abs(tf.reduce_sum(keys, axis=-1))) # (N, T_k)
key_masks = tf.tile(key_masks, [num_heads, 1]) # (h*N, T_k)
key_masks = tf.tile(tf.expand_dims(key_masks, 1), [1, tf.shape(queries)[1], 1]) # (h*N, T_q, T_k)
paddings = tf.ones_like(outputs)*(-2**32+1)
outputs = tf.where(tf.equal(key_masks, 0), paddings, outputs) # (h*N, T_q, T_k)
# Causality = Future blinding
if causality:
diag_vals = tf.ones_like(outputs[0, :, :]) # (T_q, T_k)
tril = tf.contrib.linalg.LinearOperatorTriL(diag_vals).to_dense() # (T_q, T_k)
masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(outputs)[0], 1, 1]) # (h*N, T_q, T_k)
paddings = tf.ones_like(masks)*(-2**32+1)
outputs = tf.where(tf.equal(masks, 0), paddings, outputs) # (h*N, T_q, T_k)
# Activation
outputs = tf.nn.softmax(outputs) # (h*N, T_q, T_k)
# Query Masking
query_masks = tf.sign(tf.abs(tf.reduce_sum(queries, axis=-1))) # (N, T_q)
query_masks = tf.tile(query_masks, [num_heads, 1]) # (h*N, T_q)
query_masks = tf.tile(tf.expand_dims(query_masks, -1), [1, 1, tf.shape(keys)[1]]) # (h*N, T_q, T_k)
        outputs *= query_masks # broadcasting. (h*N, T_q, T_k)
# Dropouts
outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=tf.convert_to_tensor(is_training))
# Weighted sum
outputs = tf.matmul(outputs, V_) # ( h*N, T_q, C/h)
# Restore shape
outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2 ) # (N, T_q, C)
# Residual connection
outputs += queries
# Normalize
outputs = normalize(outputs) # (N, T_q, C)
return outputs
def feedforward(inputs,
num_units=[2048, 512],
scope="multihead_attention_fw",
reuse=None):
'''Point-wise feed forward net.
Args:
inputs: A 3d tensor with shape of [N, T, C].
num_units: A list of two integers.
scope: Optional scope for `variable_scope`.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
A 3d tensor with the same shape and dtype as inputs
'''
with tf.variable_scope(scope, reuse=reuse):
# Inner layer
params = {"inputs": inputs, "filters": num_units[0], "kernel_size": 1,
"activation": tf.nn.relu, "use_bias": True}
outputs = tf.layers.conv1d(**params)
# Readout layer
params = {"inputs": outputs, "filters": num_units[1], "kernel_size": 1,
"activation": None, "use_bias": True}
outputs = tf.layers.conv1d(**params)
# Residual connection
outputs += inputs
# Normalize
outputs = normalize(outputs)
return outputs
def label_smoothing(inputs, epsilon=0.1):
'''Applies label smoothing. See https://arxiv.org/abs/1512.00567.
Args:
      inputs: A 3d tensor with shape of [N, T, V], where V is the vocabulary size.
epsilon: Smoothing rate.
For example,
```
import tensorflow as tf
inputs = tf.convert_to_tensor([[[0, 0, 1],
[0, 1, 0],
[1, 0, 0]],
[[1, 0, 0],
[1, 0, 0],
[0, 1, 0]]], tf.float32)
outputs = label_smoothing(inputs)
with tf.Session() as sess:
print(sess.run([outputs]))
>>
[array([[[ 0.03333334, 0.03333334, 0.93333334],
[ 0.03333334, 0.93333334, 0.03333334],
[ 0.93333334, 0.03333334, 0.03333334]],
[[ 0.93333334, 0.03333334, 0.03333334],
[ 0.93333334, 0.03333334, 0.03333334],
[ 0.03333334, 0.93333334, 0.03333334]]], dtype=float32)]
```
'''
K = inputs.get_shape().as_list()[-1] # number of channels
return ((1-epsilon) * inputs) + (epsilon / K)
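# Illustrative sketch (not part of the original module, assumes TensorFlow 1.x):
# wiring the helpers above into a single Transformer encoder block on dummy inputs.
if __name__ == '__main__':
    x = tf.placeholder(tf.int32, shape=(None, 10))                 # token ids, (N, T)
    enc = embedding(x, vocab_size=100, num_units=512, scope="enc_embed")
    enc = multihead_attention(queries=enc, keys=enc, num_units=512, num_heads=8,
                              dropout_rate=0.1, is_training=True, causality=False)
    enc = feedforward(enc, num_units=[2048, 512], scope="enc_ff")
    smoothed = label_smoothing(tf.one_hot(x, depth=100))           # (N, T, 100)
    print(enc, smoothed)                                           # static shapes only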
|
|
try:
from pyb import CAN
except ImportError:
print("SKIP")
raise SystemExit
from uarray import array
import micropython
import pyb
# test we can correctly create by id (2 handled in can2.py test)
for bus in (-1, 0, 1, 3):
try:
CAN(bus, CAN.LOOPBACK)
print("CAN", bus)
except ValueError:
print("ValueError", bus)
CAN(1).deinit()
CAN.initfilterbanks(14)
can = CAN(1)
print(can)
# Test state when de-init'd
print(can.state() == can.STOPPED)
can.init(CAN.LOOPBACK)
print(can)
print(can.any(0))
# Test state when freshly created
print(can.state() == can.ERROR_ACTIVE)
# Test that restart can be called
can.restart()
# Test info returns a sensible value
print(can.info())
# Catch all filter
can.setfilter(0, CAN.MASK16, 0, (0, 0, 0, 0))
can.send("abcd", 123, timeout=5000)
print(can.any(0), can.info())
print(can.recv(0))
can.send("abcd", -1, timeout=5000)
print(can.recv(0))
can.send("abcd", 0x7FF + 1, timeout=5000)
print(can.recv(0))
# Test too long message
try:
can.send("abcdefghi", 0x7FF, timeout=5000)
except ValueError:
print("passed")
else:
print("failed")
# Test that recv can work without allocating memory on the heap
buf = bytearray(10)
l = [0, 0, 0, memoryview(buf)]
l2 = None
micropython.heap_lock()
can.send("", 42)
l2 = can.recv(0, l)
assert l is l2
print(l, len(l[3]), buf)
can.send("1234", 42)
l2 = can.recv(0, l)
assert l is l2
print(l, len(l[3]), buf)
can.send("01234567", 42)
l2 = can.recv(0, l)
assert l is l2
print(l, len(l[3]), buf)
can.send("abc", 42)
l2 = can.recv(0, l)
assert l is l2
print(l, len(l[3]), buf)
micropython.heap_unlock()
# Test that recv can work with different arrays behind the memoryview
can.send("abc", 1)
print(bytes(can.recv(0, [0, 0, 0, memoryview(array("B", range(8)))])[3]))
can.send("def", 1)
print(bytes(can.recv(0, [0, 0, 0, memoryview(array("b", range(8)))])[3]))
# Test for non-list passed as second arg to recv
can.send("abc", 1)
try:
can.recv(0, 1)
except TypeError:
print("TypeError")
# Test for too-short-list passed as second arg to recv
can.send("abc", 1)
try:
can.recv(0, [0, 0, 0])
except ValueError:
print("ValueError")
# Test for non-memoryview passed as 4th element to recv
can.send("abc", 1)
try:
can.recv(0, [0, 0, 0, 0])
except TypeError:
print("TypeError")
# Test for read-only-memoryview passed as 4th element to recv
can.send("abc", 1)
try:
can.recv(0, [0, 0, 0, memoryview(bytes(8))])
except ValueError:
print("ValueError")
# Test for bad-typecode-memoryview passed as 4th element to recv
can.send("abc", 1)
try:
can.recv(0, [0, 0, 0, memoryview(array("i", range(8)))])
except ValueError:
print("ValueError")
del can
# Testing extended IDs
can = CAN(1, CAN.LOOPBACK, extframe=True)
# Catch all filter
can.setfilter(0, CAN.MASK32, 0, (0, 0))
print(can)
try:
can.send("abcde", 0x7FF + 1, timeout=5000)
except ValueError:
print("failed")
else:
r = can.recv(0)
if r[0] == 0x7FF + 1 and r[3] == b"abcde":
print("passed")
else:
print("failed, wrong data received")
# Test filters
for n in [0, 8, 16, 24]:
filter_id = 0b00001000 << n
filter_mask = 0b00011100 << n
id_ok = 0b00001010 << n
id_fail = 0b00011010 << n
can.clearfilter(0)
can.setfilter(0, pyb.CAN.MASK32, 0, (filter_id, filter_mask))
can.send("ok", id_ok, timeout=3)
if can.any(0):
msg = can.recv(0)
print((hex(filter_id), hex(filter_mask), hex(msg[0]), msg[3]))
can.send("fail", id_fail, timeout=3)
if can.any(0):
msg = can.recv(0)
print((hex(filter_id), hex(filter_mask), hex(msg[0]), msg[3]))
del can
# Test RxCallbacks
can = CAN(1, CAN.LOOPBACK)
can.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4))
can.setfilter(1, CAN.LIST16, 1, (5, 6, 7, 8))
def cb0(bus, reason):
print("cb0")
if reason == 0:
print("pending")
if reason == 1:
print("full")
if reason == 2:
print("overflow")
def cb1(bus, reason):
print("cb1")
if reason == 0:
print("pending")
if reason == 1:
print("full")
if reason == 2:
print("overflow")
def cb0a(bus, reason):
print("cb0a")
if reason == 0:
print("pending")
if reason == 1:
print("full")
if reason == 2:
print("overflow")
def cb1a(bus, reason):
print("cb1a")
if reason == 0:
print("pending")
if reason == 1:
print("full")
if reason == 2:
print("overflow")
can.rxcallback(0, cb0)
can.rxcallback(1, cb1)
can.send("11111111", 1, timeout=5000)
can.send("22222222", 2, timeout=5000)
can.send("33333333", 3, timeout=5000)
can.rxcallback(0, cb0a)
can.send("44444444", 4, timeout=5000)
can.send("55555555", 5, timeout=5000)
can.send("66666666", 6, timeout=5000)
can.send("77777777", 7, timeout=5000)
can.rxcallback(1, cb1a)
can.send("88888888", 8, timeout=5000)
print(can.recv(0))
print(can.recv(0))
print(can.recv(0))
print(can.recv(1))
print(can.recv(1))
print(can.recv(1))
can.send("11111111", 1, timeout=5000)
can.send("55555555", 5, timeout=5000)
print(can.recv(0))
print(can.recv(1))
del can
# Testing asynchronous send
can = CAN(1, CAN.LOOPBACK)
can.setfilter(0, CAN.MASK16, 0, (0, 0, 0, 0))
while can.any(0):
can.recv(0)
can.send("abcde", 1, timeout=0)
print(can.any(0))
while not can.any(0):
pass
print(can.recv(0))
try:
can.send("abcde", 2, timeout=0)
can.send("abcde", 3, timeout=0)
can.send("abcde", 4, timeout=0)
can.send("abcde", 5, timeout=0)
except OSError as e:
if str(e) == "16":
print("passed")
else:
print("failed")
pyb.delay(500)
while can.any(0):
print(can.recv(0))
# Testing rtr messages
bus1 = CAN(1, CAN.LOOPBACK)
while bus1.any(0):
bus1.recv(0)
bus1.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4))
bus1.setfilter(1, CAN.LIST16, 0, (5, 6, 7, 8), rtr=(True, True, True, True))
bus1.setfilter(2, CAN.MASK16, 0, (64, 64, 32, 32), rtr=(False, True))
bus1.send("", 1, rtr=True)
print(bus1.any(0))
bus1.send("", 5, rtr=True)
print(bus1.recv(0))
bus1.send("", 6, rtr=True)
print(bus1.recv(0))
bus1.send("", 7, rtr=True)
print(bus1.recv(0))
bus1.send("", 16, rtr=True)
print(bus1.any(0))
bus1.send("", 32, rtr=True)
print(bus1.recv(0))
# test HAL error, timeout
can = pyb.CAN(1, pyb.CAN.NORMAL)
try:
can.send("1", 1, timeout=50)
except OSError as e:
print(repr(e))
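# Illustrative sketch (not part of the original test): the zero-allocation receive
# pattern exercised above preallocates the 4-element list and a writable memoryview
# once and reuses them for every call, e.g.
#   rx = [0, 0, 0, memoryview(bytearray(8))]
#   while can.any(0):
#       can.recv(0, rx)   # fills id, RTR flag, FMI and the data buffer in place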
|
|
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
from .config import Configuration
import os
class Script:
products = []
workspaces = []
extra = ""
def __init__(self):
pass
def add_product(self, product):
self.workspaces = None
self.products.append(product)
def add_workspace(self, workspace):
self.products = None
self.workspaces.append(workspace)
def add_text(self, text):
self.extra += text + "\n\n"
def generate_products(self):
variables = ""
for key, val in Configuration.current.variables.items():
variables += key + "=" + val
variables += "\n"
verbose_flags = """
VERBOSE_FLAGS = """
if Configuration.current.verbose:
verbose_flags += "-v"
verbose_flags += "\n"
swift_triple = Configuration.current.target.swift_triple
base_flags = """
TARGET = """ + Configuration.current.target.triple + """
DSTROOT = """ + Configuration.current.install_directory.absolute() + """
"""
if swift_triple is not None:
base_flags += """
SWIFT_TARGET = """ + Configuration.current.target.swift_triple + """
SWIFT_ARCH = """ + Configuration.current.target.swift_arch + """
"""
base_flags += """
MODULE_CACHE_PATH = """ + Configuration.current.module_cache_directory.relative() + """
BUILD_DIR = """ + Configuration.current.build_directory.relative() + """
INTERMEDIATE_DIR = """ + Configuration.current.intermediate_directory.relative() + """
CLANG = """ + Configuration.current.clang + """
CLANGXX = """ + Configuration.current.clangxx + """
SWIFT = """ + Configuration.current.swift + """
SWIFTC = """ + Configuration.current.swiftc + """
SDKROOT = """ + Configuration.current.swift_sdk + """
AR = """ + Configuration.current.ar + """
OS = """ + Configuration.current.target.swift_sdk_name + """
ARCH = """ + Configuration.current.target.swift_arch + """
DYLIB_PREFIX = """ + Configuration.current.target.dynamic_library_prefix + """
DYLIB_SUFFIX = """ + Configuration.current.target.dynamic_library_suffix + """
STATICLIB_PREFIX = """ + Configuration.current.target.static_library_prefix + """
STATICLIB_SUFFIX = """ + Configuration.current.target.static_library_suffix + """
PREFIX = """ + Configuration.current.prefix + """
"""
if Configuration.current.requires_pkg_config:
base_flags += """
PKG_CONFIG = """ + Configuration.current.pkg_config + """
"""
if Configuration.current.system_root is not None:
base_flags += """
SYSROOT = """ + Configuration.current.system_root.absolute() + """
"""
base_flags += """
SRCROOT = """ + Configuration.current.source_root.relative() + """
BINUTILS_VERSION = 4.8
TARGET_LDSYSROOT =
"""
if Configuration.current.bootstrap_directory is not None:
base_flags += """
BOOTSTRAP_DIR = """ + Configuration.current.bootstrap_directory.relative() + """/common
TARGET_BOOTSTRAP_DIR = """ + Configuration.current.bootstrap_directory.relative() + """/${TARGET}
"""
c_flags = """
TARGET_CFLAGS = -fcolor-diagnostics -fdollars-in-identifiers -fblocks -fobjc-runtime=macosx-10.11 -fintegrated-as -fPIC --target=${TARGET} """
if Configuration.current.build_mode == Configuration.Debug:
c_flags += "-g -O0 "
elif Configuration.current.build_mode == Configuration.Release:
c_flags += "-O2 "
if Configuration.current.system_root is not None:
c_flags += "--sysroot=${SYSROOT}"
if Configuration.current.bootstrap_directory is not None:
c_flags += """ -I${BOOTSTRAP_DIR}/usr/include -I${BOOTSTRAP_DIR}/usr/local/include """
c_flags += """ -I${TARGET_BOOTSTRAP_DIR}/usr/include -I${TARGET_BOOTSTRAP_DIR}/usr/local/include """
c_flags += Configuration.current.extra_c_flags
swift_flags = "\nTARGET_SWIFTCFLAGS = -I${SDKROOT}/lib/swift/" + Configuration.current.target.swift_sdk_name + " -Xcc -fblocks -resource-dir ${SDKROOT}/lib/swift "
if swift_triple is not None:
swift_flags += "-target ${SWIFT_TARGET} "
if Configuration.current.system_root is not None:
swift_flags += "-sdk ${SYSROOT} "
if Configuration.current.bootstrap_directory is not None:
swift_flags += """ -I${BOOTSTRAP_DIR}/usr/include -I${BOOTSTRAP_DIR}/usr/local/include """
swift_flags += """ -I${TARGET_BOOTSTRAP_DIR}/usr/include -I${TARGET_BOOTSTRAP_DIR}/usr/local/include """
if Configuration.current.build_mode == Configuration.Debug:
swift_flags += "-g -Onone "
elif Configuration.current.build_mode == Configuration.Release:
swift_flags += "-O "
swift_flags += Configuration.current.extra_swift_flags
swift_flags += """
TARGET_SWIFTEXE_FLAGS = -I${SDKROOT}/lib/swift/""" + Configuration.current.target.swift_sdk_name + """ -L${SDKROOT}/lib/swift/""" + Configuration.current.target.swift_sdk_name + """ """
if Configuration.current.build_mode == Configuration.Debug:
swift_flags += "-g -Onone -enable-testing "
elif Configuration.current.build_mode == Configuration.Release:
swift_flags += " "
swift_flags += Configuration.current.extra_swift_flags
ld_flags = """
EXTRA_LD_FLAGS = """ + Configuration.current.extra_ld_flags
ld_flags += """
TARGET_LDFLAGS = --target=${TARGET} ${EXTRA_LD_FLAGS} -L ${SDKROOT}/lib/swift/""" + Configuration.current.target.swift_sdk_name + """/${ARCH} -L${SDKROOT}/lib/swift/""" + Configuration.current.target.swift_sdk_name + """ """
if Configuration.current.system_root is not None:
ld_flags += "--sysroot=${SYSROOT}"
if Configuration.current.bootstrap_directory is not None:
ld_flags += """ -L${TARGET_BOOTSTRAP_DIR}/usr/lib"""
if Configuration.current.build_mode == Configuration.Debug:
ld_flags += """ -rpath ${SDKROOT}/lib/swift/""" + Configuration.current.target.swift_sdk_name + """ """
if Configuration.current.linker is not None:
ld_flags += " -fuse-ld=" + Configuration.current.linker
if Configuration.current.toolchain is not None:
bin_dir = Configuration.current.toolchain
if not os.path.exists(bin_dir.path_by_appending("ld").relative()):
bin_dir = Configuration.current.toolchain.path_by_appending("bin")
c_flags += " -B" + bin_dir.relative()
ld_flags += " -B" + bin_dir.relative()
c_flags += "\n"
swift_flags += "\n"
ld_flags += "\n"
cxx_flags = """
TARGET_CXXFLAGS = -std=gnu++11 -I${SYSROOT}/usr/include/c++/${BINUTILS_VERSION} -I${SYSROOT}/usr/include/${TARGET}/c++/${BINUTILS_VERSION}
"""
ar_flags = """
AR_FLAGS = rcs
"""
flags = variables + verbose_flags + base_flags + c_flags + swift_flags + cxx_flags + ld_flags + ar_flags
cp_command = """
rule Cp
command = mkdir -p `dirname $out`; /bin/cp -r $in $out
description = Cp $in
"""
compilec_command = """
rule CompileC
command = mkdir -p `dirname $out`; ${CLANG} ${TARGET_CFLAGS} $flags ${VERBOSE_FLAGS} -c $in -o $out
description = CompileC: $in
rule CompileCxx
command = mkdir -p `dirname $out`; ${CLANGXX} ${TARGET_CFLAGS} ${TARGET_CXXFLAGS} $flags ${VERBOSE_FLAGS} -c $in -o $out
description = CompileCxx: $in
"""
swiftc_command = """
rule CompileSwift
command = mkdir -p `dirname $out`; mkdir -p ${MODULE_CACHE_PATH}; ${SWIFT} -frontend -c $module_sources ${TARGET_SWIFTCFLAGS} $flags -module-name $module_name -module-link-name $module_name -o $out -emit-module-path $out.~partial.swiftmodule -emit-module-doc-path $out.~partial.swiftdoc -emit-dependencies-path $out.d -emit-reference-dependencies-path $out.swiftdeps -module-cache-path ${MODULE_CACHE_PATH}
description = CompileSwift: $in
depfile = $out.d
rule MergeSwiftModule
command = mkdir -p `dirname $out`; ${SWIFT} -frontend -sil-merge-partial-modules -emit-module $partials ${TARGET_SWIFTCFLAGS} $flags -module-cache-path ${MODULE_CACHE_PATH} -module-link-name $module_name -o $out
description = Merge $out
"""
assembler_command = """
rule Assemble
command = mkdir -p `dirname $out`; ${CLANG} -x assembler-with-cpp -c $in -o $out ${TARGET_CFLAGS} $flags ${VERBOSE_FLAGS}
description = Assemble: $in
"""
link_command = """
rule Link
command = mkdir -p `dirname $out`; ${CLANG} ${TARGET_LDFLAGS} ${VERBOSE_FLAGS} $start $in $end $flags -o $out"""
if Configuration.current.verbose:
link_command += "-Xlinker --verbose"
link_command += """
description = Link: $out
rule Archive
command = mkdir -p `dirname $out`; ${AR} ${AR_FLAGS} $out $in
description = Archive: $out
"""
swift_build_command = """
rule SwiftExecutable
command = mkdir -p `dirname $out`; ${SWIFTC} ${TARGET_SWIFTEXE_FLAGS} ${EXTRA_LD_FLAGS} $flags $in -o $out
description = SwiftExecutable: $out
"""
commands = cp_command + compilec_command + swiftc_command + assembler_command + link_command + swift_build_command
script = flags + commands
for product in self.products:
script += "".join([product_build_command for product_build_command in product.generate() if not isinstance(product_build_command, list)])
script += """
rule RunReconfigure
command = ./configure --reconfigure
description = Reconfiguring build script.
build ${BUILD_DIR}/.reconfigure: RunReconfigure
build reconfigure: phony | ${BUILD_DIR}/.reconfigure
"""
script += self.extra
script += "\n\n"
return script
def generate_workspaces(self):
build_project_command = """
rule BuildProject
command = pushd $project; ninja; popd
"""
script = build_project_command
for workspace in self.workspaces:
script += workspace.generate()
script += "\n\n"
return script
def generate(self):
script = None
if self.workspaces is None:
script = self.generate_products()
script_file = open(Configuration.current.build_script_path.absolute(), 'w')
script_file.write(script)
script_file.close()
else:
for workspace in self.workspaces:
workspace.configure()
script = self.generate_workspaces()
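# Illustrative usage sketch (assumption, not from the original module; `some_product`
# is a hypothetical Product instance built elsewhere by the configure step):
#   script = Script()
#   script.add_product(some_product)
#   script.add_text("default all\n")
#   script.generate()   # writes the ninja build script to build_script_path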
|
|
#!/usr/bin/env python
'''======================================================
Created by: D. Spencer Maughan
Last updated: February 2015
File name: DF_experiment_joy.py
Organization: RISC Lab, Utah State University
Notes:
This file is meant to allow use of the joystick
to toggle between trajectories.
======================================================'''
import roslib; roslib.load_manifest('ardrone_tutorials')
roslib.load_manifest('risc_msgs')
import rospy
from math import *
import rospkg
import numpy as np
import scipy.linalg as la
import time
#=======================#
# Messages Needed #
#=======================#
from risc_msgs.msg import *
from ardrone_autonomy.msg import Navdata
from sensor_msgs.msg import Joy
from geometry_msgs.msg import PointStamped
from std_msgs.msg import Empty
#========================#
# Globals #
#========================#
PI = 3.141592653589793
Threshold = 1000
states = Cortex()
states.Obj = [States()]*1
euler_max = 0.349066 #in radians
max_yaw_rate = .3490659 #in radians/sec
max_alt_rate = 1000 # in mm/sec
rate = 45 # Hz
start_time = 0
back = 0
forward = 0
mode = 1 # current mode, one of the 6 cases listed below
old_mode = 0
cases = ['Origin','Slanted Figure Eight','Origin',\
'Flat Figure Eight','Origin','Circle']
#==================#
# Publishers #
#==================#
pubTakeoff = rospy.Publisher('/ardrone/takeoff',Empty, queue_size = 1)
pub_ctrl = rospy.Publisher('/controls', Controls, queue_size = 1)
pub_traj = rospy.Publisher('/trajectory', Trajectories, queue_size = 1)
#=====================#
# Gain Matrices #
#=====================#
#K_way = np.matrix([[ .1, 0, 0, .25, 0, 0, 0],\
# [ 0, .1, 0, 0, .25, 0, 0],\
# [ 0, 0, -.4, 0, 0, -0.7, 0],\
# [ 0, 0, 0, 0, 0, 0, 1]])
K_way = np.matrix([[ .2, 0, 0, .34, 0, 0, 0],\
[ 0, .2, 0, 0, .34, 0, 0],\
[ 0, 0, -.6, 0, 0, -6.6, 0],\
[ 0, 0, 0, 0, 0, 0, 1]])
K_slf8 = np.matrix([[ .27, 0, 0, .7, 0, 0, 0],\
[ 0, .27, 0, 0, .7, 0, 0],\
[ 0, 0, -.6, 0, 0, -6.6, 0],\
[ 0, 0, 0, 0, 0, 0, 1]])
K_flf8 = np.matrix([[ .27, 0, 0, .7, 0, 0, 0],\
[ 0, .27, 0, 0, .7, 0, 0],\
[ 0, 0, -.6, 0, 0, -6.6, 0],\
[ 0, 0, 0, 0, 0, 0, 1]])
K_crcl = np.matrix([[ .27, 0, 0, .7, 0, 0, 0],\
[ 0, .27, 0, 0, .7, 0, 0],\
[ 0, 0, -.6, 0, 0, -6.6, 0],\
[ 0, 0, 0, 0, 0, 0, 1]])
#===================================#
# Radians between + or - pi/2 #
#===================================#
def pi2pi(angle):
if abs(angle)>PI/2:
if angle>0:
angle = angle-PI
else:
angle = angle+PI
return angle
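# Illustrative examples (not part of the original script): pi2pi(2.0) ~= -1.14 and
# pi2pi(-2.0) ~= 1.14; angles already inside [-pi/2, pi/2] are returned unchanged.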
#==============#
# Get Joy #
#==============#
def GetJoy(joy):
global start_time, mode, old_mode, forward, back
if (joy.buttons[5] == 1 and forward == 1) or (joy.buttons[4] == 1 and back == -1):
mode = mode
else:
back = -joy.buttons[4]
forward = joy.buttons[5]
old_mode = mode
start_time = rospy.get_time()
mode = mode + back + forward
if mode > 6:
mode = 1
if mode < 1:
mode = 6
#=====================#
# Get Trajectory #
#=====================#
def GetTrajectory(period,a,b,c,n,w1,w2,w3,case):
global start_time, time_past, cases, pub_traj
time_now = rospy.get_time()
t = time_now-start_time
WP = Trajectories()
WP.Obj = [Trajectory()]*1
#=================#
# Trajectory #
#=================#
traj = Trajectory()
traj.name = cases[case-1]
# Position
traj.x = a*cos(w2*t)
traj.y = b*sin(w1*t)
traj.z = n+c*sin(w3*t)
traj.psi = 0
# Velocity
traj.xdot = -a*w2*sin(w2*t)
traj.ydot = b*w1*cos(w1*t)
traj.zdot = c*w3*cos(w3*t)
traj.psidot = 0
# Acceleration
traj.xddot = -a*w2*w2*cos(w2*t)
traj.yddot = -b*w1*w1*sin(w1*t)
traj.zddot = -c*w3*w3*sin(w3*t)
traj.psiddot = 0
WP.Obj = [traj]
pub_traj.publish(WP)
return WP
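# Illustrative note (not part of the original script): with w2 = w1/2 and w3 = w1 the
# position (a*cos(w2*t), b*sin(w1*t), n + c*sin(w3*t)) traces a figure eight; the
# xdot/ydot/zdot and xddot/yddot/zddot fields above are its analytic first and second
# time derivatives.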
#========================#
# Get Cortex States #
#========================#
def GetStates(S):
global states
states = S
#============#
# Origin #
#============#
def Origin():
global K_way
traj = GetTrajectory(10, 0,0,0,1,0,0,0,mode)
Basic_Controller(traj, K_way)
#======================#
# Slanted Figure 8 #
#======================#
def Slanted_Figure_8():
global K_slf8,cycles
# Trajectory Variables
period = 10 # seconds
a = 1
b = 0.5
c = 0.5
n = 1
w1 = 2*PI/period
w2 = w1/2
w3 = w1
traj = GetTrajectory(period, a, b, c, n, w1, w2, w3,mode)
Basic_Controller(traj, K_slf8)
#===================#
# Flat Figure 8 #
#===================#
def Flat_Figure_8():
global K_flf8,cycles
# Trajectory Variables
period = 10 # seconds
a = 1
b = 0.5
c = 0.0
n = 1
w1 = 2*PI/period
w2 = w1/2
w3 = w1
traj = GetTrajectory(period, a, b, c, n, w1, w2, w3,mode)
Basic_Controller(traj, K_flf8)
#============#
# Circle #
#============#
def Circle():
global K_crcl
# Trajectory Variables
period = 8 # seconds
a = 0.8
b = 0.8
c = 0
n = 1
w1 = 2*PI/period
w2 = w1
w3 = w1
traj = GetTrajectory(period, a, b, c, n, w1, w2, w3,mode)
Basic_Controller(traj, K_crcl)
#========================#
# Basic Controller #
#========================#
def Basic_Controller(traj,K):
global states,PI, euler_max, max_yaw_rate, max_alt_rate, pub_ctrl
#rospy.loginfo("In Basic controller")
Ctrl = Controls()
# Initiate Control Messages
bodies = 1
Ctrl.Obj = [Control()]*bodies
Ctrl.header.stamp = states.header.stamp
g = 9.81
m = .450 # ARDrone mass
#===================================#
# Get State Trajectory Errors #
#===================================#
if states.Obj[0].visible:
X = np.asmatrix(np.zeros((7,1)))
X[0] = traj.Obj[0].x-states.Obj[0].x
X[1] = traj.Obj[0].y-states.Obj[0].y
X[2] = traj.Obj[0].z-states.Obj[0].z
X[3] = traj.Obj[0].xdot-states.Obj[0].u
X[4] = traj.Obj[0].ydot-states.Obj[0].v
X[5] = traj.Obj[0].zdot-states.Obj[0].w
X[6] = pi2pi(traj.Obj[0].psi)-states.Obj[0].psi*PI/180
#============================================#
# Differential Flatness Control Input #
#============================================#
# LQR input
utilde = -K*X
# required input
u_r = np.matrix([[traj.Obj[0].xddot],[traj.Obj[0].yddot],[traj.Obj[0].zddot],[traj.Obj[0].psidot]])
u = utilde-u_r+np.matrix([[0],[0],[9.81],[0]])
#==================================#
# Rotate to Vehicle 1 Frame #
#==================================#
psi = states.Obj[0].psi*PI/180
rotZ = np.matrix([[cos(-psi), -sin(-psi), 0],[sin(-psi), cos(-psi), 0],[0, 0, 1]])
Cart = np.matrix([[-1, 0, 0],[0, -1, 0],[0, 0, 1]]) # fix x and y directions
u[:-1] = Cart*rotZ*u[:-1]
#===================================#
# Normalize given the Thrust #
#===================================#
T = sqrt(u[0:3].T*u[0:3])
u[:-1] = np.divide(u[:-1],T)
#==================#
# Set Controls #
#==================#
# Controls for Ardrone
# -phi = right... +phi = left
# -theta = back... +theta = forward
# -psi = right... +psi = left
ctrl = Control()
ctrl.name = states.Obj[0].name
ctrl.phi = asin(u[1,-1])/euler_max
ctrl.theta = asin(u[0,-1])/euler_max
ctrl.psi = u[3,-1] /max_yaw_rate
ctrl.T = T*m
Ctrl.Obj[0] = ctrl
Ctrl.header = states.header
#rospy.loginfo("latency = %f",states.header.stamp.to_sec()-rospy.get_time())
pub_ctrl.publish(Ctrl)
def Datahandler():
global mode, old_mode
if mode == 1 or mode == 3 or mode == 5:
Origin()
if old_mode != mode:
rospy.loginfo("Origin")
old_mode = mode
if mode == 2:
Slanted_Figure_8()
if old_mode != mode:
rospy.loginfo("Slanted Figure 8 Trajectory")
old_mode = mode
if mode == 4:
Flat_Figure_8()
if old_mode != mode:
rospy.loginfo("Flat Figure 8 Trajectory")
old_mode = mode
    if mode == 6:
        Circle()
if old_mode != mode:
rospy.loginfo("Circular Trajectory")
old_mode = mode
if rospy.get_param('controller_status',False):
start_time = rospy.get_time()
#===================#
# Main #
#===================#
if __name__=='__main__':
import sys
rospy.init_node('LQR_controller')
#=======================#
# quad parameters #
#=======================#
euler_max = float(rospy.get_param("euler_angle_max","0.349066")) #in radians
max_yaw_rate = float(rospy.get_param("control_yaw",".3490659")) #in radians/sec
max_alt_rate = float(rospy.get_param("control_vz_max","1000")) #in mm/sec
switch_time = rospy.get_time()
#=====================================#
# Set up Publish/Subscribe Loop #
#=====================================#
    # Create the subscribers once, before the loop, so each callback is registered a single time
    sub_cortex = rospy.Subscriber('/cortex_raw' , Cortex, GetStates, queue_size = 1, buff_size = 2**24)
    sub_joy = rospy.Subscriber('/joy' , Joy, GetJoy)
    r = rospy.Rate(rate)
    while not rospy.is_shutdown():
        Datahandler()
        r.sleep()
|
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Contributing:
1. Add the .csv file
2. Run import
3. Then run translate
"""
# loading
# doctype, page, report
# boot(startup)
# frappe.require
# frappe._
import frappe, os, re, codecs, json
def guess_language_from_http_header(lang):
"""set frappe.local.lang from HTTP headers at beginning of request"""
if not lang:
return frappe.local.lang
guess = None
lang_list = get_all_languages() or []
if ";" in lang: # not considering weightage
lang = lang.split(";")[0]
if "," in lang:
lang = lang.split(",")
else:
lang = [lang]
for l in lang:
code = l.strip()
if code in lang_list:
guess = code
break
		# if the variant (e.g. pt-BR) is not set up, fall back to its parent language (pt)
if "-" in code:
code = code.split("-")[0]
if code in lang_list:
guess = code
break
return guess or frappe.local.lang
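# Illustrative example for guess_language_from_http_header(): an Accept-Language value of
# "pt-BR,en" is split on ",", "pt-BR" is tried first, and if only "pt" is installed the
# variant falls back to its parent code and "pt" is returned.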
def get_user_lang(user=None):
"""set frappe.local.lang from user preferences on session beginning or resumption"""
if not user:
user = frappe.session.user
# via cache
lang = frappe.cache().get_value("lang:" + user)
if not lang:
# if defined in user profile
user_lang = frappe.db.get_value("User", user, "language")
if user_lang and user_lang!="Loading...":
lang = get_lang_dict().get(user_lang)
else:
default_lang = frappe.db.get_default("lang")
lang = default_lang or frappe.local.lang
frappe.cache().set_value("lang:" + user, lang)
return lang
def set_default_language(language):
lang = get_lang_dict()[language]
frappe.db.set_default("lang", lang)
frappe.local.lang = lang
def get_all_languages():
return [a.split()[0] for a in get_lang_info()]
def get_lang_dict():
return dict([[a[1], a[0]] for a in [a.split() for a in get_lang_info()]])
def get_lang_info():
return frappe.cache().get_value("langinfo",
lambda:frappe.get_file_items(os.path.join(frappe.local.sites_path, "languages.txt")))
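# Note: languages.txt is expected to hold one language per line as "<code> <name>";
# get_all_languages() returns the codes and get_lang_dict() maps the name back to its code.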
def get_dict(fortype, name=None):
fortype = fortype.lower()
cache = frappe.cache()
cache_key = "translation_assets:" + frappe.local.lang
asset_key = fortype + ":" + (name or "-")
translation_assets = cache.get_value(cache_key) or {}
	if asset_key not in translation_assets:
if fortype=="doctype":
messages = get_messages_from_doctype(name)
elif fortype=="page":
messages = get_messages_from_page(name)
elif fortype=="report":
messages = get_messages_from_report(name)
elif fortype=="include":
messages = get_messages_from_include_files()
elif fortype=="jsfile":
messages = get_messages_from_file(name)
elif fortype=="boot":
messages = get_messages_from_include_files()
messages += frappe.db.sql_list("select name from tabDocType")
messages += frappe.db.sql_list("select name from `tabModule Def`")
translation_assets[asset_key] = make_dict_from_messages(messages)
cache.set_value(cache_key, translation_assets)
return translation_assets[asset_key]
def add_lang_dict(code):
messages = extract_messages_from_code(code)
code += "\n\n$.extend(frappe._messages, %s)" % json.dumps(make_dict_from_messages(messages))
return code
def make_dict_from_messages(messages, full_dict=None):
out = {}
	if full_dict is None:
full_dict = get_full_dict(frappe.local.lang)
for m in messages:
if m in full_dict:
out[m] = full_dict[m]
return out
def get_lang_js(fortype, name):
return "\n\n$.extend(frappe._messages, %s)" % json.dumps(get_dict(fortype, name))
def get_full_dict(lang):
if lang == "en": return {}
return frappe.cache().get_value("lang:" + lang, lambda:load_lang(lang))
def load_lang(lang, apps=None):
out = {}
for app in (apps or frappe.get_all_apps(True)):
path = os.path.join(frappe.get_pymodule_path(app), "translations", lang + ".csv")
if os.path.exists(path):
cleaned = dict([item for item in dict(read_csv_file(path)).iteritems() if item[1]])
out.update(cleaned)
return out
def clear_cache():
cache = frappe.cache()
cache.delete_value("langinfo")
for lang in get_all_languages():
cache.delete_value("lang:" + lang)
cache.delete_value("translation_assets:" + lang)
def get_messages_for_app(app):
messages = []
modules = ", ".join(['"{}"'.format(m.title().replace("_", " ")) \
for m in frappe.local.app_modules[app]])
# doctypes
if modules:
for name in frappe.db.sql_list("""select name from tabDocType
where module in ({})""".format(modules)):
messages.extend(get_messages_from_doctype(name))
# pages
for name, title in frappe.db.sql("""select name, title from tabPage
where module in ({})""".format(modules)):
messages.append(title or name)
messages.extend(get_messages_from_page(name))
# reports
for name in frappe.db.sql_list("""select tabReport.name from tabDocType, tabReport
where tabReport.ref_doctype = tabDocType.name
and tabDocType.module in ({})""".format(modules)):
messages.append(name)
messages.extend(get_messages_from_report(name))
# app_include_files
messages.extend(get_messages_from_include_files(app))
# server_messages
messages.extend(get_server_messages(app))
return list(set(messages))
def get_messages_from_doctype(name):
messages = []
meta = frappe.get_meta(name)
messages = [meta.name, meta.module]
for d in meta.get("fields"):
messages.extend([d.label, d.description])
if d.fieldtype=='Select' and d.options \
and not d.options.startswith("link:") \
and not d.options.startswith("attach_files:"):
options = d.options.split('\n')
if not "icon" in options[0]:
messages.extend(options)
# extract from js, py files
doctype_file_path = frappe.get_module_path(meta.module, "doctype", meta.name, meta.name)
messages.extend(get_messages_from_file(doctype_file_path + ".js"))
return clean(messages)
def get_messages_from_page(name):
return get_messages_from_page_or_report("Page", name)
def get_messages_from_report(name):
report = frappe.get_doc("Report", name)
messages = get_messages_from_page_or_report("Report", name,
frappe.db.get_value("DocType", report.ref_doctype, "module"))
if report.query:
messages.extend(re.findall('"([^:,^"]*):', report.query))
messages.append(report.report_name)
return clean(messages)
def get_messages_from_page_or_report(doctype, name, module=None):
if not module:
module = frappe.db.get_value(doctype, name, "module")
file_path = frappe.get_module_path(module, doctype, name, name)
messages = get_messages_from_file(file_path + ".js")
return clean(messages)
def get_server_messages(app):
messages = []
for basepath, folders, files in os.walk(frappe.get_pymodule_path(app)):
for dontwalk in (".git", "public", "locale"):
if dontwalk in folders: folders.remove(dontwalk)
for f in files:
if f.endswith(".py") or f.endswith(".html"):
messages.extend(get_messages_from_file(os.path.join(basepath, f)))
return clean(messages)
def get_messages_from_include_files(app_name=None):
messages = []
for file in (frappe.get_hooks("app_include_js") or []) + (frappe.get_hooks("web_include_js") or []):
messages.extend(get_messages_from_file(os.path.join(frappe.local.sites_path, file)))
return clean(messages)
def get_messages_from_file(path):
"""get list of messages from a code file"""
if os.path.exists(path):
with open(path, 'r') as sourcefile:
return extract_messages_from_code(sourcefile.read(), path.endswith(".py"))
else:
return []
def extract_messages_from_code(code, is_py=False):
messages = []
messages += re.findall('_\("([^"]*)"', code)
messages += re.findall("_\('([^']*)'", code)
if is_py:
messages += re.findall('_\("{3}([^"]*)"{3}.*\)', code, re.S)
return clean(messages)
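# extract_messages_from_code() relies on the regexes above: they capture strings wrapped in the
# translation helper, i.e. _("...") and _('...'), plus triple-quoted _("""...""") blocks when
# scanning Python sources (is_py=True).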
def clean(messages):
l = []
messages = list(set(messages))
for m in messages:
if m:
if re.search("[a-z]", m) and not m.startswith("icon-") and not m.endswith("px") and not m.startswith("eval:"):
l.append(m)
return l
def read_csv_file(path):
from csv import reader
with codecs.open(path, 'r', 'utf-8') as msgfile:
data = msgfile.read()
data = reader([r.encode('utf-8') for r in data.splitlines()])
newdata = [[unicode(val, 'utf-8') for val in row] for row in data]
return newdata
def write_csv_file(path, app_messages, lang_dict):
app_messages.sort()
from csv import writer
with open(path, 'w') as msgfile:
w = writer(msgfile)
for m in app_messages:
t = lang_dict.get(m, '')
		# strip whitespace inside numbered placeholders, e.g. "{ 0 }" -> "{0}"
t = re.sub('{\s?([0-9]+)\s?}', "{\g<1>}", t)
w.writerow([m.encode('utf-8'), t.encode('utf-8')])
def get_untranslated(lang, untranslated_file, get_all=False):
"""translate objects using Google API. Add you own API key for translation"""
clear_cache()
apps = frappe.get_all_apps(True)
messages = []
untranslated = []
for app in apps:
messages.extend(get_messages_for_app(app))
if get_all:
print str(len(messages)) + " messages"
with open(untranslated_file, "w") as f:
for m in messages:
f.write((m + "\n").encode("utf-8"))
else:
full_dict = get_full_dict(lang)
for m in messages:
if not full_dict.get(m):
untranslated.append(m)
if untranslated:
print str(len(untranslated)) + " missing translations of " + str(len(messages))
with open(untranslated_file, "w") as f:
for m in untranslated:
f.write((m + "\n").encode("utf-8"))
else:
print "all translated!"
def update_translations(lang, untranslated_file, translated_file):
clear_cache()
full_dict = get_full_dict(lang)
full_dict.update(dict(zip(frappe.get_file_items(untranslated_file),
frappe.get_file_items(translated_file))))
for app in frappe.get_all_apps(True):
write_translations_file(app, lang, full_dict)
def rebuild_all_translation_files():
for lang in get_all_languages():
for app in frappe.get_all_apps():
write_translations_file(app, lang)
def write_translations_file(app, lang, full_dict=None):
tpath = frappe.get_pymodule_path(app, "translations")
frappe.create_folder(tpath)
write_csv_file(os.path.join(tpath, lang + ".csv"),
get_messages_for_app(app), full_dict or get_full_dict(lang))
|
|
'''
Created on Jun 10, 2015
@author: mzwier
'''
import logging
log = logging.getLogger(__name__)
from .core import ZMQCore, Message, Task, Result, ZMQWorkerMissing, ZMQWMEnvironmentError, IsNode
from .core import randport
from .worker import ZMQWorker
from .node import ZMQNode
import work_managers
from work_managers import WorkManager, WMFuture
import multiprocessing
from .core import PassiveMultiTimer
import zmq
from collections import deque
import socket, re, json
class ZMQWorkManager(ZMQCore,WorkManager,IsNode):
@classmethod
def add_wm_args(cls, parser, wmenv=None):
if wmenv is None:
wmenv = work_managers.environment.default_env
wm_group = parser.add_argument_group('options for ZeroMQ ("zmq") work manager (master or node)')
wm_group.add_argument(wmenv.arg_flag('zmq_mode'), metavar='MODE', choices=('master','node','server','client'),
help='Operate as a master (server) or a node (workers/client). '
+'"server" is a deprecated synonym for "master" and "client" is a '
+'deprecated synonym for "node".')
wm_group.add_argument(wmenv.arg_flag('zmq_comm_mode'), metavar='COMM_MODE', choices=('ipc', 'tcp'),
                          help='Use the given communication mode -- TCP or IPC (Unix-domain) sockets -- '
+'for communication within a node. IPC (the default) may be more '
+'efficient but is not available on (exceptionally rare) systems '
+'without node-local storage (e.g. /tmp); on such systems, TCP may be used instead.')
wm_group.add_argument(wmenv.arg_flag('zmq_write_host_info'), metavar='INFO_FILE',
help='Store hostname and port information needed to connect to this instance '
+'in INFO_FILE. This allows the master and nodes assisting in '
+'coordinating the communication of other nodes to choose ports '
+'randomly. Downstream nodes read this file with '
                               +wmenv.arg_flag('zmq_read_host_info') + ' and know how to connect.')
wm_group.add_argument(wmenv.arg_flag('zmq_read_host_info'), metavar='INFO_FILE',
help='Read hostname and port information needed to connect to the master '
+'(or other coordinating node) from INFO_FILE. '
+'This allows the master and nodes assisting in '
+'coordinating the communication of other nodes to choose ports '
+'randomly, writing that information with '
+wmenv.arg_flag('zmq_write_host_info') + ' for this instance to read.')
wm_group.add_argument(wmenv.arg_flag('zmq_upstream_rr_endpoint'), metavar='ENDPOINT',
help='ZeroMQ endpoint to which to send request/response (task and result) '
+'traffic toward the master.')
wm_group.add_argument(wmenv.arg_flag('zmq_upstream_ann_endpoint'), metavar='ENDPOINT',
help='ZeroMQ endpoint on which to receive announcement '
+'(heartbeat and shutdown notification) traffic from the master.')
wm_group.add_argument(wmenv.arg_flag('zmq_downstream_rr_endpoint'), metavar='ENDPOINT',
help='ZeroMQ endpoint on which to listen for request/response '
+'(task and result) traffic from subsidiary workers.')
wm_group.add_argument(wmenv.arg_flag('zmq_downstream_ann_endpoint'), metavar='ENDPOINT',
help='ZeroMQ endpoint on which to send announcement '
+'(heartbeat and shutdown notification) traffic toward workers.')
wm_group.add_argument(wmenv.arg_flag('zmq_master_heartbeat'), metavar='MASTER_HEARTBEAT',
type=float,
help='Every MASTER_HEARTBEAT seconds, the master announces its presence '
+'to workers.')
wm_group.add_argument(wmenv.arg_flag('zmq_worker_heartbeat'), metavar='WORKER_HEARTBEAT',
type=float,
help='Every WORKER_HEARTBEAT seconds, workers announce their presence '
+'to the master.')
wm_group.add_argument(wmenv.arg_flag('zmq_timeout_factor'), metavar='FACTOR',
type=float,
help='Scaling factor for heartbeat timeouts. '
+"If the master doesn't hear from a worker in WORKER_HEARTBEAT*FACTOR, "
+"the worker is assumed to have crashed. If a worker doesn't hear from "
+"the master in MASTER_HEARTBEAT*FACTOR seconds, the master is assumed "
+"to have crashed. Both cases result in shutdown. "
)
wm_group.add_argument(wmenv.arg_flag('zmq_startup_timeout'), metavar='STARTUP_TIMEOUT',
type=float,
help='Amount of time (in seconds) to wait for communication between '
+'the master and at least one worker. This may need to be changed '
+'on very large, heavily-loaded computer systems that start all processes '
+'simultaneously. '
)
wm_group.add_argument(wmenv.arg_flag('zmq_shutdown_timeout'), metavar='SHUTDOWN_TIMEOUT',
type=float,
help='Amount of time (in seconds) to wait for workers to shut down.')
@classmethod
def from_environ(cls, wmenv=None):
if wmenv is None:
wmenv = work_managers.environment.default_env
# determine mode
mode = wmenv.get_val('zmq_mode', 'master').lower()
if mode in {'master', 'server'}:
mode = 'master'
elif mode in {'node', 'client'}:
mode = 'node'
else:
raise ValueError('invalid ZMQ work manager mode {!r}'.format(mode))
# determine number of workers
# 0 with mode=='master' is a dedicated master
# 0 with mode=='node' is a dedicated communications process (former ZMQRouter)
n_workers = wmenv.get_val('n_workers', multiprocessing.cpu_count(), int)
# We set this at the class level, because outside of testing, a node either
# can support IPC or it can't, and there is no obvious need (currently)
# to support both modes on an instance-by-instance basis
comm_mode = wmenv.get_val('zmq_comm_mode', cls.default_comm_mode)
ZMQWorkManager.internal_transport = comm_mode
ZMQWorker.internal_transport = comm_mode
ZMQNode.internal_transport = comm_mode
write_host_info = wmenv.get_val('zmq_write_host_info')
read_host_info = wmenv.get_val('zmq_read_host_info')
master_heartbeat = wmenv.get_val('zmq_master_heartbeat', cls.default_master_heartbeat, float)
worker_heartbeat = wmenv.get_val('zmq_worker_heartbeat', cls.default_worker_heartbeat, float)
timeout_factor = wmenv.get_val('zmq_timeout_factor', cls.default_timeout_factor, float)
startup_timeout = wmenv.get_val('zmq_startup_timeout', cls.default_startup_timeout, float)
if mode == 'master':
instance = ZMQWorkManager(n_workers)
else: # mode =='node'
upstream_info = {}
if read_host_info:
upstream_info.update(cls.read_host_info(read_host_info))
log.debug('upstream_info: {!r}'.format(upstream_info))
upstream_rr_endpoint = wmenv.get_val('zmq_upstream_rr_endpoint', upstream_info.get('rr_endpoint'))
upstream_ann_endpoint = wmenv.get_val('zmq_upstream_ann_endpoint', upstream_info.get('ann_endpoint'))
if not (upstream_rr_endpoint and upstream_ann_endpoint):
raise ZMQWMEnvironmentError('at least one upstream endpoint unspecified')
# expand hostnames, if present, to IP addresses
# reject wildcard hostnames, which is a logic error (can't connect to a host
# without specifying an address)
upstream_rr_endpoint = cls.canonicalize_endpoint(upstream_rr_endpoint, allow_wildcard_host=False)
upstream_ann_endpoint = cls.canonicalize_endpoint(upstream_ann_endpoint, allow_wildcard_host=False)
log.debug('upstream_rr_endpoint = {}'.format(upstream_rr_endpoint))
log.debug('upstream_ann_endpoint = {}'.format(upstream_ann_endpoint))
instance = ZMQNode(upstream_ann_endpoint=upstream_ann_endpoint,
upstream_rr_endpoint=upstream_rr_endpoint,
n_local_workers=n_workers)
# Both server and node bind downstream endpoints, so that users get fan-out communications
# "for free" when starting up a computational node
downstream_rr_endpoint = cls.canonicalize_endpoint(wmenv.get_val('zmq_downstream_rr_endpoint',
'tcp://*:{}'.format(randport())))
downstream_ann_endpoint = cls.canonicalize_endpoint(wmenv.get_val('zmq_downstream_ann_endpoint',
'tcp://*:{}'.format(randport())))
instance.downstream_rr_endpoint = downstream_rr_endpoint
instance.downstream_ann_endpoint = downstream_ann_endpoint
instance.master_beacon_period = master_heartbeat
instance.worker_beacon_period = worker_heartbeat
instance.timeout_factor = timeout_factor
instance.startup_timeout = startup_timeout
assert isinstance(instance, IsNode)
for worker in instance.local_workers:
worker.master_beacon_period = master_heartbeat
worker.worker_beacon_period = worker_heartbeat
worker.timeout_factor = timeout_factor
worker.startup_timeout = startup_timeout
# We always write host info (since we are always either master or node)
        # we choose not to in the special case that write_host_info is '' but not None
# (None implies nothing found on command line or in environment variables, but ''
# implies that it was found somewhere but it is empty)
if write_host_info is not None and write_host_info != '':
instance.write_host_info(write_host_info)
log.debug('prepared {!r} with:'.format(instance))
log.debug('n_workers = {}'.format(n_workers))
for attr in ('master_beacon_period', 'worker_beacon_period', 'startup_timeout', 'timeout_factor',
'downstream_rr_endpoint', 'downstream_ann_endpoint'):
log.debug('{} = {!r}'.format(attr, getattr(instance, attr)))
return instance
@classmethod
def read_host_info(cls, filename):
return json.load(open(filename,'rt'))
@classmethod
def canonicalize_endpoint(cls, endpoint, allow_wildcard_host = True):
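        # Illustrative example: 'tcp://somehost' resolves the hostname and draws a random port,
        # yielding e.g. 'tcp://192.0.2.10:47123'; 'tcp://*:1234' is only accepted when
        # allow_wildcard_host is True, and 'ipc://...' endpoints are returned unchanged.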
if endpoint.startswith('ipc://'):
return endpoint
elif endpoint.startswith('tcp://'):
fields = endpoint[6:].split(':')
# get IP address
if fields[0] != '*':
ipaddr = socket.gethostbyname(fields[0])
else:
if allow_wildcard_host:
ipaddr = '*'
else:
raise ValueError('wildcard host not permitted')
# get/generate port
try:
port = fields[1]
except IndexError:
# no port given; select one
port = randport()
else:
port = int(fields[1])
return 'tcp://{}:{}'.format(ipaddr,port)
else:
raise ValueError('unrecognized/unsupported endpoint: {!r}'.format(endpoint))
def __init__(self, n_local_workers=1):
ZMQCore.__init__(self)
WorkManager.__init__(self)
IsNode.__init__(self, n_local_workers)
# Futures indexed by task ID
self.futures = dict()
# Tasks pending distribution
self.outgoing_tasks = deque()
# Tasks being processed by workers (indexed by worker_id)
self.assigned_tasks = dict()
# Identity information and last contact from workers
self.worker_information = dict() # indexed by worker_id
self.worker_timeouts = PassiveMultiTimer() # indexed by worker_id
# Number of seconds between checks to see which workers have timed out
self.worker_timeout_check = 5.0
# Amount of time to wait for stray requests to arrive so that workers shut down properly
self.shutdown_timeout = 0.5
self.master_id = self.node_id
@property
def n_workers(self):
return len(self.worker_information)
def submit(self, fn, args=None, kwargs=None):
if self.futures is None:
# We are shutting down
raise ZMQWMEnvironmentError('work manager is shutting down')
future = WMFuture()
task = Task(fn, args or (), kwargs or {}, task_id = future.task_id)
self.futures[task.task_id] = future
self.outgoing_tasks.append(task)
# Wake up the communications loop (if necessary) to announce new tasks
self.send_inproc_message(Message.TASKS_AVAILABLE)
return future
def submit_many(self, tasks):
if self.futures is None:
# We are shutting down
raise ZMQWMEnvironmentError('work manager is shutting down')
futures = []
for (fn,args,kwargs) in tasks:
future = WMFuture()
task = Task(fn, args, kwargs, task_id = future.task_id)
self.futures[task.task_id] = future
self.outgoing_tasks.append(task)
futures.append(future)
# Wake up the communications loop (if necessary) to announce new tasks
self.send_inproc_message(Message.TASKS_AVAILABLE)
return futures
def send_message(self, socket, message, payload=None, flags=0):
message = Message(message, payload)
message.master_id = self.node_id
super(ZMQWorkManager,self).send_message(socket, message, payload, flags)
def handle_result(self, socket, msg):
self.send_ack(socket,msg)
with self.message_validation(msg):
assert msg.message == Message.RESULT
assert isinstance(msg.payload, Result)
assert msg.payload.task_id in self.futures
assert self.assigned_tasks[msg.src_id].task_id == msg.payload.task_id
result = msg.payload
future = self.futures.pop(result.task_id)
del self.assigned_tasks[msg.src_id]
if result.exception is not None:
future._set_exception(result.exception, result.traceback)
else:
future._set_result(result.result)
def handle_task_request(self, socket, msg):
if not self.outgoing_tasks:
# No tasks available
self.send_nak(socket,msg)
else:
task = self.outgoing_tasks.popleft()
worker_id = msg.src_id
self.assigned_tasks[worker_id] = task
self.send_message(socket, Message.TASK, task)
def update_worker_information(self, msg):
if msg.message == Message.IDENTIFY:
with self.message_validation(msg):
assert isinstance(msg.payload, dict)
self.worker_information[msg.src_id] = msg.payload
else:
self.worker_information[msg.src_id] = {}
try:
self.worker_timeouts.reset(msg.src_id)
except KeyError:
self.worker_timeouts.add_timer(msg.src_id,self.worker_beacon_period*self.timeout_factor)
def check_workers(self):
expired_worker_ids = self.worker_timeouts.which_expired()
for expired_worker_id in expired_worker_ids:
try:
worker_description = '{!s} ({!s})'.format(expired_worker_id,
self.worker_information[expired_worker_id]['description'])
except KeyError:
worker_description = str(expired_worker_id)
            self.log.error('no contact from worker {}'.format(worker_description))
self.remove_worker(expired_worker_id)
def remove_worker(self, worker_id):
try:
expired_task = self.assigned_tasks.pop(worker_id)
except KeyError:
pass
else:
self.log.error('aborting task {!r} running on expired worker {!s}'
.format(expired_task, worker_id))
future = self.futures.pop(expired_task.task_id)
future._set_exception(ZMQWorkerMissing('worker running this task disappeared'))
del self.worker_information[worker_id]
def shutdown_clear_tasks(self):
'''Abort pending tasks with error on shutdown.'''
while self.futures:
task_id, future = self.futures.popitem()
future._set_exception(ZMQWMEnvironmentError('work manager shut down during task'))
self.futures = None
def comm_loop(self):
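        # Master communication loop: rr_socket (REP) carries task-request/result traffic from
        # workers, ann_socket (PUB) broadcasts beacons, task-available notices and shutdown,
        # and the inproc SUB socket wakes this loop up on internal messages sent with
        # send_inproc_message() (e.g. TASKS_AVAILABLE, SHUTDOWN).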
self.context = zmq.Context()
rr_socket = self.context.socket(zmq.REP)
ann_socket = self.context.socket(zmq.PUB)
for endpoint in (self.local_rr_endpoint, self.downstream_rr_endpoint):
if endpoint: rr_socket.bind(endpoint)
for endpoint in (self.local_ann_endpoint, self.downstream_ann_endpoint):
if endpoint: ann_socket.bind(endpoint)
inproc_socket = self.context.socket(zmq.SUB)
inproc_socket.setsockopt(zmq.SUBSCRIBE,b'')
inproc_socket.bind(self.inproc_endpoint)
poller = zmq.Poller()
poller.register(inproc_socket, zmq.POLLIN)
poller.register(rr_socket, zmq.POLLIN)
timers = PassiveMultiTimer()
timers.add_timer('tasks_avail', self.master_beacon_period)
timers.add_timer('master_beacon', self.master_beacon_period)
timers.add_timer('worker_timeout_check', self.worker_beacon_period*self.timeout_factor)
timers.add_timer('startup_timeout', self.startup_timeout)
timers.reset()
self.log.debug('master beacon period: {!r}'.format(self.master_beacon_period))
self.log.debug('startup timeout: {!r}'.format(self.startup_timeout))
peer_found = False
try:
# Send a master alive message immediately; it will get discarded if necessary
self.send_message(ann_socket, Message.MASTER_BEACON)
while True:
# If a timer is already expired, next_expiration_in() will return 0, which
# zeromq interprets as infinite wait; so instead we select a 1 ms wait in this
# case.
timeout = (timers.next_expiration_in() or 0.001)*1000
# Wake up every second to check for signals
timeout = min(timeout, 1000)
poll_results = dict(poller.poll(timeout))
if inproc_socket in poll_results:
msgs = self.recv_all(inproc_socket,validate=False)
# Check for shutdown; do nothing else if shutdown is signalled
if Message.SHUTDOWN in (msg.message for msg in msgs):
self.log.debug('shutdown received')
break
# Check for any other wake-up messages
for msg in msgs:
if msg.message == Message.TASKS_AVAILABLE:
self.send_message(ann_socket, Message.TASKS_AVAILABLE)
if rr_socket in poll_results:
msg = self.recv_message(rr_socket)
self.update_worker_information(msg)
if msg.message == Message.TASK_REQUEST:
self.handle_task_request(rr_socket, msg)
elif msg.message == Message.RESULT:
self.handle_result(rr_socket, msg)
else:
self.send_ack(rr_socket, msg)
if self.worker_information:
peer_found = True
if timers.expired('tasks_avail'):
if self.outgoing_tasks:
self.send_message(ann_socket, Message.TASKS_AVAILABLE)
timers.reset('tasks_avail')
if timers.expired('master_beacon'):
self.send_message(ann_socket, Message.MASTER_BEACON)
timers.reset('master_beacon')
if peer_found and timers.expired('worker_timeout_check'):
self.check_workers()
if not self.worker_information:
self.log.error('all workers disappeared; exiting')
break
timers.reset('worker_timeout_check')
if not peer_found and timers.expired('startup_timeout'):
self.log.error('startup phase elapsed with no contact from workers; shutting down')
while self.futures:
future = self.futures.popitem()[1]
future._set_exception(ZMQWorkerMissing('no workers available'))
break
# Post a shutdown message
self.log.debug('sending shutdown on ann_socket')
self.send_message(ann_socket, Message.SHUTDOWN)
poller.unregister(inproc_socket)
# Clear tasks
self.shutdown_clear_tasks()
# Clear incoming queue of requests, to let clients exit request/reply states gracefully
# (clients will still timeout in these states if necessary)
timers.add_timer('shutdown', self.shutdown_timeout)
while not timers.expired('shutdown'):
poll_results = dict(poller.poll(self.shutdown_timeout / 10 * 1000))
if rr_socket in poll_results:
msg = self.recv_message(rr_socket)
self.send_nak(rr_socket, msg)
finally:
self.context.destroy(linger=1)
self.context = None
self.remove_ipc_endpoints()
def startup(self):
IsNode.startup(self)
super(ZMQWorkManager,self).startup()
def shutdown(self):
self.signal_shutdown()
IsNode.shutdown(self)
self.join()
super(ZMQWorkManager,self).shutdown()
|
|
#!/usr/bin/env python
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# This is the main of the glideinFrontend
#
# Arguments:
# $1 = parent PID
# $2 = work dir
# $3 = group_name
#
# Author:
# Igor Sfiligoi (was glideinFrontend.py until Nov 21, 2008)
#
import signal
import sys
import os
import copy
import traceback
import time
import string
import logging
import cPickle
import re
sys.path.append(os.path.join(sys.path[0],"../.."))
from glideinwms.lib import symCrypto,pubCrypto
from glideinwms.lib import glideinWMSVersion
from glideinwms.lib import logSupport
from glideinwms.lib import cleanupSupport
from glideinwms.frontend import glideinFrontendConfig
from glideinwms.frontend import glideinFrontendInterface
from glideinwms.frontend import glideinFrontendLib
from glideinwms.frontend import glideinFrontendPidLib
from glideinwms.frontend import glideinFrontendMonitoring
from glideinwms.frontend import glideinFrontendPlugins
############################################################
def check_parent(parent_pid):
if os.path.exists('/proc/%s' % parent_pid):
return # parent still exists, we are fine
logSupport.log.warning("Parent died, exit.")
raise KeyboardInterrupt, "Parent died"
############################################################
def write_stats(stats):
for k in stats.keys():
        stats[k].write_file()
############################################################
# Will log the factory_stat_arr (tuple composed of 13 numbers)
# and return a sum of factory_stat_arr+old_factory_stat_arr
def log_and_sum_factory_line(factory, is_down, factory_stat_arr, old_factory_stat_arr):
# if numbers are too big, reduce them to either k or M for presentation
form_arr = []
for i in factory_stat_arr:
if i < 100000:
form_arr.append("%5i" % i)
elif i < 10000000:
form_arr.append("%4ik" % (i / 1000))
else:
form_arr.append("%4iM" % (i / 1000000))
if is_down:
down_str = "Down"
else:
down_str = "Up "
logSupport.log.info(("%s(%s %s %s %s) %s(%s %s) | %s %s %s | %s %s " % tuple(form_arr)) +
("%s %s" % (down_str, factory)))
new_arr = []
for i in range(len(factory_stat_arr)):
new_arr.append(factory_stat_arr[i] + old_factory_stat_arr[i])
return new_arr
def init_factory_stats_arr():
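    # 13 zeros, one per column accumulated by log_and_sum_factory_line() and printed
    # under the header emitted by log_factory_header()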
return [0] * 13
def log_factory_header():
logSupport.log.info(" Jobs in schedd queues | Glideins | Request ")
logSupport.log.info("Idle (match eff old uniq ) Run ( here max ) | Total Idle Run | Idle MaxRun Down Factory")
###############################
# to be used with fork clients
# Args:
# r - input pipe
# pid - pid of the child
def fetch_fork_result(r,pid):
try:
rin=""
s=os.read(r,1024*1024)
while (s!=""): # "" means EOF
rin+=s
s=os.read(r,1024*1024)
finally:
os.close(r)
os.waitpid(pid,0)
out=cPickle.loads(rin)
return out
# in: pipe_ids - dictionary, each element is {'r':r,'pid':pid} - see above
# out: dictionary of fork_results
def fetch_fork_result_list(pipe_ids):
out={}
failures=0
for k in pipe_ids.keys():
try:
# now collect the results
rin=fetch_fork_result(pipe_ids[k]['r'],pipe_ids[k]['pid'])
out[k]=rin
except Exception, e:
logSupport.log.warning("Failed to retrieve %s state information from the subprocess." % k)
logSupport.log.debug("Failed to retrieve %s state from the subprocess: %s" % (k, e))
failures+=1
if failures>0:
raise RuntimeError, "Found %i errors"%failures
return out
######################
# expand $$(attribute)
def expand_DD(qstr,attr_dict):
import re
robj=re.compile("\$\$\((?P<attrname>[^\)]*)\)")
while 1:
m=robj.search(qstr)
if m is None:
break # no more substitutions to do
attr_name=m.group('attrname')
if not attr_dict.has_key(attr_name):
raise KeyError, "Missing attribute %s"%attr_name
attr_val=attr_dict[attr_name]
if type(attr_val)==type(1):
attr_str=str(attr_val)
else: # assume it is a string for all other purposes... quote and escape existing quotes
attr_str='"%s"'%attr_val.replace('"','\\"')
qstr="%s%s%s"%(qstr[:m.start()],attr_str,qstr[m.end():])
return qstr
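# Illustrative example for expand_DD(): with attr_dict={'GLIDEIN_Site': 'UCSD'}, the query
# 'Site == $$(GLIDEIN_Site)' expands to 'Site == "UCSD"'; integer values are inserted bare,
# while string values are quoted and any embedded quotes escaped.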
############################################################
def iterate_one(client_name, elementDescript, paramsDescript, attr_dict, signatureDescript, x509_proxy_plugin, stats, history_obj):
frontend_name = elementDescript.frontend_data['FrontendName']
group_name = elementDescript.element_data['GroupName']
security_name = elementDescript.merged_data['SecurityName']
web_url = elementDescript.frontend_data['WebURL']
monitoring_web_url=elementDescript.frontend_data['MonitoringWebURL']
pipe_ids={}
factory_constraint = elementDescript.merged_data['FactoryQueryExpr']
factory_pools = elementDescript.merged_data['FactoryCollectors']
logSupport.log.info("Querying schedd, entry, and glidein status using child processes.")
# query globals
# We can't fork this since the M2Crypto key objects are not pickle-able. Not much to gain by forking anyway.
globals_dict = {}
for factory_pool in factory_pools:
factory_pool_node = factory_pool[0]
my_identity_at_factory_pool = factory_pool[2]
try:
factory_globals_dict = glideinFrontendInterface.findGlobals(factory_pool_node, None, None)
except RuntimeError:
# failed to talk, like empty... maybe the next factory will have something
if factory_pool_node is not None:
logSupport.log.exception("Failed to talk to factory_pool %s for global info: " % factory_pool_node)
else:
logSupport.log.exception("Failed to talk to factory_pool for global info: " )
factory_globals_dict = {}
for globalid in factory_globals_dict:
globals_el = factory_globals_dict[globalid]
if not globals_el['attrs'].has_key('PubKeyType'): # no pub key at all
pass # no public key, nothing to do
elif globals_el['attrs']['PubKeyType'] == 'RSA': # only trust RSA for now
try:
globals_el['attrs']['PubKeyObj'] = pubCrypto.PubRSAKey(str(re.sub(r"\\+n", r"\n", globals_el['attrs']['PubKeyValue'])))
globals_el['attrs']['FactoryPoolNode'] = factory_pool_node
globals_el['attrs']['FactoryPoolId'] = my_identity_at_factory_pool
# KEL ok to put here? do we want all globals even if there is no key? may resolve other issues with checking later on
globals_dict[globalid] = globals_el
except:
# if no valid key, just notify...
# if key needed, will handle the error later on
logSupport.log.warning("Factory Globals '%s': invalid RSA key" % globalid)
tb = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1], sys.exc_info()[2])
logSupport.log.debug("Factory Globals '%s': invalid RSA key traceback: %s\n" % (globalid, str(tb)))
else:
# don't know what to do with this key, notify the admin
# if key needed, will handle the error later on
# KEL I think this log message is wrong, globalid is not a tuple? or should it be?
logSupport.log.info("Factory '%s@%s': unsupported pub key type '%s'" % (globalid[1], globalid[0], globals_el['attrs']['PubKeyType']))
# query entries
r,w=os.pipe()
pid=os.fork()
if pid==0:
# this is the child... return output as a pickled object via the pipe
os.close(r)
try:
glidein_dict = {}
factory_constraint=expand_DD(elementDescript.merged_data['FactoryQueryExpr'],attr_dict)
factory_pools=elementDescript.merged_data['FactoryCollectors']
for factory_pool in factory_pools:
factory_pool_node = factory_pool[0]
factory_identity = factory_pool[1]
my_identity_at_factory_pool = factory_pool[2]
try:
factory_glidein_dict = glideinFrontendInterface.findGlideins(factory_pool_node, None, signatureDescript.signature_type, factory_constraint)
except RuntimeError:
# failed to talk, like empty... maybe the next factory will have something
if factory_pool_node is not None:
logSupport.log.exception("Failed to talk to factory_pool %s for entry info: " % factory_pool_node)
else:
logSupport.log.exception("Failed to talk to factory_pool for entry info: ")
factory_glidein_dict = {}
for glidename in factory_glidein_dict.keys():
if (not factory_glidein_dict[glidename]['attrs'].has_key('AuthenticatedIdentity')) or (factory_glidein_dict[glidename]['attrs']['AuthenticatedIdentity'] != factory_identity):
logSupport.log.warning("Found an untrusted factory %s at %s; ignoring." % (glidename, factory_pool_node))
if factory_glidein_dict[glidename]['attrs'].has_key('AuthenticatedIdentity'):
logSupport.log.warning("Found an untrusted factory %s at %s; identity mismatch '%s'!='%s'" % (glidename, factory_pool_node, factory_glidein_dict[glidename]['attrs']['AuthenticatedIdentity'], factory_identity))
else:
glidein_dict[(factory_pool_node, glidename, my_identity_at_factory_pool)] = factory_glidein_dict[glidename]
os.write(w,cPickle.dumps(glidein_dict))
except Exception, ex:
tb = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],
sys.exc_info()[2])
logSupport.log.debug("Error in talking to the factory pool: %s" % tb)
os.close(w)
# hard kill myself... don't want any cleanup, since i was created just for this calculation
os.kill(os.getpid(),signal.SIGKILL)
else:
# this is the original
# just remember what you did for now
os.close(w)
pipe_ids['entries']={'r':r,'pid':pid}
## schedd
r,w=os.pipe()
pid=os.fork()
if pid==0:
# this is the child... return output as a pickled object via the pipe
os.close(r)
try:
#condorq_format_list = elementDescript.merged_data['JobMatchAttrs']
#if x509_proxy_plugin is not None:
# condorq_format_list = list(condorq_format_list) + list(x509_proxy_plugin.get_required_job_attributes())
### Add in elements to help in determining if jobs have voms creds
#condorq_format_list=list(condorq_format_list)+list((('x509UserProxyFirstFQAN','s'),))
#condorq_format_list=list(condorq_format_list)+list((('x509UserProxyFQAN','s'),))
#condorq_dict = glideinFrontendLib.getCondorQ(elementDescript.merged_data['JobSchedds'],
# elementDescript.merged_data['JobQueryExpr'],
# condorq_format_list)
try:
condorq_format_list = elementDescript.merged_data['JobMatchAttrs']
if x509_proxy_plugin is not None:
condorq_format_list = list(condorq_format_list) + list(x509_proxy_plugin.get_required_job_attributes())
### Add in elements to help in determining if jobs have voms creds
condorq_format_list=list(condorq_format_list)+list((('x509UserProxyFirstFQAN','s'),))
condorq_format_list=list(condorq_format_list)+list((('x509UserProxyFQAN','s'),))
condorq_format_list=list(condorq_format_list)+list((('x509userproxy','s'),))
condorq_dict = glideinFrontendLib.getCondorQ(elementDescript.merged_data['JobSchedds'],
expand_DD(elementDescript.merged_data['JobQueryExpr'],attr_dict),
condorq_format_list)
except Exception:
logSupport.log.exception("In query schedd child, exception:")
os.write(w,cPickle.dumps(condorq_dict))
except Exception, ex:
tb = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],
sys.exc_info()[2])
logSupport.log.debug("Error in retrieving information from schedd (condor_q): %s" % tb)
os.close(w)
# hard kill myself... don't want any cleanup, since i was created just for this calculation
os.kill(os.getpid(),signal.SIGKILL)
else:
# this is the original
# just remember what you did for now
os.close(w)
pipe_ids['jobs']={'r':r,'pid':pid}
## resource
r,w=os.pipe()
pid=os.fork()
if pid==0:
# this is the child... return output as a pickled object via the pipe
os.close(r)
try:
status_format_list=[]
if x509_proxy_plugin is not None:
status_format_list=list(status_format_list)+list(x509_proxy_plugin.get_required_classad_attributes())
# use the main collector... all adds must go there
status_dict=glideinFrontendLib.getCondorStatus([None],
'GLIDECLIENT_Name=?="%s.%s"'%(frontend_name,group_name),
status_format_list)
os.write(w,cPickle.dumps(status_dict))
except Exception, ex:
tb = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],
sys.exc_info()[2])
logSupport.log.debug("Error in talking to the user pool (condor_status): %s" % tb)
os.close(w)
# hard kill myself... don't want any cleanup, since i was created just for this calculation
os.kill(os.getpid(),signal.SIGKILL)
else:
# this is the original
# just remember what you did for now
os.close(w)
pipe_ids['startds']={'r':r,'pid':pid}
try:
pipe_out=fetch_fork_result_list(pipe_ids)
except RuntimeError, e:
# expect all errors logged already
logSupport.log.info("Missing schedd, factory entry, and/or current glidein state information. " \
"Unable to calculate required glideins, terminating loop.")
return
logSupport.log.info("All children terminated")
glidein_dict=pipe_out['entries']
condorq_dict=pipe_out['jobs']
status_dict=pipe_out['startds']
condorq_dict_proxy=glideinFrontendLib.getIdleProxyCondorQ(condorq_dict)
condorq_dict_voms=glideinFrontendLib.getIdleVomsCondorQ(condorq_dict)
condorq_dict_idle = glideinFrontendLib.getIdleCondorQ(condorq_dict)
condorq_dict_old_idle = glideinFrontendLib.getOldCondorQ(condorq_dict_idle, 600)
condorq_dict_running = glideinFrontendLib.getRunningCondorQ(condorq_dict)
condorq_dict_types = {'Idle':{'dict':condorq_dict_idle, 'abs':glideinFrontendLib.countCondorQ(condorq_dict_idle)},
'OldIdle':{'dict':condorq_dict_old_idle, 'abs':glideinFrontendLib.countCondorQ(condorq_dict_old_idle)},
'VomsIdle':{'dict':condorq_dict_voms, 'abs':glideinFrontendLib.countCondorQ(condorq_dict_voms)},
'ProxyIdle':{'dict':condorq_dict_proxy,'abs':glideinFrontendLib.countCondorQ(condorq_dict_proxy)},
'Running':{'dict':condorq_dict_running, 'abs':glideinFrontendLib.countCondorQ(condorq_dict_running)}}
    condorq_dict_abs = glideinFrontendLib.countCondorQ(condorq_dict)
stats['group'].logJobs({'Total':condorq_dict_abs,
'Idle':condorq_dict_types['Idle']['abs'],
'OldIdle':condorq_dict_types['OldIdle']['abs'],
'Running':condorq_dict_types['Running']['abs']})
logSupport.log.info("Jobs found total %i idle %i (old %i, grid %i, voms %i) running %i" % (condorq_dict_abs,
condorq_dict_types['Idle']['abs'],
condorq_dict_types['OldIdle']['abs'],
condorq_dict_types['ProxyIdle']['abs'],
condorq_dict_types['VomsIdle']['abs'],
condorq_dict_types['Running']['abs']))
status_dict_idle = glideinFrontendLib.getIdleCondorStatus(status_dict)
status_dict_running = glideinFrontendLib.getRunningCondorStatus(status_dict)
#logSupport.log.debug("condor stat: %s\n\n" % status_dict_running[None].fetchStored())
glideinFrontendLib.appendRealRunning(condorq_dict_running, status_dict_running)
#logSupport.log.debug("condorq running: %s\n\n" % condorq_dict_running['devg-1.t2.ucsd.edu'].fetchStored())
status_dict_types = {'Total':{'dict':status_dict, 'abs':glideinFrontendLib.countCondorStatus(status_dict)},
'Idle':{'dict':status_dict_idle, 'abs':glideinFrontendLib.countCondorStatus(status_dict_idle)},
'Running':{'dict':status_dict_running, 'abs':glideinFrontendLib.countCondorStatus(status_dict_running)}}
stats['group'].logGlideins({'Total':status_dict_types['Total']['abs'],
'Idle':status_dict_types['Idle']['abs'],
'Running':status_dict_types['Running']['abs']})
total_max_glideins=int(elementDescript.element_data['MaxRunningTotal'])
total_curb_glideins=int(elementDescript.element_data['CurbRunningTotal'])
total_glideins=status_dict_types['Total']['abs']
logSupport.log.info("Glideins found total %i idle %i running %i limit %i curb %i"%
(total_glideins,
status_dict_types['Idle']['abs'],
status_dict_types['Running']['abs'],
total_max_glideins,total_curb_glideins)
)
# TODO: PM check if it is commented out because of globals section
# extract the public key, if present
#for glideid in glidein_dict.keys():
# glidein_el = glidein_dict[glideid]
# if not glidein_el['attrs'].has_key('PubKeyType'): # no pub key at all
# pass # no public key, nothing to do
# elif glidein_el['attrs']['PubKeyType'] == 'RSA': # only trust RSA for now
# try:
# glidein_el['attrs']['PubKeyObj'] = pubCrypto.PubRSAKey(str(string.replace(glidein_el['attrs']['PubKeyValue'], '\\n', '\n')))
# except:
# # if no valid key, just notify...
# # if key needed, will handle the error later on
# logSupport.log.warning("Factory '%s@%s': invalid RSA key" % (glideid[1], glideid[0]))
# else:
# # don't know what to do with this key, notify the admin
# # if key needed, will handle the error later on
# logSupport.log.info("Factory '%s@%s': unsupported pub key type '%s'" % (glideid[1], glideid[0], glidein_el['attrs']['PubKeyType']))
# update x509 user map and give proxy plugin a chance to update based on condor stats
if x509_proxy_plugin is not None:
logSupport.log.info("Updating usermap ");
x509_proxy_plugin.update_usermap(condorq_dict, condorq_dict_types,
status_dict, status_dict_types)
# here we have all the data needed to build a GroupAdvertizeType object
descript_obj = glideinFrontendInterface.FrontendDescript(client_name, frontend_name, group_name, web_url, signatureDescript.frontend_descript_fname, signatureDescript.group_descript_fname, signatureDescript.signature_type, signatureDescript.frontend_descript_signature, signatureDescript.group_descript_signature, x509_proxy_plugin)
descript_obj.add_monitoring_url(monitoring_web_url)
# reuse between loops might be a good idea, but this will work for now
key_builder = glideinFrontendInterface.Key4AdvertizeBuilder()
logSupport.log.info("Match")
# extract only the attribute names from format list
condorq_match_list=[]
for f in elementDescript.merged_data['JobMatchAttrs']:
condorq_match_list.append(f[0])
#logSupport.log.debug("realcount: %s\n\n" % glideinFrontendLib.countRealRunning(elementDescript.merged_data['MatchExprCompiledObj'],condorq_dict_running,glidein_dict))
logSupport.log.info("Counting subprocess created")
pipe_ids={}
for dt in condorq_dict_types.keys()+['Real','Glidein']:
# will make calculations in parallel,using multiple processes
r,w=os.pipe()
pid=os.fork()
if pid==0:
# this is the child... return output as a pickled object via the pipe
os.close(r)
try:
if dt=='Real':
out=glideinFrontendLib.countRealRunning(elementDescript.merged_data['MatchExprCompiledObj'],condorq_dict_running,glidein_dict,attr_dict,condorq_match_list)
elif dt=='Glidein':
count_status_multi={}
for glideid in glidein_dict.keys():
request_name=glideid[1]
count_status_multi[request_name]={}
for st in status_dict_types.keys():
c=glideinFrontendLib.getClientCondorStatus(status_dict_types[st]['dict'],frontend_name,group_name,request_name)
count_status_multi[request_name][st]=glideinFrontendLib.countCondorStatus(c)
out=count_status_multi
else:
c,p,h=glideinFrontendLib.countMatch(elementDescript.merged_data['MatchExprCompiledObj'],condorq_dict_types[dt]['dict'],glidein_dict,attr_dict,condorq_match_list)
t=glideinFrontendLib.countCondorQ(condorq_dict_types[dt]['dict'])
out=(c,p,h,t)
os.write(w,cPickle.dumps(out))
finally:
os.close(w)
# hard kill myself... don't want any cleanup, since i was created just for this calculation
os.kill(os.getpid(),signal.SIGKILL)
else:
# this is the original
# just remember what you did for now
os.close(w)
pipe_ids[dt]={'r':r,'pid':pid}
try:
pipe_out=fetch_fork_result_list(pipe_ids)
except RuntimeError:
# expect all errors logged already
logSupport.log.exception("Terminating iteration due to errors:")
return
logSupport.log.info("All children terminated")
# TODO: PM Need to check if we are counting correctly after the merge
for dt in condorq_dict_types.keys():
el=condorq_dict_types[dt]
(el['count'], el['prop'], el['hereonly'], el['total'])=pipe_out[dt]
count_real=pipe_out['Real']
count_status_multi=pipe_out['Glidein']
max_running = int(elementDescript.element_data['MaxRunningPerEntry'])
fraction_running = float(elementDescript.element_data['FracRunningPerEntry'])
max_idle = int(elementDescript.element_data['MaxIdlePerEntry'])
reserve_idle = int(elementDescript.element_data['ReserveIdlePerEntry'])
max_vms_idle = int(elementDescript.element_data['MaxIdleVMsPerEntry'])
curb_vms_idle = int(elementDescript.element_data['CurbIdleVMsPerEntry'])
glexec='UNDEFINED'
if 'GLIDEIN_Glexec_Use' in elementDescript.frontend_data:
glexec=elementDescript.frontend_data['GLIDEIN_Glexec_Use']
if 'GLIDEIN_Glexec_Use' in elementDescript.merged_data:
glexec=elementDescript.merged_data['GLIDEIN_Glexec_Use']
total_running = condorq_dict_types['Running']['total']
logSupport.log.info("Total matching idle %i (old %i) running %i limit %i" % (condorq_dict_types['Idle']['total'], condorq_dict_types['OldIdle']['total'], total_running, max_running))
advertizer = glideinFrontendInterface.MultiAdvertizeWork(descript_obj)
resource_advertiser = glideinFrontendInterface.ResourceClassadAdvertiser(multi_support=glideinFrontendInterface.frontendConfig.advertise_use_multi)
# Add globals
for globalid in globals_dict:
globals_el = globals_dict[globalid]
if globals_el['attrs'].has_key('PubKeyObj'):
key_obj = key_builder.get_key_obj(globals_el['attrs']['FactoryPoolId'], globals_el['attrs']['PubKeyID'], globals_el['attrs']['PubKeyObj'])
advertizer.add_global(globals_el['attrs']['FactoryPoolNode'],globalid,security_name,key_obj)
glideid_list = condorq_dict_types['Idle']['count'].keys()
# TODO: PM Following shows up in branch_v2plus. Which is correct?
# glideid_list=glidein_dict.keys()
glideid_list.sort() # sort for the sake of monitoring
processed_glideid_strs=[] # we will need this for faster lookup later
log_factory_header()
total_up_stats_arr=init_factory_stats_arr()
total_down_stats_arr=init_factory_stats_arr()
for glideid in glideid_list:
if glideid == (None, None, None):
continue # This is the special "Unmatched" entry
factory_pool_node = glideid[0]
request_name = glideid[1]
my_identity = str(glideid[2]) # get rid of unicode
glideid_str = "%s@%s" % (request_name, factory_pool_node)
processed_glideid_strs.append(glideid_str)
glidein_el = glidein_dict[glideid]
glidein_in_downtime = False
if glidein_el['attrs'].has_key('GLIDEIN_In_Downtime'):
glidein_in_downtime = (glidein_el['attrs']['GLIDEIN_In_Downtime'] == 'True')
count_jobs={} # straight match
prop_jobs={} # proportional subset for this entry
hereonly_jobs={} # can only run on this site
for dt in condorq_dict_types.keys():
count_jobs[dt] = condorq_dict_types[dt]['count'][glideid]
prop_jobs[dt] = condorq_dict_types[dt]['prop'][glideid]
hereonly_jobs[dt] = condorq_dict_types[dt]['hereonly'][glideid]
count_status=count_status_multi[request_name]
#If the glidein requires a voms proxy, only match voms idle jobs
# Note: if GLEXEC is set to NEVER, the site will never see the proxy,
# so it can be avoided.
if (glexec != 'NEVER'):
if (glidein_el['attrs'].get('GLIDEIN_REQUIRE_VOMS')=="True"):
prop_jobs['Idle']=prop_jobs['VomsIdle']
logSupport.log.info("Voms proxy required, limiting idle glideins to: %i" % prop_jobs['Idle'])
elif (glidein_el['attrs'].get('GLIDEIN_REQUIRE_GLEXEC_USE')=="True"):
prop_jobs['Idle']=prop_jobs['ProxyIdle']
logSupport.log.info("Proxy required (GLEXEC), limiting idle glideins to: %i" % prop_jobs['Idle'])
# effective idle is how much more we need
# if there are idle slots, subtract them, they should match soon
effective_idle = prop_jobs['Idle'] - count_status['Idle']
if effective_idle < 0:
effective_idle = 0
effective_oldidle=prop_jobs['OldIdle']-count_status['Idle']
if effective_oldidle<0:
effective_oldidle=0
if count_status['Total']>=max_running:
# have all the running jobs I wanted
glidein_min_idle=0
elif count_status['Idle']>=max_vms_idle:
# enough idle vms, do not ask for more
glidein_min_idle=0
elif total_glideins>=total_max_glideins:
# reached the system-wide limit
glidein_min_idle=0
elif (effective_idle>0):
glidein_min_idle = effective_idle
glidein_min_idle=glidein_min_idle/3 # since it takes a few cycles to stabilize, ask for only one third
glidein_idle_reserve=effective_oldidle/3 # do not reserve any more than the number of old idles for reserve (/3)
if glidein_idle_reserve>reserve_idle:
glidein_idle_reserve=reserve_idle
glidein_min_idle+=glidein_idle_reserve
if glidein_min_idle>max_idle:
glidein_min_idle=max_idle # but never go above max
if glidein_min_idle>(max_running-count_status['Total']+glidein_idle_reserve):
glidein_min_idle=(max_running-count_status['Total']+glidein_idle_reserve) # don't go over the max_running
if glidein_min_idle>(total_max_glideins-total_glideins+glidein_idle_reserve):
# don't go over the system-wide max
                # not perfect, given the number of entries, but better than nothing
glidein_min_idle=(total_max_glideins-total_glideins+glidein_idle_reserve)
if count_status['Idle']>=curb_vms_idle:
                glidein_min_idle/=2 # above the first threshold, reduce
if total_glideins>=total_curb_glideins:
                glidein_min_idle/=2 # above the global threshold, reduce
if glidein_min_idle<1:
glidein_min_idle=1
else:
# no idle, make sure the glideins know it
glidein_min_idle = 0
glidein_min_idle=int(glidein_min_idle)
# we don't need more slots than number of jobs in the queue (unless the fraction is positive)
if (prop_jobs['Idle'] + count_real[glideid]) > 0:
if prop_jobs['Idle']>0:
glidein_max_run = int((prop_jobs['Idle'] + count_real[glideid]) * fraction_running + 1)
else:
# no good reason for a delta when we don't need more than we have
glidein_max_run = int(count_real[glideid])
else:
# the above calculation is always >0, but should be 0 if nothing in the user queue
glidein_max_run = 0
remove_excess_wait = False # do not remove excessive glideins by default
# keep track of how often idle was 0
if not history_obj.has_key('idle0'):
history_obj['idle0'] = {}
history_idle0 = history_obj['idle0']
if not history_idle0.has_key(glideid):
history_idle0[glideid] = 0
if count_jobs['Idle'] == 0:
# no idle jobs in the queue left
# consider asking for unsubmitted idle glideins to be removed
history_idle0[glideid] += 1
if history_idle0[glideid] > 5:
# nobody asked for anything more for some time, so
remove_excess_wait = True
else:
history_idle0[glideid] = 0
remove_excess_idle = False # do not remove excessive glideins by default
# keep track of how often glideidle was 0
if not history_obj.has_key('glideempty'):
history_obj['glideempty'] = {}
history_glideempty = history_obj['glideempty']
if not history_glideempty.has_key(glideid):
history_glideempty[glideid] = 0
if count_status['Idle'] >= count_status['Total']:
# no glideins being used
# consider asking for all idle glideins to be removed
history_glideempty[glideid] += 1
if remove_excess_wait and (history_glideempty[glideid] > 10):
# no requests and no glideins being used
# no harm getting rid of everything
remove_excess_idle = True
else:
history_glideempty[glideid] = 0
remove_excess_running = False # do not remove excessive glideins by default
# keep track of how often glidetotal was 0
if not history_obj.has_key('glidetotal0'):
history_obj['glidetotal0'] = {}
history_glidetotal0 = history_obj['glidetotal0']
if not history_glidetotal0.has_key(glideid):
history_glidetotal0[glideid] = 0
if count_status['Total'] == 0:
# no glideins registered
# consider asking for all idle glideins to be removed
history_glidetotal0[glideid] += 1
if remove_excess_wait and (history_glidetotal0[glideid] > 10):
# no requests and no glidein registered
# no harm getting rid of everything
remove_excess_running = True
else:
history_glidetotal0[glideid] = 0
if remove_excess_running:
remove_excess_str = "ALL"
elif remove_excess_idle:
remove_excess_str = "IDLE"
elif remove_excess_wait:
remove_excess_str = "WAIT"
else:
remove_excess_str = "NO"
this_stats_arr = (prop_jobs['Idle'], count_jobs['Idle'], effective_idle, prop_jobs['OldIdle'], hereonly_jobs['Idle'], count_jobs['Running'], count_real[glideid], max_running,
count_status['Total'], count_status['Idle'], count_status['Running'],
glidein_min_idle, glidein_max_run)
stats['group'].logMatchedJobs(
glideid_str, prop_jobs['Idle'], effective_idle, prop_jobs['OldIdle'],
count_jobs['Running'], count_real[glideid])
stats['group'].logMatchedGlideins(
glideid_str, count_status['Total'], count_status['Idle'],
count_status['Running'])
stats['group'].logFactAttrs(glideid_str, glidein_el['attrs'], ('PubKeyValue', 'PubKeyObj'))
stats['group'].logFactDown(glideid_str, glidein_in_downtime)
if glidein_in_downtime:
total_down_stats_arr = log_and_sum_factory_line(glideid_str, glidein_in_downtime, this_stats_arr, total_down_stats_arr)
else:
total_up_stats_arr = log_and_sum_factory_line(glideid_str, glidein_in_downtime, this_stats_arr, total_up_stats_arr)
# get the parameters
glidein_params = copy.deepcopy(paramsDescript.const_data)
for k in paramsDescript.expr_data.keys():
kexpr = paramsDescript.expr_objs[k]
# convert kexpr -> kval
glidein_params[k] = glideinFrontendLib.evalParamExpr(kexpr, paramsDescript.const_data, glidein_el)
# we will need this param to monitor orphaned glideins
glidein_params['GLIDECLIENT_ReqNode'] = factory_pool_node
stats['group'].logFactReq(
glideid_str, glidein_min_idle, glidein_max_run, glidein_params)
glidein_monitors = {}
for t in count_jobs.keys():
glidein_monitors[t] = count_jobs[t]
glidein_monitors['RunningHere'] = count_real[glideid]
for t in count_status.keys():
glidein_monitors['Glideins%s' % t] = count_status[t]
key_obj = None
for globalid in globals_dict.keys():
if glideid[1].endswith(globalid):
globals_el = globals_dict[globalid]
if (globals_el['attrs'].has_key('PubKeyObj') and globals_el['attrs'].has_key('PubKeyID')):
key_obj = key_builder.get_key_obj(my_identity, globals_el['attrs']['PubKeyID'], globals_el['attrs']['PubKeyObj'])
break
if glidein_el['attrs'].has_key('GLIDEIN_TrustDomain'):
trust_domain=glidein_el['attrs']['GLIDEIN_TrustDomain']
else:
trust_domain="Grid"
if glidein_el['attrs'].has_key('GLIDEIN_SupportedAuthenticationMethod'):
auth_method=glidein_el['attrs']['GLIDEIN_SupportedAuthenticationMethod']
else:
auth_method="grid_proxy"
# Only advertize if there is a valid key for encryption
if key_obj is not None:
advertizer.add(factory_pool_node,
request_name, request_name,
glidein_min_idle, glidein_max_run, glidein_params, glidein_monitors,
remove_excess_str=remove_excess_str,
key_obj=key_obj,glidein_params_to_encrypt=None,security_name=security_name,
trust_domain=trust_domain,auth_method=auth_method)
else:
logSupport.log.warning("Cannot advertise requests for %s because no factory %s key was found"% (request_name, factory_pool_node))
# Create the resource classad and populate the required information
resource_classad = glideinFrontendInterface.ResourceClassad(request_name, client_name)
resource_classad.setInDownTime(glidein_in_downtime)
resource_classad.setEntryInfo(glidein_el['attrs'])
resource_classad.setGlideFactoryMonitorInfo(glidein_el['monitor'])
resource_classad.setMatchExprs(elementDescript.merged_data['MatchExpr'],
elementDescript.merged_data['JobQueryExpr'],
elementDescript.merged_data['FactoryQueryExpr'],
attr_dict['GLIDECLIENT_Start'])
try:
resource_classad.setGlideClientMonitorInfo(this_stats_arr)
except RuntimeError:
logSupport.log.exception("Populating GlideClientMonitor info in resource classad failed: ")
resource_advertiser.addClassad(resource_classad.adParams['Name'], resource_classad)
# end for glideid in condorq_dict_types['Idle']['count'].keys()
###
# Find out the Factory entries that are running, but for which
# Factory ClassAds don't exist
#
factory_entry_list=glideinFrontendLib.getFactoryEntryList(status_dict)
processed_glideid_str_set=frozenset(processed_glideid_strs)
factory_entry_list.sort() # sort for the sake of monitoring
for a in factory_entry_list:
request_name,factory_pool_node=a
glideid_str="%s@%s"%(request_name,factory_pool_node)
if glideid_str in processed_glideid_str_set:
continue # already processed... ignore
count_status_multi[request_name]={}
for st in status_dict_types.keys():
c=glideinFrontendLib.getClientCondorStatus(status_dict_types[st]['dict'],frontend_name,group_name,request_name)
count_status_multi[request_name][st]=glideinFrontendLib.countCondorStatus(c)
count_status=count_status_multi[request_name]
# ignore matching jobs
# since we don't have the entry classad, we have no clue how to match
this_stats_arr=(0,0,0,0,0,0,0,0,
count_status['Total'],count_status['Idle'],count_status['Running'],
0,0)
stats['group'].logMatchedGlideins(
glideid_str, count_status['Total'],count_status['Idle'],
count_status['Running'])
# since I don't see it in the factory anymore, mark it as down
stats['group'].logFactDown(glideid_str, True)
total_down_stats_arr=log_and_sum_factory_line(glideid_str,True,this_stats_arr,total_down_stats_arr)
# Log the totals
for el in (('MatchedUp',total_up_stats_arr, True),('MatchedDown',total_down_stats_arr, False)):
el_str,el_stats_arr,el_updown=el
stats['group'].logMatchedJobs(
el_str, el_stats_arr[0],el_stats_arr[2], el_stats_arr[3],
el_stats_arr[5], el_stats_arr[6])
stats['group'].logMatchedGlideins(el_str,el_stats_arr[8],el_stats_arr[9], el_stats_arr[10])
stats['group'].logFactAttrs(el_str, [], ()) # just for completeness
stats['group'].logFactDown(el_str, el_updown)
stats['group'].logFactReq(el_str,el_stats_arr[11],el_stats_arr[12], {})
# Print the totals
# Ignore the resulting sum
log_factory_header()
log_and_sum_factory_line('Sum of useful factories', False, tuple(total_up_stats_arr), total_down_stats_arr)
log_and_sum_factory_line('Sum of down factories', True, tuple(total_down_stats_arr), total_up_stats_arr)
# Print unmatched... Ignore the resulting sum
unmatched_idle = condorq_dict_types['Idle']['count'][(None, None, None)]
unmatched_oldidle = condorq_dict_types['OldIdle']['count'][(None, None, None)]
unmatched_running = condorq_dict_types['Running']['count'][(None, None, None)]
stats['group'].logMatchedJobs(
'Unmatched', unmatched_idle, unmatched_idle, unmatched_oldidle,
unmatched_running, 0)
stats['group'].logMatchedGlideins('Unmatched', 0,0,0) # Nothing running
stats['group'].logFactAttrs('Unmatched', [], ()) # just for completeness
stats['group'].logFactDown('Unmatched', True)
stats['group'].logFactReq('Unmatched', 0, 0, {})
this_stats_arr = (unmatched_idle, unmatched_idle, unmatched_idle, unmatched_oldidle, unmatched_idle, unmatched_running, 0, 0,
0, 0, 0, # glideins... none, since no matching
0, 0) # requested... none, since not matching
log_and_sum_factory_line('Unmatched', True, this_stats_arr, total_down_stats_arr)
# Advertise glideclient and glideclient global classads
try:
logSupport.log.info("Advertising global requests")
advertizer.do_global_advertize()
except:
logSupport.log.exception("Unknown error advertising global requests")
try:
logSupport.log.info("Advertising glidein requests") # cannot advertise len of queue since has both global and glidein requests
advertizer.do_advertize()
except:
logSupport.log.exception("Unknown error advertising glidein requests")
logSupport.log.info("Done advertising requests")
# Advertise glideresource classads
try:
logSupport.log.info("Advertising %i glideresource classads to the user pool" % len(resource_advertiser.classads))
#logSupport.log.info("glideresource classads to advertise -\n%s" % resource_advertiser.getAllClassads())
resource_advertiser.advertiseAllClassads()
logSupport.log.info("Done advertising glideresource classads")
except RuntimeError:
logSupport.log.exception("Advertising failed: ")
except:
logSupport.log.exception("Advertising failed: ")
return
############################################################
def iterate(parent_pid, elementDescript, paramsDescript, attr_dict, signatureDescript, x509_proxy_plugin):
sleep_time = int(elementDescript.frontend_data['LoopDelay'])
factory_pools = elementDescript.merged_data['FactoryCollectors']
frontend_name = elementDescript.frontend_data['FrontendName']
group_name = elementDescript.element_data['GroupName']
stats = {}
history_obj = {}
if not elementDescript.frontend_data.has_key('X509Proxy'):
published_frontend_name = '%s.%s' % (frontend_name, group_name)
else:
# if using a VO proxy, label it as such
# this way we don't risk using the wrong proxy on the other side
# if/when we decide to stop using the proxy
published_frontend_name = '%s.XPVO_%s' % (frontend_name, group_name)
try:
is_first = 1
while 1: # will exit by exception
check_parent(parent_pid)
logSupport.log.info("Iteration at %s" % time.ctime())
try:
# recreate every time (an easy way to start from a clean state)
stats['group'] = glideinFrontendMonitoring.groupStats()
done_something = iterate_one(published_frontend_name, elementDescript, paramsDescript, attr_dict, signatureDescript, x509_proxy_plugin, stats, history_obj)
logSupport.log.info("iterate_one status: %s" % str(done_something))
logSupport.log.info("Writing stats")
try:
write_stats(stats)
except KeyboardInterrupt:
raise # this is an exit signal, pass through
except:
# never fail for stats reasons!
logSupport.log.exception("Exception occurred writing stats: " )
except KeyboardInterrupt:
raise # this is an exit signal, pass through
except:
if is_first:
raise
else:
# if not the first pass, just warn
logSupport.log.exception("Exception occurred iteration: ")
is_first = 0
# do it just before the sleep
cleanupSupport.cleaners.cleanup()
logSupport.log.info("Sleep")
time.sleep(sleep_time)
finally:
logSupport.log.info("Deadvertize my ads")
for factory_pool in factory_pools:
factory_pool_node = factory_pool[0]
try:
glideinFrontendInterface.deadvertizeAllWork(factory_pool_node, published_frontend_name)
except:
pass # just ignore errors... this was cleanup
try:
glideinFrontendInterface.deadvertizeAllGlobals(factory_pool_node, published_frontend_name)
except:
pass # just ignore errors... this was cleanup
# Invalidate all resource classads
try:
resource_advertiser = glideinFrontendInterface.ResourceClassadAdvertiser()
resource_advertiser.invalidateConstrainedClassads('GlideClientName == "%s"' % published_frontend_name)
except:
# Ignore all errors
pass
############################################################
def main(parent_pid, work_dir, group_name):
startup_time = time.time()
elementDescript = glideinFrontendConfig.ElementMergedDescript(work_dir, group_name)
# the log dir is shared between the frontend main and the groups, so use a subdir
logSupport.log_dir = os.path.join(elementDescript.frontend_data['LogDir'], "group_%s" % group_name)
# Configure frontend group process logging
process_logs = eval(elementDescript.frontend_data['ProcessLogs'])
for plog in process_logs:
logSupport.add_processlog_handler(group_name, logSupport.log_dir, plog['msg_types'], plog['extension'],
int(float(plog['max_days'])),
int(float(plog['min_days'])),
int(float(plog['max_mbytes'])))
logSupport.log = logging.getLogger(group_name)
logSupport.log.info("Logging initialized")
logSupport.log.debug("Frontend Element startup time: %s" % str(startup_time))
paramsDescript = glideinFrontendConfig.ParamsDescript(work_dir, group_name)
attrsDescript = glideinFrontendConfig.AttrsDescript(work_dir,group_name)
signatureDescript = glideinFrontendConfig.GroupSignatureDescript(work_dir, group_name)
#
# We decided we will not use the data from the stage area
# Leaving it commented out in the code, in case we decide to bring it back in the future;
# it was a good validation of the Web server health
#
#stageArea=glideinFrontendConfig.MergeStageFiles(elementDescript.frontend_data['WebURL'],
# signatureDescript.signature_type,
# signatureDescript.frontend_descript_fname,signatureDescript.frontend_descript_signature,
# group_name,
# signatureDescript.group_descript_fname,signatureDescript.group_descript_signature)
# constsDescript=stageArea.get_constants()
#
attr_dict=attrsDescript.data
glideinFrontendMonitoring.monitoringConfig.monitor_dir = os.path.join(work_dir, "monitor/group_%s" % group_name)
glideinFrontendInterface.frontendConfig.advertise_use_tcp = (elementDescript.frontend_data['AdvertiseWithTCP'] in ('True', '1'))
glideinFrontendInterface.frontendConfig.advertise_use_multi = (elementDescript.frontend_data['AdvertiseWithMultiple'] in ('True', '1'))
try:
glideinwms_dir = os.path.dirname(os.path.dirname(sys.argv[0]))
glideinFrontendInterface.frontendConfig.glideinwms_version = glideinWMSVersion.GlideinWMSDistro(glideinwms_dir, 'checksum.frontend').version()
except:
logSupport.log.exception("Exception occurred while trying to retrieve the glideinwms version: ")
if len(elementDescript.merged_data['Proxies']) > 0:
if not glideinFrontendPlugins.proxy_plugins.has_key(elementDescript.merged_data['ProxySelectionPlugin']):
logSupport.log.warning("Invalid ProxySelectionPlugin '%s', supported plugins are %s" % (elementDescript.merged_data['ProxySelectionPlugin']), glideinFrontendPlugins.proxy_plugins.keys())
return 1
x509_proxy_plugin = glideinFrontendPlugins.proxy_plugins[elementDescript.merged_data['ProxySelectionPlugin']](os.path.join(work_dir, "group_%s" % group_name), glideinFrontendPlugins.createCredentialList(elementDescript))
else:
# no proxies, will try to use the factory one
x509_proxy_plugin = None
# set the condor configuration and GSI setup globally, so I don't need to worry about it later on
os.environ['CONDOR_CONFIG'] = elementDescript.frontend_data['CondorConfig']
os.environ['_CONDOR_CERTIFICATE_MAPFILE'] = elementDescript.element_data['MapFile']
os.environ['X509_USER_PROXY'] = elementDescript.frontend_data['ClassAdProxy']
# create lock file
pid_obj = glideinFrontendPidLib.ElementPidSupport(work_dir, group_name)
pid_obj.register(parent_pid)
try:
try:
logSupport.log.info("Starting up")
iterate(parent_pid, elementDescript, paramsDescript, attr_dict, signatureDescript, x509_proxy_plugin)
except KeyboardInterrupt:
logSupport.log.info("Received signal...exit")
except:
logSupport.log.exception("Unhandled exception, dying: ")
finally:
pid_obj.relinquish()
############################################################
#
# S T A R T U P
#
############################################################
def termsignal(signr, frame):
raise KeyboardInterrupt, "Received signal %s" % signr
if __name__ == '__main__':
signal.signal(signal.SIGTERM, termsignal)
signal.signal(signal.SIGQUIT, termsignal)
main(int(sys.argv[1]), sys.argv[2], sys.argv[3])
|
|
__author__ = 'henningo'
from ..tiremodelbase import TireModelBase
from ..solvermode import SolverMode
import math
import numpy as np
from opentire.TireModel.PAC2002.PAC2002_Core import PAC2002_Core
class PAC2002(TireModelBase):
def createmodel(self):
self.ModelInfo = dict()
self.Coefficients = dict()
self.ModelInfo['Name'] = 'PAC2002'
self.ModelInfo['Description'] = 'An implementation of Pacejka 2002 as described in the First Edition of Tire and Vehicle Dynamics'
# Create an instance for internal calcs
# These are typically called multiple times in the code
self.Core = PAC2002_Core()
# General Coefficients
self.Coefficients['FNOMIN'] = 4850
self.Coefficients['UNLOADED_RADIUS'] = 0.344
self.Coefficients['LONGVL'] = 16.6
# General Scaling Factors
self.Coefficients['LFZ0'] = 1.0
self.Coefficients['LCZ'] = 1.0
# Pure Longitudinal Scaling Factors
self.Coefficients['LCX'] = 1.0
self.Coefficients['LMUX'] = 1.0
self.Coefficients['LEX'] = 1.0
self.Coefficients['LKX'] = 1.0
self.Coefficients['LHX'] = 1.0
self.Coefficients['LVX'] = 1.0
self.Coefficients['LGAX'] = 1.0
# Pure Lateral Scaling Factors
self.Coefficients['LCY'] = 1.0
self.Coefficients['LMUY'] = 1.0
self.Coefficients['LEY'] = 1.0
self.Coefficients['LKY'] = 1.0
self.Coefficients['LHY'] = 1.0
self.Coefficients['LVY'] = 1.0
self.Coefficients['LGAY'] = 1.0
# Pure Aligning Moment Scaling Factors
self.Coefficients['LTR'] = 1.0
self.Coefficients['LRES'] = 1.0
self.Coefficients['LGAZ'] = 1.0
# Combined Scaling Factors
self.Coefficients['LXAL'] = 1.0
self.Coefficients['LYKA'] = 1.0
self.Coefficients['LVYKA'] = 1.0
self.Coefficients['LS'] = 1.0
# Overturning Scaling Factors
self.Coefficients['LMX'] = 1.0
self.Coefficients['LVMX'] = 1.0
# Rolling Resistance Factors
self.Coefficients['LMY'] = 1.0
#Relaxation Scaling Factors
self.Coefficients['LSGKP'] = 1.0
self.Coefficients['LSGAL'] = 1.0
# Pure Lateral Coefficients
self.Coefficients['PCY1'] = 1.3507
self.Coefficients['PDY1'] = 1.0489
self.Coefficients['PDY2'] = -0.18033
self.Coefficients['PDY3'] = -2.8821
self.Coefficients['PEY1'] = -0.0074722
self.Coefficients['PEY2'] = -0.0063208
self.Coefficients['PEY3'] = -9.9935
self.Coefficients['PEY4'] = -760.14
self.Coefficients['PKY1'] = -21.92
self.Coefficients['PKY2'] = 2.0012
self.Coefficients['PKY3'] = -0.024778
self.Coefficients['PHY1'] = 0.0026747
self.Coefficients['PHY2'] = 8.9094e-005
self.Coefficients['PHY3'] = 0.031415
self.Coefficients['PVY1'] = 0.037318
self.Coefficients['PVY2'] = -0.010049
self.Coefficients['PVY3'] = -0.32931
self.Coefficients['PVY4'] = -0.69553
# Combined Lateral Coefficients
self.Coefficients['RBY1'] = 7.1433
self.Coefficients['RBY2'] = 9.1916
self.Coefficients['RBY3'] = -0.027856
self.Coefficients['RCY1'] = 1.0719
self.Coefficients['REY1'] = -0.27572
self.Coefficients['REY2'] = 0.32802
self.Coefficients['RHY1'] = 5.7448e-006
self.Coefficients['RHY2'] = -3.1368e-005
self.Coefficients['RVY1'] = -0.027825
self.Coefficients['RVY2'] = 0.053604
self.Coefficients['RVY3'] = -0.27568
self.Coefficients['RVY4'] = 12.12
self.Coefficients['RVY5'] = 1.9
self.Coefficients['RVY6'] = -10.704
# Pure Aligning Torque Coefficients
self.Coefficients['QBZ1'] = 10.904
self.Coefficients['QBZ2'] = -1.8412
self.Coefficients['QBZ3'] = -0.52041
self.Coefficients['QBZ4'] = 0.039211
self.Coefficients['QBZ5'] = 0.41511
self.Coefficients['QBZ9'] = 8.9846
self.Coefficients['QBZ10'] = 0.0
self.Coefficients['QCZ1'] = 1.2136
self.Coefficients['QDZ1'] = 0.093509
self.Coefficients['QDZ2'] = -0.009218
self.Coefficients['QDZ3'] = -0.057061
self.Coefficients['QDZ4'] = 0.73954
self.Coefficients['QDZ6'] = -0.0067783
self.Coefficients['QDZ7'] = 0.0052254
self.Coefficients['QDZ8'] = -0.18175
self.Coefficients['QDZ9'] = 0.029952
self.Coefficients['QEZ1'] = -1.5697
self.Coefficients['QEZ2'] = 0.33394
self.Coefficients['QEZ3'] = 0.0
self.Coefficients['QEZ4'] = 0.26711
self.Coefficients['QEZ5'] = -3.594
self.Coefficients['QHZ1'] = 0.0047326
self.Coefficients['QHZ2'] = 0.0026687
self.Coefficients['QHZ3'] = 0.11998
self.Coefficients['QHZ4'] = 0.059083
# Combined Aligning Coefficients
self.Coefficients['SSZ1'] = 0.033372
self.Coefficients['SSZ2'] = 0.0043624
self.Coefficients['SSZ3'] = 0.56742
self.Coefficients['SSZ4'] = -0.24116
# Pure longitudinal coefficients
self.Coefficients['PCX1'] = 1.6411
self.Coefficients['PDX1'] = 1.1739
self.Coefficients['PDX2'] = -0.16395
self.Coefficients['PDX3'] = 5.0
self.Coefficients['PEX1'] = 0.46403
self.Coefficients['PEX2'] = 0.25022
self.Coefficients['PEX3'] = 0.067842
self.Coefficients['PEX4'] = -3.7604e-005
self.Coefficients['PKX1'] = 22.303
self.Coefficients['PKX2'] = 0.48896
self.Coefficients['PKX3'] = 0.21253
self.Coefficients['PHX1'] = 0.0012297  # TODO: There is something funky with these params. Should they be zero, to have no offset at SR=0?
self.Coefficients['PHX2'] = 0.0004318
self.Coefficients['PVX1'] = -8.8098e-006
self.Coefficients['PVX2'] = 1.862e-005
# Combined longitudinal coefficients
self.Coefficients['RBX1'] = 13.276
self.Coefficients['RBX2'] = -13.778
self.Coefficients['RCX1'] = 1.2568
self.Coefficients['REX1'] = 0.65225
self.Coefficients['REX2'] = -0.24948
self.Coefficients['RHX1'] = 0.0050722
# Overturning Moment Coefficients
self.Coefficients['QSX1'] = 2.3155e-04
self.Coefficients['QSX2'] = 0.51574
self.Coefficients['QSX3'] = 0.046399
# Rolling Resistance Coefficients
self.Coefficients['QSY1'] = 0.01
self.Coefficients['QSY2'] = 0.0
self.Coefficients['QSY3'] = 0.0
self.Coefficients['QSY4'] = 0.0
# Loaded Radius
self.Coefficients['QV1'] = 7.15073791e-005
self.Coefficients['QV2'] = 0.14892
self.Coefficients['QFCX'] = 0
self.Coefficients['QFCY'] = 0
self.Coefficients['QFCG'] = -3.0
self.Coefficients['QFZ1'] = 28.0249
self.Coefficients['QFZ2'] = 29.2
# Rolling Radius
self.Coefficients['BREFF'] = 8.4
self.Coefficients['DREFF'] = 0.27
self.Coefficients['FREFF'] = 0.07
# Lateral Relaxation
self.Coefficients['PTY1'] = 2.1439
self.Coefficients['PTY2'] = 1.9829
# Longitudinal Relaxation
self.Coefficients['PTX1'] = 2.3657
self.Coefficients['PTX2'] = 1.4112
self.Coefficients['PTX3'] = 0.56626
# Turn-slip and parking reduction factors.
# Note that turn slip isn't implemented yet,
# therefore all these reduction factors are set to 1.
# Future versions will need to calculate them at every time step.
self.ZETA0 = 1
self.ZETA1 = 1
self.ZETA2 = 1
self.ZETA3 = 1
self.ZETA4 = 1
self.ZETA5 = 1
self.ZETA6 = 1
self.ZETA7 = 1
self.ZETA8 = 1
def save(self, fname, data):
return 'saving'
def load(self, fname):
return 'loading'
def solve(self, state, mode=SolverMode.All):
if mode is SolverMode.PureFy or mode is SolverMode.PureMz:
state['FY'] = self.calculate_pure_fy(state)
if mode is SolverMode.Fy or mode is SolverMode.All:
state['FY'] = self.calculate_fy(state)
if mode is SolverMode.PureFx:
state['FX'] = self.calculate_pure_fx(state)
if mode is SolverMode.Fx or mode is SolverMode.All:
state['FX'] = self.calculate_fx(state)
if mode is SolverMode.PureMz:
state['MZ'] = self.calculate_mz(state)
if mode is SolverMode.Mz or mode is SolverMode.All:
state['MZ'] = self.calculate_mz(state)
if mode is SolverMode.Mx or mode is SolverMode.All:
state['MX'] = self.calculate_mx(state)
if mode is SolverMode.Mz or mode is SolverMode.All:
state['MY'] = self.calculate_my(state)
if mode is SolverMode.Radius or mode is SolverMode.All:
state['RL'], state['RE'] = self.calculate_radius(state)
if mode is SolverMode.Relaxation or mode is SolverMode.All:
state['SIGMA_ALPHA'] = self.calculate_lateral_relaxation_length(state)
state['SIGMA_KAPPA'] = self.calculate_longitudinal_relaxation_length(state)
return state
def getparameters(self):
return self.Coefficients
def setparameters(self, params):
#TODO: Must check that the params keys actually match the model's coefficients.
self.Coefficients = params
return True # should return False if the parameter structure doesn't match the required one
def getmodelinfo(self):
return self.ModelInfo
### Private helper methods
def calculate_common_values(self, state):
# First we calculate some standard values used in multiple locations
dfz = (state['FZ'] - self.Coefficients['FNOMIN']) / self.Coefficients['FNOMIN']
alpha_star = math.tan(state['SA']) * np.sign(state['V'])
gamma_star = math.sin(state['IA'])
kappa = state['SR']
return dfz, alpha_star, gamma_star, kappa
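# Descriptive note (based on the expressions above): SA and IA are expected in
# radians (math.tan/math.sin are applied directly), SR is the dimensionless slip
# ratio, and dfz is the vertical load increment normalized by FNOMIN.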
def calculate_fx(self, state):
p = self.Coefficients
dfz, alpha_star, gamma_star, kappa = self.calculate_common_values(state)
F_x0 = self.calculate_pure_fx(state)
S_Hxa = self.Core.calculate_S_Hxa(p)
#60
alpha_s = alpha_star + S_Hxa
B_xa = self.Core.calculate_B_xa(p, kappa)
C_xa = self.Core.calculate_C_xa(p)
E_xa = self.Core.calculate_E_xa(p, dfz)
# 66
G_xa_numerator = math.cos(C_xa * math.atan(B_xa * alpha_s - E_xa * (B_xa * alpha_s - math.atan(B_xa * alpha_s))))
G_xa_denominator = math.cos(C_xa * math.atan(B_xa * S_Hxa - E_xa * (B_xa * S_Hxa - math.atan(B_xa * S_Hxa))))
G_xa = G_xa_numerator / G_xa_denominator
return F_x0 * G_xa
def calculate_pure_fx(self, state):
p = self.Coefficients
dfz, alpha_star, gamma_star, kappa = self.calculate_common_values(state)
C_x = self.Core.calculate_C_x(p)
D_x = self.Core.calculate_D_x(p, state, dfz, gamma_star, self.ZETA1)
S_Hx = self.Core.calculate_S_Hx(p, dfz)
# 19
kappa_x = kappa + S_Hx
E_x = self.Core.calculate_E_x(p, dfz, kappa_x)
K_x = self.Core.calculate_K_x(p, state, dfz)
B_x = self.Core.calculate_B_x(C_x, D_x, K_x)
S_Vx = self.Core.calculate_S_Vx(p, state, dfz, self.ZETA1)
# 18
fx_pure = D_x * math.sin((C_x * math.atan(B_x * kappa_x - E_x * (B_x * kappa_x - math.atan(B_x * kappa_x))))) + S_Vx
return fx_pure
def calculate_fy(self, state):
p = self.Coefficients
dfz, alpha_star, gamma_star, kappa = self.calculate_common_values(state)
F_y0 = self.calculate_pure_fy(state)
gamma_y = self.Core.calculate_gamma_y(p, gamma_star)
B_yk = self.Core.calculate_B_yk(p, alpha_star)
C_yk = self.Core.calculate_C_yk(p)
E_yk = self.Core.calculate_E_yk(p, dfz)
D_Vyk = self.Core.calculate_D_Vyk(p, state, dfz, gamma_y, alpha_star, gamma_star)
S_Vyk = self.Core.calculate_S_Vyk(p, kappa, D_Vyk)
# 69
S_Hyk = self.Core.calculate_S_Hyk(p, dfz)
kappa_s = kappa + S_Hyk
# 77
G_yk_numerator = math.cos(C_yk * math.atan(B_yk * kappa_s - E_yk * (B_yk * kappa_s - math.atan(B_yk * kappa_s))))
G_yk_denominator = math.cos(C_yk * math.atan(B_yk * S_Hyk - E_yk * (B_yk * S_Hyk - math.atan(B_yk * S_Hyk))))
G_yk = G_yk_numerator/G_yk_denominator
return G_yk * F_y0 + S_Vyk
def calculate_pure_fy(self, state):
p = self.Coefficients
dfz, alpha_star, gamma_star, kappa = self.calculate_common_values(state)
gamma_y = self.Core.calculate_gamma_y(p, gamma_star)
C_y = self.Core.calculate_C_y(p)
D_y = self.Core.calculate_D_y(p, state, dfz, gamma_y, self.ZETA1)
S_Hy = self.Core.calculate_S_Hy(p, dfz, gamma_y, self.ZETA0, self.ZETA4)
# 31
alpha_y = alpha_star + S_Hy
E_y = self.Core.calculate_E_y(p, dfz, gamma_y, alpha_y)
K_y = self.Core.calculate_K_y(p, state, gamma_y, self.ZETA3)
B_y = self.Core.calculate_B_y(C_y, D_y, K_y)
S_Vy = self.Core.calculate_S_Vy(p, state, dfz, gamma_y, self.ZETA4)
# 30
fy_pure = D_y * math.sin(C_y * math.atan(B_y * alpha_y - E_y * (B_y * alpha_y - math.atan(B_y * alpha_y)))) + S_Vy
return fy_pure
def calculate_mz(self, state):
p = self.Coefficients
dfz, alpha_star, gamma_star, kappa = self.calculate_common_values(state)
# 32
gamma_y = self.Core.calculate_gamma_y(p, gamma_star)
gamma_z = self.Core.calculate_gamma_z(p, gamma_star)
# Combined Trail Calcs
S_Ht = self.Core.calculate_S_Ht(p, dfz, gamma_z)
# 47
alpha_t = alpha_star + S_Ht
K_x = self.Core.calculate_K_x(p, state, dfz)
K_y = self.Core.calculate_K_y(p, state, gamma_y, self.ZETA3)
# 84
alpha_teq = math.atan(math.sqrt(math.tan(alpha_t)**2 + (K_x/K_y)**2 * kappa**2) * np.sign(alpha_t))
B_t = self.Core.calculate_B_t(p, dfz, gamma_z)
C_t = self.Core.calculate_C_t(p)
D_t = self.Core.calculate_D_t(p, state, dfz, gamma_z, self.ZETA5)
E_t = self.Core.calculate_E_t(p, dfz, gamma_z, alpha_t, B_t, C_t)
# Combined Trail Calc, here we are using alpha_teq instead of alpha_t
t = self.Core.calculate_t(p, B_t, C_t, D_t, E_t, alpha_teq, alpha_star)
# Combined Residual Torque Calcs
S_Hy = self.Core.calculate_S_Hy(p, dfz, gamma_y, self.ZETA0, self.ZETA4)
S_Vy = self.Core.calculate_S_Vy(p, state, dfz, gamma_y, self.ZETA4)
K_y = self.Core.calculate_K_y(p, state, gamma_y, self.ZETA3)
S_Hf = self.Core.calculate_S_Hf(S_Hy, S_Vy, K_y)
# 47
alpha_r = alpha_star + S_Hf
# 85
alpha_req = math.atan(math.sqrt(math.tan(alpha_r)**2 + (K_x/K_y)**2 * kappa**2) * np.sign(alpha_r))
C_y = self.Core.calculate_C_y(p)
D_y = self.Core.calculate_D_y(p, state, dfz, gamma_y, self.ZETA1)
B_y = self.Core.calculate_B_y(C_y, D_y, K_y)
B_r = self.Core.calculate_B_r(p, B_y, C_y, self.ZETA6)
C_r = self.Core.calculate_C_r(self.ZETA7)
D_r = self.Core.calculate_D_r(p, state, dfz, gamma_z, self.ZETA8)
M_zr = self.Core.calculate_M_zr(B_r, C_r, D_r, alpha_req, alpha_star)
# FY Prime Calcs
D_Vyk = self.Core.calculate_D_Vyk(p, state, dfz, gamma_y, alpha_star, gamma_star)
S_Vyk = self.Core.calculate_S_Vyk(p, kappa, D_Vyk)
Fy_prime = state['FY'] - S_Vyk # This is the combined lateral force without Fx induced Fy
# Pneumatic scrub (s) calcs
s = p['UNLOADED_RADIUS'] * (p['SSZ1'] + p['SSZ2'] * (state['FY'] / (p['FNOMIN'] * p['LFZ0'])) + (p['SSZ3'] + p['SSZ4'] * dfz) * gamma_star) * p['LS']
M_prime = -t * Fy_prime + M_zr + s * state['FX']
return M_prime
def calculate_pure_mz(self, state):
p = self.Coefficients
dfz, alpha_star, gamma_star, kappa = self.calculate_common_values(state)
gamma_z = self.Core.calculate_gamma_z(p, gamma_star)
gamma_y = self.Core.calculate_gamma_y(p, gamma_star)
# Trail calculations (D_t, C_t, B_t, E_t)
S_Ht = self.Core.calculate_S_Ht(p, dfz, gamma_z)
# 47
alpha_t = alpha_star + S_Ht
B_t = self.Core.calculate_B_t(p, dfz, gamma_z)
C_t = self.Core.calculate_C_t(p)
D_t = self.Core.calculate_D_t(p, state, dfz, gamma_z, self.ZETA5)
E_t = self.Core.calculate_E_t(p, dfz, gamma_z, alpha_t, B_t, C_t)
# Trail Calculation
t = self.Core.calculate_t(p, B_t, C_t, D_t, E_t, alpha_t, alpha_star)
# Residual Moment Calculations (C_r, D_r, B_r)
# These calcs use values from the pure Fy calculation, so they are repeated here (such as K_y and D_y)
S_Hy = self.Core.calculate_S_Hy(p, dfz, gamma_y, self.ZETA0, self.ZETA4)
S_Vy = self.Core.calculate_S_Vy(p, state, dfz, gamma_y, self.ZETA4)
K_y = self.Core.calculate_K_y(p, state, gamma_y, self.ZETA3)
C_y = self.Core.calculate_C_y(p)
D_y = self.Core.calculate_D_y(p, state, dfz, gamma_y, self.ZETA1)
B_y = self.Core.calculate_B_y(C_y, D_y, K_y)
B_r = self.Core.calculate_B_r(p, B_y, C_y, self.ZETA6)
C_r = self.Core.calculate_C_r(self.ZETA7)
D_r = self.Core.calculate_D_r(p, state, dfz, gamma_z, self.ZETA8)
S_Hf = self.Core.calculate_S_Hf(S_Hy, S_Vy, K_y)
# 47
alpha_r = alpha_star + S_Hf
# Residual Moment Calculation
M_zr = self.Core.calculate_M_zr(B_r, C_r, D_r, alpha_r, alpha_star)
fy_pure = state['FY'] # This requires that FY have been calculated already
mz_pure = -t * fy_pure + M_zr
return mz_pure
def calculate_mx(self, state):
p = self.Coefficients
dfz, alpha_star, gamma_star, kappa = self.calculate_common_values(state)
# 86
M_x = p['UNLOADED_RADIUS'] * state['FZ'] * (p['QSX1'] * p['LVMX'] - p['QSX2'] * gamma_star + p['QSX3'] * state['FY'] / p['FNOMIN']) * p['LMX']
return M_x
def calculate_my(self, state):
p = self.Coefficients
dfz, alpha_star, gamma_star, kappa = self.calculate_common_values(state)
# 87
M_y = p['UNLOADED_RADIUS'] * state['FZ'] * (p['QSY1'] + p['QSY2'] * state['FX']/p['FNOMIN'] + p['QSY3'] * abs(state['V'] / p['LONGVL']) + p['QSY4'] * (state['V'] / p['LONGVL'])**4) * p['LMY']
return M_y
def calculate_radius(self, state):
p = self.Coefficients
dfz, alpha_star, gamma_star, kappa = self.calculate_common_values(state)
# Omega is not part of the state, so approximate it from the forward speed
omega = state['V'] / (p['UNLOADED_RADIUS'] * 0.98)
# First we solve for dynamic displacement
# External Effects
speed_effect = p['QV2'] * abs(omega) * p['UNLOADED_RADIUS'] / p['LONGVL']
fx_effect = (p['QFCX'] * state['FX'] / p['FNOMIN'])**2
fy_effect = (p['QFCY'] * state['FY'] / p['FNOMIN'])**2
camber_effect = p['QFCG'] * gamma_star**2
external_effects = 1.0 + speed_effect - fx_effect - fy_effect + camber_effect
# Solve Fz / (external_effects * Fz0) = a*rho**2 + b*rho for the deflection rho,
# i.e. 0 = a*rho**2 + b*rho + c with c = -Fz / (external_effects * Fz0)
a = (p['QFZ2'] / p['UNLOADED_RADIUS'])**2
b = p['QFZ1'] / p['UNLOADED_RADIUS']
c = -(state['FZ'] / (external_effects * p['FNOMIN']))
if b**2 - 4*a*c > 0:
rho = (-b + math.sqrt(b**2 - 4*a*c)) / (2 * a)
else:
rho = 999999
# Then we calculate free-spinning radius
R_omega = p['UNLOADED_RADIUS'] + p['QV1'] * p['UNLOADED_RADIUS'] * (omega * p['UNLOADED_RADIUS'] / p['LONGVL'])**2
# The loaded radius is the free-spinning radius minus the deflection
R_l = R_omega - rho
# Effective Rolling Radius
# Nominal stiffness
C_z0 = p['FNOMIN'] / p['UNLOADED_RADIUS'] * math.sqrt(p['QFZ1']**2 + 4.0 * p['QFZ2'])
if C_z0 == 0.0:
return 0.0, 0.0
# Eff. Roll. Radius (older formulation, kept for reference; the PAC2002 version follows below)
R_e_old = R_omega - (p['FNOMIN'] / C_z0) * (p['DREFF'] * math.atan(p['BREFF'] * state['FZ'] / p['FNOMIN']) + p['FREFF'] * state['FZ'] / p['FNOMIN'])
# Eff. Roll. Radius Pac 2002
C_z = p['QFZ1'] * p['FNOMIN'] / p['UNLOADED_RADIUS']
rho_Fz0 = p['FNOMIN'] / (C_z0 * p['LCZ'])
rho_d = rho/rho_Fz0
R_e = R_omega - rho_Fz0 * (p['DREFF'] * math.atan(p['BREFF'] * rho_d) + p['FREFF'] * rho_d)
return R_l, R_e
def calculate_lateral_relaxation_length(self, state):
p = self.Coefficients
if p['PTY2'] == 0:
return 0
dfz, alpha_star, gamma_star, kappa = self.calculate_common_values(state)
gamma_y = self.Core.calculate_gamma_y(p, gamma_star)
# 93
sigma_alpha = p['PTY1'] * math.sin(2.0 * math.atan(state['FZ'] / (p['PTY2'] * p['FNOMIN'] * p['LFZ0']))) * (1 - p['PKY3'] * abs(gamma_y)) * p['UNLOADED_RADIUS'] * p['LFZ0'] * p['LSGAL']
return sigma_alpha
def calculate_longitudinal_relaxation_length(self, state):
p = self.Coefficients
dfz, alpha_star, gamma_star, kappa = self.calculate_common_values(state)
# 92
sigma_kappa = state['FZ'] * (p['PTX1'] + p['PTX2'] * dfz) * math.exp(-p['PTX3'] * dfz) * (p['UNLOADED_RADIUS'] / p['FNOMIN']) * p['LSGKP']
return sigma_kappa
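# Minimal usage sketch (illustrative only). The state keys below (SA, SR, IA, FZ, V)
# are the entries read by the calculations above; the numeric values are arbitrary
# examples, not reference data, and this helper is not part of the model itself.
def _example_pac2002_usage():
    tire = PAC2002()
    tire.createmodel()
    state = {'SA': 0.05,    # slip angle [rad]
             'SR': 0.02,    # slip ratio [-]
             'IA': 0.0,     # inclination (camber) angle [rad]
             'FZ': 4000.0,  # vertical load [N]
             'V': 20.0}     # forward speed [m/s]
    state = tire.solve(state, SolverMode.All)
    return state['FX'], state['FY'], state['MZ']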
|
|
# Copyright (c) 2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
import m5
from m5.defines import buildEnv
from m5.objects import *
from Benchmarks import *
import CpuConfig
import MemConfig
import PlatformConfig
from FSConfig import os_types
def _listCpuTypes(option, opt, value, parser):
CpuConfig.print_cpu_list()
sys.exit(0)
def _listMemTypes(option, opt, value, parser):
MemConfig.print_mem_list()
sys.exit(0)
def _listPlatformTypes(option, opt, value, parser):
PlatformConfig.print_platform_list()
sys.exit(0)
def _preprocess_replicate(option, size):
if len(option) > size:
print "Bug on def _preprocess_replicate(option, size)", option, size
sys.exit(1)
if len(option) < size:
last = option[len(option)-1]
while len(option) < size:
option.append(last)
print option
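# Illustrative behavior (not executed here): with option = ["2GHz"] and size = 3 the
# list is padded in place to ["2GHz", "2GHz", "2GHz"]; if the list is longer than
# size, an error is printed and the script exits.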
def options_preprocess(options):
options.cpus_types = options.cpus_types.split(";")
options.num_cpus_eachtype = options.num_cpus_eachtype.split(";")
options.cpus_type_names = options.cpus_type_names.split(";")
options.cpu_clock = options.cpu_clock.split(";")
options.cpu_voltage = options.cpu_voltage.split(";")
options.cpu_pipeline_width = options.cpu_pipeline_width.split(";")
options.cpu_LQentries = options.cpu_LQentries.split(";")
options.cpu_SQentries = options.cpu_SQentries.split(";")
options.cpu_IQentries = options.cpu_IQentries.split(";")
options.cpu_ROBentries = options.cpu_ROBentries.split(";")
options.cpu_numPhysIntRegs = options.cpu_numPhysIntRegs.split(";")
options.cpu_numPhysFloatRegs = options.cpu_numPhysFloatRegs.split(";")
options.cpu_localPredictorSize = options.cpu_localPredictorSize.split(";")
options.cpu_globalPredictorSize = options.cpu_globalPredictorSize.split(";")
options.cpu_choicePredictorSize = options.cpu_choicePredictorSize.split(";")
options.cpu_BTBEntries = options.cpu_BTBEntries.split(";")
options.cpu_RASSize = options.cpu_RASSize.split(";")
options.num_l2caches = options.num_l2caches.split(";")
options.l1d_size = options.l1d_size.split(";")
options.l1i_size = options.l1i_size.split(";")
options.l2_size = options.l2_size.split(";")
options.l1d_assoc = options.l1d_assoc.split(";")
options.l1i_assoc = options.l1i_assoc.split(";")
options.l2_assoc = options.l2_assoc.split(";")
_preprocess_replicate(options.num_cpus_eachtype, options.num_cpus_types)
_preprocess_replicate(options.cpus_type_names, options.num_cpus_types)
_preprocess_replicate(options.cpu_clock, options.num_cpus_types)
_preprocess_replicate(options.cpu_voltage, options.num_cpus_types)
_preprocess_replicate(options.cpu_pipeline_width, options.num_cpus_types)
_preprocess_replicate(options.cpu_LQentries, options.num_cpus_types)
_preprocess_replicate(options.cpu_SQentries, options.num_cpus_types)
_preprocess_replicate(options.cpu_IQentries, options.num_cpus_types)
_preprocess_replicate(options.cpu_ROBentries, options.num_cpus_types)
_preprocess_replicate(options.cpu_numPhysIntRegs, options.num_cpus_types)
_preprocess_replicate(options.cpu_numPhysFloatRegs, options.num_cpus_types)
_preprocess_replicate(options.cpu_localPredictorSize, options.num_cpus_types)
_preprocess_replicate(options.cpu_globalPredictorSize, options.num_cpus_types)
_preprocess_replicate(options.cpu_choicePredictorSize, options.num_cpus_types)
_preprocess_replicate(options.cpu_BTBEntries, options.num_cpus_types)
_preprocess_replicate(options.cpu_RASSize, options.num_cpus_types)
_preprocess_replicate(options.num_l2caches, options.num_cpus_types)
_preprocess_replicate(options.l1d_size, options.num_cpus_types)
_preprocess_replicate(options.l1i_size, options.num_cpus_types)
_preprocess_replicate(options.l2_size, options.num_cpus_types)
_preprocess_replicate(options.l1d_assoc, options.num_cpus_types)
_preprocess_replicate(options.l1i_assoc, options.num_cpus_types)
_preprocess_replicate(options.l2_assoc, options.num_cpus_types)
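# Illustrative command line (hypothetical values): with --num-cpus-types=2, per-type
# options such as --cpus-types="detailed;atomic" --num-cpus-eachtype="1;3"
# --cpu-clock="2GHz;1GHz" are split on ';' above and then replicated so that every
# per-type list ends up with exactly num_cpus_types entries.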
def addCommonOptions(parser):
# system options
parser.add_option("--list-cpu-types",
action="callback", callback=_listCpuTypes,
help="List available CPU types")
parser.add_option("--cpu-type", type="choice", default="atomic",
choices=CpuConfig.cpu_names(),
help = "type of cpu to run with")
parser.add_option("--cpus-types", action="store", type="string", default="atomic")
parser.add_option("--checker", action="store_true");
parser.add_option("--num-cpus-types", action="store", type="int",
help="If != 1, then all CPU related types must be specified. Each CPU type will have at least one diferent L2 cache")
parser.add_option("--num-cpus-eachtype", action="store", type="string", default="1")#, nargs='+')
parser.add_option("--cpus-type-names", action="store", type="string", default="big")#, nargs='+')
parser.add_option("--sys-voltage", action="store", type="string",
default='1.0V',
help = """Top-level voltage for blocks running at system
power supply""")
parser.add_option("--sys-clock", action="store", type="string",
default='1GHz',
help = """Top-level clock for blocks running at system
speed""")
parser.add_option("--cpu-clock", action="store", type="string",
default='2GHz',
help="Clock for blocks running at CPU speed")
parser.add_option("--cpu-voltage", action="store", type="string",
default='1.0V',
help = """Top-level voltage for blocks running at CPU power supply""")
parser.add_option("--tech-node", action="store", type="int",
default=65,
help="Technology node in nm")
parser.add_option("--smt", action="store_true", default=False,
help = """
Only used if multiple programs are specified. If true,
then the number of threads per cpu is the same as the
number of programs.""")
parser.add_option("--elastic-trace-en", action="store_true",
help="""Enable capture of data dependency and instruction
fetch traces using elastic trace probe.""")
# Trace file paths input to trace probe in a capture simulation and input
# to Trace CPU in a replay simulation
parser.add_option("--inst-trace-file", action="store", type="string",
help="""Instruction fetch trace file input to
Elastic Trace probe in a capture simulation and
Trace CPU in a replay simulation""", default="")
parser.add_option("--data-trace-file", action="store", type="string",
help="""Data dependency trace file input to
Elastic Trace probe in a capture simulation and
Trace CPU in a replay simulation""", default="")
parser.add_option("--no-mcpat", action="store_true", default=False)
# CPU conf. options
parser.add_option("--cpu-pipeline-width", action="store", type="string",# nargs='+',
default="8")
parser.add_option("--cpu-LQentries", action="store", type="string",# nargs='+',
default="32")
parser.add_option("--cpu-SQentries", action="store", type="string",# nargs='+',
default="32")
parser.add_option("--cpu-IQentries", action="store", type="string",# nargs='+',
default="64")
parser.add_option("--cpu-ROBentries", action="store", type="string",# nargs='+',
default="192")
parser.add_option("--cpu-numPhysIntRegs", action="store", type="string",# nargs='+',
default="256")
parser.add_option("--cpu-numPhysFloatRegs", action="store", type="string",# nargs='+',
default="256")
parser.add_option("--cpu-localPredictorSize", action="store", type="string",# nargs='+',
default="2048")
parser.add_option("--cpu-globalPredictorSize", action="store", type="string",# nargs='+',
default="8192")
parser.add_option("--cpu-choicePredictorSize", action="store", type="string",# nargs='+',
default="8192")
parser.add_option("--cpu-BTBEntries", action="store", type="string",# nargs='+',
default="4096")
parser.add_option("--cpu-RASSize", action="store", type="string",# nargs='+',
default="16")
# Memory Options
parser.add_option("--list-mem-types",
action="callback", callback=_listMemTypes,
help="List available memory types")
parser.add_option("--mem-type", type="choice", default="DDR3_1600_x64",
choices=MemConfig.mem_names(),
help = "type of memory to use")
parser.add_option("--mem-channels", type="int", default=1,
help = "number of memory channels")
parser.add_option("--mem-ranks", type="int", default=None,
help = "number of memory ranks per channel")
parser.add_option("--mem-size", action="store", type="string",
default="512MB",
help="Specify the physical memory size (single memory)")
parser.add_option("-l", "--lpae", action="store_true")
parser.add_option("-V", "--virtualisation", action="store_true")
parser.add_option("--memchecker", action="store_true")
# Cache Options
parser.add_option("--external-memory-system", type="string",
help="use external ports of this port_type for caches")
parser.add_option("--tlm-memory", type="string",
help="use external port for SystemC TLM cosimulation")
parser.add_option("--fastmem", action="store_true")
parser.add_option("--caches", action="store_true")
parser.add_option("--num-dirs", type="int", default=1)
parser.add_option("--num-l2caches", type="string", default="1")#, nargs='+')
parser.add_option("--l1d-size", type="string", default="32kB")#, nargs='+')
parser.add_option("--l1i-size", type="string", default="16kB")#, nargs='+')
parser.add_option("--l2-size", type="string", default="1MB")#, nargs='+')
parser.add_option("--l1d-assoc", type="string", default="2")#, nargs='+')
parser.add_option("--l1i-assoc", type="string", default="2")#, nargs='+')
parser.add_option("--l2-assoc", type="string", default="8")#, nargs='+')
parser.add_option("--cacheline_size", type="int", default=64)
# Enable Ruby
parser.add_option("--ruby", action="store_true")
# Run duration options
parser.add_option("-m", "--abs-max-tick", type="int", default=m5.MaxTick,
metavar="TICKS", help="Run to absolute simulated tick " \
"specified including ticks from a restored checkpoint")
parser.add_option("--rel-max-tick", type="int", default=None,
metavar="TICKS", help="Simulate for specified number of" \
" ticks relative to the simulation start tick (e.g. if " \
"restoring a checkpoint)")
parser.add_option("--maxtime", type="float", default=None,
help="Run to the specified absolute simulated time in " \
"seconds")
parser.add_option("-I", "--maxinsts", action="store", type="int",
default=None, help="""Total number of instructions to
simulate (default: run forever)""")
parser.add_option("--work-item-id", action="store", type="int",
help="the specific work id for exit & checkpointing")
parser.add_option("--num-work-ids", action="store", type="int",
help="Number of distinct work item types")
parser.add_option("--work-begin-cpu-id-exit", action="store", type="int",
help="exit when work starts on the specified cpu")
parser.add_option("--work-end-exit-count", action="store", type="int",
help="exit at specified work end count")
parser.add_option("--work-begin-exit-count", action="store", type="int",
help="exit at specified work begin count")
parser.add_option("--init-param", action="store", type="int", default=0,
help="""Parameter available in simulation with m5
initparam""")
parser.add_option("--initialize-only", action="store_true", default=False,
help="""Exit after initialization. Do not simulate time.
Useful when gem5 is run as a library.""")
# Simpoint options
parser.add_option("--simpoint-profile", action="store_true",
help="Enable basic block profiling for SimPoints")
parser.add_option("--simpoint-interval", type="int", default=10000000,
help="SimPoint interval in num of instructions")
parser.add_option("--take-simpoint-checkpoints", action="store", type="string",
help="<simpoint file,weight file,interval-length,warmup-length>")
parser.add_option("--restore-simpoint-checkpoint", action="store_true",
help="restore from a simpoint checkpoint taken with " +
"--take-simpoint-checkpoints")
# Checkpointing options
###Note that performing checkpointing via python script files will override
###checkpoint instructions built into binaries.
parser.add_option("--take-checkpoints", action="store", type="string",
help="<M,N> take checkpoints at tick M and every N ticks thereafter")
parser.add_option("--max-checkpoints", action="store", type="int",
help="the maximum number of checkpoints to drop", default=5)
parser.add_option("--checkpoint-dir", action="store", type="string",
help="Place all checkpoints in this absolute directory")
parser.add_option("-r", "--checkpoint-restore", action="store", type="int",
help="restore from checkpoint <N>")
parser.add_option("--checkpoint-at-end", action="store_true",
help="take a checkpoint at end of run")
parser.add_option("--work-begin-checkpoint-count", action="store", type="int",
help="checkpoint at specified work begin count")
parser.add_option("--work-end-checkpoint-count", action="store", type="int",
help="checkpoint at specified work end count")
parser.add_option("--work-cpus-checkpoint-count", action="store", type="int",
help="checkpoint and exit when active cpu count is reached")
parser.add_option("--restore-with-cpu", action="store", type="choice",
default="atomic", choices=CpuConfig.cpu_names(),
help = "cpu type for restoring from a checkpoint")
# CPU Switching - default switch model goes from a checkpoint
# to a timing simple CPU with caches to warm up, then to detailed CPU for
# data measurement
parser.add_option("--repeat-switch", action="store", type="int",
default=None,
help="switch back and forth between CPUs with period <N>")
parser.add_option("-s", "--standard-switch", action="store", type="int",
default=None,
help="switch from timing to Detailed CPU after warmup period of <N>")
parser.add_option("-p", "--prog-interval", type="str",
help="CPU Progress Interval")
# Fastforwarding and simpoint related materials
parser.add_option("-W", "--warmup-insts", action="store", type="int",
default=None,
help="Warmup period in total instructions (requires --standard-switch)")
parser.add_option("--bench", action="store", type="string", default=None,
help="base names for --take-checkpoint and --checkpoint-restore")
parser.add_option("-F", "--fast-forward", action="store", type="string",
default=None,
help="Number of instructions to fast forward before switching")
parser.add_option("-S", "--simpoint", action="store_true", default=False,
help="""Use workload simpoints as an instruction offset for
--checkpoint-restore or --take-checkpoint.""")
parser.add_option("--at-instruction", action="store_true", default=False,
help="""Treat value of --checkpoint-restore or --take-checkpoint as a
number of instructions.""")
parser.add_option("--spec-input", default="ref", type="choice",
choices=["ref", "test", "train", "smred", "mdred",
"lgred"],
help="Input set size for SPEC CPU2000 benchmarks.")
parser.add_option("--arm-iset", default="arm", type="choice",
choices=["arm", "thumb", "aarch64"],
help="ARM instruction set.")
def addSEOptions(parser):
# Benchmark options
parser.add_option("-c", "--cmd", default="",
help="The binary to run in syscall emulation mode.")
parser.add_option("-o", "--options", default="",
help="""The options to pass to the binary, use " "
around the entire string""")
parser.add_option("-e", "--env", default="",
help="Initialize workload environment from text file.")
parser.add_option("-i", "--input", default="",
help="Read stdin from a file.")
parser.add_option("--output", default="",
help="Redirect stdout to a file.")
parser.add_option("--errout", default="",
help="Redirect stderr to a file.")
def addFSOptions(parser):
# Simulation options
parser.add_option("--timesync", action="store_true",
help="Prevent simulated time from getting ahead of real time")
# System options
parser.add_option("--kernel", action="store", type="string")
parser.add_option("--os-type", action="store", type="choice",
choices=os_types[buildEnv['TARGET_ISA']], default="linux",
help="Specifies type of OS to boot")
parser.add_option("--script", action="store", type="string")
parser.add_option("--frame-capture", action="store_true",
help="Stores changed frame buffers from the VNC server to compressed "\
"files in the gem5 output directory")
if buildEnv['TARGET_ISA'] == "arm":
parser.add_option("--bare-metal", action="store_true",
help="Provide the raw system without the linux specific bits")
parser.add_option("--list-machine-types",
action="callback", callback=_listPlatformTypes,
help="List available platform types")
parser.add_option("--machine-type", action="store", type="choice",
choices=PlatformConfig.platform_names(),
default="VExpress_EMM")
parser.add_option("--dtb-filename", action="store", type="string",
help="Specifies device tree blob file to use with device-tree-"\
"enabled kernels")
parser.add_option("--enable-context-switch-stats-dump", \
action="store_true", help="Enable stats dump at context "\
"switches and dump tasks file (required for Streamline)")
# Benchmark options
parser.add_option("--dual", action="store_true",
help="Simulate two systems attached with an ethernet link")
parser.add_option("--dist", action="store_true",
help="Parallel distributed gem5 simulation.")
parser.add_option("--is-switch", action="store_true",
help="Select the network switch simulator process for a"\
"distributed gem5 run")
parser.add_option("--dist-rank", default=0, action="store", type="int",
help="Rank of this system within the dist gem5 run.")
parser.add_option("--dist-size", default=0, action="store", type="int",
help="Number of gem5 processes within the dist gem5 run.")
parser.add_option("--dist-server-name",
default="127.0.0.1",
action="store", type="string",
help="Name of the message server host\nDEFAULT: localhost")
parser.add_option("--dist-server-port",
default=2200,
action="store", type="int",
help="Message server listen port\nDEFAULT: 2200")
parser.add_option("--dist-sync-repeat",
default="0us",
action="store", type="string",
help="Repeat interval for synchronisation barriers among dist-gem5 processes\nDEFAULT: --ethernet-linkdelay")
parser.add_option("--dist-sync-start",
default="5200000000000t",
action="store", type="string",
help="Time to schedule the first dist synchronisation barrier\nDEFAULT:5200000000000t")
parser.add_option("-b", "--benchmark", action="store", type="string",
dest="benchmark",
help="Specify the benchmark to run. Available benchmarks: %s"\
% DefinedBenchmarks)
parser.add_option("--ethernet-linkspeed", default="10Gbps",
action="store", type="string",
help="Link speed in bps\nDEFAULT: 10Gbps")
parser.add_option("--ethernet-linkdelay", default="10us",
action="store", type="string",
help="Link delay in seconds\nDEFAULT: 10us")
# Metafile options
parser.add_option("--etherdump", action="store", type="string", dest="etherdump",
help="Specify the filename to dump a pcap capture of the" \
"ethernet traffic")
# Disk Image Options
parser.add_option("--disk-image", action="store", type="string", default=None,
help="Path to the disk image to use.")
parser.add_option("--root-device", action="store", type="string", default=None,
help="OS device name for root partition")
# Command line options
parser.add_option("--command-line", action="store", type="string",
default=None,
help="Template for the kernel command line.")
parser.add_option("--command-line-file", action="store",
default=None, type="string",
help="File with a template for the kernel command line")
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for various tensorflow.ops.tf."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
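# Illustrative example (arbitrary values): for x = np.array([[0.1, 0.9], [0.7, 0.2]])
# and thresh=0.5, the two entries below the threshold are zeroed and the remaining
# values (0.9, 0.7) are returned as a SparseTensor together with nnz = 2.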
class ShapeOpsTest(test.TestCase):
def _compareShape(self, x, use_gpu=False):
np_ans = np.array(np.shape(x))
with self.test_session(use_gpu=use_gpu):
tf_ans = array_ops.shape(x)
tf_ans_64 = array_ops.shape(x, out_type=dtypes.int64)
result = tf_ans.eval()
result_64 = tf_ans_64.eval()
self.assertAllEqual(np_ans, result)
self.assertAllEqual(np_ans, result_64)
self.assertShapeEqual(np_ans, tf_ans)
def _compareShapeSparse(self, x_np, use_gpu=False):
np_ans = np.array(np.shape(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.test_session(use_gpu=use_gpu):
tf_ans = array_ops.shape(x_tf)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareShapeN(self, x, use_gpu=False):
np_ans = np.array(np.shape(x))
with self.test_session(use_gpu=use_gpu) as sess:
tf_ans = array_ops.shape_n([x, x, x])
tf_ans_64 = array_ops.shape_n([x, x, x], out_type=dtypes.int64)
result = sess.run(tf_ans)
result_64 = sess.run(tf_ans_64)
for i in range(3):
self.assertAllEqual(np_ans, result[i])
self.assertAllEqual(np_ans, result_64[i])
self.assertShapeEqual(np_ans, tf_ans[i])
def _compareRank(self, x, use_gpu=False):
np_ans = np.asarray(np.ndim(x))
with self.test_session(use_gpu=use_gpu):
tf_ans = array_ops.rank(x)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareRankSparse(self, x_np, use_gpu=False):
np_ans = np.asarray(np.ndim(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.test_session(use_gpu=use_gpu):
tf_ans = array_ops.rank(x_tf)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareSize(self, x, use_gpu=False):
np_ans = np.asarray(np.size(x))
with self.test_session(use_gpu=use_gpu):
tf_ans = array_ops.size(x)
result = tf_ans.eval()
tf_ans_64 = array_ops.size(x, out_type=dtypes.int64)
result_64 = tf_ans_64.eval()
self.assertAllEqual(np_ans, result)
self.assertAllEqual(np_ans, result_64)
self.assertShapeEqual(np_ans, tf_ans)
def _compareSizeSparse(self, x_np, use_gpu=False):
np_ans = np.asarray(np.size(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.test_session(use_gpu=use_gpu):
tf_ans = array_ops.size(x_tf)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _testCpu(self, x):
self._compareShape(x, use_gpu=False)
self._compareShapeN(x, use_gpu=False)
self._compareRank(x, use_gpu=False)
self._compareSize(x, use_gpu=False)
self._compareShapeSparse(x, use_gpu=False)
self._compareRankSparse(x, use_gpu=False)
self._compareSizeSparse(x, use_gpu=False)
def _testGpu(self, x):
self._compareShape(x, use_gpu=True)
self._compareShapeN(x, use_gpu=True)
self._compareRank(x, use_gpu=True)
self._compareSize(x, use_gpu=True)
self._compareShapeSparse(x, use_gpu=True)
self._compareRankSparse(x, use_gpu=True)
self._compareSizeSparse(x, use_gpu=True)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testBasic(self):
self._testAll(np.random.randn(2))
self._testAll(np.random.randn(2, 3))
self._testAll(np.random.randn(2, 3, 5))
self._testAll(np.random.randn(2, 3, 5, 7))
self._testAll(np.random.randn(2, 3, 5, 7, 11))
self._testAll(np.random.randn(2, 3, 5, 7, 11, 13))
def testBool(self):
self._testAll(np.random.choice((False, True), size=(2,)))
self._testAll(np.random.choice((False, True), size=(2, 3)))
self._testAll(np.random.choice((False, True), size=(2, 3, 5)))
self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7)))
self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7, 11)))
self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7, 11, 13)))
# Disabled because it takes too long to run, but manually verified
# as passing at time of writing.
def _test64BitOutput(self):
with self.cached_session():
inp = array_ops.zeros([2**31])
num_elements = array_ops.size_internal(
inp, optimize=False, out_type=dtypes.int64)
self.assertEqual(2**31, num_elements.eval())
# Too large for tf.int32 output.
with self.assertRaises(errors_impl.InvalidArgumentError):
with self.cached_session():
inp = array_ops.zeros([2**31])
num_elements = array_ops.size_internal(
inp, optimize=False, out_type=dtypes.int32)
self.assertEqual(2**31, num_elements.eval())
def _compareExpandDims(self, x, dim, use_gpu):
np_ans = np.expand_dims(x, axis=dim)
with self.test_session(use_gpu=use_gpu):
tensor = array_ops.expand_dims(x, dim)
tf_ans = tensor.eval()
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareExpandDimsAll(self, x, dim):
self._compareExpandDims(x, dim, False)
self._compareExpandDims(x, dim, True)
def testExpandDims(self):
self._compareExpandDimsAll(np.zeros([2]), 0)
self._compareExpandDimsAll(np.zeros([2]), 1)
self._compareExpandDimsAll(np.zeros([2]), -1)
self._compareExpandDimsAll(np.zeros([2, 3]), 0)
self._compareExpandDimsAll(np.zeros([2, 3]), 1)
self._compareExpandDimsAll(np.zeros([2, 3]), 2)
self._compareExpandDimsAll(np.zeros([2, 3]), -1)
self._compareExpandDimsAll(np.zeros([2, 3]), -2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 0)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 1)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 3)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -1)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -3)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -4)
def testExpandDimsBool(self):
choice = lambda s: np.random.choice((False, True), size=s)
self._compareExpandDimsAll(choice([2]), 0)
self._compareExpandDimsAll(choice([2]), 1)
self._compareExpandDimsAll(choice([2]), -1)
self._compareExpandDimsAll(choice([2, 3]), 0)
self._compareExpandDimsAll(choice([2, 3]), 1)
self._compareExpandDimsAll(choice([2, 3]), 2)
self._compareExpandDimsAll(choice([2, 3]), -1)
self._compareExpandDimsAll(choice([2, 3]), -2)
self._compareExpandDimsAll(choice([2, 3, 5]), 0)
self._compareExpandDimsAll(choice([2, 3, 5]), 1)
self._compareExpandDimsAll(choice([2, 3, 5]), 2)
self._compareExpandDimsAll(choice([2, 3, 5]), 3)
self._compareExpandDimsAll(choice([2, 3, 5]), -1)
self._compareExpandDimsAll(choice([2, 3, 5]), -2)
self._compareExpandDimsAll(choice([2, 3, 5]), -3)
self._compareExpandDimsAll(choice([2, 3, 5]), -4)
def testExpandDimsErrors(self):
with self.cached_session():
self.assertRaises(ValueError, array_ops.expand_dims,
np.zeros([2, 3, 5]), -5)
self.assertRaises(ValueError, array_ops.expand_dims,
[False, True, True], -5)
self.assertRaises(ValueError, array_ops.expand_dims,
np.zeros([2, 3, 5]), 4)
self.assertRaises(ValueError, array_ops.expand_dims,
[False, True, True], 4)
def testExpandDimsGradient(self):
with self.cached_session():
inp = constant_op.constant(
np.random.rand(4, 2).astype("f"), dtype=dtypes.float32)
squeezed = array_ops.expand_dims(inp, 1)
err = gradient_checker.compute_gradient_error(inp, [4, 2], squeezed,
[4, 1, 2])
self.assertLess(err, 1e-3)
def testExpandDimsScalar(self):
with self.cached_session():
inp = constant_op.constant(7)
self.assertAllEqual([7], array_ops.expand_dims(inp, 0).eval())
self.assertAllEqual([7], array_ops.expand_dims(inp, -1).eval())
inp = constant_op.constant(True)
self.assertAllEqual([True], array_ops.expand_dims(inp, 0).eval())
self.assertAllEqual([True], array_ops.expand_dims(inp, -1).eval())
def testExpandDimsDimType(self):
for dtype in [dtypes.int32, dtypes.int64]:
x = np.zeros([2])
np_ans = np.expand_dims(x, axis=0)
with self.test_session(use_gpu=True):
tensor = array_ops.expand_dims(x, constant_op.constant(0, dtype))
tf_ans = tensor.eval()
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareSqueeze(self, x, squeeze_dims, use_gpu):
with self.test_session(use_gpu=use_gpu):
if squeeze_dims:
np_ans = np.squeeze(x, axis=tuple(squeeze_dims))
tensor = array_ops.squeeze(x, squeeze_dims)
tf_ans = tensor.eval()
else:
np_ans = np.squeeze(x)
tensor = array_ops.squeeze(x)
tf_ans = tensor.eval()
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareSqueezeAll(self, x, squeeze_dims=None):
if squeeze_dims is None:
squeeze_dims = []
self._compareSqueeze(x, squeeze_dims, False)
self._compareSqueeze(x, squeeze_dims, True)
def testSqueeze(self):
# Nothing to squeeze.
self._compareSqueezeAll(np.zeros([2]))
self._compareSqueezeAll(np.zeros([2, 3]))
# Squeeze the middle element away.
self._compareSqueezeAll(np.zeros([2, 1, 2]))
# Squeeze on both ends.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]))
def testSqueezeBool(self):
choice = lambda s: np.random.choice((False, True), size=s)
# Nothing to squeeze.
self._compareSqueezeAll(choice([2]))
self._compareSqueezeAll(choice([2, 3]))
# Squeeze the middle element away.
self._compareSqueezeAll(choice([2, 1, 2]))
# Squeeze on both ends.
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]))
def testSqueezeSpecificDimension(self):
# Positive squeeze dim index.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [2, 4])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0, 4, 2])
# Negative squeeze dim index.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-1])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5, -1])
def testSqueezeSpecificDimensionBool(self):
choice = lambda s: np.random.choice((False, True), size=s)
# Positive squeeze dim index.
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [0])
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [2, 4])
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [0, 4, 2])
# Negative squeeze dim index.
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-1])
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-3, -5])
self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-3, -5, -1])
def testSqueezeAllOnes(self):
# Numpy squeezes a 1 element tensor into a zero dimensional tensor.
# Verify that we do the same.
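    # For example, np.squeeze(np.zeros([1, 1, 1])).shape is (), i.e. a scalar,
    # which is what the assertions against np.shape(1) below check for.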
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
tensor = array_ops.squeeze(np.zeros([1, 1, 1]), [])
self.assertEqual(np.shape(1), tensor.get_shape())
tf_ans = tensor.eval()
self.assertEqual(np.shape(1), tf_ans.shape)
def testSqueezeAllOnesBool(self):
# Numpy squeezes a 1 element tensor into a zero dimensional tensor.
# Verify that we do the same.
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
tensor = array_ops.squeeze([[[False]]], [])
self.assertEqual(np.shape(1), tensor.get_shape())
tf_ans = tensor.eval()
self.assertEqual(np.shape(1), tf_ans.shape)
def testSqueezeOnlyOnes(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
input_1x1x3 = np.zeros([1, 1, 3])
self._compareSqueezeAll(input_1x1x3)
self._compareSqueezeAll(input_1x1x3, [0])
self._compareSqueezeAll(input_1x1x3, [1])
self.assertRaises(ValueError, array_ops.squeeze, input_1x1x3, [2])
def testSqueezeErrors(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [-4])
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [0, -4])
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [3])
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [2, 3])
def testSqueezeGradient(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = array_ops.reshape(inp, [4, 1, 2])
squeezed = array_ops.squeeze(a, [])
err = gradient_checker.compute_gradient_error(a, [4, 1, 2], squeezed,
[4, 2])
self.assertLess(err, 1e-3)
def testSqueezeGradientWithSqueezeDims(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = array_ops.reshape(inp, [4, 1, 2, 1])
squeezed = array_ops.squeeze(a, [1])
err = gradient_checker.compute_gradient_error(a, [4, 1, 2, 1], squeezed,
[4, 2, 1])
self.assertLess(err, 1e-3)
def testSqueezeWithUnknownShape(self):
with self.cached_session():
a = array_ops.placeholder(dtypes.float32, shape=[2, None])
squeezed = array_ops.squeeze(a, [1])
self.assertEqual([2], squeezed.get_shape().as_list())
squeezed = array_ops.squeeze(a)
self.assertEqual(None, squeezed.get_shape())
self.assertRaises(ValueError, array_ops.squeeze, a, [0])
self.assertRaises(ValueError, array_ops.squeeze, a, [100])
class TileTest(test.TestCase):
def testScalar(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
a = constant_op.constant(7, shape=[], dtype=dtypes.float32)
tiled = array_ops.tile(a, [])
result = tiled.eval()
self.assertEqual(result.shape, ())
self.assertEqual([], tiled.get_shape())
self.assertEqual(7, result)
def testSimple(self):
# multiples could be int32 or int64
for dtype in [dtypes.int32, dtypes.int64]:
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 1).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, constant_op.constant([1, 4], dtype=dtype))
result = tiled.eval()
self.assertEqual(result.shape, (4, 4))
self.assertEqual([4, 4], tiled.get_shape())
self.assertTrue((result == np.tile(inp, (1, 4))).all())
def testIdentityTileAndGrad(self):
with self.cached_session():
inp = np.random.rand(4, 1).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, [1, 1])
result = tiled.eval()
self.assertEqual(result.shape, (4, 1))
self.assertEqual([4, 1], tiled.get_shape())
self.assertTrue((result == np.tile(inp, (1, 1))).all())
def testEmpty(self):
with self.cached_session():
inp = np.random.rand(2, 3).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, [5, 0])
result = tiled.eval()
self.assertEqual(result.shape, (10, 0))
self.assertEqual([10, 0], tiled.get_shape())
def testUnknownInputShape(self):
"""Importing can call _TileShape without shape of <multiples> known."""
with self.cached_session():
inp = array_ops.placeholder(dtypes.float32) # unknown shape
multiples = constant_op.constant([1, 2, 3, 4], dtype=np.int32)
tiled = array_ops.tile(inp, multiples)
gdef = tiled.graph.as_graph_def()
# Move the tile op to the start of the graph so that shapes of its inputs
# are not available when the shape function runs on import.
swapped = False
for i, n in enumerate(gdef.node):
if n.op == "Tile":
# Swap tile op to be first in gdef.node
assert i != 0
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(gdef.node[i])
gdef.node[i].CopyFrom(gdef.node[0])
gdef.node[0].CopyFrom(new_node)
swapped = True
assert swapped
tiled_imported, = importer.import_graph_def(
gdef, return_elements=[tiled.name])
self.assertEqual(4, tiled_imported.get_shape().ndims)
def testTypes(self):
types_to_test = {
"bool": (dtypes.bool, bool),
"float32": (dtypes.float32, float),
"float64": (dtypes.float64, float),
"complex64": (dtypes.complex64, complex),
"complex128": (dtypes.complex128, complex),
"uint8": (dtypes.uint8, int),
"int32": (dtypes.int32, int),
"int64": (dtypes.int64, int),
bytes: (dtypes.string, bytes)
}
for dtype_np, (dtype_tf, cast) in types_to_test.items():
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 1).astype(dtype_np)
a = constant_op.constant(
[cast(x) for x in inp.ravel(order="C")],
shape=[4, 1],
dtype=dtype_tf)
tiled = array_ops.tile(a, [1, 4])
result = tiled.eval()
self.assertEqual(result.shape, (4, 4))
self.assertEqual([4, 4], tiled.get_shape())
self.assertAllEqual(result, np.tile(inp, (1, 4)))
def testInvalidDim(self):
with self.cached_session():
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=[4, 1],
dtype=dtypes.float32)
# Wrong length of multiples.
with self.assertRaises(ValueError):
array_ops.tile(a, [1, 4, 2])
# Wrong rank for multiples.
with self.assertRaises(ValueError):
array_ops.tile(a, [[2, 3], [3, 4]]).eval()
def _RunAndVerifyResult(self, rank, use_gpu):
with self.test_session(use_gpu=use_gpu):
# Random dims of given rank
input_shape = np.random.randint(1, 4, size=rank)
inp = np.random.rand(*input_shape).astype("f")
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=input_shape,
dtype=dtypes.float32)
multiples = np.random.randint(1, 4, size=rank).astype(np.int32)
tiled = array_ops.tile(a, multiples)
result = tiled.eval()
self.assertTrue((np.array(multiples) * np.array(inp.shape) == np.array(
result.shape)).all())
self.assertAllEqual(result, np.tile(inp, tuple(multiples)))
self.assertShapeEqual(result, tiled)
def testRandom(self):
# test low rank, like 5
for _ in range(5):
self._RunAndVerifyResult(5, use_gpu=False)
for _ in range(5):
self._RunAndVerifyResult(5, use_gpu=True)
# test high rank, like 10
for _ in range(5):
self._RunAndVerifyResult(10, use_gpu=False)
for _ in range(5):
self._RunAndVerifyResult(10, use_gpu=True)
def testGradientSimpleReduction(self):
with self.cached_session():
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 4])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
self.assertShapeEqual(inp, grad)
result = grad.eval()
self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
def testGradientStridedReduction(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
self.assertShapeEqual(inp, grad)
result = grad.eval()
expected_shape = [4, 2]
expected = np.zeros(expected_shape)
expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
self.assertTrue((np.abs(expected - result) < 1e-3).all())
def testGradientSimpleReductionOnGPU(self):
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 4])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
result = grad.eval()
self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
def testGradientStridedReductionOnGPU(self):
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
result = grad.eval()
expected_shape = [4, 2]
expected = np.zeros(expected_shape)
expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
self.assertAllClose(expected, result, 1e-3)
def _RunAndVerifyGradientResult(self, input_shape, multiples):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
# Random values
inp = np.asarray(np.random.rand(*input_shape))
a = constant_op.constant(inp, dtype=dtypes.float64)
tiled = array_ops.tile(a, multiples)
grad_shape = list(np.array(multiples) * np.array(inp.shape))
err = gradient_checker.compute_gradient_error(
a, list(input_shape), tiled, grad_shape, x_init_value=inp)
print("tile(float) error = ", err)
self.assertLess(err, 1e-3)
def testGradientRandomScalar(self):
self._RunAndVerifyGradientResult([], [])
def testGradientRandom(self):
self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 1, 1, 1, 1])
self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 2, 1, 3, 1])
self._RunAndVerifyGradientResult([2, 3, 1, 1, 3], [3, 1, 1, 2, 2])
self._RunAndVerifyGradientResult([2, 1, 3, 3, 2], [1, 3, 3, 1, 2])
def testGradientStridedReductionGC(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
err = gradient_checker.compute_gradient_error(a, [4, 2], tiled, [4, 4])
self.assertLess(err, 1e-3)
def testGradientWithSparseGradWithRank1(self):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0],
dtype=dtypes.float32)
outputs = array_ops.gather(array_ops.tile(inputs, [3]),
[1, 5, 9, 3, 7, 2, 2, 2])
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
def testGradientWithSparseGradWithRank3(self):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0],
dtype=dtypes.float32)
inputs = array_ops.reshape(inputs, [-1, 1, 1])
outputs = array_ops.gather(array_ops.tile(inputs, [3, 4, 2]),
[1, 5, 9, 3, 7, 2, 2, 2])
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
def testShapeFunctionEdgeCases(self):
# Unknown multiples shape.
inp = constant_op.constant(0.0, shape=[4, 4, 4, 4])
tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
# Unknown input shape.
inp = array_ops.placeholder(dtypes.float32)
tiled = array_ops.tile(inp, [2, 2, 2, 2])
self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
# Unknown input and multiples shape.
inp = array_ops.placeholder(dtypes.float32)
tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
self.assertIs(None, tiled.get_shape().ndims)
# Known input and partially known multiples.
inp = constant_op.constant(0.0, shape=[1, 1])
tiled = array_ops.tile(inp, [array_ops.placeholder(dtypes.int32), 7])
self.assertEqual([None, 7], tiled.get_shape().as_list())
# Mismatched input rank and multiples length.
inp = array_ops.placeholder(dtypes.float32, shape=[None, None])
with self.assertRaises(ValueError):
tiled = array_ops.tile(
inp, array_ops.placeholder(
dtypes.int32, shape=[3]))
if __name__ == "__main__":
test.main()
|
|
import datetime
import json
import logging
import urllib
from jinja2 import Environment
from jinja2 import PackageLoader
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Content
from sendgrid.helpers.mail import Email
from sendgrid.helpers.mail import Mail
from sendgrid.helpers.mail import To
from yelp_beans.logic.meeting_spec import get_meeting_datetime
from yelp_beans.logic.meeting_spec import get_users_from_spec
from yelp_beans.models import User
secrets = None
send_grid_client = None
SENDGRID_SENDER = None
def load_secrets():
global secrets, send_grid_client, SENDGRID_SENDER
if secrets is not None:
return
    with open("client_secrets.json") as secrets_file:
        secrets = json.loads(secrets_file.read())
# TODO (rkwills) switch to a yelp sendgrid account
send_grid_client = SendGridAPIClient(api_key=secrets["SENDGRID_API_KEY"])
SENDGRID_SENDER = secrets["SENDGRID_SENDER"]
def send_single_email(email, subject, template, template_arguments):
""" Send an email using the SendGrid API
Args:
- email :string => the user's work email (ie [email protected])
- subject :string => the subject line for the email
- template :string => the template file, corresponding to the email sent.
- template_arguments :dictionary => keyword arguments to specify to render_template
Returns:
- SendGrid response
"""
load_secrets()
env = Environment(loader=PackageLoader('yelp_beans', 'templates'))
template = env.get_template(template)
rendered_template = template.render(template_arguments)
message = Mail(
Email(SENDGRID_SENDER),
To(email),
subject,
Content("text/html", rendered_template)
)
return send_grid_client.client.mail.send.post(request_body=message.get())
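# Illustrative usage (a sketch; the address and most arguments here are made
# up, only "welcome_email.html" appears elsewhere in this module):
#
#   send_single_email(
#       '[email protected]',
#       'Welcome to Beans',
#       'welcome_email.html',
#       {'first_name': 'Ada', 'project': 'my-project'},
#   )
#
# The helper renders the Jinja2 template from yelp_beans/templates and posts
# the resulting HTML through the SendGrid mail API.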
def send_batch_initial_opt_in_email(users):
"""Sends the initial batch email to ask if people want to join Beans"""
load_secrets()
for user in users:
send_single_email(
user.email,
"Want to meet other employees through Beans?",
"welcome_email.html",
{
'first_name': user.first_name,
'project': secrets['PROJECT']
}
)
def send_batch_weekly_opt_in_email(meeting_spec):
"""Sends an email for the week asking if members want a meeting"""
load_secrets()
create_url = 'https://{}.appspot.com/meeting_request/{}'.format(
secrets["PROJECT"],
meeting_spec.id
)
logging.info('created url ' + create_url)
users = get_users_from_spec(meeting_spec)
users = [user for user in users if user]
logging.info(len(users))
logging.info(meeting_spec)
meeting_datetime = get_meeting_datetime(meeting_spec)
subscription = meeting_spec.meeting_subscription
logging.info(meeting_datetime.strftime('%I:%M %p %Z'))
for user in users:
if not user.terminated:
logging.info(user)
logging.info(meeting_datetime)
send_single_email(
user.email,
"Want a beans meeting this week?",
"weekly_opt_in_email.html",
{
'first_name': user.first_name,
'office': subscription.office,
'location': subscription.location,
'meeting_day': meeting_datetime.strftime('%A'),
'meeting_time': meeting_datetime.strftime('%I:%M %p %Z'),
'meeting_url': create_url,
'link_to_change_pref': 'https://{}.appspot.com/'.format(secrets["PROJECT"]),
'project': secrets["PROJECT"]
}
)
logging.info('sent email')
else:
logging.info(user)
logging.info('terminated')
def send_batch_meeting_confirmation_email(matches, spec):
"""
Sends an email to all of the participants in a match for the week
    matches - list of matches for the week, each an iterable containing the matched User participants
spec - meeting spec
"""
    load_secrets()
    for match in matches:
participants = {participant for participant in match if isinstance(participant, User)}
for participant in participants:
others = participants - {participant}
send_match_email(participant, [participant for participant in others], spec)
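# Shape of `matches` (an assumption inferred from the loop above): each match
# is an iterable that may mix User objects with other metadata; only the User
# instances are emailed. For example:
#
#   matches = [(user_a, user_b), (user_c, user_d)]
#   send_batch_meeting_confirmation_email(matches, spec)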
def send_match_email(user, participants, meeting_spec):
"""
Sends an email to one of the matches for the week
user - user receiving the email
participants - other people in the meeting
meeting_spec - meeting specification
"""
meeting_datetime = get_meeting_datetime(meeting_spec)
meeting_datetime_end = meeting_datetime + datetime.timedelta(minutes=30)
subscription = meeting_spec.meeting_subscription
send_single_email(
user.email,
'Yelp Beans Meeting',
'match_email.html',
{
'user': user,
'participants': participants,
'location': subscription.office + " " + subscription.location,
'meeting_title': subscription.title,
'meeting_start_day': meeting_datetime.strftime('%A'),
'meeting_start_date': meeting_datetime.strftime('%m/%d/%Y'),
'meeting_start_time': meeting_datetime.strftime('%I:%M %p %Z'),
'meeting_end_time': meeting_datetime_end.strftime('%I:%M %p %Z'),
'calendar_invite_url': create_google_calendar_invitation_link(
participants,
subscription.title,
subscription.office,
subscription.location,
meeting_datetime,
meeting_datetime_end
),
'project': secrets["PROJECT"]
}
)
def create_google_calendar_invitation_link(user_list, title, office, location, meeting_datetime, end_time):
invite_url = "https://www.google.com/calendar/render?action=TEMPLATE&"
url_params = {
'text': "Meeting with {users} for {title}".format(
users=', '.join([user.get_username() for user in user_list]),
title=title
),
# ToDo (xili|20161110) Fix the time zone issue for remote/HH
'dates': "{begin_date}T{begin_time}/{end_date}T{end_time}".format(
begin_date=meeting_datetime.strftime("%Y%m%d"),
begin_time=meeting_datetime.strftime("%H%M%S"),
end_date=end_time.strftime("%Y%m%d"),
end_time=end_time.strftime("%H%M%S"),
),
'details': "Yelp Beans Coffee time!",
# ToDo (xili|20161110) Fix the location if one of the users is remote
'location': office + " " + location,
'add': ','.join([user.email for user in user_list])
}
invite_url += urllib.parse.urlencode(url_params)
return invite_url
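# For example (hypothetical values), a 30-minute meeting starting at 16:00 on
# 2016-11-10 produces a URL of roughly this shape:
#
#   https://www.google.com/calendar/render?action=TEMPLATE
#       &text=Meeting+with+alice%2C+bob+for+Coffee+Time
#       &dates=20161110T160000/20161110T163000
#       &details=Yelp+Beans+Coffee+time%21
#       &location=SF+4th+Floor
#       &add=alice%40example.com%2Cbob%40example.com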
def send_batch_unmatched_email(unmatched):
"""Sends an email to a person that couldn't be matched for the week"""
load_secrets()
for user in unmatched:
send_single_email(
user.email,
'Your Beans meeting this week',
'unmatched_email.html',
{
'first_name': user.first_name,
'project': secrets['PROJECT']
}
)
|
|
from twisted.internet.protocol import Protocol, Factory, ClientFactory
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor, error
import json
import base64
from database import *
from packettypes import *
from datahandler import *
from utils.utils import *
from gui.dialogs import alertMessageDialog
from constants import *
import global_vars as g
from objects import *
dataHandler = None
def startConnection():
global dataHandler
factory = gameClientFactory()
g.connector = reactor.connectTCP(GAME_IP, GAME_PORT, factory)
dataHandler = DataHandler()
return factory.protocol
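# Illustrative wiring (a sketch; the surrounding game loop and reactor start-up
# are assumed to live elsewhere): the protocol returned here is normally
# wrapped in TCPConnection so the rest of the client can send packets, e.g.
#
#   protocol = startConnection()
#   g.tcpConn = TCPConnection(protocol)
#   # once the connection is up, connectionMade() below calls
#   # g.tcpConn.sendLogin(...) for the MENU_LOGIN state.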
class gameClientProtocol(LineReceiver):
MAX_LENGTH = 999999 #todo: find a suitable size (see client: sendMap (in clienttcp.py))
def __init__(self, factory):
self.factory = factory
def connectionMade(self):
        '''Called when the connection has been made; used for logging in and creating a new account.'''
if g.gameState == MENU_LOGIN:
# logging in, so send login after connection has been established
username = g.gameEngine.menuLogin.username
password = g.gameEngine.menuLogin.password
g.tcpConn.sendLogin(username, password)
log("Connection established to server")
def lineReceived(self, data):
global dataHandler
# handle base64 data
decodedData = base64.b64decode(data)
log("Received data from server")
log(" -> " + decodedData)
dataHandler.handleData(decodedData)
def sendData(self, data):
# encode data using base64
encodedData = base64.b64encode(data)
self.sendLine(encodedData)
class gameClientFactory(ClientFactory):
def __init__(self):
        self.protocol = gameClientProtocol(self)
def startedConnecting(self, connector):
log("Connecting to server...")
def buildProtocol(self, addr):
return self.protocol
def clientConnectionFailed(self, connector, reason):
errorMsg = reason.getErrorMessage().split(':')
        alertMessageDialog('Unable to connect to server: ' + errorMsg[1] + errorMsg[2], 'An error occurred')
        print(reason.getErrorMessage())
def clientConnectionLost(self, connector, reason):
        print(reason.getErrorMessage())
try:
#reactor.stop()
log("Disconnection from server")
except error.ReactorNotRunning:
pass
class TCPConnection():
def __init__(self, protocol):
self.protocol = protocol
def sendData(self, data):
self.protocol.sendData(data)
def sendNewAccount(self, username, password):
packet = json.dumps([{"packet": ClientPackets.CNewAccount, "name": username, "password": password}])
self.sendData(packet)
def sendDelAccount(self, username, password):
packet = json.dumps([{"packet": ClientPackets.CDelAccount, "name": username, "password": password}])
self.sendData(packet)
def sendLogin(self, username, password):
packet = json.dumps([{"packet": ClientPackets.CLogin, "name": username, "password": password}], ensure_ascii=False)
self.sendData(packet)
def sendAddChar(self, name, sex, classNum, slot):
packet = json.dumps([{"packet": ClientPackets.CAddChar, "name": name, "sex": sex, "class": classNum, "slot": slot}])
self.sendData(packet)
def sendDelChar(self, slot):
packet = json.dumps([{"packet": ClientPackets.CDelChar}])
self.sendData(packet)
def sendGetClasses(self):
packet = json.dumps([{"packet": ClientPackets.CGetClasses}])
self.sendData(packet)
def sendUseChar(self, charslot):
packet = json.dumps([{"packet": ClientPackets.CUseChar, "charslot": charslot}])
self.sendData(packet)
def sayMsg(self, msg):
packet = json.dumps([{"packet": ClientPackets.CSayMsg, "msg": msg}])
self.sendData(packet)
def globalMsg(self, msg):
packet = json.dumps([{"packet": ClientPackets.CGlobalMsg, "msg": msg}])
self.sendData(packet)
def broadcastMsg(self, msg):
packet = json.dumps([{"packet": ClientPackets.CBroadcastMsg, "msg": msg}])
self.sendData(packet)
def emoteMsg(self, msg):
packet = json.dumps([{"packet": ClientPackets.CEmoteMsg, "msg": msg}])
self.sendData(packet)
def playerMsg(self, msg, msgTo):
packet = json.dumps([{"packet": ClientPackets.CPlayerMsg, "msg": msg, "msgto": msgTo}])
self.sendData(packet)
def adminMsg(self, msg):
packet = json.dumps([{"packet": ClientPackets.CAdminMsg, "msg": msg}])
self.sendData(packet)
def sendPlayerMove(self):
packet = json.dumps([{"packet": ClientPackets.CPlayerMove, "direction": getPlayerDir(g.myIndex), "moving": Player[g.myIndex].moving}])
self.sendData(packet)
def sendPlayerAttack(self):
packet = json.dumps([{"packet": ClientPackets.CAttack}])
self.sendData(packet)
def sendPlayerDir(self):
packet = json.dumps([{"packet": ClientPackets.CPlayerDir, "direction": getPlayerDir(g.myIndex)}])
self.sendData(packet)
def sendPlayerRequestNewMap(self):
packet = json.dumps([{"packet": ClientPackets.CRequestNewMap, "direction": getPlayerDir(g.myIndex)}])
self.sendData(packet)
def sendMapGetItem(self):
packet = json.dumps([{"packet": ClientPackets.CMapGetItem}])
self.sendData(packet)
def sendMap(self):
#todo: npc
#canMoveNow = false
packet = []
packet.append({"packet": ClientPackets.CMapData, \
"mapname": Map.name, \
"moral": Map.moral, \
"tileset": Map.tileSet, \
"up": Map.up, \
"down": Map.down, \
"left": Map.left, \
"right": Map.right, \
"bootmap": Map.bootMap, \
"bootx": Map.bootX, \
"booty": Map.bootY})
for x in range(MAX_MAPX):
for y in range(MAX_MAPY):
tempTile = Map.tile[x][y]
packet.append([{"layer1": tempTile.layer1, \
"layer2": tempTile.layer2, \
"layer3": tempTile.layer3, \
"mask": tempTile.mask, \
"animation": tempTile.anim, \
"fringe": tempTile.fringe, \
"type": tempTile.type, \
"data1": tempTile.data1, \
"data2": tempTile.data2, \
"data3": tempTile.data3}])
for i in range(MAX_MAP_NPCS):
packet.append([{'npcnum': Map.npc[i]}])
packet = json.dumps(packet)
self.sendData(packet)
def sendNeedMap(self, answer=0):
packet = json.dumps([{"packet": ClientPackets.CNeedMap, "answer": answer}])
self.sendData(packet)
def warpMeTo(self, name):
packet = json.dumps([{'packet': ClientPackets.CWarpMeTo, 'name': name}])
self.sendData(packet)
def warpToMe(self, name):
packet = json.dumps([{'packet': ClientPackets.CWarpToMe, 'name': name}])
self.sendData(packet)
def warpTo(self, mapNum):
packet = json.dumps([{'packet': ClientPackets.CWarpTo, 'map': mapNum}])
self.sendData(packet)
def sendSetAccess(self, name, access):
packet = json.dumps([{'packet': ClientPackets.CSetAccess, 'name': name, 'access': access}])
self.sendData(packet)
def sendGiveItem(self, name, itemnum):
packet = json.dumps([{'packet': ClientPackets.CGiveItem, 'name': name, 'itemnum': itemnum}])
self.sendData(packet)
def sendSetSprite(self, spriteNum):
packet = json.dumps([{"packet": ClientPackets.CSetSprite, "sprite": spriteNum}])
self.sendData(packet)
def sendMapReport(self):
packet = json.dumps([{"packet": ClientPackets.CMapReport}])
self.sendData(packet)
def sendMapRespawn(self):
packet = json.dumps([{"packet": ClientPackets.CMapRespawn}])
self.sendData(packet)
def sendUseItem(self, invNum):
packet = json.dumps([{"packet": ClientPackets.CUseItem, "invnum": invNum}])
self.sendData(packet)
def sendCastSpell(self, spellslot):
packet = json.dumps([{"packet": ClientPackets.CCast, "spellslot": spellslot}])
self.sendData(packet)
def sendTarget(self, x, y):
packet = json.dumps([{"packet": ClientPackets.CTarget, 'x': x, 'y': y}])
self.sendData(packet)
def sendInfoRequest(self, plrName):
packet = json.dumps([{"packet": ClientPackets.CPlayerInfoRequest, "name": plrName}])
self.sendData(packet)
def sendWhosOnline(self):
packet = json.dumps([{'packet': ClientPackets.CWhosOnline}])
self.sendData(packet)
def sendRequestEditMap(self):
packet = json.dumps([{"packet": ClientPackets.CRequestEditMap}])
self.sendData(packet)
def sendRequestEditItem(self):
packet = json.dumps([{"packet": ClientPackets.CRequestEditItem}])
self.sendData(packet)
def sendSaveItem(self, itemNum):
packet = json.dumps([{"packet": ClientPackets.CSaveItem, 'itemnum': itemNum, 'itemname': Item[itemNum].name, 'itempic': Item[itemNum].pic, 'itemtype': Item[itemNum].type, 'itemdata1': Item[itemNum].data1, 'itemdata2': Item[itemNum].data2, 'itemdata3': Item[itemNum].data3}])
self.sendData(packet)
def sendRequestSpells(self):
packet = json.dumps([{"packet": ClientPackets.CSpells}])
self.sendData(packet)
def sendRequestEditSpell(self):
packet = json.dumps([{"packet": ClientPackets.CRequestEditSpell}])
self.sendData(packet)
def sendEditSpell(self, spellNum):
packet = json.dumps([{"packet": ClientPackets.CEditSpell, 'spellnum': spellNum}])
self.sendData(packet)
def sendSaveSpell(self, spellNum):
packet = json.dumps([{"packet": ClientPackets.CSaveSpell, 'spellnum': spellNum, 'spellname': Spell[spellNum].name, 'spellpic': Spell[spellNum].pic, 'spelltype': Spell[spellNum].type, 'mpreq': Spell[spellNum].reqMp, 'classreq': Spell[spellNum].reqClass, 'levelreq': Spell[spellNum].reqLevel, 'data1': Spell[spellNum].data1, 'data2': Spell[spellNum].data2, 'data3': Spell[spellNum].data3}])
self.sendData(packet)
def sendRequestEditNpc(self):
packet = json.dumps([{"packet": ClientPackets.CRequestEditNpc}])
self.sendData(packet)
def sendEditNpc(self, npcNum):
packet = json.dumps([{"packet": ClientPackets.CEditNpc, 'npcnum': npcNum}])
self.sendData(packet)
def sendSaveNpc(self, npcNum):
packet = json.dumps([{"packet": ClientPackets.CSaveNpc, 'npcnum': npcNum, 'name': NPC[npcNum].name, 'attacksay': NPC[npcNum].attackSay, 'sprite': NPC[npcNum].sprite, 'spawnsec': NPC[npcNum].spawnSecs, 'behavior': NPC[npcNum].behaviour, 'range': NPC[npcNum].range, \
'dropchance': NPC[npcNum].dropChance, 'dropitem': NPC[npcNum].dropItem, 'dropitemval': NPC[npcNum].dropItemValue, \
'strength': NPC[npcNum].stat[Stats.strength], 'defense': NPC[npcNum].stat[Stats.defense], 'magic': NPC[npcNum].stat[Stats.magic], 'speed': NPC[npcNum].stat[Stats.speed]}])
self.sendData(packet)
def sendQuit(self):
packet = json.dumps([{"packet": ClientPackets.CQuit}])
self.sendData(packet)
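# Wire format (inferred from sendData/lineReceived above): every packet is a
# JSON array holding a single object, base64-encoded and newline-delimited by
# LineReceiver. For example, sendLogin("bob", "hunter2") serialises roughly to
#
#   [{"packet": ClientPackets.CLogin, "name": "bob", "password": "hunter2"}]
#
# before base64 encoding; replies from the server use the same framing and are
# decoded in lineReceived() before being handed to the DataHandler.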
|
|
'''blox/base.py
The base for all blox
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
'''
from collections import OrderedDict
import re
from itertools import chain
from connectable import Connectable
from connectable.base import CombineSignals
from blox.attributes import (AbstractAttribute, Attribute, RenderedDirect, SetAttribute,
BooleanAttribute, IntegerAttribute, DirectAttribute, BlokAttribute,
AccessorAttribute, NestedBlokAttribute)
from io import StringIO
UNDERSCORE = (re.compile('(.)([A-Z][a-z]+)'), re.compile('([a-z0-9])([A-Z])'))
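# The two patterns above are the usual two-pass CamelCase -> snake_case
# conversion. A sketch of the intended use (the call site is not shown in this
# excerpt):
#
#   >>> UNDERSCORE[1].sub(r'\1_\2', UNDERSCORE[0].sub(r'\1_\2', 'TagWithChildren')).lower()
#   'tag_with_children'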
class Blox(list):
def __getitem__(self, index):
if type(index) in (int, slice):
return super().__getitem__(index)
return self and self[0][index]
def __setitem__(self, index, value):
if type(index) in (int, slice):
return super().__setitem__(index, value)
for blok in self:
blok[index] = value
    def get(self, index, default=None):
        if type(index) is slice:
            return self[index]
        if type(index) is int:
            return self[index] if -len(self) <= index < len(self) else default
        return self[0].get(index, default) if self else default
def output(self, to=None, *args, **kwargs):
'''Outputs to a stream (like a file or request)'''
for blok in self:
blok.output(to, *args, **kwargs)
return self
def render(self, *args, **kwargs):
'''Renders as a str'''
render_to = StringIO()
self.output(render_to, *args, **kwargs)
return render_to.getvalue()
def __str__(self):
return self.render(formatted=True)
def __getattr__(self, attribute):
return self and getattr(self[0], attribute, None)
def __setattr__(self, attribute, value):
for blok in self:
            setattr(blok, attribute, value)
    def first(self):
        return self.__class__(self[:1])
    def last(self):
        return self.__class__(self[-1:])
def __call__(self, *blox, position=None):
for blok in self:
if callable(blok):
                blok(*blox, position=position)
return self
def add_class(self, class_name):
for blok in self:
if hasattr(blok, 'classes'):
blok.classes.add(class_name)
return self
def remove_class(self, class_name):
for blok in self:
if hasattr(blok, 'classes'):
blok.classes.discard(class_name)
return self
def filter(self, **attributes):
return self.__class__((blok for blok in self if self._matches(blok, **attributes)))
def _matches(self, blok, **attributes):
for attribute, expected_value in attributes.items():
if type(expected_value) in (list, tuple):
check_in = getattr(blok, attribute, ())
for value in expected_value:
                    if value not in check_in:
return False
elif getattr(blok, attribute, None) != expected_value:
return False
return True
def walk(self):
for blok in self:
yield blok
if hasattr(blok, '_blox'):
for item in blok.blox.walk():
yield item
def all(self):
return self.__class__(self.walk())
def query(self, **attributes):
return self.__class__((blok for blok in self.walk() if self._matches(blok, **attributes)))
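# Illustrative sketch (assumes Tag instances as defined later in this module):
# `filter` tests only the direct members of a Blox, while `query` recurses
# through nested children via `walk`:
#
#   >>> group = Blox((Tag(id='a'), Tag(id='b')))
#   >>> [t.id for t in group.filter(id='a')]
#   ['a']
#   >>> [t.id for t in group.query(id='b')]
#   ['b']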
class TagAttributes(CombineSignals):
'''A meta class to automatically register signals for tag attributes'''
def __new__(metaclass, name, parents, class_dict, *kargs, **kwargs):
'''Updates a tag class to automatically register all signals'''
attributes = {name: attribute for name, attribute in class_dict.items() if isinstance(attribute,
AbstractAttribute)}
if attributes:
if hasattr(parents[0], 'attribute_descriptors'):
full_attributes = parents[0].attribute_descriptors.copy()
full_attributes.update(attributes)
attributes = full_attributes
blok_attributes = {}
render_attributes = []
direct_attributes = []
init_attributes = []
accessor_attributes = []
attribute_map = {}
for attribute_name, attribute in attributes.items():
if not hasattr(attribute, 'name'):
attribute.name = attribute_name
if isinstance(attribute, DirectAttribute):
direct_attributes.append(attribute)
if hasattr(attribute, 'render'):
render_attributes.append(attribute)
if not hasattr(attribute, 'object_attribute'):
attribute.object_attribute = '_{0}'.format(attribute_name)
if getattr(attribute, 'init', False):
init_attributes.append(attribute_name)
if isinstance(attribute, (BlokAttribute, NestedBlokAttribute)) and hasattr(attribute.type, 'tag'):
blok_attributes[attribute.type.tag] = attribute
if isinstance(attribute, AccessorAttribute):
accessor_attributes.append(attribute)
if not hasattr(attribute, 'parent_attribute'):
attribute.parent_attribute = '_{0}_parent'.format(attribute_name)
attribute_map[attribute.name] = attribute_name
if direct_attributes and not name == 'AbstractTag' and '__slots__' in class_dict:
class_dict['__slots__'] += tuple(attribute.object_attribute for attribute in direct_attributes)
class_dict['__slots__'] += tuple(attribute.parent_attribute for attribute in accessor_attributes)
if render_attributes:
if hasattr(parents[0], 'render_attributes'):
render_attributes = list(parents[0].render_attributes) + render_attributes
class_dict['render_attributes'] = set(render_attributes)
if init_attributes:
if hasattr(parents[0], 'init_attributes'):
init_attributes = list(parents[0].init_attributes) + init_attributes
class_dict['init_attributes'] = init_attributes
if blok_attributes:
if hasattr(parents[0], 'blok_attributes'):
full_blok_attributes = dict(parents[0].blok_attributes)
full_blok_attributes.update(blok_attributes)
blok_attributes = full_blok_attributes
class_dict['blok_attributes'] = blok_attributes
if attribute_map:
if hasattr(parents[0], 'attribute_map'):
full_attribute_map = dict(parents[0].attribute_map)
full_attribute_map.update(attribute_map)
attribute_map = full_attribute_map
class_dict['attribute_map'] = attribute_map
class_dict['attribute_descriptors'] = attributes
            attribute_signals = tuple(attribute.signal for attribute in attributes.values() if getattr(attribute, 'signal', None))
            if attribute_signals:
                class_dict['signals'] = class_dict.get('signals', ()) + attribute_signals
return super(TagAttributes, metaclass).__new__(metaclass, name, parents, class_dict, *kargs, **kwargs)
class Blok(Connectable, metaclass=TagAttributes):
    '''Defines the base blox blok object which can render itself and be instantiated'''
__slots__ = ()
def output(self, to=None, *args, **kwargs):
'''Outputs to a stream (like a file or request)'''
to.write('')
return self
def render(self, *args, **kwargs):
'''Renders as a str'''
render_to = StringIO()
self.output(render_to, *args, **kwargs)
return render_to.getvalue()
def __str__(self):
return self.render(formatted=True)
def __repr_self__(self, identifiers=()):
return "{0}({1})".format(self.__class__.__name__, " ".join(identifiers))
def __repr__(self):
return self.__repr_self__()
class Invalid(Blok):
    '''Defines how the lack of a valid Blok should be rendered'''
__slots__ = ()
def output(self, to=None, *args, **kwargs):
to.write('<h2>Invalid</h2>')
return self
class Container(Blok):
'''A Block that can contain child blocks'''
__slots__ = ('_blox', )
def __init__(self, *blox):
super().__init__()
if hasattr(self, 'init_attributes'):
for attribute_name in self.init_attributes:
getattr(self, attribute_name)
for blok in blox:
self(blok)
@property
def blox_container(self):
        '''Returns the container that should be responsible for adding children, outside of init'''
return self
@property
def blox(self):
'''Lazily creates and returns the list of child blox'''
if not hasattr(self, '_blox'):
self._blox = Blox()
return self._blox
def __call__(self, *blox, position=None):
'''Adds a nested blok to this blok'''
if position is not None:
for blok in blox:
self.blox_container.blox.insert(position, blok)
else:
for blok in blox:
self.blox_container.blox.append(blok)
return blok
def __iter__(self):
return self.blox_container.blox.__iter__()
def __contains__(self, blok):
return blok in self.blox_container.blox
def get(self, index, default=None):
        return self[index] if -len(self) <= index < len(self) else default
def __getitem__(self, index):
return self.blox_container.blox[index]
def __setitem__(self, index, value):
self.blox_container.blox.__setitem__(index, value)
def __delitem__(self, index):
return self.blox_container.blox.__delitem__(index)
def __isub__(self, blok):
self.blox_container.blox.remove(blok)
return self
def __iadd__(self, blok):
self(blok)
return self
def __len__(self):
return len(self.blox_container.blox)
def __repr__(self):
representation = [self.__repr_self__()]
for child in self:
for index, line in enumerate(repr(child).split("\n")):
representation.append(("|---" if index == 0 else "| ") + line)
return "\n".join(representation)
def output(self, to=None, formatted=False, indent=0, indentation=' ', *args, **kwargs):
'''Outputs to a stream (like a file or request)'''
if formatted and self.blox:
self.blox[0].output(to=to, formatted=True, indent=indent, indentation=indentation, *args, **kwargs)
for blok in self.blox[1:]:
to.write('\n')
to.write(indent * indentation)
blok.output(to=to, formatted=True, indent=indent, indentation=indentation, *args, **kwargs)
if not indent:
to.write('\n')
else:
for blok in self.blox:
blok.output(to=to, *args, **kwargs)
return self
class AbstractTag(Blok):
'''A Blok that renders a single tag'''
__slots__ = ()
tag_self_closes = True
tag = ""
id = RenderedDirect()
classes = SetAttribute(name="class")
accesskey = Attribute()
contenteditable = BooleanAttribute(default=True)
contextmenu = Attribute()
dir = Attribute()
draggable = BooleanAttribute()
dropzone = Attribute()
hidden = BooleanAttribute()
lang = Attribute()
spellcheck = BooleanAttribute()
style = Attribute()
tabindex = IntegerAttribute()
translate = BooleanAttribute(true_string="yes", false_string="no")
render_attributes = ()
def __init__(self, **attributes):
super().__init__()
for name, value in attributes.items():
setattr(self, name, value)
@property
def attributes(self):
'''Lazily creates and returns a tags attributes'''
if not hasattr(self, '_attributes'):
self._attributes = {}
return self._attributes
@property
def start_tag(self):
'''Returns the elements HTML start tag'''
direct_attributes = (attribute.render(self) for attribute in self.render_attributes)
attributes = ()
if hasattr(self, '_attributes'):
attributes = ('{0}="{1}"'.format(key, value)
for key, value in self.attributes.items() if value)
rendered_attributes = " ".join(filter(bool, chain(direct_attributes, attributes)))
return '<{0}{1}{2}{3}>'.format(self.tag, ' ' if rendered_attributes else '',
rendered_attributes, ' /' if self.tag_self_closes else "")
@property
def end_tag(self):
'''Returns the elements HTML end tag'''
if self.tag_self_closes:
return ''
return "</{0}>".format(self.tag)
def output(self, to=None, *args, **kwargs):
'''Outputs to a stream (like a file or request)'''
to.write(self.start_tag)
if not self.tag_self_closes:
to.write(self.end_tag)
def get(self, attribute, default=None):
if attribute in self.attribute_descriptors.keys():
return getattr(self, attribute, default)
else:
            return self.attributes.get(attribute, default)
def __contains__(self, attribute):
return attribute in self.attributes
def __getitem__(self, attribute):
if attribute in self.attribute_descriptors.keys():
return getattr(self, attribute)
else:
return self.attributes[attribute]
def __setitem__(self, attribute, value):
if attribute in self.attribute_descriptors.keys():
setattr(self, attribute, value)
else:
self.attributes[attribute] = value
def __delitem__(self, attribute):
del self.attributes[attribute]
def __repr_self__(self, identifiers=()):
if getattr(self, '_id', None):
identifiers = ('id="{0}"'.format(self.id), ) + identifiers
return super().__repr_self__(identifiers)
class Tag(AbstractTag):
'''A Blok that renders a single tag'''
__slots__ = ('_attributes', '_id', '_classes')
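# Illustrative sketch (not from the original source): item access on a tag is
# routed through the declared attribute descriptors when the name matches one,
# and otherwise falls back to the plain `attributes` dict, so ad-hoc HTML
# attributes still end up in the rendered start tag:
#
#   >>> t = Tag(id='header')
#   >>> t['data-role'] = 'banner'   # no descriptor -> stored in t.attributes
#   >>> t['id']                     # descriptor -> same as t.id
#   'header'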
class NamedTag(Tag):
'''A Tag with an attached name'''
__slots__ = ('_name', )
name = RenderedDirect()
def __repr_self__(self, identifiers=()):
if getattr(self, '_name', None):
identifiers += ('name="{0}"'.format(self.name), )
return super().__repr_self__(identifiers)
class TagWithChildren(Container, AbstractTag):
'''Defines a tag that can contain children'''
__slots__ = ('_attributes', '_id', '_classes')
tag = ""
tag_self_closes = False
def __init__(self, *blox, **attributes):
super().__init__()
for blok in blox:
self(blok)
for name, value in attributes.items():
setattr(self, name, value)
def output(self, to=None, formatted=False, indent=0, indentation=' ', *args, **kwargs):
'''Outputs to a stream (like a file or request)'''
if formatted:
to.write(self.start_tag)
to.write('\n')
if not self.tag_self_closes:
for blok in self.blox:
to.write(indentation * (indent + 1))
blok.output(to=to, indent=indent + 1, formatted=True, indentation=indentation, *args, **kwargs)
to.write('\n')
to.write(indentation * indent)
to.write(self.end_tag)
            if not indent:
to.write('\n')
else:
to.write(self.start_tag)
if not self.tag_self_closes:
for blok in self.blox:
blok.output(to=to, *args, **kwargs)
to.write(self.end_tag)
def __contains__(self, attribute_or_blok):
return Container.__contains__(self, attribute_or_blok) or AbstractTag.__contains__(self, attribute_or_blok)
def get(self, attribute_or_blok, default=None):
if type(attribute_or_blok) == int:
return Container.get(self, attribute_or_blok, default)
else:
return AbstractTag.get(self, attribute_or_blok, default)
def __getitem__(self, attribute_or_blok):
if type(attribute_or_blok) in (int, slice):
return Container.__getitem__(self, attribute_or_blok)
else:
return AbstractTag.__getitem__(self, attribute_or_blok)
def __setitem__(self, attribute_or_blok, value):
if type(attribute_or_blok) in (int, slice):
return Container.__setitem__(self, attribute_or_blok, value)
else:
return AbstractTag.__setitem__(self, attribute_or_blok, value)
def __delitem__(self, attribute_or_blok):
if type(attribute_or_blok) in (int, slice):
return Container.__delitem__(self, attribute_or_blok)
else:
return AbstractTag.__delitem__(self, attribute_or_blok)
class Wildcard(TagWithChildren):
'''Can represent any element that does not have a built in representation, not very efficient'''
__slots__ = ('tag', )
def __init__(self, tag, *kargs, **kwargs):
        self.tag = tag
        super().__init__(*kargs, **kwargs)
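# Illustrative usage (a sketch; the rendered string shows the expected shape
# rather than verified output):
#
#   >>> nav = Wildcard('nav')
#   >>> nav(Wildcard('span'))       # returns the child that was added
#   >>> nav.render()
#   '<nav><span></span></nav>'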
|
|
""" """
# Standard library modules.
import unittest
import logging
import datetime
# Third party modules.
# Local modules.
from pyhmsa.util.parameter import \
(Parameter, _Attribute, FrozenAttribute, NumericalAttribute, TextAttribute,
AtomicNumberAttribute, UnitAttribute, XRayLineAttribute, ObjectAttribute,
EnumAttribute, NumericalRangeAttribute, DateAttribute, TimeAttribute,
ChecksumAttribute)
from pyhmsa.type.checksum import Checksum
from pyhmsa.type.xrayline import xrayline
# Globals and constants variables.
from pyhmsa.type.xrayline import NOTATION_IUPAC, NOTATION_SIEGBAHN
class MockParameter(Parameter):
required = _Attribute(True, xmlname='XMLNAME')
notrequired = _Attribute(False)
frozen = FrozenAttribute(list)
numerical = NumericalAttribute('m')
text = TextAttribute()
atomic_number = AtomicNumberAttribute()
unit = UnitAttribute()
line = XRayLineAttribute()
object = ObjectAttribute(int)
enum = EnumAttribute(['a', 'b', 'c'])
numerical_range = NumericalRangeAttribute('s', -4.0, 4.0)
date = DateAttribute()
time = TimeAttribute()
checksum = ChecksumAttribute()
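# Note (inferred from the tests below): each attribute descriptor on a
# Parameter subclass also exposes generated accessors, e.g.
#
#   mock = MockParameter()
#   mock.set_required('abc')   # equivalent to mock.required = 'abc'
#   mock.get_required()        # -> 'abc'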
class TestModule(unittest.TestCase):
def setUp(self):
super().setUp()
self.mock = MockParameter()
def tearDown(self):
unittest.TestCase.tearDown(self)
def test__repr__(self):
self.assertEqual('<MockParameter(', repr(self.mock)[:15])
def test_attribute(self):
self.mock.required = 'abc'
self.assertEqual('abc', self.mock.required)
self.mock.set_required('def')
self.assertEqual('def', self.mock.get_required())
self.assertRaises(ValueError, self.mock.set_required, None)
self.assertRaises(ValueError, delattr, self.mock, 'required')
self.assertEqual('required', self.mock.__class__.required.name)
self.assertEqual('XMLNAME', self.mock.__class__.required.xmlname)
self.assertTrue(self.mock.__class__.required.is_required())
self.assertEqual('<_Attribute(required)>', repr(self.mock.__class__.required))
self.mock.notrequired = 'hkl'
del self.mock.notrequired
self.assertIsNone(self.mock.notrequired)
def testfrozen_attribute(self):
self.assertIsInstance(self.mock.frozen, list)
self.assertIsInstance(self.mock.get_frozen(), list)
self.assertFalse(hasattr(self.mock, 'set_frozen'))
self.assertRaises(AttributeError, setattr, self.mock, 'frozen', 'abc')
self.assertTrue(self.mock.__class__.frozen.is_required())
def testnumerical_attribute(self):
self.mock.numerical = (9.0, 'nm')
self.assertAlmostEqual(9.0, self.mock.numerical, 4)
self.assertEqual('nm', self.mock.numerical.unit)
self.mock.numerical = 11.0
self.assertAlmostEqual(11.0, self.mock.numerical, 4)
self.assertEqual('m', self.mock.numerical.unit)
def testtext_attribute(self):
self.assertTrue(True)
def testatomic_number_attribute(self):
self.mock.atomic_number = 25
self.assertEqual(25, self.mock.atomic_number)
self.mock.atomic_number = None
self.assertIsNone(self.mock.atomic_number)
self.assertRaises(ValueError, self.mock.set_atomic_number, 0)
self.assertRaises(ValueError, self.mock.set_atomic_number, 119)
def testunit_attribute(self):
self.mock.unit = 'A'
self.assertEqual('A', self.mock.unit)
self.mock.unit = None
self.assertIsNone(self.mock.unit)
self.assertRaises(ValueError, self.mock.set_unit, 'mmHg')
def testline_attribute(self):
self.mock.line = 'Ma'
self.assertEqual('Ma', self.mock.line)
self.assertEqual(NOTATION_SIEGBAHN, self.mock.line.notation)
self.mock.line = ('M5-N6,7', NOTATION_IUPAC)
self.assertEqual('M5-N6,7', self.mock.line)
self.assertEqual(NOTATION_IUPAC, self.mock.line.notation)
self.mock.line = xrayline('Ma', NOTATION_SIEGBAHN, 'M5-N6,7')
self.assertEqual('Ma', self.mock.line)
self.assertEqual(NOTATION_SIEGBAHN, self.mock.line.notation)
self.assertEqual('M5-N6,7', self.mock.line.alternative)
self.assertEqual(NOTATION_IUPAC, self.mock.line.alternative.notation)
self.mock.line = None
self.assertIsNone(self.mock.line)
def testobject_attribute(self):
self.mock.object = 5
self.assertEqual(5, self.mock.object)
self.mock.object = None
self.assertIsNone(self.mock.object)
self.assertRaises(ValueError, self.mock.set_object, 5.0)
self.assertIs(int, self.mock.__class__.object.type_)
def testenum_attribute(self):
self.mock.enum = 'b'
self.assertEqual('b', self.mock.enum)
self.mock.enum = None
self.assertIsNone(self.mock.enum)
self.assertRaises(ValueError, self.mock.set_enum, 'd')
def testnumerical_range_attribute(self):
self.mock.numerical_range = (2.0, 3.0)
self.assertAlmostEqual(2.0, self.mock.numerical_range[0], 4)
self.assertAlmostEqual(3.0, self.mock.numerical_range[1], 4)
self.assertEqual('s', self.mock.numerical_range.unit)
self.mock.set_numerical_range(-1.0, 2.0, 'A')
self.assertAlmostEqual(-1.0, self.mock.numerical_range[0], 4)
self.assertAlmostEqual(2.0, self.mock.numerical_range[1], 4)
self.assertEqual('A', self.mock.numerical_range.unit)
self.mock.numerical_range = None
self.assertIsNone(self.mock.numerical_range)
self.assertRaises(ValueError, self.mock.set_numerical_range, -5.0, 2.0)
self.assertRaises(ValueError, self.mock.set_numerical_range, 1.0, 5.0)
self.assertRaises(ValueError, self.mock.set_numerical_range, 3.0, 2.0)
self.assertRaises(ValueError, setattr, self.mock, 'numerical_range', (1.0, 2.0, 3.0))
def testdate_attribute(self):
self.mock.date = '2013-12-24'
self.assertEqual(2013, self.mock.date.year)
self.assertEqual(12, self.mock.date.month)
self.assertEqual(24, self.mock.date.day)
self.mock.date = datetime.date(2013, 12, 25)
self.assertEqual(2013, self.mock.date.year)
self.assertEqual(12, self.mock.date.month)
self.assertEqual(25, self.mock.date.day)
self.mock.date = None
self.assertIsNone(self.mock.date)
def testtime_attribute(self):
self.mock.time = '20:31:15'
self.assertEqual(20, self.mock.time.hour)
self.assertEqual(31, self.mock.time.minute)
self.assertEqual(15, self.mock.time.second)
self.mock.time = datetime.time(20, 31, 16)
self.assertEqual(20, self.mock.time.hour)
self.assertEqual(31, self.mock.time.minute)
self.assertEqual(16, self.mock.time.second)
self.mock.time = None
self.assertIsNone(self.mock.time)
def testchecksum_attribute(self):
self.mock.checksum = Checksum('53AAD59C05D59A40AD746D6928EA6D2D526865FD', 'SHA-1')
self.assertEqual('53AAD59C05D59A40AD746D6928EA6D2D526865FD', self.mock.checksum.value)
self.assertEqual('SHA-1', self.mock.checksum.algorithm)
self.mock.checksum = None
self.assertIsNone(self.mock.checksum)
self.assertRaises(ValueError, self.mock.set_checksum, object())
if __name__ == '__main__': # pragma: no cover
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
|
|
from netaddr import IPNetwork
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth.models import User
from django.urls import reverse
from dcim.models import Device, DeviceRole, DeviceType, Manufacturer, Site
from ipam.models import (
Aggregate, IPAddress, IP_PROTOCOL_TCP, IP_PROTOCOL_UDP, Prefix, RIR, Role, Service, VLAN, VLANGroup, VRF,
)
from users.models import Token
from utilities.tests import HttpStatusMixin
class VRFTest(HttpStatusMixin, APITestCase):
def setUp(self):
user = User.objects.create(username='testuser', is_superuser=True)
token = Token.objects.create(user=user)
self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
self.vrf1 = VRF.objects.create(name='Test VRF 1', rd='65000:1')
self.vrf2 = VRF.objects.create(name='Test VRF 2', rd='65000:2')
self.vrf3 = VRF.objects.create(name='Test VRF 3', rd='65000:3')
def test_get_vrf(self):
url = reverse('ipam-api:vrf-detail', kwargs={'pk': self.vrf1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.vrf1.name)
def test_list_vrfs(self):
url = reverse('ipam-api:vrf-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_vrf(self):
data = {
'name': 'Test VRF 4',
'rd': '65000:4',
}
url = reverse('ipam-api:vrf-list')
response = self.client.post(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(VRF.objects.count(), 4)
vrf4 = VRF.objects.get(pk=response.data['id'])
self.assertEqual(vrf4.name, data['name'])
self.assertEqual(vrf4.rd, data['rd'])
def test_update_vrf(self):
data = {
'name': 'Test VRF X',
'rd': '65000:99',
}
url = reverse('ipam-api:vrf-detail', kwargs={'pk': self.vrf1.pk})
response = self.client.put(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(VRF.objects.count(), 3)
vrf1 = VRF.objects.get(pk=response.data['id'])
self.assertEqual(vrf1.name, data['name'])
self.assertEqual(vrf1.rd, data['rd'])
def test_delete_vrf(self):
url = reverse('ipam-api:vrf-detail', kwargs={'pk': self.vrf1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(VRF.objects.count(), 2)
class RIRTest(HttpStatusMixin, APITestCase):
def setUp(self):
user = User.objects.create(username='testuser', is_superuser=True)
token = Token.objects.create(user=user)
self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
self.rir1 = RIR.objects.create(name='Test RIR 1', slug='test-rir-1')
self.rir2 = RIR.objects.create(name='Test RIR 2', slug='test-rir-2')
self.rir3 = RIR.objects.create(name='Test RIR 3', slug='test-rir-3')
def test_get_rir(self):
url = reverse('ipam-api:rir-detail', kwargs={'pk': self.rir1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.rir1.name)
def test_list_rirs(self):
url = reverse('ipam-api:rir-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_rir(self):
data = {
'name': 'Test RIR 4',
'slug': 'test-rir-4',
}
url = reverse('ipam-api:rir-list')
response = self.client.post(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(RIR.objects.count(), 4)
rir4 = RIR.objects.get(pk=response.data['id'])
self.assertEqual(rir4.name, data['name'])
self.assertEqual(rir4.slug, data['slug'])
def test_update_rir(self):
data = {
'name': 'Test RIR X',
'slug': 'test-rir-x',
}
url = reverse('ipam-api:rir-detail', kwargs={'pk': self.rir1.pk})
response = self.client.put(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(RIR.objects.count(), 3)
rir1 = RIR.objects.get(pk=response.data['id'])
self.assertEqual(rir1.name, data['name'])
self.assertEqual(rir1.slug, data['slug'])
def test_delete_rir(self):
url = reverse('ipam-api:rir-detail', kwargs={'pk': self.rir1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(RIR.objects.count(), 2)
class AggregateTest(HttpStatusMixin, APITestCase):
def setUp(self):
user = User.objects.create(username='testuser', is_superuser=True)
token = Token.objects.create(user=user)
self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
self.rir1 = RIR.objects.create(name='Test RIR 1', slug='test-rir-1')
self.rir2 = RIR.objects.create(name='Test RIR 2', slug='test-rir-2')
self.aggregate1 = Aggregate.objects.create(prefix=IPNetwork('10.0.0.0/8'), rir=self.rir1)
self.aggregate2 = Aggregate.objects.create(prefix=IPNetwork('172.16.0.0/12'), rir=self.rir1)
self.aggregate3 = Aggregate.objects.create(prefix=IPNetwork('192.168.0.0/16'), rir=self.rir1)
def test_get_aggregate(self):
url = reverse('ipam-api:aggregate-detail', kwargs={'pk': self.aggregate1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['prefix'], str(self.aggregate1.prefix))
def test_list_aggregates(self):
url = reverse('ipam-api:aggregate-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_aggregate(self):
data = {
'prefix': '192.0.2.0/24',
'rir': self.rir1.pk,
}
url = reverse('ipam-api:aggregate-list')
response = self.client.post(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Aggregate.objects.count(), 4)
aggregate4 = Aggregate.objects.get(pk=response.data['id'])
self.assertEqual(str(aggregate4.prefix), data['prefix'])
self.assertEqual(aggregate4.rir_id, data['rir'])
def test_update_aggregate(self):
data = {
'prefix': '11.0.0.0/8',
'rir': self.rir2.pk,
}
url = reverse('ipam-api:aggregate-detail', kwargs={'pk': self.aggregate1.pk})
response = self.client.put(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(Aggregate.objects.count(), 3)
aggregate1 = Aggregate.objects.get(pk=response.data['id'])
self.assertEqual(str(aggregate1.prefix), data['prefix'])
self.assertEqual(aggregate1.rir_id, data['rir'])
def test_delete_aggregate(self):
url = reverse('ipam-api:aggregate-detail', kwargs={'pk': self.aggregate1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(Aggregate.objects.count(), 2)
class RoleTest(HttpStatusMixin, APITestCase):
def setUp(self):
user = User.objects.create(username='testuser', is_superuser=True)
token = Token.objects.create(user=user)
self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
self.role1 = Role.objects.create(name='Test Role 1', slug='test-role-1')
self.role2 = Role.objects.create(name='Test Role 2', slug='test-role-2')
self.role3 = Role.objects.create(name='Test Role 3', slug='test-role-3')
def test_get_role(self):
url = reverse('ipam-api:role-detail', kwargs={'pk': self.role1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.role1.name)
def test_list_roles(self):
url = reverse('ipam-api:role-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_role(self):
data = {
'name': 'Test Role 4',
'slug': 'test-role-4',
}
url = reverse('ipam-api:role-list')
response = self.client.post(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Role.objects.count(), 4)
role4 = Role.objects.get(pk=response.data['id'])
self.assertEqual(role4.name, data['name'])
self.assertEqual(role4.slug, data['slug'])
def test_update_role(self):
data = {
'name': 'Test Role X',
'slug': 'test-role-x',
}
url = reverse('ipam-api:role-detail', kwargs={'pk': self.role1.pk})
response = self.client.put(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(Role.objects.count(), 3)
role1 = Role.objects.get(pk=response.data['id'])
self.assertEqual(role1.name, data['name'])
self.assertEqual(role1.slug, data['slug'])
def test_delete_role(self):
url = reverse('ipam-api:role-detail', kwargs={'pk': self.role1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(Role.objects.count(), 2)
class PrefixTest(HttpStatusMixin, APITestCase):
def setUp(self):
user = User.objects.create(username='testuser', is_superuser=True)
token = Token.objects.create(user=user)
self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
self.site1 = Site.objects.create(name='Test Site 1', slug='test-site-1')
self.vrf1 = VRF.objects.create(name='Test VRF 1', rd='65000:1')
self.vlan1 = VLAN.objects.create(vid=1, name='Test VLAN 1')
self.role1 = Role.objects.create(name='Test Role 1', slug='test-role-1')
self.prefix1 = Prefix.objects.create(prefix=IPNetwork('192.168.1.0/24'))
self.prefix2 = Prefix.objects.create(prefix=IPNetwork('192.168.2.0/24'))
self.prefix3 = Prefix.objects.create(prefix=IPNetwork('192.168.3.0/24'))
def test_get_prefix(self):
url = reverse('ipam-api:prefix-detail', kwargs={'pk': self.prefix1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['prefix'], str(self.prefix1.prefix))
def test_list_prefixes(self):
url = reverse('ipam-api:prefix-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_prefix(self):
data = {
'prefix': '192.168.4.0/24',
'site': self.site1.pk,
'vrf': self.vrf1.pk,
'vlan': self.vlan1.pk,
'role': self.role1.pk,
}
url = reverse('ipam-api:prefix-list')
response = self.client.post(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Prefix.objects.count(), 4)
prefix4 = Prefix.objects.get(pk=response.data['id'])
self.assertEqual(str(prefix4.prefix), data['prefix'])
self.assertEqual(prefix4.site_id, data['site'])
self.assertEqual(prefix4.vrf_id, data['vrf'])
self.assertEqual(prefix4.vlan_id, data['vlan'])
self.assertEqual(prefix4.role_id, data['role'])
def test_update_prefix(self):
data = {
'prefix': '192.168.99.0/24',
'site': self.site1.pk,
'vrf': self.vrf1.pk,
'vlan': self.vlan1.pk,
'role': self.role1.pk,
}
url = reverse('ipam-api:prefix-detail', kwargs={'pk': self.prefix1.pk})
response = self.client.put(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(Prefix.objects.count(), 3)
prefix1 = Prefix.objects.get(pk=response.data['id'])
self.assertEqual(str(prefix1.prefix), data['prefix'])
self.assertEqual(prefix1.site_id, data['site'])
self.assertEqual(prefix1.vrf_id, data['vrf'])
self.assertEqual(prefix1.vlan_id, data['vlan'])
self.assertEqual(prefix1.role_id, data['role'])
def test_delete_prefix(self):
url = reverse('ipam-api:prefix-detail', kwargs={'pk': self.prefix1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(Prefix.objects.count(), 2)
class IPAddressTest(HttpStatusMixin, APITestCase):
def setUp(self):
user = User.objects.create(username='testuser', is_superuser=True)
token = Token.objects.create(user=user)
self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
self.vrf1 = VRF.objects.create(name='Test VRF 1', rd='65000:1')
self.ipaddress1 = IPAddress.objects.create(address=IPNetwork('192.168.0.1/24'))
self.ipaddress2 = IPAddress.objects.create(address=IPNetwork('192.168.0.2/24'))
self.ipaddress3 = IPAddress.objects.create(address=IPNetwork('192.168.0.3/24'))
def test_get_ipaddress(self):
url = reverse('ipam-api:ipaddress-detail', kwargs={'pk': self.ipaddress1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['address'], str(self.ipaddress1.address))
def test_list_ipaddresses(self):
url = reverse('ipam-api:ipaddress-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_ipaddress(self):
data = {
'address': '192.168.0.4/24',
'vrf': self.vrf1.pk,
}
url = reverse('ipam-api:ipaddress-list')
response = self.client.post(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(IPAddress.objects.count(), 4)
ipaddress4 = IPAddress.objects.get(pk=response.data['id'])
self.assertEqual(str(ipaddress4.address), data['address'])
self.assertEqual(ipaddress4.vrf_id, data['vrf'])
def test_update_ipaddress(self):
data = {
'address': '192.168.0.99/24',
'vrf': self.vrf1.pk,
}
url = reverse('ipam-api:ipaddress-detail', kwargs={'pk': self.ipaddress1.pk})
response = self.client.put(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(IPAddress.objects.count(), 3)
ipaddress1 = IPAddress.objects.get(pk=response.data['id'])
self.assertEqual(str(ipaddress1.address), data['address'])
self.assertEqual(ipaddress1.vrf_id, data['vrf'])
def test_delete_ipaddress(self):
url = reverse('ipam-api:ipaddress-detail', kwargs={'pk': self.ipaddress1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(IPAddress.objects.count(), 2)
class VLANGroupTest(HttpStatusMixin, APITestCase):
def setUp(self):
user = User.objects.create(username='testuser', is_superuser=True)
token = Token.objects.create(user=user)
self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
self.vlangroup1 = VLANGroup.objects.create(name='Test VLAN Group 1', slug='test-vlan-group-1')
self.vlangroup2 = VLANGroup.objects.create(name='Test VLAN Group 2', slug='test-vlan-group-2')
self.vlangroup3 = VLANGroup.objects.create(name='Test VLAN Group 3', slug='test-vlan-group-3')
def test_get_vlangroup(self):
url = reverse('ipam-api:vlangroup-detail', kwargs={'pk': self.vlangroup1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.vlangroup1.name)
def test_list_vlangroups(self):
url = reverse('ipam-api:vlangroup-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_vlangroup(self):
data = {
'name': 'Test VLAN Group 4',
'slug': 'test-vlan-group-4',
}
url = reverse('ipam-api:vlangroup-list')
response = self.client.post(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(VLANGroup.objects.count(), 4)
vlangroup4 = VLANGroup.objects.get(pk=response.data['id'])
self.assertEqual(vlangroup4.name, data['name'])
self.assertEqual(vlangroup4.slug, data['slug'])
def test_update_vlangroup(self):
data = {
'name': 'Test VLAN Group X',
'slug': 'test-vlan-group-x',
}
url = reverse('ipam-api:vlangroup-detail', kwargs={'pk': self.vlangroup1.pk})
response = self.client.put(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(VLANGroup.objects.count(), 3)
vlangroup1 = VLANGroup.objects.get(pk=response.data['id'])
self.assertEqual(vlangroup1.name, data['name'])
self.assertEqual(vlangroup1.slug, data['slug'])
def test_delete_vlangroup(self):
url = reverse('ipam-api:vlangroup-detail', kwargs={'pk': self.vlangroup1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(VLANGroup.objects.count(), 2)
class VLANTest(HttpStatusMixin, APITestCase):
def setUp(self):
user = User.objects.create(username='testuser', is_superuser=True)
token = Token.objects.create(user=user)
self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
self.vlan1 = VLAN.objects.create(vid=1, name='Test VLAN 1')
self.vlan2 = VLAN.objects.create(vid=2, name='Test VLAN 2')
self.vlan3 = VLAN.objects.create(vid=3, name='Test VLAN 3')
def test_get_vlan(self):
url = reverse('ipam-api:vlan-detail', kwargs={'pk': self.vlan1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.vlan1.name)
def test_list_vlans(self):
url = reverse('ipam-api:vlan-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_vlan(self):
data = {
'vid': 4,
'name': 'Test VLAN 4',
}
url = reverse('ipam-api:vlan-list')
response = self.client.post(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(VLAN.objects.count(), 4)
vlan4 = VLAN.objects.get(pk=response.data['id'])
self.assertEqual(vlan4.vid, data['vid'])
self.assertEqual(vlan4.name, data['name'])
def test_update_vlan(self):
data = {
'vid': 99,
'name': 'Test VLAN X',
}
url = reverse('ipam-api:vlan-detail', kwargs={'pk': self.vlan1.pk})
response = self.client.put(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(VLAN.objects.count(), 3)
vlan1 = VLAN.objects.get(pk=response.data['id'])
self.assertEqual(vlan1.vid, data['vid'])
self.assertEqual(vlan1.name, data['name'])
def test_delete_vlan(self):
url = reverse('ipam-api:vlan-detail', kwargs={'pk': self.vlan1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(VLAN.objects.count(), 2)
class ServiceTest(HttpStatusMixin, APITestCase):
def setUp(self):
user = User.objects.create(username='testuser', is_superuser=True)
token = Token.objects.create(user=user)
self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
site = Site.objects.create(name='Test Site 1', slug='test-site-1')
manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
devicetype = DeviceType.objects.create(manufacturer=manufacturer, model='Test Device Type 1')
devicerole = DeviceRole.objects.create(name='Test Device Role 1', slug='test-device-role-1')
self.device1 = Device.objects.create(
name='Test Device 1', site=site, device_type=devicetype, device_role=devicerole
)
self.device2 = Device.objects.create(
name='Test Device 2', site=site, device_type=devicetype, device_role=devicerole
)
self.service1 = Service.objects.create(
device=self.device1, name='Test Service 1', protocol=IP_PROTOCOL_TCP, port=1
)
self.service2 = Service.objects.create(
device=self.device1, name='Test Service 2', protocol=IP_PROTOCOL_TCP, port=2
)
self.service3 = Service.objects.create(
device=self.device1, name='Test Service 3', protocol=IP_PROTOCOL_TCP, port=3
)
def test_get_service(self):
url = reverse('ipam-api:service-detail', kwargs={'pk': self.service1.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.service1.name)
def test_list_services(self):
url = reverse('ipam-api:service-list')
response = self.client.get(url, **self.header)
self.assertEqual(response.data['count'], 3)
def test_create_service(self):
data = {
'device': self.device1.pk,
'name': 'Test Service 4',
'protocol': IP_PROTOCOL_TCP,
'port': 4,
}
url = reverse('ipam-api:service-list')
response = self.client.post(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(Service.objects.count(), 4)
service4 = Service.objects.get(pk=response.data['id'])
self.assertEqual(service4.device_id, data['device'])
self.assertEqual(service4.name, data['name'])
self.assertEqual(service4.protocol, data['protocol'])
self.assertEqual(service4.port, data['port'])
def test_update_service(self):
data = {
'device': self.device2.pk,
'name': 'Test Service X',
'protocol': IP_PROTOCOL_UDP,
'port': 99,
}
url = reverse('ipam-api:service-detail', kwargs={'pk': self.service1.pk})
response = self.client.put(url, data, **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(Service.objects.count(), 3)
service1 = Service.objects.get(pk=response.data['id'])
self.assertEqual(service1.device_id, data['device'])
self.assertEqual(service1.name, data['name'])
self.assertEqual(service1.protocol, data['protocol'])
self.assertEqual(service1.port, data['port'])
def test_delete_service(self):
url = reverse('ipam-api:service-detail', kwargs={'pk': self.service1.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
self.assertEqual(Service.objects.count(), 2)
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Ken Pepple
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Built-in instance properties."""
import re
import sys
import uuid
from oslo.config import cfg
import six
from nova import context
from nova import db
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.pci import pci_request
from nova import utils
flavor_opts = [
cfg.StrOpt('default_flavor',
default='m1.small',
help='Default flavor to use for the EC2 API only. The Nova API '
'does not support a default flavor.'),
]
CONF = cfg.CONF
CONF.register_opts(flavor_opts)
LOG = logging.getLogger(__name__)
# NOTE(luisg): Flavor names can include non-ascii characters so that users can
# create flavor names in locales that use them, however flavor IDs are limited
# to ascii characters.
VALID_ID_REGEX = re.compile(r"^[\w\.\- ]*$")
VALID_NAME_REGEX = re.compile(r"^[\w\.\- ]*$", re.UNICODE)
# Validate extra specs key names.
VALID_EXTRASPEC_NAME_REGEX = re.compile(r"[\w\.\- :]+$", re.UNICODE)
def _int_or_none(val):
if val is not None:
return int(val)
system_metadata_flavor_props = {
'id': int,
'name': str,
'memory_mb': int,
'vcpus': int,
'root_gb': int,
'ephemeral_gb': int,
'flavorid': str,
'swap': int,
'rxtx_factor': float,
'vcpu_weight': _int_or_none,
}
def create(name, memory, vcpus, root_gb, ephemeral_gb=0, flavorid=None,
swap=0, rxtx_factor=1.0, is_public=True):
"""Creates flavors."""
if not flavorid:
flavorid = uuid.uuid4()
kwargs = {
'memory_mb': memory,
'vcpus': vcpus,
'root_gb': root_gb,
'ephemeral_gb': ephemeral_gb,
'swap': swap,
'rxtx_factor': rxtx_factor,
}
if isinstance(name, six.string_types):
name = name.strip()
# ensure name does not exceed 255 characters
utils.check_string_length(name, 'name', min_length=1, max_length=255)
# ensure name does not contain any special characters
valid_name = VALID_NAME_REGEX.search(name)
if not valid_name:
msg = _("Flavor names can only contain alphanumeric characters, "
"periods, dashes, underscores and spaces.")
raise exception.InvalidInput(reason=msg)
# NOTE(vish): Internally, flavorid is stored as a string but it comes
# in through json as an integer, so we convert it here.
flavorid = unicode(flavorid)
# ensure leading/trailing whitespaces not present.
if flavorid.strip() != flavorid:
msg = _("id cannot contain leading and/or trailing whitespace(s)")
raise exception.InvalidInput(reason=msg)
# ensure flavor id does not exceed 255 characters
utils.check_string_length(flavorid, 'id', min_length=1,
max_length=255)
# ensure flavor id does not contain any special characters
valid_flavor_id = VALID_ID_REGEX.search(flavorid)
if not valid_flavor_id:
msg = _("Flavor id can only contain letters from A-Z (both cases), "
"periods, dashes, underscores and spaces.")
raise exception.InvalidInput(reason=msg)
# Some attributes are positive ( > 0) integers
for option in ['memory_mb', 'vcpus']:
kwargs[option] = utils.validate_integer(kwargs[option], option, 1,
sys.maxint)
# Some attributes are non-negative ( >= 0) integers
for option in ['root_gb', 'ephemeral_gb', 'swap']:
kwargs[option] = utils.validate_integer(kwargs[option], option, 0,
sys.maxint)
# rxtx_factor should be a positive float
try:
kwargs['rxtx_factor'] = float(kwargs['rxtx_factor'])
if kwargs['rxtx_factor'] <= 0:
raise ValueError()
except ValueError:
msg = _("'rxtx_factor' argument must be a positive float")
raise exception.InvalidInput(reason=msg)
kwargs['name'] = name
kwargs['flavorid'] = flavorid
# ensure is_public attribute is boolean
try:
kwargs['is_public'] = strutils.bool_from_string(
is_public, strict=True)
except ValueError:
raise exception.InvalidInput(reason=_("is_public must be a boolean"))
try:
return db.flavor_create(context.get_admin_context(), kwargs)
except db_exc.DBError as e:
LOG.exception(_('DB error: %s') % e)
raise exception.FlavorCreateFailed()
def destroy(name):
"""Marks flavor as deleted."""
try:
if not name:
raise ValueError()
db.flavor_destroy(context.get_admin_context(), name)
except (ValueError, exception.NotFound):
LOG.exception(_('Instance type %s not found for deletion') % name)
raise exception.FlavorNotFoundByName(flavor_name=name)
def get_all_flavors(ctxt=None, inactive=False, filters=None):
"""Get all non-deleted flavors as a dict.
Pass inactive=True if you want deleted flavors returned as well.
"""
if ctxt is None:
ctxt = context.get_admin_context()
inst_types = db.flavor_get_all(
ctxt, inactive=inactive, filters=filters)
inst_type_dict = {}
for inst_type in inst_types:
inst_type_dict[inst_type['id']] = inst_type
return inst_type_dict
def get_all_flavors_sorted_list(ctxt=None, inactive=False, filters=None,
sort_key='flavorid', sort_dir='asc',
limit=None, marker=None):
"""Get all non-deleted flavors as a sorted list.
Pass inactive=True if you want deleted flavors returned as well.
"""
if ctxt is None:
ctxt = context.get_admin_context()
return db.flavor_get_all(ctxt, filters=filters, sort_key=sort_key,
sort_dir=sort_dir, limit=limit, marker=marker)
def get_default_flavor():
"""Get the default flavor."""
name = CONF.default_flavor
return get_flavor_by_name(name)
def get_flavor(instance_type_id, ctxt=None, inactive=False):
"""Retrieves single flavor by id."""
if instance_type_id is None:
return get_default_flavor()
if ctxt is None:
ctxt = context.get_admin_context()
if inactive:
ctxt = ctxt.elevated(read_deleted="yes")
return db.flavor_get(ctxt, instance_type_id)
def get_flavor_by_name(name, ctxt=None):
"""Retrieves single flavor by name."""
if name is None:
return get_default_flavor()
if ctxt is None:
ctxt = context.get_admin_context()
return db.flavor_get_by_name(ctxt, name)
# TODO(termie): flavor-specific code should probably be in the API that uses
# flavors.
def get_flavor_by_flavor_id(flavorid, ctxt=None, read_deleted="yes"):
"""Retrieve flavor by flavorid.
:raises: FlavorNotFound
"""
if ctxt is None:
ctxt = context.get_admin_context(read_deleted=read_deleted)
return db.flavor_get_by_flavor_id(ctxt, flavorid, read_deleted)
def get_flavor_access_by_flavor_id(flavorid, ctxt=None):
"""Retrieve flavor access list by flavor id."""
if ctxt is None:
ctxt = context.get_admin_context()
return db.flavor_access_get_by_flavor_id(ctxt, flavorid)
def add_flavor_access(flavorid, projectid, ctxt=None):
"""Add flavor access for project."""
if ctxt is None:
ctxt = context.get_admin_context()
return db.flavor_access_add(ctxt, flavorid, projectid)
def remove_flavor_access(flavorid, projectid, ctxt=None):
"""Remove flavor access for project."""
if ctxt is None:
ctxt = context.get_admin_context()
return db.flavor_access_remove(ctxt, flavorid, projectid)
def extract_flavor(instance, prefix=''):
"""Create an InstanceType-like object from instance's system_metadata
information.
"""
instance_type = {}
sys_meta = utils.instance_sys_meta(instance)
for key, type_fn in system_metadata_flavor_props.items():
type_key = '%sinstance_type_%s' % (prefix, key)
instance_type[key] = type_fn(sys_meta[type_key])
return instance_type
def save_flavor_info(metadata, instance_type, prefix=''):
"""Save properties from instance_type into instance's system_metadata,
in the format of:
[prefix]instance_type_[key]
This can be used to update system_metadata in place from a type, as well
as stash information about another instance_type for later use (such as
during resize).
"""
for key in system_metadata_flavor_props.keys():
to_key = '%sinstance_type_%s' % (prefix, key)
metadata[to_key] = instance_type[key]
pci_request.save_flavor_pci_info(metadata, instance_type, prefix)
return metadata
def delete_flavor_info(metadata, *prefixes):
"""Delete flavor instance_type information from instance's system_metadata
by prefix.
"""
for key in system_metadata_flavor_props.keys():
for prefix in prefixes:
to_key = '%sinstance_type_%s' % (prefix, key)
del metadata[to_key]
pci_request.delete_flavor_pci_info(metadata, *prefixes)
return metadata
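# Illustrative sketch (not part of the original module): the
# '[prefix]instance_type_[key]' key layout that save_flavor_info() /
# extract_flavor() use when stashing flavor properties in an instance's
# system_metadata. The flavor values below are hypothetical.
def _example_flavor_metadata_layout(prefix=''):
    flavor = {'memory_mb': 512, 'vcpus': 1, 'root_gb': 1}
    metadata = {}
    for key, value in flavor.items():
        # same key scheme as save_flavor_info()/extract_flavor() above
        metadata['%sinstance_type_%s' % (prefix, key)] = value
    return metadata
# _example_flavor_metadata_layout('new_') ==
#     {'new_instance_type_memory_mb': 512,
#      'new_instance_type_vcpus': 1,
#      'new_instance_type_root_gb': 1}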
def validate_extra_spec_keys(key_names_list):
for key_name in key_names_list:
if not VALID_EXTRASPEC_NAME_REGEX.match(key_name):
expl = _('Key Names can only contain alphanumeric characters, '
'periods, dashes, underscores, colons and spaces.')
raise exception.InvalidInput(message=expl)
|
|
'''
.. _simple_netem_daemon:
daemon module for the simple_netem package
:module: daemon
:copyright:
Copyright 2017 Serban Teodorescu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
:contact: [email protected]
exposes the functionality provided by the simple_netem package for remote
access. this script can be executed as a system service on a Linux host
'''
from __future__ import unicode_literals, absolute_import, division
import sys
import copy
import logging
import argparse
import Pyro4
import Pyro4.naming
import config
from control import NetemInterface
def main(argv=None):
'''
main
'''
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
_args = _get_args()
serve_pyro4(p4_args=_args)
def configure_pyro4():
'''
the Pyro4 settings required by this program are not the defaults
we must use a multiplexed server because we are accessing fixed, shared
resources.
we need to use a serializer that accepts Python class types so that we can
pass emulation objects from the client side to the server side. Pyro4
considers such serializers as not secure and will not expose them unless
so configured. there are 2 such serializers available: dill and pickle.
'''
Pyro4.config.SERVERTYPE = config.P4_SERVERTYPE
Pyro4.config.SERIALIZERS_ACCEPTED.add('pickle')
def _locate_pyro4_ns(p4_args):
'''
locate a Pyro4 naming server if so specified in the start-up arguments
if there is a Pyro4 naming server running on this subnet, then return a
proxy for it.
note that this function as written and invoked in this module can only
locate name servers that accept the serializer specified in the config
module. by default the pyro4 name server (pyro4-ns) doesn't accept
the insecure serializers required by this module.
to start a stand-alone pyro4 nameserver that can be used by this module,
execute:
export PYRO_SERIALIZERS_ACCEPTED=serpent,json,marshal,the_P4_PICKLE_config_value
pyro4-ns -n host [-p port] -k the_P4_HMAC_config_value
where host must be resolvable host name or IP address
:arg p4_args: the arguments used to start the daemon
:type p4_args: :class:`<_Args>`
:returns: a pyro4 proxy for the name server or None
:rtype: :class:`<Pyro4.core.Proxy>`
#TODO: figure out a way to check the registration and handle problems
with serializers. hmmm, quickie server with one of the emulation
classes and try to register it and then list the methods?
'''
p4_args.logger.debug('looking for external name server')
try:
name_server = Pyro4.locateNS(hmac_key=config.P4_HMAC)
p4_args.logger.debug('found name server %s' % name_server)
except Pyro4.errors.NamingError as err:
p4_args.logger.debug('...not found, error %s' % err)
name_server = None
return name_server
def serve_pyro4(p4_args):
'''
start a Pyro4 server that exposes a :class:`<NetemInterface>` instance
for each interface definition present in the command line arguments
need to convince Pyro4 to use pickle for serialization; otherwise we
won't be able to pass emulation objects directly on the client side. yes,
that is not secure but then this entire package is not secure to begin
with
'''
configure_pyro4()
netem_interfaces = []
if not isinstance(p4_args, _Args):
raise TypeError('invalid arguments %s' % p4_args)
p4_args.logger.info('starting simple_netem Pyro4 daemon...')
name_server = None
if p4_args.name_server:
name_server = _locate_pyro4_ns(p4_args)
p4_netem_interface_class = Pyro4.expose(NetemInterface)
for interface in p4_args.interfaces:
netem_interfaces.append(
p4_netem_interface_class(interface=interface[0],
side=interface[1], logger=p4_args.logger))
with Pyro4.Daemon(host=p4_args.server, port=p4_args.port) as p4_daemon:
p4_daemon._pyroHmacKey = config.P4_HMAC # pylint:disable=W0212
for netem_interface in netem_interfaces:
uri = p4_daemon.register(
netem_interface, objectId=netem_interface.interface)
p4_args.logger.info('Pyro4 URI for interface %s: %s' % (
netem_interface.interface, uri))
if name_server:
p4_args.logger.debug('registering with name server')
try:
name_server.register(netem_interface.interface, uri)
except Exception as err:
p4_args.logger.error(err)
p4_args.logger.info('simple_netem Pyro4 daemon running...')
p4_daemon.requestLoop()
def _log_level_to_int(level):
'''
custom type casting: string to logging.log_level
:returns: one of the `logging` log level `const`s
:rtype: int
:raises: :exception:`<argparse.ArgumentTypeError>` if input is invalid
'''
_level = level.upper()
if _level not in config.LOG_LEVELS:
raise argparse.ArgumentTypeError(
'invalid choice {}, must be one of {}'.format(
level, ', '.join(config.LOG_LEVELS)))
return getattr(logging, _level)
class _Args(object):
# pylint:disable=R0903
'''
adapter class creates `object` from `dict` for .attribute access
each member is an argument for the function(s) called in
:function:`main` after :function:`get_args` is invoked
'''
def __init__(self, args_as_dict):
'''
:arg dict args_as_dict: a dictionary
'''
# pylint:disable=no-member
# no-member (pylint E1101): instance has no member. in this case it
# does because we're messing with the __dict__ member
self.__dict__ = dict(args_as_dict)
if self.debug:
self.log_level = logging.DEBUG
self.logger = config.get_logger(
'pyro4_netem',
log_path=self.log_directory, log_level=self.log_level)
class _AppendTuple(argparse.Action):
# pylint:disable=R0903
'''
custom class that provides an append tuple action
same as action='append' but will append tuple objects instead of
`str` objects.
'''
@staticmethod
def ensure_value(namespace, name, value):
'''
make sure that the namespace object has a name
straight from the lib/argparse.py and used in the class definition
of _AppendAction(Action). not sure exactly why but i'm guessing
that it is needed to actually append entries to the namespace
instead of over-writing them
'''
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
def __call__(self, parser, namespace, values, option_string=None):
'''
overload :method:`<ArgumentParser.Action.__call__>`
will place a `tuple` in args.option_string.
it is expected that the arg values are as follows:
* --interface eth0
in this case, the tuple ('eth0', ``None``) is appended
to the namespace.interface
* --interface eth0:
this case will be resolved the same as above
* --interface eth0:inside
in this case the tuple ('eth0', 'inside') is appended
to the namespace.interface
* --interface eth0[:[symbolic name]] eth1[:[symbolic name]]
is treated as a generalization of the above. both interfaces
are cast to tuples as above and each tuple is
appended to the namespace.interface
:raises: :exception:`<argparse.ArgumentTypeError>` if more than one
':' is present in the input value
'''
items = copy.copy(self.ensure_value(namespace, self.dest, []))
for value in values:
value = value.split(':')
if len(value) < 2:
items.append((value[0], None))
elif len(value) == 2:
items.append((value[0], value[1]))
else:
raise argparse.ArgumentTypeError(
'invalid format for interface argument'
' %s. can contain at most one : character' % value)
setattr(namespace, self.dest, items)
def _get_args(description=config.DESCRIPTION, epilog=config.EPILOG):
'''
parse the command line arguments
:arg str description: a description of this script, default ``None``
:arg str: epilog:
an epilogue to be appended to the USAGE message, default ``None``
'''
parser = argparse.ArgumentParser(
description=description, epilog=epilog,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-v', '--version', action='version',
version='simple_netem package {}'.format(config.__version__),
help='show the version of %(prog)s')
parser.add_argument(
'-s', '--server', action='store', default=config.HOST,
help='the network address of the netem node')
parser.add_argument(
'-p', '--port', action='store', type=int, default=config.PORT,
help='the network port on which the netem server is listening')
parser.add_argument(
'-i', '--interface', dest='interfaces', nargs='+', action=_AppendTuple,
required=True,
help='a list of network interfaces that will be subject to netem'
' in a device_name[:symbolic_name] [device_name[:symbolic_name]]...'
' format. at least one device must be specified.'
' example: --interface eth1:inside eth2:outside')
parser.add_argument(
'-d', '--debug', action='store_true',
help='show debug information on the console and in the log files')
parser.add_argument(
'-l', '--log-level', dest='log_level', metavar='LEVEL', action='store',
type=_log_level_to_int, default=config.DEFAULT_LOG_LEVEL,
help='set the log level to one of {} not case sensitive'.format(
', '.join(config.LOG_LEVELS)
))
parser.add_argument(
'-o', '--log-directory', dest='log_directory', action='store',
default=config.LOGS, help='the directory to store log files')
parser.add_argument(
'-r', '--register-with-name-server', dest='name_server',
action='store_true',
help='register the Pyro4 URL(s) with a name server')
parser.add_argument(
'-n', '--start-name-server', dest='start_name_server',
action='store_true',
help='launch a Pyro4 name server if one cannot be found')
args_as_dict = vars(parser.parse_args())
return _Args(args_as_dict)
if __name__ == '__main__':
main()
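# Illustrative client-side sketch (not part of the daemon): connecting to a
# NetemInterface object exposed by serve_pyro4(). The interface name 'eth1'
# is hypothetical; host and port default to the same config values the daemon
# uses, config.P4_HMAC must match the daemon's HMAC key, and the client has
# to use the same insecure serializer (pickle) that configure_pyro4() enables
# on the server side.
def _example_client(interface='eth1', host=config.HOST, port=config.PORT):
    Pyro4.config.SERIALIZER = 'pickle'
    # serve_pyro4() registers each object with objectId=<interface name>,
    # so the URI has the form PYRO:<interface>@<host>:<port>
    proxy = Pyro4.Proxy('PYRO:{}@{}:{}'.format(interface, host, port))
    proxy._pyroHmacKey = config.P4_HMAC  # pylint:disable=W0212
    # the emulation methods exposed by NetemInterface can now be called on
    # the proxy as if it were a local object
    return proxy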
|
|
"""Support for August devices."""
import asyncio
import itertools
import logging
from aiohttp import ClientError
from august.authenticator import ValidationResult
from august.exceptions import AugustApiAIOHTTPError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady, HomeAssistantError
import homeassistant.helpers.config_validation as cv
from .activity import ActivityStream
from .const import (
AUGUST_COMPONENTS,
CONF_ACCESS_TOKEN_CACHE_FILE,
CONF_INSTALL_ID,
CONF_LOGIN_METHOD,
DATA_AUGUST,
DEFAULT_AUGUST_CONFIG_FILE,
DEFAULT_NAME,
DEFAULT_TIMEOUT,
DOMAIN,
LOGIN_METHODS,
MIN_TIME_BETWEEN_DETAIL_UPDATES,
VERIFICATION_CODE_KEY,
)
from .exceptions import InvalidAuth, RequireValidation
from .gateway import AugustGateway
from .subscriber import AugustSubscriberMixin
_LOGGER = logging.getLogger(__name__)
TWO_FA_REVALIDATE = "verify_configurator"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_LOGIN_METHOD): vol.In(LOGIN_METHODS),
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_INSTALL_ID): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_request_validation(hass, config_entry, august_gateway):
"""Request a new verification code from the user."""
#
# In the future this should start a new config flow
# instead of using the legacy configurator
#
_LOGGER.error("Access token is no longer valid")
configurator = hass.components.configurator
entry_id = config_entry.entry_id
async def async_august_configuration_validation_callback(data):
code = data.get(VERIFICATION_CODE_KEY)
result = await august_gateway.authenticator.async_validate_verification_code(
code
)
if result == ValidationResult.INVALID_VERIFICATION_CODE:
configurator.async_notify_errors(
hass.data[DOMAIN][entry_id][TWO_FA_REVALIDATE],
"Invalid verification code, please make sure you are using the latest code and try again.",
)
elif result == ValidationResult.VALIDATED:
return await async_setup_august(hass, config_entry, august_gateway)
return False
if TWO_FA_REVALIDATE not in hass.data[DOMAIN][entry_id]:
await august_gateway.authenticator.async_send_verification_code()
entry_data = config_entry.data
login_method = entry_data.get(CONF_LOGIN_METHOD)
username = entry_data.get(CONF_USERNAME)
hass.data[DOMAIN][entry_id][TWO_FA_REVALIDATE] = configurator.async_request_config(
f"{DEFAULT_NAME} ({username})",
async_august_configuration_validation_callback,
description=(
"August must be re-verified. "
f"Please check your {login_method} ({username}) "
"and enter the verification code below"
),
submit_caption="Verify",
fields=[
{"id": VERIFICATION_CODE_KEY, "name": "Verification code", "type": "string"}
],
)
return
async def async_setup_august(hass, config_entry, august_gateway):
"""Set up the August component."""
entry_id = config_entry.entry_id
hass.data[DOMAIN].setdefault(entry_id, {})
try:
await august_gateway.async_authenticate()
except RequireValidation:
await async_request_validation(hass, config_entry, august_gateway)
return False
except InvalidAuth:
_LOGGER.error("Password is no longer valid. Please set up August again")
return False
# We still use the configurator to get a new 2fa code
# when needed since config_flow doesn't have a way
# to re-request if it expires
if TWO_FA_REVALIDATE in hass.data[DOMAIN][entry_id]:
hass.components.configurator.async_request_done(
hass.data[DOMAIN][entry_id].pop(TWO_FA_REVALIDATE)
)
hass.data[DOMAIN][entry_id][DATA_AUGUST] = AugustData(hass, august_gateway)
await hass.data[DOMAIN][entry_id][DATA_AUGUST].async_setup()
for component in AUGUST_COMPONENTS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
return True
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the August component from YAML."""
conf = config.get(DOMAIN)
hass.data.setdefault(DOMAIN, {})
if not conf:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_LOGIN_METHOD: conf.get(CONF_LOGIN_METHOD),
CONF_USERNAME: conf.get(CONF_USERNAME),
CONF_PASSWORD: conf.get(CONF_PASSWORD),
CONF_INSTALL_ID: conf.get(CONF_INSTALL_ID),
CONF_ACCESS_TOKEN_CACHE_FILE: DEFAULT_AUGUST_CONFIG_FILE,
},
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up August from a config entry."""
august_gateway = AugustGateway(hass)
try:
await august_gateway.async_setup(entry.data)
return await async_setup_august(hass, entry, august_gateway)
except asyncio.TimeoutError:
raise ConfigEntryNotReady
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in AUGUST_COMPONENTS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class AugustData(AugustSubscriberMixin):
"""August data object."""
def __init__(self, hass, august_gateway):
"""Init August data object."""
super().__init__(hass, MIN_TIME_BETWEEN_DETAIL_UPDATES)
self._hass = hass
self._august_gateway = august_gateway
self.activity_stream = None
self._api = august_gateway.api
self._device_detail_by_id = {}
self._doorbells_by_id = {}
self._locks_by_id = {}
self._house_ids = set()
async def async_setup(self):
"""Async setup of august device data and activities."""
locks = (
await self._api.async_get_operable_locks(self._august_gateway.access_token)
or []
)
doorbells = (
await self._api.async_get_doorbells(self._august_gateway.access_token) or []
)
self._doorbells_by_id = {device.device_id: device for device in doorbells}
self._locks_by_id = {device.device_id: device for device in locks}
self._house_ids = {
device.house_id for device in itertools.chain(locks, doorbells)
}
await self._async_refresh_device_detail_by_ids(
[device.device_id for device in itertools.chain(locks, doorbells)]
)
# We remove all devices that we are missing
# detail as we cannot determine if they are usable.
# This also allows us to avoid checking for
# detail being None all over the place
self._remove_inoperative_locks()
self._remove_inoperative_doorbells()
self.activity_stream = ActivityStream(
self._hass, self._api, self._august_gateway, self._house_ids
)
await self.activity_stream.async_setup()
@property
def doorbells(self):
"""Return a list of py-august Doorbell objects."""
return self._doorbells_by_id.values()
@property
def locks(self):
"""Return a list of py-august Lock objects."""
return self._locks_by_id.values()
def get_device_detail(self, device_id):
"""Return the py-august LockDetail or DoorbellDetail object for a device."""
return self._device_detail_by_id[device_id]
async def _async_refresh(self, time):
await self._async_refresh_device_detail_by_ids(self._subscriptions.keys())
async def _async_refresh_device_detail_by_ids(self, device_ids_list):
for device_id in device_ids_list:
if device_id in self._locks_by_id:
await self._async_update_device_detail(
self._locks_by_id[device_id], self._api.async_get_lock_detail
)
# keypads are always attached to locks
if (
device_id in self._device_detail_by_id
and self._device_detail_by_id[device_id].keypad is not None
):
keypad = self._device_detail_by_id[device_id].keypad
self._device_detail_by_id[keypad.device_id] = keypad
elif device_id in self._doorbells_by_id:
await self._async_update_device_detail(
self._doorbells_by_id[device_id],
self._api.async_get_doorbell_detail,
)
_LOGGER.debug(
"async_signal_device_id_update (from detail updates): %s", device_id
)
self.async_signal_device_id_update(device_id)
async def _async_update_device_detail(self, device, api_call):
_LOGGER.debug(
"Started retrieving detail for %s (%s)",
device.device_name,
device.device_id,
)
try:
self._device_detail_by_id[device.device_id] = await api_call(
self._august_gateway.access_token, device.device_id
)
except ClientError as ex:
_LOGGER.error(
"Request error trying to retrieve %s details for %s. %s",
device.device_id,
device.device_name,
ex,
)
_LOGGER.debug(
"Completed retrieving detail for %s (%s)",
device.device_name,
device.device_id,
)
def _get_device_name(self, device_id):
"""Return doorbell or lock name as August has it stored."""
if self._locks_by_id.get(device_id):
return self._locks_by_id[device_id].device_name
if self._doorbells_by_id.get(device_id):
return self._doorbells_by_id[device_id].device_name
async def async_lock(self, device_id):
"""Lock the device."""
return await self._async_call_api_op_requires_bridge(
device_id,
self._api.async_lock_return_activities,
self._august_gateway.access_token,
device_id,
)
async def async_unlock(self, device_id):
"""Unlock the device."""
return await self._async_call_api_op_requires_bridge(
device_id,
self._api.async_unlock_return_activities,
self._august_gateway.access_token,
device_id,
)
async def _async_call_api_op_requires_bridge(
self, device_id, func, *args, **kwargs
):
"""Call an API that requires the bridge to be online and will change the device state."""
ret = None
try:
ret = await func(*args, **kwargs)
except AugustApiAIOHTTPError as err:
device_name = self._get_device_name(device_id)
if device_name is None:
device_name = f"DeviceID: {device_id}"
raise HomeAssistantError(f"{device_name}: {err}")
return ret
def _remove_inoperative_doorbells(self):
doorbells = list(self.doorbells)
for doorbell in doorbells:
device_id = doorbell.device_id
doorbell_is_operative = False
doorbell_detail = self._device_detail_by_id.get(device_id)
if doorbell_detail is None:
_LOGGER.info(
"The doorbell %s could not be setup because the system could not fetch details about the doorbell",
doorbell.device_name,
)
else:
doorbell_is_operative = True
if not doorbell_is_operative:
del self._doorbells_by_id[device_id]
del self._device_detail_by_id[device_id]
def _remove_inoperative_locks(self):
# Remove non-operative locks as there must
# be a bridge (August Connect) for them to
# be usable
locks = list(self.locks)
for lock in locks:
device_id = lock.device_id
lock_is_operative = False
lock_detail = self._device_detail_by_id.get(device_id)
if lock_detail is None:
_LOGGER.info(
"The lock %s could not be setup because the system could not fetch details about the lock",
lock.device_name,
)
elif lock_detail.bridge is None:
_LOGGER.info(
"The lock %s could not be setup because it does not have a bridge (Connect)",
lock.device_name,
)
elif not lock_detail.bridge.operative:
_LOGGER.info(
"The lock %s could not be setup because the bridge (Connect) is not operative",
lock.device_name,
)
else:
lock_is_operative = True
if not lock_is_operative:
del self._locks_by_id[device_id]
del self._device_detail_by_id[device_id]
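# Illustrative sketch (not part of the integration): how platform code would
# reach the shared AugustData instance stored by async_setup_august() and
# issue a lock operation. entry_id and device_id are hypothetical values.
async def _example_lock(hass, entry_id, device_id):
    data = hass.data[DOMAIN][entry_id][DATA_AUGUST]
    # raises HomeAssistantError if the bridge is offline or the call fails
    return await data.async_lock(device_id)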
|
|
"""
pocket.py
PocketWorld and PocketChunksFile for reading
saved files from Minecraft Pocket Edition.
Currently, only Block editing is supported. Little-endian NBT
support is needed before Entity and Player editing is possible.
"""
from __future__ import absolute_import
import logging
import os
import struct
from numpy import array, fromstring, zeros
import numpy
from mceditlib.blocktypes import pocket_blocktypes
from mceditlib.exceptions import ChunkNotPresent
from mceditlib.util import notclosing
from mceditlib.nbt import TAG_List
logger = logging.getLogger(__name__)
class PocketChunksFile(object):
holdFileOpen = False # if False, reopens and recloses the file on each access
SECTOR_BYTES = 4096
CHUNK_HEADER_SIZE = 4
@property
def file(self):
openfile = lambda: file(self.path, "rb+")
if PocketChunksFile.holdFileOpen:
if self._file is None:
self._file = openfile()
return notclosing(self._file)
else:
return openfile()
def close(self):
if PocketChunksFile.holdFileOpen:
self._file.close()
self._file = None
def __init__(self, path):
self.path = path
self._file = None
if not os.path.exists(path):
file(path, "w").close()
with self.file as f:
filesize = os.path.getsize(path)
if filesize & 0xfff:
filesize = (filesize | 0xfff) + 1
f.truncate(filesize)
if filesize == 0:
filesize = self.SECTOR_BYTES
f.truncate(filesize)
f.seek(0)
offsetsData = f.read(self.SECTOR_BYTES)
self.freeSectors = [True] * (filesize / self.SECTOR_BYTES)
self.freeSectors[0] = False
self.offsets = fromstring(offsetsData, dtype='<u4')
needsRepair = False
for index, offset in enumerate(self.offsets):
sector = offset >> 8
count = offset & 0xff
for i in xrange(sector, sector + count):
if i >= len(self.freeSectors):
# raise RegionMalformed("Region file offset table points to sector {0} (past the end of the file)".format(i))
logger.info("Region file offset table points to sector {0} (past the end of the file)".format(i))
needsRepair = True
break
if self.freeSectors[i] is False:
logger.debug("Double-allocated sector number %s (offset %s @ %s)", i, offset, index)
needsRepair = True
break
self.freeSectors[i] = False
if needsRepair:
self.repair()
logger.info("Found region file {file} with {used}/{total} sectors used and {chunks} chunks present".format(
file=os.path.basename(path), used=self.usedSectors, total=self.sectorCount, chunks=self.chunkCount))
@property
def usedSectors(self):
return len(self.freeSectors) - sum(self.freeSectors)
@property
def sectorCount(self):
return len(self.freeSectors)
@property
def chunkCount(self):
return sum(self.offsets > 0)
def repair(self):
raise NotImplementedError("Cannot repair old Pocket Edition worlds")
# lostAndFound = {}
# _freeSectors = [True] * len(self.freeSectors)
# _freeSectors[0] = _freeSectors[1] = False
# deleted = 0
# recovered = 0
# logger.info("Beginning repairs on {file} ({chunks} chunks)".format(file=os.path.basename(self.path), chunks=sum(self.offsets > 0)))
# rx, rz = self.regionCoords
# for index, offset in enumerate(self.offsets):
# if offset:
# cx = index & 0x1f
# cz = index >> 5
# cx += rx << 5
# cz += rz << 5
# sectorStart = offset >> 8
# sectorCount = offset & 0xff
# try:
#
# if sectorStart + sectorCount > len(self.freeSectors):
# raise RegionMalformed("Offset {start}:{end} ({offset}) at index {index} pointed outside of the file".format()
# start=sectorStart, end=sectorStart + sectorCount, index=index, offset=offset)
#
# compressedData = self._readChunk(cx, cz)
# if compressedData is None:
# raise RegionMalformed("Failed to read chunk data for {0}".format((cx, cz)))
#
# format, data = self.decompressSectors(compressedData)
# chunkTag = nbt.load(buf=data)
# lev = chunkTag["Level"]
# xPos = lev["xPos"].value
# zPos = lev["zPos"].value
# overlaps = False
#
# for i in xrange(sectorStart, sectorStart + sectorCount):
# if _freeSectors[i] is False:
# overlaps = True
# _freeSectors[i] = False
#
#
# if xPos != cx or zPos != cz or overlaps:
# lostAndFound[xPos, zPos] = (format, compressedData)
#
# if (xPos, zPos) != (cx, cz):
# raise RegionMalformed("Chunk {found} was found in the slot reserved for {expected}".format(found=(xPos, zPos), expected=(cx, cz)))
# else:
# raise RegionMalformed("Chunk {found} (in slot {expected}) has overlapping sectors with another chunk!".format(found=(xPos, zPos), expected=(cx, cz)))
#
#
#
# except Exception as e:
# logger.info("Unexpected chunk data at sector {sector} ({exc})".format(sector=sectorStart, exc=e))
# self.setOffset(cx, cz, 0)
# deleted += 1
#
# for cPos, (format, foundData) in lostAndFound.iteritems():
# cx, cz = cPos
# if self.getOffset(cx, cz) == 0:
# logger.info("Found chunk {found} and its slot is empty, recovering it".format(found=cPos))
# self._saveChunk(cx, cz, foundData[5:], format)
# recovered += 1
#
# logger.info("Repair complete. Removed {0} chunks, recovered {1} chunks, net {2}".format(deleted, recovered, recovered - deleted))
#
def _readChunk(self, cx, cz):
cx &= 0x1f
cz &= 0x1f
offset = self.getOffset(cx, cz)
if offset == 0:
return None
sectorStart = offset >> 8
numSectors = offset & 0xff
if numSectors == 0:
return None
if sectorStart + numSectors > len(self.freeSectors):
return None
with self.file as f:
f.seek(sectorStart * self.SECTOR_BYTES)
data = f.read(numSectors * self.SECTOR_BYTES)
assert(len(data) > 0)
logger.debug("REGION LOAD %s,%s sector %s", cx, cz, sectorStart)
return data
def loadChunk(self, cx, cz, world):
data = self._readChunk(cx, cz)
if data is None:
raise ChunkNotPresent((cx, cz, self))
chunk = PocketChunk(cx, cz, data[4:], world)
return chunk
def saveChunk(self, chunk):
cx = chunk.cx & 0x1f
cz = chunk.cz & 0x1f
offset = self.getOffset(cx, cz)
sectorNumber = offset >> 8
sectorsAllocated = offset & 0xff
data = chunk._savedData()
sectorsNeeded = (len(data) + self.CHUNK_HEADER_SIZE) / self.SECTOR_BYTES + 1
if sectorsNeeded >= 256:
return
if sectorNumber != 0 and sectorsAllocated >= sectorsNeeded:
logger.debug("REGION SAVE {0},{1} rewriting {2}b".format(cx, cz, len(data)))
self.writeSector(sectorNumber, data, format)
else:
# we need to allocate new sectors
# mark the sectors previously used for this chunk as free
for i in xrange(sectorNumber, sectorNumber + sectorsAllocated):
self.freeSectors[i] = True
runLength = 0
try:
runStart = self.freeSectors.index(True)
for i in range(runStart, len(self.freeSectors)):
if runLength:
if self.freeSectors[i]:
runLength += 1
else:
runLength = 0
elif self.freeSectors[i]:
runStart = i
runLength = 1
if runLength >= sectorsNeeded:
break
except ValueError:
pass
# we found a free space large enough
if runLength >= sectorsNeeded:
logger.debug("REGION SAVE {0},{1}, reusing {2}b".format(cx, cz, len(data)))
sectorNumber = runStart
self.setOffset(cx, cz, sectorNumber << 8 | sectorsNeeded)
self.writeSector(sectorNumber, data, format)
self.freeSectors[sectorNumber:sectorNumber + sectorsNeeded] = [False] * sectorsNeeded
else:
# no free space large enough found -- we need to grow the
# file
logger.debug("REGION SAVE {0},{1}, growing by {2}b".format(cx, cz, len(data)))
with self.file as f:
f.seek(0, 2)
filesize = f.tell()
sectorNumber = len(self.freeSectors)
assert sectorNumber * self.SECTOR_BYTES == filesize
filesize += sectorsNeeded * self.SECTOR_BYTES
f.truncate(filesize)
self.freeSectors += [False] * sectorsNeeded
self.setOffset(cx, cz, sectorNumber << 8 | sectorsNeeded)
self.writeSector(sectorNumber, data, format)
def writeSector(self, sectorNumber, data, format):
with self.file as f:
logger.debug("REGION: Writing sector {0}".format(sectorNumber))
f.seek(sectorNumber * self.SECTOR_BYTES)
f.write(struct.pack("<I", len(data) + self.CHUNK_HEADER_SIZE)) # // chunk length
f.write(data) # // chunk data
# f.flush()
def containsChunk(self, cx, cz):
return self.getOffset(cx, cz) != 0
def getOffset(self, cx, cz):
cx &= 0x1f
cz &= 0x1f
return self.offsets[cx + cz * 32]
def setOffset(self, cx, cz, offset):
cx &= 0x1f
cz &= 0x1f
self.offsets[cx + cz * 32] = offset
with self.file as f:
f.seek(0)
f.write(self.offsets.tostring())
def chunkCoords(self):
indexes = (i for (i, offset) in enumerate(self.offsets) if offset)
coords = ((i % 32, i // 32) for i in indexes)
return coords
class PocketWorldAdapter(object):
Height = 128
Length = 512
Width = 512
blocktypes = pocket_blocktypes
def __init__(self, filename):
raise NotImplementedError("Adapter for PocketWorld still incomplete!!!")
if not os.path.isdir(filename):
filename = os.path.dirname(filename)
self.filename = filename
self.dimensions = {}
self.chunkFile = PocketChunksFile(os.path.join(filename, "chunks.dat"))
self._loadedChunks = {}
@classmethod
def canOpenFile(cls, filename):
clp = ("chunks.dat", "level.dat")
if not os.path.isdir(filename):
f = os.path.basename(filename)
if f not in clp:
return False
filename = os.path.dirname(filename)
return all([os.path.exists(os.path.join(filename, f)) for f in clp])
# --- ISectionWorld methods ---
@property
def chunkPositions(self):
return self.chunkFile.chunkCoords()
@property
def chunkCount(self):
return self.chunkFile.chunkCount
def getChunk(self, cx, cz, create=False):
for p in cx, cz:
if not 0 <= p <= 31:
raise ChunkNotPresent((cx, cz, self))
c = self._loadedChunks.get((cx, cz))
if c is None:
c = self.chunkFile.loadChunk(cx, cz, self)
self._loadedChunks[cx, cz] = c
return c
def containsChunk(self, cx, cz):
if cx > 31 or cz > 31 or cx < 0 or cz < 0:
return False
return self.chunkFile.getOffset(cx, cz) != 0
# --- IWorld methods ---
def getEntities(self, *a, **kw):
return ()
def addEntity(self, tag):
pass
def getTileEntities(self, *a, **kw):
return ()
def addTileEntity(self, tag):
pass
def playerNames(self):
return ()
def getPlayer(self, playerName):
raise NotImplementedError
def save(self):
for chunk in self._loadedChunks.itervalues():
if chunk.dirty:
self.chunkFile.saveChunk(chunk)
chunk.dirty = False
def close(self):
pass
class PocketChunk(object):
Entities = TileEntities = property(lambda self: TAG_List())
dirty = False
filename = "chunks.dat"
def __init__(self, cx, cz, data, world):
self.cx = cx
self.cz = cz
self.world = world
data = fromstring(data, dtype='uint8')
self.Blocks, data = data[:32768], data[32768:]
self.Data, data = data[:16384], data[16384:]
self.SkyLight, data = data[:16384], data[16384:]
self.BlockLight, data = data[:16384], data[16384:]
self.DirtyColumns = data[:256]
self.unpackChunkData()
self.shapeChunkData()
def unpackChunkData(self):
for key in ('SkyLight', 'BlockLight', 'Data'):
dataArray = getattr(self, key)
dataArray.shape = (16, 16, 64)
s = dataArray.shape
# assert s[2] == self.world.Height / 2
# unpackedData = insert(dataArray[...,newaxis], 0, 0, 3)
unpackedData = numpy.empty((s[0], s[1], s[2] * 2), dtype='uint8')
unpackedData[:, :, ::2] = dataArray
unpackedData[:, :, ::2] &= 0xf
unpackedData[:, :, 1::2] = dataArray
unpackedData[:, :, 1::2] >>= 4
setattr(self, key, unpackedData)
def shapeChunkData(self):
chunkSize = 16
self.Blocks.shape = (chunkSize, chunkSize, self.world.Height)
self.SkyLight.shape = (chunkSize, chunkSize, self.world.Height)
self.BlockLight.shape = (chunkSize, chunkSize, self.world.Height)
self.Data.shape = (chunkSize, chunkSize, self.world.Height)
self.DirtyColumns.shape = chunkSize, chunkSize
def _savedData(self):
def packData(dataArray):
assert dataArray.shape[2] == self.world.Height
data = array(dataArray).reshape(16, 16, self.world.Height / 2, 2)
data[..., 1] <<= 4
data[..., 1] |= data[..., 0]
return array(data[:, :, :, 1])
if self.dirty:
# elements of DirtyColumns are bitfields. Each bit corresponds to a
# 16-block segment of the column. We set all of the bits because
# we only track modifications at the chunk level.
self.DirtyColumns[:] = 255
return "".join([self.Blocks.tostring(),
packData(self.Data).tostring(),
packData(self.SkyLight).tostring(),
packData(self.BlockLight).tostring(),
self.DirtyColumns.tostring(),
])
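# Illustrative sketch (not from the original module): Data/SkyLight/BlockLight
# store two 4-bit values per byte. unpackChunkData() above splits each byte
# into a low and a high nibble along the last axis, and packData() inside
# _savedData() reverses that. The hypothetical round trip below shows the same
# idea on a tiny array.
def _nibble_roundtrip_example():
    import numpy
    packed = numpy.array([[[0x21, 0x43]]], dtype='uint8')        # two packed bytes
    low = packed & 0xf                                           # 0x1, 0x3
    high = packed >> 4                                           # 0x2, 0x4
    unpacked = numpy.empty(packed.shape[:-1] + (packed.shape[-1] * 2,),
                           dtype='uint8')
    unpacked[..., ::2] = low                                     # even slots: low nibble
    unpacked[..., 1::2] = high                                   # odd slots: high nibble
    repacked = (unpacked[..., 1::2] << 4) | unpacked[..., ::2]   # back to bytes
    assert (repacked == packed).all()
    return unpacked                                              # [1, 2, 3, 4]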
|
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract base class for clustering algorithm."""
import abc
import six
import tensorflow as tf
from tensorflow_model_optimization.python.core.clustering.keras.cluster_config import GradientAggregation
@six.add_metaclass(abc.ABCMeta)
class ClusteringAlgorithm(object):
"""Class to implement highly efficient vectorised look-ups.
We do not utilise looping for that purpose, instead we `smartly` reshape and
tile arrays. The trade-off is that we are potentially using way more memory
than we would have if looping is used.
Each class that inherits from this class is supposed to implement a
particular lookup function for a certain shape.
For example, look-ups for 2D table will be different in the case of 3D.
"""
def __init__(
self,
clusters_centroids,
cluster_gradient_aggregation=GradientAggregation.SUM,
data_format=None
):
"""Generating clustered tensors.
For generating clustered tensors we will need two things: cluster
centroids and the final shape tensor must have.
Args:
clusters_centroids: An array of shape (N,) that contains initial values of
clusters centroids.
cluster_gradient_aggregation: An enum that specifies the aggregation method
for the cluster gradient.
data_format: To be used in cluster_per_channel to ensure the weight
kernel is permuted properly when updating the weights and calculating
gradients
"""
if not isinstance(clusters_centroids, tf.Variable):
raise ValueError("clusters_centroids should be a tf.Variable.")
self.cluster_centroids = clusters_centroids
self.cluster_gradient_aggregation = cluster_gradient_aggregation
self.data_format = data_format
@tf.custom_gradient
def average_centroids_gradient_by_cluster_size(self, cluster_centroids,
cluster_sizes):
"""Average the gradient based on the number of weights."""
def grad(d_cluster_centroids):
# Average the gradient based on the number of weights belonging to each
# cluster
d_cluster_centroids = tf.math.divide_no_nan(d_cluster_centroids,
cluster_sizes)
return d_cluster_centroids, None
return cluster_centroids, grad
@tf.custom_gradient
def add_gradient_to_original_weight(self, clustered_weight, original_weight):
"""Overrides gradients in the backprop stage.
This function overrides gradients in the backprop stage: the Jacobian
matrix of multiplication is replaced with the identity matrix, which
effectively changes multiplication into addition in the backprop. Since
the gradient of tf.sign is 0, overwriting it with identity follows
the design of straight-through-estimator, which accepts all upstream
gradients and uses them to update original non-clustered weights of
the layer. Here, we assume the gradient updates on individual elements
inside a cluster will be different so that there is no point in mapping
the gradient updates back to original non-clustered weights using the LUT.
Args:
clustered_weight: clustered weights
original_weight: original weights
Returns:
result and custom gradient, as expected by @tf.custom_gradient
"""
override_weights = tf.sign(original_weight + 1e+6)
override_clustered_weight = clustered_weight * override_weights
def grad(d_override_clustered_weight):
return d_override_clustered_weight, d_override_clustered_weight
return override_clustered_weight, grad
def get_pulling_indices(self, weight, centroids=None):
"""Returns indices of closest cluster centroids.
Takes a weight tensor (1D, 2D or ND) and creates a tf.int32 array of the
same shape that holds, for each element, the index of the cluster centroid
it will be pulled from.
In the current setup pulling indices are meant to be created once and
used everywhere.
Args:
weight: ND array of weights. For each weight in this array the closest
cluster centroid is found.
centroids: Optional list of cluster centroids.
Returns:
ND array of the same shape as the `weight` parameter, of type
tf.int32. The returned array contains weight lookup indices.
cluster_centroids = centroids if centroids is not None else (
self.cluster_centroids)
# We find the nearest cluster centroids and store them so that ops can build
# their kernels upon it.
pulling_indices = tf.argmin(
tf.abs(tf.expand_dims(weight, axis=-1) - cluster_centroids),
axis=-1)
return pulling_indices
def get_clustered_weight(self, pulling_indices, original_weight):
"""Returns clustered weights with custom gradients.
Takes indices (pulling_indices) as input and forms a new array
by gathering cluster centroids based on the given pulling indices.
The original gradients will also be modified in two ways:
- By averaging the gradient of cluster_centroids based on the size of
each cluster.
- By adding an estimated gradient onto the non-differentiable
original weight.
Args:
pulling_indices: a tensor of indices used for lookup of the same size as
original_weight.
original_weight: the original weights of the wrapped layer.
Returns:
array with the same shape as `pulling_indices`. Each array element
is a member of self.cluster_centroids. The backward pass is modified by
adding custom gradients.
"""
if self.cluster_gradient_aggregation == GradientAggregation.SUM:
cluster_centroids = self.cluster_centroids
elif self.cluster_gradient_aggregation == GradientAggregation.AVG:
cluster_centroids = self.cluster_centroids
# Compute the size of each cluster
# (number of weights belonging to each cluster)
cluster_sizes = tf.math.bincount(
arr=tf.cast(pulling_indices, dtype=tf.int32),
minlength=tf.size(cluster_centroids),
dtype=cluster_centroids.dtype,
)
# Modify the gradient of cluster_centroids to be averaged by cluster sizes
cluster_centroids = self.average_centroids_gradient_by_cluster_size(
cluster_centroids,
tf.stop_gradient(cluster_sizes),
)
else:
raise ValueError(f"self.cluster_gradient_aggregation="
f"{self.cluster_gradient_aggregation} not implemented.")
# Gather the clustered weights based on cluster centroids and
# pulling indices.
clustered_weight = tf.gather(cluster_centroids, pulling_indices)
# Add an estimated gradient to the original weight
clustered_weight = self.add_gradient_to_original_weight(
clustered_weight,
# Fix the bug with MirroredVariable and tf.custom_gradient:
# tf.identity will transform a MirroredVariable into a Variable
tf.identity(original_weight),
)
return clustered_weight
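# Illustrative sketch (not part of the library): the core of the lookup above
# is tf.argmin over |w - c| to pick the nearest centroid per weight, followed
# by tf.gather to rebuild the clustered tensor. A minimal stand-alone version,
# assuming 3 centroids and a 2x2 weight matrix:
def _nearest_centroid_example():
    weights = tf.constant([[0.1, 0.9], [0.52, -0.4]])
    centroids = tf.constant([-0.5, 0.0, 1.0])
    # Distance of every weight to every centroid: shape (2, 2, 3).
    distances = tf.abs(tf.expand_dims(weights, axis=-1) - centroids)
    pulling_indices = tf.argmin(distances, axis=-1)    # [[1, 2], [2, 0]]
    clustered = tf.gather(centroids, pulling_indices)  # [[0.0, 1.0], [1.0, -0.5]]
    return pulling_indices, clustered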
class ClusteringAlgorithmPerChannel(ClusteringAlgorithm):
"""Class for Per-channel clustering of Conv2D layers."""
def get_pulling_indices(self, weight):
"""Returns indices of closest cluster centroids.
This function is based on the function get_pulling_indices
of the base class ClusteringAlgorithm. We apply it per
channel of the convolutional layer.
Args:
weight: ND array of weights. For each weight in this array the closest
cluster centroid is found.
Returns:
ND array of the same shape as the `weight` parameter, of type
tf.int32. The returned array contains weight lookup indices.
"""
channel_indices = []
num_channels = (weight.shape[1] if self.data_format == "channels_first"
else weight.shape[-1])
for channel in range(num_channels):
channel_weights = (
weight[:, channel, :, :]
if self.data_format == "channels_first" else weight[:, :, :, channel])
channel_centroids = self.cluster_centroids[channel]
pulling_indices = super().get_pulling_indices(channel_weights,
channel_centroids)
channel_indices.append(pulling_indices)
pulling_indices = tf.convert_to_tensor(channel_indices)
pulling_indices = tf.transpose(
pulling_indices,
perm=(1, 0, 2, 3) if self.data_format == "channels_first" else
(1, 2, 3, 0))
return pulling_indices
def get_clustered_weight(self, pulling_indices, original_weight):
"""Returns clustered weights with custom gradients.
Takes the per-channel pulling_indices as input and retrieves
the corresponding clustered weights by using the gather operation
for each of the channels.
The original gradients will also be modified in two ways:
- By averaging the gradient of cluster_centroids based on the size of
each cluster.
- By adding an estimated gradient onto the non-differentiable
original weight.
Args:
pulling_indices: a tensor of per-channel indices used for lookup of the
same size as original_weight.
original_weight: the original weights of the wrapped layer.
Returns:
array with the same shape as `pulling_indices`. Each array element
is a member of self.cluster_centroids. The backward pass is modified by
adding custom gradients.
"""
num_channels = (
original_weight.shape[1]
if self.data_format == "channels_first" else original_weight.shape[-1])
# In case of channels_last, we have NHWC.
# In case of channels_first, we have NCHW.
# We need to transpose the tensor, so C is the first dimension
# and then we could loop over channels
pulling_indices = (
tf.transpose(
pulling_indices,
perm=(1, 0, 2, 3) if self.data_format == "channels_first" else
(3, 0, 1, 2)))
if self.cluster_gradient_aggregation == GradientAggregation.SUM:
cluster_centroids = self.cluster_centroids
elif self.cluster_gradient_aggregation == GradientAggregation.AVG:
cluster_sizes = []
for i in range(num_channels):
# Compute the size of each cluster for each channel
# (number of weights belonging to each cluster)
cluster_sizes.append(tf.math.bincount(
arr=tf.cast(pulling_indices[i], dtype=tf.int32),
minlength=tf.size(self.cluster_centroids[i]),
dtype=self.cluster_centroids.dtype,
))
cluster_sizes = tf.convert_to_tensor(cluster_sizes)
# Modify the gradient of cluster_centroids to be averaged by cluster sizes
cluster_centroids = self.average_centroids_gradient_by_cluster_size(
self.cluster_centroids,
tf.stop_gradient(cluster_sizes),
)
else:
raise ValueError(f"self.cluster_gradient_aggregation="
f"{self.cluster_gradient_aggregation} not implemented.")
clustered_weights = []
for i in range(num_channels):
clustered_weights.append(
tf.gather(cluster_centroids[i], pulling_indices[i]))
clustered_weight = tf.convert_to_tensor(clustered_weights)
# Permute weights to ensure the channels are first or last, as expected
# based on the data_format attribute
clustered_weight = (
tf.transpose(clustered_weights, perm=[1, 0, 2, 3])
if self.data_format == "channels_first" else tf.transpose(
clustered_weights, perm=[1, 2, 3, 0]))
# Add an estimated gradient to the original weight
clustered_weight = self.add_gradient_to_original_weight(
clustered_weight,
tf.identity(original_weight),
)
return clustered_weight
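# Illustrative sketch (not part of the library): for per-channel clustering the
# kernel is processed one output channel at a time, with its own centroid set,
# and the per-channel results are stacked and transposed back so the channel
# axis ends up where the data_format expects it. A toy channels_last example
# with a (1, 1, 1, 2) kernel and 2 centroids per channel:
def _per_channel_gather_example():
    kernel = tf.constant([[[[0.2, -0.7]]]])             # HWIO, channels_last
    centroids = tf.constant([[0.0, 1.0], [-1.0, 0.0]])  # one row per channel
    per_channel = []
    for channel in range(kernel.shape[-1]):
        channel_weights = kernel[..., channel]
        idx = tf.argmin(
            tf.abs(tf.expand_dims(channel_weights, -1) - centroids[channel]),
            axis=-1)
        per_channel.append(tf.gather(centroids[channel], idx))
    # Stack along a leading channel axis, then move it back to the end.
    clustered = tf.transpose(tf.stack(per_channel), perm=[1, 2, 3, 0])
    return clustered                                     # [[[[0.0, -1.0]]]]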
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup module for end-to-end dsub tests."""
# pylint: disable=line-too-long
# test_setup_e2e.py
#
# Intended to be imported into a test.
# Automatically imports variables from test_setup.py
#
# * Automatically determine PROJECT_ID
# * Automatically pick up a bucket name for tests.
#
# * Automatically set environment variables:
# * LOGGING=gs://${DSUB_BUCKET}/dsub/py/${DSUB_PROVIDER}/${TEST_NAME}/logging
# (task file tests)
# * LOGGING=gs://${DSUB_BUCKET}/dsub/py/${DSUB_PROVIDER}/${TEST_NAME}/logging/${TEST_NAME}.log
# (non-task file tests)
# * INPUTS=gs://${DSUB_BUCKET}/dsub/py/${DSUB_PROVIDER}/${TEST_NAME}/input
# * OUTPUTS=gs://${DSUB_BUCKET}/dsub/py/${DSUB_PROVIDER}/${TEST_NAME}/output
#
# * Check if LOGGING, INPUTS, and OUTPUTS are empty.
# * For task file tests, generate the file from TASKS_FILE_TMPL.
# pylint: enable=line-too-long
from __future__ import print_function
import os
import subprocess
import sys
from dsub.commands import dsub as dsub_command
# Because this may be invoked from another directory (treated as a library) or
# invoked locally (treated as a binary) both import styles need to be supported.
# pylint: disable=g-import-not-at-top
try:
from . import test_setup
from . import test_util
except SystemError:
import test_setup
import test_util
TEST_VARS = ("TEST_NAME", "TEST_DIR", "TEST_TMP", "TASKS_FILE",
"TASKS_FILE_TMPL",)
TEST_E2E_VARS = ("PROJECT_ID", "DSUB_BUCKET", "LOGGING", "INPUTS", "OUTPUTS",
"DOCKER_GCS_INPUTS", "DOCKER_GCS_OUTPUTS",)
def _environ():
"""Merge the current enviornment and test variables into a dictionary."""
e = dict(os.environ)
for var in TEST_VARS + TEST_E2E_VARS:
e[var] = globals()[var]
return e
# Copy test_setup variables
DSUB_PROVIDER = test_setup.DSUB_PROVIDER
TEST_NAME = test_setup.TEST_NAME
TEST_DIR = test_setup.TEST_DIR
TEST_TMP = test_setup.TEST_TMP
TASKS_FILE = test_setup.TASKS_FILE
TASKS_FILE_TMPL = test_setup.TASKS_FILE_TMPL
print("Checking that required environment values are set:")
if "YOUR_PROJECT" in os.environ:
PROJECT_ID = os.environ["YOUR_PROJECT"]
else:
print("Checking configured gcloud project")
PROJECT_ID = subprocess.check_output(
'gcloud config list core/project --format="value(core.project)"',
shell=True,
universal_newlines=True).strip()
if not PROJECT_ID:
print("Your project ID could not be determined.")
print("Set the environment variable YOUR_PROJECT or run \"gcloud init\".")
sys.exit(1)
print(" Project ID detected as: %s" % PROJECT_ID)
if "YOUR_BUCKET" in os.environ:
DSUB_BUCKET = os.environ["YOUR_BUCKET"]
else:
DSUB_BUCKET = "%s-dsub-test" % os.environ["USER"]
print(" Bucket detected as: %s" % DSUB_BUCKET)
print(" Checking if bucket exists")
if not test_util.gsutil_ls_check("gs://%s" % DSUB_BUCKET):
print("Bucket does not exist: %s" % DSUB_BUCKET, file=sys.stderr)
print("Create the bucket with \"gsutil mb\".", file=sys.stderr)
sys.exit(1)
# Set standard LOGGING, INPUTS, and OUTPUTS values
TEST_GCS_ROOT = "gs://%s/dsub/py/%s/%s" % (DSUB_BUCKET, DSUB_PROVIDER,
TEST_NAME)
TEST_GCS_DOCKER_ROOT = "gs/%s/dsub/py/%s/%s" % (DSUB_BUCKET, DSUB_PROVIDER,
TEST_NAME)
if TASKS_FILE:
# For task file tests, the logging path is a directory.
LOGGING = "%s/logging" % TEST_GCS_ROOT
else:
# For regular tests, the logging path is a named file.
LOGGING = "%s/logging/%s.log" % (TEST_GCS_ROOT, TEST_NAME)
STDOUT_LOG = "%s/%s-stdout.log" % (os.path.dirname(LOGGING), TEST_NAME)
STDERR_LOG = "%s/%s-stderr.log" % (os.path.dirname(LOGGING), TEST_NAME)
INPUTS = "%s/input" % TEST_GCS_ROOT
OUTPUTS = "%s/output" % TEST_GCS_ROOT
DOCKER_GCS_INPUTS = "%s/input" % TEST_GCS_DOCKER_ROOT
DOCKER_GCS_OUTPUTS = "%s/output" % TEST_GCS_DOCKER_ROOT
print("Logging path: %s" % LOGGING)
print("Input path: %s" % INPUTS)
print("Output path: %s" % OUTPUTS)
print(" Checking if remote test files already exists")
if test_util.gsutil_ls_check("%s/**" % TEST_GCS_ROOT):
print("Test files exist: %s" % TEST_GCS_ROOT, file=sys.stderr)
print("Remove contents:", file=sys.stderr)
print(" gsutil -m rm %s/**" % TEST_GCS_ROOT, file=sys.stderr)
sys.exit(1)
if TASKS_FILE:
# For a task file test, set up the task file from its template
print("Setting up task file %s" % TASKS_FILE)
if not os.path.exists(os.path.dirname(TASKS_FILE)):
os.makedirs(os.path.dirname(TASKS_FILE))
if os.path.exists(TASKS_FILE_TMPL):
test_util.expand_tsv_fields(_environ(), TASKS_FILE_TMPL, TASKS_FILE)
# Functions for launching dsub
#
# Tests should generally just call "run_dsub" which will then invoke
# the provider-specific function.
def run_dsub(dsub_args):
# Execute the appropriate dsub_<provider> function
return globals()["dsub_%s" % DSUB_PROVIDER.replace("-", "_")](dsub_args)
def dsub_google_cls_v2(dsub_args):
"""Call dsub appending google-cls-v2 required arguments."""
# pyformat: disable
google_cls_v2_opt_args = [
("BOOT_DISK_SIZE", "--boot-disk-size"),
("DISK_SIZE", "--disk-size")
]
# pyformat: enable
opt_args = []
for var in google_cls_v2_opt_args:
val = globals().get(var[0])
if val:
opt_args.extend([var[1], val])
# pyformat: disable
return dsub_command.call([
"--provider", "google-cls-v2",
"--project", PROJECT_ID,
"--logging", LOGGING,
"--regions", "us-central1"
] + opt_args + dsub_args)
# pyformat: enable
def dsub_google_v2(dsub_args):
"""Call dsub appending google-v2 required arguments."""
# pyformat: disable
google_v2_opt_args = [
("BOOT_DISK_SIZE", "--boot-disk-size"),
("DISK_SIZE", "--disk-size")
]
# pyformat: enable
opt_args = []
for var in google_v2_opt_args:
val = globals().get(var[0])
if val:
opt_args.extend([var[1], val])
# pyformat: disable
return dsub_command.call([
"--provider", "google-v2",
"--project", PROJECT_ID,
"--logging", LOGGING,
"--regions", "us-central1"
] + opt_args + dsub_args)
# pyformat: enable
def dsub_local(dsub_args):
"""Call dsub appending local-provider required arguments."""
# pyformat: disable
return dsub_command.call([
"--provider", "local",
"--logging", LOGGING,
] + dsub_args)
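# Illustrative sketch (not part of the test harness): a test would typically
# build provider-agnostic arguments and let run_dsub() dispatch to the
# provider-specific wrapper above via the DSUB_PROVIDER name. The flag values
# below are hypothetical.
def _example_launch():
    """Launch a trivial job through whichever provider is configured."""
    return run_dsub([
        "--name", "e2e-example",
        "--command", "echo 'hello from dsub'",
        "--env", "GREETING=hello",
        "--wait",
    ])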
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Evaluation related functions for BigMLer
"""
import os
import json
import numbers
import math
from bigml.util import slugify
import bigmler.utils as u
import bigmler.resourcesapi.evaluations as r
import bigmler.checkpoint as c
from bigmler.resourcesapi.common import shared_changed
def evaluate(models_or_ensembles, datasets, api, args, resume,
session_file=None, path=None, log=None,
fields=None, dataset_fields=None,
labels=None, all_labels=None,
objective_field=None):
"""Evaluates a list of models or ensembles with the given dataset
"""
output = args.predictions
evaluation_files = []
evaluations, resume = evaluations_process(
models_or_ensembles, datasets, fields,
dataset_fields, api, args, resume,
session_file=session_file, path=path, log=log,
labels=labels, all_labels=all_labels, objective_field=objective_field)
if hasattr(args, 'multi_label') and args.multi_label:
file_labels = [slugify(name) for name in
u.objective_field_names(models_or_ensembles, api)]
for index, evaluation in enumerate(evaluations):
evaluation = r.get_evaluation(evaluation, api, args.verbosity,
session_file)
if shared_changed(args.shared, evaluation):
evaluation_args = {"shared": args.shared}
evaluation = r.update_evaluation(evaluation, evaluation_args,
args, api=api, path=path,
session_file=session_file)
file_name = output
if hasattr(args, 'multi_label') and args.multi_label:
suffix = file_labels[index]
file_name += "_%s" % suffix
evaluation_files.append("%s.json" % file_name)
if args.test_datasets or args.dataset_off:
suffix = evaluation['resource'].replace('evaluation/', '_')
file_name += "_%s" % suffix
evaluation_files.append("%s.json" % file_name)
r.save_evaluation(evaluation, file_name, api)
if (hasattr(args, 'multi_label') and args.multi_label) or \
args.test_datasets or args.dataset_off:
mean_evaluation = average_evaluations(evaluation_files)
r.save_evaluation(mean_evaluation, output, api)
return resume
def cross_validate(models, dataset, fields, api, args, resume,
session_file=None, path=None, log=None):
"""Cross-validates using a MONTE-CARLO variant
"""
evaluations, resume = evaluations_process(
models, [dataset],
fields, fields, api, args, resume,
session_file=session_file, path=path, log=log)
if not resume:
evaluation_files = []
for evaluation in evaluations:
evaluation = r.get_evaluation(evaluation, api, args.verbosity,
session_file)
model_id = evaluation['object']['model']
file_name = "%s%s%s__evaluation" % (path, os.sep,
model_id.replace("/", "_"))
evaluation_files.append(file_name + ".json")
r.save_evaluation(evaluation, file_name, api)
cross_validation = average_evaluations(evaluation_files)
file_name = "%s%scross_validation" % (path, os.sep)
r.save_evaluation(cross_validation, file_name, api)
def evaluations_process(models_or_ensembles, datasets,
fields, dataset_fields, api, args, resume,
session_file=None, path=None, log=None, labels=None,
all_labels=None, objective_field=None):
"""Evaluates models or ensembles against datasets
"""
existing_evaluations = 0
evaluations = []
number_of_evaluations = len(models_or_ensembles)
if resume:
resume, evaluations = c.checkpoint(c.are_evaluations_created, path,
number_of_evaluations,
debug=args.debug)
if not resume:
existing_evaluations = len(evaluations)
message = u.dated("Found %s evaluations from %s. Resuming.\n" %
(existing_evaluations,
number_of_evaluations))
number_of_evaluations -= existing_evaluations
u.log_message(message, log_file=session_file,
console=args.verbosity)
if not resume:
if hasattr(args, 'multi_label') and args.multi_label:
evaluation_args = r.set_label_evaluation_args(
args, labels, all_labels,
number_of_evaluations, fields, dataset_fields,
objective_field)
else:
evaluation_args = r.set_evaluation_args(args, fields,
dataset_fields)
evaluations.extend(r.create_evaluations(
models_or_ensembles, datasets, evaluation_args,
args, api, path=path, session_file=session_file,
log=log, existing_evaluations=existing_evaluations))
return evaluations, resume
def standard_deviation(points, mean):
"""Computes the standard deviation
"""
total = float(len(points))
if total > 0:
return math.sqrt(sum([(point - mean) ** 2 for point in points]) /
total)
return float('nan')
def traverse_for_std_dev(tree):
"""Traverses the tree to find measure lists and compute standard deviation
"""
if isinstance(tree, dict):
keys = list(tree.keys())
for key in keys:
if (isinstance(key, tuple) and
key[0].endswith('_standard_deviation')):
tree[key[0]] = standard_deviation(tree[key], tree[key[1]])
del tree[key]
else:
traverse_for_std_dev(tree[key])
elif isinstance(tree, list):
for subtree in tree:
traverse_for_std_dev(subtree)
def average_evaluations(evaluation_files):
"""Reads the contents of the evaluations files and averages its measures
"""
averaged_evaluation = {}
number_of_evaluations = float(len(evaluation_files))
if number_of_evaluations > 0:
for evaluation_file in evaluation_files:
with open(evaluation_file) as evaluation_handle:
evaluation = json.loads(evaluation_handle.read())
avg_evaluation(averaged_evaluation,
evaluation, number_of_evaluations)
traverse_for_std_dev(averaged_evaluation)
return averaged_evaluation
def avg_evaluation(total, component, number_of_evaluations):
"""Adds a new set of evaluation measures to the cumulative average
"""
if number_of_evaluations > 0:
for key, value in list(component.items()):
# Handle the non-averageable values in
# classifications' evaluation data
if key == "class_names":
if not key in total:
total[key] = []
total[key].extend(value)
total[key] = list(set(total[key]))
elif key == "confusion_matrix":
if not key in total:
total[key] = value
else:
total[key] = add_matrices(total[key], value)
elif key == "per_class_statistics":
if not key in total:
total[key] = []
total[key] = avg_class_statistics(total[key], value,
number_of_evaluations)
else:
# Average numerical values
if isinstance(value, numbers.Number):
new_key = (key if key.startswith("average_")
else ("average_%s" % key))
if not new_key in total:
total[new_key] = 0
total[new_key] += value / number_of_evaluations
sd_key = "%s_standard_deviation" % key
if not (sd_key, new_key) in total:
total[(sd_key, new_key)] = []
total[(sd_key, new_key)].append(value)
# Handle grouping keys
elif isinstance(value, (dict, list)):
if not key in total:
total[key] = [] if isinstance(value, list) else {}
avg_evaluation(total[key], value,
number_of_evaluations)
def add_matrices(matrix_a, matrix_b):
"""Add two n x n matrices
"""
return [[x + y for x, y in zip(matrix_a[i], matrix_b[i])]
for i in range(len(matrix_a))]
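# Illustrative sketch (not part of BigMLer): avg_evaluation() accumulates
# running averages under "average_*" keys and stores the raw values under
# (sd_key, new_key) tuple keys, which traverse_for_std_dev() later replaces
# with the standard deviation. A toy run over two fake evaluation dicts:
def _example_average_two_evaluations():
    averaged = {}
    fake_evaluations = [{"accuracy": 0.8}, {"accuracy": 0.6}]
    for evaluation in fake_evaluations:
        avg_evaluation(averaged, evaluation, float(len(fake_evaluations)))
    traverse_for_std_dev(averaged)
    # averaged now holds roughly {"average_accuracy": 0.7,
    #                             "accuracy_standard_deviation": 0.1}
    return averaged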
def avg_class_statistics(total, component, number_of_evaluations):
"""Adds a new set of per class evaluation measures to the total average
"""
special_keys = ['class_name', 'present_in_test_data', 'occurrences']
remove_keys = ['ks_statistic', 'max_phi',
'per_threshold_confusion_matrices',
'roc_curve', 'pr_curve', 'negative_cdf',
'lift_curve', 'gain_curve']
for class_info in component:
for key in remove_keys:
if key in class_info:
del class_info[key]
class_name = class_info['class_name']
found = False
for total_class_info in total:
if class_name == total_class_info['class_name']:
found = True
flag = class_info['present_in_test_data']
# If the class is not present in the evaluation test data set,
# the measures for that class are not affected by it
if not flag:
total_class_info['occurrences'] -= 1
occurrences = float(total_class_info['occurrences'])
for key in total_class_info:
try:
# renormalizing previous average count
if not (isinstance(key, tuple) or
key in special_keys):
total_class_info[key] *= ((occurrences + 1) /
occurrences)
except (ValueError, TypeError):
pass
if not total_class_info['present_in_test_data']:
total_class_info['present_in_test_data'] = flag
occurrences = float(total_class_info['occurrences'])
for key in class_info:
try:
if not (isinstance(key, tuple) or key in special_keys):
new_key = (key if key.startswith("average_")
else ("average_%s" % key))
if new_key in total_class_info:
total_class_info[new_key] += (class_info[key] /
occurrences)
else:
total_class_info[new_key] = (class_info[key] /
occurrences)
sd_key = "%s_standard_deviation" % key
if not (sd_key, new_key) in total_class_info:
total_class_info[(sd_key, new_key)] = []
total_class_info[
(sd_key, new_key)].append(class_info[key])
except (ValueError, TypeError):
pass
break
if not found:
flag = class_info['present_in_test_data']
class_info['occurrences'] = int(number_of_evaluations)
if not flag:
class_info['occurrences'] -= 1
keys = list(class_info.keys())
for key in keys:
try:
if not key in special_keys:
sd_key = "%s_standard_deviation" % key
if not key.startswith("average_"):
new_key = "average_%s" % key
class_info[new_key] = (float(class_info[key]) /
class_info['occurrences'])
if not (sd_key, new_key) in class_info:
class_info[(sd_key, new_key)] = []
class_info[(sd_key, new_key)].append( \
class_info[key])
del class_info[key]
else:
new_key = key
class_info[key] = (float(class_info[key]) /
class_info['occurrences'])
if not (sd_key, new_key) in class_info:
class_info[(sd_key, new_key)] = []
class_info[(sd_key, new_key)].append( \
class_info[key])
except (ValueError, TypeError):
pass
total.append(class_info)
return total
|
|
from rathings.phase_unwrap import phase_unwrapp1d
import numpy as np
from scipy.optimize import least_squares
TECU = 1e16
def calc_phase(tec, freqs, cs = 0.):
'''Return the phase (num_freqs, num_times) from tec and CS.
`tec` : `numpy.ndarray`
tec in TECU of shape (num_times,)
`freqs` : `numpy.ndarray`
freqs in Hz of shape (num_freqs,)
`cs` : `numpy.ndarray` or float (optional)
Can be zero to disregard (default)
'''
TECU=1e16
phase = 8.44797256e-7*TECU * np.multiply.outer(1./freqs,tec) + cs
return phase
def robust_l2(obs_phase, freqs, solve_cs=True):
'''Solve the tec and cs for multiple datasets.
`obs_phase` : `numpy.ndarray`
the measured phase with shape (num_freqs, )
`freqs` : `numpy.ndarray`
the frequencies at the datapoints (num_freqs,)
`solve_cs` : (optional) bool
Whether to solve cs (True)
'''
obs_phase = phase_unwrapp1d(obs_phase)
if solve_cs:
def residuals(m, freqs, obs_phase):
tec,cs = m[0],m[1]
return calc_phase(tec,freqs,cs=cs) - obs_phase
else:
def residuals(m, freqs, obs_phase):
tec,cs = m[0],m[1]
return calc_phase(tec,freqs,cs=0.) - obs_phase
nan_mask = np.bitwise_not(np.isnan(obs_phase))
obs_phase_ = obs_phase[nan_mask]
freqs_ = freqs[nan_mask]
m0 = [0.0, 0.]
m = least_squares(residuals,m0,loss='soft_l1',f_scale=90.*np.pi/180.,args=(freqs_,obs_phase_))
if solve_cs:
return m.x[0], m.x[1]
else:
return m.x[0], 0.
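# Illustrative sketch (not part of the module): robust_l2() can be sanity
# checked by generating noisy phases from a known TEC with calc_phase() and
# recovering it. The frequency grid and noise level below are arbitrary.
def _example_robust_l2():
    freqs = np.linspace(120e6, 160e6, 50)           # 50 channels, Hz
    true_tec = 0.05                                 # TECU
    phase = calc_phase(np.array([true_tec]), freqs)[:, 0]
    noisy = phase + 0.05 * np.random.normal(size=phase.shape)
    tec_fit, cs_fit = robust_l2(noisy, freqs, solve_cs=True)
    return tec_fit, cs_fit                          # tec_fit should be close to 0.05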
def robust_l2_parallel(obs_phase, freqs, solve_cs=True, num_threads = None):
'''Solve the tec and cs for multiple datasets.
`obs_phase` : `numpy.ndarray`
the measured phase with shape (num_freqs, num_datasets)
`freqs` : `numpy.ndarray`
the frequencies at the datapoints (num_freqs,)
`solve_cs` : (optional) bool
Whether to solve cs (True)
`num_threads` : (optional) `int`
number of parallel threads to run. default None is num_cpu
'''
from dask import delayed, compute
from dask.threaded import get
from functools import partial
dsk = {}
N = obs_phase.shape[1]
values = [delayed(partial(robust_l2, solve_cs=solve_cs), pure=True)( obs_phase[:,i], freqs) for i in range(N)]
results = compute(*values, get=get, num_workers=num_threads)
return results
def l1_lpsolver(obs_phase, freqs, sigma_max = np.pi, fout=0.5, solve_cs=True, problem_name="l1_tec_solver"):
'''Formulate the linear problem:
Minimize 1'.(z1 + z2) s.t.
phase = (z1 - z2) + K/nu*TEC
|z1 + z2| < max_allowed
min_tec < TEC < max_tec
assumes obs_phase and freqs are for a single timestamp.
'''
nan_mask = np.isnan(obs_phase)
obs_phase = obs_phase[np.bitwise_not(nan_mask)]
freqs = freqs[np.bitwise_not(nan_mask)]
if (len(freqs)<10):
return 0.,0.,0.
obs_phase_unwrap = phase_unwrapp1d(obs_phase)
K = 8.44797256e-7*TECU
#z+, z-, a+, a-, asigma, a, sigma, tec, cs
N = len(freqs)
ncols = N*6 + 3
A_eq, b_eq = [],[]
A_lt, b_lt = [],[]
A_gt, b_gt = [],[]
c_obj = np.zeros(ncols,dtype=np.double)
for i in range(N):
idx_p = i
idx_m = N + i
idx_ap = 2*N + i
idx_am = 3*N + i
idx_as = 4*N + i
idx_a = 5*N + i
idx_s = 6*N
idx_tec = 6*N + 1
idx_cs = 6*N + 2
# 0<= a+ <= asigma
row = np.zeros(ncols,dtype=np.double)
row[[idx_ap,idx_as]] = 1., -1.
A_lt.append(row)
b_lt.append(0.)
# 0 <= z+ - a+ <= sigma - asigma
row = np.zeros(ncols,dtype=np.double)
row[[idx_p,idx_ap, idx_s, idx_as]] = 1., -1., -1., 1.
A_lt.append(row)
b_lt.append(0.)
row = np.zeros(ncols,dtype=np.double)
row[[idx_p,idx_ap]] = 1., -1.
A_gt.append(row)
b_gt.append(0.)
#same for a-
row = np.zeros(ncols,dtype=np.double)
row[[idx_am,idx_as]] = 1., -1.
A_lt.append(row)
b_lt.append(0.)
row = np.zeros(ncols,dtype=np.double)
row[[idx_m,idx_am, idx_s, idx_as]] = 1., -1., -1., 1.
A_lt.append(row)
b_lt.append(0.)
row = np.zeros(ncols,dtype=np.double)
row[[idx_m,idx_am]] = 1., -1.
A_gt.append(row)
b_gt.append(0.)
# 0 <= asigma <= a*sigma_max
row = np.zeros(ncols,dtype=np.double)
row[[idx_s,idx_a]] = 1., -sigma_max
A_lt.append(row)
b_lt.append(0.)
# 0 <= sigma - asigma <= sigma_max - a*sigma_max
row = np.zeros(ncols,dtype=np.double)
row[[idx_s,idx_as, idx_a]] = 1., -1., sigma_max
A_lt.append(row)
b_lt.append(sigma_max)
row = np.zeros(ncols,dtype=np.double)
row[[idx_s,idx_as]] = 1., -1.
A_gt.append(row)
b_gt.append(0.)
# a+ + a- >= asigma
row = np.zeros(ncols,dtype=np.double)
row[[idx_ap,idx_am, idx_as]] = 1., -1., -1.
A_gt.append(row)
b_gt.append(0.)
# z+ + z- - a+ - a- <= sigma - asigma
row = np.zeros(ncols,dtype=np.double)
row[[idx_p, idx_m, idx_ap, idx_am, idx_s, idx_as]] = 1., 1., -1., -1., -1., 1.
A_lt.append(row)
b_lt.append(0.)
# z+ - z- + K/nu*tec + cs = phase
row = np.zeros(ncols,dtype=np.double)
if solve_cs:
row[[idx_p, idx_m, idx_tec, idx_cs]] = 1., -1., K/freqs[i], 1.
else:
row[[idx_p, idx_m, idx_tec, idx_cs]] = 1., -1., K/freqs[i], 0.
A_eq.append(row)
b_eq.append(obs_phase_unwrap[i])
# minimize z+ + z- - a+ - a- + Nsigma_max
c_obj[[idx_p, idx_m, idx_ap, idx_am,idx_s]] = 1., 1., -1., -1.,N
row = np.zeros(ncols,dtype=np.double)
for i in range(N):
idx_a = 5*N + i
# sum a < fout * N
row[idx_a] = 1.
A_lt.append(row)
b_lt.append(fout*N)
A_eq, b_eq = np.array(A_eq), np.array(b_eq)
A_lt, b_lt = np.array(A_lt), np.array(b_lt)
A_gt, b_gt = np.array(A_gt), np.array(b_gt)
from mippy.lpsolver import LPSolver
lp = LPSolver(c_obj,A_eq=A_eq, b_eq=b_eq, A_lt=A_lt, b_lt=b_lt, A_gt=A_gt, b_gt=b_gt, maximize=False,problem_name=problem_name, solver_type='SIMP')
for i in range(len(freqs)):
idx_p = i
idx_m = N + i
idx_ap = 2*N + i
idx_am = 3*N + i
idx_as = 4*N + i
idx_a = 5*N + i
idx_s = 6*N
idx_tec = 6*N + 1
idx_cs = 6*N + 2
lp.set_variable_type(idx_p,'c',('>',0.))
lp.set_variable_type(idx_m,'c',('>',0.))
lp.set_variable_type(idx_ap,'c',('>',0.))
lp.set_variable_type(idx_am,'c',('>',0.))
lp.set_variable_type(idx_as,'c',('>',0.))
lp.set_variable_type(idx_a,'i',('<>',0., 1.))
lp.set_variable_type(idx_s,'c',('<>',0., sigma_max))
lp.set_variable_type(idx_tec,'c',('*',))
lp.set_variable_type(idx_cs,'c',('*',))
lp.compile()
res = lp.submit_problem()
for i in range(len(freqs)):
idx_p = i
idx_m = N + i
idx_ap = 2*N + i
idx_am = 3*N + i
idx_as = 4*N + i
idx_a = 5*N + i
idx_s = 6*N
idx_tec = 6*N + 1
idx_cs = 6*N + 2
assert np.isclose(res[idx_p]*res[idx_m], 0.) , "infeasible solution, {},{}".format(res[idx_p],res[idx_m])
return res[[6*N, 6*N+1, 6*N+2]]
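# Illustrative sketch (not part of the module): the core of the formulation
# above, stripped of the outlier (a, sigma) machinery, is a plain L1 fit
# min 1'.(z+ + z-) s.t. z+ - z- + K/nu*TEC + CS = phase, with z+, z- >= 0.
# That reduced problem can be posed directly with scipy.optimize.linprog;
# this is a simplified stand-in, not the mippy-based solver used above.
def _example_l1_fit_linprog(obs_phase, freqs):
    from scipy.optimize import linprog
    K = 8.44797256e-7 * TECU
    N = len(freqs)
    # Variables: z+ (N), z- (N), tec, cs.
    c = np.concatenate([np.ones(2 * N), [0., 0.]])
    A_eq = np.zeros((N, 2 * N + 2))
    A_eq[:, :N] = np.eye(N)            # z+
    A_eq[:, N:2 * N] = -np.eye(N)      # -z-
    A_eq[:, 2 * N] = K / freqs         # TEC term
    A_eq[:, 2 * N + 1] = 1.            # CS term
    b_eq = phase_unwrapp1d(obs_phase)
    bounds = [(0, None)] * (2 * N) + [(None, None), (None, None)]
    res = linprog(c, A_eq=A_eq, b_eq=b_eq, bounds=bounds)
    return res.x[2 * N], res.x[2 * N + 1]   # (tec, cs)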
def l1_lpsolver_parallel(obs_phase, freqs, sigma_max = np.pi, fout=0.5, solve_cs=True, problem_name="l1_tec_solver",num_threads = None):
'''Solve the tec and cs for multiple datasets.
`obs_phase` : `numpy.ndarray`
the measured phase with shape (num_freqs, num_datasets)
`freqs` : `numpy.ndarray`
the frequencies at the datapoints (num_freqs,)
`sigma_max` : (optional) `float`
the maximum allowed deviation for outlier detection. default np.pi
`fout` : (optional) `float`
The maximum fraction of allowed outliers out of total number of datapoints. default 0.5
`solve_cs` : (optional) bool
Whether to solve cs (True)
`num_threads` : (optional) `int`
number of parallel threads to run. default None is num_cpu
`problem_name` : (optional) `str`
name of problem "l1_tec_solver"
'''
from dask import delayed, compute
from dask.threaded import get
from functools import partial
dsk = {}
assert len(obs_phase.shape) == 2, "obs_phase not dim 2 {}".format(obs_phase.shape)
N = obs_phase.shape[1]
values = [delayed(partial(l1_lpsolver, sigma_max=sigma_max, fout=fout,solve_cs=solve_cs, problem_name="{}{:03d}".format(problem_name,i)), pure=True)( obs_phase[:,i], freqs) for i in range(N)]
#client = Client()
results = compute(*values, get=get, num_workers=num_threads)
return results
def l1data_l2model_solve(obs_phase,freqs,Cd_error,Ct_ratio=0.01,m0=None, CS_solve=False):
'''Solves for the terms phase(CS, TEC) = CS + e^2/(4 pi eps0 me c) * TEC/nu.
Delay is taken out.
CS is optionally solved for with the option `CS_solve`.
`obs_phase` : `numpy.ndarray`
The phase as observable in radians. The shape is assumed to be (len(freqs), num_timestamps)
`freqs` : `numpy.ndarray`
The frequencies in Hz at midpoints of observables.
`Cd_error` : `float` or `numpy.ndarray`
The uncertainty of the measured `obs_phase` in degrees.
If not a float then must be of shape `obs_phase.shape`
`Ct_ratio` : `float` (optional)
The systematic uncertainty in fraction of absolute phase.
Ct will be calculated as Ct_ratio*abs(obs_phase)
`m0` : `numpy.ndarray` (optional)
The initial guess of the model. If not given then set to zeros.
Shape must be (num_timestamps, 2) [even if CS is not solved for]
m0[:,0] = tec0, m0[:,1] = CS0
`CS_solve` : bool (optional)
Whether or not to solve for CS or set it to 0.
Returns model of shape (num_timestamps, 2) where,
m[:,0] = tec, m[:,1] = CS
'''
obs_phase = phase_unwrapp1d(obs_phase,axis=0)
alpha = 0.
if CS_solve:
alpha = 1.
# whether or not to use a priori information (regularization) (soft problem makes little difference)
beta = 0.
def calc_phase(m, freqs):
tec = m[:,0]
cs = m[:,1]
phase = 8.44797256e-7*TECU * np.multiply.outer(1./freqs,tec) + alpha*cs
return phase
def neglogL(obs_phase,m,CdCt_phase,m0,cov_m,freqs):
'''Return per timestep'''
K = 8.44797256e-7*TECU
nu = np.multiply.outer(1./freqs,np.ones(obs_phase.shape[1]))
tec = m[:,0]
cs = m[:,1]
tec_p = m0[:,0]
cs_p = m0[:,1]
sigma_tec2 = cov_m[0]
sigma_cs2 = cov_m[1]
sigma_phase = np.sqrt(CdCt_phase)
phase = obs_phase
#return np.nansum(np.abs(K*np.multiply.outer(1./freqs,tec) - phase)/sigma_phase,axis=0)
return beta*((tec - tec_p)**2/sigma_tec2 + (cs - cs_p)**2/sigma_cs2)/2 + np.nansum(np.abs(K*np.multiply.outer(1./freqs,tec) + alpha*cs - phase)/sigma_phase,axis=0)
def calc_grad(obs_phase,m,CdCt_phase,m0,cov_m,freqs):
K = 8.44797256e-7*TECU
nu = np.multiply.outer(1./freqs,np.ones(obs_phase.shape[1]))
tec = m[:,0]
cs = m[:,1]
tec_p = m0[:,0]
cs_p = m0[:,1]
sigma_tec2 = cov_m[0]
sigma_cs2 = cov_m[1]
sigma_phase = np.sqrt(CdCt_phase)
phase = obs_phase
x0 = sigma_tec2
x1 = K/nu
x1_ = K*np.multiply.outer(1./freqs,tec)
x2 = np.sign(alpha*cs - phase + x1_)/sigma_phase
x3 = sigma_cs2
grad = np.zeros([obs_phase.shape[1],2])
grad[:,0] = x0*(beta*(tec - tec_p)/x0 + np.nansum((x1*x2),axis=0))
grad[:,1] = x3 * (beta*(cs - cs_p)/x3 + np.nansum(alpha*x2,axis=0))
return grad
def calc_epsilon_n(dir,m,freqs,CdCt,obs_phase,step=1e-3):
"""Approximate stepsize"""
g0 = calc_phase(m, freqs)
gp = calc_phase(m + step*dir, freqs)
Gm = (gp - g0)/step
dd = obs_phase - g0
epsilon_n = (np.nansum(Gm*dd/CdCt,axis=0)/np.nansum(Gm/CdCt*Gm,axis=0))
return epsilon_n
if m0 is None:
m0 = np.zeros([obs_phase.shape[1],2],dtype=np.double)
m = m0.copy()
cov_m = np.array([1e-4,1e-4])
#print( calc_phase(m,freqs) - obs_phase)
Ct = (Ct_ratio*np.abs(obs_phase))**2
Cd = (Cd_error*np.pi/180.)**2
CdCt = Cd+Ct
#print(np.sqrt(CdCt))
#print( np.nansum(np.abs(calc_phase(m,freqs) - obs_phase)/np.sqrt(CdCt),axis=0))
S = neglogL(obs_phase,m,CdCt,m0,cov_m,freqs)
#print("Initial neglogL: {}".format(S))
iter = 0
Nmax = 2  # one is enough
while iter < Nmax:
grad = calc_grad(obs_phase,m,CdCt,m0,cov_m,freqs)
dir = grad
epsilon_n = calc_epsilon_n(dir,m,freqs,CdCt,obs_phase,step=1e-3)
#print("epsilon_n: {}".format(epsilon_n))
m += dir*epsilon_n
S = neglogL(obs_phase,m,CdCt,m0,cov_m,freqs)
#print("Model: {}".format(m))
#print("iter {}: neglogL: {}, log|dm/m|: {}, |grad|: {}".format(iter, S, np.mean(np.log(np.abs(np.einsum("i,ij->ij",epsilon_n,dir)/m))),np.nansum(np.abs(grad))))
iter += 1
#print("Final neglogL: {}".format(S))
return m
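# Illustrative sketch (not part of the module): l1data_l2model_solve() works on
# a (num_freqs, num_timestamps) phase block. Below, phases are simulated for a
# handful of timestamps from known TEC values and then refit; the noise level
# and Cd_error (in degrees) are arbitrary.
def _example_l1data_l2model():
    freqs = np.linspace(120e6, 160e6, 40)
    true_tec = np.array([0.01, 0.02, 0.03])            # TECU, one per timestamp
    phase = 8.44797256e-7 * TECU * np.multiply.outer(1. / freqs, true_tec)
    phase += 0.02 * np.random.normal(size=phase.shape)
    m = l1data_l2model_solve(phase, freqs, Cd_error=5., CS_solve=False)
    return m[:, 0]                                      # fitted TEC per timestamp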
|
|
'''tzinfo timezone information for Europe/Tirane.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Tirane(DstTzInfo):
'''Europe/Tirane timezone definition. See datetime.tzinfo for details'''
zone = 'Europe/Tirane'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1913,12,31,22,40,40),
d(1940,6,15,23,0,0),
d(1942,11,2,1,0,0),
d(1943,3,29,1,0,0),
d(1943,4,10,1,0,0),
d(1974,5,3,23,0,0),
d(1974,10,1,22,0,0),
d(1975,4,30,23,0,0),
d(1975,10,1,22,0,0),
d(1976,5,1,23,0,0),
d(1976,10,2,22,0,0),
d(1977,5,7,23,0,0),
d(1977,10,1,22,0,0),
d(1978,5,5,23,0,0),
d(1978,9,30,22,0,0),
d(1979,5,4,23,0,0),
d(1979,9,29,22,0,0),
d(1980,5,2,23,0,0),
d(1980,10,3,22,0,0),
d(1981,4,25,23,0,0),
d(1981,9,26,22,0,0),
d(1982,5,1,23,0,0),
d(1982,10,2,22,0,0),
d(1983,4,17,23,0,0),
d(1983,9,30,22,0,0),
d(1984,3,31,23,0,0),
d(1984,9,30,1,0,0),
d(1985,3,31,1,0,0),
d(1985,9,29,1,0,0),
d(1986,3,30,1,0,0),
d(1986,9,28,1,0,0),
d(1987,3,29,1,0,0),
d(1987,9,27,1,0,0),
d(1988,3,27,1,0,0),
d(1988,9,25,1,0,0),
d(1989,3,26,1,0,0),
d(1989,9,24,1,0,0),
d(1990,3,25,1,0,0),
d(1990,9,30,1,0,0),
d(1991,3,31,1,0,0),
d(1991,9,29,1,0,0),
d(1992,3,29,1,0,0),
d(1992,9,27,1,0,0),
d(1993,3,28,1,0,0),
d(1993,9,26,1,0,0),
d(1994,3,27,1,0,0),
d(1994,9,25,1,0,0),
d(1995,3,26,1,0,0),
d(1995,9,24,1,0,0),
d(1996,3,31,1,0,0),
d(1996,10,27,1,0,0),
d(1997,3,30,1,0,0),
d(1997,10,26,1,0,0),
d(1998,3,29,1,0,0),
d(1998,10,25,1,0,0),
d(1999,3,28,1,0,0),
d(1999,10,31,1,0,0),
d(2000,3,26,1,0,0),
d(2000,10,29,1,0,0),
d(2001,3,25,1,0,0),
d(2001,10,28,1,0,0),
d(2002,3,31,1,0,0),
d(2002,10,27,1,0,0),
d(2003,3,30,1,0,0),
d(2003,10,26,1,0,0),
d(2004,3,28,1,0,0),
d(2004,10,31,1,0,0),
d(2005,3,27,1,0,0),
d(2005,10,30,1,0,0),
d(2006,3,26,1,0,0),
d(2006,10,29,1,0,0),
d(2007,3,25,1,0,0),
d(2007,10,28,1,0,0),
d(2008,3,30,1,0,0),
d(2008,10,26,1,0,0),
d(2009,3,29,1,0,0),
d(2009,10,25,1,0,0),
d(2010,3,28,1,0,0),
d(2010,10,31,1,0,0),
d(2011,3,27,1,0,0),
d(2011,10,30,1,0,0),
d(2012,3,25,1,0,0),
d(2012,10,28,1,0,0),
d(2013,3,31,1,0,0),
d(2013,10,27,1,0,0),
d(2014,3,30,1,0,0),
d(2014,10,26,1,0,0),
d(2015,3,29,1,0,0),
d(2015,10,25,1,0,0),
d(2016,3,27,1,0,0),
d(2016,10,30,1,0,0),
d(2017,3,26,1,0,0),
d(2017,10,29,1,0,0),
d(2018,3,25,1,0,0),
d(2018,10,28,1,0,0),
d(2019,3,31,1,0,0),
d(2019,10,27,1,0,0),
d(2020,3,29,1,0,0),
d(2020,10,25,1,0,0),
d(2021,3,28,1,0,0),
d(2021,10,31,1,0,0),
d(2022,3,27,1,0,0),
d(2022,10,30,1,0,0),
d(2023,3,26,1,0,0),
d(2023,10,29,1,0,0),
d(2024,3,31,1,0,0),
d(2024,10,27,1,0,0),
d(2025,3,30,1,0,0),
d(2025,10,26,1,0,0),
d(2026,3,29,1,0,0),
d(2026,10,25,1,0,0),
d(2027,3,28,1,0,0),
d(2027,10,31,1,0,0),
d(2028,3,26,1,0,0),
d(2028,10,29,1,0,0),
d(2029,3,25,1,0,0),
d(2029,10,28,1,0,0),
d(2030,3,31,1,0,0),
d(2030,10,27,1,0,0),
d(2031,3,30,1,0,0),
d(2031,10,26,1,0,0),
d(2032,3,28,1,0,0),
d(2032,10,31,1,0,0),
d(2033,3,27,1,0,0),
d(2033,10,30,1,0,0),
d(2034,3,26,1,0,0),
d(2034,10,29,1,0,0),
d(2035,3,25,1,0,0),
d(2035,10,28,1,0,0),
d(2036,3,30,1,0,0),
d(2036,10,26,1,0,0),
d(2037,3,29,1,0,0),
d(2037,10,25,1,0,0),
]
_transition_info = [
i(4740,0,'LMT'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
]
Tirane = Tirane()
|
|
"""
Profiling hooks
This module contains a couple of decorators (`profile` and `coverage`) that
can be used to wrap functions and/or methods to produce profiles and line
coverage reports. There's a third convenient decorator (`timecall`) that
measures the duration of function execution without the extra profiling
overhead.
Usage example (Python 2.4 or newer)::
from profilehooks import profile, coverage
@profile # or @coverage
def fn(n):
if n < 2: return 1
else: return n * fn(n-1)
print(fn(42))
Or without imports, with some hack
$ python -m profilehooks yourmodule
@profile # or @coverage
def fn(n):
if n < 2: return 1
else: return n * fn(n-1)
print(fn(42))
Usage example (Python 2.3 or older)::
from profilehooks import profile, coverage
def fn(n):
if n < 2: return 1
else: return n * fn(n-1)
# Now wrap that function in a decorator
fn = profile(fn) # or coverage(fn)
print fn(42)
Reports for all thusly decorated functions will be printed to sys.stdout
on program termination. You can alternatively request for immediate
reports for each call by passing immediate=True to the profile decorator.
There's also a @timecall decorator for printing the time to sys.stderr
every time a function is called, when you just want to get a rough measure
instead of a detailed (but costly) profile.
Caveats
A thread on python-dev convinced me that hotshot produces bogus numbers.
See http://mail.python.org/pipermail/python-dev/2005-November/058264.html
I don't know what will happen if a decorated function tries to call
another decorated function. All decorators probably need to explicitly
support nested profiling (currently TraceFuncCoverage is the only one
that supports this, while HotShotFuncProfile has support for recursive
functions.)
Profiling with hotshot creates temporary files (*.prof for profiling,
*.cprof for coverage) in the current directory. These files are not
cleaned up. Exception: when you specify a filename to the profile
decorator (to store the pstats.Stats object for later inspection),
the temporary file will be the filename you specified with '.raw'
appended at the end.
Coverage analysis with hotshot seems to miss some executions resulting
in lower line counts and some lines erroneously marked as never
executed. For this reason coverage analysis now uses trace.py which is
slower, but more accurate.
Copyright (c) 2004--2014 Marius Gedminas <[email protected]>
Copyright (c) 2007 Hanno Schlichting
Copyright (c) 2008 Florian Schulze
Released under the MIT licence since December 2006:
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
(Previously it was distributed under the GNU General Public Licence.)
"""
__author__ = "Marius Gedminas <[email protected]>"
__copyright__ = "Copyright 2004-2014 Marius Gedminas"
__license__ = "MIT"
__version__ = "1.7.1"
__date__ = "2014-12-02"
import atexit
import inspect
import sys
import re
# For profiling
from profile import Profile
import pstats
# For hotshot profiling (inaccurate!)
try:
import hotshot
import hotshot.stats
except ImportError:
hotshot = None
# For trace.py coverage
import trace
# For hotshot coverage (inaccurate!; uses undocumented APIs; might break)
if hotshot is not None:
import _hotshot
import hotshot.log
# For cProfile profiling (best)
try:
import cProfile
except ImportError:
cProfile = None
# For timecall
import time
# registry of available profilers
AVAILABLE_PROFILERS = {}
__all__ = ['coverage', 'coverage_with_hotshot', 'profile', 'timecall']
def profile(fn=None, skip=0, filename=None, immediate=False, dirs=False,
sort=None, entries=40,
profiler=('cProfile', 'profile', 'hotshot')):
"""Mark `fn` for profiling.
If `skip` is > 0, first `skip` calls to `fn` will not be profiled.
If `immediate` is False, profiling results will be printed to
sys.stdout on program termination. Otherwise results will be printed
after each call.
If `dirs` is False only the name of the file will be printed.
Otherwise the full path is used.
`sort` can be a list of sort keys (defaulting to ['cumulative',
'time', 'calls']). The following ones are recognized::
'calls' -- call count
'cumulative' -- cumulative time
'file' -- file name
'line' -- line number
'module' -- file name
'name' -- function name
'nfl' -- name/file/line
'pcalls' -- call count
'stdname' -- standard name
'time' -- internal time
`entries` limits the output to the first N entries.
`profiler` can be used to select the preferred profiler, or specify a
sequence of them, in order of preference. The default is ('cProfile',
'profile', 'hotshot').
If `filename` is specified, the profile stats will be stored in the
named file. You can load them with pstats.Stats(filename).
Usage::
def fn(...):
...
fn = profile(fn, skip=1)
If you are using Python 2.4, you should be able to use the decorator
syntax::
@profile(skip=3)
def fn(...):
...
or just ::
@profile
def fn(...):
...
"""
if fn is None: # @profile() syntax -- we are a decorator maker
def decorator(fn):
return profile(fn, skip=skip, filename=filename,
immediate=immediate, dirs=dirs,
sort=sort, entries=entries,
profiler=profiler)
return decorator
# @profile syntax -- we are a decorator.
if isinstance(profiler, str):
profiler = [profiler]
for p in profiler:
if p in AVAILABLE_PROFILERS:
profiler_class = AVAILABLE_PROFILERS[p]
break
else:
raise ValueError('only these profilers are available: %s'
% ', '.join(sorted(AVAILABLE_PROFILERS)))
fp = profiler_class(fn, skip=skip, filename=filename,
immediate=immediate, dirs=dirs,
sort=sort, entries=entries)
# We cannot return fp or fp.__call__ directly as that would break method
# definitions, instead we need to return a plain function.
def new_fn(*args, **kw):
return fp(*args, **kw)
new_fn.__doc__ = fn.__doc__
new_fn.__name__ = fn.__name__
new_fn.__dict__ = fn.__dict__
new_fn.__module__ = fn.__module__
return new_fn
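# Illustrative sketch (not part of the module): `profile` can be used either
# bare or as a decorator factory; `profiler` gives a preference order, and the
# first backend present in AVAILABLE_PROFILERS is used. The helper below is a
# hypothetical, self-contained usage example with immediate output.
def _example_profile_usage():
    @profile(immediate=True, entries=10, profiler=('cProfile', 'profile'))
    def slow_sum(n):
        return sum(i * i for i in range(n))
    return slow_sum(100000)   # profiler results are printed right after the call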
def coverage(fn):
"""Mark `fn` for line coverage analysis.
Results will be printed to sys.stdout on program termination.
Usage::
def fn(...):
...
fn = coverage(fn)
If you are using Python 2.4, you should be able to use the decorator
syntax::
@coverage
def fn(...):
...
"""
fp = TraceFuncCoverage(fn) # or HotShotFuncCoverage
# We cannot return fp or fp.__call__ directly as that would break method
# definitions, instead we need to return a plain function.
def new_fn(*args, **kw):
return fp(*args, **kw)
new_fn.__doc__ = fn.__doc__
new_fn.__name__ = fn.__name__
new_fn.__dict__ = fn.__dict__
new_fn.__module__ = fn.__module__
return new_fn
def coverage_with_hotshot(fn):
"""Mark `fn` for line coverage analysis.
Uses the 'hotshot' module for fast coverage analysis.
BUG: Produces inaccurate results.
See the docstring of `coverage` for usage examples.
"""
fp = HotShotFuncCoverage(fn)
# We cannot return fp or fp.__call__ directly as that would break method
# definitions, instead we need to return a plain function.
def new_fn(*args, **kw):
return fp(*args, **kw)
new_fn.__doc__ = fn.__doc__
new_fn.__name__ = fn.__name__
new_fn.__dict__ = fn.__dict__
new_fn.__module__ = fn.__module__
return new_fn
class FuncProfile(object):
"""Profiler for a function (uses profile)."""
# This flag is shared between all instances
in_profiler = False
Profile = Profile
def __init__(self, fn, skip=0, filename=None, immediate=False, dirs=False,
sort=None, entries=40):
"""Creates a profiler for a function.
Every profiler has its own log file (the name of which is derived
from the function name).
FuncProfile registers an atexit handler that prints profiling
information to sys.stderr when the program terminates.
"""
self.fn = fn
self.skip = skip
self.filename = filename
self.immediate = immediate
self.dirs = dirs
self.sort = sort or ('cumulative', 'time', 'calls')
if isinstance(self.sort, str):
self.sort = (self.sort, )
self.entries = entries
self.reset_stats()
atexit.register(self.atexit)
def __call__(self, *args, **kw):
"""Profile a singe call to the function."""
self.ncalls += 1
if self.skip > 0:
self.skip -= 1
self.skipped += 1
return self.fn(*args, **kw)
if FuncProfile.in_profiler:
# handle recursive calls
return self.fn(*args, **kw)
# You cannot reuse the same profiler for many calls and accumulate
# stats that way. :-/
profiler = self.Profile()
try:
FuncProfile.in_profiler = True
return profiler.runcall(self.fn, *args, **kw)
finally:
FuncProfile.in_profiler = False
self.stats.add(profiler)
if self.immediate:
self.print_stats()
self.reset_stats()
def print_stats(self):
"""Print profile information to sys.stdout."""
funcname = self.fn.__name__
filename = self.fn.__code__.co_filename
lineno = self.fn.__code__.co_firstlineno
print("")
print("*** PROFILER RESULTS ***")
print("%s (%s:%s)" % (funcname, filename, lineno))
if self.skipped:
skipped = " (%d calls not profiled)" % self.skipped
else:
skipped = ""
print("function called %d times%s" % (self.ncalls, skipped))
print("")
stats = self.stats
if self.filename:
stats.dump_stats(self.filename)
if not self.dirs:
stats.strip_dirs()
stats.sort_stats(*self.sort)
stats.print_stats(self.entries)
def reset_stats(self):
"""Reset accumulated profiler statistics."""
# Note: not using self.Profile, since pstats.Stats() fails then
self.stats = pstats.Stats(Profile())
self.ncalls = 0
self.skipped = 0
def atexit(self):
"""Stop profiling and print profile information to sys.stdout.
This function is registered as an atexit hook.
"""
if not self.immediate:
self.print_stats()
AVAILABLE_PROFILERS['profile'] = FuncProfile
if cProfile is not None:
class CProfileFuncProfile(FuncProfile):
"""Profiler for a function (uses cProfile)."""
Profile = cProfile.Profile
AVAILABLE_PROFILERS['cProfile'] = CProfileFuncProfile
if hotshot is not None:
class HotShotFuncProfile(FuncProfile):
"""Profiler for a function (uses hotshot)."""
# This flag is shared between all instances
in_profiler = False
def __init__(self, fn, skip=0, filename=None, immediate=False,
dirs=False, sort=None, entries=40):
"""Creates a profiler for a function.
Every profiler has its own log file (the name of which is derived
from the function name).
HotShotFuncProfile registers an atexit handler that prints
profiling information to sys.stderr when the program terminates.
The log file is not removed and remains there to clutter the
current working directory.
"""
if filename:
self.logfilename = filename + ".raw"
else:
self.logfilename = fn.__name__ + ".prof"
super(HotShotFuncProfile, self).__init__(
fn, skip=skip, filename=filename, immediate=immediate,
dirs=dirs, sort=sort, entries=entries)
def __call__(self, *args, **kw):
"""Profile a singe call to the function."""
self.ncalls += 1
if self.skip > 0:
self.skip -= 1
self.skipped += 1
return self.fn(*args, **kw)
if HotShotFuncProfile.in_profiler:
# handle recursive calls
return self.fn(*args, **kw)
if self.profiler is None:
self.profiler = hotshot.Profile(self.logfilename)
try:
HotShotFuncProfile.in_profiler = True
return self.profiler.runcall(self.fn, *args, **kw)
finally:
HotShotFuncProfile.in_profiler = False
if self.immediate:
self.print_stats()
self.reset_stats()
def print_stats(self):
if self.profiler is None:
self.stats = pstats.Stats(Profile())
else:
self.profiler.close()
self.stats = hotshot.stats.load(self.logfilename)
super(HotShotFuncProfile, self).print_stats()
def reset_stats(self):
self.profiler = None
self.ncalls = 0
self.skipped = 0
AVAILABLE_PROFILERS['hotshot'] = HotShotFuncProfile
class HotShotFuncCoverage:
"""Coverage analysis for a function (uses _hotshot).
HotShot coverage is reportedly faster than trace.py, but it appears to
have problems with exceptions; also line counts in coverage reports
are generally lower than the line counts produced by TraceFuncCoverage.
Is this my bug, or is it a problem with _hotshot?
"""
def __init__(self, fn):
"""Creates a profiler for a function.
Every profiler has its own log file (the name of which is derived
from the function name).
HotShotFuncCoverage registers an atexit handler that prints
profiling information to sys.stderr when the program terminates.
The log file is not removed and remains there to clutter the
current working directory.
"""
self.fn = fn
self.logfilename = fn.__name__ + ".cprof"
self.profiler = _hotshot.coverage(self.logfilename)
self.ncalls = 0
atexit.register(self.atexit)
def __call__(self, *args, **kw):
"""Profile a singe call to the function."""
self.ncalls += 1
old_trace = sys.gettrace()
try:
return self.profiler.runcall(self.fn, args, kw)
finally: # pragma: nocover
sys.settrace(old_trace)
def atexit(self):
"""Stop profiling and print profile information to sys.stderr.
This function is registered as an atexit hook.
"""
self.profiler.close()
funcname = self.fn.__name__
filename = self.fn.__code__.co_filename
lineno = self.fn.__code__.co_firstlineno
print("")
print("*** COVERAGE RESULTS ***")
print("%s (%s:%s)" % (funcname, filename, lineno))
print("function called %d times" % self.ncalls)
print("")
fs = FuncSource(self.fn)
reader = hotshot.log.LogReader(self.logfilename)
for what, (filename, lineno, funcname), tdelta in reader:
if filename != fs.filename:
continue
if what == hotshot.log.LINE:
fs.mark(lineno)
if what == hotshot.log.ENTER:
# hotshot gives us the line number of the function definition
# and never gives us a LINE event for the first statement in
# a function, so if we didn't perform this mapping, the first
# statement would be marked as never executed
if lineno == fs.firstlineno:
lineno = fs.firstcodelineno
fs.mark(lineno)
reader.close()
print(fs)
never_executed = fs.count_never_executed()
if never_executed:
print("%d lines were not executed." % never_executed)
class TraceFuncCoverage:
"""Coverage analysis for a function (uses trace module).
HotShot coverage analysis is reportedly faster, but it appears to have
problems with exceptions.
"""
# Shared between all instances so that nested calls work
tracer = trace.Trace(count=True, trace=False,
ignoredirs=[sys.prefix, sys.exec_prefix])
# This flag is also shared between all instances
tracing = False
def __init__(self, fn):
"""Creates a profiler for a function.
Every profiler has its own log file (the name of which is derived
from the function name).
TraceFuncCoverage registers an atexit handler that prints
profiling information to sys.stderr when the program terminates.
The log file is not removed and remains there to clutter the
current working directory.
"""
self.fn = fn
self.logfilename = fn.__name__ + ".cprof"
self.ncalls = 0
atexit.register(self.atexit)
def __call__(self, *args, **kw):
"""Profile a singe call to the function."""
self.ncalls += 1
if TraceFuncCoverage.tracing: # pragma: nocover
return self.fn(*args, **kw)
old_trace = sys.gettrace()
try:
TraceFuncCoverage.tracing = True
return self.tracer.runfunc(self.fn, *args, **kw)
finally: # pragma: nocover
sys.settrace(old_trace)
TraceFuncCoverage.tracing = False
def atexit(self):
"""Stop profiling and print profile information to sys.stderr.
This function is registered as an atexit hook.
"""
funcname = self.fn.__name__
filename = self.fn.__code__.co_filename
lineno = self.fn.__code__.co_firstlineno
print("")
print("*** COVERAGE RESULTS ***")
print("%s (%s:%s)" % (funcname, filename, lineno))
print("function called %d times" % self.ncalls)
print("")
fs = FuncSource(self.fn)
for (filename, lineno), count in self.tracer.counts.items():
if filename != fs.filename:
continue
fs.mark(lineno, count)
print(fs)
never_executed = fs.count_never_executed()
if never_executed:
print("%d lines were not executed." % never_executed)
class FuncSource:
"""Source code annotator for a function."""
blank_rx = re.compile(r"^\s*finally:\s*(#.*)?$")
def __init__(self, fn):
self.fn = fn
self.filename = inspect.getsourcefile(fn)
self.sourcelines = {}
self.source = []
self.firstlineno = self.firstcodelineno = 0
try:
self.source, self.firstlineno = inspect.getsourcelines(fn)
self.firstcodelineno = self.firstlineno
self.find_source_lines()
except IOError:
self.filename = None
def find_source_lines(self):
"""Mark all executable source lines in fn as executed 0 times."""
if self.filename is None:
return
strs = trace.find_strings(self.filename)
lines = trace.find_lines_from_code(self.fn.__code__, strs)
for lineno in lines:
self.sourcelines.setdefault(lineno, 0)
if lines:
self.firstcodelineno = min(lines)
else: # pragma: nocover
# This branch cannot be reached, I'm just being paranoid.
self.firstcodelineno = self.firstlineno
def mark(self, lineno, count=1):
"""Mark a given source line as executed count times.
Multiple calls to mark for the same lineno add up.
"""
self.sourcelines[lineno] = self.sourcelines.get(lineno, 0) + count
def count_never_executed(self):
"""Count statements that were never executed."""
lineno = self.firstlineno
counter = 0
for line in self.source:
if self.sourcelines.get(lineno) == 0:
if not self.blank_rx.match(line):
counter += 1
lineno += 1
return counter
def __str__(self):
"""Return annotated source code for the function."""
if self.filename is None:
return "cannot show coverage data since co_filename is None"
lines = []
lineno = self.firstlineno
for line in self.source:
counter = self.sourcelines.get(lineno)
if counter is None:
prefix = ' ' * 7
elif counter == 0:
if self.blank_rx.match(line): # pragma: nocover
# This is a workaround for an ancient bug I can't
# reproduce, perhaps because it was fixed, or perhaps
# because I can't remember all the details.
prefix = ' ' * 7
else:
prefix = '>' * 6 + ' '
else:
prefix = '%5d: ' % counter
lines.append(prefix + line)
lineno += 1
return ''.join(lines)
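# Usage sketch (illustrative only): FuncSource can also be driven by hand to
# annotate a function, mirroring what TraceFuncCoverage.atexit does above.
def _example_annotate(fn):
    fs = FuncSource(fn)
    fs.mark(fs.firstcodelineno, count=3)   # pretend the first statement ran 3 times
    print(fs)                              # '>>>>>>' marks lines never executed
    return fs.count_never_executed()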
def timecall(fn=None, immediate=True, timer=None):
"""Wrap `fn` and print its execution time.
Example::
@timecall
def somefunc(x, y):
time.sleep(x * y)
somefunc(2, 3)
will print the time taken by somefunc on every call. If you want just
a summary at program termination, use
@timecall(immediate=False)
You can also choose a timing method other than the default ``time.time()``,
e.g.:
@timecall(timer=time.clock)
"""
if fn is None: # @timecall() syntax -- we are a decorator maker
def decorator(fn):
return timecall(fn, immediate=immediate, timer=timer)
return decorator
# @timecall syntax -- we are a decorator.
if timer is None:
timer = time.time
fp = FuncTimer(fn, immediate=immediate, timer=timer)
# We cannot return fp or fp.__call__ directly as that would break method
# definitions, instead we need to return a plain function.
def new_fn(*args, **kw):
return fp(*args, **kw)
new_fn.__doc__ = fn.__doc__
new_fn.__name__ = fn.__name__
new_fn.__dict__ = fn.__dict__
new_fn.__module__ = fn.__module__
return new_fn
class FuncTimer(object):
def __init__(self, fn, immediate, timer):
self.fn = fn
self.ncalls = 0
self.totaltime = 0
self.immediate = immediate
self.timer = timer
if not immediate:
atexit.register(self.atexit)
def __call__(self, *args, **kw):
"""Profile a singe call to the function."""
fn = self.fn
timer = self.timer
self.ncalls += 1
try:
start = timer()
return fn(*args, **kw)
finally:
duration = timer() - start
self.totaltime += duration
if self.immediate:
funcname = fn.__name__
filename = fn.__code__.co_filename
lineno = fn.__code__.co_firstlineno
sys.stderr.write("\n %s (%s:%s):\n %.3f seconds\n\n" % (
funcname, filename, lineno, duration
))
sys.stderr.flush()
def atexit(self):
if not self.ncalls:
return
funcname = self.fn.__name__
filename = self.fn.__code__.co_filename
lineno = self.fn.__code__.co_firstlineno
print("\n %s (%s:%s):\n"
" %d calls, %.3f seconds (%.3f seconds per call)\n" % (
funcname, filename, lineno, self.ncalls,
self.totaltime, self.totaltime / self.ncalls)
)
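# Usage sketch (illustrative only): collect a single summary line at interpreter
# exit instead of per-call output.  Any zero-argument timer callable works;
# time.perf_counter assumes Python 3.
def _example_timecall_summary(n):
    @timecall(immediate=False, timer=time.perf_counter)
    def busy_loop(k):
        return sum(i * i for i in range(k))
    return busy_loop(n)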
if __name__ == '__main__':
local = dict((name, globals()[name]) for name in __all__)
message = """********
Injected `profilehooks`
--------
{}
********
""".format("\n".join(local.keys()))
def interact_():
from code import interact
interact(message, local=local)
def run_():
from runpy import run_module
print(message)
run_module(sys.argv[1], init_globals=local)
if len(sys.argv) == 1:
interact_()
else:
run_()
|
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import matplotlib.cbook as cbook
import matplotlib.axes as maxes
#import matplotlib.colorbar as mcolorbar
from . import colorbar as mcolorbar
import matplotlib as mpl
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.ticker as ticker
from matplotlib.gridspec import SubplotSpec
from .axes_divider import Size, SubplotDivider, LocatableAxes, Divider
def _extend_axes_pad(value):
# Check whether a list/tuple/array or scalar has been passed
ret = value
if not hasattr(ret, "__getitem__"):
ret = (value, value)
return ret
def _tick_only(ax, bottom_on, left_on):
bottom_off = not bottom_on
left_off = not left_on
# [l.set_visible(bottom_off) for l in ax.get_xticklabels()]
# [l.set_visible(left_off) for l in ax.get_yticklabels()]
# ax.xaxis.label.set_visible(bottom_off)
# ax.yaxis.label.set_visible(left_off)
ax.axis["bottom"].toggle(ticklabels=bottom_off, label=bottom_off)
ax.axis["left"].toggle(ticklabels=left_off, label=left_off)
class Colorbar(mcolorbar.Colorbar):
def _config_axes_deprecated(self, X, Y):
'''
Make an axes patch and outline.
'''
ax = self.ax
ax.set_frame_on(False)
ax.set_navigate(False)
xy = self._outline(X, Y)
ax.update_datalim(xy)
ax.set_xlim(*ax.dataLim.intervalx)
ax.set_ylim(*ax.dataLim.intervaly)
self.outline = mlines.Line2D(xy[:, 0], xy[:, 1],
color=mpl.rcParams['axes.edgecolor'],
linewidth=mpl.rcParams['axes.linewidth'])
ax.add_artist(self.outline)
self.outline.set_clip_box(None)
self.outline.set_clip_path(None)
c = mpl.rcParams['axes.facecolor']
self.patch = mpatches.Polygon(xy, edgecolor=c,
facecolor=c,
linewidth=0.01,
zorder=-1)
ax.add_artist(self.patch)
ticks, ticklabels, offset_string = self._ticker()
if self.orientation == 'vertical':
ax.set_yticks(ticks)
ax.set_yticklabels(ticklabels)
ax.yaxis.get_major_formatter().set_offset_string(offset_string)
else:
ax.set_xticks(ticks)
ax.set_xticklabels(ticklabels)
ax.xaxis.get_major_formatter().set_offset_string(offset_string)
class CbarAxesBase(object):
def colorbar(self, mappable, **kwargs):
locator = kwargs.pop("locator", None)
if locator is None:
if "ticks" not in kwargs:
kwargs["ticks"] = ticker.MaxNLocator(5)
if locator is not None:
if "ticks" in kwargs:
raise ValueError("Either *locator* or *ticks* need" +
" to be given, not both")
else:
kwargs["ticks"] = locator
self.hold(True)
if self.orientation in ["top", "bottom"]:
orientation = "horizontal"
else:
orientation = "vertical"
cb = Colorbar(self, mappable, orientation=orientation, **kwargs)
self._config_axes()
def on_changed(m):
#print 'calling on changed', m.get_cmap().name
cb.set_cmap(m.get_cmap())
cb.set_clim(m.get_clim())
cb.update_bruteforce(m)
self.cbid = mappable.callbacksSM.connect('changed', on_changed)
mappable.colorbar = cb
self.locator = cb.cbar_axis.get_major_locator()
return cb
def _config_axes(self):
'''
Make an axes patch and outline.
'''
ax = self
ax.set_navigate(False)
ax.axis[:].toggle(all=False)
b = self._default_label_on
ax.axis[self.orientation].toggle(all=b)
# for axis in ax.axis.values():
# axis.major_ticks.set_visible(False)
# axis.minor_ticks.set_visible(False)
# axis.major_ticklabels.set_visible(False)
# axis.minor_ticklabels.set_visible(False)
# axis.label.set_visible(False)
# axis = ax.axis[self.orientation]
# axis.major_ticks.set_visible(True)
# axis.minor_ticks.set_visible(True)
#axis.major_ticklabels.set_size(
# int(axis.major_ticklabels.get_size()*.9))
#axis.major_tick_pad = 3
# axis.major_ticklabels.set_visible(b)
# axis.minor_ticklabels.set_visible(b)
# axis.label.set_visible(b)
def toggle_label(self, b):
self._default_label_on = b
axis = self.axis[self.orientation]
axis.toggle(ticklabels=b, label=b)
#axis.major_ticklabels.set_visible(b)
#axis.minor_ticklabels.set_visible(b)
#axis.label.set_visible(b)
class CbarAxes(CbarAxesBase, LocatableAxes):
def __init__(self, *kl, **kwargs):
orientation = kwargs.pop("orientation", None)
if orientation is None:
raise ValueError("orientation must be specified")
self.orientation = orientation
self._default_label_on = True
self.locator = None
super(LocatableAxes, self).__init__(*kl, **kwargs)
def cla(self):
super(LocatableAxes, self).cla()
self._config_axes()
class Grid(object):
"""
A class that creates a grid of Axes. In matplotlib, the axes
location (and size) is specified in the normalized figure
coordinates. This may not be ideal for images that need to be
displayed with a given aspect ratio. For example, displaying
images of the same size with some fixed padding between them cannot
be easily done in matplotlib. AxesGrid is used in such a case.
"""
_defaultLocatableAxesClass = LocatableAxes
def __init__(self, fig,
rect,
nrows_ncols,
ngrids=None,
direction="row",
axes_pad=0.02,
add_all=True,
share_all=False,
share_x=True,
share_y=True,
#aspect=True,
label_mode="L",
axes_class=None,
):
"""
Build an :class:`Grid` instance with a grid nrows*ncols
:class:`~matplotlib.axes.Axes` in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* (in
:class:`~matplotlib.figure.Figure` coordinates) or
the subplot position code (e.g., "121").
Optional keyword arguments:
================ ======== =========================================
Keyword Default Description
================ ======== =========================================
direction "row" [ "row" | "column" ]
axes_pad 0.02 float| pad between axes given in inches
or tuple-like of floats,
(horizontal padding, vertical padding)
add_all True [ True | False ]
share_all False [ True | False ]
share_x True [ True | False ]
share_y True [ True | False ]
label_mode "L" [ "L" | "1" | "all" ]
axes_class None a type object which must be a subclass
of :class:`~matplotlib.axes.Axes`
================ ======== =========================================
"""
self._nrows, self._ncols = nrows_ncols
if ngrids is None:
ngrids = self._nrows * self._ncols
else:
if (ngrids > self._nrows * self._ncols) or (ngrids <= 0):
raise Exception("")
self.ngrids = ngrids
self._init_axes_pad(axes_pad)
if direction not in ["column", "row"]:
raise Exception("")
self._direction = direction
if axes_class is None:
axes_class = self._defaultLocatableAxesClass
axes_class_args = {}
else:
if (type(axes_class)) == type and \
issubclass(axes_class,
self._defaultLocatableAxesClass.Axes):
axes_class_args = {}
else:
axes_class, axes_class_args = axes_class
self.axes_all = []
self.axes_column = [[] for _ in range(self._ncols)]
self.axes_row = [[] for _ in range(self._nrows)]
h = []
v = []
if cbook.is_string_like(rect) or cbook.is_numlike(rect):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=False)
elif isinstance(rect, SubplotSpec):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=False)
elif len(rect) == 3:
kw = dict(horizontal=h, vertical=v, aspect=False)
self._divider = SubplotDivider(fig, *rect, **kw)
elif len(rect) == 4:
self._divider = Divider(fig, rect, horizontal=h, vertical=v,
aspect=False)
else:
raise Exception("")
rect = self._divider.get_position()
# reference axes
self._column_refax = [None for _ in range(self._ncols)]
self._row_refax = [None for _ in range(self._nrows)]
self._refax = None
for i in range(self.ngrids):
col, row = self._get_col_row(i)
if share_all:
sharex = self._refax
sharey = self._refax
else:
if share_x:
sharex = self._column_refax[col]
else:
sharex = None
if share_y:
sharey = self._row_refax[row]
else:
sharey = None
ax = axes_class(fig, rect, sharex=sharex, sharey=sharey,
**axes_class_args)
if share_all:
if self._refax is None:
self._refax = ax
else:
if sharex is None:
self._column_refax[col] = ax
if sharey is None:
self._row_refax[row] = ax
self.axes_all.append(ax)
self.axes_column[col].append(ax)
self.axes_row[row].append(ax)
self.axes_llc = self.axes_column[0][-1]
self._update_locators()
if add_all:
for ax in self.axes_all:
fig.add_axes(ax)
self.set_label_mode(label_mode)
def _init_axes_pad(self, axes_pad):
axes_pad = _extend_axes_pad(axes_pad)
self._axes_pad = axes_pad
self._horiz_pad_size = Size.Fixed(axes_pad[0])
self._vert_pad_size = Size.Fixed(axes_pad[1])
def _update_locators(self):
h = []
h_ax_pos = []
for _ in self._column_refax:
#if h: h.append(Size.Fixed(self._axes_pad))
if h:
h.append(self._horiz_pad_size)
h_ax_pos.append(len(h))
sz = Size.Scaled(1)
h.append(sz)
v = []
v_ax_pos = []
for _ in self._row_refax[::-1]:
#if v: v.append(Size.Fixed(self._axes_pad))
if v:
v.append(self._vert_pad_size)
v_ax_pos.append(len(v))
sz = Size.Scaled(1)
v.append(sz)
for i in range(self.ngrids):
col, row = self._get_col_row(i)
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_ax_pos[self._nrows - 1 - row])
self.axes_all[i].set_axes_locator(locator)
self._divider.set_horizontal(h)
self._divider.set_vertical(v)
def _get_col_row(self, n):
if self._direction == "column":
col, row = divmod(n, self._nrows)
else:
row, col = divmod(n, self._ncols)
return col, row
# Good to propagate __len__ if we have __getitem__
def __len__(self):
return len(self.axes_all)
def __getitem__(self, i):
return self.axes_all[i]
def get_geometry(self):
"""
get geometry of the grid. Returns a tuple of two integers,
representing the number of rows and the number of columns.
"""
return self._nrows, self._ncols
def set_axes_pad(self, axes_pad):
"set axes_pad"
self._axes_pad = axes_pad
# These two lines actually differ from ones in _init_axes_pad
self._horiz_pad_size.fixed_size = axes_pad[0]
self._vert_pad_size.fixed_size = axes_pad[1]
def get_axes_pad(self):
"""
get axes_pad
Returns
-------
tuple
Padding in inches, (horizontal pad, vertical pad)
"""
return self._axes_pad
def set_aspect(self, aspect):
"set aspect"
self._divider.set_aspect(aspect)
def get_aspect(self):
"get aspect"
return self._divider.get_aspect()
def set_label_mode(self, mode):
"set label_mode"
if mode == "all":
for ax in self.axes_all:
_tick_only(ax, False, False)
elif mode == "L":
# left-most axes
for ax in self.axes_column[0][:-1]:
_tick_only(ax, bottom_on=True, left_on=False)
# lower-left axes
ax = self.axes_column[0][-1]
_tick_only(ax, bottom_on=False, left_on=False)
for col in self.axes_column[1:]:
# axes with no labels
for ax in col[:-1]:
_tick_only(ax, bottom_on=True, left_on=True)
# bottom
ax = col[-1]
_tick_only(ax, bottom_on=False, left_on=True)
elif mode == "1":
for ax in self.axes_all:
_tick_only(ax, bottom_on=True, left_on=True)
ax = self.axes_llc
_tick_only(ax, bottom_on=False, left_on=False)
def get_divider(self):
return self._divider
def set_axes_locator(self, locator):
self._divider.set_locator(locator)
def get_axes_locator(self):
return self._divider.get_locator()
def get_vsize_hsize(self):
return self._divider.get_vsize_hsize()
# from axes_size import AddList
# vsize = AddList(self._divider.get_vertical())
# hsize = AddList(self._divider.get_horizontal())
# return vsize, hsize
class ImageGrid(Grid):
"""
A class that creates a grid of Axes. In matplotlib, the axes
location (and size) is specified in the normalized figure
coordinates. This may not be ideal for images that need to be
displayed with a given aspect ratio. For example, displaying
images of the same size with some fixed padding between them cannot
be easily done in matplotlib. ImageGrid is used in such a case.
"""
_defaultCbarAxesClass = CbarAxes
def __init__(self, fig,
rect,
nrows_ncols,
ngrids=None,
direction="row",
axes_pad=0.02,
add_all=True,
share_all=False,
aspect=True,
label_mode="L",
cbar_mode=None,
cbar_location="right",
cbar_pad=None,
cbar_size="5%",
cbar_set_cax=True,
axes_class=None,
):
"""
Build an :class:`ImageGrid` instance with a grid nrows*ncols
:class:`~matplotlib.axes.Axes` in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* (in
:class:`~matplotlib.figure.Figure` coordinates) or
the subplot position code (e.g., "121").
Optional keyword arguments:
================ ======== =========================================
Keyword Default Description
================ ======== =========================================
direction "row" [ "row" | "column" ]
axes_pad 0.02 float| pad between axes given in inches
or tuple-like of floats,
(horizontal padding, vertical padding)
add_all True [ True | False ]
share_all False [ True | False ]
aspect True [ True | False ]
label_mode "L" [ "L" | "1" | "all" ]
cbar_mode None [ "each" | "single" | "edge" ]
cbar_location "right" [ "left" | "right" | "bottom" | "top" ]
cbar_pad None
cbar_size "5%"
cbar_set_cax True [ True | False ]
axes_class None a type object which must be a subclass
of axes_grid's subclass of
:class:`~matplotlib.axes.Axes`
================ ======== =========================================
*cbar_set_cax* : if True, each axes in the grid has a cax
attribute that is bound to the associated cbar_axes.
"""
self._nrows, self._ncols = nrows_ncols
if ngrids is None:
ngrids = self._nrows * self._ncols
else:
if (ngrids > self._nrows * self._ncols) or (ngrids <= 0):
raise Exception("")
self.ngrids = ngrids
axes_pad = _extend_axes_pad(axes_pad)
self._axes_pad = axes_pad
self._colorbar_mode = cbar_mode
self._colorbar_location = cbar_location
if cbar_pad is None:
# horizontal or vertical arrangement?
if cbar_location in ("left", "right"):
self._colorbar_pad = axes_pad[0]
else:
self._colorbar_pad = axes_pad[1]
else:
self._colorbar_pad = cbar_pad
self._colorbar_size = cbar_size
self._init_axes_pad(axes_pad)
if direction not in ["column", "row"]:
raise Exception("")
self._direction = direction
if axes_class is None:
axes_class = self._defaultLocatableAxesClass
axes_class_args = {}
else:
if isinstance(axes_class, maxes.Axes):
axes_class_args = {}
else:
axes_class, axes_class_args = axes_class
self.axes_all = []
self.axes_column = [[] for _ in range(self._ncols)]
self.axes_row = [[] for _ in range(self._nrows)]
self.cbar_axes = []
h = []
v = []
if cbook.is_string_like(rect) or cbook.is_numlike(rect):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=aspect)
elif isinstance(rect, SubplotSpec):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=aspect)
elif len(rect) == 3:
kw = dict(horizontal=h, vertical=v, aspect=aspect)
self._divider = SubplotDivider(fig, *rect, **kw)
elif len(rect) == 4:
self._divider = Divider(fig, rect, horizontal=h, vertical=v,
aspect=aspect)
else:
raise Exception("")
rect = self._divider.get_position()
# reference axes
self._column_refax = [None for _ in range(self._ncols)]
self._row_refax = [None for _ in range(self._nrows)]
self._refax = None
for i in range(self.ngrids):
col, row = self._get_col_row(i)
if share_all:
if self.axes_all:
sharex = self.axes_all[0]
sharey = self.axes_all[0]
else:
sharex = None
sharey = None
else:
sharex = self._column_refax[col]
sharey = self._row_refax[row]
ax = axes_class(fig, rect, sharex=sharex, sharey=sharey,
**axes_class_args)
self.axes_all.append(ax)
self.axes_column[col].append(ax)
self.axes_row[row].append(ax)
if share_all:
if self._refax is None:
self._refax = ax
if sharex is None:
self._column_refax[col] = ax
if sharey is None:
self._row_refax[row] = ax
cax = self._defaultCbarAxesClass(fig, rect,
orientation=self._colorbar_location)
self.cbar_axes.append(cax)
self.axes_llc = self.axes_column[0][-1]
self._update_locators()
if add_all:
for ax in self.axes_all+self.cbar_axes:
fig.add_axes(ax)
if cbar_set_cax:
if self._colorbar_mode == "single":
for ax in self.axes_all:
ax.cax = self.cbar_axes[0]
elif self._colorbar_mode == "edge":
for index, ax in enumerate(self.axes_all):
col, row = self._get_col_row(index)
if self._colorbar_location in ("left", "right"):
ax.cax = self.cbar_axes[row]
else:
ax.cax = self.cbar_axes[col]
else:
for ax, cax in zip(self.axes_all, self.cbar_axes):
ax.cax = cax
self.set_label_mode(label_mode)
def _update_locators(self):
h = []
v = []
h_ax_pos = []
h_cb_pos = []
if (self._colorbar_mode == "single" and
self._colorbar_location in ('left', 'bottom')):
if self._colorbar_location == "left":
#sz = Size.Fraction(Size.AxesX(self.axes_llc), self._nrows)
sz = Size.Fraction(self._nrows, Size.AxesX(self.axes_llc))
h.append(Size.from_any(self._colorbar_size, sz))
h.append(Size.from_any(self._colorbar_pad, sz))
locator = self._divider.new_locator(nx=0, ny=0, ny1=-1)
elif self._colorbar_location == "bottom":
#sz = Size.Fraction(Size.AxesY(self.axes_llc), self._ncols)
sz = Size.Fraction(self._ncols, Size.AxesY(self.axes_llc))
v.append(Size.from_any(self._colorbar_size, sz))
v.append(Size.from_any(self._colorbar_pad, sz))
locator = self._divider.new_locator(nx=0, nx1=-1, ny=0)
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[0].set_axes_locator(locator)
self.cbar_axes[0].set_visible(True)
for col, ax in enumerate(self.axes_row[0]):
if h:
h.append(self._horiz_pad_size) # Size.Fixed(self._axes_pad))
if ax:
sz = Size.AxesX(ax, aspect="axes", ref_ax=self.axes_all[0])
else:
sz = Size.AxesX(self.axes_all[0],
aspect="axes", ref_ax=self.axes_all[0])
if (self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
col == 0)) and self._colorbar_location == "left":
h_cb_pos.append(len(h))
h.append(Size.from_any(self._colorbar_size, sz))
h.append(Size.from_any(self._colorbar_pad, sz))
h_ax_pos.append(len(h))
h.append(sz)
if ((self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
col == self._ncols - 1)) and
self._colorbar_location == "right"):
h.append(Size.from_any(self._colorbar_pad, sz))
h_cb_pos.append(len(h))
h.append(Size.from_any(self._colorbar_size, sz))
v_ax_pos = []
v_cb_pos = []
for row, ax in enumerate(self.axes_column[0][::-1]):
if v:
v.append(self._vert_pad_size) # Size.Fixed(self._axes_pad))
if ax:
sz = Size.AxesY(ax, aspect="axes", ref_ax=self.axes_all[0])
else:
sz = Size.AxesY(self.axes_all[0],
aspect="axes", ref_ax=self.axes_all[0])
if (self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
row == 0)) and self._colorbar_location == "bottom":
v_cb_pos.append(len(v))
v.append(Size.from_any(self._colorbar_size, sz))
v.append(Size.from_any(self._colorbar_pad, sz))
v_ax_pos.append(len(v))
v.append(sz)
if ((self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
row == self._nrows - 1)) and
self._colorbar_location == "top"):
v.append(Size.from_any(self._colorbar_pad, sz))
v_cb_pos.append(len(v))
v.append(Size.from_any(self._colorbar_size, sz))
for i in range(self.ngrids):
col, row = self._get_col_row(i)
#locator = self._divider.new_locator(nx=4*col,
# ny=2*(self._nrows - row - 1))
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_ax_pos[self._nrows-1-row])
self.axes_all[i].set_axes_locator(locator)
if self._colorbar_mode == "each":
if self._colorbar_location in ("right", "left"):
locator = self._divider.new_locator(
nx=h_cb_pos[col], ny=v_ax_pos[self._nrows - 1 - row])
elif self._colorbar_location in ("top", "bottom"):
locator = self._divider.new_locator(
nx=h_ax_pos[col], ny=v_cb_pos[self._nrows - 1 - row])
self.cbar_axes[i].set_axes_locator(locator)
elif self._colorbar_mode == 'edge':
if ((self._colorbar_location == 'left' and col == 0) or
(self._colorbar_location == 'right'
and col == self._ncols-1)):
locator = self._divider.new_locator(
nx=h_cb_pos[0], ny=v_ax_pos[self._nrows -1 - row])
self.cbar_axes[row].set_axes_locator(locator)
elif ((self._colorbar_location == 'bottom' and
row == self._nrows - 1) or
(self._colorbar_location == 'top' and row == 0)):
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_cb_pos[0])
self.cbar_axes[col].set_axes_locator(locator)
if self._colorbar_mode == "single":
if self._colorbar_location == "right":
#sz = Size.Fraction(Size.AxesX(self.axes_llc), self._nrows)
sz = Size.Fraction(self._nrows, Size.AxesX(self.axes_llc))
h.append(Size.from_any(self._colorbar_pad, sz))
h.append(Size.from_any(self._colorbar_size, sz))
locator = self._divider.new_locator(nx=-2, ny=0, ny1=-1)
elif self._colorbar_location == "top":
#sz = Size.Fraction(Size.AxesY(self.axes_llc), self._ncols)
sz = Size.Fraction(self._ncols, Size.AxesY(self.axes_llc))
v.append(Size.from_any(self._colorbar_pad, sz))
v.append(Size.from_any(self._colorbar_size, sz))
locator = self._divider.new_locator(nx=0, nx1=-1, ny=-2)
if self._colorbar_location in ("right", "top"):
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[0].set_axes_locator(locator)
self.cbar_axes[0].set_visible(True)
elif self._colorbar_mode == "each":
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(True)
elif self._colorbar_mode == "edge":
if self._colorbar_location in ('right', 'left'):
count = self._nrows
else:
count = self._ncols
for i in range(count):
self.cbar_axes[i].set_visible(True)
for j in range(i + 1, self.ngrids):
self.cbar_axes[j].set_visible(False)
else:
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[i].set_position([1., 1., 0.001, 0.001],
which="active")
self._divider.set_horizontal(h)
self._divider.set_vertical(v)
AxesGrid = ImageGrid
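# Usage sketch (illustrative only): a 2x2 ImageGrid sharing one colorbar.
# `fig` is a matplotlib Figure and `images` an iterable of 2-D arrays; both
# names are placeholders supplied by the caller.
def _example_image_grid(fig, images):
    grid = ImageGrid(fig, 111, nrows_ncols=(2, 2), axes_pad=0.1,
                     share_all=True, cbar_mode="single", cbar_location="right")
    im = None
    for ax, data in zip(grid, images):
        im = ax.imshow(data)
    if im is not None:
        grid.cbar_axes[0].colorbar(im)   # CbarAxesBase.colorbar defined above
    return grid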
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from mock import call
from osc_lib import exceptions
from openstackclient.network.v2 import floating_ip as fip
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
from openstackclient.tests.unit import utils as tests_utils
# Tests for Nova network
class TestFloatingIPCompute(compute_fakes.TestComputev2):
def setUp(self):
super(TestFloatingIPCompute, self).setUp()
# Get a shortcut to the compute client
self.compute = self.app.client_manager.compute
@mock.patch(
'openstackclient.api.compute_v2.APIv2.floating_ip_create'
)
class TestCreateFloatingIPCompute(TestFloatingIPCompute):
# The floating ip to be deleted.
_floating_ip = compute_fakes.FakeFloatingIP.create_one_floating_ip()
columns = (
'fixed_ip',
'id',
'instance_id',
'ip',
'pool',
)
data = (
_floating_ip['fixed_ip'],
_floating_ip['id'],
_floating_ip['instance_id'],
_floating_ip['ip'],
_floating_ip['pool'],
)
def setUp(self):
super(TestCreateFloatingIPCompute, self).setUp()
self.app.client_manager.network_endpoint_enabled = False
# self.compute.floating_ips.create.return_value = self.floating_ip
# Get the command object to test
self.cmd = fip.CreateFloatingIP(self.app, None)
def test_floating_ip_create_no_arg(self, fip_mock):
arglist = []
verifylist = []
self.assertRaises(tests_utils.ParserException, self.check_parser,
self.cmd, arglist, verifylist)
def test_floating_ip_create_default(self, fip_mock):
fip_mock.return_value = self._floating_ip
arglist = [
self._floating_ip['pool'],
]
verifylist = [
('network', self._floating_ip['pool']),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
fip_mock.assert_called_once_with(self._floating_ip['pool'])
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
@mock.patch(
'openstackclient.api.compute_v2.APIv2.floating_ip_delete'
)
class TestDeleteFloatingIPCompute(TestFloatingIPCompute):
# The floating ips to be deleted.
_floating_ips = compute_fakes.FakeFloatingIP.create_floating_ips(count=2)
def setUp(self):
super(TestDeleteFloatingIPCompute, self).setUp()
self.app.client_manager.network_endpoint_enabled = False
# Get the command object to test
self.cmd = fip.DeleteFloatingIP(self.app, None)
def test_floating_ip_delete(self, fip_mock):
fip_mock.return_value = mock.Mock(return_value=None)
arglist = [
self._floating_ips[0]['id'],
]
verifylist = [
('floating_ip', [self._floating_ips[0]['id']]),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
fip_mock.assert_called_once_with(
self._floating_ips[0]['id']
)
self.assertIsNone(result)
def test_floating_ip_delete_multi(self, fip_mock):
fip_mock.return_value = mock.Mock(return_value=None)
arglist = []
verifylist = []
for f in self._floating_ips:
arglist.append(f['id'])
verifylist = [
('floating_ip', arglist),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
calls = []
for f in self._floating_ips:
calls.append(call(f['id']))
fip_mock.assert_has_calls(calls)
self.assertIsNone(result)
def test_floating_ip_delete_multi_exception(self, fip_mock):
fip_mock.return_value = mock.Mock(return_value=None)
fip_mock.side_effect = ([
mock.Mock(return_value=None),
exceptions.CommandError,
])
arglist = [
self._floating_ips[0]['id'],
'unexist_floating_ip',
]
verifylist = [(
'floating_ip',
[self._floating_ips[0]['id'], 'unexist_floating_ip'],
)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
try:
self.cmd.take_action(parsed_args)
self.fail('CommandError should be raised.')
except exceptions.CommandError as e:
self.assertEqual('1 of 2 floating_ips failed to delete.', str(e))
fip_mock.assert_any_call(self._floating_ips[0]['id'])
fip_mock.assert_any_call('unexist_floating_ip')
@mock.patch(
'openstackclient.api.compute_v2.APIv2.floating_ip_list'
)
class TestListFloatingIPCompute(TestFloatingIPCompute):
# The floating ips to be list up
_floating_ips = compute_fakes.FakeFloatingIP.create_floating_ips(count=3)
columns = (
'ID',
'Floating IP Address',
'Fixed IP Address',
'Server',
'Pool',
)
data = []
for ip in _floating_ips:
data.append((
ip['id'],
ip['ip'],
ip['fixed_ip'],
ip['instance_id'],
ip['pool'],
))
def setUp(self):
super(TestListFloatingIPCompute, self).setUp()
self.app.client_manager.network_endpoint_enabled = False
# Get the command object to test
self.cmd = fip.ListFloatingIP(self.app, None)
def test_floating_ip_list(self, fip_mock):
fip_mock.return_value = self._floating_ips
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
fip_mock.assert_called_once_with()
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
@mock.patch(
'openstackclient.api.compute_v2.APIv2.floating_ip_find'
)
class TestShowFloatingIPCompute(TestFloatingIPCompute):
# The floating ip to display.
_floating_ip = compute_fakes.FakeFloatingIP.create_one_floating_ip()
columns = (
'fixed_ip',
'id',
'instance_id',
'ip',
'pool',
)
data = (
_floating_ip['fixed_ip'],
_floating_ip['id'],
_floating_ip['instance_id'],
_floating_ip['ip'],
_floating_ip['pool'],
)
def setUp(self):
super(TestShowFloatingIPCompute, self).setUp()
self.app.client_manager.network_endpoint_enabled = False
# Get the command object to test
self.cmd = fip.ShowFloatingIP(self.app, None)
def test_floating_ip_show(self, fip_mock):
fip_mock.return_value = self._floating_ip
arglist = [
self._floating_ip['id'],
]
verifylist = [
('floating_ip', self._floating_ip['id']),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
fip_mock.assert_called_once_with(self._floating_ip['id'])
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
|
|
# lrucache.py -- a simple LRU (Least-Recently-Used) cache class
# Copyright 2004 Evan Prodromou <[email protected]>
# Licensed under the Academic Free License 2.1
# arch-tag: LRU cache main module
"""a simple LRU (Least-Recently-Used) cache module
This module provides very simple LRU (Least-Recently-Used) cache
functionality.
An *in-memory cache* is useful for storing the results of an
'expensive' process (one that takes a lot of time or resources) for
later re-use. Typical examples are accessing data from the filesystem,
a database, or a network location. If you know you'll need to re-read
the data again, it can help to keep it in a cache.
You *can* use a Python dictionary as a cache for some purposes.
However, if the results you're caching are large, or you have a lot of
possible results, this can be impractical memory-wise.
An *LRU cache*, on the other hand, only keeps _some_ of the results in
memory, which keeps you from overusing resources. The cache is bounded
by a maximum size; if you try to add more values to the cache, it will
automatically discard the values that you haven't read or written to
in the longest time. In other words, the least-recently-used items are
discarded. [1]_
.. [1]: 'Discarded' here means 'removed from the cache'.
"""
from __future__ import generators
import time
from heapq import heappush, heappop, heapify
__version__ = "0.2"
__all__ = ['CacheKeyError', 'LRUCache', 'DEFAULT_SIZE']
__docformat__ = 'reStructuredText en'
DEFAULT_SIZE = 16
"""Default size of a new LRUCache object, if no 'size' argument is given."""
class CacheKeyError(KeyError):
"""Error raised when cache requests fail
When a cache record is accessed which no longer exists (or never did),
this error is raised. To avoid it, you may want to check for the existence
of a cache record before reading or deleting it."""
pass
class LRUCache(object):
"""Least-Recently-Used (LRU) cache.
Instances of this class provide a least-recently-used (LRU) cache. They
emulate a Python mapping type. You can use an LRU cache more or less like
a Python dictionary, with the exception that objects you put into the
cache may be discarded before you take them out.
Some example usage::
cache = LRUCache(32) # new cache
cache['foo'] = get_file_contents('foo') # or whatever
if 'foo' in cache: # if it's still in cache...
# use cached version
contents = cache['foo']
else:
# recalculate
contents = get_file_contents('foo')
# store in cache for next time
cache['foo'] = contents
print cache.size # Maximum size
print len(cache) # 0 <= len(cache) <= cache.size
cache.size = 10 # Auto-shrink on size assignment
for i in range(50): # note: larger than cache size
cache[i] = i
if 0 not in cache: print 'Zero was discarded.'
if 42 in cache:
del cache[42] # Manual deletion
for j in cache: # iterate (in LRU order)
print j, cache[j] # iterator produces keys, not values
"""
class __Node(object):
"""Record of a cached value. Not for public consumption."""
def __init__(self, key, obj, timestamp):
object.__init__(self)
self.key = key
self.obj = obj
self.atime = timestamp
self.mtime = self.atime
def __cmp__(self, other):
return cmp(self.atime, other.atime)
def __repr__(self):
return "<%s %s => %s (%s)>" % \
(self.__class__, self.key, self.obj, \
time.asctime(time.localtime(self.atime)))
def __init__(self, size=DEFAULT_SIZE):
# Check arguments
if size <= 0:
raise ValueError, size
elif type(size) is not type(0):
raise TypeError, size
object.__init__(self)
self.__heap = []
self.__dict = {}
self.size = size
"""Maximum size of the cache.
If more than 'size' elements are added to the cache,
the least-recently-used ones will be discarded."""
def __len__(self):
return len(self.__heap)
def __contains__(self, key):
return self.__dict.has_key(key)
def __setitem__(self, key, obj):
if self.__dict.has_key(key):
node = self.__dict[key]
node.obj = obj
node.atime = time.time()
node.mtime = node.atime
heapify(self.__heap)
else:
# size may have been reset, so we loop
while len(self.__heap) >= self.size:
lru = heappop(self.__heap)
del self.__dict[lru.key]
node = self.__Node(key, obj, time.time())
self.__dict[key] = node
heappush(self.__heap, node)
def __getitem__(self, key):
if not self.__dict.has_key(key):
raise CacheKeyError(key)
else:
node = self.__dict[key]
node.atime = time.time()
heapify(self.__heap)
return node.obj
def __delitem__(self, key):
if not self.__dict.has_key(key):
raise CacheKeyError(key)
else:
node = self.__dict[key]
del self.__dict[key]
self.__heap.remove(node)
heapify(self.__heap)
return node.obj
def __iter__(self):
copy = self.__heap[:]
while len(copy) > 0:
node = heappop(copy)
yield node.key
raise StopIteration
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
# automagically shrink heap on resize
if name == 'size':
while len(self.__heap) > value:
lru = heappop(self.__heap)
del self.__dict[lru.key]
def __repr__(self):
return "<%s (%d elements)>" % (str(self.__class__), len(self.__heap))
def mtime(self, key):
"""Return the last modification time for the cache record with key.
May be useful for cache instances where the stored values can get
'stale', such as caching file or network resource contents."""
if not self.__dict.has_key(key):
raise CacheKeyError(key)
else:
node = self.__dict[key]
return node.mtime
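# Usage sketch (illustrative only): refresh entries that have gone stale by
# comparing mtime() against a maximum age.  `loader` is any caller-supplied
# callable; the name is a placeholder.
def _example_stale_check(cache, key, loader, max_age=300):
    if key in cache and time.time() - cache.mtime(key) < max_age:
        return cache[key]
    value = cache[key] = loader(key)
    return value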
if __name__ == "__main__":
cache = LRUCache(25)
print cache
for i in range(50):
cache[i] = str(i)
print cache
if 46 in cache:
del cache[46]
print cache
cache.size = 10
print cache
cache[46] = '46'
print cache
print len(cache)
for c in cache:
print c
print cache
print cache.mtime(46)
for c in cache:
print c
|
|
"""This module contains client that wrap requests and response to Knoema API"""
import json
import urllib.parse
import urllib.request
import time
import datetime
import hmac
import base64
import hashlib
import random
import string
import io
import os
import knoema.api_definitions as definition
import knoema.api_definitions_sema as definition_sema
import knoema.api_definitions_search as definition_search
from urllib.error import HTTPError
def _random_string(length):
return ''.join(random.choice(string.ascii_letters) for ii in range(length + 1))
def _string_to_binary(string_data):
return string_data.encode()
def _crlf():
return _string_to_binary('\r\n')
def _response_to_json(resp):
str_response = resp.read().decode('utf-8')
if resp.status < 200 or resp.status >= 300:
raise ValueError('Error {} from server: {}'.format(resp.status, str_response))
# The API response can start with a BOM symbol, which breaks the JSON parser, so we have to strip it
obj_resp = json.loads(str_response.strip('\ufeff'))
if isinstance(obj_resp, str):
raise ValueError(obj_resp)
return obj_resp
class ApiClient:
"""This is client that wrap requests and response to Knoema API"""
def __init__(self, host, appid=None, appsecret=None):
splitted = urllib.parse.urlsplit(host)
self._host = splitted.netloc.strip()
if not self._host:
self._host = splitted.path.strip()
self._schema = splitted.scheme
if not self._schema:
self._schema = 'http'
self._appid = appid
self._appsecret = appsecret
self._opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor)
self._search_config = None
def _get_url(self, apipath):
return urllib.parse.urlunsplit((self._schema, self._host, apipath, '', ''))
def _get_request_headers(self):
if not self._appid or not self._appsecret:
return {
'Content-Type' : 'application/json',
'Accept': 'application/json'
}
key = datetime.datetime.utcnow().strftime('%d-%m-%y-%H').encode()
hashed = hmac.new(key, self._appsecret.encode(), hashlib.sha1)
secrethash = base64.b64encode(hashed.digest()).decode('utf-8')
auth = 'Knoema {}:{}:1.2'.format(self._appid, secrethash)
return {
'Content-Type' : 'application/json',
'Accept': 'application/json',
'Authorization' : auth
}
def _api_get(self, obj, apipath, query=None):
url = self._get_url(apipath)
if query:
url = '{}?{}'.format(url, query)
headers = self._get_request_headers()
req = urllib.request.Request(url, headers=headers)
resp = self._opener.open(req)
return obj(_response_to_json(resp))
def _api_post(self, responseobj, apipath, requestobj):
json_data = requestobj.save_to_json()
return self._api_post_json(responseobj, apipath, json_data)
def _api_post_json(self, responseobj, apipath, requestjson):
url = self._get_url(apipath)
binary_data = requestjson.encode()
headers = self._get_request_headers()
req = urllib.request.Request(url, binary_data, headers)
resp = self._opener.open(req)
return responseobj(_response_to_json(resp))
def check_correct_host(self):
"""The method checks whether the host is correctly set and whether it can configure the connection to this host. Does not check the base host knoema.com """
if self._host == 'knoema.com':
return
url = self._get_url('/api/1.0/frontend/tags')
headers = self._get_request_headers()
req = urllib.request.Request(url, headers=headers)
try:
_ = urllib.request.urlopen(req)
except:
raise ValueError('The specified host {} does not exist'.format(self._host))
def get_dataset(self, datasetid):
"""The method is getting information about dataset byt it's id"""
path = '/api/1.0/meta/dataset/{}'
return self._api_get(definition.Dataset, path.format(datasetid))
def get_dataset_meta(self, datasetid):
path = '/api/1.0/meta/dataset/{}'
return self._api_get(definition.DatasetMetadata, path.format(datasetid))
def get_dimension(self, dataset, dimension):
"""The method is getting information about dimension with items"""
path = '/api/1.0/meta/dataset/{}/dimension/{}'
return self._api_get(definition.Dimension, path.format(dataset, dimension))
def get_daterange(self, dataset):
"""The method is getting information about date range of dataset"""
path = '/api/1.0/meta/dataset/{}/daterange'
return self._api_get(definition.DateRange, path.format(dataset))
def get_data(self, pivotrequest):
"""The method is getting data by pivot request"""
path = '/api/1.0/data/pivot/'
return self._api_post(definition.PivotResponse, path, pivotrequest)
def get_data_by_json(self, pivotrequest_json):
"""The method is getting data by pivot request (json)"""
path = '/api/1.0/data/pivot/'
return self._api_post_json(definition.PivotResponse, path, pivotrequest_json)
def get_dataset_data(self, dataset_id, filters):
"""The method is getting JSON by URL and parses it to specified object"""
try:
return self._api_post(definition.detect_data_response, '/api/2.0/data?datasetId={}'.format(dataset_id), filters)
except HTTPError as ex:
if ex.code == 400:
raise ValueError(ex.read().decode('utf-8'))
else:
raise
def get_data_raw(self, request, metadata_only = False):
"""The method is getting data by raw request"""
path = '/api/1.2/data/raw/' + ('?metadataOnly=true' if metadata_only else '')
res = self._api_post(definition.RawDataResponse, path, request)
token = res.continuation_token
while token is not None:
res2 = self.get_data_raw_with_token(token, metadata_only)
res.series += res2.series
token = res2.continuation_token
return res
def get_data_raw_with_token(self, token, metadata_only = False):
path = '/api/1.0/data/raw/?continuationToken={0}' + ('&metadataOnly=true' if metadata_only else '')
return self._api_get(definition.RawDataResponse, path.format(token))
def get_mnemonics(self, mnemonics, transform, frequency):
"""The method get series by mnemonics"""
path = '/api/1.0/data/mnemonics?mnemonics={0}'
if transform:
path += '&transform=' + transform
if frequency:
path += '&frequency=' + frequency
return self._api_get(definition.MnemonicsResponseList, path.format(mnemonics))
def get_details(self, request):
"""The method is getting data details by request"""
path = '/api/1.1/data/details/'
return self._api_post(definition.DetailsResponse, path, request)
def get_company_info(self, ticker):
"""The method get company data"""
path = 'api/1.0/sema/{0}'
return self._api_get(definition_sema.CompanyInt, path.format(ticker))
def get_indicator_info(self, path):
path = 'api/1.0/sema/{0}'.format(path)
url = self._get_url(path)
headers = self._get_request_headers()
req = urllib.request.Request(url, headers=headers)
resp = self._opener.open(req)
return _response_to_json(resp)
def search(self, query):
if self._search_config is None:
path = '/api/1.0/search/config'
self._search_config = self._api_get(definition_search.SearchConfig, path)
url = self._search_config.build_search_url(query)
req = urllib.request.Request(url)
resp = self._opener.open(req)
return definition_search.SearchResultsInt(_response_to_json(resp))
def upload_file(self, file):
"""The method is posting file to the remote server"""
url = self._get_url('/api/1.0/upload/post')
fcontent = FileContent(file)
binary_data = fcontent.get_binary()
headers = self._get_request_headers()
req = urllib.request.Request(url, binary_data, headers)
req.add_header('Content-type', fcontent.get_content_type())
req.add_header('Content-length', len(binary_data))
resp = urllib.request.urlopen(req)
return definition.UploadPostResponse(_response_to_json(resp))
def upload_verify(self, file_location, dataset=None):
"""This method is verifiing posted file on server"""
path = '/api/1.0/upload/verify'
query = 'doNotGenerateAdvanceReport=true&filePath={}'.format(file_location)
if dataset:
query = 'doNotGenerateAdvanceReport=true&filePath={}&datasetId={}'.format(file_location, dataset)
return self._api_get(definition.UploadVerifyResponse, path, query)
def upload_submit(self, upload_request):
"""The method is submitting dataset upload"""
path = '/api/1.0/upload/save'
return self._api_post(definition.DatasetUploadResponse, path, upload_request)
def upload_status(self, upload_id):
"""The method is checking status of uploaded dataset"""
path = '/api/1.0/upload/status'
query = 'id={}'.format(upload_id)
return self._api_get(definition.DatasetUploadStatusResponse, path, query)
def upload(self, file_path, dataset=None, public=False, name = None):
"""Use this function to upload data to Knoema dataset."""
upload_status = self.upload_file(file_path)
err_msg = 'Dataset has not been uploaded to the remote host'
if not upload_status.successful:
msg = '{}, because of the following error: {}'.format(err_msg, upload_status.error)
raise ValueError(msg)
err_msg = 'File has not been verified'
upload_ver_status = self.upload_verify(upload_status.properties.location, dataset)
if not upload_ver_status.successful:
ver_err = '\r\n'.join(upload_ver_status.errors)
msg = '{}, because of the following error(s): {}'.format(err_msg, ver_err)
raise ValueError(msg)
ds_upload = definition.DatasetUpload(upload_ver_status, upload_status, dataset, public, name)
ds_upload_submit_result = self.upload_submit(ds_upload)
err_msg = 'Dataset has not been saved to the database'
if ds_upload_submit_result.status == 'failed':
ver_err = '\r\n'.join(ds_upload_submit_result.errors)
msg = '{}, because of the following error(s): {}'.format(err_msg, ver_err)
raise ValueError(msg)
ds_upload_result = None
while True:
ds_upload_result = self.upload_status(ds_upload_submit_result.submit_id)
if ds_upload_result.status == 'pending' or ds_upload_result.status == 'processing':
time.sleep(5)
else:
break
if ds_upload_result.status != 'successful':
ver_err = '\r\n'.join(ds_upload_result.errors)
msg = '{}, because of the following error(s): {}'.format(err_msg, ver_err)
raise ValueError(msg)
return ds_upload_result.dataset
def delete(self, dataset):
"""The method is deleting dataset by it's id"""
url = self._get_url('/api/1.0/meta/dataset/{}/delete'.format(dataset))
json_data = ''
binary_data = json_data.encode()
headers = self._get_request_headers()
req = urllib.request.Request(url, binary_data, headers)
resp = urllib.request.urlopen(req)
str_response = resp.read().decode('utf-8')
if str_response != '"successful"' or resp.status < 200 or resp.status >= 300:
msg = 'Dataset has not been deleted, because of the following error(s): {}'.format(str_response)
raise ValueError(msg)
def verify(self, dataset, publication_date, source, refernce_url):
"""The method is verifying dataset by it's id"""
path = '/api/1.0/meta/verifydataset'
req = definition.DatasetVerifyRequest(dataset, publication_date, source, refernce_url)
result = self._api_post(definition.DatasetVerifyResponse, path, req)
if result.status == 'failed':
ver_err = '\r\n'.join(result.errors)
msg = 'Dataset has not been verified, because of the following error(s): {}'.format(ver_err)
raise ValueError(msg)
class FileContent(object):
"""Accumulate the data to be used when posting a form."""
def __init__(self, file):
self.file_name = os.path.basename(file)
with open(file, mode='rb') as source:
self.body = source.read()
self.boundary = _random_string(30)
def get_content_type(self):
"""Return a content type"""
return 'multipart/form-data; boundary="{}"'.format(self.boundary)
def get_binary(self):
"""Return a binary buffer containing the file content"""
content_disp = 'Content-Disposition: form-data; name="file"; filename="{}"'
stream = io.BytesIO()
stream.write(_string_to_binary('--{}'.format(self.boundary)))
stream.write(_crlf())
stream.write(_string_to_binary(content_disp.format(self.file_name)))
stream.write(_crlf())
stream.write(_crlf())
stream.write(self.body)
stream.write(_crlf())
stream.write(_string_to_binary('--{}--'.format(self.boundary)))
stream.write(_crlf())
return stream.getvalue()
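# Usage sketch (illustrative only): anonymous read access followed by an
# authenticated upload.  The dataset id, file path and credentials are
# placeholders, not real values.
def _example_client_usage():
    public = ApiClient('knoema.com')              # appid/appsecret are optional for public data
    public.check_correct_host()
    meta = public.get_dataset('SOMEDATASETID')    # hypothetical dataset id
    authed = ApiClient('knoema.com', appid='APP_ID', appsecret='APP_SECRET')
    new_dataset_id = authed.upload('data.xlsx', public=True, name='My dataset')
    return meta, new_dataset_id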
|
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import subprocess
from sets import Set
import pexpect
import time
import os
import tinctest
import unittest2 as unittest
from mpp.lib.config import GPDBConfig
from mpp.models import MPPTestCase
from tinctest.models.scenario import ScenarioTestCase
from tinctest.lib.system import TINCSystem
from tinctest.lib import local_path, run_shell_command
from tinctest.lib import Gpdiff
from tinctest.main import TINCException
from mpp.lib.filerep_util import Filerepe2e_Util
from mpp.lib.gprecoverseg import GpRecover
from gppylib.commands.base import Command, REMOTE
from mpp.gpdb.tests.storage.lib.dbstate import DbStateClass
from mpp.lib.PSQL import PSQL
class GpInitSystem(Command):
"""This is a wrapper for gpinitsystem."""
def __init__(self, config_file):
cmd_str = 'gpinitsystem -a -c %s' % (config_file)
Command.__init__(self, 'run gpinitsystem', cmd_str)
def run(self, validate=True):
tinctest.logger.info("Running initialization: %s" % self)
Command.run(self, validateAfter=validate)
result = self.get_results()
return result
class GpInitStandby(Command):
"""This is a wrapper for gpinitstandby."""
def __init__(self, standby_host, mdd=None):
if not mdd:
mdd = os.getenv('MASTER_DATA_DIRECTORY')
cmd_str = 'export MASTER_DATA_DIRECTORY=%s; gpinitstandby -a -s %s' % (mdd, standby_host)
Command.__init__(self, 'run gpinitstandby', cmd_str)
def run(self, validate=True):
tinctest.logger.info("Running gpinitstandby: %s" % self)
Command.run(self, validateAfter=validate)
result = self.get_results()
return result
class GpDeleteSystem(Command):
"""This is a wrapper for gpdeletesystem."""
def __init__(self, mdd=None):
if not mdd:
mdd = os.getenv('MASTER_DATA_DIRECTORY')
cmd_str = "export MASTER_DATA_DIRECTORY=%s; echo -e \"y\\ny\\n\" | gpdeletesystem -d %s" % (mdd, mdd)
Command.__init__(self, 'run gpdeletesystem', cmd_str)
def run(self, validate=True):
tinctest.logger.info("Running delete system: %s" %self)
Command.run(self, validateAfter=validate)
result = self.get_results()
return result
class GPAddmirrorsTestCaseException(TINCException):
pass
class GPAddmirrorsTestCase(MPPTestCase):
def __init__(self, methodName):
self.config = GPDBConfig()
self.mdd = os.environ.get('MASTER_DATA_DIRECTORY')
self.seg_prefix = os.path.basename(self.mdd).split('-')[0]
self.master_host = self.config.get_masterhost()
self.gpinitconfig_template = local_path('configs/gpinitconfig_template')
self.datadir_config_file = local_path('configs/datadir_config_file')
self.mirror_config_file = local_path('configs/mirror_config_file')
self.gpinitconfig_file = local_path('configs/gpinitconfig')
self.host_file = local_path('configs/hosts')
self.hosts = self.config.get_hosts(segments = True)
self.port_base = '40000'
self.master_port = os.environ.get('PGPORT', '5432')
self.primary_data_dir = self.config.get_host_and_datadir_of_segment(dbid = 2)[1]
        # initially place the mirror data dir next to the primary's parent directory
self.mirror_data_dir = os.path.join(os.path.dirname(os.path.dirname(self.primary_data_dir)), 'mirror')
self.gpinitsystem = True
self.number_of_segments = self.config.get_countprimarysegments()
self.number_of_segments_per_host = self.number_of_segments / len(self.hosts)
self.standby_enabled = False
self.number_of_parallelism = 4
self.fs_location = []
super(GPAddmirrorsTestCase, self).__init__(methodName)
def setUp(self):
super(GPAddmirrorsTestCase, self).setUp()
def _setup_gpaddmirrors(self, port_offset=1000):
"""
Takes care of creating all the directories required for gpaddmirrors
and generating input files for gpaddmirrors
"""
# Generate gpaddmirrors config files
try:
self._generate_gpaddmirrors_input_files(port_offset)
except Exception, e:
tinctest.logger.exception("Encountered exception during generation of input files: %s" % e)
raise
def tearDown(self):
super(GPAddmirrorsTestCase, self).tearDown()
def check_mirror_seg(self, master=False):
tinctest.logger.info("running check mirror")
dbstate = DbStateClass('run_validation')
dbstate.check_mirrorintegrity(master=master)
def _generate_gpinit_config_files(self):
transforms = {'%SEG_PREFIX%': self.seg_prefix,
'%PORT_BASE%': self.port_base,
'%MASTER_HOST%': self.master_host, # First item in self.hosts
'%HOSTFILE%': self.host_file,
'%MASTER_PORT%': self.master_port,
'%MASTER_DATA_DIR%': os.path.dirname(self.mdd),
'%DATA_DIR%': (os.path.dirname(self.primary_data_dir) + ' ') * self.number_of_segments_per_host
}
# First generate host file based on number_of_hosts
with open(self.host_file, 'w') as f:
for host in self.hosts:
f.write(host + '\n')
TINCSystem.substitute_strings_in_file(self.gpinitconfig_template,
self.gpinitconfig_file,
transforms)
def format_sql_result(self, sql_command=None):
if sql_command is None:
tinctest.logger.warning("Please provide a sql command")
return None
result = PSQL.run_sql_command(sql_command, flags='-q -t', dbname='template1')
result = result.strip()
rows = result.split('\n')
formatted_rows = []
for row in rows:
cols = row.split('|')
cols = [col.strip() for col in cols]
formatted_rows.append(cols)
return formatted_rows
def _do_gpdeletesystem(self):
result = GpDeleteSystem(mdd=self.mdd).run(validate=False)
if result.rc > 0:
tinctest.logger.warning("Failed to delete system for the test case, may already be deleted: %s" %result)
def _do_gpinitsystem(self):
# Check the config files to initialize the cluster
self.assertTrue(os.path.exists(self.gpinitconfig_file))
self.assertTrue(os.path.exists(self.host_file))
# cleanup data directories before running gpinitsystem
self._cleanup_segment_data_dir(self.host_file, os.path.dirname(self.primary_data_dir))
self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command("rm -rf %s; mkdir -p %s" % (os.path.dirname(self.mdd), os.path.dirname(self.mdd)), 'create master dir', res)
if res['rc'] > 0:
raise GPAddmirrorsTestCaseException("Failed to create master directories")
result = GpInitSystem(self.gpinitconfig_file).run(validate=False)
# initsystem returns 1 for warnings and 2 for errors
if result.rc > 1:
tinctest.logger.error("Failed initializing the cluster: %s" % result)
raise GPAddmirrorsTestCaseException("Failed initializing the cluster. Look into gpAdminLogs for more information")
def _cleanup_segment_data_dir(self, host_file, segment_data_dir):
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command("gpssh -f %s -e 'rm -rf %s; mkdir -p %s'" %(host_file, segment_data_dir, segment_data_dir), 'create segment dirs', res)
if res['rc'] > 0:
raise GPAddmirrorsTestCaseException("Failed to create segment directories")
def _do_gpinitstandby(self):
"""
Initializes a standby host on a host which is different from master host.
"""
for host in self.hosts:
if host != self.master_host:
standby_host = host
break
tinctest.logger.info("Initializing standby master on host: %s" % standby_host)
# Create master directory on the standby host
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command("gpssh -h %s -e 'rm -rf %s; mkdir -p %s'" %(standby_host, os.path.dirname(self.mdd), os.path.dirname(self.mdd)), 'create master dir on standby host', res)
if res['rc'] > 0:
raise GPAddmirrorsTestCaseException("Failed to create segment directories")
# Do gpinitstandby
cmd = GpInitStandby(standby_host, mdd=self.mdd)
result = cmd.run(validate=False)
if result.rc > 0:
tinctest.logger.error("gpinitstandby failed with an error code: %s" %result)
raise GPAddmirrorsTestCaseException("gpinitstandby failed with an error code. Failing the test module")
def _generate_gpaddmirrors_input_files(self, port_offset=1000):
with open(self.datadir_config_file, 'w') as f:
for i in range (0, self.number_of_segments_per_host):
f.write(self.mirror_data_dir+'\n')
if port_offset != 1000:
cmdStr = 'gpaddmirrors -p %s -o %s -m %s -d %s' % (port_offset, self.mirror_config_file, self.datadir_config_file, self.mdd)
else:
cmdStr = 'gpaddmirrors -o %s -m %s -d %s' % (self.mirror_config_file, self.datadir_config_file, self.mdd)
Command('generate the sample_mirror_config file', cmdStr).run(validateAfter=True)
def verify_config_file_with_gp_config(self):
"""
        Compares gp_segment_configuration against the input mirror config file, to double-check
        that the cluster is configured as intended
"""
with open(self.mirror_config_file, 'r') as f:
next(f)
for line in f:
line = line.strip()
                mirror_seg_info = line.split('=')[1]
                cols = mirror_seg_info.split(':')
                content_id = cols[0]
                address = cols[1]
                port = cols[2]
                query_on_configuration = '''select * from gp_segment_configuration where content=\'%s\' and address=\'%s\'
                                            and port=\'%s\'''' % (content_id, address, port)
config_info = PSQL.run_sql_command(query_on_configuration, flags='-q -t', dbname='template1')
config_info = config_info.strip()
                # as configured, the entry should exist in the cluster
self.assertNotEqual(0, len(config_info))
def run_simple_ddl_dml(self):
"""
        Run simple DDL and DML statements to verify that the cluster is functioning properly
"""
setup_sql = 'DROP TABLE IF EXISTS gpaddmirrors_table; CREATE TABLE gpaddmirrors_table(KEY INTEGER, CONTENT VARCHAR(199));'
insert_sql = 'INSERT INTO gpaddmirrors_table VALUES( generate_series(1,500), \'This is a simple test for addmirrors\' );'
verify_sql = 'SELECT COUNT(*) FROM gpaddmirrors_table;'
PSQL.run_sql_command(setup_sql, flags='-q -t', dbname='template1')
PSQL.run_sql_command(insert_sql, flags='-q -t', dbname='template1')
result = PSQL.run_sql_command(verify_sql, flags='-q -t', dbname='template1')
result = result.strip()
self.assertEqual(500, int(result))
class GpAddmirrorsTests(GPAddmirrorsTestCase):
"""
@description gpaddmirrors test suite
@tags gpaddmirrors
"""
def setUp(self):
super(GpAddmirrorsTests, self).setUp()
if self.config.has_mirror():
self._generate_gpinit_config_files()
self._do_gpdeletesystem()
self._do_gpinitsystem()
time.sleep(5)
def tearDown(self):
super(GpAddmirrorsTests, self).tearDown()
    # The following test should not be ported because the gpaddmirrors functionality is already tested by other tests, and
# this test in particular is only testing if worker pool can handle a batch size of 4.
def test_batch_size_4(self):
"""
        Check the batch size option -B of gpaddmirrors; the number of workers started depends on how
        many mirror segments need to be set up, otherwise it starts up to 10.
"""
gprecover = GpRecover()
self._setup_gpaddmirrors()
self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
workers = Set()
batch_size = 4
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command("gpaddmirrors -a -i %s -B %s -d %s --verbose" % (self.mirror_config_file, batch_size, self.mdd), 'run gpaddmirrros batch size %s' % batch_size, res)
self.assertEqual(0, res['rc'])
lines = res['stdout'].split('\n')
for line in lines:
if 'worker' in line and 'haltWork' in line:
elems = line.split(' ')[1]
worker = elems.split('-')[-1]
workers.add(worker)
        self.assertEqual(len(workers), batch_size)
gprecover.wait_till_insync_transition()
self.verify_config_file_with_gp_config()
self.check_mirror_seg()
#The following tests need to be ported to Behave.
def test_mirror_spread(self):
"""
Mirror spreading will place each mirror on a different host within the Greenplum Database array
"""
gprecover = GpRecover()
if self.number_of_segments_per_host > len(self.hosts):
            self.skipTest('skipping test since the number of hosts is less than the number of segments per host')
self._setup_gpaddmirrors()
self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command("gpaddmirrors -a -i %s -s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrros with mirror spreading', res)
self.assertEqual(0, res['rc'])
check_mirror_spreading = '''SELECT A.hostname, B.hostname
FROM gp_segment_configuration A, gp_segment_configuration B
WHERE A.preferred_role = \'p\' AND B.preferred_role = \'m\' AND A.content = B.content AND A.hostname <> B.hostname;'''
result = PSQL.run_sql_command(check_mirror_spreading, flags='-q -t', dbname='template1')
result = result.strip()
self.assertNotEqual(0, len(result))
rows = result.split('\n')
self.assertEqual(self.number_of_segments, len(rows))
gprecover.wait_till_insync_transition()
self.verify_config_file_with_gp_config()
self.check_mirror_seg()
def test_with_standby(self):
"""
        check that the cluster's mirror host addresses are the same with and without a standby master
"""
if not self.config.is_multinode():
self.skipTest('skipping test since the cluster is not multinode')
gprecover = GpRecover()
self._setup_gpaddmirrors()
# adding mirrors first
self._setup_gpaddmirrors()
self._generate_gpinit_config_files()
self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command("gpaddmirrors -a -i %s -s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrros with mirror spreading', res)
self.assertEqual(0, res['rc'])
gprecover.wait_till_insync_transition()
get_mirror_address = 'SELECT content, address FROM gp_segment_configuration WHERE preferred_role = \'m\';'
rows = self.format_sql_result(get_mirror_address)
# create a dictionary for mirror and its host address
mirror_hosts_wo_stdby = {}
for row in rows:
content = row[0]
address = row[1]
mirror_hosts_wo_stdby[content] = address
# delete and reinitialize cluster again
self._do_gpdeletesystem()
self._do_gpinitsystem()
gprecover.wait_till_insync_transition()
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
# create standby, needs to get a new config_info instance for new cluster
config_info = GPDBConfig()
if not config_info.has_master_mirror():
self._do_gpinitstandby()
self._setup_gpaddmirrors()
self._generate_gpinit_config_files()
self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
# add mirror for the new cluster which has standby configured
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command("gpaddmirrors -a -i %s -s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrros with mirror spreading', res)
self.assertEqual(0, res['rc'])
gprecover.wait_till_insync_transition()
# verify that the configuration will be same as mirror_config_file specified
self.verify_config_file_with_gp_config()
self.check_mirror_seg()
rows = self.format_sql_result(get_mirror_address)
mirror_hosts_with_stdby = {}
for row in rows:
content = row[0]
address = row[1]
mirror_hosts_with_stdby[content] = address
for key in mirror_hosts_wo_stdby:
self.assertEqual(mirror_hosts_wo_stdby[key], mirror_hosts_with_stdby[key])
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command("gpinitstandby -ar", 'remove standby', res)
if res['rc'] > 0:
raise GPAddmirrorsTestCaseException("Failed to remove the standby")
def test_with_fault_injection(self):
"""
        add new mirrors, run a workload to verify that the cluster functions correctly, then
        inject a fault into the mirrors to bring the cluster into change tracking, and run recoverseg
"""
filerepUtil = Filerepe2e_Util()
gprecover = GpRecover()
self._setup_gpaddmirrors()
self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command("gpaddmirrors -a -i %s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrros with fault injection', res)
gprecover.wait_till_insync_transition()
self.assertEqual(0, res['rc'])
self.run_simple_ddl_dml()
        # after adding new mirrors, check the integrity between primary and mirror
self.check_mirror_seg()
out_file = local_path('inject_fault_into_ct')
filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='mirror', H='ALL', outfile=out_file)
        # trigger the transition to change tracking
PSQL.run_sql_command('drop table if exists foo;', dbname = 'template1')
filerepUtil.wait_till_change_tracking_transition()
gprecover.incremental()
gprecover.wait_till_insync_transition()
out_file=local_path('reset_fault')
filerepUtil.inject_fault(f='filerep_consumer', m='async', y='reset', r='mirror', H='ALL', outfile=out_file)
def test_with_concurrent_workload(self):
"""
        add new mirrors while a concurrent workload is in progress, check that the mirrors are added
        and the current workload is not affected; at the end, run checkmirrorseg.
        Note: adding mirrors while running a workload has a known checkmirrorseg issue (MPP-24311)
"""
gprecover = GpRecover()
self._setup_gpaddmirrors()
self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
sql_setup_file = local_path('sql/ao_heap_table_setup.sql')
sql_file = local_path('sql/ao_heap_table.sql')
pg_stat_activity = 'SELECT * FROM pg_stat_activity;'
PSQL.run_sql_file(sql_setup_file)
subprocess.Popen(["psql", "-f", sql_file])
time.sleep(15)
subprocess.Popen(["gpaddmirrors", "-ai", self.mirror_config_file, "-d", self.mdd])
time.sleep(15)
result = PSQL.run_sql_command(pg_stat_activity, flags='-q -t', dbname='template1')
result = result.strip()
rows = result.split('\n')
self.assertTrue(len(rows) > 1)
while len(rows) > 1:
result = PSQL.run_sql_command(pg_stat_activity, flags='-q -t', dbname='template1')
result = result.strip()
rows = result.split('\n')
time.sleep(3)
gprecover.wait_till_insync_transition()
self.verify_config_file_with_gp_config()
# ignore check_mirror_seg due to MPP-24311
#self.check_mirror_seg()
def test_gpaddmirrors_with_workload(self):
"""
        add new mirrors after creating some workload, check that the mirrors are added
        and that checkmirrorseg passes.
"""
gprecover = GpRecover()
self._setup_gpaddmirrors()
self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
sql_setup_file = local_path('sql/ao_heap_table_setup.sql')
sql_file = local_path('sql/ao_heap_table.sql')
pg_stat_activity = 'SELECT * FROM pg_stat_activity;'
PSQL.run_sql_file(sql_setup_file)
PSQL.run_sql_file(sql_file)
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command("gpaddmirrors -a -i %s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrros with fault injection', res)
self.assertEqual(0, res['rc'])
gprecover.wait_till_insync_transition()
self.verify_config_file_with_gp_config()
self.check_mirror_seg()
def test_interview(self):
gprecover = GpRecover()
child = pexpect.spawn('gpaddmirrors')
#child.logfile = sys.stdout
for i in range(0, self.number_of_segments_per_host):
child.expect('Enter mirror segment data directory location.*.\r\n')
child.sendline(self.mirror_data_dir)
child.expect('Continue with add mirrors procedure Yy|Nn (default=N):')
child.sendline('Y')
child.expect(pexpect.EOF)
        # wait until the cluster is fully synced, then run gpcheckmirrorseg
gprecover.wait_till_insync_transition()
self.check_mirror_seg()
self._do_gpdeletesystem()
self._do_gpinitsystem()
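# Minimal parsing sketch (an assumption inferred from verify_config_file_with_gp_config above):
# each non-header line of the gpaddmirrors config file appears to have the form
# 'mirror<N>=<contentId>:<address>:<port>:...', and this helper pulls out the fields that the
# test compares against gp_segment_configuration. It is illustrative only and unused.
def parse_mirror_config_line(line):
    mirror_seg_info = line.strip().split('=')[1]
    cols = mirror_seg_info.split(':')
    return cols[0], cols[1], cols[2]  # (content id, address, port)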
|
|
from datetime import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin, AnonymousUserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app, request
from . import db, login_manager
class Permission:
CHALLENGE = 0x01
VOTE = 0x02
SUGGEST = 0x04
APPROVE = 0x08
ADMINISTER = 0x80
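# Illustrative sketch (not used by the application): Permission values are single bits, so a
# role's permission set is their bitwise OR and a capability check is a bitwise AND, which is
# what Role.insert_roles() and User.can() below rely on.
def _permission_demo():
    moderator_perms = Permission.CHALLENGE | Permission.SUGGEST | Permission.APPROVE  # 0x0d
    can_approve = (moderator_perms & Permission.APPROVE) == Permission.APPROVE        # True
    can_admin = (moderator_perms & Permission.ADMINISTER) == Permission.ADMINISTER    # False
    return can_approve, can_admin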
class Role(db.Model): # FIXME do I need it?
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = (('User', Permission.CHALLENGE | Permission.VOTE |
Permission.SUGGEST, True),
('Moderator', Permission.CHALLENGE | Permission.SUGGEST |
Permission.APPROVE, False), ('Administrator', 0xff, False))
for name, permissions, default in roles:
role = Role.query.filter_by(name=name).first()
if not role:
role = Role(name=name)
role.permissions = permissions
role.default = default
db.session.add(role)
db.session.commit()
def __repr__(self):
return "<Role {}>".format(self.name)
class Vote(db.Model):
__tablename__ = 'votes'
user_id = db.Column(db.Integer,
db.ForeignKey('users.id'),
primary_key=True)
battle_id = db.Column(db.Integer,
db.ForeignKey('battles.id'),
primary_key=True)
choice = db.Column(db.String(16))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
def __repr__(self):
return "<Vote by {} for {} in battle #{}>".format(
self.voter.username, self.choice, self.battle_id)
class Battle(db.Model):
__tablename__ = 'battles'
id = db.Column(db.Integer, primary_key=True)
challenger_id = db.Column(db.Integer,
db.ForeignKey('users.id'),
index=True)
challenged_id = db.Column(db.Integer,
db.ForeignKey('users.id'),
index=True)
challenger_filter = db.Column(db.String(64))
challenged_filter = db.Column(db.String(64))
image_id = db.Column(db.Integer, db.ForeignKey('images.id'), index=True)
challenge_accepted = db.Column(db.Boolean, default=False)
challenger_finished = db.Column(db.Boolean, default=False)
challenged_finished = db.Column(db.Boolean, default=False)
is_finished = db.Column(db.Boolean, default=False)
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
votes = db.relationship('Vote',
backref='battle',
lazy='dynamic',
cascade='all, delete-orphan')
def decide(self, user, is_accepted):
if user != self.challenged:
raise ValueError("Attemp to accept/decline wrong battle")
if is_accepted:
self.challenge_accepted = True
db.session.add(self)
else:
db.session.delete(self)
db.session.commit()
return self if is_accepted else None
def __repr__(self):
return "<Battle between {} and {}>".format(
User.query.filter_by(id=self.challenger_id).first().username,
User.query.filter_by(id=self.challenged_id).first().username)
EXP_LIMITS = [
('Novice', 0),
('Apprentice', 10),
('Adept', 50),
('Expert', 250),
('Master', 1000)
]
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
experience = db.Column(db.Integer, default=0)
username = db.Column(db.String(64), unique=True, index=True)
name = db.Column(db.String(64))
email = db.Column(db.String(120), unique=True, index=True)
about_me = db.Column(db.Text())
member_since = db.Column(db.DateTime(), default=datetime.utcnow)
last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
password_hash = db.Column(db.String(128))
confirmed = db.Column(db.Boolean, default=False)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
images = db.relationship('Image', backref='user', lazy='dynamic')
challenged_by = db.relationship(
'Battle',
foreign_keys=[Battle.challenged_id],
backref=db.backref('challenged', lazy='joined'), # FIXME why lazy='joined'?
lazy='dynamic',
cascade='all, delete-orphan')
challenged_who = db.relationship(
'Battle',
foreign_keys=[Battle.challenger_id],
backref=db.backref('challenger', lazy='joined'),
lazy='dynamic',
cascade='all, delete-orphan')
votes = db.relationship('Vote',
backref='voter',
lazy='dynamic',
cascade='all, delete-orphan')
avatar_hash = db.Column(db.String(32))
@staticmethod
def generate_fake(count=100):
from sqlalchemy.exc import IntegrityError
from random import seed
import forgery_py
seed()
for i in range(count):
u = User(email=forgery_py.internet.email_address(),
username=forgery_py.internet.user_name(True),
password=forgery_py.lorem_ipsum.word(),
confirmed=True,
name=forgery_py.name.full_name(),
about_me=forgery_py.lorem_ipsum.sentence(),
member_since=forgery_py.date.date(True))
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def __init__(self, **kwargs):
super().__init__(**kwargs)
if self.role is None:
if self.email == current_app.config['ADMIN']:
self.role = Role.query.filter_by(permissions=0xff).first()
if self.role is None:
self.role = Role.query.filter_by(default=True).first()
if self.email is not None and self.avatar_hash is None:
self.avatar_hash = hashlib.md5(self.email.encode()).hexdigest()
@property
def password(self):
raise AttributeError("Password is not a readable attribute")
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_reset_token(self, expiration=60 * 60):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def reset_password(self, token, new_password):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
        except Exception:
return False
if data.get('reset') != self.id:
return False
self.password = new_password
db.session.add(self)
db.session.commit()
return True
def generate_confirmation_token(self, expiration=60 * 60):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
        except Exception:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
db.session.commit()
return True
def challenge(self, user, image):
if self.id == user.id:
raise ValueError("A user cannot challenge himself")
battle = Battle(challenger=self, # FIXME check
challenged=user,
image=image)
db.session.add(battle)
db.session.commit()
return battle
def vote(self, battle, choice):
if choice not in ("challenger", "challenged"):
raise ValueError(
"You can vote either for challenger or challenged")
if not (battle and battle.challenge_accepted):
raise ValueError(
"Attemp to vote on non-existing or not accepted battle")
old_v = Vote.query.filter_by(battle=battle, voter=self).first()
if old_v is not None:
old_v.choice = choice
db.session.add(old_v)
else:
v = Vote(battle=battle, voter=self, choice=choice)
db.session.add(v)
self.experience += 10
db.session.add(self)
db.session.commit()
return self
def get_rank(self):
rank = EXP_LIMITS[0][0]
for name, exp in EXP_LIMITS:
if exp > self.experience:
return rank
rank = name
return EXP_LIMITS[-1][0]
def get_exp_pc(self):
        rank_exp = EXP_LIMITS[0][1]  # lowest experience threshold, not the rank name
for name, exp in EXP_LIMITS:
if exp > self.experience:
return (self.experience - rank_exp)/(exp-rank_exp)*100
rank_exp = exp
return 100
def can(self, permissions):
return self.role and (self.role.permissions & permissions
) == permissions
def is_administrator(self):
return self.can(Permission.ADMINISTER)
def ping(self):
self.last_seen = datetime.utcnow()
db.session.add(self)
db.session.commit()
def gravatar(self, size=100, default='identicon', rating='pg'):
if request.is_secure:
url = 'https://secure.gravatar.com/avatar'
else:
url = 'http://www.gravatar.com/avatar'
hash = self.avatar_hash or hashlib.md5(self.email.encode()).hexdigest()
return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
url=url,
hash=hash,
size=size,
default=default,
rating=rating)
@property
def battles(self):
return self.challenged_by.union(self.challenged_who).order_by(
Battle.timestamp.desc())
def generate_auth_token(self, expiration):
s = Serializer(current_app.config['SECRET_KEY'], expires_in=expiration)
return s.dumps({'id': self.id}).decode('ascii')
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
        except Exception:
return None
return User.query.get(data['id'])
def __repr__(self):
return "<User {}>".format(self.email)
class AnonymousUser(AnonymousUserMixin):
def can(self, permissions):
return False
def is_administrator(self):
return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Image(db.Model):
__tablename__ = 'images'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(32), unique=True, index=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
battles = db.relationship('Battle', backref='image', lazy='dynamic')
def __repr__(self):
return "<Image {}>".format(self.name)
|
|
import numpy as np
import scipy as sp
import scipy.linalg as LA
from .covar_base import Covariance
from limix.hcache import cached
import pdb
import logging as LG
class FreeFormCov(Covariance):
"""
    General positive semi-definite matrix with no constraints.
A free-form covariance matrix of dimension d has 1/2 * d * (d + 1) params
"""
def __init__(self, dim, jitter=1e-4):
"""
Args:
dim: dimension of the free-form covariance
jitter: extent of diagonal offset which is added for numerical stability
(default value: 1e-4)
"""
Covariance.__init__(self, dim)
self._K_act = True
self._calcNumberParams()
self.dim = dim
self.params = sp.zeros(self.n_params)
self.idx_r, self.idx_c = sp.tril_indices(self.dim)
self.set_jitter(jitter)
#####################
# Properties
#####################
@property
def variance(self):
return self.K().diagonal()
@property
def correlation(self):
R = self.K().copy()
inv_diag = 1./sp.sqrt(R.diagonal())[:,sp.newaxis]
R *= inv_diag
R *= inv_diag.T
return R
@property
def variance_ste(self):
if self.getFIinv() is None:
R = None
else:
R = self.K_ste().diagonal()
# IN A VARIANCE / CORRELATION PARAMETRIZATION
#if self.getFIinv() is None:
# R = None
#else:
# R = sp.sqrt(self.getFIinv().diagonal()[:self.dim])
return R
@property
def correlation_ste(self):
if self.getFIinv() is None:
R = None
else:
idx_M = sp.zeros((self.dim,self.dim))
idx_M[sp.tril_indices(self.dim)] = sp.arange( int( 0.5 * self.dim * (self.dim + 1) ) )
            R = sp.zeros((self.dim, self.dim))
for i in range(self.dim):
for j in range(0,self.dim):
ij = idx_M[i,j] # index of cov_ij_ste from fisher
ii = idx_M[i,i] # index of cov_ii_ste from fisher
jj = idx_M[j,j] # index of cov_jj_ste from fisher
#TODO: complete
# IN A VARIANCE / CORRELATION PARAMETRIZATION
#if self.getFIinv() is None:
# R = None
#else:
# R = sp.zeros((self.dim, self.dim))
# R[sp.tril_indices(self.dim, k = -1)] = sp.sqrt(self.getFIinv().diagonal()[self.dim:])
# R += R.T
return R
@property
def X(self):
return self.L()
#####################
# Activation handling
#####################
@property
def act_K(self):
return self._K_act
@act_K.setter
def act_K(self, act):
self._K_act = bool(act)
self._notify()
#####################
# Params handling
#####################
def setParams(self, params):
if not self._K_act and len(params) > 0:
raise ValueError("Trying to set a parameter via setParams that "
"is not active.")
if self._K_act:
self.params[:] = params
self.clear_all()
def getParams(self):
if not self._K_act:
return np.array([])
return self.params
def getNumberParams(self):
return int(self._K_act) * self.n_params
def _calcNumberParams(self):
self.n_params = int(0.5*self.dim*(self.dim+1))
def set_jitter(self,value):
self.jitter = value
def setCovariance(self,cov):
""" set hyperparameters from given covariance """
chol = LA.cholesky(cov,lower=True)
params = chol[sp.tril_indices(self.dim)]
self.setParams(params)
#####################
# Cached
#####################
@cached('covar_base')
def K(self):
RV = sp.dot(self.L(),self.L().T)+self.jitter*sp.eye(self.dim)
return RV
@cached('covar_base')
def K_grad_i(self,i):
if not self._K_act:
raise ValueError("Trying to retrieve the gradient over a "
"parameter that is inactive.")
        RV = sp.dot(self.L(), self.Lgrad(i).T) + sp.dot(self.Lgrad(i), self.L().T)
return RV
@cached
def K_hess_i_j(self, i, j):
if not self._K_act:
raise ValueError("Trying to retrieve the gradient over a "
"parameter that is inactive.")
RV = sp.dot(self.Lgrad(i),self.Lgrad(j).T)
RV+= RV.T
return RV
def K_ste(self):
if self.getFIinv() is None:
R = None
else:
R = sp.zeros((self.dim, self.dim))
R[sp.tril_indices(self.dim)] = sp.sqrt(self.getFIinv().diagonal())
# symmetrize
R = R + R.T - sp.diag(R.diagonal())
return R
####################
# Interpretable Params
####################
def getInterParams(self):
# VARIANCE + CORRELATIONS
#R1 = self.variance
#R2 = self.correlation[sp.tril_indices(self.dim, k = -1)]
#R = sp.concatenate([R1,R2])
# COVARIANCES
R = self.K()[sp.tril_indices(self.dim)]
return R
    # DERIVATIVE WITH RESPECT TO COVARIANCES
def K_grad_interParam_i(self, i):
ix, iy = sp.tril_indices(self.dim)
ix = ix[i]
iy = iy[i]
R = sp.zeros((self.dim,self.dim))
R[ix, iy] = R[iy, ix] = 1
return R
    # DERIVATIVE WITH RESPECT TO VARIANCES AND CORRELATIONS
#def K_grad_interParam_i(self, i):
# if i < self.dim:
# # derivative with respect to the variance
# R = sp.zeros((self.dim,self.dim))
# R[i,:] = self.K()[i,:] / (2 * self.variance[i])
# R += R.T
# else:
# # derivarice with respect to a correlation
# ## 1. take the corresponding off diagonal element
# ix, iy = sp.tril_indices(self.dim, k = -1)
# ix = ix[i - self.dim]
# iy = iy[i - self.dim]
# ## 2. fill it with sqrt(var * var)
# R = sp.zeros((self.dim,self.dim))
# R[ix,iy] = R[iy,ix] = sp.sqrt(self.variance[ix] * self.variance[iy])
# return R
######################
# Private functions
######################
@cached('covar_base')
def L(self):
R = sp.zeros((self.dim, self.dim))
R[(self.idx_r, self.idx_c)] = self.params
return R
@cached
def Lgrad(self, i):
R = sp.zeros((self.dim, self.dim))
R[self.idx_r[i], self.idx_c[i]] = 1
return R
def Xgrad(self, i):
return self.Lgrad(i)
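# Hedged verification sketch (relies only on the FreeFormCov API defined above, and is not part
# of it): it checks that K() reproduces a target covariance plus the diagonal jitter after
# setCovariance(), and that K_grad_i(0) matches a forward finite difference of K() with respect
# to parameter 0, i.e. dK/dtheta_i = Lgrad_i L^T + L Lgrad_i^T.
def _freeform_self_check(dim=3, eps=1e-6):
    A = np.random.randn(dim, dim)
    target = np.dot(A, A.T) + np.eye(dim)  # a random symmetric positive-definite matrix
    cov = FreeFormCov(dim)
    cov.setCovariance(target)
    assert np.allclose(cov.K(), target + cov.jitter * np.eye(dim), atol=1e-6)
    params = cov.getParams().copy()
    cov.setParams(params + eps * np.eye(len(params))[0])  # perturb parameter 0
    K_plus = cov.K().copy()
    cov.setParams(params)
    num_grad = (K_plus - cov.K()) / eps
    assert np.allclose(cov.K_grad_i(0), num_grad, atol=1e-4)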
if __name__ == '__main__':
n = 2
cov = FreeFormCov(n)
print((cov.K()))
print((cov.K_grad_i(0)))
|
|
from itertools import count
from math import ceil, sqrt
from functools import wraps
import bisect
import os
from toolz import (merge, partial, accumulate, unique, first, dissoc, valmap,
                   partition)
from operator import getitem, setitem
import pandas as pd
import numpy as np
import operator
import gzip
import bz2
from pframe import pframe
import bcolz
try:
from chest import Chest as Cache
except ImportError:
Cache = dict
from .. import array as da
from .. import core
from ..array.core import partial_by_order
from ..async import get_sync
from ..threaded import get as get_threaded
from ..compatibility import unicode, apply
from ..utils import repr_long_list, IndexCallable, pseudorandom
def _concat(args):
""" Generic concat operation """
if not args:
return args
if isinstance(first(core.flatten(args)), np.ndarray):
return da.core.concatenate3(args)
if len(args) == 1:
return args[0]
if isinstance(args[0], (pd.DataFrame, pd.Series)):
args2 = [arg for arg in args if len(arg)]
if not args2:
return args[0]
return pd.concat(args2)
if isinstance(args[0], (pd.Index)):
args = [arg for arg in args if len(arg)]
result = pd.concat(map(pd.Series, args))
result = type(args[0])(result.values)
result.name = args[0].name
return result
return args
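# Minimal sketch of the pandas branch of _concat above (illustrative only): empty blocks are
# dropped before concatenation, and if every block is empty the first block is returned as-is.
def _concat_pandas_sketch(blocks):
    non_empty = [b for b in blocks if len(b)]
    return pd.concat(non_empty) if non_empty else blocks[0]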
def compute(*args, **kwargs):
""" Compute multiple frames at once """
if len(args) == 1 and isinstance(args[0], (tuple, list)):
args = args[0]
dsk = merge(*[arg.dask for arg in args])
keys = [arg._keys() for arg in args]
results = get(dsk, keys, **kwargs)
return list(map(_concat, results))
names = ('f-%d' % i for i in count(1))
class Scalar(object):
""" A Dask-thing to represent a scalar
TODO: Clean up this abstraction
"""
def __init__(self, dsk, _name):
self.dask = dsk
self._name = _name
self.divisions = []
@property
def _args(self):
return (self.dask, self._name)
def _keys(self):
return [(self._name, 0)]
def compute(self, **kwargs):
return compute(self, **kwargs)[0]
class _Frame(object):
""" Superclass for DataFrame and Series """
@property
def npartitions(self):
return len(self.divisions) + 1
def compute(self, **kwargs):
return compute(self, **kwargs)[0]
def _keys(self):
return [(self._name, i) for i in range(self.npartitions)]
def _visualize(self, optimize_graph=False):
from dask.dot import dot_graph
if optimize_graph:
dot_graph(optimize(self.dask, self._keys()))
else:
dot_graph(self.dask)
@property
def index(self):
name = self._name + '-index'
dsk = dict(((name, i), (getattr, key, 'index'))
for i, key in enumerate(self._keys()))
return Index(merge(dsk, self.dask), name, None, self.divisions)
@property
def known_divisions(self):
return len(self.divisions) > 0 and self.divisions[0] is not None
def cache(self, cache=Cache):
""" Evaluate Dataframe and store in local cache
Uses chest by default to store data on disk
"""
if callable(cache):
cache = cache()
# Evaluate and store in cache
name = next(names)
dsk = dict(((name, i), (setitem, cache, (tuple, list(key)), key))
for i, key in enumerate(self._keys()))
get(merge(dsk, self.dask), list(dsk.keys()))
# Create new Frame pointing to that cache
dsk2 = dict((key, (getitem, cache, (tuple, list(key))))
for key in self._keys())
return type(self)(dsk2, name, self.column_info, self.divisions)
def drop_duplicates(self):
chunk = lambda s: s.drop_duplicates()
return aca(self, chunk=chunk, aggregate=chunk, columns=self.columns)
def __len__(self):
return reduction(self, len, np.sum).compute()
def map_blocks(self, func, columns=None):
""" Apply Python function on each DataFrame block
Provide columns of the output if they are not the same as the input.
"""
if columns is None:
columns = self.column_info
name = next(names)
dsk = dict(((name, i), (func, (self._name, i)))
for i in range(self.npartitions))
return type(self)(merge(dsk, self.dask), name,
columns, self.divisions)
def random_split(self, p, seed=None):
""" Pseudorandomly split dataframe into different pieces row-wise
50/50 split
>>> a, b = df.random_split([0.5, 0.5]) # doctest: +SKIP
80/10/10 split, consistent seed
>>> a, b, c = df.random_split([0.8, 0.1, 0.1], seed=123) # doctest: +SKIP
"""
if seed is not None:
np.random.seed(seed)
seeds = np.random.randint(0, 2**31, self.npartitions)
dsk_full = dict(((self._name + '-split-full', i),
(pd_split, (self._name, i), p, seed))
for i, seed in enumerate(seeds))
dsks = [dict(((self._name + '-split-%d' % i, j),
(getitem, (self._name + '-split-full', j), i))
for j in range(self.npartitions))
for i in range(len(p))]
return [type(self)(merge(self.dask, dsk_full, dsk),
self._name + '-split-%d' % i,
self.column_info,
self.divisions)
for i, dsk in enumerate(dsks)]
def head(self, n=10, compute=True):
""" First n rows of the dataset
        Caveat: this only checks the first n rows of the first partition.
"""
name = next(names)
dsk = {(name, 0): (head, (self._name, 0), n)}
result = type(self)(merge(self.dask, dsk), name,
self.column_info, [])
if compute:
result = result.compute()
return result
def _partition_of_index_value(self, val):
""" In which partition does this value lie? """
return bisect.bisect_right(self.divisions, val)
def _loc(self, ind):
""" Helper function for the .loc accessor """
if not self.known_divisions:
raise ValueError(
"Can not use loc on DataFrame without known divisions")
name = next(names)
if not isinstance(ind, slice):
part = self._partition_of_index_value(ind)
dsk = {(name, 0): (lambda df: df.loc[ind], (self._name, part))}
return type(self)(merge(self.dask, dsk), name,
self.column_info, [])
else:
assert ind.step in (None, 1)
if ind.start:
start = self._partition_of_index_value(ind.start)
else:
start = 0
if ind.stop is not None:
stop = self._partition_of_index_value(ind.stop)
else:
stop = self.npartitions - 1
if stop == start:
dsk = {(name, 0): (_loc, (self._name, start), ind.start, ind.stop)}
else:
dsk = merge(
{(name, 0): (_loc, (self._name, start), ind.start, None)},
dict(((name, i), (self._name, start + i))
for i in range(1, stop - start)),
{(name, stop - start): (_loc, (self._name, stop), None, ind.stop)})
return type(self)(merge(self.dask, dsk), name, self.column_info,
self.divisions[start:stop])
@property
def loc(self):
return IndexCallable(self._loc)
@property
def iloc(self):
raise AttributeError("Dask Dataframe does not support iloc")
def __getstate__(self):
return self.__dict__
def __setstate__(self, dict):
self.__dict__ = dict
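# Worked example (illustrative, based on _partition_of_index_value above): with divisions
# [10, 20, 30] a frame has len(divisions) + 1 = 4 partitions, and an index value is routed to a
# partition with bisect_right, e.g. 5 -> partition 0, 10 -> 1, 25 -> 2, 40 -> 3.
def _partition_lookup_sketch(divisions, val):
    return bisect.bisect_right(divisions, val)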
class Series(_Frame):
""" Out-of-core Series object
Mimics ``pandas.Series``.
See Also
--------
dask.dataframe.DataFrame
"""
_partition_type = pd.Series
def __init__(self, dsk, _name, name, divisions):
self.dask = dsk
self._name = _name
self.name = name
self.divisions = divisions
@property
def _args(self):
return (self.dask, self._name, self.name, self.divisions)
@property
def dtype(self):
return self.head().dtype
@property
def column_info(self):
return self.name
@property
def columns(self):
return (self.name,)
def __repr__(self):
return ("dd.Series<%s, divisions=%s>" %
(self._name, repr_long_list(self.divisions)))
def quantiles(self, q):
""" Approximate quantiles of column
q : list/array of floats
Iterable of numbers ranging from 0 to 100 for the desired quantiles
"""
return quantiles(self, q)
def __getitem__(self, key):
name = next(names)
if isinstance(key, Series) and self.divisions == key.divisions:
dsk = dict(((name, i), (operator.getitem, (self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return Series(merge(self.dask, key.dask, dsk), name,
self.name, self.divisions)
raise NotImplementedError()
def __abs__(self):
return elemwise(operator.abs, self)
def __add__(self, other):
return elemwise(operator.add, self, other)
def __radd__(self, other):
return elemwise(operator.add, other, self)
def __and__(self, other):
return elemwise(operator.and_, self, other)
def __rand__(self, other):
return elemwise(operator.and_, other, self)
def __div__(self, other):
return elemwise(operator.div, self, other)
def __rdiv__(self, other):
return elemwise(operator.div, other, self)
def __eq__(self, other):
return elemwise(operator.eq, self, other)
def __gt__(self, other):
return elemwise(operator.gt, self, other)
def __ge__(self, other):
return elemwise(operator.ge, self, other)
def __invert__(self):
return elemwise(operator.inv, self)
def __lt__(self, other):
return elemwise(operator.lt, self, other)
def __le__(self, other):
return elemwise(operator.le, self, other)
def __mod__(self, other):
return elemwise(operator.mod, self, other)
def __rmod__(self, other):
return elemwise(operator.mod, other, self)
def __mul__(self, other):
return elemwise(operator.mul, self, other)
def __rmul__(self, other):
return elemwise(operator.mul, other, self)
def __ne__(self, other):
return elemwise(operator.ne, self, other)
def __neg__(self):
return elemwise(operator.neg, self)
def __or__(self, other):
return elemwise(operator.or_, self, other)
def __ror__(self, other):
return elemwise(operator.or_, other, self)
def __pow__(self, other):
return elemwise(operator.pow, self, other)
def __rpow__(self, other):
return elemwise(operator.pow, other, self)
def __sub__(self, other):
return elemwise(operator.sub, self, other)
def __rsub__(self, other):
return elemwise(operator.sub, other, self)
def __truediv__(self, other):
return elemwise(operator.truediv, self, other)
def __rtruediv__(self, other):
return elemwise(operator.truediv, other, self)
def __floordiv__(self, other):
return elemwise(operator.floordiv, self, other)
def __rfloordiv__(self, other):
return elemwise(operator.floordiv, other, self)
def __xor__(self, other):
return elemwise(operator.xor, self, other)
def __rxor__(self, other):
return elemwise(operator.xor, other, self)
def sum(self):
return reduction(self, pd.Series.sum, np.sum)
def max(self):
return reduction(self, pd.Series.max, np.max)
def min(self):
return reduction(self, pd.Series.min, np.min)
def count(self):
return reduction(self, pd.Series.count, np.sum)
def mean(self):
def chunk(ser):
return (ser.sum(), ser.count())
def agg(seq):
sums, counts = list(zip(*seq))
return 1.0 * sum(sums) / sum(counts)
return reduction(self, chunk, agg)
def var(self, ddof=1):
def chunk(ser):
return (ser.sum(), (ser**2).sum(), ser.count())
def agg(seq):
x, x2, n = list(zip(*seq))
x = float(sum(x))
x2 = float(sum(x2))
n = sum(n)
result = (x2 / n) - (x / n)**2
if ddof:
result = result * n / (n - ddof)
return result
return reduction(self, chunk, agg)
def std(self, ddof=1):
name = next(names)
f = self.var(ddof=ddof)
dsk = {(name, 0): (sqrt, (f._name, 0))}
return Scalar(merge(f.dask, dsk), name)
def value_counts(self):
chunk = lambda s: s.value_counts()
agg = lambda s: s.groupby(level=0).sum()
return aca(self, chunk=chunk, aggregate=agg, columns=self.columns)
def isin(self, other):
return elemwise(pd.Series.isin, self, other)
@wraps(pd.Series.map)
def map(self, arg, na_action=None):
return elemwise(pd.Series.map, self, arg, na_action, name=self.name)
class Index(Series):
pass
class DataFrame(_Frame):
"""
Implements out-of-core DataFrame as a sequence of pandas DataFrames
This is a work in progress. It is buggy and far from complete.
Please do not use it yet.
Parameters
----------
dask: dict
The dask graph to compute this Dataframe
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame
columns: list of strings
Column names. This metadata aids usability
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_partition_type = pd.DataFrame
def __init__(self, dask, name, columns, divisions):
self.dask = dask
self._name = name
self.columns = tuple(columns)
self.divisions = tuple(divisions)
@property
def _args(self):
return (self.dask, self._name, self.columns, self.divisions)
def __getitem__(self, key):
if isinstance(key, (str, unicode)):
name = self._name + '.' + key
if key in self.columns:
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return Series(merge(self.dask, dsk), name,
key, self.divisions)
if isinstance(key, list):
name = '%s[%s]' % (self._name, str(key))
if all(k in self.columns for k in key):
dsk = dict(((name, i), (operator.getitem,
(self._name, i),
(list, key)))
for i in range(self.npartitions))
return DataFrame(merge(self.dask, dsk), name,
key, self.divisions)
if isinstance(key, Series) and self.divisions == key.divisions:
name = next(names)
dsk = dict(((name, i), (operator.getitem, (self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return DataFrame(merge(self.dask, key.dask, dsk), name,
self.columns, self.divisions)
raise NotImplementedError()
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError as e:
try:
return self[key]
except NotImplementedError:
raise e
def __dir__(self):
return sorted(set(list(dir(type(self))) + list(self.columns)))
def __repr__(self):
return ("dd.DataFrame<%s, divisions=%s>" %
(self._name, repr_long_list(self.divisions)))
@property
def dtypes(self):
return get(self.dask, self._keys()[0]).dtypes
def set_index(self, other, **kwargs):
return set_index(self, other, **kwargs)
def set_partition(self, column, divisions, **kwargs):
""" Set explicit divisions for new column index
>>> df2 = df.set_partition('new-index-column', divisions=[10, 20, 50]) # doctest: +SKIP
See also:
set_index
"""
return set_partition(self, column, divisions, **kwargs)
@property
def column_info(self):
return self.columns
def groupby(self, key, **kwargs):
return GroupBy(self, key, **kwargs)
def categorize(self, columns=None, **kwargs):
return categorize(self, columns, **kwargs)
@wraps(pd.DataFrame.assign)
def assign(self, **kwargs):
pairs = list(sum(kwargs.items(), ()))
# Figure out columns of the output
df = pd.DataFrame(columns=self.columns)
df2 = df.assign(**dict((k, []) for k in kwargs))
return elemwise(_assign, self, *pairs, columns=list(df2.columns))
def _assign(df, *pairs):
kwargs = dict(partition(2, pairs))
return df.assign(**kwargs)
def _loc(df, start, stop):
return df.loc[slice(start, stop)]
def head(x, n):
""" First n elements of dask.Dataframe or dask.Series """
return x.head(n)
def consistent_name(names):
""" New name for series in elementwise operation
If all truthy names are the same, choose that one, otherwise, choose None
"""
allnames = set()
for name in names:
if name is None:
continue
if isinstance(name, (tuple, list)):
allnames.update(name)
else:
allnames.add(name)
if len(allnames) == 1:
return first(allnames)
else:
return None
def elemwise(op, *args, **kwargs):
""" Elementwise operation for dask.Dataframes """
columns = kwargs.get('columns', None)
name = kwargs.get('name', None)
_name = next(names)
frames = [arg for arg in args if isinstance(arg, _Frame)]
other = [(i, arg) for i, arg in enumerate(args)
if not isinstance(arg, _Frame)]
if other:
op2 = partial_by_order(op, other)
else:
op2 = op
assert all(f.divisions == frames[0].divisions for f in frames)
assert all(f.npartitions == frames[0].npartitions for f in frames)
dsk = dict(((_name, i), (op2,) + frs)
for i, frs in enumerate(zip(*[f._keys() for f in frames])))
if columns is not None:
return DataFrame(merge(dsk, *[f.dask for f in frames]),
_name, columns, frames[0].divisions)
else:
column_name = name or consistent_name(n for f in frames
for n in f.columns)
return Series(merge(dsk, *[f.dask for f in frames]),
_name, column_name, frames[0].divisions)
def reduction(x, chunk, aggregate):
""" General version of reductions
>>> reduction(my_frame, np.sum, np.sum) # doctest: +SKIP
"""
a = next(names)
dsk = dict(((a, i), (chunk, (x._name, i)))
for i in range(x.npartitions))
b = next(names)
dsk2 = {(b, 0): (aggregate, (tuple, [(a,i) for i in range(x.npartitions)]))}
return Scalar(merge(x.dask, dsk, dsk2), b)
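# Hedged sketch of the chunk/aggregate pattern that reduction() builds a graph for (Series.sum
# and Series.mean above are instances of it): apply `chunk` to each partition independently,
# then apply `aggregate` once to the collected results. This version runs eagerly on plain
# pandas objects and is for illustration only.
def _reduction_sketch(partitions, chunk, aggregate):
    return aggregate([chunk(part) for part in partitions])
# For example, a mean over two pandas Series partitions, mirroring Series.mean above:
#   _reduction_sketch([pd.Series([1, 2, 3]), pd.Series([4, 5])],
#                     lambda s: (s.sum(), s.count()),
#                     lambda seq: 1.0 * sum(x for x, _ in seq) / sum(n for _, n in seq))
#   # -> 3.0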
def concat(dfs):
""" Concatenate dataframes along rows
Currently only supports unknown divisions
"""
if any(df.known_divisions for df in dfs):
# For this to work we need to add a final division for "maximum element"
raise NotImplementedError("Concat can't currently handle dataframes"
" with known divisions")
name = next(names)
dsk = dict()
i = 0
for df in dfs:
for key in df._keys():
dsk[(name, i)] = key
i += 1
divisions = [None] * (i - 1)
return DataFrame(merge(dsk, *[df.dask for df in dfs]), name,
dfs[0].columns, divisions)
class GroupBy(object):
def __init__(self, frame, index=None, **kwargs):
self.frame = frame
self.index = index
self.kwargs = kwargs
if isinstance(index, list):
assert all(i in frame.columns for i in index)
elif isinstance(index, Series):
assert index.divisions == frame.divisions
else:
assert index in frame.columns
def apply(self, func, columns=None):
if isinstance(self.index, Series) and self.index._name == self.frame.index._name:
f = self.frame
else:
f = set_index(self.frame, self.index, **self.kwargs)
return f.map_blocks(lambda df: df.groupby(level=0).apply(func),
columns=columns)
def __getitem__(self, key):
if key in self.frame.columns:
return SeriesGroupBy(self.frame, self.index, key)
else:
raise KeyError()
def __dir__(self):
return sorted(set(list(dir(type(self))) + list(self.frame.columns)))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
try:
return self[key]
except KeyError:
raise AttributeError()
class SeriesGroupBy(object):
def __init__(self, frame, index, key, **kwargs):
self.frame = frame
self.index = index
self.key = key
self.kwargs = kwargs
    def apply(self, func, columns=None):
f = set_index(self.frame, self.index, **self.kwargs)
return f.map_blocks(lambda df:df.groupby(level=0)[self.key].apply(func),
columns=columns)
def sum(self):
chunk = lambda df, index: df.groupby(index)[self.key].sum()
agg = lambda df: df.groupby(level=0).sum()
return aca([self.frame, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def min(self):
chunk = lambda df, index: df.groupby(index)[self.key].min()
agg = lambda df: df.groupby(level=0).min()
return aca([self.frame, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def max(self):
chunk = lambda df, index: df.groupby(index)[self.key].max()
agg = lambda df: df.groupby(level=0).max()
return aca([self.frame, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def count(self):
chunk = lambda df, index: df.groupby(index)[self.key].count()
agg = lambda df: df.groupby(level=0).sum()
return aca([self.frame, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def mean(self):
def chunk(df, index):
g = df.groupby(index)
return g.agg({self.key: ['sum', 'count']})
def agg(df):
g = df.groupby(level=0)
x = g.agg({(self.key, 'sum'): 'sum',
(self.key, 'count'): 'sum'})
return 1.0 * x[self.key]['sum'] / x[self.key]['count']
return aca([self.frame, self.index],
chunk=chunk, aggregate=agg, columns=[])
def apply_concat_apply(args, chunk=None, aggregate=None, columns=None):
""" Apply a function to blocks, the concat, then apply again
Parameters
----------
args: dask.DataFrames
All Dataframes should be partitioned and indexed equivalently
chunk: function [block-per-arg] -> block
Function to operate on each block of data
aggregate: function concatenated-block -> block
Function to operate on the concatenated result of chunk
>>> def chunk(a_block, b_block):
... pass
>>> def agg(df):
... pass
>>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg) # doctest: +SKIP
"""
if not isinstance(args, (tuple, list)):
args = [args]
assert all(arg.npartitions == args[0].npartitions
for arg in args
if isinstance(arg, _Frame))
a = next(names)
dsk = dict(((a, i), (apply, chunk, (list, [(x._name, i)
if isinstance(x, _Frame)
else x for x in args])))
for i in range(args[0].npartitions))
b = next(names)
dsk2 = {(b, 0): (aggregate,
(pd.concat,
(list, [(a, i) for i in range(args[0].npartitions)])))}
return type(args[0])(
merge(dsk, dsk2, *[a.dask for a in args
if isinstance(a, _Frame)]),
b, columns, [])
aca = apply_concat_apply
def categorize_block(df, categories):
""" Categorize a dataframe with given categories
df: DataFrame
categories: dict mapping column name to iterable of categories
"""
df = df.copy()
for col, vals in categories.items():
df[col] = pd.Categorical(df[col], categories=vals,
ordered=False, name=col)
return df
def categorize(f, columns=None, **kwargs):
"""
Convert columns of dask.frame to category dtype
This greatly aids performance, both in-memory and in spilling to disk
"""
if columns is None:
dtypes = f.dtypes
columns = [name for name, dt in zip(dtypes.index, dtypes.values)
if dt == 'O']
if not isinstance(columns, (list, tuple)):
columns = [columns]
distincts = [f[col].drop_duplicates() for col in columns]
values = compute(distincts, **kwargs)
func = partial(categorize_block, categories=dict(zip(columns, values)))
return f.map_blocks(func, columns=f.columns)
def quantiles(f, q, **kwargs):
""" Approximate quantiles of column
q : list/array of floats
Iterable of numbers ranging from 0 to 100 for the desired quantiles
"""
assert len(f.columns) == 1
from dask.array.percentile import _percentile, merge_percentiles
name = next(names)
val_dsk = dict(((name, i), (_percentile, (getattr, key, 'values'), q))
for i, key in enumerate(f._keys()))
name2 = next(names)
len_dsk = dict(((name2, i), (len, key)) for i, key in enumerate(f._keys()))
name3 = next(names)
merge_dsk = {(name3, 0): (merge_percentiles, q, [q] * f.npartitions,
sorted(val_dsk),
sorted(len_dsk))}
dsk = merge(f.dask, val_dsk, len_dsk, merge_dsk)
return da.Array(dsk, name3, chunks=((len(q),),))
def get(dsk, keys, get=get_sync, **kwargs):
""" Get function with optimizations specialized to dask.Dataframe """
from .optimize import optimize
dsk2 = optimize(dsk, keys, **kwargs)
return get(dsk2, keys, **kwargs) # use synchronous scheduler for now
def pd_split(df, p, seed=0):
""" Split DataFrame into multiple pieces pseudorandomly
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [2, 3, 4, 5, 6, 7]})
>>> a, b = pd_split(df, [0.5, 0.5], seed=123) # roughly 50/50 split
>>> a
a b
1 2 3
2 3 4
5 6 7
>>> b
a b
0 1 2
3 4 5
4 5 6
"""
p = list(p)
index = pseudorandom(len(df), p, seed)
return [df.iloc[index == i] for i in range(len(p))]
from .shuffle import set_index, set_partition
|
|
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.auditor
:platform: Unix
:synopsis: This class is subclassed to add audit rules.
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <[email protected]>
"""
import datastore
from security_monkey import app, db
from security_monkey.watcher import ChangeItem
from security_monkey.common.jinja import get_jinja_env
from security_monkey.datastore import User
from security_monkey.common.utils.utils import send_email
class Auditor(object):
"""
    This class (and its subclasses) runs a number of rules against the configurations
    and looks for any violations. These violations are saved with the object and a report
is made available via the Web UI and through email.
"""
index = None # Should be overridden
i_am_singular = None # Should be overridden
i_am_plural = None # Should be overridden
def __init__(self, accounts=None, debug=False):
self.datastore = datastore.Datastore()
self.accounts = accounts
self.debug = debug
self.items = []
self.team_emails = app.config.get('SECURITY_TEAM_EMAIL')
self.emails = []
self.emails.extend(self.team_emails)
for account in self.accounts:
            users = User.query.filter(User.daily_audit_email==True).filter(User.accounts.any(name=account)).all()
new_emails = [user.email for user in users]
self.emails.extend(new_emails)
def add_issue(self, score, issue, item, notes=None):
"""
Adds a new issue to an item, if not already reported.
:return: The new issue
"""
if notes and len(notes) > 512:
notes = notes[0:512]
for existing_issue in item.audit_issues:
if existing_issue.issue == issue:
if existing_issue.notes == notes:
if existing_issue.score == score:
app.logger.debug(
"Not adding issue because it was already found:{}/{}/{}/{}\n\t{} -- {}"
.format(item.index, item.region, item.account, item.name, issue, notes))
return existing_issue
app.logger.debug("Adding issue: {}/{}/{}/{}\n\t{} -- {}"
.format(item.index, item.region, item.account, item.name, issue, notes))
new_issue = datastore.ItemAudit(score=score,
issue=issue,
notes=notes,
justified=False,
justified_user_id=None,
justified_date=None,
justification=None)
item.audit_issues.append(new_issue)
return new_issue
def prep_for_audit(self):
"""
        To be overridden by child classes that
        need a way to prepare for the next run.
"""
pass
def audit_these_objects(self, items):
"""
Only inspect the given items.
"""
app.logger.debug("Asked to audit {} Objects".format(len(items)))
self.prep_for_audit()
        methods = [getattr(self, method_name) for method_name in dir(self) if method_name.startswith("check_")]
app.logger.debug("methods: {}".format(methods))
for item in items:
for method in methods:
method(item)
self.items = items
def audit_all_objects(self):
"""
Read all items from the database and inspect them all.
"""
self.items = self.read_previous_items()
self.audit_these_objects(self.items)
def read_previous_items(self):
"""
Pulls the last-recorded configuration from the database.
:return: List of all items for the given technology and the given account.
"""
prev_list = []
for account in self.accounts:
prev = self.datastore.get_all_ctype_filtered(tech=self.index, account=account, include_inactive=False)
# Returns a map of {Item: ItemRevision}
for item in prev:
item_revision = prev[item]
new_item = ChangeItem(index=self.index,
region=item.region,
account=item.account.name,
name=item.name,
new_config=item_revision.config)
                # Start from a clean list of audit issues; existing issues are reconciled
                # against the new findings later, in save_issues(), via db_item.issues.
                new_item.audit_issues = []
new_item.db_item = item
prev_list.append(new_item)
return prev_list
def save_issues(self):
"""
Save all new issues. Delete all fixed issues.
"""
app.logger.debug("\n\nSaving Issues.")
for item in self.items:
if not hasattr(item, 'db_item'):
item.db_item = self.datastore._get_item(item.index, item.region, item.account, item.name)
existing_issues = item.db_item.issues
new_issues = item.audit_issues
# Add new issues
for new_issue in new_issues:
nk = "{} -- {}".format(new_issue.issue, new_issue.notes)
if nk not in ["{} -- {}".format(old_issue.issue, old_issue.notes) for old_issue in existing_issues]:
app.logger.debug("Saving NEW issue {}".format(nk))
item.found_new_issue = True
item.confirmed_new_issues.append(new_issue)
item.db_item.issues.append(new_issue)
db.session.add(item.db_item)
db.session.add(new_issue)
else:
for issue in existing_issues:
if issue.issue == new_issue.issue and issue.notes == new_issue.notes:
item.confirmed_existing_issues.append(issue)
break
key = "{}/{}/{}/{}".format(item.index, item.region, item.account, item.name)
app.logger.debug("Issue was previously found. Not overwriting.\n\t{}\n\t{}".format(key, nk))
# Delete old issues
for old_issue in existing_issues:
ok = "{} -- {}".format(old_issue.issue, old_issue.notes)
if ok not in ["{} -- {}".format(new_issue.issue, new_issue.notes) for new_issue in new_issues]:
app.logger.debug("Deleting FIXED issue {}".format(ok))
item.confirmed_fixed_issues.append(old_issue)
db.session.delete(old_issue)
db.session.commit()
def email_report(self, report):
"""
Given a report, send an email using SES.
"""
if not report:
app.logger.info("No Audit issues. Not sending audit email.")
return
subject = "Security Monkey {} Auditor Report".format(self.i_am_singular)
send_email(subject=subject, recipients=self.emails, html=report)
def create_report(self):
"""
Using a Jinja template (jinja_audit_email.html), create a report that can be emailed.
:return: HTML - The output of the rendered template.
"""
jenv = get_jinja_env()
template = jenv.get_template('jinja_audit_email.html')
# This template expects a list of items that have been sorted by total score in
        # descending order.
for item in self.items:
item.totalscore = 0
for issue in item.audit_issues:
item.totalscore = item.totalscore + issue.score
sorted_list = sorted(self.items, key=lambda item: item.totalscore)
sorted_list.reverse()
report_list = []
for item in sorted_list:
if item.totalscore > 0:
report_list.append(item)
else:
break
if len(report_list) > 0:
return template.render({'items': report_list})
else:
return False
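# Hedged sketch (not part of Security Monkey itself) of how a technology-specific auditor
# plugs into the class above: audit_these_objects() discovers every method whose name
# starts with "check_" and calls it once per item, and each rule reports findings through
# add_issue().  The index/i_am_* values, the attribute used to reach the item's
# configuration, and the rule below are illustrative assumptions only.
class ExampleSecurityGroupAuditor(Auditor):
    index = 'examplesecuritygroup'
    i_am_singular = 'Example Security Group'
    i_am_plural = 'Example Security Groups'
    def check_open_to_the_world(self, item):
        # 'new_config' mirrors the attribute used when building ChangeItems above; adjust
        # it if the watcher exposes its configuration under a different name.
        config = getattr(item, 'new_config', None) or {}
        for rule in config.get('rules', []):
            if rule.get('cidr_ip') == '0.0.0.0/0':
                self.add_issue(10, 'Security group is open to the world.', item,
                               notes=rule.get('cidr_ip'))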
|
|
import numpy as n, calendar, datetime
def circularStatistics(population, period):
pop=n.array(population)
pop_=pop*2*n.pi/period
    j=1j  # imaginary unit; n.complex() is removed from modern NumPy
vec_n=n.e**(j*pop_)
mean_vec=vec_n.sum()/len(vec_n) # first moment
    mean_angle=n.arctan2(mean_vec.imag,mean_vec.real)  # angle of the mean resultant vector: atan2(sin, cos)
size_mean_vec=n.abs(mean_vec)
variance_unity_radius=1-size_mean_vec
std_unity_radius=n.sqrt(-2*n.log(size_mean_vec))
circular_mean=mean_angle*(period/(2*n.pi))
circular_variance=variance_unity_radius*(period**2/(2*n.pi))
circular_std=std_unity_radius*(period/(2*n.pi))
second_moment=(vec_n**2).sum()/len(vec_n)
size_second_moment=n.abs(second_moment)
circular_dispersion=(1-size_second_moment)/(2*(size_mean_vec**2))
return dict(mean_vec=mean_vec,
mean_angle=mean_angle,
size_mean_vec=size_mean_vec,
circular_mean=circular_mean,
circular_variance=circular_variance,
circular_std=circular_std,
variance_unity_radius=variance_unity_radius,
std_unity_radius=std_unity_radius,
circular_dispersion=circular_dispersion)
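# Hedged example: for hour-of-day samples clustered around midnight the arithmetic mean is
# misleading (the mean of 23h and 1h is 12h), whereas the circular mean wraps correctly.
# With samples [23, 0, 1, 2] and period=24 the returned circular_mean is roughly 0.5
# (half past midnight), modulo the period:
#
#   stats = circularStatistics([23, 0, 1, 2], 24)
#   stats['circular_mean'] % 24   # ~0.5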
class TimeStatistics:
def __init__(self,list_datastructures=None):
        if not list_datastructures:
            raise ValueError("input datastructures, please")
        datetimes=[]
        # use a name other than "datetime" so the datetime module is not shadowed
        for record in list_datastructures.raw_clean_dates:
            datetimes.append(record[1])
        self.datetimes=datetimes
self.n_observations=len(datetimes)
self.bad_datetimes=[]
self.makeStatistics()
def makeStatistics(self):
"""Make statistics from seconds to years"""
self.uniformComparisson()
self.secondsStats()
self.minutesStats()
self.hoursStats()
self.weekdaysStats()
self.monthdaysStats_()
self.monthsStats()
self.yearsStats()
def uniformComparisson(self):
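        # Monte Carlo baselines: for 1000 synthetic observers drawing the same number of
        # samples uniformly at random, record the min/max histogram-count ratio for 60,
        # 24, 30 and 7 bins.  The (mean, std) pairs stored in obs60/obs24/obs30/obs7 are
        # later reported as max_discrepancy_ next to the observed max_discrepancy values.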
ar=n.random.randint(0,60,(1000,self.n_observations))
cc=n.array([n.histogram(i,60)[0] for i in ar])
cc_=cc.min(1)/cc.max(1)
self.obs60=(cc_.mean(),cc_.std())
self.obs60_=cc_
ar=n.random.randint(0,24,(1000,self.n_observations))
cc=n.array([n.histogram(i,24)[0] for i in ar])
cc_=cc.min(1)/cc.max(1)
self.obs24=(cc_.mean(),cc_.std())
self.obs24_=cc_
ar=n.random.randint(0,30,(1000,self.n_observations))
cc=n.array([n.histogram(i,30)[0] for i in ar])
cc_=cc.min(1)/cc.max(1)
self.obs30=(cc_.mean(),cc_.std())
self.obs30_=cc_
ar=n.random.randint(0,7,(1000,self.n_observations))
cc=n.array([n.histogram(i,7)[0] for i in ar])
cc_=cc.min(1)/cc.max(1)
self.obs7=(cc_.mean(),cc_.std())
self.obs7_=cc_
#self.obs60=n.random.randint(0,60,(1000, self.n_observations))
#self.count_obs60=[obs60.count(i) for i in set(obs60)]
#self.obs24=n.random.randint(0,24,self.n_observations)
#self.count_obs24=[obs24.count(i) for i in set(obs24)]
# IN MONTHs function:
#self.obs12=n.random.randint(0,12,len(self.months.samples))
#self.obs30=n.random.randint(0,30,self.n_observations)
#self.count_obs12=[obs12.count(i) for i in set(obs12)]
#self.obs7=n.random.randint(0,7,self.n_observations)
#self.count_obs12=[obs12.count(i) for i in set(obs12)]
def secondsStats(self):
        # counts for the histogram
seconds=[i.second for i in self.datetimes]
histogram=n.histogram(seconds,bins=list(range(61)))[0]
max_discrepancy=histogram.min()/histogram.max()
        # circular measures
circular_measures=circularStatistics(seconds,60)
seconds=dict(
circular_measures=circular_measures,
max_discrepancy=max_discrepancy,
max_discrepancy_=self.obs60,
samples=seconds,
histogram=histogram)
self.seconds=seconds
def minutesStats(self):
samples=[i.minute for i in self.datetimes]
histogram=n.histogram(samples,bins=list(range(61)))[0]
max_discrepancy=histogram.min()/histogram.max()
        # circular measures
circular_measures=circularStatistics(samples,60)
minutes=dict(
samples=samples,
histogram=histogram,
max_discrepancy=max_discrepancy,
max_discrepancy_=self.obs60,
circular_measures=circular_measures
)
self.minutes=minutes
def hoursStats(self):
samples=[i.hour for i in self.datetimes]
histogram=n.histogram(samples,bins=list(range(25)))[0]
max_discrepancy=histogram.min()/histogram.max()
        # circular measures
circular_measures=circularStatistics(samples,24)
hours=dict(
samples=samples,
histogram=histogram,
max_discrepancy=max_discrepancy,
max_discrepancy_=self.obs24,
circular_measures=circular_measures
)
self.hours=hours
def weekdaysStats(self):
samples=[i.weekday() for i in self.datetimes]
histogram=n.histogram(samples,bins=list(range(8)))[0]
max_discrepancy=histogram.min()/histogram.max()
        # circular measures
circular_measures=circularStatistics(samples,7)
self.weekdays=dict(
samples=samples,
histogram=histogram,
max_discrepancy=max_discrepancy,
max_discrepancy_=self.obs7,
circular_measures=circular_measures
)
def monthdaysStats_(self):
def aux(xx):
#return (xx.day-1)/(
# calendar.monthrange(xx.year, xx.month)[1] )
return ((xx.day-1)*24*60+xx.hour*60+xx.minute )/(
calendar.monthrange(xx.year, xx.month)[1]*24*60)
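        # aux() maps a datetime to its fractional position within its own month, e.g.
        # 2020-01-16 12:00 -> (15*1440 + 720) / (31*1440) = 0.5.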
samples=[aux(i) for i in self.datetimes]
mean_month_size=n.mean([calendar.monthrange(xx.year, xx.month)[1]
for xx in self.datetimes])
        mean_month_size=int(n.round(mean_month_size))  # bin count passed to linspace must be an integer
histogram=n.histogram(samples,bins=n.linspace(0,1,mean_month_size+1))[0]
max_discrepancy=histogram.min()/histogram.max()
        # circular measures
circular_measures=circularStatistics([i*mean_month_size for i in samples],mean_month_size)
self.monthdays=dict(
mean_month_size=mean_month_size,
samples=samples,
histogram=histogram,
max_discrepancy=max_discrepancy,
max_discrepancy_=self.obs30,
circular_measures=circular_measures,
)
def monthdaysStats(self):
def aux(xx):
return (xx.day-1)/(
calendar.monthrange(xx.year, xx.month)[1] )
samples=[aux(i) for i in self.datetimes]
mean_month_size=n.mean([calendar.monthrange(xx.year, xx.month)[1]
for xx in self.datetimes])
        mean_month_size=int(n.round(mean_month_size))  # bin count passed to linspace must be an integer
histogram=n.histogram(samples,bins=n.linspace(0,1,mean_month_size+1))[0]
max_discrepancy=histogram.min()/histogram.max()
        # circular measures
circular_measures=circularStatistics(samples,1)
self.monthdays=dict(
mean_month_size=mean_month_size,
samples=samples,
histogram=histogram,
max_discrepancy=max_discrepancy,
max_discrepancy_=self.obs30,
circular_measures=circular_measures,
)
def monthsStats(self,truncate=True):
year=365.242199 # days
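        # Truncate the sample to a whole number of years, presumably so that no calendar
        # month is over-represented merely because the observation window ends mid-year.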
if truncate:
delta=self.datetimes[-1]-self.datetimes[0]
if delta.days > year:
delta_=(delta.total_seconds()/(24*60*60))%year
max_date=self.datetimes[-1]-datetime.timedelta(delta_%year)
else:
max_date=self.datetimes[-1]
            try:
                samples=[i.month-1 for i in self.datetimes if i <= max_date]
            except TypeError:
                # comparing naive and timezone-aware datetimes raises TypeError;
                # fall back to filtering one by one and remember the offending values
                samples=[]
                for i in self.datetimes:
                    try:
                        if i <= max_date:
                            samples.append(i.month-1)
                    except TypeError:
                        self.bad_datetimes+=[i]
else:
samples=[i.month-1 for i in self.datetimes]
histogram=n.histogram(samples,bins=list(range(13)))[0]
max_discrepancy=histogram.min()/histogram.max()
        # circular measures
circular_measures=circularStatistics(samples,12)
ar=n.random.randint(0,12,(1000,len(samples)))
cc=n.array([n.histogram(i,12)[0] for i in ar])
cc_=cc.min(1)/cc.max(1)
self.obs12=(cc_.mean(),cc_.std())
self.obs12_=cc_
self.months=dict(
samples=samples,
histogram=histogram,
max_discrepancy=max_discrepancy,
max_discrepancy_=self.obs12,
circular_measures=circular_measures
)
def yearsStats(self):
samples=[i.year for i in self.datetimes]
smin=min(samples)
smax=max(samples)
histogram=n.histogram(samples,bins=list(range(smin,smax+2)))[0]
max_discrepancy=histogram.min()/histogram.max()
self.years=dict(
samples=samples,
histogram=histogram,
max_discrepancy=max_discrepancy,
)
|
|
from __future__ import absolute_import, division, print_function
import sys
import platform
import _pytest._code
import pytest
def runpdb_and_get_report(testdir, source):
p = testdir.makepyfile(source)
result = testdir.runpytest_inprocess("--pdb", p)
reports = result.reprec.getreports("pytest_runtest_logreport")
assert len(reports) == 3, reports # setup/call/teardown
return reports[1]
@pytest.fixture
def custom_pdb_calls():
called = []
# install dummy debugger class and track which methods were called on it
class _CustomPdb(object):
def __init__(self, *args, **kwargs):
called.append("init")
def reset(self):
called.append("reset")
def interaction(self, *args):
called.append("interaction")
_pytest._CustomPdb = _CustomPdb
return called
class TestPDB(object):
@pytest.fixture
def pdblist(self, request):
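        # Replace the debugging plugin's post_mortem() with a recorder so tests can assert
        # whether it was invoked and with which arguments, without ever entering pdb.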
monkeypatch = request.getfixturevalue("monkeypatch")
pdblist = []
def mypdb(*args):
pdblist.append(args)
plugin = request.config.pluginmanager.getplugin('debugging')
monkeypatch.setattr(plugin, 'post_mortem', mypdb)
return pdblist
def test_pdb_on_fail(self, testdir, pdblist):
rep = runpdb_and_get_report(testdir, """
def test_func():
assert 0
""")
assert rep.failed
assert len(pdblist) == 1
tb = _pytest._code.Traceback(pdblist[0][0])
assert tb[-1].name == "test_func"
def test_pdb_on_xfail(self, testdir, pdblist):
rep = runpdb_and_get_report(testdir, """
import pytest
@pytest.mark.xfail
def test_func():
assert 0
""")
assert "xfail" in rep.keywords
assert not pdblist
def test_pdb_on_skip(self, testdir, pdblist):
rep = runpdb_and_get_report(testdir, """
import pytest
def test_func():
pytest.skip("hello")
""")
assert rep.skipped
assert len(pdblist) == 0
def test_pdb_on_BdbQuit(self, testdir, pdblist):
rep = runpdb_and_get_report(testdir, """
import bdb
def test_func():
raise bdb.BdbQuit
""")
assert rep.failed
assert len(pdblist) == 0
def test_pdb_interaction(self, testdir):
p1 = testdir.makepyfile("""
def test_1():
i = 0
assert i == 1
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect(".*def test_1")
child.expect(".*i = 0")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "def test_1" not in rest
self.flush(child)
@staticmethod
def flush(child):
if platform.system() == 'Darwin':
return
if child.isalive():
child.wait()
def test_pdb_unittest_postmortem(self, testdir):
p1 = testdir.makepyfile("""
import unittest
class Blub(unittest.TestCase):
def tearDown(self):
self.filename = None
def test_false(self):
self.filename = 'debug' + '.me'
assert 0
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect('(Pdb)')
child.sendline('p self.filename')
child.sendeof()
rest = child.read().decode("utf8")
assert 'debug.me' in rest
self.flush(child)
def test_pdb_unittest_skip(self, testdir):
"""Test for issue #2137"""
p1 = testdir.makepyfile("""
import unittest
@unittest.skipIf(True, 'Skipping also with pdb active')
class MyTestCase(unittest.TestCase):
def test_one(self):
assert 0
""")
child = testdir.spawn_pytest("-rs --pdb %s" % p1)
child.expect('Skipping also with pdb active')
child.expect('1 skipped in')
child.sendeof()
self.flush(child)
def test_pdb_interaction_capture(self, testdir):
p1 = testdir.makepyfile("""
def test_1():
print("getrekt")
assert False
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect("getrekt")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "getrekt" not in rest
self.flush(child)
def test_pdb_interaction_exception(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def globalfunc():
pass
def test_1():
pytest.raises(ValueError, globalfunc)
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
child.expect(".*def test_1")
child.expect(".*pytest.raises.*globalfunc")
child.expect("(Pdb)")
child.sendline("globalfunc")
child.expect(".*function")
child.sendeof()
child.expect("1 failed")
self.flush(child)
def test_pdb_interaction_on_collection_issue181(self, testdir):
p1 = testdir.makepyfile("""
import pytest
xxx
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
#child.expect(".*import pytest.*")
child.expect("(Pdb)")
child.sendeof()
child.expect("1 error")
self.flush(child)
def test_pdb_interaction_on_internal_error(self, testdir):
testdir.makeconftest("""
def pytest_runtest_protocol():
0/0
""")
p1 = testdir.makepyfile("def test_func(): pass")
child = testdir.spawn_pytest("--pdb %s" % p1)
#child.expect(".*import pytest.*")
child.expect("(Pdb)")
child.sendeof()
self.flush(child)
def test_pdb_interaction_capturing_simple(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def test_1():
i = 0
print ("hello17")
pytest.set_trace()
x = 3
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("x = 3")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf-8")
assert "1 failed" in rest
assert "def test_1" in rest
assert "hello17" in rest # out is captured
self.flush(child)
def test_pdb_set_trace_interception(self, testdir):
p1 = testdir.makepyfile("""
import pdb
def test_1():
pdb.set_trace()
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "reading from stdin while output" not in rest
self.flush(child)
def test_pdb_and_capsys(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def test_1(capsys):
print ("hello1")
pytest.set_trace()
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.send("capsys.readouterr()\n")
child.expect("hello1")
child.sendeof()
child.read()
self.flush(child)
def test_set_trace_capturing_afterwards(self, testdir):
p1 = testdir.makepyfile("""
import pdb
def test_1():
pdb.set_trace()
def test_2():
print ("hello")
assert 0
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.send("c\n")
child.expect("test_2")
child.expect("Captured")
child.expect("hello")
child.sendeof()
child.read()
self.flush(child)
def test_pdb_interaction_doctest(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def function_1():
'''
>>> i = 0
>>> assert i == 1
'''
""")
child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1)
child.expect("(Pdb)")
child.sendline('i')
child.expect("0")
child.expect("(Pdb)")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
self.flush(child)
def test_pdb_interaction_capturing_twice(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def test_1():
i = 0
print ("hello17")
pytest.set_trace()
x = 3
print ("hello18")
pytest.set_trace()
x = 4
""")
child = testdir.spawn_pytest(str(p1))
child.expect("test_1")
child.expect("x = 3")
child.expect("(Pdb)")
child.sendline('c')
child.expect("x = 4")
child.sendeof()
rest = child.read().decode("utf8")
assert "1 failed" in rest
assert "def test_1" in rest
assert "hello17" in rest # out is captured
assert "hello18" in rest # out is captured
self.flush(child)
def test_pdb_used_outside_test(self, testdir):
p1 = testdir.makepyfile("""
import pytest
pytest.set_trace()
x = 5
""")
child = testdir.spawn("%s %s" %(sys.executable, p1))
child.expect("x = 5")
child.sendeof()
self.flush(child)
def test_pdb_used_in_generate_tests(self, testdir):
p1 = testdir.makepyfile("""
import pytest
def pytest_generate_tests(metafunc):
pytest.set_trace()
x = 5
def test_foo(a):
pass
""")
child = testdir.spawn_pytest(str(p1))
child.expect("x = 5")
child.sendeof()
self.flush(child)
def test_pdb_collection_failure_is_shown(self, testdir):
p1 = testdir.makepyfile("""xxx """)
result = testdir.runpytest_subprocess("--pdb", p1)
result.stdout.fnmatch_lines([
"*NameError*xxx*",
"*1 error*",
])
def test_enter_pdb_hook_is_called(self, testdir):
testdir.makeconftest("""
def pytest_enter_pdb(config):
assert config.testing_verification == 'configured'
                print('enter_pdb_hook')
def pytest_configure(config):
config.testing_verification = 'configured'
""")
p1 = testdir.makepyfile("""
import pytest
def test_foo():
pytest.set_trace()
""")
child = testdir.spawn_pytest(str(p1))
child.expect("enter_pdb_hook")
child.send('c\n')
child.sendeof()
self.flush(child)
def test_pdb_custom_cls(self, testdir, custom_pdb_calls):
p1 = testdir.makepyfile("""xxx """)
result = testdir.runpytest_inprocess(
"--pdb", "--pdbcls=_pytest:_CustomPdb", p1)
result.stdout.fnmatch_lines([
"*NameError*xxx*",
"*1 error*",
])
assert custom_pdb_calls == ["init", "reset", "interaction"]
def test_pdb_custom_cls_without_pdb(self, testdir, custom_pdb_calls):
p1 = testdir.makepyfile("""xxx """)
result = testdir.runpytest_inprocess(
"--pdbcls=_pytest:_CustomPdb", p1)
result.stdout.fnmatch_lines([
"*NameError*xxx*",
"*1 error*",
])
assert custom_pdb_calls == []
def test_pdb_custom_cls_with_settrace(self, testdir, monkeypatch):
testdir.makepyfile(custom_pdb="""
class CustomPdb(object):
def set_trace(*args, **kwargs):
                print('custom set_trace>')
""")
p1 = testdir.makepyfile("""
import pytest
def test_foo():
pytest.set_trace()
""")
monkeypatch.setenv('PYTHONPATH', str(testdir.tmpdir))
child = testdir.spawn_pytest("--pdbcls=custom_pdb:CustomPdb %s" % str(p1))
child.expect('custom set_trace>')
if child.isalive():
child.wait()
|
|
from copy import deepcopy
from datetime import date, datetime, timedelta
import pytz
from mock import patch
from werkzeug.exceptions import InternalServerError
from rdr_service import config
from rdr_service.api_util import HEALTHPRO, PTC
from rdr_service.dao import database_factory
from rdr_service.dao.deceased_report_dao import DeceasedReportDao
from rdr_service.model.api_user import ApiUser
from rdr_service.model.deceased_report import DeceasedReport
from rdr_service.model.participant_summary import ParticipantSummary
from rdr_service.participant_enums import DeceasedNotification, DeceasedReportDenialReason, DeceasedReportStatus,\
DeceasedStatus, SuspensionStatus, WithdrawalStatus
from tests.helpers.unittest_base import BaseTestCase
class DeceasedReportTestBase(BaseTestCase):
def overwrite_test_user_roles(self, roles):
new_user_info = deepcopy(config.getSettingJson(config.USER_INFO))
new_user_info['[email protected]']['roles'] = roles
self.temporarily_override_config_setting(config.USER_INFO, new_user_info)
@staticmethod
def get_deceased_report_id(response):
return int(response['identifier'][0]['value'])
class DeceasedReportApiTest(DeceasedReportTestBase):
def setUp(self):
super(DeceasedReportApiTest, self).setUp()
hpo = self.data_generator.create_database_hpo()
self.paired_participant_without_summary = self.data_generator.create_database_participant(hpoId=hpo.hpoId)
self.paired_participant_with_summary = self.data_generator.create_database_participant(hpoId=hpo.hpoId)
self.data_generator.create_database_participant_summary(participant=self.paired_participant_with_summary)
self.unpaired_participant_with_summary = self.data_generator.create_database_participant()
self.data_generator.create_database_participant_summary(participant=self.unpaired_participant_with_summary)
def post_report(self, report_json, participant_id=None, expected_status=200):
if participant_id is None:
participant_id = self.paired_participant_without_summary.participantId
return self.send_post(f'Participant/P{participant_id}/Observation',
request_data=report_json,
expected_status=expected_status)
def post_report_review(self, review_json, report_id, participant_id, expected_status=200):
return self.send_post(f'Participant/P{participant_id}/Observation/{report_id}/Review',
request_data=review_json,
expected_status=expected_status)
def get_report_from_db(self, report_id):
# The report might already be in the session, resetting just in case to make sure we get the latest data
self.session.commit()
self.session.close()
return self.session.query(DeceasedReport).filter(DeceasedReport.id == report_id).one()
def get_participant_summary_from_db(self, participant_id):
# The participant summary exists in the session, so we need to reset the session to query the database for
# new values
self.session.commit()
self.session.close()
return self.session.query(ParticipantSummary).filter(
ParticipantSummary.participantId == participant_id
).one()
@staticmethod
def build_deceased_report_json(status='preliminary', date_of_death='2020-01-01',
notification=DeceasedNotification.EHR, notification_other=None, user_system='system',
user_name='name', authored='2020-01-01T00:00:00Z', reporter_name='Jane Doe',
reporter_relation='SPOUSE', reporter_phone=None,
reporter_email=None, cause_of_death='Heart disease'):
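        # Builds the FHIR Observation payload these tests send for a deceased report:
        # 'status' carries the report state, 'effectiveDateTime' the date of death,
        # 'performer' the reporting API user, 'encounter' the notification method (with
        # reporter details under 'extension' for next-of-kin notifications),
        # 'valueString' the cause of death and 'issued' the authored timestamp.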
report_json = {
'code': {
'text': 'DeceasedReport'
},
'status': status,
'effectiveDateTime': date_of_death,
'performer': [{
'type': user_system,
'reference': user_name
}],
'valueString': cause_of_death,
'issued': authored
}
encounter_json = {
'reference': str(notification)
}
if notification == DeceasedNotification.OTHER:
encounter_json['display'] = notification_other
report_json['encounter'] = encounter_json
if not (notification == DeceasedNotification.EHR or notification == DeceasedNotification.OTHER):
extensions = [{
'url': 'http://hl7.org/fhir/ValueSet/relatedperson-relationshiptype',
'valueCode': reporter_relation
}]
if reporter_email:
extensions.append({
'url': 'https://www.pmi-ops.org/email-address',
'valueString': reporter_email
})
if reporter_phone:
extensions.append({
'url': 'https://www.pmi-ops.org/phone-number',
'valueString': reporter_phone
})
report_json['extension'] = [{
'url': 'https://www.pmi-ops.org/deceased-reporter',
'valueHumanName': {
'text': reporter_name,
'extension': extensions
}
}]
return report_json
@staticmethod
def build_report_review_json(user_system='system', user_name='name', authored='2020-01-01T00:00:00Z',
status='final', denial_reason=DeceasedReportDenialReason.MARKED_IN_ERROR,
denial_reason_other='Another reason', date_of_death='2020-01-01'):
report_json = {
'code': {
'text': 'DeceasedReport'
},
'status': status,
'effectiveDateTime': date_of_death,
'performer': [{
'type': user_system,
'reference': user_name
}],
'issued': authored
}
if status == 'cancelled':
denial_reference = {
'reference': str(denial_reason)
}
if denial_reason == DeceasedReportDenialReason.OTHER:
denial_reference['display'] = denial_reason_other
report_json['extension'] = [{
'url': 'https://www.pmi-ops.org/observation-denial-reason',
'valueReference': denial_reference
}]
return report_json
def assertReportResponseMatches(self, expected, actual):
del actual['identifier']
del actual['subject']
del actual['resourceType']
if 'performer' in actual:
for performer_json in actual['performer']:
del performer_json['extension']
self.assertJsonResponseMatches(expected, actual, strip_tz=False)
def test_creating_minimal_deceased_report(self):
report_json = self.build_deceased_report_json(
status='preliminary',
date_of_death='2020-01-02',
notification=DeceasedNotification.EHR,
user_system='https://example.com',
user_name='[email protected]',
authored='2020-01-05T13:43:21Z',
cause_of_death='Heart disease'
)
response = self.post_report(report_json, participant_id=self.paired_participant_with_summary.participantId)
# Check data saved to the database
report_id = self.get_deceased_report_id(response)
created_report = self.get_report_from_db(report_id)
self.assertEqual(DeceasedReportStatus.PENDING, created_report.status)
self.assertEqual(date(2020, 1, 2), created_report.dateOfDeath)
self.assertEqual(DeceasedNotification.EHR, created_report.notification)
self.assertEqual('https://example.com', created_report.author.system)
self.assertEqual('[email protected]', created_report.author.username)
self.assertEqual(datetime(2020, 1, 5, 13, 43, 21), created_report.authored)
self.assertEqual('Heart disease', created_report.causeOfDeath)
# Check participant summary data
participant_summary = self.get_participant_summary_from_db(
participant_id=self.paired_participant_with_summary.participantId
)
self.assertEqual(DeceasedStatus.PENDING, participant_summary.deceasedStatus)
self.assertEqual(datetime(2020, 1, 5, 13, 43, 21), participant_summary.deceasedAuthored)
self.assertEqual(date(2020, 1, 2), participant_summary.dateOfDeath)
# Check response for extra performer extension
performer_extension = response['performer'][0]['extension'][0]
self.assertEqual('https://www.pmi-ops.org/observation/authored', performer_extension['url'])
self.assertEqual('2020-01-05T13:43:21Z', performer_extension['valueDateTime'])
# Check that the rest of the response matches what was sent
self.assertReportResponseMatches(report_json, response)
def test_other_notification_method(self):
report_json = self.build_deceased_report_json(
notification=DeceasedNotification.OTHER,
notification_other='Another reason'
)
response = self.post_report(report_json)
report_id = self.get_deceased_report_id(response)
created_report = self.get_report_from_db(report_id)
self.assertEqual(DeceasedNotification.OTHER, created_report.notification)
self.assertEqual('Another reason', created_report.notificationOther)
self.assertReportResponseMatches(report_json, response)
def test_reporter_info(self):
report_json = self.build_deceased_report_json(
notification=DeceasedNotification.NEXT_KIN_SUPPORT,
reporter_name='Jane Doe',
reporter_relation='SPOUSE',
reporter_phone='123-456-7890',
reporter_email='[email protected]'
)
response = self.post_report(report_json)
report_id = self.get_deceased_report_id(response)
created_report = self.get_report_from_db(report_id)
self.assertEqual(DeceasedNotification.NEXT_KIN_SUPPORT, created_report.notification)
self.assertEqual('Jane Doe', created_report.reporterName)
self.assertEqual('SPOUSE', created_report.reporterRelationship)
self.assertEqual('123-456-7890', created_report.reporterPhone)
self.assertEqual('[email protected]', created_report.reporterEmail)
self.assertReportResponseMatches(report_json, response)
def test_naive_issued_timestamp(self):
report_json = self.build_deceased_report_json(
authored='2020-01-05T13:43:21'
)
response = self.post_report(report_json)
report_id = self.get_deceased_report_id(response)
created_report = self.get_report_from_db(report_id)
self.assertEqual(datetime(2020, 1, 5, 13, 43, 21), created_report.authored)
self.assertEqual('2020-01-05T13:43:21Z', response['issued'])
def test_cst_issued_timestamp(self):
report_json = self.build_deceased_report_json(
authored='2020-01-05T13:43:21-06:00'
)
response = self.post_report(report_json)
report_id = self.get_deceased_report_id(response)
created_report = self.get_report_from_db(report_id)
self.assertEqual(datetime(2020, 1, 5, 19, 43, 21), created_report.authored)
self.assertEqual('2020-01-05T19:43:21Z', response['issued'])
def test_post_with_invalid_fields(self):
# Check missing status response
report_json = self.build_deceased_report_json()
del report_json['status']
self.post_report(report_json, expected_status=400)
# Check unauthorized status when creating
report_json = self.build_deceased_report_json(status='final')
self.post_report(report_json, expected_status=400)
# Check missing code response
report_json = self.build_deceased_report_json()
del report_json['code']
self.post_report(report_json, expected_status=400)
# Check missing notification data response
report_json = self.build_deceased_report_json()
del report_json['encounter']
self.post_report(report_json, expected_status=400)
# Check missing 'other text' when notification is OTHER
report_json = self.build_deceased_report_json(notification=DeceasedNotification.OTHER)
del report_json['encounter']['display']
self.post_report(report_json, expected_status=400)
# Check for different states of missing author information
report_json = self.build_deceased_report_json()
del report_json['performer']
self.post_report(report_json, expected_status=400)
report_json = self.build_deceased_report_json()
del report_json['performer'][0]['type']
self.post_report(report_json, expected_status=400)
report_json = self.build_deceased_report_json()
del report_json['performer'][0]['reference']
self.post_report(report_json, expected_status=400)
# Check for missing authored date (referred to as 'issued' for FHIR compliance)
report_json = self.build_deceased_report_json()
del report_json['issued']
self.post_report(report_json, expected_status=400)
# Check for response when missing pieces of reporter information
report_json = self.build_deceased_report_json(notification=DeceasedNotification.NEXT_KIN_SUPPORT)
del report_json['extension']
self.post_report(report_json, expected_status=400)
report_json = self.build_deceased_report_json(notification=DeceasedNotification.NEXT_KIN_SUPPORT)
del report_json['extension'][0]['valueHumanName']['text']
self.post_report(report_json, expected_status=400)
report_json = self.build_deceased_report_json(notification=DeceasedNotification.NEXT_KIN_SUPPORT)
del report_json['extension'][0]['valueHumanName']['extension'][0] # deleting association (only required one)
self.post_report(report_json, expected_status=400)
# Try invalid status
report_json = self.build_deceased_report_json(status='unknown')
self.post_report(report_json, expected_status=400)
# Check for response when trying to use future date for authored
three_days_from_now = datetime.now() + timedelta(days=3)
report_json = self.build_deceased_report_json(authored=three_days_from_now.isoformat())
self.post_report(report_json, expected_status=400)
# Check for response when trying to use future date for date of death
three_days_from_now = date.today() + timedelta(days=3)
report_json = self.build_deceased_report_json(date_of_death=three_days_from_now.isoformat())
self.post_report(report_json, expected_status=400)
def test_post_with_only_required_fields(self):
report_json = self.build_deceased_report_json()
del report_json['effectiveDateTime']
del report_json['valueString']
response = self.post_report(report_json, participant_id=self.paired_participant_with_summary.participantId)
del response['effectiveDateTime']
self.assertReportResponseMatches(report_json, response)
participant_summary = self.get_participant_summary_from_db(
participant_id=self.paired_participant_with_summary.participantId
)
self.assertIsNone(participant_summary.dateOfDeath)
def test_other_roles_not_allowed_to_create(self):
report_json = self.build_deceased_report_json()
self.overwrite_test_user_roles(['testing'])
self.post_report(report_json, expected_status=403)
def test_health_pro_can_create(self):
report_json = self.build_deceased_report_json()
self.overwrite_test_user_roles([HEALTHPRO])
self.post_report(report_json)
def test_ptsc_can_create(self):
report_json = self.build_deceased_report_json()
self.overwrite_test_user_roles([PTC])
self.post_report(report_json)
def test_report_auto_approve(self):
# Deceased reports made for unpaired participants don't need second approval.
# So these reports should be approved upon creation.
unpaired_participant_id = self.unpaired_participant_with_summary.participantId
report_json = self.build_deceased_report_json()
response = self.post_report(report_json, participant_id=unpaired_participant_id)
report_id = self.get_deceased_report_id(response)
created_report = self.get_report_from_db(report_id)
self.assertEqual(DeceasedReportStatus.APPROVED, created_report.status)
self.assertEqual('final', response['status'])
participant_summary = self.get_participant_summary_from_db(participant_id=unpaired_participant_id)
self.assertEqual(DeceasedStatus.APPROVED, participant_summary.deceasedStatus)
self.assertEqual(datetime(2020, 1, 1), participant_summary.deceasedAuthored)
def create_pending_deceased_report(self, participant_id=None, **kwargs):
if participant_id is None:
participant_id = self.paired_participant_without_summary.participantId
return self.data_generator.create_database_deceased_report(participantId=participant_id, **kwargs)
def test_multiple_pending_reports_not_allowed(self):
report = self.create_pending_deceased_report()
# Try creating another deceased report and check for Conflict status code
report_json = self.build_deceased_report_json()
self.post_report(report_json, participant_id=report.participantId, expected_status=409)
@patch('rdr_service.dao.deceased_report_dao.logging')
def test_concurrent_reports_not_allowed(self, mock_logging):
"""Make sure two simultaneous requests for creating deceased reports can't both complete"""
# We need to ensure only one pending or active report can exist for a participant.
# To do that, we first check that there are no other pending or active reports, and then we insert one.
# Without a proper strategy, two separate processes can both get past the check
# and then both be allowed to insert.
        # There's an implementation for obtaining a per-participant lock when inserting a report. If a session
        # checks that it can move forward with inserting a report, then until that session closes no other
        # session may pass the same check for that participant.
participant = self.data_generator.create_database_participant()
another_participant = self.data_generator.create_database_participant()
self.assertTrue(
DeceasedReportDao._can_insert_active_report(self.session, participant.participantId),
"The first session to check to see if it can insert should be allowed"
)
with database_factory.get_database().session() as new_session:
# Using a separate connection/transaction on the database
self.assertTrue(
DeceasedReportDao._can_insert_active_report(new_session, another_participant.participantId),
"A separate transaction should be allowed to concurrently make a report for another participant"
)
with database_factory.get_database().session() as new_session:
# Using a separate connection/transaction on the database
with self.assertRaises(InternalServerError):
DeceasedReportDao._can_insert_active_report(new_session, participant.participantId, 1)
mock_logging.error.assert_called_with(
f'Database error retrieving named lock for P{participant.participantId}, received result: "0"'
)
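    # Hedged sketch of the kind of per-participant guard exercised above (not the actual
    # DeceasedReportDao implementation): a named database lock keeps two sessions from
    # passing the "no active report exists" check at the same time, e.g. with MySQL:
    #
    #   result = session.execute(
    #       "SELECT GET_LOCK(:name, :timeout)",
    #       {'name': f'deceased-report-P{participant_id}', 'timeout': 1},
    #   ).scalar()
    #   if result != 1:   # 0 -> lock wait timed out, NULL -> error
    #       raise InternalServerError('could not obtain deceased report lock')
    #   # the lock is released when the connection closes or RELEASE_LOCK() is called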
def test_approving_report(self):
report = self.create_pending_deceased_report(
participant_id=self.paired_participant_with_summary.participantId,
authored='2020-06-01T00:00:00Z',
)
review_json = self.build_report_review_json(
status='final',
authored='2020-07-01T00:00:00Z',
user_system='https://example.com',
user_name='[email protected]'
)
review_response = self.post_report_review(review_json, report.id, report.participantId)
created_report = self.get_report_from_db(report.id)
self.assertEqual(DeceasedReportStatus.APPROVED, created_report.status)
self.assertEqual(datetime(2020, 7, 1), created_report.reviewed)
self.assertEqual('https://example.com', created_report.reviewer.system)
self.assertEqual('[email protected]', created_report.reviewer.username)
self.assertEqual('final', review_response['status'])
participant_summary = self.get_participant_summary_from_db(participant_id=report.participantId)
self.assertEqual(DeceasedStatus.APPROVED, participant_summary.deceasedStatus)
self.assertEqual(datetime(2020, 7, 1), participant_summary.deceasedAuthored)
# Check create/approve performer dates in response
author_extension_json = review_response['performer'][0]['extension'][0]
self.assertEqual('https://www.pmi-ops.org/observation/authored', author_extension_json['url'])
self.assertEqual('2020-06-01T00:00:00Z', author_extension_json['valueDateTime'])
reviewer_extension_json = review_response['performer'][1]['extension'][0]
self.assertEqual('https://www.pmi-ops.org/observation/reviewed', reviewer_extension_json['url'])
self.assertEqual('2020-07-01T00:00:00Z', reviewer_extension_json['valueDateTime'])
def test_approving_can_overwrite_date_of_death(self):
participant_id = self.paired_participant_with_summary.participantId
report_json = self.build_deceased_report_json(date_of_death='2020-01-01')
response = self.post_report(report_json, participant_id=participant_id)
report_id = self.get_deceased_report_id(response)
participant_summary = self.get_participant_summary_from_db(participant_id=participant_id)
self.assertEqual(date(2020, 1, 1), participant_summary.dateOfDeath)
review_json = self.build_report_review_json(
date_of_death='2019-06-01'
)
self.post_report_review(review_json, report_id, participant_id)
created_report = self.get_report_from_db(report_id)
self.assertEqual(date(2019, 6, 1), created_report.dateOfDeath)
participant_summary = self.get_participant_summary_from_db(participant_id=participant_id)
self.assertEqual(date(2019, 6, 1), participant_summary.dateOfDeath)
def test_only_healthpro_can_review(self):
report = self.create_pending_deceased_report()
review_json = self.build_report_review_json()
self.overwrite_test_user_roles(['testing'])
self.post_report_review(review_json, report.id, report.participantId, expected_status=403)
self.overwrite_test_user_roles([PTC])
self.post_report_review(review_json, report.id, report.participantId, expected_status=403)
self.overwrite_test_user_roles([HEALTHPRO])
self.post_report_review(review_json, report.id, report.participantId, expected_status=200)
def test_report_denial(self):
report = self.create_pending_deceased_report(
participant_id=self.paired_participant_with_summary.participantId
)
review_json = self.build_report_review_json(
status='cancelled',
denial_reason=DeceasedReportDenialReason.OTHER,
denial_reason_other='Another reason'
)
review_response = self.post_report_review(review_json, report.id, report.participantId)
created_report = self.get_report_from_db(report.id)
self.assertEqual(DeceasedReportStatus.DENIED, created_report.status)
self.assertEqual(DeceasedReportDenialReason.OTHER, created_report.denialReason)
self.assertEqual('Another reason', created_report.denialReasonOther)
participant_summary = self.get_participant_summary_from_db(participant_id=report.participantId)
self.assertEqual(DeceasedStatus.UNSET, participant_summary.deceasedStatus)
self.assertIsNone(participant_summary.deceasedAuthored)
self.assertIsNone(participant_summary.dateOfDeath)
# Check that the denial reason comes through on the response
self.assertEqual('cancelled', review_response['status'])
denial_extension = review_response['extension'][0]['valueReference']
self.assertEqual('OTHER', denial_extension['reference'])
self.assertEqual('Another reason', denial_extension['display'])
def test_pending_report_not_allowed_when_approved_report_exists(self):
report = self.create_pending_deceased_report()
review_json = self.build_report_review_json()
self.post_report_review(review_json, report.id, report.participantId)
created_report = self.get_report_from_db(report.id)
self.assertEqual(DeceasedReportStatus.APPROVED, created_report.status,
"Test is built assuming an APPROVED report would be created")
# Try creating another deceased report and check for Conflict status code
report_json = self.build_deceased_report_json()
self.post_report(report_json, participant_id=report.participantId, expected_status=409)
def test_multiple_denied_reports(self):
report = self.create_pending_deceased_report()
review_json = self.build_report_review_json(status='cancelled')
self.post_report_review(review_json, report.id, report.participantId)
# Build another report and deny it too
report = self.create_pending_deceased_report(participant_id=report.participantId)
self.post_report_review(review_json, report.id, report.participantId)
# Try creating another deceased report, expecting it to work
report = self.create_pending_deceased_report(participant_id=report.participantId)
created_report = self.get_report_from_db(report.id)
self.assertEqual(DeceasedReportStatus.PENDING, created_report.status)
def test_approving_denied_report_not_allowed(self):
report = self.create_pending_deceased_report()
review_json = self.build_report_review_json(status='cancelled')
self.post_report_review(review_json, report.id, report.participantId)
# Try approving the denied report
review_json = self.build_report_review_json(status='final')
self.post_report_review(review_json, report.id, report.participantId, expected_status=400)
def test_denying_approved_report_not_allowed(self):
report = self.create_pending_deceased_report()
review_json = self.build_report_review_json(status='final')
self.post_report_review(review_json, report.id, report.participantId)
# Try approving the denied report
review_json = self.build_report_review_json(status='cancelled')
self.post_report_review(review_json, report.id, report.participantId, expected_status=400)
def test_api_users_not_duplicated(self):
report = self.create_pending_deceased_report()
created_report = self.get_report_from_db(report.id)
review_json = self.build_report_review_json(
user_system=created_report.author.system,
user_name=created_report.author.username
)
self.post_report_review(review_json, report.id, report.participantId)
self.assertEqual(1, self.session.query(ApiUser).count())
def test_participant_summary_fields_redacted(self):
"""Should still see contact information, but contact method should be updated for deceased participants"""
participant = self.data_generator.create_database_participant()
summary_obj = self.data_generator.create_database_participant_summary(
participant=participant,
phoneNumber='123-456-7890',
loginPhoneNumber='1-800-555-5555',
email='[email protected]',
streetAddress='123 Elm',
streetAddress2='Unit A',
city='Eureka',
zipCode='12345'
)
participant_id = participant.participantId
report_json = self.build_deceased_report_json(authored="2020-01-01T00:00:00Z")
response = self.post_report(report_json, participant_id=participant_id)
report_id = self.get_deceased_report_id(response)
created_report = self.get_report_from_db(report_id)
self.assertEqual(DeceasedReportStatus.APPROVED, created_report.status,
"Test is built assuming an APPROVED report would be created")
summary_response = self.send_get(f'Participant/P{participant_id}/Summary')
for field_name, value in [
('phoneNumber', summary_obj.phoneNumber),
('loginPhoneNumber', summary_obj.loginPhoneNumber),
('email', summary_obj.email),
('streetAddress', summary_obj.streetAddress),
('streetAddress2', summary_obj.streetAddress2),
('city', summary_obj.city),
('zipCode', summary_obj.zipCode)
]:
self.assertEqual(value, summary_response[field_name])
self.assertEqual('NO_CONTACT', summary_response['recontactMethod'])
def test_participant_summary_redact_time_window(self):
# Fields should still be available for a short time window
participant = self.data_generator.create_database_participant()
self.data_generator.create_database_participant_summary(
participant=participant,
phoneNumber='123-456-7890'
)
participant_id = participant.participantId
yesterday = datetime.now() - timedelta(days=1)
report_json = self.build_deceased_report_json(authored=yesterday.isoformat())
response = self.post_report(report_json, participant_id=participant_id)
report_id = self.get_deceased_report_id(response)
created_report = self.get_report_from_db(report_id)
self.assertEqual(DeceasedReportStatus.APPROVED, created_report.status,
"Test is built assuming an APPROVED report would be created")
summary_response = self.send_get(f'Participant/P{participant_id}/Summary')
self.assertEqual('123-456-7890', summary_response['phoneNumber'])
self.assertEqual('NO_CONTACT', summary_response['recontactMethod'])
class ParticipantDeceasedReportApiTest(DeceasedReportTestBase):
def test_report_list_for_participant(self):
participant = self.data_generator.create_database_participant()
self.data_generator.create_database_deceased_report(
participantId=participant.participantId,
status=DeceasedReportStatus.DENIED,
reviewed=datetime(2020, 3, 18, tzinfo=pytz.utc)
)
self.data_generator.create_database_deceased_report(
participantId=participant.participantId,
status=DeceasedReportStatus.DENIED,
reviewed=datetime(2020, 2, 27, tzinfo=pytz.utc)
)
self.data_generator.create_database_deceased_report(
participantId=participant.participantId,
status=DeceasedReportStatus.DENIED,
reviewed=datetime(2020, 4, 1, tzinfo=pytz.utc)
)
report_list_response = self.send_get(f'Participant/P{participant.participantId}/DeceasedReport')
first_report = report_list_response[0] # Most recent report
self.assertEqual('cancelled', first_report['status'])
self.assertEqual('2020-04-01T00:00:00Z', first_report['issued'])
second_report = report_list_response[1]
self.assertEqual('cancelled', second_report['status'])
self.assertEqual('2020-03-18T00:00:00Z', second_report['issued'])
third_report = report_list_response[2]
self.assertEqual('cancelled', third_report['status'])
self.assertEqual('2020-02-27T00:00:00Z', third_report['issued'])
class SearchDeceasedReportApiTest(DeceasedReportTestBase):
def setUp(self):
super(SearchDeceasedReportApiTest, self).setUp()
# Shortening the following lines
create_participant_func = self.data_generator.create_database_participant
create_deceased_report_func = self.data_generator.create_database_deceased_report
unpaired_participant_id_1 = create_participant_func().participantId
self.unpaired_1_report_id = create_deceased_report_func(
participantId=unpaired_participant_id_1,
status=DeceasedReportStatus.PENDING,
authored=datetime(2020, 4, 1)
).id
unpaired_participant_id_2 = create_participant_func().participantId
self.unpaired_2_report_id = create_deceased_report_func(
participantId=unpaired_participant_id_2,
status=DeceasedReportStatus.DENIED,
reviewed=datetime(2020, 1, 5)
).id
unpaired_participant_id_3 = create_participant_func().participantId
self.unpaired_3_report_id = create_deceased_report_func(
participantId=unpaired_participant_id_3,
status=DeceasedReportStatus.PENDING,
authored=datetime(2020, 2, 18)
).id
unpaired_suspended_participant_id = create_participant_func(
suspensionStatus=SuspensionStatus.NO_CONTACT
).participantId
create_deceased_report_func(
participantId=unpaired_suspended_participant_id,
status=DeceasedReportStatus.PENDING,
authored=datetime(2020, 2, 18)
)
test_org = self.data_generator.create_database_organization(externalId='TEST')
test_participant_1_id = create_participant_func(organizationId=test_org.organizationId).participantId
self.test_1_report_id = create_deceased_report_func(
participantId=test_participant_1_id,
status=DeceasedReportStatus.PENDING,
authored=datetime(2020, 12, 5)
).id
test_participant_2_id = create_participant_func(organizationId=test_org.organizationId).participantId
self.test_2_report_id = create_deceased_report_func(
participantId=test_participant_2_id,
status=DeceasedReportStatus.DENIED,
authored=datetime(2018, 1, 1), # Setting authored date in the past to check reviewed is used when ordering
reviewed=datetime(2020, 8, 9)
).id
test_participant_3_id = create_participant_func(organizationId=test_org.organizationId).participantId
self.test_3_report_id = create_deceased_report_func(
participantId=test_participant_3_id,
status=DeceasedReportStatus.APPROVED,
reviewed=datetime(2020, 2, 3)
).id
test_withdrawn_participant_id = create_participant_func(
organizationId=test_org.organizationId,
withdrawalStatus=WithdrawalStatus.NO_USE
).participantId
create_deceased_report_func(
participantId=test_withdrawn_participant_id,
status=DeceasedReportStatus.PENDING,
reviewed=datetime(2020, 2, 3)
)
other_org = self.data_generator.create_database_organization(externalId='')
other_participant_1_id = create_participant_func(organizationId=other_org.organizationId).participantId
self.other_1_report_id = create_deceased_report_func(
participantId=other_participant_1_id,
status=DeceasedReportStatus.DENIED,
reviewed=datetime(2020, 5, 19)
).id
other_participant_2_id = create_participant_func(organizationId=other_org.organizationId).participantId
self.other_2_report_id = create_deceased_report_func(
participantId=other_participant_2_id,
status=DeceasedReportStatus.DENIED,
reviewed=datetime(2020, 9, 5)
).id
other_participant_3_id = create_participant_func(organizationId=other_org.organizationId).participantId
self.other_3_report_id = create_deceased_report_func(
participantId=other_participant_3_id,
status=DeceasedReportStatus.APPROVED,
reviewed=datetime(2020, 9, 7)
).id
def assertListResponseMatches(self, expected_report_ids, actual_json):
self.assertEqual(len(expected_report_ids), len(actual_json), "Unexpected number of reports returned")
for index in range(len(expected_report_ids)):
expected_id = expected_report_ids[index]
report_json = actual_json[index]
self.assertEqual(int(expected_id), self.get_deceased_report_id(report_json), 'Report id mismatch')
def test_searching_api_by_status(self):
self.assertListResponseMatches([
self.other_2_report_id, # Authored 09/05
self.test_2_report_id, # Authored 08/09
self.other_1_report_id, # Authored 05/19
self.unpaired_2_report_id # Authored 01/05
], self.send_get(f'DeceasedReports?status=cancelled'))
# This also implicitly checks that the suspended and withdrawn participants are left out
self.assertListResponseMatches([
self.test_1_report_id, # Authored 12/05
self.unpaired_1_report_id, # Authored 04/01
self.unpaired_3_report_id # Authored 02/18
], self.send_get(f'DeceasedReports?status=preliminary'))
def test_searching_api_by_organization(self):
# This also implicitly checks that the withdrawn participant is left out
self.assertListResponseMatches([
self.test_1_report_id, # Authored 12/05
self.test_2_report_id, # Authored 08/09
self.test_3_report_id # Authored 02/03
], self.send_get(f'DeceasedReports?org_id=TEST'))
# This also implicitly checks that the suspended participant is left out
self.assertListResponseMatches([
self.unpaired_1_report_id, # Authored 04/01
self.unpaired_3_report_id, # Authored 02/18
self.unpaired_2_report_id # Authored 01/05
], self.send_get(f'DeceasedReports?org_id=UNSET'))
def test_searching_api_by_org_and_status(self):
self.assertListResponseMatches(
[],
self.send_get(f'DeceasedReports?org_id=OTHER&status=preliminary'))
self.assertListResponseMatches([
self.unpaired_1_report_id, # Authored 04/01
self.unpaired_3_report_id # Authored 02/18
], self.send_get(f'DeceasedReports?org_id=UNSET&status=preliminary'))
    def test_searching_api_restricted_to_healthpro_role(self):
self.overwrite_test_user_roles(['TEST'])
self.send_get(f'DeceasedReports', expected_status=403)
self.overwrite_test_user_roles([PTC])
self.send_get(f'DeceasedReports', expected_status=403)
self.overwrite_test_user_roles([HEALTHPRO])
self.send_get(f'DeceasedReports', expected_status=200)
|
|
"""
Parser for TOPOLOGY.xml
----------------------------
PDB2PQR -- An automated pipeline for the setup, execution, and analysis of
Poisson-Boltzmann electrostatics calculations
Copyright (c) 2002-2010, Jens Erik Nielsen, University College Dublin;
Nathan A. Baker, Washington University in St. Louis; Paul Czodrowski &
Gerhard Klebe, University of Marburg
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of University College Dublin, Washington University in
St. Louis, or University of Marburg nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------
"""
__date__ = "12 November 2008"
__author__ = "Nathan Baker, Yong Huang"
TOPOLOGYPATH = "TOPOLOGY.xml"
from sys import stderr
from xml import sax
class TopologyHandler(sax.ContentHandler):
""" Handler for XML-based topology files. Assumes the following hierarchy of tags:
topology
-->residue
|-->reference
|-->titrationstate
|-->tautomer
|-->conformer
"""
def __init__(self):
self.currentElement = None
self.currentAtom = None
self.currentDihedral = None
self.currentReference = None
self.currentResidue = None
self.currentTitrationState = None
self.currentTautomer = None
self.currentConformer = None
self.currentConformerAdd = None
self.currentConformerRemove = None
self.residues = []
self.incomplete = 0
def startElement(self, tagName, attributes):
if not self.incomplete:
#print "Processing %s start tag" % tagName
if tagName == "topology":
pass
elif tagName == "residue":
if self.currentResidue != None:
print "** Overwriting current TopologyResidue object!"
self.currentResidue = TopologyResidue(self)
elif tagName == "reference":
if self.currentReference != None:
print "** Overwriting current TopologyReference object!"
self.currentReference = TopologyReference(self.currentResidue)
elif tagName == "titrationstate":
if self.currentTitrationState != None:
print "** Overwriting current TopologyTitrationState object!"
self.currentTitrationState = TopologyTitrationState(self.currentResidue)
elif tagName == "tautomer":
if self.currentTautomer != None:
print "** Overwriting current Tautomer object!"
self.currentTautomer = TopologyTautomer(self.currentTitrationState)
elif tagName == "conformer":
if self.currentConformer != None:
print "** Overwriting current Conformer object!"
self.currentConformer = TopologyConformer(self.currentTautomer)
elif tagName == "name":
self.currentElement = tagName
elif tagName == "atom":
if self.currentConformerAdd != None:
#print " Adding atom to conformerAdd..."
self.currentAtom = TopologyAtom(self.currentConformerAdd)
elif self.currentConformerRemove != None:
#print " Adding atom to conformerRemove..."
self.currentAtom = TopologyAtom(self.currentConformerRemove)
elif self.currentReference != None:
#print " Adding atom to reference..."
self.currentAtom = TopologyAtom(self.currentReference)
else:
print "** Don't know what to do with this atom!"
elif tagName == "x":
self.currentElement = tagName
elif tagName == "y":
self.currentElement = tagName
elif tagName == "z":
self.currentElement = tagName
elif tagName == "bond":
self.currentElement = tagName
elif tagName == "altname":
self.currentElement = tagName
elif tagName == "dihedral":
self.currentElement = tagName
if self.currentConformerAdd != None:
#print " Adding dihedral to conformerAdd..."
self.currentDihedral = TopologyDihedral(self.currentConformerAdd)
elif self.currentConformerRemove != None:
#print " Adding dihedral to conformerRemove..."
self.currentDihedral = TopologyDihedral(self.currentConformerRemove)
elif self.currentReference != None:
#print " Adding dihedral to reference..."
self.currentDihedral = TopologyDihedral(self.currentReference)
else:
print "** Don't know what to do with this dihedral!"
elif tagName == "add":
self.currentConformerAdd = TopologyConformerAdd(self.currentConformer)
elif tagName == "remove":
#print "currentConformer: %s" % (self.currentConformer)
self.currentConformerRemove = TopologyConformerRemove(self.currentConformer)
elif tagName == "incomplete":
#print "incomplete state encounted, skipping!"
self.incomplete = 1
else:
print "** NOT handling %s start tag" % tagName
def endElement(self, tagName):
if not self.incomplete:
#print "Processing %s end tag" % tagName
            self.currentElement = None
if tagName == "x":
pass
elif tagName == "y":
pass
elif tagName == "z":
pass
elif tagName == "name":
pass
elif tagName == "bond":
pass
elif tagName == "altname":
pass
elif tagName == "atom":
self.currentAtom = None
elif tagName == "dihedral":
self.currentDihedral = None
elif tagName == "reference":
self.currentReference = None
elif tagName == "add":
self.currentConformerAdd = None
elif tagName == "remove":
self.currentConformerRemove = None
elif tagName == "titrationstate":
residue = self.currentTitrationState.topologyResidue
#print "Residue %s has titration states: %s" % (residue.name, residue.titrationStates)
self.currentTitrationState = None
elif tagName == "conformer":
tautomer = self.currentConformer.topologyTautomer
#print "Tautomer %s has conformers: %s" % (tautomer.name, tautomer.conformers)
self.currentConformer = None
elif tagName == "tautomer":
titrationState = self.currentTautomer.topologyTitrationState
#print "Titration state %s has tautomers: %s" % (titrationState.name, titrationState.tautomers)
self.currentTautomer = None
elif tagName == "residue":
self.currentResidue = None
elif tagName == "topology":
pass
else:
print "** NOT handling %s end tag" % tagName
else:
if tagName == "incomplete":
self.incomplete = 0
def characters(self, text):
if text.isspace(): return
if not self.incomplete:
if self.currentElement == "name":
# Processing a name based on current context
if self.currentAtom != None:
#print " Setting atom name to %s" % text
self.currentAtom.name = text
elif self.currentConformer != None:
#print " Setting conformer name to %s" % text
self.currentConformer.name = text
elif self.currentTautomer != None:
#print " Setting tautomer name to %s" % text
self.currentTautomer.name = text
elif self.currentTitrationState != None:
#print " Setting titration state name to %s" % text
self.currentTitrationState.name = text
elif self.currentResidue != None:
#print " Setting residue name to %s" % text
self.currentResidue.name = text
else:
print " *** Don't know what to do with name %s!" % text
elif self.currentElement == "x":
#print " Setting atom x coordinate to %s" % text
self.currentAtom.x = float(text)
elif self.currentElement == "y":
#print " Setting atom y coordinate to %s" % text
self.currentAtom.y = float(text)
elif self.currentElement == "z":
#print " Setting atom z coordinate to %s" % text
self.currentAtom.z = float(text)
elif self.currentElement == "bond":
#print " Setting bond text to %s" % text
self.currentAtom.bonds.append(text)
elif self.currentElement == "altname":
#print " Setting altname text to %s" % text
self.currentAtom.altname = text
elif self.currentElement == "dihedral":
#print " Setting dihedral text to %s" % text
self.currentDihedral.atomList = text
else:
print "** NOT handling character text: %s" % text
class TopologyResidue:
""" A class for residue topology information """
def __init__(self, topology):
""" Initialize with a Topology object """
self.name = None
self.reference = None
self.titrationStates = []
self.topology = topology
self.topology.residues.append(self)
def __str__(self):
return self.name
class TopologyDihedral:
""" A class for dihedral topology information. """
def __init__(self, parent):
""" Needs a parent that has a dihedral list. """
self.parent = parent
self.parent.dihedrals.append(self)
self.atomList = None
def __str__(self):
return self.atomList
class TopologyAtom:
""" A class for atom topology information """
def __init__(self, parent):
""" Needs to be intialized with an upper-level class that contains an atoms array (e.g., TopologyReference
or TopologyConformerAddition)"""
self.parent = parent
self.parent.atoms.append(self)
self.name = None
self.x = None
self.y = None
self.z = None
self.bonds = []
self.altname = None
def __str__(self):
return self.name
class TopologyTitrationState:
""" A class for the titration state of a residue """
def __init__(self, topologyResidue):
""" Initialize with a TopologyResidue object """
self.topologyResidue = topologyResidue
self.topologyResidue.titrationStates.append(self)
self.tautomers = []
self.name = None
def __str__(self):
return self.name
class TopologyTautomer:
""" A class for topology tautomer information """
def __init__(self, topologyTitrationState):
""" Initialize with a TopologyTitrationState object """
self.topologyTitrationState = topologyTitrationState
self.topologyTitrationState.tautomers.append(self)
self.conformers = []
self.name = None
def __str__(self):
return self.name
class TopologyConformer:
""" A class for topology conformer information """
def __init__(self, topologyTautomer):
""" Initialize with a TopologyTautomer object """
self.topologyTautomer = topologyTautomer
self.topologyTautomer.conformers.append(self)
self.name = None
self.conformerAdds = []
self.conformerRemoves = []
def __str__(self):
return self.name
class TopologyReference:
""" A class for the reference structure of a residue """
def __init__(self, topologyResidue):
""" Initialize with a TopologyResidue object """
self.topologyResidue = topologyResidue
self.topologyResidue.reference = self
self.atoms = []
self.dihedrals = []
class TopologyConformerAdd:
""" A class for adding atoms to a conformer """
def __init__(self, topologyConformer):
""" Initialize with a TopologyConformer object """
self.topologyConformer = topologyConformer
self.topologyConformer.conformerAdds.append(self)
self.atoms = []
self.name = None
self.dihedrals = []
class TopologyConformerRemove:
""" A class for removing atoms to a conformer """
def __init__(self, topologyConformer):
""" Initialize with a TopologyConformer object """
self.topologyConformer = topologyConformer
self.topologyConformer.conformerRemoves.append(self)
self.atoms = []
self.name = None
class Topology:
""" Contains the structured definitions of residue reference coordinates as well as alternate titration,
conformer, and tautomer states.
"""
def __init__(self, topologyFile):
""" Initialize with the topology file reference ready for reading """
handler = TopologyHandler()
sax.make_parser()
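        # Note: the parser created above is never used; xml.sax.parseString on the
        # next line constructs its own parser internally and feeds its events to
        # the supplied handler.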
sax.parseString(topologyFile.read(), handler)
self.residues = handler.residues
if __name__ == "__main__":
topologyFile = open(TOPOLOGYPATH, "r")
topology = Topology(topologyFile)
"""
print "####### SUMMARY ########"
print "Topology has %d residues:" % len(topology.residues)
for residue in topology.residues:
print "-- Residue %s has 1 reference and %d titration states" % (residue.name, len(residue.titrationStates))
reference = residue.reference
print "---- Reference %s has %d atoms and %d dihedrals" % (reference, len(reference.atoms), len(reference.dihedrals))
for atom in reference.atoms:
print "------ Atom %s" % atom.name
print "-------- Alt name %s" % atom.altname
print "-------- Coordinate %g %g %g" % (atom.x, atom.y, atom.z)
for dihedral in reference.dihedrals:
print "------ Dihedral %s" % dihedral
for titrationState in residue.titrationStates:
print "---- Titration state %s has %d tautomers" % (titrationState.name, len(titrationState.tautomers))
for tautomer in titrationState.tautomers:
print "-------- Tautomer %s has %d conformers" % (tautomer.name, len(tautomer.conformers))
for conformer in tautomer.conformers:
print "---------- Conformer %s has %d removes" % (conformer.name, len(conformer.conformerRemoves))
for remove in conformer.conformerRemoves:
print "------------ Remove %d atoms" % (len(remove.atoms))
for atom in remove.atoms:
print "-------------- Atom %s" % (atom.name)
print "---------- Conformer %s has %d adds" % (conformer.name, len(conformer.conformerAdds))
for add in conformer.conformerAdds:
print "------------ Add %d atoms and %d dihedrals" % (len(add.atoms), len(add.dihedrals))
for atom in add.atoms:
print "-------------- Atom %s/%s (%g, %g, %g) bonded to %s" % (atom.name, atom.altname, atom.x, atom.y, atom.z, atom.bonds)
for dihedral in add.dihedrals:
print "-------------- Dihedral %s" % dihedral
"""
|
|
# Django settings for webapp project.
import os.path
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SERVE_MEDIA = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
('Jeffrey', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
# Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(PROJECT_ROOT, 'dev.db'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
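# For example (hypothetical values), switching to PostgreSQL would replace the
# block above with something like:
#
#   DATABASES = {
#       'default': {
#           'ENGINE': 'django.db.backends.postgresql_psycopg2',
#           'NAME': 'webapp',
#           'USER': 'webapp',
#           'PASSWORD': 'secret',
#           'HOST': 'localhost',
#           'PORT': '5432',
#       }
#   }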
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
gettext = lambda s: s
LANGUAGES = (
('en', gettext('English')),
    #('fr', gettext('French')),
('zh-cn', gettext('Simplified Chinese')),
    ('zh-tw', gettext('Traditional Chinese')),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: '/home/media/media.lawrence.com/media/'
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'site_media', 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: 'http://media.lawrence.com/media/', 'http://example.com/media/'
MEDIA_URL = '/site_media/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' 'static/' subdirectories and in STATICFILES_DIRS.
# Example: '/home/media/media.lawrence.com/static/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'site_media', 'static')
# URL prefix for static files.
# Example: 'http://media.lawrence.com/static/'
STATIC_URL = '/site_media/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: 'http://foo.com/static/admin/', '/static/admin/'.
ADMIN_MEDIA_PREFIX = '/site_media/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
# Put strings here, like '/home/html/static' or 'C:/www/django/static'.
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'u&rt+(7tp)x&=1&cuw$@x5kzsvwkm!x_x*m25skxs@tx0!x0c%'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'webapp.urls'
TEMPLATE_DIRS = (
    os.path.join(PROJECT_ROOT, 'templates'),
# Put strings here, like '/home/html/django_templates' or 'C:/www/django/templates'.
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'timezones',
'emailconfirmation',
'social_auth',
'taggit',
# theme
'theme',
# Building apps
'account',
'signup_codes',
'sina_oauth2',
'about',
# Apps
'bookmark',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'social_auth.context_processors.social_auth_backends',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.static',
'context_processors.site_settings',
'account.context_processors.account',
)
ACCOUNT_OPEN_SIGNUP = True
#ACCOUNT_USE_OPENID = False
ACCOUNT_USE_SOCIAL = True
ACCOUNT_REQUIRED_EMAIL = True
ACCOUNT_EMAIL_VERIFICATION = True
ACCOUNT_EMAIL_AUTHENTICATION = False
ACCOUNT_UNIQUE_EMAIL = EMAIL_CONFIRMATION_UNIQUE_EMAIL = True
AUTHENTICATION_BACKENDS = (
'account.auth_backends.AuthenticationBackend',
)
LOGIN_URL = '/account/login/' # @@@ any way this can be a url name?
LOGIN_REDIRECT_URLNAME = 'my_bookmark'
LOGOUT_REDIRECT_URLNAME = 'home'
EMAIL_CONFIRMATION_DAYS = 2
EMAIL_DEBUG = DEBUG
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
FIXTURE_DIRS = (
os.path.join(PROJECT_ROOT, 'fixtures'),
)
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'your mail here'
EMAIL_HOST_PASSWORD = 'xxxxxx'
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
DATETIME_FORMAT = 'Y-m-d H:i:s'
try:
from local_settings import *
except ImportError:
pass
try:
from social_auth_settings import *
except ImportError:
pass
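# A minimal local_settings.py sketch that the first try/except import above would
# pick up (hypothetical values; only override names already defined in this file):
#
#   DEBUG = False
#   TEMPLATE_DEBUG = DEBUG
#   SECRET_KEY = 'generate-a-new-secret-key-for-production'
#   EMAIL_HOST_USER = '[email protected]'
#   EMAIL_HOST_PASSWORD = 'app-specific-password'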
|
|
"""This component provides basic support for Foscam IP cameras."""
import asyncio
from libpyfoscam import FoscamCamera
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, SUPPORT_STREAM, Camera
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
)
from homeassistant.helpers import config_validation as cv, entity_platform
from .const import (
CONF_RTSP_PORT,
CONF_STREAM,
DOMAIN,
LOGGER,
SERVICE_PTZ,
SERVICE_PTZ_PRESET,
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required("ip"): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_NAME, default="Foscam Camera"): cv.string,
vol.Optional(CONF_PORT, default=88): cv.port,
vol.Optional(CONF_RTSP_PORT): cv.port,
}
)
DIR_UP = "up"
DIR_DOWN = "down"
DIR_LEFT = "left"
DIR_RIGHT = "right"
DIR_TOPLEFT = "top_left"
DIR_TOPRIGHT = "top_right"
DIR_BOTTOMLEFT = "bottom_left"
DIR_BOTTOMRIGHT = "bottom_right"
MOVEMENT_ATTRS = {
DIR_UP: "ptz_move_up",
DIR_DOWN: "ptz_move_down",
DIR_LEFT: "ptz_move_left",
DIR_RIGHT: "ptz_move_right",
DIR_TOPLEFT: "ptz_move_top_left",
DIR_TOPRIGHT: "ptz_move_top_right",
DIR_BOTTOMLEFT: "ptz_move_bottom_left",
DIR_BOTTOMRIGHT: "ptz_move_bottom_right",
}
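# The values above are names of methods on the libpyfoscam FoscamCamera session;
# async_perform_ptz below resolves them with getattr(), so they are assumed to
# exist on the camera object.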
DEFAULT_TRAVELTIME = 0.125
ATTR_MOVEMENT = "movement"
ATTR_TRAVELTIME = "travel_time"
ATTR_PRESET_NAME = "preset_name"
PTZ_GOTO_PRESET_COMMAND = "ptz_goto_preset"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up a Foscam IP Camera."""
LOGGER.warning(
"Loading foscam via platform config is deprecated, it will be automatically imported. Please remove it afterwards."
)
config_new = {
CONF_NAME: config[CONF_NAME],
CONF_HOST: config["ip"],
CONF_PORT: config[CONF_PORT],
CONF_USERNAME: config[CONF_USERNAME],
CONF_PASSWORD: config[CONF_PASSWORD],
CONF_STREAM: "Main",
CONF_RTSP_PORT: config.get(CONF_RTSP_PORT, 554),
}
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=config_new
)
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add a Foscam IP camera from a config entry."""
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_PTZ,
{
vol.Required(ATTR_MOVEMENT): vol.In(
[
DIR_UP,
DIR_DOWN,
DIR_LEFT,
DIR_RIGHT,
DIR_TOPLEFT,
DIR_TOPRIGHT,
DIR_BOTTOMLEFT,
DIR_BOTTOMRIGHT,
]
),
vol.Optional(ATTR_TRAVELTIME, default=DEFAULT_TRAVELTIME): cv.small_float,
},
"async_perform_ptz",
)
platform.async_register_entity_service(
SERVICE_PTZ_PRESET,
{
vol.Required(ATTR_PRESET_NAME): cv.string,
},
"async_perform_ptz_preset",
)
camera = FoscamCamera(
config_entry.data[CONF_HOST],
config_entry.data[CONF_PORT],
config_entry.data[CONF_USERNAME],
config_entry.data[CONF_PASSWORD],
verbose=False,
)
async_add_entities([HassFoscamCamera(camera, config_entry)])
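# Sketch of invoking the PTZ service registered above from Home Assistant; the
# exact service name depends on DOMAIN and SERVICE_PTZ in const.py (assumed here
# to be "foscam" and "ptz"):
#
#   service: foscam.ptz
#   data:
#     entity_id: camera.foscam_camera
#     movement: up
#     travel_time: 0.125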
class HassFoscamCamera(Camera):
"""An implementation of a Foscam IP camera."""
def __init__(self, camera, config_entry):
"""Initialize a Foscam camera."""
super().__init__()
self._foscam_session = camera
self._name = config_entry.title
self._username = config_entry.data[CONF_USERNAME]
self._password = config_entry.data[CONF_PASSWORD]
self._stream = config_entry.data[CONF_STREAM]
self._unique_id = config_entry.entry_id
self._rtsp_port = config_entry.data[CONF_RTSP_PORT]
self._motion_status = False
async def async_added_to_hass(self):
"""Handle entity addition to hass."""
# Get motion detection status
ret, response = await self.hass.async_add_executor_job(
self._foscam_session.get_motion_detect_config
)
if ret == -3:
LOGGER.info(
"Can't get motion detection status, camera %s configured with non-admin user",
self._name,
)
elif ret != 0:
LOGGER.error(
"Error getting motion detection status of %s: %s", self._name, ret
)
else:
self._motion_status = response == 1
@property
def unique_id(self):
"""Return the entity unique ID."""
return self._unique_id
def camera_image(self):
"""Return a still image response from the camera."""
# Send the request to snap a picture and return raw jpg data
# Handle exception if host is not reachable or url failed
result, response = self._foscam_session.snap_picture_2()
if result != 0:
return None
return response
@property
def supported_features(self):
"""Return supported features."""
if self._rtsp_port:
return SUPPORT_STREAM
return None
async def stream_source(self):
"""Return the stream source."""
if self._rtsp_port:
return f"rtsp://{self._username}:{self._password}@{self._foscam_session.host}:{self._rtsp_port}/video{self._stream}"
return None
@property
def motion_detection_enabled(self):
"""Camera Motion Detection Status."""
return self._motion_status
def enable_motion_detection(self):
"""Enable motion detection in camera."""
try:
ret = self._foscam_session.enable_motion_detection()
if ret != 0:
if ret == -3:
LOGGER.info(
"Can't set motion detection status, camera %s configured with non-admin user",
self._name,
)
return
self._motion_status = True
except TypeError:
LOGGER.debug(
"Failed enabling motion detection on '%s'. Is it supported by the device?",
self._name,
)
def disable_motion_detection(self):
"""Disable motion detection."""
try:
ret = self._foscam_session.disable_motion_detection()
if ret != 0:
if ret == -3:
LOGGER.info(
"Can't set motion detection status, camera %s configured with non-admin user",
self._name,
)
return
self._motion_status = False
except TypeError:
LOGGER.debug(
"Failed disabling motion detection on '%s'. Is it supported by the device?",
self._name,
)
async def async_perform_ptz(self, movement, travel_time):
"""Perform a PTZ action on the camera."""
LOGGER.debug("PTZ action '%s' on %s", movement, self._name)
movement_function = getattr(self._foscam_session, MOVEMENT_ATTRS[movement])
ret, _ = await self.hass.async_add_executor_job(movement_function)
if ret != 0:
LOGGER.error("Error moving %s '%s': %s", movement, self._name, ret)
return
await asyncio.sleep(travel_time)
ret, _ = await self.hass.async_add_executor_job(
self._foscam_session.ptz_stop_run
)
if ret != 0:
LOGGER.error("Error stopping movement on '%s': %s", self._name, ret)
return
async def async_perform_ptz_preset(self, preset_name):
"""Perform a PTZ preset action on the camera."""
LOGGER.debug("PTZ preset '%s' on %s", preset_name, self._name)
preset_function = getattr(self._foscam_session, PTZ_GOTO_PRESET_COMMAND)
ret, _ = await self.hass.async_add_executor_job(preset_function, preset_name)
if ret != 0:
LOGGER.error(
"Error moving to preset %s on '%s': %s", preset_name, self._name, ret
)
return
@property
def name(self):
"""Return the name of this camera."""
return self._name
|
|
from collections import namedtuple
import cx_Oracle
from django.db import models
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo as BaseFieldInfo, TableInfo,
)
from django.utils.functional import cached_property
FieldInfo = namedtuple('FieldInfo', BaseFieldInfo._fields + ('is_autofield', 'is_json'))
class DatabaseIntrospection(BaseDatabaseIntrospection):
cache_bust_counter = 1
# Maps type objects to Django Field types.
@cached_property
def data_types_reverse(self):
if self.connection.cx_oracle_version < (8,):
return {
cx_Oracle.BLOB: 'BinaryField',
cx_Oracle.CLOB: 'TextField',
cx_Oracle.DATETIME: 'DateField',
cx_Oracle.FIXED_CHAR: 'CharField',
cx_Oracle.FIXED_NCHAR: 'CharField',
cx_Oracle.INTERVAL: 'DurationField',
cx_Oracle.NATIVE_FLOAT: 'FloatField',
cx_Oracle.NCHAR: 'CharField',
cx_Oracle.NCLOB: 'TextField',
cx_Oracle.NUMBER: 'DecimalField',
cx_Oracle.STRING: 'CharField',
cx_Oracle.TIMESTAMP: 'DateTimeField',
}
else:
return {
cx_Oracle.DB_TYPE_DATE: 'DateField',
cx_Oracle.DB_TYPE_BINARY_DOUBLE: 'FloatField',
cx_Oracle.DB_TYPE_BLOB: 'BinaryField',
cx_Oracle.DB_TYPE_CHAR: 'CharField',
cx_Oracle.DB_TYPE_CLOB: 'TextField',
cx_Oracle.DB_TYPE_INTERVAL_DS: 'DurationField',
cx_Oracle.DB_TYPE_NCHAR: 'CharField',
cx_Oracle.DB_TYPE_NCLOB: 'TextField',
cx_Oracle.DB_TYPE_NVARCHAR: 'CharField',
cx_Oracle.DB_TYPE_NUMBER: 'DecimalField',
cx_Oracle.DB_TYPE_TIMESTAMP: 'DateTimeField',
cx_Oracle.DB_TYPE_VARCHAR: 'CharField',
}
def get_field_type(self, data_type, description):
if data_type == cx_Oracle.NUMBER:
precision, scale = description[4:6]
if scale == 0:
if precision > 11:
return 'BigAutoField' if description.is_autofield else 'BigIntegerField'
elif 1 < precision < 6 and description.is_autofield:
return 'SmallAutoField'
elif precision == 1:
return 'BooleanField'
elif description.is_autofield:
return 'AutoField'
else:
return 'IntegerField'
elif scale == -127:
return 'FloatField'
elif data_type == cx_Oracle.NCLOB and description.is_json:
return 'JSONField'
return super().get_field_type(data_type, description)
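    # Examples of the NUMBER mapping above: NUMBER(12, 0) -> 'BigIntegerField',
    # NUMBER(5, 0) on an identity column -> 'SmallAutoField', NUMBER(1, 0) ->
    # 'BooleanField', and a NUMBER reported with scale -127 (how cx_Oracle
    # surfaces Oracle FLOAT columns) -> 'FloatField'.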
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
cursor.execute("""
SELECT table_name, 't'
FROM user_tables
WHERE
NOT EXISTS (
SELECT 1
FROM user_mviews
WHERE user_mviews.mview_name = user_tables.table_name
)
UNION ALL
SELECT view_name, 'v' FROM user_views
UNION ALL
SELECT mview_name, 'v' FROM user_mviews
""")
return [TableInfo(self.identifier_converter(row[0]), row[1]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
Return a description of the table with the DB-API cursor.description
interface.
"""
# user_tab_columns gives data default for columns
cursor.execute("""
SELECT
user_tab_cols.column_name,
user_tab_cols.data_default,
CASE
WHEN user_tab_cols.collation = user_tables.default_collation
THEN NULL
ELSE user_tab_cols.collation
END collation,
CASE
WHEN user_tab_cols.char_used IS NULL
THEN user_tab_cols.data_length
ELSE user_tab_cols.char_length
END as internal_size,
CASE
WHEN user_tab_cols.identity_column = 'YES' THEN 1
ELSE 0
END as is_autofield,
CASE
WHEN EXISTS (
SELECT 1
FROM user_json_columns
WHERE
user_json_columns.table_name = user_tab_cols.table_name AND
user_json_columns.column_name = user_tab_cols.column_name
)
THEN 1
ELSE 0
END as is_json
FROM user_tab_cols
LEFT OUTER JOIN
user_tables ON user_tables.table_name = user_tab_cols.table_name
WHERE user_tab_cols.table_name = UPPER(%s)
""", [table_name])
field_map = {
column: (internal_size, default if default != 'NULL' else None, collation, is_autofield, is_json)
for column, default, collation, internal_size, is_autofield, is_json in cursor.fetchall()
}
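        # The throw-away query below embeds the incrementing counter so its SQL text
        # differs on every call; this appears intended to defeat statement caching and
        # guarantee a fresh cursor.description for the table.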
self.cache_bust_counter += 1
cursor.execute("SELECT * FROM {} WHERE ROWNUM < 2 AND {} > 0".format(
self.connection.ops.quote_name(table_name),
self.cache_bust_counter))
description = []
for desc in cursor.description:
name = desc[0]
internal_size, default, collation, is_autofield, is_json = field_map[name]
name = name % {} # cx_Oracle, for some reason, doubles percent signs.
description.append(FieldInfo(
self.identifier_converter(name), *desc[1:3], internal_size, desc[4] or 0,
desc[5] or 0, *desc[6:], default, collation, is_autofield, is_json,
))
return description
def identifier_converter(self, name):
"""Identifier comparison is case insensitive under Oracle."""
return name.lower()
def get_sequences(self, cursor, table_name, table_fields=()):
cursor.execute("""
SELECT
user_tab_identity_cols.sequence_name,
user_tab_identity_cols.column_name
FROM
user_tab_identity_cols,
user_constraints,
user_cons_columns cols
WHERE
user_constraints.constraint_name = cols.constraint_name
AND user_constraints.table_name = user_tab_identity_cols.table_name
AND cols.column_name = user_tab_identity_cols.column_name
AND user_constraints.constraint_type = 'P'
AND user_tab_identity_cols.table_name = UPPER(%s)
""", [table_name])
# Oracle allows only one identity column per table.
row = cursor.fetchone()
if row:
return [{
'name': self.identifier_converter(row[0]),
'table': self.identifier_converter(table_name),
'column': self.identifier_converter(row[1]),
}]
# To keep backward compatibility for AutoFields that aren't Oracle
# identity columns.
for f in table_fields:
if isinstance(f, models.AutoField):
return [{'table': table_name, 'column': f.column}]
return []
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
table_name = table_name.upper()
cursor.execute("""
SELECT ca.column_name, cb.table_name, cb.column_name
FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb
WHERE user_constraints.table_name = %s AND
user_constraints.constraint_name = ca.constraint_name AND
user_constraints.r_constraint_name = cb.constraint_name AND
ca.position = cb.position""", [table_name])
return {
self.identifier_converter(field_name): (
self.identifier_converter(rel_field_name),
self.identifier_converter(rel_table_name),
) for field_name, rel_table_name, rel_field_name in cursor.fetchall()
}
def get_key_columns(self, cursor, table_name):
cursor.execute("""
SELECT ccol.column_name, rcol.table_name AS referenced_table, rcol.column_name AS referenced_column
FROM user_constraints c
JOIN user_cons_columns ccol
ON ccol.constraint_name = c.constraint_name
JOIN user_cons_columns rcol
ON rcol.constraint_name = c.r_constraint_name
WHERE c.table_name = %s AND c.constraint_type = 'R'""", [table_name.upper()])
return [
tuple(self.identifier_converter(cell) for cell in row)
for row in cursor.fetchall()
]
def get_primary_key_column(self, cursor, table_name):
cursor.execute("""
SELECT
cols.column_name
FROM
user_constraints,
user_cons_columns cols
WHERE
user_constraints.constraint_name = cols.constraint_name AND
user_constraints.constraint_type = 'P' AND
user_constraints.table_name = UPPER(%s) AND
cols.position = 1
""", [table_name])
row = cursor.fetchone()
return self.identifier_converter(row[0]) if row else None
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
"""
constraints = {}
# Loop over the constraints, getting PKs, uniques, and checks
cursor.execute("""
SELECT
user_constraints.constraint_name,
LISTAGG(LOWER(cols.column_name), ',') WITHIN GROUP (ORDER BY cols.position),
CASE user_constraints.constraint_type
WHEN 'P' THEN 1
ELSE 0
END AS is_primary_key,
CASE
WHEN user_constraints.constraint_type IN ('P', 'U') THEN 1
ELSE 0
END AS is_unique,
CASE user_constraints.constraint_type
WHEN 'C' THEN 1
ELSE 0
END AS is_check_constraint
FROM
user_constraints
LEFT OUTER JOIN
user_cons_columns cols ON user_constraints.constraint_name = cols.constraint_name
WHERE
user_constraints.constraint_type = ANY('P', 'U', 'C')
AND user_constraints.table_name = UPPER(%s)
GROUP BY user_constraints.constraint_name, user_constraints.constraint_type
""", [table_name])
for constraint, columns, pk, unique, check in cursor.fetchall():
constraint = self.identifier_converter(constraint)
constraints[constraint] = {
'columns': columns.split(','),
'primary_key': pk,
'unique': unique,
'foreign_key': None,
'check': check,
'index': unique, # All uniques come with an index
}
# Foreign key constraints
cursor.execute("""
SELECT
cons.constraint_name,
LISTAGG(LOWER(cols.column_name), ',') WITHIN GROUP (ORDER BY cols.position),
LOWER(rcols.table_name),
LOWER(rcols.column_name)
FROM
user_constraints cons
INNER JOIN
user_cons_columns rcols ON rcols.constraint_name = cons.r_constraint_name AND rcols.position = 1
LEFT OUTER JOIN
user_cons_columns cols ON cons.constraint_name = cols.constraint_name
WHERE
cons.constraint_type = 'R' AND
cons.table_name = UPPER(%s)
GROUP BY cons.constraint_name, rcols.table_name, rcols.column_name
""", [table_name])
for constraint, columns, other_table, other_column in cursor.fetchall():
constraint = self.identifier_converter(constraint)
constraints[constraint] = {
'primary_key': False,
'unique': False,
'foreign_key': (other_table, other_column),
'check': False,
'index': False,
'columns': columns.split(','),
}
# Now get indexes
cursor.execute("""
SELECT
ind.index_name,
LOWER(ind.index_type),
LISTAGG(LOWER(cols.column_name), ',') WITHIN GROUP (ORDER BY cols.column_position),
LISTAGG(cols.descend, ',') WITHIN GROUP (ORDER BY cols.column_position)
FROM
user_ind_columns cols, user_indexes ind
WHERE
cols.table_name = UPPER(%s) AND
NOT EXISTS (
SELECT 1
FROM user_constraints cons
WHERE ind.index_name = cons.index_name
) AND cols.index_name = ind.index_name
GROUP BY ind.index_name, ind.index_type
""", [table_name])
for constraint, type_, columns, orders in cursor.fetchall():
constraint = self.identifier_converter(constraint)
constraints[constraint] = {
'primary_key': False,
'unique': False,
'foreign_key': None,
'check': False,
'index': True,
'type': 'idx' if type_ == 'normal' else type_,
'columns': columns.split(','),
'orders': orders.split(','),
}
return constraints
|
|
import os
from unittest import skip
from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from onadata.apps.main.views import show, form_photos, update_xform, profile,\
enketo_preview
from onadata.apps.logger.models import XForm
from onadata.apps.logger.views import download_xlsform, download_jsonform,\
download_xform, delete_xform
from onadata.apps.viewer.models.parsed_instance import ParsedInstance
from onadata.apps.viewer.views import export_list, map_view
from onadata.libs.utils.logger_tools import publish_xml_form
from onadata.libs.utils.user_auth import http_auth_string
from onadata.libs.utils.user_auth import get_user_default_project
from test_base import TestBase
class TestFormShow(TestBase):
def setUp(self):
TestBase.setUp(self)
self._create_user_and_login()
self._publish_transportation_form()
self.url = reverse(show, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
def test_show_form_name(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.xform.id_string)
def test_hide_from_anon(self):
response = self.anon.get(self.url)
self.assertEqual(response.status_code, 302)
def test_hide_from_not_user(self):
self._create_user_and_login("jo")
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
def test_show_to_anon_if_public(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(self.url)
self.assertEqual(response.status_code, 200)
def test_dl_xlsx_xlsform(self):
self._publish_xlsx_file()
response = self.client.get(reverse(download_xlsform, kwargs={
'username': self.user.username,
'id_string': 'exp_one'
}))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response['Content-Disposition'],
"attachment; filename=exp_one.xlsx")
def test_dl_xls_to_anon_if_public(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(reverse(download_xlsform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}))
self.assertEqual(response.status_code, 200)
def test_dl_xls_for_basic_auth(self):
extra = {
'HTTP_AUTHORIZATION':
http_auth_string(self.login_username, self.login_password)
}
response = self.anon.get(reverse(download_xlsform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}), **extra)
self.assertEqual(response.status_code, 200)
def test_dl_json_to_anon_if_public(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(reverse(download_jsonform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}))
self.assertEqual(response.status_code, 200)
def test_dl_jsonp_to_anon_if_public(self):
self.xform.shared = True
self.xform.save()
callback = 'jsonpCallback'
response = self.anon.get(reverse(download_jsonform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}), {'callback': callback})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content.startswith(callback + '('), True)
self.assertEqual(response.content.endswith(')'), True)
def test_dl_json_for_basic_auth(self):
extra = {
'HTTP_AUTHORIZATION':
http_auth_string(self.login_username, self.login_password)
}
response = self.anon.get(reverse(download_jsonform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}), **extra)
self.assertEqual(response.status_code, 200)
def test_dl_json_for_cors_options(self):
response = self.anon.options(reverse(download_jsonform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}))
allowed_headers = ['Accept', 'Origin', 'X-Requested-With',
'Authorization']
control_headers = response['Access-Control-Allow-Headers']
provided_headers = [h.strip() for h in control_headers.split(',')]
self.assertListEqual(allowed_headers, provided_headers)
self.assertEqual(response['Access-Control-Allow-Methods'], 'GET')
self.assertEqual(response['Access-Control-Allow-Origin'], '*')
def test_dl_xform_to_anon_if_public(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(reverse(download_xform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}))
self.assertEqual(response.status_code, 200)
def test_dl_xform_for_basic_auth(self):
extra = {
'HTTP_AUTHORIZATION':
http_auth_string(self.login_username, self.login_password)
}
response = self.anon.get(reverse(download_xform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}), **extra)
self.assertEqual(response.status_code, 200)
def test_dl_xform_for_authenticated_non_owner(self):
self._create_user_and_login('alice', 'alice')
response = self.client.get(reverse(download_xform, kwargs={
'username': 'bob',
'id_string': self.xform.id_string
}))
self.assertEqual(response.status_code, 200)
def test_show_private_if_shared_but_not_data(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(self.url)
self.assertContains(response, 'PRIVATE')
def test_show_link_if_shared_and_data(self):
self.xform.shared = True
self.xform.shared_data = True
self.xform.save()
self._submit_transport_instance()
response = self.anon.get(self.url)
self.assertContains(response, reverse(export_list, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'csv'
}))
def test_show_link_if_owner(self):
self._submit_transport_instance()
response = self.client.get(self.url)
self.assertContains(response, reverse(export_list, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'csv'
}))
self.assertContains(response, reverse(export_list, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'xls'
}))
self.assertNotContains(response, reverse(map_view, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}))
# check that a form with geopoints has the map url
response = self._publish_xls_file(
os.path.join(
os.path.dirname(__file__), "fixtures", "gps", "gps.xls"))
self.assertEqual(response.status_code, 200)
self.xform = XForm.objects.latest('date_created')
show_url = reverse(show, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
map_url = reverse(map_view, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
response = self.client.get(show_url)
# check that map url doesnt show before we have submissions
self.assertNotContains(response, map_url)
# make a submission
self._make_submission(
os.path.join(
os.path.dirname(__file__), "fixtures", "gps", "instances",
"gps_1980-01-23_20-52-08.xml")
)
self.assertEqual(self.response.status_code, 201)
# get new show view
response = self.client.get(show_url)
self.assertContains(response, map_url)
def test_user_sees_edit_btn(self):
response = self.client.get(self.url)
self.assertContains(response, 'edit</a>')
def test_user_sees_settings(self):
response = self.client.get(self.url)
self.assertContains(response, 'Settings')
def test_anon_no_edit_btn(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(self.url)
self.assertNotContains(response, 'edit</a>')
def test_anon_no_toggle_data_share_btn(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(self.url)
self.assertNotContains(response, 'PUBLIC</a>')
self.assertNotContains(response, 'PRIVATE</a>')
def test_show_add_sourc_doc_if_owner(self):
response = self.client.get(self.url)
self.assertContains(response, 'Source document:')
def test_show_add_supporting_docs_if_owner(self):
response = self.client.get(self.url)
self.assertContains(response, 'Supporting document:')
def test_show_add_supporting_media_if_owner(self):
response = self.client.get(self.url)
self.assertContains(response, 'Media upload:')
def test_show_add_mapbox_layer_if_owner(self):
response = self.client.get(self.url)
self.assertContains(response, 'JSONP url:')
def test_hide_add_supporting_docs_if_not_owner(self):
self.xform.shared = True
self.xform.save()
response = self.anon.get(self.url)
self.assertNotContains(response, 'Upload')
def test_load_photo_page(self):
response = self.client.get(reverse(form_photos, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string}))
self.assertEqual(response.status_code, 200)
def test_load_from_uuid(self):
self.xform = XForm.objects.get(pk=self.xform.id)
response = self.client.get(reverse(show, kwargs={
'uuid': self.xform.uuid}))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'],
'%s%s' % (self.base_url, self.url))
def test_xls_replace_markup(self):
"""
Check that update form is only shown when there are no submissions
and the user is the owner
"""
# when we have 0 submissions, update markup exists
self.xform.shared = True
self.xform.save()
dashboard_url = reverse(profile, kwargs={
'username': 'bob'
})
response = self.client.get(dashboard_url)
self.assertContains(
response, 'href="#replace-transportation_2011_07_25"')
# a non owner can't see the markup
response = self.anon.get(self.url)
self.assertNotContains(
response, 'href="#replace-transportation_2011_07_25"')
# when we have a submission, we cant update the xls form
self._submit_transport_instance()
response = self.client.get(dashboard_url)
self.assertNotContains(
response, 'href="#replace-transportation_2011_07_25"')
def test_non_owner_cannot_replace_form(self):
"""
Test that a non owner cannot replace a shared xls form
"""
xform_update_url = reverse(update_xform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
self.xform.shared = True
self.xform.save()
# create and login another user
self._create_user_and_login('peter', 'peter')
response = self.client.post(xform_update_url)
# since we are logged in, we'll be re-directed to our profile page
self.assertRedirects(response, self.base_url,
status_code=302, target_status_code=302)
def test_replace_xform(self):
xform_update_url = reverse(update_xform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
count = XForm.objects.count()
xls_path = os.path.join(self.this_directory, "fixtures",
"transportation", "transportation_updated.xls")
with open(xls_path, "r") as xls_file:
post_data = {'xls_file': xls_file}
self.client.post(xform_update_url, post_data)
self.assertEqual(XForm.objects.count(), count)
self.xform = XForm.objects.order_by('id').reverse()[0]
data_dictionary = self.xform.data_dictionary()
# look for the preferred_means question
# which is only in the updated xls
is_updated_form = len([e.name for e in data_dictionary.survey_elements
if e.name == u'preferred_means']) > 0
self.assertTrue(is_updated_form)
def test_update_form_doesnt_truncate_to_50_chars(self):
count = XForm.objects.count()
xls_path = os.path.join(
self.this_directory,
"fixtures",
"transportation",
"transportation_with_long_id_string.xls")
self._publish_xls_file_and_set_xform(xls_path)
# Update the form
xform_update_url = reverse(update_xform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
updated_xls_path = os.path.join(
self.this_directory,
"fixtures",
"transportation",
"transportation_with_long_id_string_updated.xls")
with open(updated_xls_path, "r") as xls_file:
post_data = {'xls_file': xls_file}
self.client.post(xform_update_url, post_data)
# Count should stay the same
self.assertEqual(XForm.objects.count(), count + 1)
self.xform = XForm.objects.order_by('id').reverse()[0]
data_dictionary = self.xform.data_dictionary()
# look for the preferred_means question
# which is only in the updated xls
is_updated_form = len([e.name for e in data_dictionary.survey_elements
if e.name == u'preferred_means']) > 0
self.assertTrue(is_updated_form)
def test_xform_delete(self):
id_string = self.xform.id_string
form_exists = XForm.objects.filter(
user=self.user, id_string=id_string).count() == 1
self.assertTrue(form_exists)
xform_delete_url = reverse(delete_xform, kwargs={
'username': self.user.username,
'id_string': id_string
})
self.client.post(xform_delete_url)
form_deleted = XForm.objects.filter(
user=self.user, id_string=id_string).count() == 0
self.assertTrue(form_deleted)
def test_non_owner_cant_delete_xform(self):
id_string = self.xform.id_string
form_exists = XForm.objects.filter(
user=self.user, id_string=id_string).count() == 1
self.assertTrue(form_exists)
xform_delete_url = reverse(delete_xform, kwargs={
'username': self.user.username,
'id_string': id_string
})
# save current user before we re-assign
bob = self.user
self._create_user_and_login('alice', 'alice')
self.client.post(xform_delete_url)
form_deleted = XForm.objects.filter(
user=bob, id_string=id_string).count() == 0
self.assertFalse(form_deleted)
def test_xform_delete_cascades_mongo_instances(self):
initial_mongo_count = ParsedInstance.query_mongo(
self.user.username, self.xform.id_string, '{}', '[]', '{}',
count=True)[0]["count"]
# submit instance
for i in range(len(self.surveys)):
self._submit_transport_instance(i)
# check mongo record exists
mongo_count = ParsedInstance.query_mongo(
self.user.username, self.xform.id_string, '{}', '[]', '{}',
count=True)[0]["count"]
self.assertEqual(mongo_count, initial_mongo_count + len(self.surveys))
# delete form
xform_delete_url = reverse(delete_xform, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
self.client.post(xform_delete_url)
mongo_count = ParsedInstance.query_mongo(
self.user.username, self.xform.id_string, '{}', '[]', '{}',
count=True)[0]["count"]
self.assertEqual(mongo_count, initial_mongo_count)
def test_enketo_preview(self):
url = reverse(
enketo_preview, kwargs={'username': self.user.username,
'id_string': self.xform.id_string})
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_enketo_preview_works_on_shared_forms(self):
self.xform.shared = True
self.xform.save()
url = reverse(
enketo_preview, kwargs={'username': self.user.username,
'id_string': self.xform.id_string})
response = self.anon.get(url)
self.assertEqual(response.status_code, 302)
# TODO PLD disabling this test
@skip('Insensitivity is not enforced upon creation of id_strings.')
def test_form_urls_case_insensitive(self):
url = reverse(show, kwargs={
'username': self.user.username.upper(),
'id_string': self.xform.id_string.upper()
})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_publish_xml_xlsform_download(self):
count = XForm.objects.count()
path = os.path.join(
self.this_directory, '..', '..', 'api', 'tests', 'fixtures',
'forms', 'contributions', 'contributions.xml')
f = open(path)
xml_file = ContentFile(f.read())
f.close()
xml_file.name = 'contributions.xml'
project = get_user_default_project(self.user)
self.xform = publish_xml_form(xml_file, self.user, project)
self.assertTrue(XForm.objects.count() > count)
response = self.client.get(reverse(download_xlsform, kwargs={
'username': self.user.username,
'id_string': 'contributions'
}), follow=True)
self.assertContains(response, 'No XLS file for your form ')
|
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
=============================================
fMRI: OpenfMRI.org data, FSL, ANTS, c3daffine
=============================================
A growing number of datasets are available on `OpenfMRI <http://openfmri.org>`_.
This script demonstrates how to use nipype to analyze a data set::
python fmri_ants_openfmri.py --datasetdir ds107
"""
from nipype import config
config.enable_provenance()
from nipype.external import six
from glob import glob
import os
import nipype.pipeline.engine as pe
import nipype.algorithms.modelgen as model
import nipype.algorithms.rapidart as ra
import nipype.interfaces.fsl as fsl
import nipype.interfaces.ants as ants
from nipype.algorithms.misc import TSNR
from nipype.interfaces.c3 import C3dAffineTool
import nipype.interfaces.io as nio
import nipype.interfaces.utility as niu
from nipype.workflows.fmri.fsl import (create_featreg_preproc,
create_modelfit_workflow,
create_fixed_effects_flow)
from nipype import LooseVersion
from nipype import Workflow, Node, MapNode
from nipype.interfaces import (fsl, Function, ants, freesurfer)
from nipype.interfaces.utility import Rename, Merge, IdentityInterface
from nipype.utils.filemanip import filename_to_list
from nipype.interfaces.io import DataSink, FreeSurferSource
import nipype.interfaces.freesurfer as fs
version = 0
if fsl.Info.version() and \
LooseVersion(fsl.Info.version()) > LooseVersion('5.0.6'):
version = 507
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
imports = ['import os',
'import nibabel as nb',
'import numpy as np',
'import scipy as sp',
'from nipype.utils.filemanip import filename_to_list, list_to_filename, split_filename',
'from scipy.special import legendre'
]
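# These import strings are intended to be injected into standalone helper functions
# (such as median below) when they are wrapped in nipype Function nodes later in the
# script, which is presumably how nb/np/sp become available inside them at run time.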
def median(in_files):
"""Computes an average of the median of each realigned timeseries
Parameters
----------
in_files: one or more realigned Nifti 4D time series
Returns
-------
out_file: a 3D Nifti file
"""
average = None
for idx, filename in enumerate(filename_to_list(in_files)):
img = nb.load(filename)
data = np.median(img.get_data(), axis=3)
if average is None:
average = data
else:
average = average + data
median_img = nb.Nifti1Image(average/float(idx + 1),
img.get_affine(), img.get_header())
filename = os.path.join(os.getcwd(), 'median.nii.gz')
median_img.to_filename(filename)
return filename
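# Rough usage sketch (hypothetical paths, and assuming nibabel/numpy are importable
# as nb/np in the executing namespace): median(['run1.nii.gz', 'run2.nii.gz'])
# writes median.nii.gz in the current working directory and returns its full path.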
def create_reg_workflow(name='registration'):
"""Create a FEAT preprocessing workflow together with freesurfer
Parameters
----------
name : name of workflow (default: 'registration')
Inputs:
inputspec.source_files : files (filename or list of filenames to register)
inputspec.mean_image : reference image to use
inputspec.anatomical_image : anatomical image to coregister to
inputspec.target_image : registration target
Outputs:
outputspec.func2anat_transform : FLIRT transform
        outputspec.anat2target_transform : ANTS composite transform
outputspec.transformed_files : transformed files in target space
outputspec.transformed_mean : mean image in target space
"""
register = pe.Workflow(name=name)
inputnode = pe.Node(interface=niu.IdentityInterface(fields=['source_files',
'mean_image',
'anatomical_image',
'target_image',
'target_image_brain',
'config_file']),
name='inputspec')
outputnode = pe.Node(interface=niu.IdentityInterface(fields=['func2anat_transform',
'anat2target_transform',
'transformed_files',
'transformed_mean',
'anat2target',
'mean2anat_mask'
]),
name='outputspec')
"""
    Estimate the tissue classes from the skull-stripped anatomical image using FSL FAST;
    the white-matter class is binarized below and used to drive the BBR coregistration.
"""
stripper = pe.Node(fsl.BET(), name='stripper')
register.connect(inputnode, 'anatomical_image', stripper, 'in_file')
fast = pe.Node(fsl.FAST(), name='fast')
register.connect(stripper, 'out_file', fast, 'in_files')
"""
Binarize the segmentation
"""
binarize = pe.Node(fsl.ImageMaths(op_string='-nan -thr 0.5 -bin'),
name='binarize')
pickindex = lambda x, i: x[i]
register.connect(fast, ('partial_volume_files', pickindex, 2),
binarize, 'in_file')
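    # partial_volume_files[2] is FAST's white-matter probability map; its binarized
    # form is wired into the BBR coregistration below as the 'wm_seg' input.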
"""
Calculate rigid transform from mean image to anatomical image
"""
mean2anat = pe.Node(fsl.FLIRT(), name='mean2anat')
mean2anat.inputs.dof = 6
register.connect(inputnode, 'mean_image', mean2anat, 'in_file')
register.connect(stripper, 'out_file', mean2anat, 'reference')
"""
Now use bbr cost function to improve the transform
"""
mean2anatbbr = pe.Node(fsl.FLIRT(), name='mean2anatbbr')
mean2anatbbr.inputs.dof = 6
mean2anatbbr.inputs.cost = 'bbr'
mean2anatbbr.inputs.schedule = os.path.join(os.getenv('FSLDIR'),
'etc/flirtsch/bbr.sch')
register.connect(inputnode, 'mean_image', mean2anatbbr, 'in_file')
register.connect(binarize, 'out_file', mean2anatbbr, 'wm_seg')
register.connect(inputnode, 'anatomical_image', mean2anatbbr, 'reference')
register.connect(mean2anat, 'out_matrix_file',
mean2anatbbr, 'in_matrix_file')
"""
Create a mask of the median image coregistered to the anatomical image
"""
mean2anat_mask = Node(fsl.BET(mask=True), name='mean2anat_mask')
register.connect(mean2anatbbr, 'out_file', mean2anat_mask, 'in_file')
"""
Convert the BBRegister transformation to ANTS ITK format
"""
convert2itk = pe.Node(C3dAffineTool(),
name='convert2itk')
convert2itk.inputs.fsl2ras = True
convert2itk.inputs.itk_transform = True
register.connect(mean2anatbbr, 'out_matrix_file', convert2itk, 'transform_file')
    register.connect(inputnode, 'mean_image', convert2itk, 'source_file')
register.connect(stripper, 'out_file', convert2itk, 'reference_file')
"""
Compute registration between the subject's structural and MNI template
This is currently set to perform a very quick registration. However, the
registration can be made significantly more accurate for cortical
structures by increasing the number of iterations
All parameters are set using the example from:
#https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh
"""
reg = pe.Node(ants.Registration(), name='antsRegister')
reg.inputs.output_transform_prefix = "output_"
reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.2, 3.0, 0.0)]
reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[100, 30, 20]]
reg.inputs.dimension = 3
reg.inputs.write_composite_transform = True
reg.inputs.collapse_output_transforms = True
reg.inputs.initial_moving_transform_com = True
reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]]
reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
reg.inputs.convergence_window_size = [20] * 2 + [5]
reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]]
reg.inputs.sigma_units = ['vox'] * 3
reg.inputs.shrink_factors = [[3, 2, 1]]*2 + [[4, 2, 1]]
reg.inputs.use_estimate_learning_rate_once = [True] * 3
reg.inputs.use_histogram_matching = [False] * 2 + [True]
reg.inputs.winsorize_lower_quantile = 0.005
reg.inputs.winsorize_upper_quantile = 0.995
reg.inputs.args = '--float'
reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
reg.inputs.num_threads = 4
reg.plugin_args = {'qsub_args': '-pe orte 4',
'sbatch_args': '--mem=6G -c 4'}
register.connect(stripper, 'out_file', reg, 'moving_image')
    register.connect(inputnode, 'target_image_brain', reg, 'fixed_image')
"""
Concatenate the affine and ants transforms into a list
"""
pickfirst = lambda x: x[0]
merge = pe.Node(niu.Merge(2), iterfield=['in2'], name='mergexfm')
register.connect(convert2itk, 'itk_transform', merge, 'in2')
register.connect(reg, ('composite_transform', pickfirst), merge, 'in1')
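    # antsApplyTransforms applies the supplied transforms in reverse order, so the
    # list [anat->target composite, func->anat affine] takes functional images first
    # into anatomical space and then on to the target template.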
"""
Transform the mean image. First to anatomical and then to target
"""
warpmean = pe.Node(ants.ApplyTransforms(),
name='warpmean')
warpmean.inputs.input_image_type = 0
warpmean.inputs.interpolation = 'Linear'
warpmean.inputs.invert_transform_flags = [False, False]
warpmean.inputs.terminal_output = 'file'
    register.connect(inputnode, 'target_image_brain', warpmean, 'reference_image')
register.connect(inputnode, 'mean_image', warpmean, 'input_image')
register.connect(merge, 'out', warpmean, 'transforms')
"""
Transform the remaining images. First to anatomical and then to target
"""
warpall = pe.MapNode(ants.ApplyTransforms(),
iterfield=['input_image'],
name='warpall')
warpall.inputs.input_image_type = 0
warpall.inputs.interpolation = 'Linear'
warpall.inputs.invert_transform_flags = [False, False]
warpall.inputs.terminal_output = 'file'
    register.connect(inputnode, 'target_image_brain', warpall, 'reference_image')
    register.connect(inputnode, 'source_files', warpall, 'input_image')
register.connect(merge, 'out', warpall, 'transforms')
"""
Assign all the output files
"""
register.connect(reg, 'warped_image', outputnode, 'anat2target')
register.connect(warpmean, 'output_image', outputnode, 'transformed_mean')
register.connect(warpall, 'output_image', outputnode, 'transformed_files')
register.connect(mean2anatbbr, 'out_matrix_file',
outputnode, 'func2anat_transform')
register.connect(mean2anat_mask, 'mask_file',
outputnode, 'mean2anat_mask')
register.connect(reg, 'composite_transform',
outputnode, 'anat2target_transform')
return register
def get_aparc_aseg(files):
"""Return the aparc+aseg.mgz file"""
for name in files:
if 'aparc+aseg.mgz' in name:
return name
raise ValueError('aparc+aseg.mgz not found')
def create_fs_reg_workflow(name='registration'):
"""Create a FEAT preprocessing workflow together with freesurfer
Parameters
----------
name : name of workflow (default: 'registration')
Inputs::
inputspec.source_files : files (filename or list of filenames to register)
inputspec.mean_image : reference image to use
inputspec.target_image : registration target
Outputs::
outputspec.func2anat_transform : BBRegister transform (FSL-format matrix)
outputspec.anat2target_transform : ANTs composite transform
outputspec.transformed_files : transformed files in target space
outputspec.transformed_mean : mean image in target space
"""
register = Workflow(name=name)
inputnode = Node(interface=IdentityInterface(fields=['source_files',
'mean_image',
'subject_id',
'subjects_dir',
'target_image']),
name='inputspec')
outputnode = Node(interface=IdentityInterface(fields=['func2anat_transform',
'out_reg_file',
'anat2target_transform',
'transforms',
'transformed_mean',
'transformed_files',
'min_cost_file',
'anat2target',
'aparc',
'mean2anat_mask'
]),
name='outputspec')
# Get the subject's freesurfer source directory
fssource = Node(FreeSurferSource(),
name='fssource')
fssource.run_without_submitting = True
register.connect(inputnode, 'subject_id', fssource, 'subject_id')
register.connect(inputnode, 'subjects_dir', fssource, 'subjects_dir')
convert = Node(freesurfer.MRIConvert(out_type='nii'),
name="convert")
register.connect(fssource, 'T1', convert, 'in_file')
# Coregister the median to the surface
bbregister = Node(freesurfer.BBRegister(registered_file=True),
name='bbregister')
bbregister.inputs.init = 'fsl'
bbregister.inputs.contrast_type = 't2'
bbregister.inputs.out_fsl_file = True
bbregister.inputs.epi_mask = True
register.connect(inputnode, 'subject_id', bbregister, 'subject_id')
register.connect(inputnode, 'mean_image', bbregister, 'source_file')
register.connect(inputnode, 'subjects_dir', bbregister, 'subjects_dir')
# Create a mask of the median coregistered to the anatomical image
mean2anat_mask = Node(fsl.BET(mask=True), name='mean2anat_mask')
register.connect(bbregister, 'registered_file', mean2anat_mask, 'in_file')
"""
use aparc+aseg's brain mask
"""
binarize = Node(fs.Binarize(min=0.5, out_type="nii.gz", dilate=1), name="binarize_aparc")
register.connect(fssource, ("aparc_aseg", get_aparc_aseg), binarize, "in_file")
stripper = Node(fsl.ApplyMask(), name='stripper')
register.connect(binarize, "binary_file", stripper, "mask_file")
register.connect(convert, 'out_file', stripper, 'in_file')
"""
Apply inverse transform to aparc file
"""
aparcxfm = Node(freesurfer.ApplyVolTransform(inverse=True,
interp='nearest'),
name='aparc_inverse_transform')
register.connect(inputnode, 'subjects_dir', aparcxfm, 'subjects_dir')
register.connect(bbregister, 'out_reg_file', aparcxfm, 'reg_file')
register.connect(fssource, ('aparc_aseg', get_aparc_aseg),
aparcxfm, 'target_file')
register.connect(inputnode, 'mean_image', aparcxfm, 'source_file')
"""
Convert the BBRegister transformation to ANTS ITK format
"""
convert2itk = Node(C3dAffineTool(), name='convert2itk')
convert2itk.inputs.fsl2ras = True
convert2itk.inputs.itk_transform = True
register.connect(bbregister, 'out_fsl_file', convert2itk, 'transform_file')
register.connect(inputnode, 'mean_image', convert2itk, 'source_file')
register.connect(stripper, 'out_file', convert2itk, 'reference_file')
"""
Compute registration between the subject's structural and MNI template
This is currently set to perform a very quick registration. However, the
registration can be made significantly more accurate for cortical
structures by increasing the number of iterations
All parameters are set using the example from:
#https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh
"""
reg = Node(ants.Registration(), name='antsRegister')
reg.inputs.output_transform_prefix = "output_"
reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.2, 3.0, 0.0)]
reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[100, 30, 20]]
reg.inputs.dimension = 3
reg.inputs.write_composite_transform = True
reg.inputs.collapse_output_transforms = True
reg.inputs.initial_moving_transform_com = True
reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]]
reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
reg.inputs.convergence_window_size = [20] * 2 + [5]
reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]]
reg.inputs.sigma_units = ['vox'] * 3
reg.inputs.shrink_factors = [[3, 2, 1]]*2 + [[4, 2, 1]]
reg.inputs.use_estimate_learning_rate_once = [True] * 3
reg.inputs.use_histogram_matching = [False] * 2 + [True]
reg.inputs.winsorize_lower_quantile = 0.005
reg.inputs.winsorize_upper_quantile = 0.995
reg.inputs.args = '--float'
reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
reg.inputs.num_threads = 4
reg.plugin_args = {'qsub_args': '-pe orte 4',
'sbatch_args': '--mem=6G -c 4'}
register.connect(stripper, 'out_file', reg, 'moving_image')
register.connect(inputnode, 'target_image', reg, 'fixed_image')
"""
Concatenate the affine and ants transforms into a list
"""
pickfirst = lambda x: x[0]
merge = Node(Merge(2), iterfield=['in2'], name='mergexfm')
register.connect(convert2itk, 'itk_transform', merge, 'in2')
register.connect(reg, ('composite_transform', pickfirst), merge, 'in1')
"""
Transform the mean image. First to anatomical and then to target
"""
warpmean = Node(ants.ApplyTransforms(), name='warpmean')
warpmean.inputs.input_image_type = 0
warpmean.inputs.interpolation = 'Linear'
warpmean.inputs.invert_transform_flags = [False, False]
warpmean.inputs.terminal_output = 'file'
warpmean.inputs.args = '--float'
#warpmean.inputs.num_threads = 4
#warpmean.plugin_args = {'sbatch_args': '--mem=4G -c 4'}
"""
Transform the remaining images. First to anatomical and then to target
"""
warpall = pe.MapNode(ants.ApplyTransforms(),
iterfield=['input_image'],
name='warpall')
warpall.inputs.input_image_type = 0
warpall.inputs.interpolation = 'Linear'
warpall.inputs.invert_transform_flags = [False, False]
warpall.inputs.terminal_output = 'file'
warpall.inputs.args = '--float'
warpall.inputs.num_threads = 2
warpall.plugin_args = {'sbatch_args': '--mem=6G -c 2'}
"""
Connect the warp inputs and route the transformed outputs
"""
register.connect(warpmean, 'output_image', outputnode, 'transformed_mean')
register.connect(warpall, 'output_image', outputnode, 'transformed_files')
register.connect(inputnode, 'target_image', warpmean, 'reference_image')
register.connect(inputnode, 'mean_image', warpmean, 'input_image')
register.connect(merge, 'out', warpmean, 'transforms')
register.connect(inputnode, 'target_image', warpall, 'reference_image')
register.connect(inputnode, 'source_files', warpall, 'input_image')
register.connect(merge, 'out', warpall, 'transforms')
"""
Assign all the output files
"""
register.connect(reg, 'warped_image', outputnode, 'anat2target')
register.connect(aparcxfm, 'transformed_file',
outputnode, 'aparc')
register.connect(bbregister, 'out_fsl_file',
outputnode, 'func2anat_transform')
register.connect(bbregister, 'out_reg_file',
outputnode, 'out_reg_file')
register.connect(bbregister, 'min_cost_file',
outputnode, 'min_cost_file')
register.connect(mean2anat_mask, 'mask_file',
outputnode, 'mean2anat_mask')
register.connect(reg, 'composite_transform',
outputnode, 'anat2target_transform')
register.connect(merge, 'out', outputnode, 'transforms')
return register
"""
Get info for a given subject
"""
def get_subjectinfo(subject_id, base_dir, task_id, model_id):
"""Get info for a given subject
Parameters
----------
subject_id : string
Subject identifier (e.g., sub001)
base_dir : string
Path to base directory of the dataset
task_id : int
Which task to process
model_id : int
Which model to process
Returns
-------
run_ids : list of ints
Run numbers
conds : list of str
Condition names
TR : float
Repetition time
"""
from glob import glob
import os
import numpy as np
condition_info = []
cond_file = os.path.join(base_dir, 'models', 'model%03d' % model_id,
'condition_key.txt')
with open(cond_file, 'rt') as fp:
for line in fp:
info = line.strip().split()
condition_info.append([info[0], info[1], ' '.join(info[2:])])
if len(condition_info) == 0:
raise ValueError('No condition info found in %s' % cond_file)
taskinfo = np.array(condition_info)
n_tasks = len(np.unique(taskinfo[:, 0]))
conds = []
run_ids = []
if task_id > n_tasks:
raise ValueError('Task id %d does not exist' % task_id)
for idx in range(n_tasks):
taskidx = np.where(taskinfo[:, 0] == 'task%03d' % (idx + 1))
conds.append([condition.replace(' ', '_') for condition
in taskinfo[taskidx[0], 2]]) # if 'junk' not in condition])
files = sorted(glob(os.path.join(base_dir,
subject_id,
'BOLD',
'task%03d_run*' % (idx + 1))))
runs = [int(val[-3:]) for val in files]
run_ids.insert(idx, runs)
json_info = os.path.join(base_dir, subject_id, 'BOLD',
'task%03d_run%03d' % (task_id, run_ids[task_id - 1][0]),
'bold_scaninfo.json')
if os.path.exists(json_info):
import json
with open(json_info, 'rt') as fp:
data = json.load(fp)
TR = data['global']['const']['RepetitionTime']/1000.
else:
task_scan_key = os.path.join(base_dir, subject_id, 'BOLD',
'task%03d_run%03d' % (task_id, run_ids[task_id - 1][0]),
'scan_key.txt')
if os.path.exists(task_scan_key):
TR = np.genfromtxt(task_scan_key)[1]
else:
TR = np.genfromtxt(os.path.join(base_dir, 'scan_key.txt'))[1]
return run_ids[task_id - 1], conds[task_id - 1], TR
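# Illustrative return value for a hypothetical subject: ([1, 2], ['cond_a', 'cond_b'], 2.0),
# i.e. the run numbers for the requested task, its condition names, and the TR in seconds.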
"""
Analyzes an open fmri dataset
"""
def analyze_openfmri_dataset(data_dir, subject=None, model_id=None,
task_id=None, output_dir=None, subj_prefix='*',
hpcutoff=120., use_derivatives=True,
fwhm=6.0, subjects_dir=None, target=None):
"""Analyzes an open fmri dataset
Parameters
----------
data_dir : str
Path to the base data directory
work_dir : str
Nipype working directory (defaults to cwd)
"""
"""
Load nipype workflows
"""
preproc = create_featreg_preproc(whichvol='first')
modelfit = create_modelfit_workflow()
fixed_fx = create_fixed_effects_flow()
if subjects_dir:
registration = create_fs_reg_workflow()
else:
registration = create_reg_workflow()
"""
Remove the plotting connection so that plot iterables don't propagate
to the model stage
"""
preproc.disconnect(preproc.get_node('plot_motion'), 'out_file',
preproc.get_node('outputspec'), 'motion_plots')
"""
Set up openfmri data specific components
"""
subjects = sorted([path.split(os.path.sep)[-1] for path in
glob(os.path.join(data_dir, subj_prefix))])
infosource = pe.Node(niu.IdentityInterface(fields=['subject_id',
'model_id',
'task_id']),
name='infosource')
if len(subject) == 0:
infosource.iterables = [('subject_id', subjects),
('model_id', [model_id]),
('task_id', task_id)]
else:
infosource.iterables = [('subject_id',
[subjects[subjects.index(subj)] for subj in subject]),
('model_id', [model_id]),
('task_id', task_id)]
subjinfo = pe.Node(niu.Function(input_names=['subject_id', 'base_dir',
'task_id', 'model_id'],
output_names=['run_id', 'conds', 'TR'],
function=get_subjectinfo),
name='subjectinfo')
subjinfo.inputs.base_dir = data_dir
"""
Return data components as anat, bold and behav
"""
contrast_file = os.path.join(data_dir, 'models', 'model%03d' % model_id,
'task_contrasts.txt')
has_contrast = os.path.exists(contrast_file)
if has_contrast:
datasource = pe.Node(nio.DataGrabber(infields=['subject_id', 'run_id',
'task_id', 'model_id'],
outfields=['anat', 'bold', 'behav',
'contrasts']),
name='datasource')
else:
datasource = pe.Node(nio.DataGrabber(infields=['subject_id', 'run_id',
'task_id', 'model_id'],
outfields=['anat', 'bold', 'behav']),
name='datasource')
datasource.inputs.base_directory = data_dir
datasource.inputs.template = '*'
if has_contrast:
datasource.inputs.field_template = {'anat': '%s/anatomy/T1_001.nii.gz',
'bold': '%s/BOLD/task%03d_r*/bold.nii.gz',
'behav': ('%s/model/model%03d/onsets/task%03d_'
'run%03d/cond*.txt'),
'contrasts': ('models/model%03d/'
'task_contrasts.txt')}
datasource.inputs.template_args = {'anat': [['subject_id']],
'bold': [['subject_id', 'task_id']],
'behav': [['subject_id', 'model_id',
'task_id', 'run_id']],
'contrasts': [['model_id']]}
else:
datasource.inputs.field_template = {'anat': '%s/anatomy/T1_001.nii.gz',
'bold': '%s/BOLD/task%03d_r*/bold.nii.gz',
'behav': ('%s/model/model%03d/onsets/task%03d_'
'run%03d/cond*.txt')}
datasource.inputs.template_args = {'anat': [['subject_id']],
'bold': [['subject_id', 'task_id']],
'behav': [['subject_id', 'model_id',
'task_id', 'run_id']]}
datasource.inputs.sort_filelist = True
"""
Create meta workflow
"""
wf = pe.Workflow(name='openfmri')
wf.connect(infosource, 'subject_id', subjinfo, 'subject_id')
wf.connect(infosource, 'model_id', subjinfo, 'model_id')
wf.connect(infosource, 'task_id', subjinfo, 'task_id')
wf.connect(infosource, 'subject_id', datasource, 'subject_id')
wf.connect(infosource, 'model_id', datasource, 'model_id')
wf.connect(infosource, 'task_id', datasource, 'task_id')
wf.connect(subjinfo, 'run_id', datasource, 'run_id')
wf.connect([(datasource, preproc, [('bold', 'inputspec.func')]),
])
def get_highpass(TR, hpcutoff):
return hpcutoff / (2 * TR)
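# Worked example (illustrative): with the default hpcutoff of 120 s and a TR of
# 2 s, get_highpass returns 120 / (2 * 2) = 30, i.e. the cutoff re-expressed in
# volumes (half-periods of the scan), which is the unit the FSL-based
# preprocessing workflow expects for its highpass input.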
gethighpass = pe.Node(niu.Function(input_names=['TR', 'hpcutoff'],
output_names=['highpass'],
function=get_highpass),
name='gethighpass')
wf.connect(subjinfo, 'TR', gethighpass, 'TR')
wf.connect(gethighpass, 'highpass', preproc, 'inputspec.highpass')
"""
Setup a basic set of contrasts, a t-test per condition
"""
def get_contrasts(contrast_file, task_id, conds):
import numpy as np
import os
contrast_def = []
if os.path.exists(contrast_file):
with open(contrast_file, 'rt') as fp:
contrast_def.extend([np.array(row.split()) for row in fp.readlines() if row.strip()])
contrasts = []
for row in contrast_def:
if row[0] != 'task%03d' % task_id:
continue
con = [row[1], 'T', ['cond%03d' % (i + 1) for i in range(len(conds))],
row[2:].astype(float).tolist()]
contrasts.append(con)
# add auto contrasts for each column
for i, cond in enumerate(conds):
con = [cond, 'T', ['cond%03d' % (i + 1)], [1]]
contrasts.append(con)
return contrasts
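# Illustrative output (hypothetical condition names): each contrast follows the
# nipype/FSL convention [name, 'T', [regressor names], [weights]], e.g.
# ['faces', 'T', ['cond001'], [1]] for the auto-generated per-condition t-tests.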
contrastgen = pe.Node(niu.Function(input_names=['contrast_file',
'task_id', 'conds'],
output_names=['contrasts'],
function=get_contrasts),
name='contrastgen')
art = pe.MapNode(interface=ra.ArtifactDetect(use_differences=[True, False],
use_norm=True,
norm_threshold=1,
zintensity_threshold=3,
parameter_source='FSL',
mask_type='file'),
iterfield=['realigned_files', 'realignment_parameters',
'mask_file'],
name="art")
modelspec = pe.Node(interface=model.SpecifyModel(),
name="modelspec")
modelspec.inputs.input_units = 'secs'
def check_behav_list(behav, run_id, conds):
from nipype.external import six
import numpy as np
num_conds = len(conds)
if isinstance(behav, six.string_types):
behav = [behav]
behav_array = np.array(behav).flatten()
num_elements = behav_array.shape[0]
return behav_array.reshape(num_elements // num_conds, num_conds).tolist()
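# Illustrative example: with 2 conditions and 6 onset files collected over 3 runs,
# the flat list is reshaped into 3 rows of 2 files, i.e. one row of condition
# files per run, matching the per-run layout expected downstream for event_files.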
reshape_behav = pe.Node(niu.Function(input_names=['behav', 'run_id', 'conds'],
output_names=['behav'],
function=check_behav_list),
name='reshape_behav')
wf.connect(subjinfo, 'TR', modelspec, 'time_repetition')
wf.connect(datasource, 'behav', reshape_behav, 'behav')
wf.connect(subjinfo, 'run_id', reshape_behav, 'run_id')
wf.connect(subjinfo, 'conds', reshape_behav, 'conds')
wf.connect(reshape_behav, 'behav', modelspec, 'event_files')
wf.connect(subjinfo, 'TR', modelfit, 'inputspec.interscan_interval')
wf.connect(subjinfo, 'conds', contrastgen, 'conds')
if has_contrast:
wf.connect(datasource, 'contrasts', contrastgen, 'contrast_file')
else:
contrastgen.inputs.contrast_file = ''
wf.connect(infosource, 'task_id', contrastgen, 'task_id')
wf.connect(contrastgen, 'contrasts', modelfit, 'inputspec.contrasts')
wf.connect([(preproc, art, [('outputspec.motion_parameters',
'realignment_parameters'),
('outputspec.realigned_files',
'realigned_files'),
('outputspec.mask', 'mask_file')]),
(preproc, modelspec, [('outputspec.highpassed_files',
'functional_runs'),
('outputspec.motion_parameters',
'realignment_parameters')]),
(art, modelspec, [('outlier_files', 'outlier_files')]),
(modelspec, modelfit, [('session_info',
'inputspec.session_info')]),
(preproc, modelfit, [('outputspec.highpassed_files',
'inputspec.functional_data')])
])
# Compute TSNR on realigned data, regressing out polynomials up to order 2
tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
wf.connect(preproc, "outputspec.realigned_files", tsnr, "in_file")
# Compute the median image across runs
calc_median = Node(Function(input_names=['in_files'],
output_names=['median_file'],
function=median,
imports=imports),
name='median')
wf.connect(tsnr, 'detrended_file', calc_median, 'in_files')
"""
Reorder the copes so that they combine across runs
"""
def sort_copes(copes, varcopes, contrasts):
import numpy as np
if not isinstance(copes, list):
copes = [copes]
varcopes = [varcopes]
num_copes = len(contrasts)
n_runs = len(copes)
all_copes = np.array(copes).flatten()
all_varcopes = np.array(varcopes).flatten()
outcopes = all_copes.reshape(len(all_copes) // num_copes, num_copes).T.tolist()
outvarcopes = all_varcopes.reshape(len(all_varcopes) // num_copes, num_copes).T.tolist()
return outcopes, outvarcopes, n_runs
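# Illustrative example: with 3 runs and 2 contrasts, the 6 incoming cope files are
# regrouped into 2 lists of 3 (one list per contrast, spanning runs), which is the
# grouping the fixed-effects flow then combines across runs.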
cope_sorter = pe.Node(niu.Function(input_names=['copes', 'varcopes',
'contrasts'],
output_names=['copes', 'varcopes',
'n_runs'],
function=sort_copes),
name='cope_sorter')
pickfirst = lambda x: x[0]
wf.connect(contrastgen, 'contrasts', cope_sorter, 'contrasts')
wf.connect([(preproc, fixed_fx, [(('outputspec.mask', pickfirst),
'flameo.mask_file')]),
(modelfit, cope_sorter, [('outputspec.copes', 'copes')]),
(modelfit, cope_sorter, [('outputspec.varcopes', 'varcopes')]),
(cope_sorter, fixed_fx, [('copes', 'inputspec.copes'),
('varcopes', 'inputspec.varcopes'),
('n_runs', 'l2model.num_copes')]),
(modelfit, fixed_fx, [('outputspec.dof_file',
'inputspec.dof_files'),
])
])
wf.connect(calc_median, 'median_file', registration, 'inputspec.mean_image')
if subjects_dir:
wf.connect(infosource, 'subject_id', registration, 'inputspec.subject_id')
registration.inputs.inputspec.subjects_dir = subjects_dir
registration.inputs.inputspec.target_image = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
if target:
registration.inputs.inputspec.target_image = target
else:
wf.connect(datasource, 'anat', registration, 'inputspec.anatomical_image')
registration.inputs.inputspec.target_image = fsl.Info.standard_image('MNI152_T1_2mm.nii.gz')
registration.inputs.inputspec.target_image_brain = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
registration.inputs.inputspec.config_file = 'T1_2_MNI152_2mm'
def merge_files(copes, varcopes, zstats):
out_files = []
splits = []
out_files.extend(copes)
splits.append(len(copes))
out_files.extend(varcopes)
splits.append(len(varcopes))
out_files.extend(zstats)
splits.append(len(zstats))
return out_files, splits
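# Illustrative example: for 2 copes, 2 varcopes and 2 zstats, out_files is the
# 6-element concatenation and splits is [2, 2, 2]; split_files (defined below)
# uses the same splits to recover the three groups after registration.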
mergefunc = pe.Node(niu.Function(input_names=['copes', 'varcopes',
'zstats'],
output_names=['out_files', 'splits'],
function=merge_files),
name='merge_files')
wf.connect([(fixed_fx.get_node('outputspec'), mergefunc,
[('copes', 'copes'),
('varcopes', 'varcopes'),
('zstats', 'zstats'),
])])
wf.connect(mergefunc, 'out_files', registration, 'inputspec.source_files')
def split_files(in_files, splits):
copes = in_files[:splits[0]]
varcopes = in_files[splits[0]:(splits[0] + splits[1])]
zstats = in_files[(splits[0] + splits[1]):]
return copes, varcopes, zstats
splitfunc = pe.Node(niu.Function(input_names=['in_files', 'splits'],
output_names=['copes', 'varcopes',
'zstats'],
function=split_files),
name='split_files')
wf.connect(mergefunc, 'splits', splitfunc, 'splits')
wf.connect(registration, 'outputspec.transformed_files',
splitfunc, 'in_files')
if subjects_dir:
get_roi_mean = pe.MapNode(fs.SegStats(default_color_table=True),
iterfield=['in_file'], name='get_aparc_means')
get_roi_mean.inputs.avgwf_txt_file = True
wf.connect(fixed_fx.get_node('outputspec'), 'copes', get_roi_mean, 'in_file')
wf.connect(registration, 'outputspec.aparc', get_roi_mean, 'segmentation_file')
get_roi_tsnr = pe.MapNode(fs.SegStats(default_color_table=True),
iterfield=['in_file'], name='get_aparc_tsnr')
get_roi_tsnr.inputs.avgwf_txt_file = True
wf.connect(tsnr, 'tsnr_file', get_roi_tsnr, 'in_file')
wf.connect(registration, 'outputspec.aparc', get_roi_tsnr, 'segmentation_file')
"""
Connect to a datasink
"""
def get_subs(subject_id, conds, run_id, model_id, task_id):
subs = [('_subject_id_%s_' % subject_id, '')]
subs.append(('_model_id_%d' % model_id, 'model%03d' % model_id))
subs.append(('task_id_%d/' % task_id, '/task%03d_' % task_id))
subs.append(('bold_dtype_mcf_mask_smooth_mask_gms_tempfilt_mean_warp',
'mean'))
subs.append(('bold_dtype_mcf_mask_smooth_mask_gms_tempfilt_mean_flirt',
'affine'))
for i in range(len(conds)):
subs.append(('_flameo%d/cope1.' % i, 'cope%02d.' % (i + 1)))
subs.append(('_flameo%d/varcope1.' % i, 'varcope%02d.' % (i + 1)))
subs.append(('_flameo%d/zstat1.' % i, 'zstat%02d.' % (i + 1)))
subs.append(('_flameo%d/tstat1.' % i, 'tstat%02d.' % (i + 1)))
subs.append(('_flameo%d/res4d.' % i, 'res4d%02d.' % (i + 1)))
subs.append(('_warpall%d/cope1_warp.' % i,
'cope%02d.' % (i + 1)))
subs.append(('_warpall%d/varcope1_warp.' % (len(conds) + i),
'varcope%02d.' % (i + 1)))
subs.append(('_warpall%d/zstat1_warp.' % (2 * len(conds) + i),
'zstat%02d.' % (i + 1)))
subs.append(('_warpall%d/cope1_trans.' % i,
'cope%02d.' % (i + 1)))
subs.append(('_warpall%d/varcope1_trans.' % (len(conds) + i),
'varcope%02d.' % (i + 1)))
subs.append(('_warpall%d/zstat1_trans.' % (2 * len(conds) + i),
'zstat%02d.' % (i + 1)))
subs.append(('__get_aparc_means%d/' % i, '/cope%02d_' % (i + 1)))
for i, run_num in enumerate(run_id):
subs.append(('__get_aparc_tsnr%d/' % i, '/run%02d_' % run_num))
subs.append(('__art%d/' % i, '/run%02d_' % run_num))
subs.append(('__dilatemask%d/' % i, '/run%02d_' % run_num))
subs.append(('__realign%d/' % i, '/run%02d_' % run_num))
subs.append(('__modelgen%d/' % i, '/run%02d_' % run_num))
subs.append(('/model%03d/task%03d/' % (model_id, task_id), '/'))
subs.append(('/model%03d/task%03d_' % (model_id, task_id), '/'))
subs.append(('_bold_dtype_mcf_bet_thresh_dil', '_mask'))
subs.append(('_output_warped_image', '_anat2target'))
subs.append(('median_flirt_brain_mask', 'median_brain_mask'))
subs.append(('median_bbreg_brain_mask', 'median_brain_mask'))
return subs
subsgen = pe.Node(niu.Function(input_names=['subject_id', 'conds', 'run_id',
'model_id', 'task_id'],
output_names=['substitutions'],
function=get_subs),
name='subsgen')
wf.connect(subjinfo, 'run_id', subsgen, 'run_id')
datasink = pe.Node(interface=nio.DataSink(),
name="datasink")
wf.connect(infosource, 'subject_id', datasink, 'container')
wf.connect(infosource, 'subject_id', subsgen, 'subject_id')
wf.connect(infosource, 'model_id', subsgen, 'model_id')
wf.connect(infosource, 'task_id', subsgen, 'task_id')
wf.connect(contrastgen, 'contrasts', subsgen, 'conds')
wf.connect(subsgen, 'substitutions', datasink, 'substitutions')
wf.connect([(fixed_fx.get_node('outputspec'), datasink,
[('res4d', 'res4d'),
('copes', 'copes'),
('varcopes', 'varcopes'),
('zstats', 'zstats'),
('tstats', 'tstats')])
])
wf.connect([(modelfit.get_node('modelgen'), datasink,
[('design_cov', 'qa.model'),
('design_image', 'qa.model.@matrix_image'),
('design_file', 'qa.model.@matrix'),
])])
wf.connect([(preproc, datasink, [('outputspec.motion_parameters',
'qa.motion'),
('outputspec.motion_plots',
'qa.motion.plots'),
('outputspec.mask', 'qa.mask')])])
wf.connect(registration, 'outputspec.mean2anat_mask', datasink, 'qa.mask.mean2anat')
wf.connect(art, 'norm_files', datasink, 'qa.art.@norm')
wf.connect(art, 'intensity_files', datasink, 'qa.art.@intensity')
wf.connect(art, 'outlier_files', datasink, 'qa.art.@outlier_files')
wf.connect(registration, 'outputspec.anat2target', datasink, 'qa.anat2target')
wf.connect(tsnr, 'tsnr_file', datasink, 'qa.tsnr.@map')
if subjects_dir:
wf.connect(registration, 'outputspec.min_cost_file', datasink, 'qa.mincost')
wf.connect([(get_roi_tsnr, datasink, [('avgwf_txt_file', 'qa.tsnr'),
('summary_file', 'qa.tsnr.@summary')])])
wf.connect([(get_roi_mean, datasink, [('avgwf_txt_file', 'copes.roi'),
('summary_file', 'copes.roi.@summary')])])
wf.connect([(splitfunc, datasink,
[('copes', 'copes.mni'),
('varcopes', 'varcopes.mni'),
('zstats', 'zstats.mni'),
])])
wf.connect(calc_median, 'median_file', datasink, 'mean')
wf.connect(registration, 'outputspec.transformed_mean', datasink, 'mean.mni')
wf.connect(registration, 'outputspec.func2anat_transform', datasink, 'xfm.mean2anat')
wf.connect(registration, 'outputspec.anat2target_transform', datasink, 'xfm.anat2target')
"""
Set processing parameters
"""
preproc.inputs.inputspec.fwhm = fwhm
gethighpass.inputs.hpcutoff = hpcutoff
modelspec.inputs.high_pass_filter_cutoff = hpcutoff
modelfit.inputs.inputspec.bases = {'dgamma': {'derivs': use_derivatives}}
modelfit.inputs.inputspec.model_serial_correlations = True
modelfit.inputs.inputspec.film_threshold = 1000
datasink.inputs.base_directory = output_dir
return wf
"""
The following functions run the whole workflow.
"""
if __name__ == '__main__':
import argparse
defstr = ' (default %(default)s)'
parser = argparse.ArgumentParser(prog='fmri_openfmri.py',
description=__doc__)
parser.add_argument('-d', '--datasetdir', required=True)
parser.add_argument('-s', '--subject', default=[],
nargs='+', type=str,
help="Subject name (e.g. 'sub001')")
parser.add_argument('-m', '--model', default=1,
help="Model index" + defstr)
parser.add_argument('-x', '--subjectprefix', default='sub*',
help="Subject prefix" + defstr)
parser.add_argument('-t', '--task', default=1, #nargs='+',
type=int, help="Task index" + defstr)
parser.add_argument('--hpfilter', default=120.,
type=float, help="High pass filter cutoff (in secs)" + defstr)
parser.add_argument('--fwhm', default=6.,
type=float, help="Spatial FWHM" + defstr)
parser.add_argument('--derivatives', action="store_true",
help="Use derivatives" + defstr)
parser.add_argument("-o", "--output_dir", dest="outdir",
help="Output directory base")
parser.add_argument("-w", "--work_dir", dest="work_dir",
help="Output directory base")
parser.add_argument("-p", "--plugin", dest="plugin",
default='Linear',
help="Plugin to use")
parser.add_argument("--plugin_args", dest="plugin_args",
help="Plugin arguments")
parser.add_argument("--sd", dest="subjects_dir",
help="FreeSurfer subjects directory (if available)")
parser.add_argument("--target", dest="target_file",
help=("Target in MNI space. Best to use the MindBoggle "
"template - only used with FreeSurfer"
"OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz"))
args = parser.parse_args()
outdir = args.outdir
work_dir = os.getcwd()
if args.work_dir:
work_dir = os.path.abspath(args.work_dir)
if outdir:
outdir = os.path.abspath(outdir)
else:
outdir = os.path.join(work_dir, 'output')
outdir = os.path.join(outdir, 'model%02d' % int(args.model),
'task%03d' % int(args.task))
derivatives = args.derivatives
if derivatives is None:
derivatives = False
wf = analyze_openfmri_dataset(data_dir=os.path.abspath(args.datasetdir),
subject=args.subject,
model_id=int(args.model),
task_id=[int(args.task)],
subj_prefix=args.subjectprefix,
output_dir=outdir,
hpcutoff=args.hpfilter,
use_derivatives=derivatives,
fwhm=args.fwhm,
subjects_dir=args.subjects_dir,
target=args.target_file)
#wf.config['execution']['remove_unnecessary_outputs'] = False
wf.base_dir = work_dir
if args.plugin_args:
wf.run(args.plugin, plugin_args=eval(args.plugin_args))
else:
wf.run(args.plugin)
|
|
import asyncio
import datetime
import os
import discord
import dateutil.parser
from .. import utils, errors, cmd
from ..servermodule import ServerModule
from ..enums import PrivilegeLevel
class Voting(ServerModule):
MODULE_NAME = "Voting"
MODULE_SHORT_DESCRIPTION = "Facilities for simple votes/polls."
RECOMMENDED_CMD_NAMES = ["voting"]
_SECRET_TOKEN = utils.SecretToken()
_cmd_dict = {}
_HELP_SUMMARY = """
See `{modhelp}` for voting commands.
""".strip()
DEFAULT_SETTINGS = {
"enabled channels": []
}
async def _initialize(self, resources):
self._client = resources.client
self._res = resources
self._vote_instances = {} # FORMAT: {vote ID: voting objects}
self._load_settings()
return
def _load_settings(self):
# Every file that is not settings.json is a vote json file.
data_dir = self._res.data_directory
for file_name in os.listdir(data_dir):
if (file_name == "settings.json") or (not file_name.endswith(".json")):
continue
# File names are also the vote IDs
vote_instance = VoteInstance(json_data=utils.json_read(data_dir + file_name))
self._vote_instances[file_name[:-5]] = vote_instance
return
def _save_settings(self):
data_dir = self._res.data_directory
for (vote_ID, vote_obj) in self._vote_instances.items():
utils.jsonwrite(data_dir + vote_ID + ".json", data=vote_obj.get_json_serializable())
return
async def process_cmd(self, substr, msg, privilege_level):
if substr == "": # Default Case
substr = "newgame"
return await super(Voting, self).process_cmd(substr, msg, privilege_level)
@cmd.add(_cmd_dict, "rules")
async def _cmdf_rules(self, substr, msg, privilege_level):
"""`{cmd}` - View game rules."""
await self._client.send_msg(msg.channel, self._RULES_STRING)
return
########################
### GENERAL SERVICES ###
########################
class VoteInstance:
def __init__(self, **kwargs):
"""
Manages voting data.
To keep things simple, this class wraps a JSON data object.
It is initialized either with an existing JSON data object (json_data), or
with the other keyword arguments.
"""
# Set up defaults.
self._start_date = None
self._continuation_date = None
self._json_data = None
if "json_data" in kwargs:
json_data = kwargs["json_data"]
self._start_date = dateutil.parser.parse(json_data["start"])
if not json_data["continuation"] is None:
self._continuation_date = dateutil.parser.parse(json_data["continuation"])
self._json_data = json_data
else:
# Fill in default values for other arguments.
if not "title" in kwargs:
kwargs["title"] = "Untitled"
if not "description" in kwargs:
kwargs["description"] = "No description."
if not "period_seconds" in kwargs:
kwargs["period_seconds"] = 1
if not "period_days" in kwargs:
kwargs["period_days"] = 1
self._start_date = datetime.datetime.utcnow()
self._continuation_date = None
self._json_data = {
"title": kwargs["title"],
"description": kwargs["description"],
"start": self._start_date.isoformat(),
"continuation": None,
"period seconds": kwargs["period_seconds"],
"period days": kwargs["period_days"],
"votes": {},
# Structure of each vote:
# member id string: {
# "option id": string, # This matches the id under "options".
# }
"options": {},
# Structure of each option:
# option id string: {
# "name": string,
# }
"history": [], # Human-readable strings documenting important changes.
# These strings are not machine-read.
# This is useful for when there's a dispute (though it doesn't protect
# against malicious editing of the data files themselves).
}
return
# The returned object must *NOT* be edited.
def get_json_serializable(self):
return self._json_data
######################################
### GETTERS AND STATUS INFORMATION ###
######################################
@property
def title(self):
return self._json_data["title"]
@property
def description(self):
return self._json_data["description"]
@property
def period_seconds(self):
return self._json_data["period seconds"]
@property
def period_days(self):
return self._json_data["period days"]
def is_ended(self):
return self.get_period_elapsed() >= datetime.timedelta(days=self.period_days, seconds=self.period_seconds)
# RETURNS: timedelta object of the time elapsed.
def get_period_elapsed(self):
now_date = datetime.datetime.utcnow()
start_or_cont_date = self._continuation_date
if start_or_cont_date is None:
start_or_cont_date = self._start_date
return now_date - start_or_cont_date
# RETURNS: datetime object representing end date.
def get_end_date(self):
start_or_cont_date = self._continuation_date
if start_or_cont_date is None:
start_or_cont_date = self._start_date
return start_or_cont_date + datetime.timedelta(days=self.period_days, seconds=self.period_seconds)
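# Illustrative example: a vote continued with days=1 ends one day after the
# continuation timestamp, since the continuation date (when set) replaces the
# start date in this calculation.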
# RETURNS: The options dict representing all options.
def get_options_dict(self):
return self._json_data["options"]
# RETURNS: The votes dict representing all votes.
def get_votes_dict(self):
return self._json_data["votes"]
# RETURNS: The history list of human-readable event strings.
def get_history_list(self):
return self._json_data["history"]
def is_valid_option_id(self, option_id):
return option_id in self._json_data["options"]
###############
### SETTERS ###
###############
def set_title(self, new_title, *, executing_member=None):
buf = "Set title to '{}'.".format(new_title)
self._add_history(buf, executing_member=executing_member)
self._json_data["title"] = new_title
return
def set_description(self, new_desc, *, executing_member=None):
buf = "Set description to '{}'.".format(new_desc)
self._add_history(buf, executing_member=executing_member)
self._json_data["description"] = new_desc
return
#####################
### OTHER METHODS ###
#####################
# PRECONDITION: option_name is not an empty string.
def add_option(self, option_name, *, executing_member=None):
options_dict = self._json_data["options"]
new_option_id = -1
for (option_id, option_dict) in options_dict.items():
option_id = int(option_id)
if new_option_id < option_id:
new_option_id = option_id
new_option_id = str(new_option_id + 1)
options_dict[new_option_id] = {"name": option_name}
# Add into history.
buf = "Added option '{}' (ID: {}).".format(option_name, new_option_id)
self._add_history(buf, executing_member=executing_member)
return
# Executing member should be the voter themselves.
# PRECONDITION: The option exists.
def add_vote(self, option_id, voter_id, *, executing_member=None):
buf = "User ID {} voted option {}.".format(voter_id, option_id)
self._add_history(buf, executing_member=executing_member)
self._json_data["votes"][voter_id] = {"option id": option_id}
return
def end_vote(self, *, executing_member=None):
self._add_history("Ended the vote.", executing_member=executing_member)
new_period = self.get_period_elapsed()
self._json_data["period days"] = new_period.days
self._json_data["period seconds"] = new_period.seconds
return
# Note: If a vote is still running, you'd probably want to extend it instead.
# The continuation facilities are intended to allow continuation of a
# vote that has ended.
# PRECONDITION: days>=0, seconds>=0, and they can't both be zero.
def continue_vote(self, *, days=0, seconds=0, executing_member=None):
buf = "Continued the vote. days={}, sec={}".format(str(days), str(seconds))
self._add_history(buf, executing_member=executing_member)
self._continuation_date = datetime.datetime.utcnow()
self._json_data["continuation"] = self._continuation_date.isoformat()
self._json_data["period seconds"] = seconds
self._json_data["period days"] = days
return
# PRECONDITION: Vote hasn't ended.
# PRECONDITION: days>=0, seconds>=0, and they can't both be zero.
def extend_vote_from_now(self, *, days=0, seconds=0, executing_member=None):
buf = "Extended vote from now. days={}, sec={}".format(str(days), str(seconds))
self._add_history(buf, executing_member=executing_member)
new_period = self.get_period_elapsed() + datetime.timedelta(days=days, seconds=seconds)
self._json_data["period days"] = new_period.days
self._json_data["period seconds"] = new_period.seconds
return
# PRECONDITION: Vote hasn't ended.
# PRECONDITION: days>=0, seconds>=0, and they can't both be zero.
def extend_vote_from_start(self, *, days=0, seconds=0, executing_member=None):
buf = "Extended vote from start. days={}, sec={}".format(str(days), str(seconds))
self._add_history(buf, executing_member=executing_member)
self._json_data["period days"] = self.period_days + days
self._json_data["period seconds"] = self.period_seconds + seconds
return
########################
### PRIVATE SERVICES ###
########################
def _add_history(self, text, *, executing_member=None):
date_str = datetime.datetime.utcnow().isoformat()
buf = "({} id={} name={}): ".format(date_str, executing_member.id, executing_member.name) + text
self._json_data["history"].append(buf)
return
|
|
# Copyright 2017 New Vector Ltd
# Copyright 2019 Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Iterable, Mapping, Optional, Tuple, cast
from typing_extensions import Literal, TypedDict
from synapse.api.errors import StoreError
from synapse.logging.opentracing import log_kv, trace
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import LoggingTransaction
from synapse.types import JsonDict, JsonSerializable
from synapse.util import json_encoder
class RoomKey(TypedDict):
"""`KeyBackupData` in the Matrix spec.
https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3room_keyskeysroomidsessionid
"""
first_message_index: int
forwarded_count: int
is_verified: bool
session_data: JsonSerializable
class EndToEndRoomKeyStore(SQLBaseStore):
"""The store for end to end room key backups.
See https://spec.matrix.org/v1.1/client-server-api/#server-side-key-backups
As per the spec, backups are identified by an opaque version string. Internally,
version identifiers are assigned using incrementing integers. Non-numeric version
strings are treated as if they do not exist, since we would have never issued them.
"""
async def update_e2e_room_key(
self,
user_id: str,
version: str,
room_id: str,
session_id: str,
room_key: RoomKey,
) -> None:
"""Replaces the encrypted E2E room key for a given session in a given backup
Args:
user_id: the user whose backup we're setting
version: the version ID of the backup we're updating
room_id: the ID of the room whose keys we're setting
session_id: the session whose room_key we're setting
room_key: the room_key being set
Raises:
StoreError
"""
try:
version_int = int(version)
except ValueError:
# Our versions are all ints so if we can't convert it to an integer,
# it doesn't exist.
raise StoreError(404, "No backup with that version exists")
await self.db_pool.simple_update_one(
table="e2e_room_keys",
keyvalues={
"user_id": user_id,
"version": version_int,
"room_id": room_id,
"session_id": session_id,
},
updatevalues={
"first_message_index": room_key["first_message_index"],
"forwarded_count": room_key["forwarded_count"],
"is_verified": room_key["is_verified"],
"session_data": json_encoder.encode(room_key["session_data"]),
},
desc="update_e2e_room_key",
)
async def add_e2e_room_keys(
self, user_id: str, version: str, room_keys: Iterable[Tuple[str, str, RoomKey]]
) -> None:
"""Bulk add room keys to a given backup.
Args:
user_id: the user whose backup we're adding to
version: the version ID of the backup for the set of keys we're adding to
room_keys: the keys to add, in the form (roomID, sessionID, keyData)
"""
try:
version_int = int(version)
except ValueError:
# Our versions are all ints so if we can't convert it to an integer,
# it doesn't exist.
raise StoreError(404, "No backup with that version exists")
values = []
for (room_id, session_id, room_key) in room_keys:
values.append(
(
user_id,
version_int,
room_id,
session_id,
room_key["first_message_index"],
room_key["forwarded_count"],
room_key["is_verified"],
json_encoder.encode(room_key["session_data"]),
)
)
log_kv(
{
"message": "Set room key",
"room_id": room_id,
"session_id": session_id,
"room_key": room_key,
}
)
await self.db_pool.simple_insert_many(
table="e2e_room_keys",
keys=(
"user_id",
"version",
"room_id",
"session_id",
"first_message_index",
"forwarded_count",
"is_verified",
"session_data",
),
values=values,
desc="add_e2e_room_keys",
)
@trace
async def get_e2e_room_keys(
self,
user_id: str,
version: str,
room_id: Optional[str] = None,
session_id: Optional[str] = None,
) -> Dict[
Literal["rooms"], Dict[str, Dict[Literal["sessions"], Dict[str, RoomKey]]]
]:
"""Bulk get the E2E room keys for a given backup, optionally filtered to a given
room, or a given session.
Args:
user_id: the user whose backup we're querying
version: the version ID of the backup for the set of keys we're querying
room_id: Optional. the ID of the room whose keys we're querying, if any.
If not specified, we return the keys for all the rooms in the backup.
session_id: Optional. the session whose room_key we're querying, if any.
If specified, we also require the room_id to be specified.
If not specified, we return all the keys in this version of
the backup (or for the specified room)
Returns:
A dict giving the session_data and message metadata for these room keys.
`{"rooms": {room_id: {"sessions": {session_id: room_key}}}}`
"""
try:
version_int = int(version)
except ValueError:
return {"rooms": {}}
keyvalues = {"user_id": user_id, "version": version_int}
if room_id:
keyvalues["room_id"] = room_id
if session_id:
keyvalues["session_id"] = session_id
rows = await self.db_pool.simple_select_list(
table="e2e_room_keys",
keyvalues=keyvalues,
retcols=(
"user_id",
"room_id",
"session_id",
"first_message_index",
"forwarded_count",
"is_verified",
"session_data",
),
desc="get_e2e_room_keys",
)
sessions: Dict[
Literal["rooms"], Dict[str, Dict[Literal["sessions"], Dict[str, RoomKey]]]
] = {"rooms": {}}
for row in rows:
room_entry = sessions["rooms"].setdefault(row["room_id"], {"sessions": {}})
room_entry["sessions"][row["session_id"]] = {
"first_message_index": row["first_message_index"],
"forwarded_count": row["forwarded_count"],
# is_verified must be returned to the client as a boolean
"is_verified": bool(row["is_verified"]),
"session_data": db_to_json(row["session_data"]),
}
return sessions
async def get_e2e_room_keys_multi(
self,
user_id: str,
version: str,
room_keys: Mapping[str, Mapping[Literal["sessions"], Iterable[str]]],
) -> Dict[str, Dict[str, RoomKey]]:
"""Get multiple room keys at a time. The difference between this function and
get_e2e_room_keys is that this function can be used to retrieve
multiple specific keys at a time, whereas get_e2e_room_keys is used for
getting all the keys in a backup version, all the keys for a room, or a
specific key.
Args:
user_id: the user whose backup we're querying
version: the version ID of the backup we're querying about
room_keys: a map from room ID -> {"sessions": [session ids]}
indicating the session IDs that we want to query
Returns:
A map of room IDs to session IDs to room key
"""
try:
version_int = int(version)
except ValueError:
# Our versions are all ints so if we can't convert it to an integer,
# it doesn't exist.
return {}
return await self.db_pool.runInteraction(
"get_e2e_room_keys_multi",
self._get_e2e_room_keys_multi_txn,
user_id,
version_int,
room_keys,
)
@staticmethod
def _get_e2e_room_keys_multi_txn(
txn: LoggingTransaction,
user_id: str,
version: int,
room_keys: Mapping[str, Mapping[Literal["sessions"], Iterable[str]]],
) -> Dict[str, Dict[str, RoomKey]]:
if not room_keys:
return {}
where_clauses = []
params = [user_id, version]
for room_id, room in room_keys.items():
sessions = list(room["sessions"])
if not sessions:
continue
params.append(room_id)
params.extend(sessions)
where_clauses.append(
"(room_id = ? AND session_id IN (%s))"
% (",".join(["?" for _ in sessions]),)
)
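# e.g. for a room with two requested session IDs this appends
# "(room_id = ? AND session_id IN (?,?))"; the per-room clauses are
# OR-ed together in the SELECT below.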
# check if we're actually querying something
if not where_clauses:
return {}
sql = """
SELECT room_id, session_id, first_message_index, forwarded_count,
is_verified, session_data
FROM e2e_room_keys
WHERE user_id = ? AND version = ? AND (%s)
""" % (
" OR ".join(where_clauses)
)
txn.execute(sql, params)
ret: Dict[str, Dict[str, RoomKey]] = {}
for row in txn:
room_id = row[0]
session_id = row[1]
ret.setdefault(room_id, {})
ret[room_id][session_id] = {
"first_message_index": row[2],
"forwarded_count": row[3],
"is_verified": row[4],
"session_data": db_to_json(row[5]),
}
return ret
async def count_e2e_room_keys(self, user_id: str, version: str) -> int:
"""Get the number of keys in a backup version.
Args:
user_id: the user whose backup we're querying
version: the version ID of the backup we're querying about
"""
try:
version_int = int(version)
except ValueError:
# Our versions are all ints so if we can't convert it to an integer,
# it doesn't exist.
return 0
return await self.db_pool.simple_select_one_onecol(
table="e2e_room_keys",
keyvalues={"user_id": user_id, "version": version_int},
retcol="COUNT(*)",
desc="count_e2e_room_keys",
)
@trace
async def delete_e2e_room_keys(
self,
user_id: str,
version: str,
room_id: Optional[str] = None,
session_id: Optional[str] = None,
) -> None:
"""Bulk delete the E2E room keys for a given backup, optionally filtered to a given
room or a given session.
Args:
user_id: the user whose backup we're deleting from
version: the version ID of the backup for the set of keys we're deleting
room_id: Optional. the ID of the room whose keys we're deleting, if any.
If not specified, we delete the keys for all the rooms in the backup.
session_id: Optional. the session whose room_key we're querying, if any.
If specified, we also require the room_id to be specified.
If not specified, we delete all the keys in this version of
the backup (or for the specified room)
"""
try:
version_int = int(version)
except ValueError:
# Our versions are all ints so if we can't convert it to an integer,
# it doesn't exist.
return
keyvalues = {"user_id": user_id, "version": version_int}
if room_id:
keyvalues["room_id"] = room_id
if session_id:
keyvalues["session_id"] = session_id
await self.db_pool.simple_delete(
table="e2e_room_keys", keyvalues=keyvalues, desc="delete_e2e_room_keys"
)
@staticmethod
def _get_current_version(txn: LoggingTransaction, user_id: str) -> int:
txn.execute(
"SELECT MAX(version) FROM e2e_room_keys_versions "
"WHERE user_id=? AND deleted=0",
(user_id,),
)
# `SELECT MAX() FROM ...` will always return 1 row. The value in that row will
# be `NULL` when there are no available versions.
row = cast(Tuple[Optional[int]], txn.fetchone())
if row[0] is None:
raise StoreError(404, "No current backup version")
return row[0]
async def get_e2e_room_keys_version_info(
self, user_id: str, version: Optional[str] = None
) -> JsonDict:
"""Get info metadata about a version of our room_keys backup.
Args:
user_id: the user whose backup we're querying
version: Optional. the version ID of the backup we're querying about
If missing, we return the information about the current version.
Raises:
StoreError: with code 404 if there are no e2e_room_keys_versions present
Returns:
A dict giving the info metadata for this backup version, with
fields including:
version(str)
algorithm(str)
auth_data(object): opaque dict supplied by the client
etag(int): tag of the keys in the backup
"""
def _get_e2e_room_keys_version_info_txn(txn: LoggingTransaction) -> JsonDict:
if version is None:
this_version = self._get_current_version(txn, user_id)
else:
try:
this_version = int(version)
except ValueError:
# Our versions are all ints so if we can't convert it to an integer,
# it isn't there.
raise StoreError(404, "No backup with that version exists")
result = self.db_pool.simple_select_one_txn(
txn,
table="e2e_room_keys_versions",
keyvalues={"user_id": user_id, "version": this_version, "deleted": 0},
retcols=("version", "algorithm", "auth_data", "etag"),
allow_none=False,
)
assert result is not None # see comment on `simple_select_one_txn`
result["auth_data"] = db_to_json(result["auth_data"])
result["version"] = str(result["version"])
if result["etag"] is None:
result["etag"] = 0
return result
return await self.db_pool.runInteraction(
"get_e2e_room_keys_version_info", _get_e2e_room_keys_version_info_txn
)
@trace
async def create_e2e_room_keys_version(self, user_id: str, info: JsonDict) -> str:
"""Atomically creates a new version of this user's e2e_room_keys store
with the given version info.
Args:
user_id: the user whose backup we're creating a version
info: the info about the backup version to be created
Returns:
The newly created version ID
"""
def _create_e2e_room_keys_version_txn(txn: LoggingTransaction) -> str:
txn.execute(
"SELECT MAX(version) FROM e2e_room_keys_versions WHERE user_id=?",
(user_id,),
)
current_version = cast(Tuple[Optional[int]], txn.fetchone())[0]
if current_version is None:
current_version = 0
new_version = current_version + 1
self.db_pool.simple_insert_txn(
txn,
table="e2e_room_keys_versions",
values={
"user_id": user_id,
"version": new_version,
"algorithm": info["algorithm"],
"auth_data": json_encoder.encode(info["auth_data"]),
},
)
return str(new_version)
return await self.db_pool.runInteraction(
"create_e2e_room_keys_version_txn", _create_e2e_room_keys_version_txn
)
@trace
async def update_e2e_room_keys_version(
self,
user_id: str,
version: str,
info: Optional[JsonDict] = None,
version_etag: Optional[int] = None,
) -> None:
"""Update a given backup version
Args:
user_id: the user whose backup version we're updating
version: the version ID of the backup version we're updating
info: the new backup version info to store. If None, then the backup
version info is not updated.
version_etag: etag of the keys in the backup. If None, then the etag
is not updated.
"""
updatevalues: Dict[str, object] = {}
if info is not None and "auth_data" in info:
updatevalues["auth_data"] = json_encoder.encode(info["auth_data"])
if version_etag is not None:
updatevalues["etag"] = version_etag
if updatevalues:
try:
version_int = int(version)
except ValueError:
# Our versions are all ints so if we can't convert it to an integer,
# it doesn't exist.
raise StoreError(404, "No backup with that version exists")
await self.db_pool.simple_update_one(
table="e2e_room_keys_versions",
keyvalues={"user_id": user_id, "version": version_int},
updatevalues=updatevalues,
desc="update_e2e_room_keys_version",
)
@trace
async def delete_e2e_room_keys_version(
self, user_id: str, version: Optional[str] = None
) -> None:
"""Delete a given backup version of the user's room keys.
Doesn't delete their actual key data.
Args:
user_id: the user whose backup version we're deleting
version: Optional. the version ID of the backup version we're deleting
If missing, we delete the current backup version info.
Raises:
StoreError: with code 404 if there are no e2e_room_keys_versions present,
or if the version requested doesn't exist.
"""
def _delete_e2e_room_keys_version_txn(txn: LoggingTransaction) -> None:
if version is None:
this_version = self._get_current_version(txn, user_id)
else:
try:
this_version = int(version)
except ValueError:
# Our versions are all ints so if we can't convert it to an integer,
# it isn't there.
raise StoreError(404, "No backup with that version exists")
self.db_pool.simple_delete_txn(
txn,
table="e2e_room_keys",
keyvalues={"user_id": user_id, "version": this_version},
)
self.db_pool.simple_update_one_txn(
txn,
table="e2e_room_keys_versions",
keyvalues={"user_id": user_id, "version": this_version},
updatevalues={"deleted": 1},
)
await self.db_pool.runInteraction(
"delete_e2e_room_keys_version", _delete_e2e_room_keys_version_txn
)
|
|
"""
desispec.io.meta
================
IO metadata functions.
"""
import os
import os.path
import datetime
import glob
import re
def findfile(filetype, night=None, expid=None, camera=None, brickname=None,
band=None, spectrograph=None, rawdata_dir=None, specprod_dir=None,
download=False, outdir=None):
"""Returns location where file should be
Args:
filetype : file type, typically the prefix, e.g. "frame" or "psf"
Args depending upon filetype:
night : YEARMMDD string
expid : integer exposure id
camera : 'b0' 'r1' .. 'z9'
brickname : brick name string
band : one of 'b','r','z' identifying the camera band
spectrograph : spectrograph number, 0-9
Options:
rawdata_dir : overrides $DESI_SPECTRO_DATA
specprod_dir : overrides $DESI_SPECTRO_REDUX/$SPECPROD/
download : if not found locally, try to fetch remotely
outdir : use this directory for output instead of canonical location
"""
#- NOTE: specprod_dir is the directory $DESI_SPECTRO_REDUX/$SPECPROD,
#- specprod is just the environment variable $SPECPROD
location = dict(
raw = '{rawdata_dir}/{night}/desi-{expid:08d}.fits.fz',
pix = '{rawdata_dir}/{night}/pix-{camera}-{expid:08d}.fits',
fiberflat = '{specprod_dir}/calib2d/{night}/fiberflat-{camera}-{expid:08d}.fits',
frame = '{specprod_dir}/exposures/{night}/{expid:08d}/frame-{camera}-{expid:08d}.fits',
cframe = '{specprod_dir}/exposures/{night}/{expid:08d}/cframe-{camera}-{expid:08d}.fits',
sky = '{specprod_dir}/exposures/{night}/{expid:08d}/sky-{camera}-{expid:08d}.fits',
stdstars = '{specprod_dir}/exposures/{night}/{expid:08d}/stdstars-{spectrograph:d}-{expid:08d}.fits',
calib = '{specprod_dir}/exposures/{night}/{expid:08d}/calib-{camera}-{expid:08d}.fits',
qa_data = '{specprod_dir}/exposures/{night}/{expid:08d}/qa-{camera}-{expid:08d}.yaml',
qa_data_exp = '{specprod_dir}/exposures/{night}/{expid:08d}/qa-{expid:08d}.yaml',
qa_sky_fig = '{specprod_dir}/exposures/{night}/{expid:08d}/qa-sky-{camera}-{expid:08d}.pdf',
qa_flux_fig = '{specprod_dir}/exposures/{night}/{expid:08d}/qa-flux-{camera}-{expid:08d}.pdf',
qa_calib = '{specprod_dir}/calib2d/{night}/qa-{camera}-{expid:08d}.yaml',
qa_calib_exp = '{specprod_dir}/calib2d/{night}/qa-{expid:08d}.yaml',
qa_flat_fig = '{specprod_dir}/calib2d/{night}/qa-flat-{camera}-{expid:08d}.pdf',
qa_ztruth = '{specprod_dir}/exposures/{night}/qa-ztruth-{night}.yaml',
qa_ztruth_fig = '{specprod_dir}/exposures/{night}/qa-ztruth-{night}.pdf',
### psf = '{specprod_dir}/exposures/{night}/{expid:08d}/psf-{camera}-{expid:08d}.fits',
psf = '{specprod_dir}/calib2d/{night}/psf-{camera}-{expid:08d}.fits',
fibermap = '{rawdata_dir}/{night}/fibermap-{expid:08d}.fits',
brick = '{specprod_dir}/bricks/{brickname}/brick-{band}-{brickname}.fits',
coadd = '{specprod_dir}/bricks/{brickname}/coadd-{band}-{brickname}.fits',
coadd_all = '{specprod_dir}/bricks/{brickname}/coadd-{brickname}.fits',
zbest = '{specprod_dir}/bricks/{brickname}/zbest-{brickname}.fits',
zspec = '{specprod_dir}/bricks/{brickname}/zspec-{brickname}.fits',
zcatalog = '{specprod_dir}/zcatalog-{specprod}.fits',
)
location['desi'] = location['raw']
#- Do we know about this kind of file?
if filetype not in location:
raise IOError("Unknown filetype {}; known types are {}".format(filetype, location.keys()))
#- Check for missing inputs
required_inputs = [i[0] for i in re.findall(r'\{([a-z_]+)(|[:0-9d]+)\}',location[filetype])]
if rawdata_dir is None and 'rawdata_dir' in required_inputs:
rawdata_dir = rawdata_root()
if specprod_dir is None and 'specprod_dir' in required_inputs:
specprod_dir = specprod_root()
if 'specprod' in required_inputs:
#- Replace / with _ in $SPECPROD so we can use it in a filename
specprod = os.getenv('SPECPROD').replace('/', '_')
else:
specprod = None
actual_inputs = {
'specprod_dir':specprod_dir, 'specprod':specprod,
'night':night, 'expid':expid, 'camera':camera, 'brickname':brickname,
'band':band, 'spectrograph':spectrograph
}
if 'rawdata_dir' in required_inputs:
actual_inputs['rawdata_dir'] = rawdata_dir
for i in required_inputs:
if actual_inputs[i] is None:
raise ValueError("Required input '{0}' is not set for type '{1}'!".format(i,filetype))
#- normpath to remove extraneous double slashes /a/b//c/d
filepath = os.path.normpath(location[filetype].format(**actual_inputs))
if outdir:
filepath = os.path.join(outdir, os.path.basename(filepath))
if download:
from .download import download
filepath = download(filepath,single_thread=True)[0]
return filepath
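# Illustrative usage (hypothetical values): findfile('frame', night='20201231',
# expid=123, camera='b0') resolves to
# $DESI_SPECTRO_REDUX/$SPECPROD/exposures/20201231/00000123/frame-b0-00000123.fits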
def get_raw_files(filetype, night, expid, rawdata_dir=None):
"""Get files for a specified exposure.
Uses :func:`findfile` to determine the valid file names for the specified type.
Any camera identifiers not matching the regular expression [brz][0-9] will be
silently ignored.
Args:
filetype(str): Type of files to get. Valid choices are 'frame','cframe','psf'.
night(str): Date string for the requested night in the format YYYYMMDD.
expid(int): Exposure number to get files for.
rawdata_dir(str): [optional] overrides $DESI_SPECTRO_DATA
    Returns:
        dict: Dictionary of found file names using camera id strings as keys, which are
            guaranteed to match the regular expression [brz][0-9]. Note that if exactly
            one matching file is found, that single file path is returned directly
            instead of a dictionary.
    """
glob_pattern = findfile(filetype, night, expid, camera='*', rawdata_dir=rawdata_dir)
literals = map(re.escape,glob_pattern.split('*'))
re_pattern = re.compile('([brz][0-9])'.join(literals))
listing = glob.glob(glob_pattern)
if len(listing) == 1:
return listing[0]
files = {}
    for entry in listing:
        found = re_pattern.match(entry)
        if found is None:
            #- Silently ignore camera ids that do not match [brz][0-9]
            continue
        files[found.group(1)] = entry
return files
def get_files(filetype, night, expid, specprod_dir=None):
"""Get files for a specified exposure.
Uses :func:`findfile` to determine the valid file names for the specified type.
Any camera identifiers not matching the regular expression [brz][0-9] will be
silently ignored.
Args:
filetype(str): Type of files to get. Valid choices are 'frame','cframe','psf'.
night(str): Date string for the requested night in the format YYYYMMDD.
expid(int): Exposure number to get files for.
specprod_dir(str): Path containing the exposures/ directory to use. If the value
is None, then the value of :func:`specprod_root` is used instead. Ignored
when raw is True.
Returns:
dict: Dictionary of found file names using camera id strings as keys, which are
guaranteed to match the regular expression [brz][0-9].
"""
glob_pattern = findfile(filetype, night, expid, camera='*', specprod_dir=specprod_dir)
literals = map(re.escape,glob_pattern.split('*'))
re_pattern = re.compile('([brz][0-9])'.join(literals))
files = { }
    for entry in glob.glob(glob_pattern):
        found = re_pattern.match(entry)
        if found is None:
            #- Silently ignore camera ids that do not match [brz][0-9]
            continue
        files[found.group(1)] = entry
return files
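#- Illustrative sketch (not part of this module): get_files() returns a dict
#- keyed by camera id, e.g. {'b0': '.../frame-b0-00012345.fits', ...}; the
#- helper below simply lists the cameras found for a (hypothetical) exposure.
def _example_frames_per_camera(night, expid):
    frames = get_files('frame', night, expid)
    return sorted(frames.keys())   #- e.g. ['b0', 'r0', 'z0']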
def validate_night(night):
"""Validates a night string and converts to a date.
Args:
night(str): Date string for the requested night in the format YYYYMMDD.
Returns:
datetime.date: Date object representing this night.
Raises:
RuntimeError: Badly formatted night string.
"""
try:
return datetime.datetime.strptime(night,'%Y%m%d').date()
except ValueError:
raise RuntimeError('Badly formatted night %s' % night)
def get_exposures(night, raw=False, rawdata_dir=None, specprod_dir=None):
"""Get a list of available exposures for the specified night.
Exposures are identified as correctly formatted subdirectory names within the
night directory, but no checks for valid contents of these exposure subdirectories
are performed.
Args:
night(str): Date string for the requested night in the format YYYYMMDD.
raw(bool): Returns raw exposures if set, otherwise returns processed exposures.
rawdata_dir(str): [optional] overrides $DESI_SPECTRO_DATA
specprod_dir(str): Path containing the exposures/ directory to use. If the value
is None, then the value of :func:`specprod_root` is used instead. Ignored
when raw is True.
Returns:
        list: List of integer exposure numbers available for the specified night. The
            list will be empty if the night directory exists but does not contain
            any exposures.
Raises:
RuntimeError: Badly formatted night date string or non-existent night.
"""
date = validate_night(night)
if raw:
if rawdata_dir is None:
rawdata_dir = rawdata_root()
night_path = os.path.join(rawdata_dir, night)
else:
if specprod_dir is None:
specprod_dir = specprod_root()
night_path = os.path.join(specprod_dir,'exposures',night)
if not os.path.exists(night_path):
raise RuntimeError('Non-existent night %s' % night)
exposures = []
if raw:
fpat = re.compile(r'.*fibermap-(.*).fits')
for entry in glob.glob(os.path.join(night_path,'fibermap-*.fits')):
mat = fpat.match(entry)
if mat is not None:
iexp = int(mat.group(1))
assert mat.group(1) == "{:08d}".format(iexp)
exposures.append(iexp)
else:
for entry in glob.glob(os.path.join(night_path,'*')):
head,tail = os.path.split(entry)
try:
exposure = int(tail)
assert tail == "{:08d}".format(exposure)
exposures.append(exposure)
except (ValueError,AssertionError):
# Silently ignore entries that are not exposure subdirectories.
pass
return sorted(exposures)
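#- A minimal usage sketch (illustrative, not part of this module): validate the
#- night string, then map each processed exposure to its per-camera frame files.
#- The default night below is an arbitrary example value.
def _example_night_summary(night='20200219'):
    validate_night(night)
    return {expid: get_files('frame', night, expid)
            for expid in get_exposures(night)}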
def rawdata_root():
"""Returns directory root for raw data, i.e. ``$DESI_SPECTRO_DATA``
Raises:
AssertionError: if these environment variables aren't set.
"""
assert 'DESI_SPECTRO_DATA' in os.environ, 'Missing $DESI_SPECTRO_DATA environment variable'
return os.environ['DESI_SPECTRO_DATA']
def specprod_root():
"""Return directory root for spectro production, i.e.
``$DESI_SPECTRO_REDUX/$SPECPROD``.
Raises:
AssertionError: if these environment variables aren't set.
"""
assert 'SPECPROD' in os.environ, 'Missing $SPECPROD environment variable'
assert 'DESI_SPECTRO_REDUX' in os.environ, 'Missing $DESI_SPECTRO_REDUX environment variable'
return os.path.join(os.getenv('DESI_SPECTRO_REDUX'), os.getenv('SPECPROD'))
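#- Illustrative note (not part of this module): both roots come straight from
#- environment variables; e.g. with the hypothetical settings
#-   DESI_SPECTRO_DATA=/data/raw  DESI_SPECTRO_REDUX=/data/redux  SPECPROD=daily
#- rawdata_root() returns '/data/raw' and specprod_root() returns
#- '/data/redux/daily'.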
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Models (mostly base classes) for the various kinds of renderer
types that Bokeh supports.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from difflib import get_close_matches
from typing import Literal
# Bokeh imports
from ..core.enums import RenderLevel
from ..core.has_props import abstract
from ..core.properties import (
Auto,
Bool,
Either,
Enum,
Float,
Instance,
InstanceDefault,
Nullable,
Override,
String,
)
from ..core.validation import error
from ..core.validation.errors import (
BAD_COLUMN_NAME,
CDSVIEW_FILTERS_WITH_CONNECTED,
MALFORMED_GRAPH_SOURCE,
MISSING_GLYPH,
NO_SOURCE_FOR_GLYPH,
)
from ..model import Model
from .canvas import CoordinateMapping
from .glyphs import (
Circle,
ConnectedXYGlyph,
Glyph,
MultiLine,
)
from .graphics import Decoration, Marking
from .graphs import GraphHitTestPolicy, LayoutProvider, NodesOnly
from .sources import (
CDSView,
ColumnDataSource,
DataSource,
WebDataSource,
)
from .tiles import TileSource, WMTSTileSource
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'DataRenderer',
'GlyphRenderer',
'GraphRenderer',
'GuideRenderer',
'Renderer',
'RendererGroup',
'TileRenderer',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class RendererGroup(Model):
'''A collection of renderers.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
visible = Bool(default=True, help="""
    Makes all grouped renderers visible or invisible.
""")
@abstract
class Renderer(Model):
'''An abstract base class for renderer types.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
level = Enum(RenderLevel, help="""
Specifies the level in which to paint this renderer.
""")
visible = Bool(default=True, help="""
Is the renderer visible.
""")
coordinates = Nullable(Instance(CoordinateMapping))
x_range_name = String('default', help="""
A particular (named) x-range to use for computing screen locations when
rendering glyphs on the plot. If unset, use the default x-range.
""")
y_range_name = String('default', help="""
A particular (named) y-range to use for computing screen locations when
rendering glyphs on the plot. If unset, use the default y-range.
""")
group = Nullable(Instance(RendererGroup))
class TileRenderer(Renderer):
'''
'''
# explicit __init__ to support Init signatures
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
tile_source = Instance(TileSource, default=InstanceDefault(WMTSTileSource), help="""
Local data source to use when rendering glyphs on the plot.
""")
alpha = Float(1.0, help="""
    Tile opacity, between 0.0 (fully transparent) and 1.0 (fully opaque).
""")
smoothing = Bool(default=True, help="""
Enable image smoothing for the rendered tiles.
""")
render_parents = Bool(default=True, help="""
    Flag to enable/disable drawing of parent tiles while waiting for new tiles to arrive. The default value is True.
""")
level = Override(default="image")
@abstract
class DataRenderer(Renderer):
''' An abstract base class for data renderer types (e.g. ``GlyphRenderer``, ``GraphRenderer``).
'''
# explicit __init__ to support Init signatures
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
level = Override(default="glyph")
class GlyphRenderer(DataRenderer):
'''
'''
# explicit __init__ to support Init signatures
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
@error(CDSVIEW_FILTERS_WITH_CONNECTED)
def _check_cdsview_filters_with_connected(self):
if isinstance(self.glyph, ConnectedXYGlyph) and len(self.view.filters) > 0:
return str(self)
@error(MISSING_GLYPH)
def _check_missing_glyph(self):
if not self.glyph: return str(self)
@error(NO_SOURCE_FOR_GLYPH)
def _check_no_source_for_glyph(self):
if not self.data_source: return str(self)
@error(BAD_COLUMN_NAME)
def _check_bad_column_name(self):
if not self.glyph: return
if not self.data_source: return
if isinstance(self.data_source, WebDataSource): return
missing_values = set()
specs = self.glyph.dataspecs()
for name, item in self.glyph.properties_with_values(include_defaults=False).items():
if name not in specs: continue
if not isinstance(item, dict): continue
if not isinstance(self.data_source, ColumnDataSource): continue
if 'field' in item and item['field'] not in self.data_source.column_names:
missing_values.add((item['field'], name))
if missing_values:
suggestions = ['" (closest match: "%s")' % s[0] if s else '"' for s in [
get_close_matches(term[0], self.data_source.column_names, n=1) for term in missing_values]]
missing_values = [("".join([m[0], s]), m[1]) for m, s in zip(missing_values, suggestions)]
missing = ['key "%s" value "%s' % (k, v) for v, k in missing_values]
return "%s [renderer: %s]" % (", ".join(sorted(missing)), self)
data_source = Instance(DataSource, help="""
Local data source to use when rendering glyphs on the plot.
""")
view = Instance(CDSView, default=InstanceDefault(CDSView), help="""
A view into the data source to use when rendering glyphs. A default view
of the entire data source is created when a view is not passed in during
initialization.
    .. note::
Only the default (filterless) CDSView is compatible with glyphs that
have connected topology, such as Line and Patch. Setting filters on
views for these glyphs will result in a warning and undefined behavior.
""")
glyph = Instance(Glyph, help="""
The glyph to render, in conjunction with the supplied data source
and ranges.
""")
    selection_glyph = Nullable(Either(Auto, Instance(Glyph)), default="auto", help="""
An optional glyph used for selected points.
If set to "auto" then the standard glyph will be used for selected
points.
""")
    nonselection_glyph = Nullable(Either(Auto, Instance(Glyph)), default="auto", help="""
An optional glyph used for explicitly non-selected points
(i.e., non-selected when there are other points that are selected,
but not when no points at all are selected.)
If set to "auto" then a glyph with a low alpha value (0.1) will
be used for non-selected points.
""")
hover_glyph = Nullable(Instance(Glyph), help="""
An optional glyph used for inspected points, e.g., those that are
being hovered over by a ``HoverTool``.
""")
    muted_glyph = Nullable(Either(Auto, Instance(Glyph)), default="auto", help="""
""")
muted = Bool(False, help="""
""")
def add_decoration(self, marking: Marking, node: Literal["start", "middle", "end"]) -> Decoration:
glyphs = [self.glyph, self.selection_glyph, self.nonselection_glyph, self.hover_glyph, self.muted_glyph]
decoration = Decoration(marking=marking, node=node)
for glyph in glyphs:
if isinstance(glyph, Glyph):
glyph.decorations.append(decoration)
return decoration
# TODO: (bev) InstanceDefault would be better for these but the property
# values are also model instances and that is too complicated for now
_DEFAULT_NODE_RENDERER = lambda: GlyphRenderer(
glyph=Circle(), data_source=ColumnDataSource(data=dict(index=[]))
)
_DEFAULT_EDGE_RENDERER = lambda: GlyphRenderer(
glyph=MultiLine(), data_source=ColumnDataSource(data=dict(start=[], end=[]))
)
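# A minimal, illustrative sketch (not part of this module): a ``GlyphRenderer``
# simply pairs a data source with a glyph, exactly as the node/edge defaults
# above do. The column names used here are arbitrary examples.
def _example_glyph_renderer() -> GlyphRenderer:
    source = ColumnDataSource(data=dict(x=[1, 2, 3], y=[4, 5, 6]))
    return GlyphRenderer(data_source=source, glyph=Circle(x="x", y="y"))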
class GraphRenderer(DataRenderer):
'''
'''
# explicit __init__ to support Init signatures
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
@error(MALFORMED_GRAPH_SOURCE)
def _check_malformed_graph_source(self):
missing = []
if "index" not in self.node_renderer.data_source.column_names:
missing.append("Column 'index' is missing in GraphSource.node_renderer.data_source")
if "start" not in self.edge_renderer.data_source.column_names:
missing.append("Column 'start' is missing in GraphSource.edge_renderer.data_source")
if "end" not in self.edge_renderer.data_source.column_names:
missing.append("Column 'end' is missing in GraphSource.edge_renderer.data_source")
if missing:
return " ,".join(missing) + " [%s]" % self
layout_provider = Instance(LayoutProvider, help="""
An instance of a ``LayoutProvider`` that supplies the layout of the network
graph in cartesian space.
""")
node_renderer = Instance(GlyphRenderer, default=_DEFAULT_NODE_RENDERER, help="""
Instance of a ``GlyphRenderer`` containing an ``XYGlyph`` that will be rendered
as the graph nodes.
""")
edge_renderer = Instance(GlyphRenderer, default=_DEFAULT_EDGE_RENDERER, help="""
    Instance of a ``GlyphRenderer`` containing a ``MultiLine`` glyph that will be
rendered as the graph edges.
""")
selection_policy = Instance(GraphHitTestPolicy, default=InstanceDefault(NodesOnly), help="""
An instance of a ``GraphHitTestPolicy`` that provides the logic for selection
of graph components.
""")
inspection_policy = Instance(GraphHitTestPolicy, default=InstanceDefault(NodesOnly), help="""
An instance of a ``GraphHitTestPolicy`` that provides the logic for inspection
of graph components.
""")
@abstract
class GuideRenderer(Renderer):
''' A base class for all guide renderer types. ``GuideRenderer`` is
not generally useful to instantiate on its own.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
level = Override(default="guide")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
|
from peewee import *
from playhouse.fields import ManyToManyField
from playhouse.tests.base import database_initializer
from playhouse.tests.base import ModelTestCase
db = database_initializer.get_in_memory_database()
class BaseModel(Model):
class Meta:
database = db
class User(BaseModel):
username = CharField(unique=True)
class Note(BaseModel):
text = TextField()
users = ManyToManyField(User)
NoteUserThrough = Note.users.get_through_model()
AltThroughProxy = Proxy()
class AltNote(BaseModel):
text = TextField()
users = ManyToManyField(User, through_model=AltThroughProxy)
class AltThroughModel(BaseModel):
user = ForeignKeyField(User, related_name='_xx_rel')
note = ForeignKeyField(AltNote, related_name='_xx_rel')
class Meta:
primary_key = CompositeKey('user', 'note')
AltThroughProxy.initialize(AltThroughModel)
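# A minimal usage sketch (illustrative only, not part of the test suite): once
# the tables exist, the many-to-many descriptor works symmetrically from either
# side. The username and note text below are arbitrary example values.
def _example_many_to_many():
    user = User.create(username='example-user')
    note = Note.create(text='example-note')
    user.notes.add(note)          # inserts a row into NoteUserThrough
    return [u.username for u in note.users]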
class TestManyToManyField(ModelTestCase):
requires = [User, Note, NoteUserThrough, AltThroughModel, AltNote]
user_to_note = {
'charlie': [1, 2],
'huey': [2, 3],
'mickey': [3, 4],
'zaizee': [4, 5]}
def setUp(self):
super(TestManyToManyField, self).setUp()
usernames = ['charlie', 'huey', 'mickey', 'zaizee']
n_notes = 5
for username in usernames:
User.create(username=username)
for i in range(n_notes):
Note.create(text='note-%s' % (i + 1))
def test_through_model(self):
self.assertEqual(len(NoteUserThrough._meta.fields), 3)
fields = NoteUserThrough._meta.fields
self.assertEqual(sorted(fields), ['id', 'note', 'user'])
note_field = fields['note']
self.assertEqual(note_field.rel_model, Note)
self.assertFalse(note_field.null)
user_field = fields['user']
self.assertEqual(user_field.rel_model, User)
self.assertFalse(user_field.null)
def _create_relationship(self):
for username, notes in self.user_to_note.items():
user = User.get(User.username == username)
for note in notes:
NoteUserThrough.create(
note=Note.get(Note.text == 'note-%s' % note),
user=user)
def assertNotes(self, query, expected):
notes = [note.text for note in query]
self.assertEqual(
sorted(notes),
['note-%s' % i for i in sorted(expected)])
def assertUsers(self, query, expected):
usernames = [user.username for user in query]
self.assertEqual(sorted(usernames), sorted(expected))
def test_descriptor_query(self):
self._create_relationship()
charlie, huey, mickey, zaizee = User.select().order_by(User.username)
with self.assertQueryCount(1):
self.assertNotes(charlie.notes, [1, 2])
with self.assertQueryCount(1):
self.assertNotes(zaizee.notes, [4, 5])
u = User.create(username='beanie')
self.assertNotes(u.notes, [])
n1, n2, n3, n4, n5 = Note.select().order_by(Note.text)
with self.assertQueryCount(1):
self.assertUsers(n1.users, ['charlie'])
with self.assertQueryCount(1):
self.assertUsers(n2.users, ['charlie', 'huey'])
with self.assertQueryCount(1):
self.assertUsers(n5.users, ['zaizee'])
n6 = Note.create(text='note-6')
self.assertUsers(n6.users, [])
    def test_descriptor_filtering(self):
self._create_relationship()
charlie, huey, mickey, zaizee = User.select().order_by(User.username)
with self.assertQueryCount(1):
notes = charlie.notes.order_by(Note.text.desc())
self.assertNotes(notes, [2, 1])
with self.assertQueryCount(1):
notes = huey.notes.where(Note.text != 'note-3')
self.assertNotes(notes, [2])
def test_set_values(self):
charlie = User.get(User.username == 'charlie')
huey = User.get(User.username == 'huey')
n1, n2, n3, n4, n5 = Note.select().order_by(Note.text)
with self.assertQueryCount(2):
charlie.notes = n1
self.assertNotes(charlie.notes, [1])
self.assertUsers(n1.users, ['charlie'])
charlie.notes = [n2, n3]
self.assertNotes(charlie.notes, [2, 3])
self.assertUsers(n1.users, [])
self.assertUsers(n2.users, ['charlie'])
self.assertUsers(n3.users, ['charlie'])
with self.assertQueryCount(2):
huey.notes = Note.select().where(~(Note.text.endswith('4')))
self.assertNotes(huey.notes, [1, 2, 3, 5])
def test_add(self):
charlie = User.get(User.username == 'charlie')
huey = User.get(User.username == 'huey')
n1, n2, n3, n4, n5 = Note.select().order_by(Note.text)
charlie.notes.add([n1, n2])
self.assertNotes(charlie.notes, [1, 2])
self.assertUsers(n1.users, ['charlie'])
self.assertUsers(n2.users, ['charlie'])
others = [n3, n4, n5]
for note in others:
self.assertUsers(note.users, [])
with self.assertQueryCount(1):
huey.notes.add(Note.select().where(
fn.substr(Note.text, 6, 1) << ['1', '3', '5']))
self.assertNotes(huey.notes, [1, 3, 5])
self.assertUsers(n1.users, ['charlie', 'huey'])
self.assertUsers(n2.users, ['charlie'])
self.assertUsers(n3.users, ['huey'])
self.assertUsers(n4.users, [])
self.assertUsers(n5.users, ['huey'])
with self.assertQueryCount(1):
charlie.notes.add(n4)
self.assertNotes(charlie.notes, [1, 2, 4])
with self.assertQueryCount(2):
n3.users.add(
User.select().where(User.username != 'charlie'),
clear_existing=True)
self.assertUsers(n3.users, ['huey', 'mickey', 'zaizee'])
def test_unique(self):
n1 = Note.get(Note.text == 'note-1')
charlie = User.get(User.username == 'charlie')
def add_user(note, user):
with self.assertQueryCount(1):
note.users.add(user)
add_user(n1, charlie)
self.assertRaises(IntegrityError, add_user, n1, charlie)
add_user(n1, User.get(User.username == 'zaizee'))
self.assertUsers(n1.users, ['charlie', 'zaizee'])
def test_remove(self):
self._create_relationship()
charlie, huey, mickey, zaizee = User.select().order_by(User.username)
n1, n2, n3, n4, n5 = Note.select().order_by(Note.text)
with self.assertQueryCount(1):
charlie.notes.remove([n1, n2, n3])
self.assertNotes(charlie.notes, [])
self.assertNotes(huey.notes, [2, 3])
with self.assertQueryCount(1):
huey.notes.remove(Note.select().where(
Note.text << ['note-2', 'note-4', 'note-5']))
self.assertNotes(huey.notes, [3])
self.assertNotes(mickey.notes, [3, 4])
self.assertNotes(zaizee.notes, [4, 5])
with self.assertQueryCount(1):
n4.users.remove([charlie, mickey])
self.assertUsers(n4.users, ['zaizee'])
with self.assertQueryCount(1):
n5.users.remove(User.select())
self.assertUsers(n5.users, [])
def test_clear(self):
charlie = User.get(User.username == 'charlie')
huey = User.get(User.username == 'huey')
charlie.notes = Note.select()
huey.notes = Note.select()
self.assertEqual(charlie.notes.count(), 5)
self.assertEqual(huey.notes.count(), 5)
charlie.notes.clear()
self.assertEqual(charlie.notes.count(), 0)
self.assertEqual(huey.notes.count(), 5)
n1 = Note.get(Note.text == 'note-1')
n2 = Note.get(Note.text == 'note-2')
n1.users = User.select()
n2.users = User.select()
self.assertEqual(n1.users.count(), 4)
self.assertEqual(n2.users.count(), 4)
n1.users.clear()
self.assertEqual(n1.users.count(), 0)
self.assertEqual(n2.users.count(), 4)
def test_manual_through(self):
charlie, huey, mickey, zaizee = User.select().order_by(User.username)
alt_notes = []
for i in range(5):
alt_notes.append(AltNote.create(text='note-%s' % (i + 1)))
self.assertNotes(charlie.altnotes, [])
for alt_note in alt_notes:
self.assertUsers(alt_note.users, [])
n1, n2, n3, n4, n5 = alt_notes
# Test adding relationships by setting the descriptor.
charlie.altnotes = [n1, n2]
with self.assertQueryCount(2):
huey.altnotes = AltNote.select().where(
fn.substr(AltNote.text, 6, 1) << ['1', '3', '5'])
mickey.altnotes.add([n1, n4])
with self.assertQueryCount(2):
zaizee.altnotes = AltNote.select()
# Test that the notes were added correctly.
with self.assertQueryCount(1):
self.assertNotes(charlie.altnotes, [1, 2])
with self.assertQueryCount(1):
self.assertNotes(huey.altnotes, [1, 3, 5])
with self.assertQueryCount(1):
self.assertNotes(mickey.altnotes, [1, 4])
with self.assertQueryCount(1):
self.assertNotes(zaizee.altnotes, [1, 2, 3, 4, 5])
# Test removing notes.
with self.assertQueryCount(1):
charlie.altnotes.remove(n1)
self.assertNotes(charlie.altnotes, [2])
with self.assertQueryCount(1):
huey.altnotes.remove([n1, n2, n3])
self.assertNotes(huey.altnotes, [5])
with self.assertQueryCount(1):
zaizee.altnotes.remove(
AltNote.select().where(
fn.substr(AltNote.text, 6, 1) << ['1', '2', '4']))
self.assertNotes(zaizee.altnotes, [3, 5])
# Test the backside of the relationship.
n1.users = User.select().where(User.username != 'charlie')
with self.assertQueryCount(1):
self.assertUsers(n1.users, ['huey', 'mickey', 'zaizee'])
with self.assertQueryCount(1):
self.assertUsers(n2.users, ['charlie'])
with self.assertQueryCount(1):
self.assertUsers(n3.users, ['zaizee'])
with self.assertQueryCount(1):
self.assertUsers(n4.users, ['mickey'])
with self.assertQueryCount(1):
self.assertUsers(n5.users, ['huey', 'zaizee'])
with self.assertQueryCount(1):
n1.users.remove(User.select())
with self.assertQueryCount(1):
n5.users.remove([charlie, huey])
with self.assertQueryCount(1):
self.assertUsers(n1.users, [])
with self.assertQueryCount(1):
self.assertUsers(n5.users, ['zaizee'])
|
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perspective camera functionalities."""
import math
import sys
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.rendering.camera import perspective
from tensorflow_graphics.util import test_case
class PerspectiveTest(test_case.TestCase):
@parameterized.parameters(
("must have exactly 4 dimensions in axis -1", (4, 3)),
("must have exactly 4 dimensions in axis -2", (5, 4)),
("must have exactly 4 dimensions in axis -2", (None, 4)),
("must have exactly 4 dimensions in axis -1", (4, None)),
)
def test_parameters_from_right_handed_shape_exception_raised(
self, error_msg, *shapes):
"""Checks the inputs of the from_right_handed_shape function."""
self.assert_exception_is_raised(perspective.parameters_from_right_handed,
error_msg, shapes)
@parameterized.parameters(
((4, 4),),
((None, 4, 4),),
((None, None, 4, 4),),
)
def test_parameters_from_right_handed_shape_exception_not_raised(
self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(
perspective.parameters_from_right_handed, shapes)
def test_parameters_from_right_handed_random(self):
"""Tests that parameters_from_right_handed returns the expected values."""
tensor_size = np.random.randint(2, 4)
tensor_shape = np.random.randint(2, 5, size=(tensor_size)).tolist()
vertical_field_of_view_gt = np.random.uniform(
sys.float_info.epsilon, np.pi - sys.float_info.epsilon,
tensor_shape + [1])
aspect_ratio_gt = np.random.uniform(0.1, 10.0, tensor_shape + [1])
near_gt = np.random.uniform(0.1, 100.0, tensor_shape + [1])
far_gt = near_gt + np.random.uniform(0.1, 100.0, tensor_shape + [1])
projection_matrix = perspective.right_handed(vertical_field_of_view_gt,
aspect_ratio_gt, near_gt,
far_gt)
vertical_field_of_view_pred, aspect_ratio_pred, near_pred, far_pred = perspective.parameters_from_right_handed(
projection_matrix)
with self.subTest(name="vertical_field_of_view"):
self.assertAllClose(vertical_field_of_view_gt,
vertical_field_of_view_pred)
with self.subTest(name="aspect_ratio"):
self.assertAllClose(aspect_ratio_gt, aspect_ratio_pred)
with self.subTest(name="near_plane"):
self.assertAllClose(near_gt, near_pred)
with self.subTest(name="far_plane"):
self.assertAllClose(far_gt, far_pred)
def test_parameters_from_right_handed_jacobian_random(self):
"""Tests the Jacobian of parameters_from_right_handed."""
tensor_size = np.random.randint(2, 4)
tensor_shape = np.random.randint(2, 5, size=(tensor_size)).tolist()
vertical_field_of_view = np.random.uniform(sys.float_info.epsilon,
np.pi - sys.float_info.epsilon,
tensor_shape + [1])
aspect_ratio = np.random.uniform(0.1, 10.0, tensor_shape + [1])
near = np.random.uniform(0.1, 100.0, tensor_shape + [1])
far = near + np.random.uniform(0.1, 100.0, tensor_shape + [1])
projection_matrix = perspective.right_handed(vertical_field_of_view,
aspect_ratio, near, far)
with self.subTest(name="vertical_field_of_view"):
self.assert_jacobian_is_finite_fn(
lambda x: perspective.parameters_from_right_handed(x)[0],
[projection_matrix])
with self.subTest(name="aspect_ratio"):
self.assert_jacobian_is_finite_fn(
lambda x: perspective.parameters_from_right_handed(x)[1],
[projection_matrix])
with self.subTest(name="near_plane"):
self.assert_jacobian_is_finite_fn(
lambda x: perspective.parameters_from_right_handed(x)[2],
[projection_matrix])
with self.subTest(name="far_plane"):
self.assert_jacobian_is_finite_fn(
lambda x: perspective.parameters_from_right_handed(x)[3],
[projection_matrix])
def test_perspective_right_handed_preset(self):
"""Tests that perspective_right_handed generates expected results."""
vertical_field_of_view = ((60.0 * math.pi / 180.0,),
(50.0 * math.pi / 180.0,))
aspect_ratio = ((1.5,), (1.1,))
near = ((1.0,), (1.2,))
far = ((10.0,), (5.0,))
pred = perspective.right_handed(vertical_field_of_view, aspect_ratio, near,
far)
gt = (((1.15470052, 0.0, 0.0, 0.0), (0.0, 1.73205066, 0.0, 0.0),
(0.0, 0.0, -1.22222221, -2.22222233), (0.0, 0.0, -1.0, 0.0)),
((1.9495517, 0.0, 0.0, 0.0), (0.0, 2.14450693, 0.0, 0.0),
(0.0, 0.0, -1.63157892, -3.15789485), (0.0, 0.0, -1.0, 0.0)))
self.assertAllClose(pred, gt)
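  # Illustrative cross-check (not part of the original test): the expected
  # matrices above follow the standard right-handed projection form; e.g. for
  # fov = 60 deg, aspect = 1.5, near = 1, far = 10:
  #   M[0, 0] = cot(fov/2) / aspect = 1.1547,  M[1, 1] = cot(fov/2) = 1.7321,
  #   M[2, 2] = -(far + near) / (far - near) = -1.2222,
  #   M[2, 3] = -2 * far * near / (far - near) = -2.2222,  M[3, 2] = -1.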
@parameterized.parameters(
((1,), (1,), (1,), (1,)),
((None, 1), (None, 1), (None, 1), (None, 1)),
((None, 3, 1), (None, 3, 1), (None, 3, 1), (None, 3, 1)),
)
def test_perspective_right_handed_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.right_handed, shapes)
@parameterized.parameters(
("Not all batch dimensions are identical", (1,), (3, 1), (3, 1), (3, 1)),
("Not all batch dimensions are identical", (3, 1), (None, 3, 1), (3, 1),
(3, 1)),
)
def test_perspective_right_handed_shape_exception_raised(
self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.right_handed, error_msg, shapes)
@parameterized.parameters(
((1.0,),
(1.0,), np.random.uniform(-1.0, 0.0, size=(1,)).astype(np.float32),
(1.0,)),
((1.0,), (1.0,), (0.0,), (1.0,)),
((1.0,), np.random.uniform(-1.0, 0.0, size=(1,)).astype(np.float32),
(0.1,), (1.0,)),
((1.0,), (0.0,), (0.1,), (1.0,)),
((1.0,),
(1.0,), np.random.uniform(1.0, 2.0, size=(1,)).astype(np.float32),
np.random.uniform(0.1, 0.5, size=(1,)).astype(np.float32)),
((1.0,), (1.0,), (0.1,), (0.1,)),
(np.random.uniform(-math.pi, 0.0, size=(1,)).astype(np.float32), (1.0,),
(0.1,), (1.0,)),
(np.random.uniform(math.pi, 2.0 * math.pi, size=(1,)).astype(np.float32),
(1.0,), (0.1,), (1.0,)),
((0.0,), (1.0,), (0.1,), (1.0,)),
((math.pi,), (1.0,), (0.1,), (1.0,)),
)
def test_perspective_right_handed_valid_range_exception_raised(
self, vertical_field_of_view, aspect_ratio, near, far):
"""Tests that an exception is raised with out of bounds values."""
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(
perspective.right_handed(vertical_field_of_view, aspect_ratio, near,
far))
def test_perspective_right_handed_cross_jacobian_preset(self):
"""Tests the Jacobian of perspective_right_handed."""
vertical_field_of_view_init = np.array((1.0,))
aspect_ratio_init = np.array((1.0,))
near_init = np.array((1.0,))
far_init = np.array((10.0,))
self.assert_jacobian_is_correct_fn(
perspective.right_handed,
[vertical_field_of_view_init, aspect_ratio_init, near_init, far_init])
def test_perspective_right_handed_cross_jacobian_random(self):
"""Tests the Jacobian of perspective_right_handed."""
tensor_size = np.random.randint(1, 3)
tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist()
eps = np.finfo(np.float64).eps
vertical_field_of_view_init = np.random.uniform(
eps, math.pi - eps, size=tensor_shape + [1])
aspect_ratio_init = np.random.uniform(eps, 100.0, size=tensor_shape + [1])
near_init = np.random.uniform(eps, 10.0, size=tensor_shape + [1])
far_init = np.random.uniform(10 + eps, 100.0, size=tensor_shape + [1])
self.assert_jacobian_is_correct_fn(
perspective.right_handed,
[vertical_field_of_view_init, aspect_ratio_init, near_init, far_init])
@parameterized.parameters(
((3, 3),),
((3, 3, 3),),
((None, 3, 3),),
)
def test_intrinsics_from_matrix_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.intrinsics_from_matrix,
shapes)
@parameterized.parameters(
("must have a rank greater than 1", (3,)),
("must have exactly 3 dimensions in axis -2", (None, 3)),
("must have exactly 3 dimensions in axis -1", (3, None)),
)
def test_intrinsics_from_matrix_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.intrinsics_from_matrix,
error_msg, shapes)
@parameterized.parameters(
((((0., 0., 0.), (0., 0., 0.), (0., 0., 1.)),), ((0., 0.), (0., 0.),
(0.0,))),
((((1., 0., 3.), (0., 2., 4.), (0., 0., 1.)),), ((1., 2.), (3., 4.),
(0.0,))),
)
def test_intrinsics_from_matrix_preset(self, test_inputs, test_outputs):
"""Tests that intrinsics_from_matrix gives the correct result."""
self.assert_output_is_correct(perspective.intrinsics_from_matrix,
test_inputs, test_outputs)
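  # Illustrative note (not part of the original test): the presets above encode
  # the usual calibration-matrix layout
  #   K = [[fx, skew, cx],
  #        [0,  fy,   cy],
  #        [0,  0,    1 ]],
  # so ((1, 0, 3), (0, 2, 4), (0, 0, 1)) unpacks to focal (1, 2), principal
  # point (3, 4) and skew 0.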
def test_intrinsics_from_matrix_to_intrinsics_random(self):
"""Tests that converting intrinsics to a matrix and back is consistent."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
random_skew_coeff = np.random.normal(size=tensor_shape + [1])
matrix = perspective.matrix_from_intrinsics(random_focal,
random_principal_point,
random_skew_coeff)
focal, principal_point, skew_coeff = perspective.intrinsics_from_matrix(
matrix)
random_skew_coeff = np.reshape(random_skew_coeff, (1, 1))
self.assertAllClose(random_focal, focal, rtol=1e-3)
self.assertAllClose(random_principal_point, principal_point, rtol=1e-3)
self.assertAllClose(random_skew_coeff, skew_coeff, rtol=1e-3)
@parameterized.parameters(
((2,), (2,), (1,)),
((2, 2), (2, 2), (2, 1)),
((None, 2), (None, 2), (None, 1)),
)
def test_matrix_from_intrinsics_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.matrix_from_intrinsics,
shapes)
@parameterized.parameters(
((2,), (2,)),
((2, 2), (2, 2)),
((None, 2), (None, 2)),
)
def test_matrix_from_intrinsics_exception_not_raised_when_skew_not_passed(
self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.matrix_from_intrinsics,
shapes)
@parameterized.parameters(
("must have exactly 2 dimensions in axis -1", (None,), (2,)),
("must have exactly 2 dimensions in axis -1", (2,), (None,)),
("Not all batch dimensions are identical.", (3, 2), (2, 2)),
)
def test_matrix_from_intrinsics_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.matrix_from_intrinsics,
error_msg, shapes)
@parameterized.parameters(
(((0.0, 0.0), (0.0, 0.0), (0.0,)), (((0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
(0.0, 0.0, 1.0)),)),
(((1.0, 2.0), (3.0, 4.0), (0.0,)), (((1.0, 0.0, 3.0), (0.0, 2.0, 4.0),
(0.0, 0.0, 1.0)),)))
def test_matrix_from_intrinsics_preset(self, test_inputs, test_outputs):
"""Tests that matrix_from_intrinsics gives the correct result."""
self.assert_output_is_correct(perspective.matrix_from_intrinsics,
test_inputs, test_outputs)
def test_matrix_from_intrinsics_to_matrix_random(self):
"""Tests that converting a matrix to intrinsics and back is consistent."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
fx = random_focal[..., 0]
fy = random_focal[..., 1]
cx = random_principal_point[..., 0]
cy = random_principal_point[..., 1]
zero = np.zeros_like(fx)
one = np.ones_like(fx)
random_matrix = np.stack((fx, zero, cx, zero, fy, cy, zero, zero, one),
axis=-1).reshape(tensor_shape + [3, 3])
focal, principal_point, skew_coefficient = perspective.intrinsics_from_matrix(
random_matrix)
matrix = perspective.matrix_from_intrinsics(focal,
principal_point,
skew_coefficient)
self.assertAllClose(random_matrix, matrix, rtol=1e-3)
@parameterized.parameters(
((3,), (2,), (2,)),
((2, 3), (2, 2), (2, 2)),
((2, 3), (2,), (2,)),
((None, 3), (None, 2), (None, 2)),
)
def test_project_exception_not_exception_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.project, shapes)
@parameterized.parameters(
("must have exactly 3 dimensions in axis -1", (None,), (2,), (2,)),
("must have exactly 2 dimensions in axis -1", (3,), (None,), (2,)),
("must have exactly 2 dimensions in axis -1", (3,), (2,), (None,)),
("Not all batch dimensions are broadcast-compatible.", (3, 3), (2, 2),
(2, 2)),
)
def test_project_exception_raised(self, error_msg, *shape):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.project, error_msg, shape)
@parameterized.parameters(
(((0., 0., 1.), (1., 1.), (0., 0.)), ((0., 0.),)),
(((4., 2., 1.), (1., 1.), (-4., -2.)), ((0., 0.),)),
(((4., 2., 10.), (1., 1.), (-.4, -.2)), ((0., 0.),)),
(((4., 2., 10.), (2., 1.), (-.8, -.2)), ((0., 0.),)),
(((4., 2., 10.), (2., 1.), (-.8, 0.)), ((0., .2),)),
)
def test_project_preset(self, test_inputs, test_outputs):
"""Tests that the project function gives the correct result."""
self.assert_output_is_correct(perspective.project, test_inputs,
test_outputs)
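  # Illustrative note (not part of the original test): the presets above follow
  # the pinhole model u = fx * x / z + cx, v = fy * y / z + cy; e.g. the point
  # (4, 2, 10) with focal (2, 1) and principal point (-0.8, -0.2) maps to
  # (2 * 4 / 10 - 0.8, 1 * 2 / 10 - 0.2) = (0, 0).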
def test_project_unproject_random(self):
"""Tests that projecting and unprojecting gives an identity mapping."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_point_3d = np.random.normal(size=tensor_shape + [3])
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
random_depth = np.expand_dims(random_point_3d[..., 2], axis=-1)
point_2d = perspective.project(random_point_3d, random_focal,
random_principal_point)
point_3d = perspective.unproject(point_2d, random_depth, random_focal,
random_principal_point)
self.assertAllClose(random_point_3d, point_3d, rtol=1e-3)
def test_project_ray_random(self):
"""Tests that that ray is pointing toward the correct location."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_point_3d = np.random.normal(size=tensor_shape + [3])
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
random_depth = np.expand_dims(random_point_3d[..., 2], axis=-1)
point_2d = perspective.project(random_point_3d, random_focal,
random_principal_point)
ray_3d = perspective.ray(point_2d, random_focal, random_principal_point)
ray_3d = random_depth * ray_3d
self.assertAllClose(random_point_3d, ray_3d, rtol=1e-3)
@parameterized.parameters(
((2,), (2,), (2,)),
((2, 2), (2, 2), (2, 2)),
((3, 2), (1, 2), (2,)),
((None, 2), (None, 2), (None, 2)),
)
def test_ray_exception_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.ray, shapes)
@parameterized.parameters(
("must have exactly 2 dimensions in axis -1", (None,), (2,), (2,)),
("must have exactly 2 dimensions in axis -1", (2,), (None,), (2,)),
("must have exactly 2 dimensions in axis -1", (2,), (2,), (None,)),
("Not all batch dimensions are broadcast-compatible.", (3, 2), (1, 2),
(2, 2)),
)
def test_ray_exception_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.ray, error_msg, shapes)
@parameterized.parameters(
(((0., 0.), (1., 1.), (0., 0.)), ((0., 0., 1.),)),
(((0., 0.), (1., 1.), (-1., -2.)), ((1., 2., 1.),)),
(((0., 0.), (10., 1.), (-1., -2.)), ((.1, 2., 1.),)),
(((-2., -4.), (10., 1.), (-3., -6.)), ((.1, 2., 1.),)),
)
def test_ray_preset(self, test_inputs, test_outputs):
"""Tests that the ray function gives the correct result."""
self.assert_output_is_correct(perspective.ray, test_inputs, test_outputs)
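  # Illustrative note (not part of the original test): perspective.ray inverts
  # the pinhole model at unit depth, ray = ((u - cx) / fx, (v - cy) / fy, 1);
  # e.g. (0, 0) with focal (1, 1) and principal point (-1, -2) yields (1, 2, 1).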
def test_ray_project_random(self):
"""Tests that the end point of the ray projects at the good location."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_point_2d = np.random.normal(size=tensor_shape + [2])
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
ray_3d = perspective.ray(random_point_2d, random_focal,
random_principal_point)
point_2d = perspective.project(ray_3d, random_focal, random_principal_point)
self.assertAllClose(random_point_2d, point_2d, rtol=1e-3)
@parameterized.parameters(
((2,), (1,), (2,), (2,)),
((2, 2), (2, 1), (2, 2), (2, 2)),
((None, 2), (None, 1), (None, 2), (None, 2)),
)
def test_unproject_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.unproject, shapes)
@parameterized.parameters(
("must have exactly 2 dimensions in axis -1", (None,), (1,), (2,), (2,)),
("must have exactly 1 dimensions in axis -1", (2,), (None,), (2,), (2,)),
("must have exactly 2 dimensions in axis -1", (2,), (1,), (None,), (2,)),
("must have exactly 2 dimensions in axis -1", (2,), (1,), (2,), (None,)),
("Not all batch dimensions are identical.", (1, 2), (2, 1), (2, 2),
(2, 2)),
)
def test_unproject_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.unproject, error_msg, shapes)
@parameterized.parameters(
(((0., 0.), (1.,), (1., 1.), (0., 0.)), ((0., 0., 1.),)),
(((0., 0.), (1.,), (1., 1.), (-4., -2.)), ((4., 2., 1.),)),
(((0., 0.), (10.,), (1., 1.), (-.4, -.2)), ((4., 2., 10.),)),
(((0., 0.), (10.,), (2., 1.), (-.8, -.2)), ((4., 2., 10.),)),
(((0., .2), (10.,), (2., 1.), (-.8, 0.)), ((4., 2., 10.),)),
)
def test_unproject_preset(self, test_inputs, test_outputs):
"""Tests that the unproject function gives the correct result."""
self.assert_output_is_correct(perspective.unproject, test_inputs,
test_outputs)
def test_unproject_project_random(self):
"""Tests that unprojecting and projecting gives and identity mapping."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_point_2d = np.random.normal(size=tensor_shape + [2])
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
random_depth = np.random.normal(size=tensor_shape + [1])
point_3d = perspective.unproject(random_point_2d, random_depth,
random_focal, random_principal_point)
point_2d = perspective.project(point_3d, random_focal,
random_principal_point)
self.assertAllClose(random_point_2d, point_2d, rtol=1e-3)
def test_unproject_ray_random(self):
"""Tests that that ray is pointing toward the correct location."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
random_point_2d = np.random.normal(size=tensor_shape + [2])
random_focal = np.random.normal(size=tensor_shape + [2])
random_principal_point = np.random.normal(size=tensor_shape + [2])
random_depth = np.random.normal(size=tensor_shape + [1])
point_3d = perspective.unproject(random_point_2d, random_depth,
random_focal, random_principal_point)
ray_3d = perspective.ray(random_point_2d, random_focal,
random_principal_point)
ray_3d = random_depth * ray_3d
self.assertAllClose(point_3d, ray_3d, rtol=1e-3)
@parameterized.parameters(
(128, 128, 500, (2,), (2,)),
(128, 128, 500, (2, 2), (2, 2)),
(128, 128, 500, (5, 3, 2), (5, 3, 2)),
(128, 128, 500, (3, 2), (1, 2)),
)
def test_random_rays_exception_exception_not_raised(self, height, width,
n_rays, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.random_rays, shapes,
height=height, width=width,
n_rays=n_rays)
@parameterized.parameters(
("must have exactly 2 dimensions in axis -1",
128, 128, 500, (None,), (2,)),
("must have exactly 2 dimensions in axis -1",
128, 128, 500, (2,), (None,)),
("Not all batch dimensions are broadcast-compatible.",
128, 128, 500, (3, 2), (2, 2)),
)
def test_random_rays_exception_exception_raised(self, error_msg,
height, width, n_rays,
*shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.random_rays, error_msg, shapes,
height=height, width=width, n_rays=n_rays)
@parameterized.parameters(
(128, 128, 64, 64, (2,), (2,)),
(128, 256, 64, 64, (2,), (2,)),
(128, 128, 64, 72, (2,), (2,)),
(128, 256, 64, 72, (2,), (2,)),
(128, 128, 64, 64, (2, 2), (2, 2)),
(128, 128, 64, 64, (5, 3, 2), (5, 3, 2)),
(128, 128, 64, 64, (3, 2), (1, 2)),
(128, 128, 128, 128, (3, 2), (1, 2)),
)
def test_random_patches_exception_exception_not_raised(self,
height,
width,
patch_height,
patch_width,
*shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(perspective.random_patches, shapes,
height=height, width=width,
patch_height=patch_height,
patch_width=patch_width)
@parameterized.parameters(
("must have exactly 2 dimensions in axis -1",
128, 128, 64, 64, (None,), (2,)),
("must have exactly 2 dimensions in axis -1",
128, 128, 64, 64, (2,), (None,)),
("Not all batch dimensions are broadcast-compatible.",
128, 128, 64, 64, (3, 2), (2, 2)),
)
def test_random_patches_exception_exception_raised(self, error_msg,
height, width,
patch_height, patch_width,
*shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(perspective.random_patches,
error_msg,
shapes,
height=height,
width=width,
patch_height=patch_height,
patch_width=patch_width)
@parameterized.parameters(
(((1., 1.), (1., 1.), 1, 1, 1, 1), (((-1., -1., 1.),), ((0., 0.),))),
)
def test_random_patches_preset(self, test_inputs, test_outputs):
"""Tests that the ray function gives the correct result."""
self.assert_output_is_correct(perspective.random_patches,
test_inputs,
test_outputs,
tile=False)
if __name__ == "__main__":
test_case.main()
|
|
import os,sys,math,pickle
import random as lrand
import rpy2.robjects as robjects
import argparse
import numpy
#import svmutil
def init():
lrand.seed(1982)
robjects.r('library(splines)')
robjects.r('library(stats4)')
robjects.r('library(survival)')
robjects.r('library(mvtnorm)')
robjects.r('library(modeltools)')
robjects.r('library(coin)')
robjects.r('library(MASS)')
def get_class_means(class_sl,feats):
means = {}
clk = class_sl.keys()
for fk,f in feats.items():
means[fk] = [numpy.mean((f[class_sl[k][0]:class_sl[k][1]])) for k in clk]
return clk,means
def save_res(res,filename):
with open(filename, 'w') as out:
for k,v in res['cls_means'].items():
out.write(k+"\t"+str(math.log(max(max(v),1.0),10.0))+"\t")
if k in res['lda_res_th']:
for i,vv in enumerate(v):
if vv == max(v):
out.write(str(res['cls_means_kord'][i])+"\t")
break
out.write(str(res['lda_res'][k]))
else: out.write("\t")
out.write( "\t" + (res['wilcox_res'][k] if 'wilcox_res' in res and k in res['wilcox_res'] else "-")+"\n")
def load_data(input_file, nnorm = False):
with open(input_file, 'rb') as inputf:
inp = pickle.load(inputf)
if nnorm: return inp['feats'],inp['cls'],inp['class_sl'],inp['subclass_sl'],inp['class_hierarchy'],inp['norm']
else: return inp['feats'],inp['cls'],inp['class_sl'],inp['subclass_sl'],inp['class_hierarchy']
def load_res(input_file):
with open(input_file, 'rb') as inputf:
inp = pickle.load(inputf)
return inp['res'],inp['params'],inp['class_sl'],inp['subclass_sl']
def test_kw_r(cls,feats,p,factors):
robjects.globalenv["y"] = robjects.FloatVector(feats)
for i,f in enumerate(factors):
robjects.globalenv['x'+str(i+1)] = robjects.FactorVector(robjects.StrVector(cls[f]))
fo = "y~x1"
#for i,f in enumerate(factors[1:]):
# if f == "subclass" and len(set(cls[f])) <= len(set(cls["class"])): continue
# if len(set(cls[f])) == len(cls[f]): continue
# fo += "+x"+str(i+2)
kw_res = robjects.r('kruskal.test('+fo+',)$p.value')
return float(tuple(kw_res)[0]) < p, float(tuple(kw_res)[0])
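# A minimal usage sketch (illustrative, not part of LEfSe): test_kw_r() copies
# the feature values and class labels into the R global environment and reads
# back the Kruskal-Wallis p-value, returning (significant, p_value). The alpha
# value below is an arbitrary example.
def _example_kw_usage(cls, feats, alpha=0.05):
    init()
    return test_kw_r(cls, feats, alpha, ['class'])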
def test_rep_wilcoxon_r(sl,cl_hie,feats,th,multiclass_strat,mul_cor,fn,min_c,comp_only_same_subcl,curv=False):
comp_all_sub = not comp_only_same_subcl
tot_ok = 0
alpha_mtc = th
all_diff = []
for pair in [(x,y) for x in cl_hie.keys() for y in cl_hie.keys() if x < y]:
dir_cmp = "not_set" #
l_subcl1, l_subcl2 = (len(cl_hie[pair[0]]), len(cl_hie[pair[1]]))
if mul_cor != 0: alpha_mtc = th*l_subcl1*l_subcl2 if mul_cor == 2 else 1.0-math.pow(1.0-th,l_subcl1*l_subcl2)
ok = 0
curv_sign = 0
first = True
for i,k1 in enumerate(cl_hie[pair[0]]):
br = False
for j,k2 in enumerate(cl_hie[pair[1]]):
if not comp_all_sub and k1[len(pair[0]):] != k2[len(pair[1]):]:
ok += 1
continue
cl1 = feats[sl[k1][0]:sl[k1][1]]
cl2 = feats[sl[k2][0]:sl[k2][1]]
med_comp = False
if len(cl1) < min_c or len(cl2) < min_c:
med_comp = True
sx,sy = numpy.median(cl1),numpy.median(cl2)
if cl1[0] == cl2[0] and len(set(cl1)) == 1 and len(set(cl2)) == 1:
tres, first = False, False
elif not med_comp:
robjects.globalenv["x"] = robjects.FloatVector(cl1+cl2)
robjects.globalenv["y"] = robjects.FactorVector(robjects.StrVector(["a" for a in cl1]+["b" for b in cl2]))
pv = float(robjects.r('pvalue(wilcox_test(x~y,data=data.frame(x,y)))')[0])
tres = pv < alpha_mtc*2.0
if first:
first = False
if not curv and ( med_comp or tres ):
dir_cmp = sx < sy
#if sx == sy: br = True
elif curv:
dir_cmp = None
if med_comp or tres:
curv_sign += 1
dir_cmp = sx < sy
else: br = True
elif not curv and med_comp:
if ((sx < sy) != dir_cmp or sx == sy): br = True
elif curv:
if tres and dir_cmp == None:
curv_sign += 1
dir_cmp = sx < sy
if tres and dir_cmp != (sx < sy):
br = True
curv_sign = -1
elif not tres or (sx < sy) != dir_cmp or sx == sy: br = True
if br: break
ok += 1
if br: break
if curv: diff = curv_sign > 0
else: diff = (ok == len(cl_hie[pair[1]])*len(cl_hie[pair[0]])) # or (not comp_all_sub and dir_cmp != "not_set")
if diff: tot_ok += 1
if not diff and multiclass_strat: return False
if diff and not multiclass_strat: all_diff.append(pair)
if not multiclass_strat:
tot_k = len(cl_hie.keys())
for k in cl_hie.keys():
nk = 0
for a in all_diff:
if k in a: nk += 1
if nk == tot_k-1: return True
return False
return True
def contast_within_classes_or_few_per_class(feats,inds,min_cl,ncl):
    ff = list(zip(*[v for n,v in feats.items() if n != 'class']))
cols = [ff[i] for i in inds]
cls = [feats['class'][i] for i in inds]
if len(set(cls)) < ncl:
return True
for c in set(cls):
if cls.count(c) < min_cl:
return True
cols_cl = [x for i,x in enumerate(cols) if cls[i] == c]
for i,col in enumerate(zip(*cols_cl)):
if (len(set(col)) <= min_cl and min_cl > 1) or (min_cl == 1 and len(set(col)) <= 1):
return True
return False
def test_lda_r(cls,feats,cl_sl,boots,fract_sample,lda_th,tol_min,nlogs):
    fk = list(feats.keys())
means = dict([(k,[]) for k in feats.keys()])
feats['class'] = list(cls['class'])
clss = list(set(feats['class']))
for uu,k in enumerate(fk):
if k == 'class': continue
ff = [(feats['class'][i],v) for i,v in enumerate(feats[k])]
for c in clss:
if len(set([float(v[1]) for v in ff if v[0] == c])) > max(float(feats['class'].count(c))*0.5,4): continue
for i,v in enumerate(feats[k]):
if feats['class'][i] == c:
feats[k][i] = math.fabs(feats[k][i] + lrand.normalvariate(0.0,max(feats[k][i]*0.05,0.01)))
rdict = {}
for a,b in feats.items():
if a == 'class' or a == 'subclass' or a == 'subject':
rdict[a] = robjects.StrVector(b)
else: rdict[a] = robjects.FloatVector(b)
robjects.globalenv["d"] = robjects.DataFrame(rdict)
lfk = len(feats[fk[0]])
rfk = int(float(len(feats[fk[0]]))*fract_sample)
f = "class ~ "+fk[0]
for k in fk[1:]: f += " + " + k.strip()
ncl = len(set(cls['class']))
min_cl = int(float(min([cls['class'].count(c) for c in set(cls['class'])]))*fract_sample*fract_sample*0.5)
min_cl = max(min_cl,1)
pairs = [(a,b) for a in set(cls['class']) for b in set(cls['class']) if a > b]
for k in fk:
for i in range(boots):
means[k].append([])
for i in range(boots):
for rtmp in range(1000):
rand_s = [lrand.randint(0,lfk-1) for v in range(rfk)]
if not contast_within_classes_or_few_per_class(feats,rand_s,min_cl,ncl): break
rand_s = [r+1 for r in rand_s]
means[k][i] = []
for p in pairs:
robjects.globalenv["rand_s"] = robjects.IntVector(rand_s)
robjects.globalenv["sub_d"] = robjects.r('d[rand_s,]')
z = robjects.r('z <- suppressWarnings(lda(as.formula('+f+'),data=sub_d,tol='+str(tol_min)+'))')
robjects.r('w <- z$scaling[,1]')
robjects.r('w.unit <- w/sqrt(sum(w^2))')
robjects.r('ss <- sub_d[,-match("class",colnames(sub_d))]')
if 'subclass' in feats:
robjects.r('ss <- ss[,-match("subclass",colnames(ss))]')
if 'subject' in feats:
robjects.r('ss <- ss[,-match("subject",colnames(ss))]')
robjects.r('xy.matrix <- as.matrix(ss)')
robjects.r('LD <- xy.matrix%*%w.unit')
robjects.r('effect.size <- abs(mean(LD[sub_d[,"class"]=="'+p[0]+'"]) - mean(LD[sub_d[,"class"]=="'+p[1]+'"]))')
scal = robjects.r('wfinal <- w.unit * effect.size')
rres = robjects.r('mm <- z$means')
rowns = list(rres.rownames)
lenc = len(list(rres.colnames))
coeff = [abs(float(v)) if not math.isnan(float(v)) else 0.0 for v in scal]
res = dict([(pp,[float(ff) for ff in rres.rx(pp,True)] if pp in rowns else [0.0]*lenc ) for pp in [p[0],p[1]]])
for j,k in enumerate(fk):
gm = abs(res[p[0]][j] - res[p[1]][j])
means[k][i].append((gm+coeff[j])*0.5)
res = {}
for k in fk:
m = max([numpy.mean([means[k][kk][p] for kk in range(boots)]) for p in range(len(pairs))])
res[k] = math.copysign(1.0,m)*math.log(1.0+math.fabs(m),10)
return res,dict([(k,x) for k,x in res.items() if math.fabs(x) > lda_th])
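# Illustrative note (not part of LEfSe): for each feature the per-bootstrap
# score above averages (a) the absolute difference of the two class means and
# (b) the magnitude of the effect-size-scaled LDA coefficient; the reported LDA
# score is then sign(m) * log10(1 + |m|) of the largest pairwise mean, and only
# features with |score| > lda_th are kept in the second return value.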
def test_svm(cls,feats,cl_sl,boots,fract_sample,lda_th,tol_min,nsvm):
    return None
"""
fk = feats.keys()
clss = list(set(cls['class']))
y = [clss.index(c)*2-1 for c in list(cls['class'])]
xx = [feats[f] for f in fk]
if nsvm:
maxs = [max(v) for v in xx]
mins = [min(v) for v in xx]
x = [ dict([(i+1,(v-mins[i])/(maxs[i]-mins[i])) for i,v in enumerate(f)]) for f in zip(*xx)]
else: x = [ dict([(i+1,v) for i,v in enumerate(f)]) for f in zip(*xx)]
lfk = len(feats[fk[0]])
rfk = int(float(len(feats[fk[0]]))*fract_sample)
mm = []
best_c = svmutil.svm_ms(y, x, [pow(2.0,i) for i in range(-5,10)],'-t 0 -q')
for i in range(boots):
rand_s = [lrand.randint(0,lfk-1) for v in range(rfk)]
r = svmutil.svm_w([y[yi] for yi in rand_s], [x[xi] for xi in rand_s], best_c,'-t 0 -q')
mm.append(r[:len(fk)])
m = [numpy.mean(v) for v in zip(*mm)]
res = dict([(v,m[i]) for i,v in enumerate(fk)])
return res,dict([(k,x) for k,x in res.items() if math.fabs(x) > lda_th])
"""
|
|
"""Plugin common functions."""
import os
import re
import shutil
import tempfile
import OpenSSL
import pkg_resources
import zope.interface
from acme.jose import util as jose_util
from certbot import constants
from certbot import interfaces
from certbot import util
def option_namespace(name):
"""ArgumentParser options namespace (prefix of all options)."""
return name + "-"
def dest_namespace(name):
"""ArgumentParser dest namespace (prefix of all destinations)."""
return name.replace("-", "_") + "_"
private_ips_regex = re.compile(
r"(^127\.0\.0\.1)|(^10\.)|(^172\.1[6-9]\.)|"
r"(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^192\.168\.)")
hostname_regex = re.compile(
r"^(([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])\.)*[a-z]+$", re.IGNORECASE)
@zope.interface.implementer(interfaces.IPlugin)
class Plugin(object):
"""Generic plugin."""
# provider is not inherited, subclasses must define it on their own
# @zope.interface.provider(interfaces.IPluginFactory)
def __init__(self, config, name):
self.config = config
self.name = name
@jose_util.abstractclassmethod
def add_parser_arguments(cls, add):
"""Add plugin arguments to the CLI argument parser.
NOTE: If some of your flags interact with others, you can
use cli.report_config_interaction to register this to ensure
values are correctly saved/overridable during renewal.
:param callable add: Function that proxies calls to
`argparse.ArgumentParser.add_argument` prepending options
with unique plugin name prefix.
"""
@classmethod
def inject_parser_options(cls, parser, name):
"""Inject parser options.
See `~.IPlugin.inject_parser_options` for docs.
"""
# dummy function, doesn't check if dest.startswith(self.dest_namespace)
def add(arg_name_no_prefix, *args, **kwargs):
# pylint: disable=missing-docstring
return parser.add_argument(
"--{0}{1}".format(option_namespace(name), arg_name_no_prefix),
*args, **kwargs)
return cls.add_parser_arguments(add)
@property
def option_namespace(self):
"""ArgumentParser options namespace (prefix of all options)."""
return option_namespace(self.name)
def option_name(self, name):
"""Option name (include plugin namespace)."""
return self.option_namespace + name
@property
def dest_namespace(self):
"""ArgumentParser dest namespace (prefix of all destinations)."""
return dest_namespace(self.name)
def dest(self, var):
"""Find a destination for given variable ``var``."""
# this should do exactly the same thing that ArgumentParser(arg)
# does to "arg" when computing "dest"
return self.dest_namespace + var.replace("-", "_")
def conf(self, var):
"""Find a configuration value for variable ``var``."""
return getattr(self.config, self.dest(var))
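# Illustrative mapping (plugin name "apache" assumed only for this example):
# self.dest("server-root") -> "apache_server_root", and
# self.conf("server-root") reads getattr(self.config, "apache_server_root").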
# other
class Addr(object):
r"""Represents an virtual host address.
:param str addr: addr part of vhost address
:param str port: port number or \*, or ""
"""
def __init__(self, tup, ipv6=False):
self.tup = tup
self.ipv6 = ipv6
@classmethod
def fromstring(cls, str_addr):
"""Initialize Addr from string."""
if str_addr.startswith('['):
# IPv6 addresses start with "["
endIndex = str_addr.rfind(']')
host = str_addr[:endIndex + 1]
port = ''
if len(str_addr) > endIndex + 2 and str_addr[endIndex + 1] == ':':
port = str_addr[endIndex + 2:]
return cls((host, port), ipv6=True)
else:
tup = str_addr.partition(':')
return cls((tup[0], tup[2]))
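# Parsing examples (a sketch of the expected behaviour of fromstring above):
# Addr.fromstring("192.0.2.1:80") -> tup == ("192.0.2.1", "80"), ipv6=False
# Addr.fromstring("192.0.2.1") -> tup == ("192.0.2.1", ""), ipv6=False
# Addr.fromstring("[2001:db8::1]:443") -> tup == ("[2001:db8::1]", "443"),
# ipv6=True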
def __str__(self):
if self.tup[1]:
return "%s:%s" % self.tup
return self.tup[0]
def __eq__(self, other):
if isinstance(other, self.__class__):
if self.ipv6:
# compare normalized to take different
# styles of representation into account
return (other.ipv6 and
self._normalize_ipv6(self.tup[0]) ==
self._normalize_ipv6(other.tup[0]) and
self.tup[1] == other.tup[1])
else:
return self.tup == other.tup
return False
def __hash__(self):
return hash(self.tup)
def get_addr(self):
"""Return addr part of Addr object."""
return self.tup[0]
def get_port(self):
"""Return port."""
return self.tup[1]
def get_addr_obj(self, port):
"""Return new address object with same addr and new port."""
return self.__class__((self.tup[0], port), self.ipv6)
def _normalize_ipv6(self, addr):
"""Return IPv6 address in normalized form, helper function"""
addr = addr.lstrip("[")
addr = addr.rstrip("]")
return self._explode_ipv6(addr)
def get_ipv6_exploded(self):
"""Return IPv6 in normalized form"""
if self.ipv6:
return ":".join(self._normalize_ipv6(self.tup[0]))
return ""
def _explode_ipv6(self, addr):
"""Explode IPv6 address for comparison"""
result = ['0', '0', '0', '0', '0', '0', '0', '0']
addr_list = addr.split(":")
if len(addr_list) > len(result):
# too long, truncate
addr_list = addr_list[0:len(result)]
append_to_end = False
for i in range(0, len(addr_list)):
block = addr_list[i]
if len(block) == 0:
# encountered ::, so rest of the blocks should be
# appended to the end
append_to_end = True
continue
elif len(block) > 1:
# remove leading zeros
block = block.lstrip("0")
if not append_to_end:
result[i] = str(block)
else:
# count the location from the end using negative indices
result[i-len(addr_list)] = str(block)
return result
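# Worked example for _explode_ipv6 (illustrative only):
# "2001:db8::1".split(":") gives ['2001', 'db8', '', '1']; the empty block
# flips append_to_end, so '1' is written from the end and the result is
# ['2001', 'db8', '0', '0', '0', '0', '0', '1'].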
class TLSSNI01(object):
"""Abstract base for TLS-SNI-01 challenge performers"""
def __init__(self, configurator):
self.configurator = configurator
self.achalls = []
self.indices = []
self.challenge_conf = os.path.join(
configurator.config.config_dir, "le_tls_sni_01_cert_challenge.conf")
# self.completed = 0
def add_chall(self, achall, idx=None):
"""Add challenge to TLSSNI01 object to perform at once.
:param .KeyAuthorizationAnnotatedChallenge achall: Annotated
TLSSNI01 challenge.
:param int idx: index to challenge in a larger array
"""
self.achalls.append(achall)
if idx is not None:
self.indices.append(idx)
def get_cert_path(self, achall):
"""Returns standardized name for challenge certificate.
:param .KeyAuthorizationAnnotatedChallenge achall: Annotated
tls-sni-01 challenge.
:returns: certificate file name
:rtype: str
"""
return os.path.join(self.configurator.config.work_dir,
achall.chall.encode("token") + ".crt")
def get_key_path(self, achall):
"""Get standardized path to challenge key."""
return os.path.join(self.configurator.config.work_dir,
achall.chall.encode("token") + '.pem')
def _setup_challenge_cert(self, achall, cert_key=None):
"""Generate and write out challenge certificate."""
cert_path = self.get_cert_path(achall)
key_path = self.get_key_path(achall)
# Register the path before you write out the file
self.configurator.reverter.register_file_creation(True, key_path)
self.configurator.reverter.register_file_creation(True, cert_path)
response, (cert, key) = achall.response_and_validation(
cert_key=cert_key)
cert_pem = OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, cert)
key_pem = OpenSSL.crypto.dump_privatekey(
OpenSSL.crypto.FILETYPE_PEM, key)
# Write out challenge cert and key
with open(cert_path, "wb") as cert_chall_fd:
cert_chall_fd.write(cert_pem)
with util.safe_open(key_path, 'wb', chmod=0o400) as key_file:
key_file.write(key_pem)
return response
# test utils used by certbot_apache/certbot_nginx (hence
# "pragma: no cover") TODO: this might quickly lead to dead code (also
# c.f. #383)
def setup_ssl_options(config_dir, src, dest): # pragma: no cover
"""Move the ssl_options into position and return the path."""
option_path = os.path.join(config_dir, dest)
shutil.copyfile(src, option_path)
return option_path
def dir_setup(test_dir, pkg): # pragma: no cover
"""Setup the directories necessary for the configurator."""
temp_dir = tempfile.mkdtemp("temp")
config_dir = tempfile.mkdtemp("config")
work_dir = tempfile.mkdtemp("work")
os.chmod(temp_dir, constants.CONFIG_DIRS_MODE)
os.chmod(config_dir, constants.CONFIG_DIRS_MODE)
os.chmod(work_dir, constants.CONFIG_DIRS_MODE)
test_configs = pkg_resources.resource_filename(
pkg, os.path.join("testdata", test_dir))
shutil.copytree(
test_configs, os.path.join(temp_dir, test_dir), symlinks=True)
return temp_dir, config_dir, work_dir
|
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy as oslo_policy
from oslo_utils import excutils
import six
import webob.exc
from neutron.api import api_common
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.api.v2 import resource as wsgi_resource
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.db import api as db_api
from neutron.i18n import _LE, _LI
from neutron import policy
from neutron import quota
LOG = logging.getLogger(__name__)
FAULT_MAP = {exceptions.NotFound: webob.exc.HTTPNotFound,
exceptions.Conflict: webob.exc.HTTPConflict,
exceptions.InUse: webob.exc.HTTPConflict,
exceptions.BadRequest: webob.exc.HTTPBadRequest,
exceptions.ServiceUnavailable: webob.exc.HTTPServiceUnavailable,
exceptions.NotAuthorized: webob.exc.HTTPForbidden,
netaddr.AddrFormatError: webob.exc.HTTPBadRequest,
oslo_policy.PolicyNotAuthorized: webob.exc.HTTPForbidden
}
class Controller(object):
LIST = 'list'
SHOW = 'show'
CREATE = 'create'
UPDATE = 'update'
DELETE = 'delete'
def __init__(self, plugin, collection, resource, attr_info,
allow_bulk=False, member_actions=None, parent=None,
allow_pagination=False, allow_sorting=False):
if member_actions is None:
member_actions = []
self._plugin = plugin
self._collection = collection.replace('-', '_')
self._resource = resource.replace('-', '_')
self._attr_info = attr_info
self._allow_bulk = allow_bulk
self._allow_pagination = allow_pagination
self._allow_sorting = allow_sorting
self._native_bulk = self._is_native_bulk_supported()
self._native_pagination = self._is_native_pagination_supported()
self._native_sorting = self._is_native_sorting_supported()
self._policy_attrs = [name for (name, info) in self._attr_info.items()
if info.get('required_by_policy')]
self._notifier = n_rpc.get_notifier('network')
# use the plugin's DHCP notifier if one is already instantiated
agent_notifiers = getattr(plugin, 'agent_notifiers', {})
self._dhcp_agent_notifier = (
agent_notifiers.get(const.AGENT_TYPE_DHCP) or
dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
)
if cfg.CONF.notify_nova_on_port_data_changes:
from neutron.notifiers import nova
self._nova_notifier = nova.Notifier()
self._member_actions = member_actions
self._primary_key = self._get_primary_key()
if self._allow_pagination and self._native_pagination:
# Native pagination needs native sorting support
if not self._native_sorting:
raise exceptions.Invalid(
_("Native pagination depend on native sorting")
)
if not self._allow_sorting:
LOG.info(_LI("Allow sorting is enabled because native "
"pagination requires native sorting"))
self._allow_sorting = True
if parent:
self._parent_id_name = '%s_id' % parent['member_name']
parent_part = '_%s' % parent['member_name']
else:
self._parent_id_name = None
parent_part = ''
self._plugin_handlers = {
self.LIST: 'get%s_%s' % (parent_part, self._collection),
self.SHOW: 'get%s_%s' % (parent_part, self._resource)
}
for action in [self.CREATE, self.UPDATE, self.DELETE]:
self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
self._resource)
def _get_primary_key(self, default_primary_key='id'):
for key, value in six.iteritems(self._attr_info):
if value.get('primary_key', False):
return key
return default_primary_key
def _is_native_bulk_supported(self):
native_bulk_attr_name = ("_%s__native_bulk_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_bulk_attr_name, False)
def _is_native_pagination_supported(self):
native_pagination_attr_name = ("_%s__native_pagination_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_pagination_attr_name, False)
def _is_native_sorting_supported(self):
native_sorting_attr_name = ("_%s__native_sorting_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_sorting_attr_name, False)
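# How the three *_supported probes above resolve (an illustrative note based
# only on Python name mangling): a plugin class whose body sets
# __native_bulk_support = True
# exposes it as _<PluginClass>__native_bulk_support, which is exactly the
# attribute name constructed here; a missing attribute defaults to False.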
def _exclude_attributes_by_policy(self, context, data):
"""Identifies attributes to exclude according to authZ policies.
Return a list of attribute names which should be stripped from the
response returned to the user because the user is not authorized
to see them.
"""
attributes_to_exclude = []
for attr_name in data.keys():
attr_data = self._attr_info.get(attr_name)
if attr_data and attr_data['is_visible']:
if policy.check(
context,
'%s:%s' % (self._plugin_handlers[self.SHOW], attr_name),
data,
might_not_exist=True,
pluralized=self._collection):
# this attribute is visible, check next one
continue
# if the code reaches this point then either the policy check
# failed or the attribute was not visible in the first place
attributes_to_exclude.append(attr_name)
return attributes_to_exclude
def _view(self, context, data, fields_to_strip=None):
"""Build a view of an API resource.
:param context: the neutron context
:param data: the object for which a view is being created
:param fields_to_strip: attributes to remove from the view
:returns: a view of the object which includes only attributes
visible according to API resource declaration and authZ policies.
"""
fields_to_strip = ((fields_to_strip or []) +
self._exclude_attributes_by_policy(context, data))
return self._filter_attributes(context, data, fields_to_strip)
def _filter_attributes(self, context, data, fields_to_strip=None):
if not fields_to_strip:
return data
return dict(item for item in six.iteritems(data)
if (item[0] not in fields_to_strip))
def _do_field_list(self, original_fields):
fields_to_add = None
# don't do anything if fields were not specified in the request
if original_fields:
fields_to_add = [attr for attr in self._policy_attrs
if attr not in original_fields]
original_fields.extend(self._policy_attrs)
return original_fields, fields_to_add
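# Worked example (values invented for illustration): with
# self._policy_attrs == ['tenant_id'] and a request that asked only for
# ['name'], this returns (['name', 'tenant_id'], ['tenant_id']); the second
# list records the policy-only fields so they can be stripped from the
# response later on.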
def __getattr__(self, name):
if name in self._member_actions:
@db_api.retry_db_errors
def _handle_action(request, id, **kwargs):
arg_list = [request.context, id]
# Ensure policy engine is initialized
policy.init()
# Fetch the resource and verify if the user can access it
try:
resource = self._item(request, id, True)
except oslo_policy.PolicyNotAuthorized:
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
body = copy.deepcopy(kwargs.pop('body', None))
# Explicit comparison with None to distinguish from {}
if body is not None:
arg_list.append(body)
# It is ok to raise a 403 because accessibility to the
# object was checked earlier in this method
policy.enforce(request.context,
name,
resource,
pluralized=self._collection)
return getattr(self._plugin, name)(*arg_list, **kwargs)
return _handle_action
else:
raise AttributeError()
def _get_pagination_helper(self, request):
if self._allow_pagination and self._native_pagination:
return api_common.PaginationNativeHelper(request,
self._primary_key)
elif self._allow_pagination:
return api_common.PaginationEmulatedHelper(request,
self._primary_key)
return api_common.NoPaginationHelper(request, self._primary_key)
def _get_sorting_helper(self, request):
if self._allow_sorting and self._native_sorting:
return api_common.SortingNativeHelper(request, self._attr_info)
elif self._allow_sorting:
return api_common.SortingEmulatedHelper(request, self._attr_info)
return api_common.NoSortingHelper(request, self._attr_info)
def _items(self, request, do_authz=False, parent_id=None):
"""Retrieves and formats a list of elements of the requested entity."""
# NOTE(salvatore-orlando): The following ensures that fields which
# are needed for authZ policy validation are not stripped away by the
# plugin before returning.
original_fields, fields_to_add = self._do_field_list(
api_common.list_args(request, 'fields'))
filters = api_common.get_filters(request, self._attr_info,
['fields', 'sort_key', 'sort_dir',
'limit', 'marker', 'page_reverse'])
kwargs = {'filters': filters,
'fields': original_fields}
sorting_helper = self._get_sorting_helper(request)
pagination_helper = self._get_pagination_helper(request)
sorting_helper.update_args(kwargs)
sorting_helper.update_fields(original_fields, fields_to_add)
pagination_helper.update_args(kwargs)
pagination_helper.update_fields(original_fields, fields_to_add)
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj_getter = getattr(self._plugin, self._plugin_handlers[self.LIST])
obj_list = obj_getter(request.context, **kwargs)
obj_list = sorting_helper.sort(obj_list)
obj_list = pagination_helper.paginate(obj_list)
# Check authz
if do_authz:
# FIXME(salvatore-orlando): obj_getter might return references to
# other resources. Must check authZ on them too.
# Omit items from list that should not be visible
obj_list = [obj for obj in obj_list
if policy.check(request.context,
self._plugin_handlers[self.SHOW],
obj,
plugin=self._plugin,
pluralized=self._collection)]
# Use the first element in the list for discriminating which attributes
# should be filtered out because of authZ policies
# fields_to_add contains a list of attributes added for request policy
# checks but that were not required by the user. They should
# therefore be stripped
fields_to_strip = fields_to_add or []
if obj_list:
fields_to_strip += self._exclude_attributes_by_policy(
request.context, obj_list[0])
collection = {self._collection:
[self._filter_attributes(
request.context, obj,
fields_to_strip=fields_to_strip)
for obj in obj_list]}
pagination_links = pagination_helper.get_links(obj_list)
if pagination_links:
collection[self._collection + "_links"] = pagination_links
return collection
def _item(self, request, id, do_authz=False, field_list=None,
parent_id=None):
"""Retrieves and formats a single element of the requested entity."""
kwargs = {'fields': field_list}
action = self._plugin_handlers[self.SHOW]
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj_getter = getattr(self._plugin, action)
obj = obj_getter(request.context, id, **kwargs)
# Check authz
# FIXME(salvatore-orlando): obj_getter might return references to
# other resources. Must check authZ on them too.
if do_authz:
policy.enforce(request.context,
action,
obj,
pluralized=self._collection)
return obj
def _send_dhcp_notification(self, context, data, methodname):
if cfg.CONF.dhcp_agent_notification:
if self._collection in data:
for body in data[self._collection]:
item = {self._resource: body}
self._dhcp_agent_notifier.notify(context, item, methodname)
else:
self._dhcp_agent_notifier.notify(context, data, methodname)
def _send_nova_notification(self, action, orig, returned):
if hasattr(self, '_nova_notifier'):
self._nova_notifier.send_network_change(action, orig, returned)
def index(self, request, **kwargs):
"""Returns a list of the requested entity."""
parent_id = kwargs.get(self._parent_id_name)
# Ensure policy engine is initialized
policy.init()
return self._items(request, True, parent_id)
def show(self, request, id, **kwargs):
"""Returns detailed information about the requested entity."""
try:
# NOTE(salvatore-orlando): The following ensures that fields
# which are needed for authZ policy validation are not stripped
# away by the plugin before returning.
field_list, added_fields = self._do_field_list(
api_common.list_args(request, "fields"))
parent_id = kwargs.get(self._parent_id_name)
# Ensure policy engine is initialized
policy.init()
return {self._resource:
self._view(request.context,
self._item(request,
id,
do_authz=True,
field_list=field_list,
parent_id=parent_id),
fields_to_strip=added_fields)}
except oslo_policy.PolicyNotAuthorized:
# To avoid giving away information, pretend that it
# doesn't exist
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
def _emulate_bulk_create(self, obj_creator, request, body, parent_id=None):
objs = []
try:
for item in body[self._collection]:
kwargs = {self._resource: item}
if parent_id:
kwargs[self._parent_id_name] = parent_id
fields_to_strip = self._exclude_attributes_by_policy(
request.context, item)
objs.append(self._filter_attributes(
request.context,
obj_creator(request.context, **kwargs),
fields_to_strip=fields_to_strip))
return objs
# Note(salvatore-orlando): broad catch as in theory a plugin
# could raise any kind of exception
except Exception:
with excutils.save_and_reraise_exception():
for obj in objs:
obj_deleter = getattr(self._plugin,
self._plugin_handlers[self.DELETE])
try:
kwargs = ({self._parent_id_name: parent_id}
if parent_id else {})
obj_deleter(request.context, obj['id'], **kwargs)
except Exception:
# broad catch as our only purpose is to log the
# exception
LOG.exception(_LE("Unable to undo add for "
"%(resource)s %(id)s"),
{'resource': self._resource,
'id': obj['id']})
# TODO(salvatore-orlando): The object being processed when the
# plugin raised might or might not have been created in the db.
# We need a way of ensuring that if it has been created,
# it is then deleted
@db_api.retry_db_errors
def create(self, request, body=None, **kwargs):
"""Creates a new instance of the requested entity."""
parent_id = kwargs.get(self._parent_id_name)
self._notifier.info(request.context,
self._resource + '.create.start',
body)
body = Controller.prepare_request_body(request.context,
copy.deepcopy(body), True,
self._resource, self._attr_info,
allow_bulk=self._allow_bulk)
action = self._plugin_handlers[self.CREATE]
# Check authz
if self._collection in body:
# Have to account for bulk create
items = body[self._collection]
deltas = {}
bulk = True
else:
items = [body]
bulk = False
# Ensure policy engine is initialized
policy.init()
for item in items:
self._validate_network_tenant_ownership(request,
item[self._resource])
policy.enforce(request.context,
action,
item[self._resource],
pluralized=self._collection)
if 'tenant_id' not in item[self._resource]:
# no tenant_id - no quota check
continue
try:
tenant_id = item[self._resource]['tenant_id']
count = quota.QUOTAS.count(request.context, self._resource,
self._plugin, self._collection,
tenant_id)
if bulk:
delta = deltas.get(tenant_id, 0) + 1
deltas[tenant_id] = delta
else:
delta = 1
kwargs = {self._resource: count + delta}
except exceptions.QuotaResourceUnknown as e:
# We don't want to quota this resource
LOG.debug(e)
else:
quota.QUOTAS.limit_check(request.context,
item[self._resource]['tenant_id'],
**kwargs)
def notify(create_result):
notifier_method = self._resource + '.create.end'
self._notifier.info(request.context,
notifier_method,
create_result)
self._send_dhcp_notification(request.context,
create_result,
notifier_method)
return create_result
kwargs = {self._parent_id_name: parent_id} if parent_id else {}
if self._collection in body and self._native_bulk:
# plugin does atomic bulk create operations
obj_creator = getattr(self._plugin, "%s_bulk" % action)
objs = obj_creator(request.context, body, **kwargs)
# Use first element of list to discriminate attributes which
# should be removed because of authZ policies
fields_to_strip = self._exclude_attributes_by_policy(
request.context, objs[0])
return notify({self._collection: [self._filter_attributes(
request.context, obj, fields_to_strip=fields_to_strip)
for obj in objs]})
else:
obj_creator = getattr(self._plugin, action)
if self._collection in body:
# Emulate atomic bulk behavior
objs = self._emulate_bulk_create(obj_creator, request,
body, parent_id)
return notify({self._collection: objs})
else:
kwargs.update({self._resource: body})
obj = obj_creator(request.context, **kwargs)
self._send_nova_notification(action, {},
{self._resource: obj})
return notify({self._resource: self._view(request.context,
obj)})
@db_api.retry_db_errors
def delete(self, request, id, **kwargs):
"""Deletes the specified entity."""
self._notifier.info(request.context,
self._resource + '.delete.start',
{self._resource + '_id': id})
action = self._plugin_handlers[self.DELETE]
# Check authz
policy.init()
parent_id = kwargs.get(self._parent_id_name)
obj = self._item(request, id, parent_id=parent_id)
try:
policy.enforce(request.context,
action,
obj,
pluralized=self._collection)
except oslo_policy.PolicyNotAuthorized:
# To avoid giving away information, pretend that it
# doesn't exist
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
obj_deleter = getattr(self._plugin, action)
obj_deleter(request.context, id, **kwargs)
notifier_method = self._resource + '.delete.end'
self._notifier.info(request.context,
notifier_method,
{self._resource + '_id': id})
result = {self._resource: self._view(request.context, obj)}
self._send_nova_notification(action, {}, result)
self._send_dhcp_notification(request.context,
result,
notifier_method)
@db_api.retry_db_errors
def update(self, request, id, body=None, **kwargs):
"""Updates the specified entity's attributes."""
parent_id = kwargs.get(self._parent_id_name)
try:
payload = body.copy()
except AttributeError:
msg = _("Invalid format: %s") % request.body
raise exceptions.BadRequest(resource='body', msg=msg)
payload['id'] = id
self._notifier.info(request.context,
self._resource + '.update.start',
payload)
body = Controller.prepare_request_body(request.context, body, False,
self._resource, self._attr_info,
allow_bulk=self._allow_bulk)
action = self._plugin_handlers[self.UPDATE]
# Load object to check authz
# but pass only attributes in the original body and required
# by the policy engine to the policy 'brain'
field_list = [name for (name, value) in six.iteritems(self._attr_info)
if (value.get('required_by_policy') or
value.get('primary_key') or
'default' not in value)]
# Ensure policy engine is initialized
policy.init()
orig_obj = self._item(request, id, field_list=field_list,
parent_id=parent_id)
orig_object_copy = copy.copy(orig_obj)
orig_obj.update(body[self._resource])
# Make a list of attributes to be updated to inform the policy engine
# which attributes are set explicitly so that it can distinguish them
# from the ones that are set to their default values.
orig_obj[const.ATTRIBUTES_TO_UPDATE] = body[self._resource].keys()
try:
policy.enforce(request.context,
action,
orig_obj,
pluralized=self._collection)
except oslo_policy.PolicyNotAuthorized:
with excutils.save_and_reraise_exception() as ctxt:
# If a tenant is modifying its own object, it's safe to return
# a 403. Otherwise, pretend that it doesn't exist to avoid
# giving away information.
if request.context.tenant_id != orig_obj['tenant_id']:
ctxt.reraise = False
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
obj_updater = getattr(self._plugin, action)
kwargs = {self._resource: body}
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj = obj_updater(request.context, id, **kwargs)
result = {self._resource: self._view(request.context, obj)}
notifier_method = self._resource + '.update.end'
self._notifier.info(request.context, notifier_method, result)
self._send_dhcp_notification(request.context,
result,
notifier_method)
self._send_nova_notification(action, orig_object_copy, result)
return result
@staticmethod
def _populate_tenant_id(context, res_dict, attr_info, is_create):
if (('tenant_id' in res_dict and
res_dict['tenant_id'] != context.tenant_id and
not context.is_admin)):
msg = _("Specifying 'tenant_id' other than authenticated "
"tenant in request requires admin privileges")
raise webob.exc.HTTPBadRequest(msg)
if is_create and 'tenant_id' not in res_dict:
if context.tenant_id:
res_dict['tenant_id'] = context.tenant_id
elif 'tenant_id' in attr_info:
msg = _("Running without keystone AuthN requires "
"that tenant_id is specified")
raise webob.exc.HTTPBadRequest(msg)
@staticmethod
def prepare_request_body(context, body, is_create, resource, attr_info,
allow_bulk=False):
"""Verifies required attributes are in request body.
Also checking that an attribute is only specified if it is allowed
for the given operation (create/update).
Attribute with default values are considered to be optional.
body argument must be the deserialized body.
"""
collection = resource + "s"
if not body:
raise webob.exc.HTTPBadRequest(_("Resource body required"))
LOG.debug("Request body: %(body)s", {'body': body})
try:
if collection in body:
if not allow_bulk:
raise webob.exc.HTTPBadRequest(_("Bulk operation "
"not supported"))
if not body[collection]:
raise webob.exc.HTTPBadRequest(_("Resources required"))
bulk_body = [
Controller.prepare_request_body(
context, item if resource in item
else {resource: item}, is_create, resource, attr_info,
allow_bulk) for item in body[collection]
]
return {collection: bulk_body}
res_dict = body.get(resource)
except (AttributeError, TypeError):
msg = _("Body contains invalid data")
raise webob.exc.HTTPBadRequest(msg)
if res_dict is None:
msg = _("Unable to find '%s' in request body") % resource
raise webob.exc.HTTPBadRequest(msg)
Controller._populate_tenant_id(context, res_dict, attr_info, is_create)
Controller._verify_attributes(res_dict, attr_info)
if is_create: # POST
for attr, attr_vals in six.iteritems(attr_info):
if attr_vals['allow_post']:
if ('default' not in attr_vals and
attr not in res_dict):
msg = _("Failed to parse request. Required "
"attribute '%s' not specified") % attr
raise webob.exc.HTTPBadRequest(msg)
res_dict[attr] = res_dict.get(attr,
attr_vals.get('default'))
else:
if attr in res_dict:
msg = _("Attribute '%s' not allowed in POST") % attr
raise webob.exc.HTTPBadRequest(msg)
else: # PUT
for attr, attr_vals in six.iteritems(attr_info):
if attr in res_dict and not attr_vals['allow_put']:
msg = _("Cannot update read-only attribute %s") % attr
raise webob.exc.HTTPBadRequest(msg)
for attr, attr_vals in six.iteritems(attr_info):
if (attr not in res_dict or
res_dict[attr] is attributes.ATTR_NOT_SPECIFIED):
continue
# Convert values if necessary
if 'convert_to' in attr_vals:
res_dict[attr] = attr_vals['convert_to'](res_dict[attr])
# Check that configured values are correct
if 'validate' not in attr_vals:
continue
for rule in attr_vals['validate']:
res = attributes.validators[rule](res_dict[attr],
attr_vals['validate'][rule])
if res:
msg_dict = dict(attr=attr, reason=res)
msg = _("Invalid input for %(attr)s. "
"Reason: %(reason)s.") % msg_dict
raise webob.exc.HTTPBadRequest(msg)
return body
@staticmethod
def _verify_attributes(res_dict, attr_info):
extra_keys = set(res_dict.keys()) - set(attr_info.keys())
if extra_keys:
msg = _("Unrecognized attribute(s) '%s'") % ', '.join(extra_keys)
raise webob.exc.HTTPBadRequest(msg)
def _validate_network_tenant_ownership(self, request, resource_item):
# TODO(salvatore-orlando): consider whether this check can be folded
# into the policy engine
if (request.context.is_admin or request.context.is_advsvc or
self._resource not in ('port', 'subnet')):
return
network = self._plugin.get_network(
request.context,
resource_item['network_id'])
# do not perform the check on shared networks
if network.get('shared'):
return
network_owner = network['tenant_id']
if network_owner != resource_item['tenant_id']:
msg = _("Tenant %(tenant_id)s not allowed to "
"create %(resource)s on this network")
raise webob.exc.HTTPForbidden(msg % {
"tenant_id": resource_item['tenant_id'],
"resource": self._resource,
})
def create_resource(collection, resource, plugin, params, allow_bulk=False,
member_actions=None, parent=None, allow_pagination=False,
allow_sorting=False):
controller = Controller(plugin, collection, resource, params, allow_bulk,
member_actions=member_actions, parent=parent,
allow_pagination=allow_pagination,
allow_sorting=allow_sorting)
return wsgi_resource.Resource(controller, FAULT_MAP)
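# Typical wiring (hypothetical names, a sketch rather than real configuration):
# controller = create_resource('widgets', 'widget', plugin, params,
# allow_bulk=True)
# exposes list/show/create/update/delete for the 'widget' resource and maps
# plugin exceptions onto HTTP errors through FAULT_MAP.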
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2022 F4PGA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import edalize
import os
work_root = 'build'
post_imp_file = os.path.realpath(os.path.join(work_root, 'post.tcl'))
os.makedirs(work_root, exist_ok=True)
synth_tool = 'vivado'
srcs = [
'lowrisc_constants_top_pkg_0/rtl/top_pkg.sv',
'lowrisc_dv_pins_if_0/pins_if.sv',
'lowrisc_prim_generic_clock_gating_0/rtl/prim_generic_clock_gating.sv',
'lowrisc_prim_generic_clock_mux2_0/rtl/prim_generic_clock_mux2.sv',
'lowrisc_prim_generic_flash_0/rtl/prim_generic_flash.sv',
'lowrisc_prim_generic_pad_wrapper_0/rtl/prim_generic_pad_wrapper.sv',
'lowrisc_prim_generic_ram_1p_0/rtl/prim_generic_ram_1p.sv',
'lowrisc_prim_generic_ram_2p_0/rtl/prim_generic_ram_2p.sv',
'lowrisc_prim_prim_pkg_0.1/rtl/prim_pkg.sv',
'lowrisc_prim_xilinx_clock_gating_0/rtl/prim_xilinx_clock_gating.sv',
'lowrisc_prim_xilinx_clock_mux2_0/rtl/prim_xilinx_clock_mux2.sv',
'lowrisc_prim_xilinx_pad_wrapper_0/rtl/prim_xilinx_pad_wrapper.sv',
'lowrisc_prim_xilinx_ram_2p_0/rtl/prim_xilinx_ram_2p.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_pkg.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_alu.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_compressed_decoder.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_controller.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_cs_registers.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_decoder.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_ex_block.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_fetch_fifo.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_id_stage.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_if_stage.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_load_store_unit.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_multdiv_fast.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_multdiv_slow.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_prefetch_buffer.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_pmp.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_register_file_ff.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_core.sv',
'lowrisc_ip_flash_ctrl_pkg_0.1/rtl/flash_ctrl_pkg.sv',
'lowrisc_prim_clock_gating_0/abstract/prim_clock_gating.sv',
'lowrisc_prim_clock_mux2_0/abstract/prim_clock_mux2.sv',
'lowrisc_prim_diff_decode_0/rtl/prim_diff_decode.sv',
'lowrisc_prim_pad_wrapper_0/abstract/prim_pad_wrapper.sv',
'lowrisc_prim_ram_1p_0/abstract/prim_ram_1p.sv',
'lowrisc_prim_ram_2p_0/abstract/prim_ram_2p.sv',
'lowrisc_tlul_headers_0.1/rtl/tlul_pkg.sv',
'lowrisc_prim_all_0.1/rtl/prim_clock_inverter.sv',
'lowrisc_prim_all_0.1/rtl/prim_alert_receiver.sv',
'lowrisc_prim_all_0.1/rtl/prim_alert_sender.sv',
'lowrisc_prim_all_0.1/rtl/prim_arbiter_ppc.sv',
'lowrisc_prim_all_0.1/rtl/prim_arbiter_tree.sv',
'lowrisc_prim_all_0.1/rtl/prim_esc_receiver.sv',
'lowrisc_prim_all_0.1/rtl/prim_esc_sender.sv',
'lowrisc_prim_all_0.1/rtl/prim_sram_arbiter.sv',
'lowrisc_prim_all_0.1/rtl/prim_fifo_async.sv',
'lowrisc_prim_all_0.1/rtl/prim_fifo_sync.sv',
'lowrisc_prim_all_0.1/rtl/prim_flop_2sync.sv',
'lowrisc_prim_all_0.1/rtl/prim_lfsr.sv',
'lowrisc_prim_all_0.1/rtl/prim_packer.sv',
'lowrisc_prim_all_0.1/rtl/prim_pulse_sync.sv',
'lowrisc_prim_all_0.1/rtl/prim_filter.sv',
'lowrisc_prim_all_0.1/rtl/prim_filter_ctr.sv',
'lowrisc_prim_all_0.1/rtl/prim_subreg.sv',
'lowrisc_prim_all_0.1/rtl/prim_subreg_ext.sv',
'lowrisc_prim_all_0.1/rtl/prim_intr_hw.sv',
'lowrisc_prim_all_0.1/rtl/prim_secded_39_32_enc.sv',
'lowrisc_prim_all_0.1/rtl/prim_secded_39_32_dec.sv',
'lowrisc_prim_all_0.1/rtl/prim_ram_2p_adv.sv',
'lowrisc_prim_all_0.1/rtl/prim_ram_2p_async_adv.sv',
'lowrisc_prim_flash_0/abstract/prim_flash.sv',
'lowrisc_top_earlgrey_alert_handler_reg_0.1/rtl/autogen/alert_handler_reg_pkg.sv',
'lowrisc_top_earlgrey_alert_handler_reg_0.1/rtl/autogen/alert_handler_reg_top.sv',
'lowrisc_top_earlgrey_pinmux_reg_0.1/rtl/autogen/pinmux_reg_pkg.sv',
'lowrisc_top_earlgrey_pinmux_reg_0.1/rtl/autogen/pinmux_reg_top.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_consts_pkg.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_nb_in_pe.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_nb_out_pe.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_nb_pe.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_rx.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_tx.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_tx_mux.sv',
'lowrisc_prim_generic_rom_0/rtl/prim_generic_rom.sv',
'lowrisc_prim_xilinx_rom_0/rtl/prim_xilinx_rom.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_fifo_sync.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_fifo_async.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_assert.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_err.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_assert_multiple.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/debug_rom/debug_rom.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_pkg.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_sba.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_csrs.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_mem.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dmi_cdc.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dmi_jtag.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dmi_jtag_tap.sv',
'lowrisc_prim_rom_0/abstract/prim_rom.sv',
'lowrisc_tlul_adapter_reg_0.1/rtl/tlul_adapter_reg.sv',
'lowrisc_tlul_adapter_sram_0.1/rtl/tlul_adapter_sram.sv',
'lowrisc_tlul_socket_1n_0.1/rtl/tlul_err_resp.sv',
'lowrisc_tlul_socket_1n_0.1/rtl/tlul_socket_1n.sv',
'lowrisc_tlul_socket_m1_0.1/rtl/tlul_socket_m1.sv',
'lowrisc_tlul_sram2tlul_0.1/rtl/sram2tlul.sv',
'lowrisc_ip_aes_0.5/rtl/aes_pkg.sv',
'lowrisc_ip_aes_0.5/rtl/aes_reg_pkg.sv',
'lowrisc_ip_aes_0.5/rtl/aes_reg_top.sv',
'lowrisc_ip_aes_0.5/rtl/aes_core.sv',
'lowrisc_ip_aes_0.5/rtl/aes_control.sv',
'lowrisc_ip_aes_0.5/rtl/aes_cipher_core.sv',
'lowrisc_ip_aes_0.5/rtl/aes_cipher_control.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sub_bytes.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sbox.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sbox_lut.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sbox_canright.sv',
'lowrisc_ip_aes_0.5/rtl/aes_shift_rows.sv',
'lowrisc_ip_aes_0.5/rtl/aes_mix_columns.sv',
'lowrisc_ip_aes_0.5/rtl/aes_mix_single_column.sv',
'lowrisc_ip_aes_0.5/rtl/aes_key_expand.sv',
'lowrisc_ip_aes_0.5/rtl/aes.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_pkg.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_reg_wrap.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_class.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_ping_timer.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_esc_timer.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_accu.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_ctrl_reg_pkg.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_ctrl_reg_top.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_erase_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_prog_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_rd_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_mp.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_phy.sv',
'lowrisc_ip_gpio_0.1/rtl/gpio_reg_pkg.sv',
'lowrisc_ip_gpio_0.1/rtl/gpio.sv',
'lowrisc_ip_gpio_0.1/rtl/gpio_reg_top.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_pkg.sv',
'lowrisc_ip_hmac_0.1/rtl/sha2.sv',
'lowrisc_ip_hmac_0.1/rtl/sha2_pad.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_reg_pkg.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_reg_top.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_core.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac.sv',
'lowrisc_ip_nmi_gen_0.1/rtl/nmi_gen_reg_pkg.sv',
'lowrisc_ip_nmi_gen_0.1/rtl/nmi_gen_reg_top.sv',
'lowrisc_ip_nmi_gen_0.1/rtl/nmi_gen.sv',
'lowrisc_ip_pinmux_component_0.1/rtl/pinmux.sv',
'lowrisc_ip_rv_core_ibex_0.1/rtl/rv_core_ibex.sv',
'lowrisc_ip_rv_dm_0.1/rtl/rv_dm.sv',
'lowrisc_ip_rv_dm_0.1/rtl/tlul_adapter_host.sv',
'lowrisc_ip_rv_plic_component_0.1/rtl/rv_plic_gateway.sv',
'lowrisc_ip_rv_plic_component_0.1/rtl/rv_plic_target.sv',
'lowrisc_ip_rv_timer_0.1/rtl/rv_timer_reg_pkg.sv',
'lowrisc_ip_rv_timer_0.1/rtl/rv_timer_reg_top.sv',
'lowrisc_ip_rv_timer_0.1/rtl/timer_core.sv',
'lowrisc_ip_rv_timer_0.1/rtl/rv_timer.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device_reg_pkg.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device_reg_top.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device_pkg.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_fwm_rxf_ctrl.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_fwm_txf_ctrl.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_fwmode.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device.sv',
'lowrisc_ip_uart_0.1/rtl/uart_reg_pkg.sv',
'lowrisc_ip_uart_0.1/rtl/uart_reg_top.sv',
'lowrisc_ip_uart_0.1/rtl/uart_rx.sv',
'lowrisc_ip_uart_0.1/rtl/uart_tx.sv',
'lowrisc_ip_uart_0.1/rtl/uart_core.sv',
'lowrisc_ip_uart_0.1/rtl/uart.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_reg_pkg.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_reg_top.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_usbif.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_flop_2syncpulse.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_linkstate.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_iomux.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev.sv',
'lowrisc_ip_xbar_main_0.1/tl_main_pkg.sv',
'lowrisc_ip_xbar_main_0.1/xbar_main.sv',
'lowrisc_ip_xbar_peri_0.1/tl_peri_pkg.sv',
'lowrisc_ip_xbar_peri_0.1/xbar_peri.sv',
'lowrisc_top_earlgrey_rv_plic_0.1/rtl/autogen/rv_plic_reg_pkg.sv',
'lowrisc_top_earlgrey_rv_plic_0.1/rtl/autogen/rv_plic_reg_top.sv',
'lowrisc_top_earlgrey_rv_plic_0.1/rtl/autogen/rv_plic.sv',
'lowrisc_systems_top_earlgrey_0.1/rtl/padctl.sv',
'lowrisc_systems_top_earlgrey_0.1/rtl/autogen/top_earlgrey.sv',
'lowrisc_systems_top_earlgrey_zcu104_0.1/rtl/clkgen_xilusp.sv',
'lowrisc_systems_top_earlgrey_zcu104_0.1/rtl/top_earlgrey_zcu104.sv',
]
with open(post_imp_file, 'w') as f:
f.write('write_checkpoint -force design.dcp')
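# The Tcl written above is handed to Vivado via the 'post_imp' tool option
# below; assuming edalize's Vivado backend honours that hook, a design
# checkpoint (design.dcp) is dumped right after implementation completes.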
files = [{
'name':
os.path.realpath(
'lowrisc_systems_top_earlgrey_zcu104_0.1/data/pins_zcu104.xdc'),
'file_type':
'xdc'
},
{
'name':
os.path.realpath('lowrisc_prim_assert_0.1/rtl/prim_assert.sv'),
'file_type':
'systemVerilogSource',
'is_include_file':
'true'
}]
parameters = {
'ROM_INIT_FILE': {
'datatype': 'str',
'paramtype': 'vlogdefine'
},
'PRIM_DEFAULT_IMPL': {
'datatype': 'str',
'paramtype': 'vlogdefine'
},
}
for src in srcs:
files.append({
'name': os.path.realpath(src),
'file_type': 'systemVerilogSource'
})
tool = 'vivado'
incdirs = [os.path.realpath('lowrisc_prim_assert_0.1/rtl')]
edam = {
'files': files,
'name': 'design',
'toplevel': 'top_earlgrey_zcu104',
'parameters': parameters,
'tool_options': {
'vivado': {
'part': os.environ['URAY_PART'],
'post_imp': post_imp_file,
'synth': synth_tool
}
}
}
backend = edalize.get_edatool(tool)(edam=edam, work_root=work_root)
args = [
'--ROM_INIT_FILE={}'.format(
os.path.realpath('boot_rom_fpga_nexysvideo.vmem')),
'--PRIM_DEFAULT_IMPL=prim_pkg::ImplXilinx'
]
backend.configure(args)
backend.build()
|
|
import copy
import unittest
import numpy
import pytest
from cupy_backends.cuda import stream as stream_module
import cupy
from cupy import _util
from cupy import _core
from cupy import cuda
from cupy import get_array_module
from cupy import testing
def wrap_take(array, *args, **kwargs):
if get_array_module(array) == numpy:
kwargs['mode'] = 'wrap'
return array.take(*args, **kwargs)
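# Rationale for this helper (an inference from the code, not a statement of
# the CuPy spec): numpy.take() defaults to mode='raise', so forcing
# mode='wrap' makes NumPy wrap out-of-bounds indices the way CuPy does in
# these tests, e.g. wrap_take(numpy.arange(5), 7) == 2 because 7 % 5 == 2.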
@testing.gpu
class TestNdarrayInit(unittest.TestCase):
def test_shape_none(self):
with testing.assert_warns(DeprecationWarning):
a = cupy.ndarray(None)
assert a.shape == ()
def test_shape_int(self):
a = cupy.ndarray(3)
assert a.shape == (3,)
def test_shape_int_with_strides(self):
dummy = cupy.ndarray(3)
a = cupy.ndarray(3, strides=(0,), memptr=dummy.data)
assert a.shape == (3,)
assert a.strides == (0,)
def test_memptr(self):
a = cupy.arange(6).astype(numpy.float32).reshape((2, 3))
memptr = a.data
b = cupy.ndarray((2, 3), numpy.float32, memptr)
testing.assert_array_equal(a, b)
b += 1
testing.assert_array_equal(a, b)
def test_memptr_with_strides(self):
buf = cupy.ndarray(20, numpy.uint8)
memptr = buf.data
# self-overlapping strides
a = cupy.ndarray((2, 3), numpy.float32, memptr, strides=(8, 4))
assert a.strides == (8, 4)
a[:] = 1
a[0, 2] = 4
assert float(a[1, 0]) == 4
def test_strides_without_memptr(self):
for xp in (numpy, cupy):
with pytest.raises(ValueError):
xp.ndarray((2, 3), numpy.float32, strides=(20, 4))
def test_strides_is_given_and_order_is_ignored(self):
buf = cupy.ndarray(20, numpy.uint8)
a = cupy.ndarray(
(2, 3), numpy.float32, buf.data, strides=(8, 4), order='C')
assert a.strides == (8, 4)
@testing.with_requires('numpy>=1.19')
def test_strides_is_given_but_order_is_invalid(self):
for xp in (numpy, cupy):
with pytest.raises(ValueError):
xp.ndarray((2, 3), numpy.float32, strides=(8, 4), order='!')
def test_order(self):
shape = (2, 3, 4)
a = _core.ndarray(shape, order='F')
a_cpu = numpy.ndarray(shape, order='F')
assert a.strides == a_cpu.strides
assert a.flags.f_contiguous
assert not a.flags.c_contiguous
def test_order_none(self):
shape = (2, 3, 4)
a = _core.ndarray(shape, order=None)
a_cpu = numpy.ndarray(shape, order=None)
assert a.flags.c_contiguous == a_cpu.flags.c_contiguous
assert a.flags.f_contiguous == a_cpu.flags.f_contiguous
assert a.strides == a_cpu.strides
@testing.parameterize(
*testing.product({
'shape': [(), (1,), (1, 2), (1, 2, 3)],
'order': ['C', 'F'],
'dtype': [
numpy.uint8, # itemsize=1
numpy.uint16, # itemsize=2
],
}))
@testing.gpu
class TestNdarrayInitStrides(unittest.TestCase):
# Check the strides given shape, itemsize and order.
@testing.numpy_cupy_equal()
def test_strides(self, xp):
arr = xp.ndarray(self.shape, dtype=self.dtype, order=self.order)
return (
arr.strides,
arr.flags.c_contiguous,
arr.flags.f_contiguous)
@testing.gpu
class TestNdarrayInitRaise(unittest.TestCase):
def test_unsupported_type(self):
arr = numpy.ndarray((2, 3), dtype=object)
with pytest.raises(ValueError):
_core.array(arr)
def test_excessive_ndim(self):
for xp in (numpy, cupy):
with pytest.raises(ValueError):
xp.ndarray(
shape=[1 for i in range(33)], dtype=xp.int8)
@testing.parameterize(
*testing.product({
'shape': [(), (0,), (1,), (0, 0, 2), (2, 3)],
})
)
@testing.gpu
class TestNdarrayDeepCopy(unittest.TestCase):
def _check_deepcopy(self, arr, arr2):
assert arr.data is not arr2.data
assert arr.shape == arr2.shape
assert arr.size == arr2.size
assert arr.dtype == arr2.dtype
assert arr.strides == arr2.strides
testing.assert_array_equal(arr, arr2)
def test_deepcopy(self):
arr = _core.ndarray(self.shape)
arr2 = copy.deepcopy(arr)
self._check_deepcopy(arr, arr2)
@testing.multi_gpu(2)
def test_deepcopy_multi_device(self):
arr = _core.ndarray(self.shape)
with cuda.Device(1):
arr2 = copy.deepcopy(arr)
self._check_deepcopy(arr, arr2)
assert arr2.device == arr.device
_test_copy_multi_device_with_stream_src = r'''
extern "C" __global__
void wait_and_write(long long *x) {
clock_t start = clock();
clock_t now;
for (;;) {
now = clock();
clock_t cycles = now > start ? now - start : now + (0xffffffff - start);
if (cycles >= 1000000000) {
break;
}
}
x[0] = 1;
x[1] = now; // in case the compiler optimizes away the entire loop
}
'''
@testing.gpu
class TestNdarrayCopy(unittest.TestCase):
@testing.multi_gpu(2)
@testing.for_orders('CFA')
def test_copy_multi_device_non_contiguous(self, order):
arr = _core.ndarray((20,))[::2]
dev1 = cuda.Device(1)
with dev1:
arr2 = arr.copy(order)
assert arr2.device == dev1
testing.assert_array_equal(arr, arr2)
@testing.multi_gpu(2)
def test_copy_multi_device_non_contiguous_K(self):
arr = _core.ndarray((20,))[::2]
with cuda.Device(1):
with self.assertRaises(NotImplementedError):
arr.copy('K')
# See cupy/cupy#5004
@testing.multi_gpu(2)
def test_copy_multi_device_with_stream(self):
# Kernel that takes long enough then finally writes values.
kern = cupy.RawKernel(
_test_copy_multi_device_with_stream_src, 'wait_and_write')
# Allocate memory and launch the kernel on a device using its own
# stream.
with cuda.Device(0):
# Keep this stream alive over the D2D copy below for HIP
with cuda.Stream() as s1: # NOQA
a = cupy.zeros((2,), dtype=numpy.uint64)
kern((1,), (1,), a)
# The D2D copy to another device on a different stream should observe the
# original values of the memory, i.e. those in place before the kernel on
# the first device finally performs its write.
with cuda.Device(1):
with cuda.Stream():
b = a.copy()
testing.assert_array_equal(
b, numpy.array([0, 0], dtype=numpy.uint64))
@testing.gpu
class TestNdarrayShape(unittest.TestCase):
@testing.numpy_cupy_array_equal()
def test_shape_set(self, xp):
arr = xp.ndarray((2, 3))
arr.shape = (3, 2)
return xp.array(arr.shape)
@testing.numpy_cupy_array_equal()
def test_shape_set_infer(self, xp):
arr = xp.ndarray((2, 3))
arr.shape = (3, -1)
return xp.array(arr.shape)
@testing.numpy_cupy_array_equal()
def test_shape_set_int(self, xp):
arr = xp.ndarray((2, 3))
arr.shape = 6
return xp.array(arr.shape)
def test_shape_need_copy(self):
# from cupy/cupy#5470
for xp in (numpy, cupy):
arr = xp.ndarray((2, 3), order='F')
with pytest.raises(AttributeError) as e:
arr.shape = (3, 2)
assert 'incompatible shape' in str(e.value).lower()
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support this')
class TestNdarrayCudaInterface(unittest.TestCase):
def test_cuda_array_interface(self):
arr = cupy.zeros(shape=(2, 3), dtype=cupy.float64)
iface = arr.__cuda_array_interface__
assert iface['version'] == 3
assert (set(iface.keys()) ==
set(['shape', 'typestr', 'data', 'version', 'descr',
'stream', 'strides']))
assert iface['shape'] == (2, 3)
assert iface['typestr'] == '<f8'
assert isinstance(iface['data'], tuple)
assert len(iface['data']) == 2
assert iface['data'][0] == arr.data.ptr
assert not iface['data'][1]
assert iface['descr'] == [('', '<f8')]
assert iface['strides'] is None
assert iface['stream'] == stream_module.get_default_stream_ptr()
def test_cuda_array_interface_view(self):
arr = cupy.zeros(shape=(10, 20), dtype=cupy.float64)
view = arr[::2, ::5]
iface = view.__cuda_array_interface__
assert iface['version'] == 3
assert (set(iface.keys()) ==
set(['shape', 'typestr', 'data', 'version', 'descr',
'stream', 'strides']))
assert iface['shape'] == (5, 4)
assert iface['typestr'] == '<f8'
assert isinstance(iface['data'], tuple)
assert len(iface['data']) == 2
assert iface['data'][0] == arr.data.ptr
assert not iface['data'][1]
assert iface['strides'] == (320, 40)
assert iface['descr'] == [('', '<f8')]
assert iface['stream'] == stream_module.get_default_stream_ptr()
def test_cuda_array_interface_zero_size(self):
arr = cupy.zeros(shape=(10,), dtype=cupy.float64)
view = arr[0:3:-1]
iface = view.__cuda_array_interface__
assert iface['version'] == 3
assert (set(iface.keys()) ==
set(['shape', 'typestr', 'data', 'version', 'descr',
'stream', 'strides']))
assert iface['shape'] == (0,)
assert iface['typestr'] == '<f8'
assert isinstance(iface['data'], tuple)
assert len(iface['data']) == 2
assert iface['data'][0] == 0
assert not iface['data'][1]
assert iface['strides'] is None
assert iface['descr'] == [('', '<f8')]
assert iface['stream'] == stream_module.get_default_stream_ptr()
@testing.parameterize(*testing.product({
'stream': ('null', 'new', 'ptds'),
'ver': (2, 3),
}))
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support this')
class TestNdarrayCudaInterfaceStream(unittest.TestCase):
def setUp(self):
if self.stream == 'null':
self.stream = cuda.Stream.null
elif self.stream == 'new':
self.stream = cuda.Stream()
elif self.stream == 'ptds':
self.stream = cuda.Stream.ptds
self.old_ver = _util.CUDA_ARRAY_INTERFACE_EXPORT_VERSION
_util.CUDA_ARRAY_INTERFACE_EXPORT_VERSION = self.ver
def tearDown(self):
_util.CUDA_ARRAY_INTERFACE_EXPORT_VERSION = self.old_ver
def test_cuda_array_interface_stream(self):
# this tests exporting CAI with a given stream
arr = cupy.zeros(shape=(10,), dtype=cupy.float64)
stream = self.stream
with stream:
iface = arr.__cuda_array_interface__
assert iface['version'] == self.ver
attrs = ['shape', 'typestr', 'data', 'version', 'descr', 'strides']
if self.ver == 3:
attrs.append('stream')
assert set(iface.keys()) == set(attrs)
assert iface['shape'] == (10,)
assert iface['typestr'] == '<f8'
assert isinstance(iface['data'], tuple)
assert len(iface['data']) == 2
assert iface['data'] == (arr.data.ptr, False)
assert iface['descr'] == [('', '<f8')]
assert iface['strides'] is None
if self.ver == 3:
if stream.ptr == 0:
ptr = stream_module.get_default_stream_ptr()
assert iface['stream'] == ptr
else:
assert iface['stream'] == stream.ptr
@pytest.mark.skipif(not cupy.cuda.runtime.is_hip,
reason='This is supported on CUDA')
class TestNdarrayCudaInterfaceNoneCUDA(unittest.TestCase):
def setUp(self):
self.arr = cupy.zeros(shape=(2, 3), dtype=cupy.float64)
def test_cuda_array_interface_hasattr(self):
assert not hasattr(self.arr, '__cuda_array_interface__')
def test_cuda_array_interface_getattr(self):
with pytest.raises(AttributeError) as e:
getattr(self.arr, '__cuda_array_interface__')
assert 'HIP' in str(e.value)
@testing.parameterize(
*testing.product({
'indices_shape': [(2,), (2, 3)],
'axis': [None, 0, 1, 2, -1, -2],
})
)
@testing.gpu
class TestNdarrayTake(unittest.TestCase):
shape = (3, 4, 5)
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_take(self, xp, dtype):
a = testing.shaped_arange(self.shape, xp, dtype)
if self.axis is None:
m = a.size
else:
m = a.shape[self.axis]
i = testing.shaped_arange(self.indices_shape, xp, numpy.int32) % m
return wrap_take(a, i, self.axis)
@testing.parameterize(
*testing.product({
'indices': [2, [0, 1], -1, [-1, -2]],
'axis': [None, 0, 1, -1, -2],
})
)
@testing.gpu
class TestNdarrayTakeWithInt(unittest.TestCase):
shape = (3, 4, 5)
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_take(self, xp, dtype):
a = testing.shaped_arange(self.shape, xp, dtype)
return wrap_take(a, self.indices, self.axis)
@testing.parameterize(
*testing.product({
'indices': [2, [0, 1], -1, [-1, -2]],
'axis': [None, 0, 1, -1, -2],
})
)
@testing.gpu
class TestNdarrayTakeWithIntWithOutParam(unittest.TestCase):
shape = (3, 4, 5)
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_take(self, xp, dtype):
a = testing.shaped_arange(self.shape, xp, dtype)
r1 = wrap_take(a, self.indices, self.axis)
r2 = xp.zeros_like(r1)
wrap_take(a, self.indices, self.axis, out=r2)
testing.assert_array_equal(r1, r2)
return r2
@testing.parameterize(
*testing.product({
'indices': [0, -1, [0], [0, -1]],
'axis': [None, 0, -1],
})
)
@testing.gpu
class TestScalaNdarrayTakeWithIntWithOutParam(unittest.TestCase):
shape = ()
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_take(self, xp, dtype):
a = testing.shaped_arange(self.shape, xp, dtype)
r1 = wrap_take(a, self.indices, self.axis)
r2 = xp.zeros_like(r1)
wrap_take(a, self.indices, self.axis, out=r2)
testing.assert_array_equal(r1, r2)
return r2
@testing.parameterize(
{'shape': (3, 4, 5), 'indices': (2,), 'axis': 3},
{'shape': (), 'indices': (0,), 'axis': 2}
)
@testing.gpu
class TestNdarrayTakeErrorAxisOverRun(unittest.TestCase):
def test_axis_overrun1(self):
for xp in (numpy, cupy):
a = testing.shaped_arange(self.shape, xp)
with pytest.raises(numpy.AxisError):
wrap_take(a, self.indices, axis=self.axis)
def test_axis_overrun2(self):
a = testing.shaped_arange(self.shape, cupy)
with pytest.raises(numpy.AxisError):
wrap_take(a, self.indices, axis=self.axis)
@testing.parameterize(
{'shape': (3, 4, 5), 'indices': (2, 3), 'out_shape': (2, 4)},
{'shape': (), 'indices': (), 'out_shape': (1,)}
)
@testing.gpu
class TestNdarrayTakeErrorShapeMismatch(unittest.TestCase):
def test_shape_mismatch(self):
for xp in (numpy, cupy):
a = testing.shaped_arange(self.shape, xp)
i = testing.shaped_arange(self.indices, xp, numpy.int32) % 3
o = testing.shaped_arange(self.out_shape, xp)
with pytest.raises(ValueError):
wrap_take(a, i, out=o)
@testing.parameterize(
{'shape': (3, 4, 5), 'indices': (2, 3), 'out_shape': (2, 3)},
{'shape': (), 'indices': (), 'out_shape': ()}
)
@testing.gpu
class TestNdarrayTakeErrorTypeMismatch(unittest.TestCase):
def test_output_type_mismatch(self):
for xp in (numpy, cupy):
a = testing.shaped_arange(self.shape, xp, numpy.int32)
i = testing.shaped_arange(self.indices, xp, numpy.int32) % 3
o = testing.shaped_arange(self.out_shape, xp, numpy.float32)
with pytest.raises(TypeError):
wrap_take(a, i, out=o)
@testing.parameterize(
{'shape': (0,), 'indices': (0,), 'axis': None},
{'shape': (0,), 'indices': (0, 1), 'axis': None},
{'shape': (3, 0), 'indices': (2,), 'axis': 0},
)
@testing.gpu
class TestZeroSizedNdarrayTake(unittest.TestCase):
@testing.numpy_cupy_array_equal()
def test_output_type_mismatch(self, xp):
a = testing.shaped_arange(self.shape, xp, numpy.int32)
i = testing.shaped_arange(self.indices, xp, numpy.int32)
return wrap_take(a, i, axis=self.axis)
@testing.parameterize(
{'shape': (0,), 'indices': (1,)},
{'shape': (0,), 'indices': (1, 1)},
)
@testing.gpu
class TestZeroSizedNdarrayTakeIndexError(unittest.TestCase):
def test_output_type_mismatch(self):
for xp in (numpy, cupy):
a = testing.shaped_arange(self.shape, xp, numpy.int32)
i = testing.shaped_arange(self.indices, xp, numpy.int32)
with pytest.raises(IndexError):
wrap_take(a, i)
@testing.gpu
class TestSize(unittest.TestCase):
@testing.numpy_cupy_equal()
def test_size_without_axis(self, xp):
x = testing.shaped_arange((3, 4, 5), xp, numpy.int32)
return xp.size(x)
@testing.numpy_cupy_equal()
def test_size_with_axis(self, xp):
x = testing.shaped_arange((3, 4, 5), xp, numpy.int32)
return xp.size(x, 0)
@testing.numpy_cupy_equal()
def test_size_with_negative_axis(self, xp):
x = testing.shaped_arange((3, 4, 5), xp, numpy.int32)
return xp.size(x, -1)
@testing.numpy_cupy_equal()
def test_size_zero_dim_array(self, xp):
x = testing.shaped_arange((), xp, numpy.int32)
return xp.size(x)
def test_size_axis_too_large(self):
for xp in (numpy, cupy):
x = testing.shaped_arange((3, 4, 5), xp, numpy.int32)
with pytest.raises(IndexError):
xp.size(x, 3)
def test_size_axis_too_small(self):
for xp in (numpy, cupy):
x = testing.shaped_arange((3, 4, 5), xp, numpy.int32)
with pytest.raises(IndexError):
xp.size(x, -4)
def test_size_zero_dim_array_with_axis(self):
for xp in (numpy, cupy):
x = testing.shaped_arange((), xp, numpy.int32)
with pytest.raises(IndexError):
xp.size(x, 0)
@testing.gpu
class TestPythonInterface(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_equal()
def test_bytes_tobytes(self, xp, dtype):
x = testing.shaped_arange((3, 4, 5), xp, dtype)
return bytes(x)
@testing.for_all_dtypes()
@testing.numpy_cupy_equal()
def test_bytes_tobytes_empty(self, xp, dtype):
x = xp.empty((0,), dtype)
return bytes(x)
@testing.for_all_dtypes()
@testing.numpy_cupy_equal()
def test_bytes_tobytes_empty2(self, xp, dtype):
x = xp.empty((3, 0, 4), dtype)
return bytes(x)
# The result of bytes(numpy.array(scalar)) is the same as bytes(scalar)
# if scalar is of an integer dtype, including bool_. Its spec is
# bytes(int): a bytes object of the size given by the parameter,
# initialized with null bytes.
@testing.for_float_dtypes()
@testing.numpy_cupy_equal()
def test_bytes_tobytes_scalar_array(self, xp, dtype):
x = xp.array(3, dtype)
return bytes(x)
@testing.numpy_cupy_equal()
def test_format(self, xp):
x = xp.array(1.12345)
return format(x, '.2f')
@testing.gpu
class TestNdarrayImplicitConversion(unittest.TestCase):
def test_array(self):
a = testing.shaped_arange((3, 4, 5), cupy, numpy.int64)
with pytest.raises(TypeError):
numpy.asarray(a)
|
|
# encoding: utf-8
"""
Test suite for the docx.table module
"""
from __future__ import absolute_import, print_function, unicode_literals
import pytest
from docx.enum.table import WD_TABLE_ALIGNMENT
from docx.oxml import parse_xml
from docx.oxml.table import CT_Tc
from docx.shared import Inches
from docx.table import _Cell, _Column, _Columns, _Row, _Rows, Table
from docx.text import Paragraph
from .oxml.unitdata.table import a_gridCol, a_tbl, a_tblGrid, a_tc, a_tr
from .oxml.unitdata.text import a_p
from .unitutil.cxml import element, xml
from .unitutil.file import snippet_seq
from .unitutil.mock import instance_mock, property_mock
class DescribeTable(object):
def it_knows_its_alignment_setting(self, alignment_get_fixture):
table, expected_value = alignment_get_fixture
assert table.alignment == expected_value
def it_can_change_its_alignment_setting(self, alignment_set_fixture):
table, new_value, expected_xml = alignment_set_fixture
table.alignment = new_value
assert table._tbl.xml == expected_xml
def it_knows_whether_it_should_autofit(self, autofit_get_fixture):
table, expected_value = autofit_get_fixture
assert table.autofit is expected_value
def it_can_change_its_autofit_setting(self, autofit_set_fixture):
table, new_value, expected_xml = autofit_set_fixture
table.autofit = new_value
assert table._tbl.xml == expected_xml
def it_knows_its_table_style(self, table_style_get_fixture):
table, style = table_style_get_fixture
assert table.style == style
def it_can_apply_a_table_style_by_name(self, table_style_set_fixture):
table, style_name, expected_xml = table_style_set_fixture
table.style = style_name
assert table._tbl.xml == expected_xml
def it_knows_it_is_the_table_its_children_belong_to(self, table_fixture):
table = table_fixture
assert table.table is table
def it_knows_its_column_count_to_help(self, column_count_fixture):
table, expected_value = column_count_fixture
column_count = table._column_count
assert column_count == expected_value
def it_provides_access_to_the_table_rows(self, table):
rows = table.rows
assert isinstance(rows, _Rows)
def it_provides_access_to_the_table_columns(self, table):
columns = table.columns
assert isinstance(columns, _Columns)
def it_provides_access_to_a_cell_by_row_and_col_indices(self, table):
for row_idx in range(2):
for col_idx in range(2):
cell = table.cell(row_idx, col_idx)
assert isinstance(cell, _Cell)
tr = table._tbl.tr_lst[row_idx]
tc = tr.tc_lst[col_idx]
assert tc is cell._tc
def it_provides_access_to_the_cells_in_a_column(self, col_cells_fixture):
table, column_idx, expected_cells = col_cells_fixture
column_cells = table.column_cells(column_idx)
assert column_cells == expected_cells
def it_provides_access_to_the_cells_in_a_row(self, row_cells_fixture):
table, row_idx, expected_cells = row_cells_fixture
row_cells = table.row_cells(row_idx)
assert row_cells == expected_cells
def it_can_add_a_row(self, add_row_fixture):
table, expected_xml = add_row_fixture
row = table.add_row()
assert table._tbl.xml == expected_xml
assert isinstance(row, _Row)
assert row._tr is table._tbl.tr_lst[1]
def it_can_add_a_column(self, add_column_fixture):
table, expected_xml = add_column_fixture
column = table.add_column()
assert table._tbl.xml == expected_xml
assert isinstance(column, _Column)
assert column._gridCol is table._tbl.tblGrid.gridCol_lst[1]
def it_provides_access_to_its_cells_to_help(self, cells_fixture):
table, cell_count, unique_count, matches = cells_fixture
cells = table._cells
assert len(cells) == cell_count
assert len(set(cells)) == unique_count
for matching_idxs in matches:
comparator_idx = matching_idxs[0]
for idx in matching_idxs[1:]:
assert cells[idx] is cells[comparator_idx]
# fixtures -------------------------------------------------------
@pytest.fixture
def add_column_fixture(self):
tbl = _tbl_bldr(2, 1).element
table = Table(tbl, None)
expected_xml = _tbl_bldr(2, 2).xml()
return table, expected_xml
@pytest.fixture
def add_row_fixture(self):
tbl = _tbl_bldr(rows=1, cols=2).element
table = Table(tbl, None)
expected_xml = _tbl_bldr(rows=2, cols=2).xml()
return table, expected_xml
@pytest.fixture(params=[
('w:tbl/w:tblPr', None),
('w:tbl/w:tblPr/w:jc{w:val=center}', WD_TABLE_ALIGNMENT.CENTER),
('w:tbl/w:tblPr/w:jc{w:val=right}', WD_TABLE_ALIGNMENT.RIGHT),
('w:tbl/w:tblPr/w:jc{w:val=left}', WD_TABLE_ALIGNMENT.LEFT),
])
def alignment_get_fixture(self, request):
tbl_cxml, expected_value = request.param
table = Table(element(tbl_cxml), None)
return table, expected_value
@pytest.fixture(params=[
('w:tbl/w:tblPr', WD_TABLE_ALIGNMENT.LEFT,
'w:tbl/w:tblPr/w:jc{w:val=left}'),
('w:tbl/w:tblPr/w:jc{w:val=left}', WD_TABLE_ALIGNMENT.RIGHT,
'w:tbl/w:tblPr/w:jc{w:val=right}'),
('w:tbl/w:tblPr/w:jc{w:val=right}', None,
'w:tbl/w:tblPr'),
])
def alignment_set_fixture(self, request):
tbl_cxml, new_value, expected_tbl_cxml = request.param
table = Table(element(tbl_cxml), None)
expected_xml = xml(expected_tbl_cxml)
return table, new_value, expected_xml
@pytest.fixture(params=[
('w:tbl/w:tblPr', True),
('w:tbl/w:tblPr/w:tblLayout', True),
('w:tbl/w:tblPr/w:tblLayout{w:type=autofit}', True),
('w:tbl/w:tblPr/w:tblLayout{w:type=fixed}', False),
])
def autofit_get_fixture(self, request):
tbl_cxml, expected_autofit = request.param
table = Table(element(tbl_cxml), None)
return table, expected_autofit
@pytest.fixture(params=[
('w:tbl/w:tblPr', True,
'w:tbl/w:tblPr/w:tblLayout{w:type=autofit}'),
('w:tbl/w:tblPr', False,
'w:tbl/w:tblPr/w:tblLayout{w:type=fixed}'),
('w:tbl/w:tblPr', None,
'w:tbl/w:tblPr/w:tblLayout{w:type=fixed}'),
('w:tbl/w:tblPr/w:tblLayout{w:type=fixed}', True,
'w:tbl/w:tblPr/w:tblLayout{w:type=autofit}'),
('w:tbl/w:tblPr/w:tblLayout{w:type=autofit}', False,
'w:tbl/w:tblPr/w:tblLayout{w:type=fixed}'),
])
def autofit_set_fixture(self, request):
tbl_cxml, new_value, expected_tbl_cxml = request.param
table = Table(element(tbl_cxml), None)
expected_xml = xml(expected_tbl_cxml)
return table, new_value, expected_xml
@pytest.fixture(params=[
(0, 9, 9, ()),
(1, 9, 8, ((0, 1),)),
(2, 9, 8, ((1, 4),)),
(3, 9, 6, ((0, 1, 3, 4),)),
(4, 9, 4, ((0, 1), (3, 6), (4, 5, 7, 8))),
])
def cells_fixture(self, request):
snippet_idx, cell_count, unique_count, matches = request.param
tbl_xml = snippet_seq('tbl-cells')[snippet_idx]
table = Table(parse_xml(tbl_xml), None)
return table, cell_count, unique_count, matches
@pytest.fixture
def col_cells_fixture(self, _cells_, _column_count_):
table = Table(None, None)
_cells_.return_value = [0, 1, 2, 3, 4, 5, 6, 7, 8]
_column_count_.return_value = 3
column_idx = 1
expected_cells = [1, 4, 7]
return table, column_idx, expected_cells
@pytest.fixture
def column_count_fixture(self):
tbl_cxml = 'w:tbl/w:tblGrid/(w:gridCol,w:gridCol,w:gridCol)'
expected_value = 3
table = Table(element(tbl_cxml), None)
return table, expected_value
@pytest.fixture
def row_cells_fixture(self, _cells_, _column_count_):
table = Table(None, None)
_cells_.return_value = [0, 1, 2, 3, 4, 5, 6, 7, 8]
_column_count_.return_value = 3
row_idx = 1
expected_cells = [3, 4, 5]
return table, row_idx, expected_cells
@pytest.fixture
def table_fixture(self):
table = Table(None, None)
return table
@pytest.fixture(params=[
('w:tbl/w:tblPr', None),
('w:tbl/w:tblPr/w:tblStyle{w:val=foobar}', 'foobar'),
])
def table_style_get_fixture(self, request):
tbl_cxml, expected_style = request.param
table = Table(element(tbl_cxml), None)
return table, expected_style
@pytest.fixture(params=[
('w:tbl/w:tblPr', 'foobar',
'w:tbl/w:tblPr/w:tblStyle{w:val=foobar}'),
('w:tbl/w:tblPr/w:tblStyle{w:val=foobar}', 'barfoo',
'w:tbl/w:tblPr/w:tblStyle{w:val=barfoo}'),
('w:tbl/w:tblPr/w:tblStyle{w:val=foobar}', None,
'w:tbl/w:tblPr'),
('w:tbl/w:tblPr', None,
'w:tbl/w:tblPr'),
])
def table_style_set_fixture(self, request):
tbl_cxml, new_style, expected_cxml = request.param
table = Table(element(tbl_cxml), None)
expected_xml = xml(expected_cxml)
return table, new_style, expected_xml
# fixture components ---------------------------------------------
@pytest.fixture
def _cells_(self, request):
return property_mock(request, Table, '_cells')
@pytest.fixture
def _column_count_(self, request):
return property_mock(request, Table, '_column_count')
@pytest.fixture
def table(self):
tbl = _tbl_bldr(rows=2, cols=2).element
table = Table(tbl, None)
return table
class Describe_Cell(object):
def it_knows_what_text_it_contains(self, text_get_fixture):
cell, expected_text = text_get_fixture
text = cell.text
assert text == expected_text
def it_can_replace_its_content_with_a_string_of_text(
self, text_set_fixture):
cell, text, expected_xml = text_set_fixture
cell.text = text
assert cell._tc.xml == expected_xml
def it_knows_its_width_in_EMU(self, width_get_fixture):
cell, expected_width = width_get_fixture
assert cell.width == expected_width
def it_can_change_its_width(self, width_set_fixture):
cell, value, expected_xml = width_set_fixture
cell.width = value
assert cell.width == value
assert cell._tc.xml == expected_xml
def it_provides_access_to_the_paragraphs_it_contains(
self, paragraphs_fixture):
cell = paragraphs_fixture
paragraphs = cell.paragraphs
assert len(paragraphs) == 2
count = 0
for idx, paragraph in enumerate(paragraphs):
assert isinstance(paragraph, Paragraph)
assert paragraph is paragraphs[idx]
count += 1
assert count == 2
def it_provides_access_to_the_tables_it_contains(self, tables_fixture):
# test len(), iterable, and indexed access
cell, expected_count = tables_fixture
tables = cell.tables
assert len(tables) == expected_count
count = 0
for idx, table in enumerate(tables):
assert isinstance(table, Table)
assert tables[idx] is table
count += 1
assert count == expected_count
def it_can_add_a_paragraph(self, add_paragraph_fixture):
cell, expected_xml = add_paragraph_fixture
p = cell.add_paragraph()
assert cell._tc.xml == expected_xml
assert isinstance(p, Paragraph)
def it_can_add_a_table(self, add_table_fixture):
cell, expected_xml = add_table_fixture
table = cell.add_table(rows=0, cols=0)
assert cell._tc.xml == expected_xml
assert isinstance(table, Table)
def it_can_merge_itself_with_other_cells(self, merge_fixture):
cell, other_cell, merged_tc_ = merge_fixture
merged_cell = cell.merge(other_cell)
cell._tc.merge.assert_called_once_with(other_cell._tc)
assert isinstance(merged_cell, _Cell)
assert merged_cell._tc is merged_tc_
assert merged_cell._parent is cell._parent
# fixtures -------------------------------------------------------
@pytest.fixture(params=[
('w:tc', 'w:tc/w:p'),
('w:tc/w:p', 'w:tc/(w:p, w:p)'),
('w:tc/w:tbl', 'w:tc/(w:tbl, w:p)'),
])
def add_paragraph_fixture(self, request):
tc_cxml, after_tc_cxml = request.param
cell = _Cell(element(tc_cxml), None)
expected_xml = xml(after_tc_cxml)
return cell, expected_xml
@pytest.fixture(params=[
('w:tc', 'w:tc/(w:tbl'),
('w:tc/w:p', 'w:tc/(w:p, w:tbl'),
])
def add_table_fixture(self, request):
tc_cxml, after_tc_cxml = request.param
# the added table brings some overhead elements, plus a blank paragraph
# after it since it's inside a cell.
after_tc_cxml += (
'/(w:tblPr/w:tblW{w:type=auto,w:w=0},w:tblGrid),w:p)'
)
cell = _Cell(element(tc_cxml), None)
expected_xml = xml(after_tc_cxml)
return cell, expected_xml
@pytest.fixture
def merge_fixture(self, tc_, tc_2_, parent_, merged_tc_):
cell, other_cell = _Cell(tc_, parent_), _Cell(tc_2_, parent_)
tc_.merge.return_value = merged_tc_
return cell, other_cell, merged_tc_
@pytest.fixture
def paragraphs_fixture(self):
return _Cell(element('w:tc/(w:p, w:p)'), None)
@pytest.fixture(params=[
('w:tc', 0),
('w:tc/w:tbl', 1),
('w:tc/(w:tbl,w:tbl)', 2),
('w:tc/(w:p,w:tbl)', 1),
('w:tc/(w:tbl,w:tbl,w:p)', 2),
])
def tables_fixture(self, request):
cell_cxml, expected_count = request.param
cell = _Cell(element(cell_cxml), None)
return cell, expected_count
@pytest.fixture(params=[
('w:tc', ''),
('w:tc/w:p/w:r/w:t"foobar"', 'foobar'),
('w:tc/(w:p/w:r/w:t"foo",w:p/w:r/w:t"bar")', 'foo\nbar'),
('w:tc/(w:tcPr,w:p/w:r/w:t"foobar")', 'foobar'),
('w:tc/w:p/w:r/(w:t"fo",w:tab,w:t"ob",w:br,w:t"ar",w:br)',
'fo\tob\nar\n'),
])
def text_get_fixture(self, request):
tc_cxml, expected_text = request.param
cell = _Cell(element(tc_cxml), None)
return cell, expected_text
@pytest.fixture(params=[
('w:tc/w:p', 'foobar',
'w:tc/w:p/w:r/w:t"foobar"'),
('w:tc/w:p', 'fo\tob\rar\n',
'w:tc/w:p/w:r/(w:t"fo",w:tab,w:t"ob",w:br,w:t"ar",w:br)'),
('w:tc/(w:tcPr, w:p, w:tbl, w:p)', 'foobar',
'w:tc/(w:tcPr, w:p/w:r/w:t"foobar")'),
])
def text_set_fixture(self, request):
tc_cxml, new_text, expected_cxml = request.param
cell = _Cell(element(tc_cxml), None)
expected_xml = xml(expected_cxml)
return cell, new_text, expected_xml
@pytest.fixture(params=[
('w:tc', None),
('w:tc/w:tcPr', None),
('w:tc/w:tcPr/w:tcW{w:w=25%,w:type=pct}', None),
('w:tc/w:tcPr/w:tcW{w:w=1440,w:type=dxa}', 914400),
])
def width_get_fixture(self, request):
tc_cxml, expected_width = request.param
cell = _Cell(element(tc_cxml), None)
return cell, expected_width
@pytest.fixture(params=[
('w:tc', Inches(1),
'w:tc/w:tcPr/w:tcW{w:w=1440,w:type=dxa}'),
('w:tc/w:tcPr/w:tcW{w:w=25%,w:type=pct}', Inches(2),
'w:tc/w:tcPr/w:tcW{w:w=2880,w:type=dxa}'),
])
def width_set_fixture(self, request):
tc_cxml, new_value, expected_cxml = request.param
cell = _Cell(element(tc_cxml), None)
expected_xml = xml(expected_cxml)
return cell, new_value, expected_xml
# fixture components ---------------------------------------------
@pytest.fixture
def merged_tc_(self, request):
return instance_mock(request, CT_Tc)
@pytest.fixture
def parent_(self, request):
return instance_mock(request, Table)
@pytest.fixture
def tc_(self, request):
return instance_mock(request, CT_Tc)
@pytest.fixture
def tc_2_(self, request):
return instance_mock(request, CT_Tc)
class Describe_Column(object):
def it_provides_access_to_its_cells(self, cells_fixture):
column, column_idx, expected_cells = cells_fixture
cells = column.cells
column.table.column_cells.assert_called_once_with(column_idx)
assert cells == expected_cells
def it_provides_access_to_the_table_it_belongs_to(self, table_fixture):
column, table_ = table_fixture
assert column.table is table_
def it_knows_its_width_in_EMU(self, width_get_fixture):
column, expected_width = width_get_fixture
assert column.width == expected_width
def it_can_change_its_width(self, width_set_fixture):
column, value, expected_xml = width_set_fixture
column.width = value
assert column.width == value
assert column._gridCol.xml == expected_xml
def it_knows_its_index_in_table_to_help(self, index_fixture):
column, expected_idx = index_fixture
assert column._index == expected_idx
# fixtures -------------------------------------------------------
@pytest.fixture
def cells_fixture(self, _index_, table_prop_, table_):
column = _Column(None, None)
_index_.return_value = column_idx = 4
expected_cells = (3, 2, 1)
table_.column_cells.return_value = list(expected_cells)
return column, column_idx, expected_cells
@pytest.fixture
def index_fixture(self):
tbl = element('w:tbl/w:tblGrid/(w:gridCol,w:gridCol,w:gridCol)')
gridCol, expected_idx = tbl.tblGrid[1], 1
column = _Column(gridCol, None)
return column, expected_idx
@pytest.fixture
def table_fixture(self, parent_, table_):
column = _Column(None, parent_)
parent_.table = table_
return column, table_
@pytest.fixture(params=[
('w:gridCol{w:w=4242}', 2693670),
('w:gridCol{w:w=1440}', 914400),
('w:gridCol{w:w=2.54cm}', 914400),
('w:gridCol{w:w=54mm}', 1944000),
('w:gridCol{w:w=12.5pt}', 158750),
('w:gridCol', None),
])
def width_get_fixture(self, request):
gridCol_cxml, expected_width = request.param
column = _Column(element(gridCol_cxml), None)
return column, expected_width
@pytest.fixture(params=[
('w:gridCol', 914400, 'w:gridCol{w:w=1440}'),
('w:gridCol{w:w=4242}', 457200, 'w:gridCol{w:w=720}'),
('w:gridCol{w:w=4242}', None, 'w:gridCol'),
('w:gridCol', None, 'w:gridCol'),
])
def width_set_fixture(self, request):
gridCol_cxml, new_value, expected_cxml = request.param
column = _Column(element(gridCol_cxml), None)
expected_xml = xml(expected_cxml)
return column, new_value, expected_xml
# fixture components ---------------------------------------------
@pytest.fixture
def _index_(self, request):
return property_mock(request, _Column, '_index')
@pytest.fixture
def parent_(self, request):
return instance_mock(request, Table)
@pytest.fixture
def table_(self, request):
return instance_mock(request, Table)
@pytest.fixture
def table_prop_(self, request, table_):
return property_mock(request, _Column, 'table', return_value=table_)
class Describe_Columns(object):
def it_knows_how_many_columns_it_contains(self, columns_fixture):
columns, column_count = columns_fixture
assert len(columns) == column_count
def it_can_iterate_over_its__Column_instances(self, columns_fixture):
columns, column_count = columns_fixture
actual_count = 0
for column in columns:
assert isinstance(column, _Column)
actual_count += 1
assert actual_count == column_count
def it_provides_indexed_access_to_columns(self, columns_fixture):
columns, column_count = columns_fixture
for idx in range(-column_count, column_count):
column = columns[idx]
assert isinstance(column, _Column)
def it_raises_on_indexed_access_out_of_range(self, columns_fixture):
columns, column_count = columns_fixture
too_low = -1 - column_count
too_high = column_count
with pytest.raises(IndexError):
columns[too_low]
with pytest.raises(IndexError):
columns[too_high]
def it_provides_access_to_the_table_it_belongs_to(self, table_fixture):
columns, table_ = table_fixture
assert columns.table is table_
# fixtures -------------------------------------------------------
@pytest.fixture
def columns_fixture(self):
column_count = 2
tbl = _tbl_bldr(rows=2, cols=column_count).element
columns = _Columns(tbl, None)
return columns, column_count
@pytest.fixture
def table_fixture(self, table_):
columns = _Columns(None, table_)
table_.table = table_
return columns, table_
# fixture components ---------------------------------------------
@pytest.fixture
def table_(self, request):
return instance_mock(request, Table)
class Describe_Row(object):
def it_provides_access_to_its_cells(self, cells_fixture):
row, row_idx, expected_cells = cells_fixture
cells = row.cells
row.table.row_cells.assert_called_once_with(row_idx)
assert cells == expected_cells
def it_provides_access_to_the_table_it_belongs_to(self, table_fixture):
row, table_ = table_fixture
assert row.table is table_
def it_knows_its_index_in_table_to_help(self, idx_fixture):
row, expected_idx = idx_fixture
assert row._index == expected_idx
# fixtures -------------------------------------------------------
@pytest.fixture
def cells_fixture(self, _index_, table_prop_, table_):
row = _Row(None, None)
_index_.return_value = row_idx = 6
expected_cells = (1, 2, 3)
table_.row_cells.return_value = list(expected_cells)
return row, row_idx, expected_cells
@pytest.fixture
def idx_fixture(self):
tbl = element('w:tbl/(w:tr,w:tr,w:tr)')
tr, expected_idx = tbl[1], 1
row = _Row(tr, None)
return row, expected_idx
@pytest.fixture
def table_fixture(self, parent_, table_):
row = _Row(None, parent_)
parent_.table = table_
return row, table_
# fixture components ---------------------------------------------
@pytest.fixture
def _index_(self, request):
return property_mock(request, _Row, '_index')
@pytest.fixture
def parent_(self, request):
return instance_mock(request, Table)
@pytest.fixture
def table_(self, request):
return instance_mock(request, Table)
@pytest.fixture
def table_prop_(self, request, table_):
return property_mock(request, _Row, 'table', return_value=table_)
class Describe_Rows(object):
def it_knows_how_many_rows_it_contains(self, rows_fixture):
rows, row_count = rows_fixture
assert len(rows) == row_count
def it_can_iterate_over_its__Row_instances(self, rows_fixture):
rows, row_count = rows_fixture
actual_count = 0
for row in rows:
assert isinstance(row, _Row)
actual_count += 1
assert actual_count == row_count
def it_provides_indexed_access_to_rows(self, rows_fixture):
rows, row_count = rows_fixture
for idx in range(-row_count, row_count):
row = rows[idx]
assert isinstance(row, _Row)
def it_raises_on_indexed_access_out_of_range(self, rows_fixture):
rows, row_count = rows_fixture
with pytest.raises(IndexError):
too_low = -1 - row_count
rows[too_low]
with pytest.raises(IndexError):
too_high = row_count
rows[too_high]
def it_provides_access_to_the_table_it_belongs_to(self, table_fixture):
rows, table_ = table_fixture
assert rows.table is table_
# fixtures -------------------------------------------------------
@pytest.fixture
def rows_fixture(self):
row_count = 2
tbl = _tbl_bldr(rows=row_count, cols=2).element
rows = _Rows(tbl, None)
return rows, row_count
@pytest.fixture
def table_fixture(self, table_):
rows = _Rows(None, table_)
table_.table = table_
return rows, table_
# fixture components ---------------------------------------------
@pytest.fixture
def table_(self, request):
return instance_mock(request, Table)
# fixtures -----------------------------------------------------------
def _tbl_bldr(rows, cols):
tblGrid_bldr = a_tblGrid()
for i in range(cols):
tblGrid_bldr.with_child(a_gridCol())
tbl_bldr = a_tbl().with_nsdecls().with_child(tblGrid_bldr)
for i in range(rows):
tr_bldr = _tr_bldr(cols)
tbl_bldr.with_child(tr_bldr)
return tbl_bldr
def _tc_bldr():
return a_tc().with_child(a_p())
def _tr_bldr(cols):
tr_bldr = a_tr()
for i in range(cols):
tc_bldr = _tc_bldr()
tr_bldr.with_child(tc_bldr)
return tr_bldr
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import exception
import time
from VirtualBox_client import vboxServiceLocator
from VirtualBox_client import IWebsessionManager_logonRequestMsg
from VirtualBox_client import IVirtualBox_getVersionRequestMsg
from VirtualBox_client import IVirtualBox_findMachineRequestMsg
from VirtualBox_client import IWebsessionManager_getSessionObjectRequestMsg
from VirtualBox_client import IMachine_launchVMProcessRequestMsg
from VirtualBox_client import ISession_getConsoleRequestMsg
from VirtualBox_client import IConsole_powerDownRequestMsg
from VirtualBox_client import ISession_unlockMachineRequestMsg
from VirtualBox_client import IMachine_lockMachineRequestMsg
from VirtualBox_client import IManagedObjectRef_releaseRequestMsg
from VirtualBox_client import IVirtualBox_getMachineStatesRequestMsg
from VirtualBox_client import IMachine_getBootOrderRequestMsg
from VirtualBox_client import IMachine_setBootOrderRequestMsg
from VirtualBox_client import ISession_getMachineRequestMsg
from VirtualBox_client import IMachine_saveSettingsRequestMsg
from VirtualBox_client import IMachine_attachDeviceRequestMsg
from VirtualBox_client import IMachine_detachDeviceRequestMsg
from VirtualBox_client import IVirtualBox_openMediumRequestMsg
from VirtualBox_client import IMachine_getFirmwareTypeRequestMsg
from VirtualBox_client import IMachine_setFirmwareTypeRequestMsg
from VirtualBox_client import IMachine_getMediumRequestMsg
from VirtualBox_client import IMedium_getLocationRequestMsg
STATE_POWERED_OFF = 'PoweredOff'
STATE_POWERED_ON = 'Running'
STATE_ERROR = 'Error'
DEVICE_NETWORK = 'Network'
DEVICE_FLOPPY = 'Floppy'
DEVICE_CDROM = 'DVD'
DEVICE_DISK = 'HardDisk'
LOCKTYPE_SHARED = 1
LOCKTYPE_WRITE = 2
ACCESS_READONLY = 1
ACCESS_READWRITE = 2
FIRMWARE_BIOS = 'BIOS'
FIRMWARE_EFI = 'EFI'
DEVICE_TO_CONTROLLER_MAP = {
DEVICE_DISK: 'SATA',
DEVICE_FLOPPY: 'SATA',
DEVICE_CDROM: 'IDE'
}
class VirtualBoxHost:
def __init__(self, **kwargs):
host = kwargs.get('host', '10.0.2.2')
username = kwargs.get('username', '')
password = kwargs.get('password', '')
port = kwargs.get('port', 18083)
url = "http://%(host)s:%(port)s" % {'host': host, 'port': port}
self.port = vboxServiceLocator().getvboxServicePort(url)
if not (host):
raise exception.InvalidInput("'host' is missing")
req = IWebsessionManager_logonRequestMsg()
req._this = None
req._username = username
req._password = password
val = self.run_command('IWebsessionManager_logon', req)
self.handle = val._returnval
def run_command(self, command, request):
method = getattr(self.port, command)
try:
return method(request)
except Exception as e:
raise exception.PyRemoteVBoxException(e)
def get_version(self):
req = IVirtualBox_getVersionRequestMsg()
req._this = self.handle
val = self.run_command('IVirtualBox_getVersion', req)
return val._returnval
def find_vm(self, vmname):
req = IVirtualBox_findMachineRequestMsg()
req._this = self.handle
req._nameOrId = vmname
val = self.run_command('IVirtualBox_findMachine', req)
return VirtualBoxVm(self, val._returnval)
def _open_medium(self, device_type, location):
req = IVirtualBox_openMediumRequestMsg()
req._this = self.handle
req._location = location
req._deviceType = device_type
req._accessMode = ACCESS_READONLY
req._forceNewUuid = False
val = self.run_command('IVirtualBox_openMedium', req)
return val._returnval
def _get_medium_location(self, medium_id):
req = IMedium_getLocationRequestMsg()
req._this = medium_id
val = self.run_command('IMedium_getLocation', req)
return val._returnval
class VirtualBoxVm:
def __init__(self, virtualboxhost, handle):
self.host = virtualboxhost
self.handle = handle
def get_power_status(self):
req = IVirtualBox_getMachineStatesRequestMsg()
req._this = self.host.handle
req._machines = [self.handle]
val = self.host.run_command('IVirtualBox_getMachineStates', req)
state = val._returnval[0]
if state not in [STATE_POWERED_OFF, STATE_POWERED_ON]:
return STATE_ERROR
return state
def get_boot_device(self, position=1):
req = IMachine_getBootOrderRequestMsg()
req._this = self.handle
req._position = position
val = self.host.run_command('IMachine_getBootOrder', req)
return val._returnval
def _get_session_id(self):
req = IWebsessionManager_getSessionObjectRequestMsg()
req._this = None
req._refIVirtualBox = self.host.handle
val = self.host.run_command('IWebsessionManager_getSessionObject', req)
session_id = val._returnval
return session_id
def _lock_machine(self, session_id, lock_type=LOCKTYPE_SHARED):
req = IMachine_lockMachineRequestMsg()
req._this = self.handle
req._session = session_id
req._lockType = lock_type
val = self.host.run_command('IMachine_lockMachine', req)
def _get_mutable_machine(self, session_id):
# Lock machine
self._lock_machine(session_id, LOCKTYPE_WRITE)
# Get mutable machine
req = ISession_getMachineRequestMsg()
req._this = session_id
val = self.host.run_command('ISession_getMachine', req)
mutable_machine_id = val._returnval
return mutable_machine_id
def _save_settings(self, mutable_machine_id):
req = IMachine_saveSettingsRequestMsg()
req._this = mutable_machine_id
val = self.host.run_command('IMachine_saveSettings', req)
def _unlock_machine(self, session_id):
req = ISession_unlockMachineRequestMsg()
req._this = session_id
val = self.host.run_command('ISession_unlockMachine', req)
def attach_device(self, device_type, location):
if self.get_power_status() == STATE_POWERED_ON:
raise exception.VmInWrongPowerState(operation='attach_device',
state='powered on')
try:
self.detach_device(device_type)
except Exception:
pass
# Get mutable machine
session_id = self._get_session_id()
controller_name = DEVICE_TO_CONTROLLER_MAP[device_type]
medium_id = self.host._open_medium(device_type, location)
mutable_machine_id = self._get_mutable_machine(session_id)
try:
req = IMachine_attachDeviceRequestMsg()
req._this = mutable_machine_id
req._name = controller_name
req._controllerPort=0
req._device = 0
req._type = device_type
req._medium = medium_id
val = self.host.run_command('IMachine_attachDevice', req)
# Save settings and unlock
self._save_settings(mutable_machine_id)
finally:
self._unlock_machine(session_id)
def detach_device(self, device_type):
if self.get_power_status() == STATE_POWERED_ON:
raise exception.VmInWrongPowerState(operation='detach_device',
state='powered on')
session_id = self._get_session_id()
controller_name = DEVICE_TO_CONTROLLER_MAP[device_type]
mutable_machine_id = self._get_mutable_machine(session_id)
try:
req = IMachine_detachDeviceRequestMsg()
req._this = mutable_machine_id
req._name = controller_name
req._controllerPort=0
req._device = 0
req._type = device_type
val = self.host.run_command('IMachine_detachDevice', req)
# Save settings and unlock
self._save_settings(mutable_machine_id)
finally:
self._unlock_machine(session_id)
def get_attached_device(self, device_type):
session_id = self._get_session_id()
controller_name = DEVICE_TO_CONTROLLER_MAP[device_type]
req = IMachine_getMediumRequestMsg()
req._this = self.handle
req._name = controller_name
req._controllerPort=0
req._device = 0
val = None
try:
val = self.host.run_command('IMachine_getMedium', req)
except exception.PyRemoteVBoxException as e:
if 'No storage device attached' in str(e):
return None
# Any other fault is unexpected; re-raise it rather than failing
# later on val being None.
raise
return self.host._get_medium_location(val._returnval)
def set_boot_device(self, device, position=1):
if self.get_power_status() == STATE_POWERED_ON:
raise exception.VmInWrongPowerState(operation='set_boot_device',
state='powered on')
# Get mutable machine
session_id = self._get_session_id()
mutable_machine_id = self._get_mutable_machine(session_id)
# Change boot order
req = IMachine_setBootOrderRequestMsg()
req._this = mutable_machine_id
req._position = position
req._device = device
val = self.host.run_command('IMachine_setBootOrder', req)
# Save settings and unlock
self._save_settings(mutable_machine_id)
self._unlock_machine(session_id)
def get_firmware_type(self):
session_id = self._get_session_id()
req = IMachine_getFirmwareTypeRequestMsg()
req._this = self.handle
val = self.host.run_command('IMachine_getFirmwareType', req)
return val._returnval
def set_firmware_type(self, firmware_type):
if self.get_power_status() == STATE_POWERED_ON:
raise exception.VmInWrongPowerState(operation='set_firmware_type',
state='powered on')
session_id = self._get_session_id()
mutable_machine_id = self._get_mutable_machine(session_id)
try:
req = IMachine_setFirmwareTypeRequestMsg()
req._this = mutable_machine_id
req._firmwareType = firmware_type
val = self.host.run_command('IMachine_setFirmwareType', req)
# Save settings and unlock
self._save_settings(mutable_machine_id)
finally:
self._unlock_machine(session_id)
def start(self, vm_type="gui"):
if self.get_power_status() == STATE_POWERED_ON:
return
session_id = self._get_session_id()
req = IMachine_launchVMProcessRequestMsg()
req._this = self.handle
req._type = vm_type
req._environment = ""
req._session = session_id
val=self.host.run_command('IMachine_launchVMProcess', req)
for i in range(10):
time.sleep(2)
try:
self._unlock_machine(session_id)
break
except Exception:
pass
else:
raise exception.PyRemoteVBoxException("Failed to unlock machine "
"after 10 attempts.")
def stop(self):
if self.get_power_status() == STATE_POWERED_OFF:
return
session_id = self._get_session_id()
self._lock_machine(session_id, LOCKTYPE_SHARED)
req = ISession_getConsoleRequestMsg()
req._this = session_id
val = self.host.run_command('ISession_getConsole', req)
console_id = val._returnval
req = IConsole_powerDownRequestMsg()
req._this = console_id
val = self.host.run_command('IConsole_powerDown', req)
# Give a while for VirtualBox to unlock the machine.
time.sleep(1)
|
|
"""
This module implements a parser for language tags, according to the RFC 5646
(BCP 47) standard.
Here, we're only concerned with the syntax of the language tag. Looking up
what they actually mean in a data file is a separate step.
For a full description of the syntax of a language tag, see page 3 of
http://tools.ietf.org/html/bcp47
>>> parse_tag('en')
[('language', 'en')]
>>> parse_tag('en_US')
[('language', 'en'), ('territory', 'US')]
>>> parse_tag('en-Latn')
[('language', 'en'), ('script', 'Latn')]
>>> parse_tag('es-419')
[('language', 'es'), ('territory', '419')]
>>> parse_tag('zh-hant-tw')
[('language', 'zh'), ('script', 'Hant'), ('territory', 'TW')]
>>> parse_tag('zh-tw-hant')
Traceback (most recent call last):
...
langcodes.tag_parser.LanguageTagError: This script subtag, 'hant', is out of place. Expected variant, extension, or end of string.
>>> parse_tag('de-DE-1901')
[('language', 'de'), ('territory', 'DE'), ('variant', '1901')]
>>> parse_tag('ja-latn-hepburn')
[('language', 'ja'), ('script', 'Latn'), ('variant', 'hepburn')]
>>> parse_tag('ja-hepburn-latn')
Traceback (most recent call last):
...
langcodes.tag_parser.LanguageTagError: This script subtag, 'latn', is out of place. Expected variant, extension, or end of string.
>>> parse_tag('zh-yue')
[('language', 'zh'), ('extlang', 'yue')]
>>> parse_tag('zh-yue-Hant')
[('language', 'zh'), ('extlang', 'yue'), ('script', 'Hant')]
>>> parse_tag('zh-min-nan')
[('grandfathered', 'zh-min-nan')]
>>> parse_tag('x-dothraki')
[('language', 'x-dothraki')]
>>> parse_tag('en-u-co-backwards-x-pig-latin')
[('language', 'en'), ('extension', 'u-co-backwards'), ('private', 'x-pig-latin')]
>>> parse_tag('en-x-pig-latin-u-co-backwards')
[('language', 'en'), ('private', 'x-pig-latin-u-co-backwards')]
>>> parse_tag('u-co-backwards')
Traceback (most recent call last):
...
langcodes.tag_parser.LanguageTagError: Expected a language code, got 'u'
"""
from __future__ import print_function, unicode_literals
# These tags should not be parsed by the usual parser; they're grandfathered
# in from RFC 3066. The 'irregular' ones don't fit the syntax at all; the
# 'regular' ones do, but would give meaningless results when parsed.
#
# These are all lowercased so they can be matched case-insensitively, as the
# standard requires.
EXCEPTIONS = {
# Irregular exceptions
"en-gb-oed",
"i-ami",
"i-bnn",
"i-default",
"i-enochian",
"i-hak",
"i-klingon",
"i-lux",
"i-mingo",
"i-navajo",
"i-pwn",
"i-tao",
"i-tay",
"i-tsu",
"sgn-be-fr",
"sgn-be-nl",
"sgn-ch-de",
# Regular exceptions
"art-lojban",
"cel-gaulish",
"no-bok",
"no-nyn",
"zh-guoyu",
"zh-hakka",
"zh-min",
"zh-min-nan",
"zh-xiang",
}
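# Illustrative example of how this set is used (see parse_tag below):
# because tags are lowercased first, parse_tag('en-GB-oed') matches the
# irregular exception and returns [('grandfathered', 'en-gb-oed')].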
# Define the order of subtags as integer constants, but also give them names
# so we can describe them in error messages
EXTLANG, SCRIPT, TERRITORY, VARIANT, EXTENSION = range(5)
SUBTAG_TYPES = [
'extlang',
'script',
'territory',
'variant',
'extension',
'end of string',
]
def normalize_characters(tag):
"""
BCP 47 is case-insensitive, and considers underscores equivalent to
hyphens. So here we smash tags into lowercase with hyphens, so we can
make exact comparisons.
>>> normalize_characters('en_US')
'en-us'
>>> normalize_characters('zh-Hant_TW')
'zh-hant-tw'
"""
return tag.lower().replace('_', '-')
def parse_tag(tag):
"""
Parse the syntax of a language tag, without looking up anything in the
registry, yet. Returns a list of (type, value) tuples indicating what
information will need to be looked up.
"""
tag = normalize_characters(tag)
if tag in EXCEPTIONS:
return [('grandfathered', tag)]
else:
# The first subtag is always either the language code, or 'x' to mark
# the entire tag as private-use. Other subtags are distinguished
# by their length and format, but the language code is distinguished
# by the fact that it is required to come first.
subtags = tag.split('-')
if subtags[0] == 'x':
if len(subtags) == 1:
raise LanguageTagError("'x' is not a language tag on its own")
else:
# the entire language tag is private use, but we know that,
# whatever it is, it fills the "language" slot
return [('language', tag)]
elif 2 <= len(subtags[0]) <= 4:
# Language codes should be 2 or 3 letters, but 4-letter codes
# are allowed to parse for legacy Unicode reasons
return [('language', subtags[0])] + parse_subtags(subtags[1:])
else:
subtag_error(subtags[0], 'a language code')
def parse_subtags(subtags, expect=EXTLANG):
"""
Parse everything that comes after the language tag: scripts, territories,
variants, and assorted extensions.
"""
# We parse the parts of a language code recursively: each step of
# language code parsing handles one component of the code, recurses
# to handle the rest of the code, and adds what it found onto the
# list of things that were in the rest of the code.
#
# This could just as well have been iterative, but the loops would have
# been convoluted.
#
# So here's the base case.
if not subtags:
return []
# There's a subtag that comes next. We need to find out what it is.
#
# The primary thing that distinguishes different types of subtags is
# length, but the subtags also come in a specified order. The 'expect'
# parameter keeps track of where we are in that order. expect=TERRITORY,
# for example, means we're expecting a territory code, or anything later
# (because everything but the language is optional).
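# A small worked example of that ordering (illustrative only):
# parse_subtags(['latn', 'us']) recognizes a script, then a territory,
# returning [('script', 'Latn'), ('territory', 'US')], whereas
# parse_subtags(['us', 'latn']) raises an "out of place" error because a
# script may not follow a territory.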
subtag = subtags[0]
tag_length = len(subtag)
# In the usual case, our goal is to recognize what kind of tag this is,
# and set it in 'tagtype' -- as an integer, so we can compare where it
# should go in order. You can see the enumerated list of tagtypes above,
# where the SUBTAG_TYPES global is defined.
tagtype = None
if tag_length == 0 or tag_length > 8:
# Unless you're inside a private use tag or something -- in which case,
# you're not in this function at the moment -- every component needs to
# be between 1 and 8 characters.
subtag_error(subtag, '1-8 characters')
elif tag_length == 1:
# A one-character subtag introduces an extension, which can itself have
# sub-subtags, so we dispatch to a different function at this point.
#
# We don't need to check anything about the order, because extensions
# necessarily come last.
return parse_extension(subtags)
elif tag_length == 2:
if subtag.isalpha():
# Two-letter alphabetic subtags are territories. These are the only
# two-character subtags after the language.
tagtype = TERRITORY
elif tag_length == 3:
if subtag.isalpha():
# Three-letter alphabetic subtags are 'extended languages'.
# It's allowed for there to be up to three of them in a row, so we
# need another function to enforce that. Before we dispatch to that
# function, though, we need to check whether we're in the right
# place in order.
if expect <= EXTLANG:
return parse_extlang(subtags)
else:
order_error(subtag, EXTLANG, expect)
elif subtag.isdigit():
# Three-digit subtags are territories representing broad regions,
# such as Latin America (419).
tagtype = TERRITORY
elif tag_length == 4:
if subtag.isalpha():
# Four-letter alphabetic subtags are scripts.
tagtype = SCRIPT
elif subtag[0].isdigit():
# Four-character subtags that start with a digit are variants.
tagtype = VARIANT
else:
# Tags of length 5-8 are variants.
tagtype = VARIANT
# That's the end of the big elif block for figuring out what kind of
# subtag we have based on its length. Now we should do something with that
# kind of subtag.
if tagtype is None:
# We haven't recognized a type of tag. This subtag just doesn't fit the
# standard.
subtag_error(subtag)
elif tagtype < expect:
# We got a tag type that was supposed to appear earlier in the order.
order_error(subtag, tagtype, expect)
else:
# We've recognized a subtag of a particular type. If it's a territory or
# script, we expect the next subtag to be a strictly later type, because
# there can be at most one territory and one script. Otherwise, we expect
# the next subtag to be the type we got or later.
if tagtype in (SCRIPT, TERRITORY):
expect = tagtype + 1
else:
expect = tagtype
# Get the name of this subtag type instead of its integer value.
typename = SUBTAG_TYPES[tagtype]
# Some subtags are conventionally written with capitalization. Apply
# those conventions.
if tagtype == SCRIPT:
subtag = subtag.title()
elif tagtype == TERRITORY:
subtag = subtag.upper()
# Recurse on the remaining subtags.
return [(typename, subtag)] + parse_subtags(subtags[1:], expect)
def parse_extlang(subtags):
"""
Parse an 'extended language' tag, which consists of 1 to 3 three-letter
language codes.
Extended languages are used for distinguishing dialects/sublanguages
(depending on your view) of macrolanguages such as Arabic, Bahasa Malay,
and Chinese.
It's supposed to also be acceptable to just use the sublanguage as the
primary language code, and your code should know what's a macrolanguage of
what. For example, 'zh-yue' and 'yue' are the same language (Cantonese),
and differ only in whether they explicitly spell out that Cantonese is a
kind of Chinese.
"""
index = 0
parsed = []
while index < len(subtags) and len(subtags[index]) == 3 and index < 3:
parsed.append(('extlang', subtags[index]))
index += 1
return parsed + parse_subtags(subtags[index:], SCRIPT)
def parse_extension(subtags):
"""
An extension tag consists of a 'singleton' -- a one-character subtag --
followed by other subtags. Extension tags are in the BCP 47 syntax, but
their meaning is outside the scope of the standard.
For example, there's the u- extension, which is used for setting Unicode
locale properties such as collation order ('u-co-backwards' above).
If the singleton is 'x', it's a private use extension, and consumes the
rest of the tag. Otherwise, it stops at the next singleton.
"""
subtag = subtags[0]
if len(subtags) == 1:
raise LanguageTagError(f"The subtag {subtag!r} must be followed by something")
if subtag == 'x':
# Private use. Everything after this is arbitrary codes that we
# can't look up.
return [('private', '-'.join(subtags))]
else:
# Look for the next singleton, if there is one.
boundary = 1
while boundary < len(subtags) and len(subtags[boundary]) != 1:
boundary += 1
# We've parsed a complete extension subtag. Return to the main
# parse_subtags function, but expect to find nothing but more
# extensions at this point.
return [('extension', '-'.join(subtags[:boundary]))] + parse_subtags(
subtags[boundary:], EXTENSION
)
class LanguageTagError(ValueError):
pass
def order_error(subtag, got, expected):
"""
Output an error indicating that tags were out of order.
"""
options = SUBTAG_TYPES[expected:]
if len(options) == 1:
expect_str = options[0]
elif len(options) == 2:
expect_str = f'{options[0]} or {options[1]}'
else:
joined = ', '.join(options[:-1])
last = options[-1]
expect_str = f'{joined}, or {last}'
got_str = SUBTAG_TYPES[got]
raise LanguageTagError(
f"This {got_str} subtag, {subtag!r}, is out of place. Expected {expect_str}."
)
def subtag_error(subtag, expected='a valid subtag'):
"""
Try to output a reasonably helpful error message based on our state of
parsing. Most of this code is about how to list, in English, the kinds
of things we were expecting to find.
"""
raise LanguageTagError(f"Expected {expected}, got {subtag!r}")
|
|
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import logging
import argparse
import platform
import subprocess
os.environ["PYTHONUNBUFFERED"] = "y"
PY2 = sys.version_info[0] == 2
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(ZULIP_PATH)
from scripts.lib.zulip_tools import run, subprocess_text_output, OKBLUE, ENDC, WARNING
from scripts.lib.setup_venv import setup_virtualenv, VENV_DEPENDENCIES
from scripts.lib.node_cache import setup_node_modules, NPM_CACHE_PATH
from version import PROVISION_VERSION
if False:
from typing import Any
SUPPORTED_PLATFORMS = {
"Ubuntu": [
"trusty",
"xenial",
],
}
PY2_VENV_PATH = "/srv/zulip-venv"
PY3_VENV_PATH = "/srv/zulip-py3-venv"
VAR_DIR_PATH = os.path.join(ZULIP_PATH, 'var')
LOG_DIR_PATH = os.path.join(VAR_DIR_PATH, 'log')
UPLOAD_DIR_PATH = os.path.join(VAR_DIR_PATH, 'uploads')
TEST_UPLOAD_DIR_PATH = os.path.join(VAR_DIR_PATH, 'test_uploads')
COVERAGE_DIR_PATH = os.path.join(VAR_DIR_PATH, 'coverage')
LINECOVERAGE_DIR_PATH = os.path.join(VAR_DIR_PATH, 'linecoverage-report')
NODE_TEST_COVERAGE_DIR_PATH = os.path.join(VAR_DIR_PATH, 'node-coverage')
# TODO: De-duplicate this with emoji_dump.py
EMOJI_CACHE_PATH = "/srv/zulip-emoji-cache"
if 'TRAVIS' in os.environ:
# In Travis CI, we don't have root access
EMOJI_CACHE_PATH = "/home/travis/zulip-emoji-cache"
if PY2:
VENV_PATH = PY2_VENV_PATH
else:
VENV_PATH = PY3_VENV_PATH
if not os.path.exists(os.path.join(ZULIP_PATH, ".git")):
print("Error: No Zulip git repository present!")
print("To setup the Zulip development environment, you should clone the code")
print("from GitHub, rather than using a Zulip production release tarball.")
sys.exit(1)
try:
run(["mkdir", "-p", VAR_DIR_PATH])
if os.path.exists(os.path.join(VAR_DIR_PATH, 'zulip-test-symlink')):
os.remove(os.path.join(VAR_DIR_PATH, 'zulip-test-symlink'))
os.symlink(
os.path.join(ZULIP_PATH, 'README.md'),
os.path.join(VAR_DIR_PATH, 'zulip-test-symlink')
)
os.remove(os.path.join(VAR_DIR_PATH, 'zulip-test-symlink'))
except OSError as err:
print("Error: Unable to create symlinks. Make sure you have permission to create symbolic links.")
print("See this page for more information:")
print(" http://zulip.readthedocs.io/en/latest/dev-env-first-time-contributors.html#os-symlink-error")
sys.exit(1)
if platform.architecture()[0] == '64bit':
arch = 'amd64'
elif platform.architecture()[0] == '32bit':
arch = "i386"
else:
logging.critical("Only x86 is supported; ping [email protected] if you want another architecture.")
sys.exit(1)
# Ideally we wouldn't need to install a dependency here, before we
# know the codename.
subprocess.check_call(["sudo", "apt-get", "install", "-y", "lsb-release"])
vendor = subprocess_text_output(["lsb_release", "-is"])
codename = subprocess_text_output(["lsb_release", "-cs"])
if not (vendor in SUPPORTED_PLATFORMS and codename in SUPPORTED_PLATFORMS[vendor]):
logging.critical("Unsupported platform: {} {}".format(vendor, codename))
sys.exit(1)
POSTGRES_VERSION_MAP = {
"trusty": "9.3",
"xenial": "9.5",
}
POSTGRES_VERSION = POSTGRES_VERSION_MAP[codename]
UBUNTU_COMMON_APT_DEPENDENCIES = [
"closure-compiler",
"memcached",
"rabbitmq-server",
"redis-server",
"hunspell-en-us",
"supervisor",
"git",
"libssl-dev",
"yui-compressor",
"wget",
"ca-certificates", # Explicit dependency in case e.g. wget is already installed
"puppet", # Used by lint-all
"gettext", # Used by makemessages i18n
"curl", # Used for fetching PhantomJS as wget occasionally fails on redirects
"netcat", # Used for flushing memcached
] + VENV_DEPENDENCIES
APT_DEPENDENCIES = {
"trusty": UBUNTU_COMMON_APT_DEPENDENCIES + [
"postgresql-9.3",
"postgresql-9.3-tsearch-extras",
"postgresql-9.3-pgroonga",
],
"xenial": UBUNTU_COMMON_APT_DEPENDENCIES + [
"postgresql-9.5",
"postgresql-9.5-tsearch-extras",
"postgresql-9.5-pgroonga",
],
}
TSEARCH_STOPWORDS_PATH = "/usr/share/postgresql/%s/tsearch_data/" % (POSTGRES_VERSION,)
REPO_STOPWORDS_PATH = os.path.join(
ZULIP_PATH,
"puppet",
"zulip",
"files",
"postgresql",
"zulip_english.stop",
)
LOUD = dict(_out=sys.stdout, _err=sys.stderr)
user_id = os.getuid()
def main(options):
# type: (Any) -> int
# npm install and management commands expect to be run from the root of the
# project.
os.chdir(ZULIP_PATH)
# setup-apt-repo does an `apt-get update`
run(["sudo", "./scripts/lib/setup-apt-repo"])
run(["sudo", "apt-get", "-y", "install", "--no-install-recommends"] + APT_DEPENDENCIES[codename])
if options.is_travis:
if PY2:
MYPY_REQS_FILE = os.path.join(ZULIP_PATH, "requirements", "mypy.txt")
setup_virtualenv(PY3_VENV_PATH, MYPY_REQS_FILE, patch_activate_script=True,
virtualenv_args=['-p', 'python3'])
DEV_REQS_FILE = os.path.join(ZULIP_PATH, "requirements", "py2_dev.txt")
setup_virtualenv(PY2_VENV_PATH, DEV_REQS_FILE, patch_activate_script=True)
else:
DEV_REQS_FILE = os.path.join(ZULIP_PATH, "requirements", "py3_dev.txt")
setup_virtualenv(VENV_PATH, DEV_REQS_FILE, patch_activate_script=True,
virtualenv_args=['-p', 'python3'])
else:
# Import tools/setup_venv.py instead of running it so that we get an
# activated virtualenv for the rest of the provisioning process.
from tools.setup import setup_venvs
setup_venvs.main()
# Put Python2 virtualenv activation in our .bash_profile.
with open(os.path.expanduser('~/.bash_profile'), 'w+') as bash_profile:
bash_profile.writelines([
"source .bashrc\n",
"source %s\n" % (os.path.join(VENV_PATH, "bin", "activate"),),
])
run(["sudo", "cp", REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH])
# create log directory `zulip/var/log`
run(["mkdir", "-p", LOG_DIR_PATH])
# create upload directory `var/uploads`
run(["mkdir", "-p", UPLOAD_DIR_PATH])
# create test upload directory `var/test_upload`
run(["mkdir", "-p", TEST_UPLOAD_DIR_PATH])
# create coverage directory`var/coverage`
run(["mkdir", "-p", COVERAGE_DIR_PATH])
# create linecoverage directory`var/linecoverage-report`
run(["mkdir", "-p", LINECOVERAGE_DIR_PATH])
# create linecoverage directory`var/node-coverage`
run(["mkdir", "-p", NODE_TEST_COVERAGE_DIR_PATH])
run(["tools/setup/download-zxcvbn"])
if not os.path.isdir(EMOJI_CACHE_PATH):
run(["sudo", "mkdir", EMOJI_CACHE_PATH])
run(["sudo", "chown", "%s:%s" % (user_id, user_id), EMOJI_CACHE_PATH])
run(["tools/setup/emoji_dump/build_emoji"])
run(["scripts/setup/generate_secrets.py", "--development"])
if options.is_travis and not options.is_production_travis:
run(["sudo", "service", "rabbitmq-server", "restart"])
run(["sudo", "service", "redis-server", "restart"])
run(["sudo", "service", "memcached", "restart"])
elif options.is_docker:
run(["sudo", "service", "rabbitmq-server", "restart"])
run(["sudo", "pg_dropcluster", "--stop", POSTGRES_VERSION, "main"])
run(["sudo", "pg_createcluster", "-e", "utf8", "--start", POSTGRES_VERSION, "main"])
run(["sudo", "service", "redis-server", "restart"])
run(["sudo", "service", "memcached", "restart"])
if not options.is_production_travis:
# These won't be used anyway
run(["scripts/setup/configure-rabbitmq"])
run(["tools/setup/postgres-init-dev-db"])
run(["tools/do-destroy-rebuild-database"])
# Need to set up Django before using is_template_database_current.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
import django
django.setup()
from zerver.lib.test_fixtures import is_template_database_current
if options.is_force or not is_template_database_current():
run(["tools/setup/postgres-init-test-db"])
run(["tools/do-destroy-rebuild-test-database"])
else:
print("No need to regenerate the test DB.")
run(["./manage.py", "compilemessages"])
# Here we install nvm, node, and npm.
run(["sudo", "tools/setup/install-node"])
# This is a wrapper around `npm install`, which we run last since
# it can often fail due to network issues beyond our control.
try:
# Hack: We remove `node_modules` as root to work around an
# issue with the symlinks being improperly owned by root.
if os.path.islink("node_modules"):
run(["sudo", "rm", "-f", "node_modules"])
if not os.path.isdir(NPM_CACHE_PATH):
run(["sudo", "mkdir", NPM_CACHE_PATH])
run(["sudo", "chown", "%s:%s" % (user_id, user_id), NPM_CACHE_PATH])
setup_node_modules()
except subprocess.CalledProcessError:
print(WARNING + "`npm install` failed; retrying..." + ENDC)
setup_node_modules()
version_file = os.path.join(ZULIP_PATH, 'var/provision_version')
print('writing to %s\n' % (version_file,))
with open(version_file, 'w') as version_fp:
version_fp.write(PROVISION_VERSION + '\n')
print()
print(OKBLUE + "Zulip development environment setup succeeded!" + ENDC)
return 0
if __name__ == "__main__":
description = ("Provision script to install Zulip")
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--force', action='store_true', dest='is_force',
default=False,
help="Ignore all provisioning optimizations.")
parser.add_argument('--travis', action='store_true', dest='is_travis',
default=False,
help="Provision for Travis but without production settings.")
parser.add_argument('--production-travis', action='store_true',
dest='is_production_travis',
default=False,
help="Provision for Travis but with production settings.")
parser.add_argument('--docker', action='store_true',
dest='is_docker',
default=False,
help="Provision for Docker.")
options = parser.parse_args()
sys.exit(main(options))
|
|
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron_lib import context as nctx
from neutron_lib.db import api as db_api
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
from sqlalchemy.orm import session as se
from webob import exc
from neutron.common import utils
from neutron.db import models_v2
from neutron.objects import ports as port_obj
from neutron.tests.unit.plugins.ml2 import test_plugin
class TestRevisionPlugin(test_plugin.Ml2PluginV2TestCase):
l3_plugin = ('neutron.tests.unit.extensions.test_extraroute.'
'TestExtraRouteL3NatServicePlugin')
_extension_drivers = ['qos']
def get_additional_service_plugins(self):
p = super(TestRevisionPlugin, self).get_additional_service_plugins()
p.update({'revision_plugin_name': 'revisions',
'qos_plugin_name': 'qos',
'tag_name': 'tag'})
return p
def setUp(self):
cfg.CONF.set_override('extension_drivers',
self._extension_drivers,
group='ml2')
super(TestRevisionPlugin, self).setUp()
self.cp = directory.get_plugin()
self.l3p = directory.get_plugin(constants.L3)
self._ctx = nctx.get_admin_context()
self._tenant_id = uuidutils.generate_uuid()
@property
def ctx(self):
# TODO(kevinbenton): return ctx without expire_all after switch to
# enginefacade complete. We expire_all here because the switch to
# the new engine facade is resulting in changes being spread over
# other sessions so we can end up getting stale reads in the parent
# session if objects remain in the identity map.
if not utils.is_session_active(self._ctx.session):
self._ctx.session.expire_all()
return self._ctx
def test_handle_expired_object(self):
rp = directory.get_plugin('revision_plugin')
with self.port():
with db_api.CONTEXT_WRITER.using(self.ctx):
ipal_objs = port_obj.IPAllocation.get_objects(self.ctx)
if not ipal_objs:
raise Exception("No IP allocations available.")
ipal_obj = ipal_objs[0]
# load port into our session
port = self.ctx.session.query(models_v2.Port).one()
# simulate concurrent delete in another session
other_ctx = nctx.get_admin_context()
other_ctx.session.delete(
other_ctx.session.query(models_v2.Port).first()
)
other_ctx.session.flush()
# ensure no attribute lookups are attempted on an
# object deleted from the session when doing related
# bumps
self.ctx.session.expire(port)
collected = rp._collect_related_tobump(
self.ctx.session, [ipal_obj], set())
rp._bump_obj_revisions(
self.ctx.session, collected, version_check=False)
def test_shared_network_create(self):
# this test intends to run db_base_plugin_v2 -> create_network_db,
# which in turn creates a Network and then a NetworkRBAC object.
# An issue was observed with the revision_plugin which would interfere
# with the flush process that occurs with these two connected objects,
# creating two copies of the Network object in the Session and putting
# it into an invalid state.
with self.network(shared=True):
pass
def test_port_name_update_revises(self):
with self.port() as port:
rev = port['port']['revision_number']
new = {'port': {'name': 'seaweed'}}
response = self._update('ports', port['port']['id'], new)
new_rev = response['port']['revision_number']
self.assertGreater(new_rev, rev)
def test_constrained_port_update(self):
with self.port() as port:
rev = port['port']['revision_number']
new = {'port': {'name': 'nigiri'}}
for val in (rev - 1, rev + 1):
# make sure off-by ones are rejected
self._update('ports', port['port']['id'], new,
headers={'If-Match': 'revision_number=%s' % val},
expected_code=exc.HTTPPreconditionFailed.code)
after_attempt = self._show('ports', port['port']['id'])
self.assertEqual(rev, after_attempt['port']['revision_number'])
self.assertEqual(port['port']['name'],
after_attempt['port']['name'])
# correct revision should work
self._update('ports', port['port']['id'], new,
headers={'If-Match': 'revision_number=%s' % rev})
def test_constrained_port_delete(self):
with self.port() as port:
rev = port['port']['revision_number']
for val in (rev - 1, rev + 1):
# make sure off-by ones are rejected
self._delete('ports', port['port']['id'],
headers={'If-Match': 'revision_number=%s' % val},
expected_code=exc.HTTPPreconditionFailed.code)
# correct revision should work
self._delete('ports', port['port']['id'],
headers={'If-Match': 'revision_number=%s' % rev})
def test_constrained_port_update_handles_db_retries(self):
        # here we ensure all of the constraint handling logic still applies
        # when a retriable commit failure is caused by a race with another
        # update
with self.port() as port:
rev = port['port']['revision_number']
new = {'port': {'name': 'nigiri'}}
def concurrent_increment(s):
db_api.sqla_remove(se.Session, 'before_commit',
concurrent_increment)
# slip in a concurrent update that will bump the revision
plugin = directory.get_plugin()
plugin.update_port(nctx.get_admin_context(),
port['port']['id'], new)
raise db_exc.DBDeadlock()
db_api.sqla_listen(se.Session, 'before_commit',
concurrent_increment)
            # Although the revision number is bumped twice during the session
            # transaction, the revision check is performed only once: the first
            # time the revision number service is executed for this session and
            # object.
self._update('ports', port['port']['id'], new,
headers={'If-Match': 'revision_number=%s' % rev},
expected_code=exc.HTTPOk.code)
self._update('ports', port['port']['id'], new,
headers={'If-Match': 'revision_number=%s' %
str(int(rev) + 2)},
expected_code=exc.HTTPOk.code)
self._update('ports', port['port']['id'], new,
headers={'If-Match': 'revision_number=1'},
expected_code=exc.HTTPPreconditionFailed.code)
def test_port_ip_update_revises(self):
with self.subnet() as subnet, self.port(subnet=subnet) as port:
rev = port['port']['revision_number']
new = {'port': {'fixed_ips': port['port']['fixed_ips']}}
# ensure adding an IP allocation updates the port
free_ip = self._find_ip_address(subnet['subnet'])
new['port']['fixed_ips'].append({'ip_address': free_ip})
response = self._update('ports', port['port']['id'], new)
self.assertEqual(2, len(response['port']['fixed_ips']))
new_rev = response['port']['revision_number']
self.assertGreater(new_rev, rev)
# ensure deleting an IP allocation updates the port
rev = new_rev
new['port']['fixed_ips'].pop()
response = self._update('ports', port['port']['id'], new)
self.assertEqual(1, len(response['port']['fixed_ips']))
new_rev = response['port']['revision_number']
self.assertGreater(new_rev, rev)
def test_security_group_rule_ops_bump_security_group(self):
s = {'security_group': {'tenant_id': 'some_tenant', 'name': '',
'description': 's'}}
sg = self.cp.create_security_group(self.ctx, s)
s['security_group']['name'] = 'hello'
updated = self.cp.update_security_group(self.ctx, sg['id'], s)
self.assertGreater(updated['revision_number'], sg['revision_number'])
# ensure rule changes bump parent SG
r = {'security_group_rule': {'tenant_id': 'some_tenant',
'port_range_min': 80, 'protocol': 6,
'port_range_max': 90,
'remote_ip_prefix': '0.0.0.0/0',
'ethertype': 'IPv4',
'remote_group_id': None,
'remote_address_group_id': None,
'direction': 'ingress',
'security_group_id': sg['id']}}
rule = self.cp.create_security_group_rule(self.ctx, r)
sg = updated
updated = self.cp.get_security_group(self.ctx, sg['id'])
self.assertGreater(updated['revision_number'], sg['revision_number'])
self.cp.delete_security_group_rule(self.ctx, rule['id'])
sg = updated
updated = self.cp.get_security_group(self.ctx, sg['id'])
self.assertGreater(updated['revision_number'], sg['revision_number'])
def test_router_interface_ops_bump_router(self):
r = {'router': {'name': 'myrouter', 'tenant_id': 'some_tenant',
'admin_state_up': True}}
router = self.l3p.create_router(self.ctx, r)
r['router']['name'] = 'yourrouter'
updated = self.l3p.update_router(self.ctx, router['id'], r)
self.assertGreater(updated['revision_number'],
router['revision_number'])
# add an intf and make sure it bumps rev
with self.subnet(tenant_id='some_tenant', cidr='10.0.1.0/24') as s:
interface_info = {'subnet_id': s['subnet']['id']}
self.l3p.add_router_interface(self.ctx, router['id'],
interface_info)
router = updated
updated = self.l3p.get_router(self.ctx, router['id'])
self.assertGreater(updated['revision_number'],
router['revision_number'])
# Add a route and make sure it bumps revision number
router = updated
body = {'router': {'routes': [{'destination': '192.168.2.0/24',
'nexthop': '10.0.1.3'}]}}
self.l3p.update_router(self.ctx, router['id'], body)
updated = self.l3p.get_router(self.ctx, router['id'])
self.assertGreater(updated['revision_number'],
router['revision_number'])
router = updated
body['router']['routes'] = []
self.l3p.update_router(self.ctx, router['id'], body)
updated = self.l3p.get_router(self.ctx, router['id'])
self.assertGreater(updated['revision_number'],
router['revision_number'])
self.l3p.remove_router_interface(self.ctx, router['id'],
interface_info)
router = updated
updated = self.l3p.get_router(self.ctx, router['id'])
self.assertGreater(updated['revision_number'],
router['revision_number'])
def test_qos_policy_bump_port_revision(self):
with self.port() as port:
rev = port['port']['revision_number']
qos_plugin = directory.get_plugin('QOS')
qos_policy = {'policy': {'id': uuidutils.generate_uuid(),
'name': "policy1",
'project_id': uuidutils.generate_uuid()}}
qos_obj = qos_plugin.create_policy(self.ctx, qos_policy)
data = {'port': {'qos_policy_id': qos_obj['id']}}
response = self._update('ports', port['port']['id'], data)
new_rev = response['port']['revision_number']
self.assertGreater(new_rev, rev)
def test_qos_policy_bump_network_revision(self):
with self.network() as network:
rev = network['network']['revision_number']
qos_plugin = directory.get_plugin('QOS')
qos_policy = {'policy': {'id': uuidutils.generate_uuid(),
'name': "policy1",
'project_id': uuidutils.generate_uuid()}}
qos_obj = qos_plugin.create_policy(self.ctx, qos_policy)
data = {'network': {'qos_policy_id': qos_obj['id']}}
response = self._update('networks', network['network']['id'], data)
new_rev = response['network']['revision_number']
self.assertGreater(new_rev, rev)
def test_net_tag_bumps_net_revision(self):
with self.network() as network:
rev = network['network']['revision_number']
tag_plugin = directory.get_plugin('TAG')
tag_plugin.update_tag(self.ctx, 'networks',
network['network']['id'], 'mytag')
updated = directory.get_plugin().get_network(
self.ctx, network['network']['id'])
self.assertGreater(updated['revision_number'], rev)
tag_plugin.delete_tag(self.ctx, 'networks',
network['network']['id'], 'mytag')
rev = updated['revision_number']
updated = directory.get_plugin().get_network(
self.ctx, network['network']['id'])
self.assertGreater(updated['revision_number'], rev)
|
|
"""Views from ecommerce"""
import logging
import traceback
from urllib.parse import urljoin
from django.conf import settings
from django.db import transaction
from django.http.response import Http404
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import RedirectView
from ipware import get_client_ip
from rest_framework.authentication import SessionAuthentication, TokenAuthentication
from rest_framework.exceptions import ValidationError
from rest_framework.mixins import ListModelMixin
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from courses.models import CourseRun
from ecommerce.api import (
create_unfulfilled_order,
enroll_user_on_success,
generate_cybersource_sa_payload,
get_new_order_by_reference_number,
is_coupon_redeemable,
make_dashboard_receipt_url,
pick_coupons,
)
from ecommerce.constants import (
CYBERSOURCE_DECISION_ACCEPT,
CYBERSOURCE_DECISION_CANCEL,
)
from ecommerce.exceptions import EcommerceException
from ecommerce.models import (
Coupon,
Order,
Receipt,
UserCoupon,
)
from ecommerce.permissions import (
IsLoggedInUser,
IsSignedByCyberSource,
)
from ecommerce.serializers import CouponSerializer
from mail.api import MailgunClient
from ui.url_utils import DASHBOARD_URL, PAYMENT_CALL_BACK_URL
log = logging.getLogger(__name__)
class CheckoutView(APIView):
"""
    View for the checkout API. This creates an Order in our system and provides a dictionary to
    send to CyberSource.
"""
authentication_classes = (
SessionAuthentication,
TokenAuthentication,
)
permission_classes = (IsAuthenticated,)
def post(self, request, *args, **kwargs): # pylint: disable=unused-argument
"""
If the course run is part of a financial aid program, create a new unfulfilled Order
and return information used to submit to CyberSource.
If the program does not have financial aid, this will return a URL to let the user
pay for the course on edX.
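        The JSON response always has the shape ``{'payload': ..., 'url': ...,
        'method': ...}``. A rough client-side sketch (the helper names
        ``redirect_to`` and ``post_form`` below are illustrative assumptions,
        not part of this codebase)::
            data = response.json()
            if data['method'] == 'GET':
                # $0 orders and non-financial-aid programs: just follow the URL
                redirect_to(data['url'])
            else:
                # CyberSource Secure Acceptance: POST the signed payload as a form
                post_form(data['url'], data['payload'])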
"""
user_ip, _ = get_client_ip(request)
try:
course_id = request.data['course_id']
except KeyError:
raise ValidationError("Missing course_id")
course_run = get_object_or_404(
CourseRun,
course__program__live=True,
edx_course_key=course_id,
)
if course_run.course.program.financial_aid_availability:
order = create_unfulfilled_order(course_id, request.user)
payment_callback_url = request.build_absolute_uri(PAYMENT_CALL_BACK_URL)
if order.total_price_paid == 0:
# If price is $0, don't bother going to CyberSource, just mark as fulfilled
order.status = Order.FULFILLED
order.save_and_log(request.user)
try:
enroll_user_on_success(order)
except: # pylint: disable=bare-except
log.exception(
"Error occurred when enrolling user in one or more courses for order %s. "
"See other errors above for more info.",
order
)
try:
MailgunClient().send_individual_email(
"Error occurred when enrolling user during $0 checkout",
"Error occurred when enrolling user during $0 checkout for {order}. "
"Exception: {exception}".format(
order=order,
exception=traceback.format_exc()
),
settings.ECOMMERCE_EMAIL,
)
except: # pylint: disable=bare-except
log.exception(
"Error occurred when sending the email to notify support "
"of user enrollment error during order %s $0 checkout",
order,
)
# This redirects the user to our order success page
payload = {}
url = make_dashboard_receipt_url(payment_callback_url, course_id, 'receipt')
method = 'GET'
else:
# This generates a signed payload which is submitted as an HTML form to CyberSource
payload = generate_cybersource_sa_payload(order, payment_callback_url, user_ip)
url = settings.CYBERSOURCE_SECURE_ACCEPTANCE_URL
method = 'POST'
else:
# This redirects the user to edX to purchase the course there
payload = {}
url = urljoin(settings.EDXORG_BASE_URL, '/course_modes/choose/{}/'.format(course_id))
method = 'GET'
return Response({
'payload': payload,
'url': url,
'method': method,
})
class OrderFulfillmentView(APIView):
"""
View for order fulfillment API. This API is special in that only CyberSource should talk to it.
Instead of authenticating with OAuth or via session this looks at the signature of the message
to verify authenticity.
"""
authentication_classes = ()
permission_classes = (IsSignedByCyberSource, )
def post(self, request, *args, **kwargs): # pylint: disable=unused-argument
"""
Confirmation from CyberSource which fulfills an existing Order.
"""
# First, save this information in a receipt
receipt = Receipt.objects.create(data=request.data)
# Link the order with the receipt if we can parse it
reference_number = request.data['req_reference_number']
order = get_new_order_by_reference_number(reference_number)
receipt.order = order
receipt.save()
decision = request.data['decision']
if order.status == Order.FAILED and decision == CYBERSOURCE_DECISION_CANCEL:
# This is a duplicate message, ignore since it's already handled
return Response(status=HTTP_200_OK)
elif order.status != Order.CREATED:
raise EcommerceException("Order {} is expected to have status 'created'".format(order.id))
if decision != CYBERSOURCE_DECISION_ACCEPT:
order.status = Order.FAILED
log.warning(
"Order fulfillment failed: received a decision that wasn't ACCEPT for order %s",
order,
)
if decision != CYBERSOURCE_DECISION_CANCEL:
try:
MailgunClient().send_individual_email(
"Order fulfillment failed, decision={decision}".format(
decision=decision
),
"Order fulfillment failed for order {order}".format(
order=order,
),
settings.ECOMMERCE_EMAIL
)
except: # pylint: disable=bare-except
log.exception(
"Error occurred when sending the email to notify "
"about order fulfillment failure for order %s",
order,
)
else:
order.status = Order.FULFILLED
order.save_and_log(None)
if order.status == Order.FULFILLED:
try:
enroll_user_on_success(order)
except: # pylint: disable=bare-except
log.exception(
"Error occurred when enrolling user in one or more courses for order %s. "
"See other errors above for more info.",
order
)
try:
MailgunClient().send_individual_email(
"Error occurred when enrolling user during order fulfillment",
"Error occurred when enrolling user during order fulfillment for {order}. "
"Exception: {exception}".format(
order=order,
exception=traceback.format_exc()
),
settings.ECOMMERCE_EMAIL,
)
except: # pylint: disable=bare-except
log.exception(
"Error occurred when sending the email to notify support "
"of user enrollment error during order %s fulfillment",
order,
)
# The response does not matter to CyberSource
return Response(status=HTTP_200_OK)
class CouponsView(ListModelMixin, GenericViewSet):
"""
View for coupons API. This is a read-only API showing the user
- what coupons they have available if those coupons would be automatically applied on checkout
- what coupons they have for a given coupon code, even if those coupons wouldn't be automatically applied
"""
authentication_classes = (
SessionAuthentication,
TokenAuthentication,
)
permission_classes = (IsAuthenticated,)
serializer_class = CouponSerializer
def get_queryset(self):
"""List coupons which a user is allowed to see"""
return pick_coupons(self.request.user)
class UserCouponsView(APIView):
"""
    View for coupon/user attachments. Used to attach a coupon to a user.
"""
permission_classes = (IsLoggedInUser, IsAuthenticated,)
authentication_classes = (
SessionAuthentication,
TokenAuthentication,
)
def post(self, request, code, *args, **kwargs): # pylint: disable=unused-argument
"""Attach a coupon to a user"""
with transaction.atomic():
coupon = get_object_or_404(Coupon, coupon_code=code)
if not is_coupon_redeemable(coupon, self.request.user):
                # Coupon is not redeemable for this user, so return a 404.
raise Http404
try:
user_coupon = UserCoupon.objects.get(
coupon=coupon,
user=self.request.user,
)
except UserCoupon.DoesNotExist:
user_coupon = UserCoupon(
coupon=coupon,
user=self.request.user,
)
# Note: we always want to save so that the modification date is updated
user_coupon.save_and_log(request.user)
return Response(
status=HTTP_200_OK,
data={
'message': 'Attached user to coupon successfully.',
'coupon': CouponSerializer(coupon).data,
}
)
@method_decorator(csrf_exempt, name='dispatch')
class PaymentCallBackView(RedirectView):
"""
    Payment callback view that redirects to the dashboard URL.
"""
url = DASHBOARD_URL
query_string = True
|
|
import pytest
import demistomock as demisto
from CommonServerPython import formatEpochDate
SERVER_URL = "https://1.2.3.4"
@pytest.fixture(autouse=True)
def get_params(requests_mock, mocker):
mocker.patch.object(
demisto,
"params",
return_value={
"server": SERVER_URL,
"api_key": "1234567890",
"secret_key": "s3cr3t",
"insecure": False,
},
)
def test_list_dsns_command(requests_mock, mocker):
from CounterCraft import list_dsns_command
from test_data.api_response import response_dsns as json_response
requests_mock.get(
f"{SERVER_URL}/api/deception_support_nodes", json=json_response, status_code=200
)
mocker.patch.object(demisto, "results")
list_dsns_command()
results = demisto.results.call_args[0][0]
assert (
results["HumanReadable"]
== "### Deception Support Node\n|Id|Name|Description|Hostname|Port|\n|---|---|---|---|---|\n| 2 | \
Remote DSN | Remote DSN | 1.2.3.3 | 7080 |\n| 1 | Local network | Local network | thedsn | 7080 |\n"
)
assert results["Contents"] == json_response["data"]
assert results["EntryContext"] == {
"CounterCraft.DSN(val.ID && val.ID === obj.ID)": [
{
"ID": 2,
"Name": "Remote DSN",
"Description": "Remote DSN",
"Hostname": "1.2.3.3",
"Port": 7080,
},
{
"ID": 1,
"Name": "Local network",
"Description": "Local network",
"Hostname": "thedsn",
"Port": 7080,
},
]
}
def test_list_providers_command(requests_mock, mocker):
from CounterCraft import list_providers_command
from test_data.api_response import response_providers as json_response
requests_mock.get(
f"{SERVER_URL}/api/providers", json=json_response, status_code=200
)
mocker.patch.object(demisto, "results")
list_providers_command()
results = demisto.results.call_args[0][0]
assert (
results["HumanReadable"]
== "### Providers\n|Id|Name|Description|Typecode|Statuscode|\n|---|---|---|---|---|\n| 1 | \
ManualMachine | Hosts that are manually created | MANUAL_MACHINE | ACTIVE |\n| 2 | CompanyProvider | \
Hosts that are automatically created when activating breadcrumbs | COMPANY_PROVIDER | ACTIVE |\n| 3 | \
ManualIdentity | Identities that are manually created | MANUAL_IDENTITY | ACTIVE |\n| 4 | ManualRouter | \
Routers that are manually created | MANUAL_ROUTER | ACTIVE |\n| 5 | MISP Provider | | MISP_PROVIDER | ACTIVE |\n"
)
assert results["Contents"] == json_response["data"]
assert results["EntryContext"] == {
"CounterCraft.Provider(val.ID && val.ID === obj.ID)": [
{
"ID": 1,
"Name": "ManualMachine",
"Description": "Hosts that are manually created",
"TypeCode": "MANUAL_MACHINE",
"StatusCode": "ACTIVE",
},
{
"ID": 2,
"Name": "CompanyProvider",
"Description": "Hosts that are automatically created when activating breadcrumbs",
"TypeCode": "COMPANY_PROVIDER",
"StatusCode": "ACTIVE",
},
{
"ID": 3,
"Name": "ManualIdentity",
"Description": "Identities that are manually created",
"TypeCode": "MANUAL_IDENTITY",
"StatusCode": "ACTIVE",
},
{
"ID": 4,
"Name": "ManualRouter",
"Description": "Routers that are manually created",
"TypeCode": "MANUAL_ROUTER",
"StatusCode": "ACTIVE",
},
{
"ID": 5,
"Name": "MISP Provider",
"TypeCode": "MISP_PROVIDER",
"StatusCode": "ACTIVE",
},
]
}
def test_list_campaigns_command(requests_mock, mocker):
from CounterCraft import list_campaigns_command
from test_data.api_response import response_campaigns as json_response
requests_mock.get(
f"{SERVER_URL}/api/campaigns", json=json_response, status_code=200
)
mocker.patch.object(demisto, "results")
list_campaigns_command()
results = demisto.results.call_args[0][0]
assert (
results["HumanReadable"]
== "### Campaigns\n|Id|Name|Description|Statuscode|\n|---|---|---|---|\n| 1 | \
Devel Campaign | Campaign just to be used in devel | DESIGN |\n| 2 | 2nd Campaign | \
Campaign just to be used in devel 2 | DESIGN |\n"
)
assert results["Contents"] == json_response["data"]
assert results["EntryContext"] == {
"CounterCraft.Campaign(val.ID && val.ID === obj.ID)": [
{
"ID": 1,
"Name": "Devel Campaign",
"Description": "Campaign just to be used in devel",
"StatusCode": "DESIGN",
},
{
"ID": 2,
"Name": "2nd Campaign",
"Description": "Campaign just to be used in devel 2",
"StatusCode": "DESIGN",
},
]
}
def test_list_hosts_command(requests_mock, mocker):
from CounterCraft import list_hosts_command
from test_data.api_response import response_hosts as json_response
requests_mock.get(f"{SERVER_URL}/api/hosts", json=json_response, status_code=200)
mocker.patch.object(demisto, "results")
list_hosts_command()
results = demisto.results.call_args[0][0]
assert (
results["HumanReadable"]
== "### Hosts\n|Id|Name|Description|Statuscode|Typecode|\n|---|---|---|---|---|\n| 1 | \
Linux in AWS | Linux machine in AWS | DESIGN | MACHINE |\n"
)
assert results["Contents"] == json_response["data"]
assert results["EntryContext"] == {
"CounterCraft.Host(val.ID && val.ID === obj.ID)": [
{
"ID": 1,
"Name": "Linux in AWS",
"Description": "Linux machine in AWS",
"TypeCode": "MACHINE",
"StatusCode": "DESIGN",
}
],
'Host(val.IP && val.IP === obj.IP)': [
{
'ID': '61daa693-11cf-49a6-8fae-5111f630ee39',
'IP': '1.4.5.6'
}
]
}
def test_list_services_command(requests_mock, mocker):
from CounterCraft import list_services_command
from test_data.api_response import response_services as json_response
requests_mock.get(f"{SERVER_URL}/api/services", json=json_response, status_code=200)
mocker.patch.object(demisto, "results")
list_services_command()
results = demisto.results.call_args[0][0]
assert (
results["HumanReadable"]
== "### Services\n|Id|Name|Description|Statuscode|Typecode|\n|---|---|---|---|---|\n| 1 | \
Employee web portal | <p>-</p> | ACTIVE | WEB_SERVER |\n| 2 | Test | <p>-</p> | DESIGN | WEB_SERVER |\n"
)
assert results["Contents"] == json_response["data"]
assert results["EntryContext"] == {
"CounterCraft.Service(val.ID && val.ID === obj.ID)": [
{
"ID": 1,
"Name": "Employee web portal",
"Description": "<p>-</p>",
"TypeCode": "WEB_SERVER",
"StatusCode": "ACTIVE",
},
{
"ID": 2,
"Name": "Test",
"Description": "<p>-</p>",
"TypeCode": "WEB_SERVER",
"StatusCode": "DESIGN",
},
]
}
def test_list_breadcrumbs_command(requests_mock, mocker):
from CounterCraft import list_breadcrumbs_command
from test_data.api_response import response_breadcrumbs as json_response
requests_mock.get(
f"{SERVER_URL}/api/breadcrumbs", json=json_response, status_code=200
)
mocker.patch.object(demisto, "results")
list_breadcrumbs_command()
results = demisto.results.call_args[0][0]
assert (
results["HumanReadable"]
== "### Breadcrumbs\n|Id|Name|Description|Statuscode|Typecode|\n|---|---|---|---|---|\n| 1 | \
Fake Document | <p>-</p> | DESIGN | DOCUMENT |\n| 2 | Fake Mobile App | <p>-</p> | ACTIVE | MOBILE_APP |\n"
)
assert results["Contents"] == json_response["data"]
assert results["EntryContext"] == {
"CounterCraft.Breadcrumb(val.ID && val.ID === obj.ID)": [
{
"ID": 1,
"Name": "Fake Document",
"Description": "<p>-</p>",
"TypeCode": "DOCUMENT",
"StatusCode": "DESIGN",
},
{
"ID": 2,
"Name": "Fake Mobile App",
"Description": "<p>-</p>",
"TypeCode": "MOBILE_APP",
"StatusCode": "ACTIVE",
},
]
}
def test_list_incidents_command(requests_mock, mocker):
from CounterCraft import list_incidents_command
from test_data.api_response import response_incidents as json_response
requests_mock.get(
f"{SERVER_URL}/api/incidents", json=json_response, status_code=200
)
mocker.patch.object(demisto, "results")
list_incidents_command()
results = demisto.results.call_args[0][0]
assert (
results["HumanReadable"]
== "### Incidents\n|Id|Name|Description|Statuscode|Tlpcode|Tags|\n|---|---|---|---|---|---|\n| 1 | \
Invalid auth | Invalid auth incident. | OPEN | GREEN | |\n"
)
assert results["Contents"] == json_response["data"]
assert results["EntryContext"] == {
"CounterCraft.Incident(val.ID && val.ID === obj.ID)": [
{
"ID": 1,
"Name": "Invalid auth",
"Description": "Invalid auth incident.",
"StatusCode": "OPEN",
"TLPCode": "GREEN",
}
]
}
def test_get_object_command(requests_mock, mocker):
from CounterCraft import get_object_command
from test_data.api_response import response_objects as json_response
mocker.patch.object(demisto, "args", return_value={"value": "1.2.3.3"})
requests_mock.get(f"{SERVER_URL}/api/objects", json=json_response, status_code=200)
mocker.patch.object(demisto, "results")
get_object_command()
results = demisto.results.call_args[0][0]
assert (
results["HumanReadable"]
== f"### Objects\n|Id|Value|Hits|Eventscount|Typecode|Score|Firstseen|Lastseen|Tags|\n|\
---|---|---|---|---|---|---|---|---|\n| 1411 | 1.2.3.3 | 370 | 168 | CC_IP | 0 | \
{formatEpochDate(json_response['data'][0]['first_seen'])} | {formatEpochDate(json_response['data'][0]['last_seen'])} | |\n"
)
# Only dates have been changed
results["Contents"][0]["first_seen"] = 1507030039.331
results["Contents"][0]["last_seen"] = 1507313997.703
assert results["Contents"] == json_response["data"]
assert results["EntryContext"] == {
"CounterCraft.Object(val.ID && val.ID === obj.ID)": [
{
"ID": 1411,
"Value": "1.2.3.3",
"Hits": 370,
"Score": 0,
"TypeCode": "CC_IP",
"FirstSeen": f"{formatEpochDate(json_response['data'][0]['first_seen'])}",
"LastSeen": f"{formatEpochDate(json_response['data'][0]['last_seen'])}",
"EventsCount": 168,
}
]
}
def test_get_events_command(requests_mock, mocker):
from CounterCraft import get_events_command
from test_data.api_response import response_events as json_response
mocker.patch.object(
demisto,
"args",
return_value={"criteria": "type_code:ValidAuth", "max_results": 1},
)
requests_mock.get(f"{SERVER_URL}/api/events", json=json_response, status_code=200)
mocker.patch.object(demisto, "results")
get_events_command()
results = demisto.results.call_args[0][0]
assert (
results["HumanReadable"]
== f"### Events\n|Id|Campaignname|Categorycode|Hostname|Servicename|Eventdate|Score|\
Typecode|Data|Tags|\n|---|---|---|---|---|---|---|---|---|---|\n| 7882 | Linux Campaign | | \
Ubuntu18.04 | SYSTEM (Ubuntu18.04) | {formatEpochDate(json_response['data'][0]['event_date'])} | 100 | ValidAuth | \
event: ValidAuth<br>subject: Login successful<br>username: ubuntu<br>logon_type: -1<br>process_basename: su | \
attack.T1078 |\n"
)
# Only dates have been changed
results["Contents"][0]["event_date"] = 1570049630.0
assert results["Contents"] == json_response["data"]
assert results["EntryContext"] == {
"CounterCraft.Event(val.ID && val.ID === obj.ID)": [
{
"ID": 7882,
"CampaignName": "Linux Campaign",
"HostName": "Ubuntu18.04",
"ServiceName": "SYSTEM (Ubuntu18.04)",
"EventDate": f"{formatEpochDate(json_response['data'][0]['event_date'])}",
"Score": 100,
"TypeCode": "ValidAuth",
"Data": {
"event": "ValidAuth",
"subject": "Login successful",
"username": "ubuntu",
"logon_type": -1,
"process_basename": "su",
},
"Tags": ["attack.T1078"],
}
]
}
def test_create_campaign_command(requests_mock, mocker):
from CounterCraft import create_campaign_command
from test_data.api_response import response_campaigns as json_response
mocker.patch.object(
demisto,
"args",
return_value={"name": "TestCampaign", "description": "Test Description"},
)
requests_mock.post(
f"{SERVER_URL}/api/campaigns", json=json_response["data"][0], status_code=201
)
mocker.patch.object(demisto, "results")
create_campaign_command()
results = demisto.results.call_args[0][0]
assert (
results["HumanReadable"]
== "### Campaign\n|Id|Name|Description|Statuscode|\n|---|---|---|---|\n| 1 | Devel Campaign | \
Campaign just to be used in devel | DESIGN |\n"
)
assert results["Contents"] == json_response["data"][0]
assert results["EntryContext"] == {
"CounterCraft.Campaign(val.ID && val.ID === obj.ID)": {
"ID": 1,
"Name": "Devel Campaign",
"Description": "Campaign just to be used in devel",
"StatusCode": "DESIGN",
}
}
def test_manage_campaign_command(requests_mock, mocker):
from CounterCraft import manage_campaign_command
mocker.patch.object(
demisto, "args", return_value={"campaign_id": "1", "operation": "activate"},
)
requests_mock.patch(
f"{SERVER_URL}/api/campaigns/1",
json={"message": "Action successful"},
status_code=200,
)
mocker.patch.object(demisto, "results")
manage_campaign_command()
results = demisto.results.call_args[0][0]
assert (
results["HumanReadable"]
== "### Campaign Management\n|Id|Message|\n|---|---|\n| 1 | Action successful |\n"
)
assert results["Contents"] == {"message": "Action successful"}
assert results["EntryContext"] == {
"CounterCraft.Campaign(val.ID && val.ID === obj.ID)": [
{"ID": "1", "Message": "Action successful"}
]
}
def test_create_host_command(requests_mock, mocker):
from CounterCraft import create_host_machine_command
from test_data.api_response import response_hosts as json_response
mocker.patch.object(
demisto,
"args",
return_value={
"name": "TestCampaign",
"description": "Test Description",
"provider_id": 1,
"deception_support_node_id": 1,
"campaign_id": 1,
"ip_address": "1.1.1.1",
"port": 22,
"username": "ubuntu",
"password": "password",
},
)
requests_mock.post(
f"{SERVER_URL}/api/hosts", json=json_response["data"][0], status_code=201
)
mocker.patch.object(demisto, "results")
create_host_machine_command()
results = demisto.results.call_args[0][0]
assert (
results["HumanReadable"]
== "### Hosts\n|Id|Name|Description|Statuscode|Typecode|\n|---|---|---|---|---|\n| 1 | \
Linux in AWS | Linux machine in AWS | DESIGN | MACHINE |\n"
)
assert results["Contents"] == json_response["data"][0]
assert results["EntryContext"] == {
"CounterCraft.Host(val.ID && val.ID === obj.ID)": {
"ID": 1,
"Name": "Linux in AWS",
"Description": "Linux machine in AWS",
"TypeCode": "MACHINE",
"StatusCode": "DESIGN",
}
}
def test_manage_host_command(requests_mock, mocker):
from CounterCraft import manage_host_command
mocker.patch.object(
demisto, "args", return_value={"host_id": "1", "operation": "activate"},
)
requests_mock.patch(
f"{SERVER_URL}/api/hosts/1",
json={"message": "Action successful"},
status_code=200,
)
mocker.patch.object(demisto, "results")
manage_host_command()
results = demisto.results.call_args[0][0]
assert (
results["HumanReadable"]
== "### Host Management\n|Id|Message|\n|---|---|\n| 1 | Action successful |\n"
)
assert results["Contents"] == {"message": "Action successful"}
assert results["EntryContext"] == {
"CounterCraft.Host(val.ID && val.ID === obj.ID)": {
"ID": "1",
"Message": "Action successful",
}
}
def test_manage_service_command(requests_mock, mocker):
from CounterCraft import manage_service_command
mocker.patch.object(
demisto, "args", return_value={"service_id": "1", "operation": "activate"},
)
requests_mock.patch(
f"{SERVER_URL}/api/services/1",
json={"message": "Action successful"},
status_code=200,
)
mocker.patch.object(demisto, "results")
manage_service_command()
results = demisto.results.call_args[0][0]
assert (
results["HumanReadable"]
== "### Service Management\n|Id|Message|\n|---|---|\n| 1 | Action successful |\n"
)
assert results["Contents"] == {"message": "Action successful"}
assert results["EntryContext"] == {
"CounterCraft.Service(val.ID && val.ID === obj.ID)": {
"ID": "1",
"Message": "Action successful",
}
}
def test_manage_breadcrumb_command(requests_mock, mocker):
from CounterCraft import manage_breadcrumb_command
mocker.patch.object(
demisto, "args", return_value={"breadcrumb_id": "1", "operation": "activate"},
)
requests_mock.patch(
f"{SERVER_URL}/api/breadcrumbs/1",
json={"message": "Action successful"},
status_code=200,
)
mocker.patch.object(demisto, "results")
manage_breadcrumb_command()
results = demisto.results.call_args[0][0]
assert (
results["HumanReadable"]
== "### Breadcrumb Management\n|Id|Message|\n|---|---|\n| 1 | Action successful |\n"
)
assert results["Contents"] == {"message": "Action successful"}
assert results["EntryContext"] == {
"CounterCraft.Breadcrumb(val.ID && val.ID === obj.ID)": {
"ID": "1",
"Message": "Action successful",
}
}
def test_fetch_incidents_command(requests_mock, mocker):
from CounterCraft import fetch_incidents_command
from test_data.api_response import response_alerts as json_response
requests_mock.get(
f"{SERVER_URL}/api/notifications", json=json_response, status_code=200,
)
mocker.patch.object(demisto, "incidents")
fetch_incidents_command()
incidents = demisto.incidents.call_args[0][0]
assert demisto.incidents.call_count == 1
assert len(incidents) == 1
assert incidents[0]["name"] == "Possible mimikatz"
|
|
import bz2
import gzip
import os
import shutil
import sys
import warnings
from ..extern.six import print_, string_types
from ..file import _File
from ..util import (_is_int, _tmp_name, _pad_length, ignore_sigint,
_get_array_mmap, indent, fileobj_closed,
PyfitsDeprecationWarning)
from ..verify import _Verify, _ErrList, VerifyError, VerifyWarning
from . import compressed
from .base import _BaseHDU, _ValidHDU, _NonstandardHDU, ExtensionHDU
from .groups import GroupsHDU
from .image import PrimaryHDU, ImageHDU
def fitsopen(name, mode='readonly', memmap=None, save_backup=False, **kwargs):
"""Factory function to open a FITS file and return an `HDUList` object.
Parameters
----------
name : file path, file object or file-like object
File to be opened.
mode : str, optional
Open mode, 'readonly' (default), 'update', 'append', 'denywrite', or
'ostream'.
If ``name`` is a file object that is already opened, ``mode`` must
        match the mode the file was opened with: readonly (rb), update (rb+),
        append (ab+), ostream (w), denywrite (rb).
memmap : bool, optional
Is memory mapping to be used?
save_backup : bool, optional
If the file was opened in update or append mode, this ensures that a
backup of the original file is saved before any changes are flushed.
The backup has the same name as the original file with ".bak" appended.
If "file.bak" already exists then "file.bak.1" is used, and so on.
kwargs : dict, optional
additional optional keyword arguments, possible values are:
- **uint** : bool
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, ``int16`` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as ``uint16`` data.
This is enabled by default so that the pseudo-unsigned
integer convention is assumed.
Note, for backward compatibility, the kwarg **uint16** may
be used instead. The kwarg was renamed when support was
added for integers of any size.
- **ignore_missing_end** : bool
Do not issue an exception when opening a file that is
missing an ``END`` card in the last header.
- **checksum** : bool, str
If `True`, verifies that both ``DATASUM`` and
``CHECKSUM`` card values (when present in the HDU header)
match the header and data of all HDU's in the file. Updates to a
file that already has a checksum will preserve and update the
existing checksums unless this argument is given a value of
'remove', in which case the CHECKSUM and DATASUM values are not
checked, and are removed when saving changes to the file.
- **disable_image_compression** : bool
If `True`, treats compressed image HDU's like normal
binary table HDU's.
- **do_not_scale_image_data** : bool
If `True`, image data is not scaled using BSCALE/BZERO values
when read.
- **ignore_blank** : bool
If `True`, the BLANK keyword is ignored if present.
- **scale_back** : bool
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data.
Returns
-------
hdulist : an `HDUList` object
`HDUList` containing all of the header data units in the
file.
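    Examples
    --------
    A minimal usage sketch; ``'example.fits'`` is a hypothetical file name.
    >>> import pyfits
    >>> hdulist = pyfits.open('example.fits')  # pyfits.open calls this function
    >>> hdulist.info()
    >>> hdulist.close()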
"""
if memmap is None:
from pyfits import USE_MEMMAP
# distinguish between True (kwarg explicitly set)
# and None (preference for memmap in config, might be ignored)
memmap = None if USE_MEMMAP else False
if 'uint16' in kwargs and 'uint' not in kwargs:
kwargs['uint'] = kwargs['uint16']
del kwargs['uint16']
warnings.warn(
'The uint16 keyword argument is deprecated since v3.4.0. Use '
'the uint argument instead.', PyfitsDeprecationWarning)
if 'uint' not in kwargs:
from pyfits import ENABLE_UINT
kwargs['uint'] = ENABLE_UINT
if not name:
raise ValueError('Empty filename: %s' % repr(name))
return HDUList.fromfile(name, mode, memmap, save_backup, **kwargs)
class HDUList(list, _Verify):
"""
HDU list class. This is the top-level FITS object. When a FITS
file is opened, a `HDUList` object is returned.
"""
def __init__(self, hdus=[], file=None):
"""
Construct a `HDUList` object.
Parameters
----------
hdus : sequence of HDU objects or single HDU, optional
The HDU object(s) to comprise the `HDUList`. Should be
instances of HDU classes like `ImageHDU` or `BinTableHDU`.
file : file object, optional
The opened physical file associated with the `HDUList`.
"""
self._file = file
self._save_backup = False
if hdus is None:
hdus = []
# can take one HDU, as well as a list of HDU's as input
if isinstance(hdus, _ValidHDU):
hdus = [hdus]
elif not isinstance(hdus, (HDUList, list)):
raise TypeError("Invalid input for HDUList.")
for idx, hdu in enumerate(hdus):
if not isinstance(hdu, _BaseHDU):
raise TypeError(
"Element %d in the HDUList input is not an HDU." % idx)
super(HDUList, self).__init__(hdus)
self.update_extend()
def __iter__(self):
for idx in range(len(self)):
yield self[idx]
def __getitem__(self, key):
"""
Get an HDU from the `HDUList`, indexed by number or name.
"""
if isinstance(key, slice):
hdus = super(HDUList, self).__getitem__(key)
return HDUList(hdus)
idx = self.index_of(key)
return super(HDUList, self).__getitem__(idx)
def __contains__(self, item):
"""
Returns `True` if ``HDUList.index_of(item)`` succeeds.
"""
try:
self.index_of(item)
return True
except KeyError:
return False
def __setitem__(self, key, hdu):
"""
Set an HDU to the `HDUList`, indexed by number or name.
"""
_key = self.index_of(key)
if isinstance(hdu, (slice, list)):
if _is_int(_key):
raise ValueError('An element in the HDUList must be an HDU.')
for item in hdu:
if not isinstance(item, _BaseHDU):
raise ValueError('%s is not an HDU.' % item)
else:
if not isinstance(hdu, _BaseHDU):
raise ValueError('%s is not an HDU.' % hdu)
try:
super(HDUList, self).__setitem__(_key, hdu)
except IndexError:
            raise IndexError('Extension %s is out of bounds or not found.'
% key)
self._resize = True
self._truncate = False
def __delitem__(self, key):
"""
Delete an HDU from the `HDUList`, indexed by number or name.
"""
if isinstance(key, slice):
end_index = len(self)
else:
key = self.index_of(key)
end_index = len(self) - 1
super(HDUList, self).__delitem__(key)
if (key == end_index or key == -1 and not self._resize):
self._truncate = True
else:
self._truncate = False
self._resize = True
def __getslice__(self, start, end):
return self[slice(start, end)]
def __delslice__(self, start, stop):
"""
Delete a slice of HDUs from the `HDUList`, indexed by number only.
"""
del self[slice(start, stop)]
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
@classmethod
def fromfile(cls, fileobj, mode=None, memmap=None,
save_backup=False, **kwargs):
"""
Creates an `HDUList` instance from a file-like object.
        This is the actual implementation of ``fitsopen()`` and generally
        shouldn't be used directly. Use :func:`pyfits.open` instead (and see
        its documentation for details of the parameters accepted by this
        method).
"""
return cls._readfrom(fileobj=fileobj, mode=mode, memmap=memmap,
save_backup=save_backup, **kwargs)
@classmethod
def fromstring(cls, data, **kwargs):
"""
Creates an `HDUList` instance from a string or other in-memory data
buffer containing an entire FITS file. Similar to
:meth:`HDUList.fromfile`, but does not accept the mode or memmap
arguments, as they are only relevant to reading from a file on disk.
This is useful for interfacing with other libraries such as CFITSIO,
and may also be useful for streaming applications.
Parameters
----------
data : str, buffer, memoryview, etc.
A string or other memory buffer containing an entire FITS file. It
should be noted that if that memory is read-only (such as a Python
string) the returned :class:`HDUList`'s data portions will also be
read-only.
kwargs : dict
Optional keyword arguments. See :func:`pyfits.open` for details.
Returns
-------
hdul : HDUList
An :class:`HDUList` object representing the in-memory FITS file.
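        Examples
        --------
        A small sketch, assuming ``'example.fits'`` is an existing FITS file
        whose raw bytes are read into memory first.
        >>> with open('example.fits', 'rb') as f:
        ...     buf = f.read()
        >>> hdulist = HDUList.fromstring(buf)
        >>> len(hdulist)  # number of HDUs parsed from the buffer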
"""
return cls._readfrom(data=data, **kwargs)
def fileinfo(self, index):
"""
Returns a dictionary detailing information about the locations
of the indexed HDU within any associated file. The values are
only valid after a read or write of the associated file with
no intervening changes to the `HDUList`.
Parameters
----------
index : int
Index of HDU for which info is to be returned.
Returns
-------
fileinfo : dict or None
The dictionary details information about the locations of
the indexed HDU within an associated file. Returns `None`
when the HDU is not associated with a file.
Dictionary contents:
========== ========================================================
Key Value
========== ========================================================
file File object associated with the HDU
filename Name of associated file object
filemode Mode in which the file was opened (readonly,
update, append, denywrite, ostream)
resized Flag that when `True` indicates that the data has been
resized since the last read/write so the returned values
may not be valid.
hdrLoc Starting byte location of header in file
datLoc Starting byte location of data block in file
datSpan Data size including padding
========== ========================================================
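        Examples
        --------
        A short sketch, assuming ``hdulist`` was opened from a file on disk.
        >>> info = hdulist.fileinfo(0)
        >>> if info is not None:
        ...     hdr_start, data_start = info['hdrLoc'], info['datLoc']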
"""
if self._file is not None:
output = self[index].fileinfo()
if not output:
# OK, the HDU associated with this index is not yet
# tied to the file associated with the HDUList. The only way
# to get the file object is to check each of the HDU's in the
# list until we find the one associated with the file.
                f = fm = None
for hdu in self:
info = hdu.fileinfo()
if info:
f = info['file']
fm = info['filemode']
break
output = {'file': f, 'filemode': fm, 'hdrLoc': None,
'datLoc': None, 'datSpan': None}
output['filename'] = self._file.name
output['resized'] = self._wasresized()
else:
output = None
return output
def insert(self, index, hdu):
"""
Insert an HDU into the `HDUList` at the given ``index``.
Parameters
----------
index : int
Index before which to insert the new HDU.
hdu : HDU object
The HDU object to insert
"""
if not isinstance(hdu, _BaseHDU):
raise ValueError('%s is not an HDU.' % hdu)
num_hdus = len(self)
if index == 0 or num_hdus == 0:
if num_hdus != 0:
# We are inserting a new Primary HDU so we need to
# make the current Primary HDU into an extension HDU.
if isinstance(self[0], GroupsHDU):
raise ValueError(
"The current Primary HDU is a GroupsHDU. "
"It can't be made into an extension HDU, "
"so another HDU cannot be inserted before it.")
hdu1 = ImageHDU(self[0].data, self[0].header)
# Insert it into position 1, then delete HDU at position 0.
super(HDUList, self).insert(1, hdu1)
super(HDUList, self).__delitem__(0)
if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)):
# You passed in an Extension HDU but we need a Primary HDU.
# If you provided an ImageHDU then we can convert it to
# a primary HDU and use that.
if isinstance(hdu, ImageHDU):
hdu = PrimaryHDU(hdu.data, hdu.header)
else:
# You didn't provide an ImageHDU so we create a
# simple Primary HDU and append that first before
# we append the new Extension HDU.
phdu = PrimaryHDU()
super(HDUList, self).insert(0, phdu)
index = 1
else:
if isinstance(hdu, GroupsHDU):
raise ValueError('A GroupsHDU must be inserted as a '
'Primary HDU.')
if isinstance(hdu, PrimaryHDU):
# You passed a Primary HDU but we need an Extension HDU
# so create an Extension HDU from the input Primary HDU.
hdu = ImageHDU(hdu.data, hdu.header)
super(HDUList, self).insert(index, hdu)
hdu._new = True
self._resize = True
self._truncate = False
# make sure the EXTEND keyword is in primary HDU if there is extension
self.update_extend()
def append(self, hdu):
"""
Append a new HDU to the `HDUList`.
Parameters
----------
hdu : HDU object
HDU to add to the `HDUList`.
"""
if not isinstance(hdu, _BaseHDU):
raise ValueError('HDUList can only append an HDU.')
if len(self) > 0:
if isinstance(hdu, GroupsHDU):
raise ValueError(
"Can't append a GroupsHDU to a non-empty HDUList")
if isinstance(hdu, PrimaryHDU):
# You passed a Primary HDU but we need an Extension HDU
# so create an Extension HDU from the input Primary HDU.
# TODO: This isn't necessarily sufficient to copy the HDU;
# _header_offset and friends need to be copied too.
hdu = ImageHDU(hdu.data, hdu.header)
else:
if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)):
# You passed in an Extension HDU but we need a Primary
# HDU.
# If you provided an ImageHDU then we can convert it to
# a primary HDU and use that.
if isinstance(hdu, ImageHDU):
hdu = PrimaryHDU(hdu.data, hdu.header)
else:
# You didn't provide an ImageHDU so we create a
# simple Primary HDU and append that first before
# we append the new Extension HDU.
phdu = PrimaryHDU()
super(HDUList, self).append(phdu)
super(HDUList, self).append(hdu)
hdu._new = True
self._resize = True
self._truncate = False
# make sure the EXTEND keyword is in primary HDU if there is extension
self.update_extend()
def index_of(self, key):
"""
Get the index of an HDU from the `HDUList`.
Parameters
----------
key : int, str or tuple of (string, int)
The key identifying the HDU. If ``key`` is a tuple, it is of the
form ``(key, ver)`` where ``ver`` is an ``EXTVER`` value that must
match the HDU being searched for.
Returns
-------
index : int
The index of the HDU in the `HDUList`.
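        Examples
        --------
        A brief sketch; the extension name ``'SCI'`` and version ``2`` are
        illustrative only.
        >>> idx = hdulist.index_of('PRIMARY')    # name lookup; 0 for the primary HDU
        >>> idx = hdulist.index_of(('SCI', 2))   # (EXTNAME, EXTVER) lookup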
"""
if _is_int(key):
return key
elif isinstance(key, tuple):
_key, _ver = key
else:
_key = key
_ver = None
if not isinstance(_key, string_types):
raise KeyError(key)
_key = (_key.strip()).upper()
nfound = 0
found = None
for idx, hdu in enumerate(self):
name = hdu.name
if isinstance(name, string_types):
name = name.strip().upper()
# 'PRIMARY' should always work as a reference to the first HDU
if ((name == _key or (_key == 'PRIMARY' and idx == 0)) and
(_ver is None or _ver == hdu.ver)):
found = idx
nfound += 1
if (nfound == 0):
raise KeyError('Extension %s not found.' % repr(key))
elif (nfound > 1):
raise KeyError('There are %d extensions of %s.'
% (nfound, repr(key)))
else:
return found
def readall(self):
"""
Read data of all HDUs into memory.
"""
for hdu in self:
if hdu.data is not None:
continue
@ignore_sigint
def flush(self, output_verify='fix', verbose=False):
"""
Force a write of the `HDUList` back to the file (for append and
update modes only).
Parameters
----------
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
            ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
verbose : bool
When `True`, print verbose messages
"""
if self._file.mode not in ('append', 'update', 'ostream'):
warnings.warn("Flush for '%s' mode is not supported."
% self._file.mode)
return
if self._save_backup and self._file.mode in ('append', 'update'):
filename = self._file.name
if os.path.exists(filename):
                # If the file doesn't actually exist anymore for some reason
                # then there's no point in trying to make a backup
backup = filename + '.bak'
idx = 1
while os.path.exists(backup):
backup = filename + '.bak.' + str(idx)
idx += 1
warnings.warn('Saving a backup of %s to %s.' %
(filename, backup))
try:
shutil.copy(filename, backup)
except IOError as exc:
raise IOError('Failed to save backup to destination %s: '
                                  '%s' % (backup, exc))
self.verify(option=output_verify)
if self._file.mode in ('append', 'ostream'):
for hdu in self:
if verbose:
try:
extver = str(hdu._header['extver'])
except KeyError:
extver = ''
# only append HDU's which are "new"
if hdu._new:
hdu._prewriteto(checksum=hdu._output_checksum)
try:
hdu._writeto(self._file)
if verbose:
print_('append HDU', hdu.name, extver)
hdu._new = False
finally:
hdu._postwriteto()
elif self._file.mode == 'update':
self._flush_update()
def update_extend(self):
"""
        Make sure that if the primary header needs the ``EXTEND`` keyword, it
        is present and has the correct value.
"""
if not len(self):
return
if not isinstance(self[0], PrimaryHDU):
# A PrimaryHDU will be automatically inserted at some point, but it
# might not have been added yet
return
hdr = self[0].header
if 'EXTEND' in hdr:
if len(self) > 1 and hdr['EXTEND'] == False:
hdr['EXTEND'] = True
elif len(self) > 1:
if hdr['NAXIS'] == 0:
hdr.set('EXTEND', True, after='NAXIS')
else:
n = hdr['NAXIS']
hdr.set('EXTEND', True, after='NAXIS' + str(n))
def writeto(self, fileobj, output_verify='exception', clobber=False,
checksum=False):
"""
Write the `HDUList` to a new file.
Parameters
----------
fileobj : file path, file object or file-like object
File to write to. If a file object, must be opened in a
writeable mode.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
            ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
clobber : bool
When `True`, overwrite the output file if exists.
checksum : bool
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards
to the headers of all HDU's written to the file.
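        Examples
        --------
        A minimal sketch; ``'new.fits'`` is an illustrative output name only.
        >>> hdulist.writeto('new.fits', clobber=True, checksum=True)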
"""
if (len(self) == 0):
warnings.warn("There is nothing to write.")
return
self.verify(option=output_verify)
# make sure the EXTEND keyword is there if there is extension
self.update_extend()
# make note of whether the input file object is already open, in which
# case we should not close it after writing (that should be the job
# of the caller)
closed = isinstance(fileobj, string_types) or fileobj_closed(fileobj)
# writeto is only for writing a new file from scratch, so the most
# sensible mode to require is 'ostream'. This can accept an open
# file object that's open to write only, or in append/update modes
# but only if the file doesn't exist.
fileobj = _File(fileobj, mode='ostream', clobber=clobber)
hdulist = self.fromfile(fileobj)
for hdu in self:
hdu._prewriteto(checksum=checksum)
try:
hdu._writeto(hdulist._file)
finally:
hdu._postwriteto()
hdulist.close(output_verify=output_verify, closed=closed)
def close(self, output_verify='exception', verbose=False, closed=True):
"""
Close the associated FITS file and memmap object, if any.
Parameters
----------
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
            ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
verbose : bool
When `True`, print out verbose messages.
closed : bool
When `True`, close the underlying file object.
"""
if self._file:
if self._file.mode in ['append', 'update']:
self.flush(output_verify=output_verify, verbose=verbose)
if closed and hasattr(self._file, 'close'):
self._file.close()
# Give individual HDUs an opportunity to do on-close cleanup
for hdu in self:
hdu._close(closed=closed)
def info(self, output=None):
"""
Summarize the info of the HDUs in this `HDUList`.
        Note that by default this function prints its results to the console
        and does not return a value; pass ``output=False`` to get the summary
        back as a list of tuples instead.
Parameters
----------
output : file, bool, optional
A file-like object to write the output to. If `False`, does not
output to a file and instead returns a list of tuples representing
the HDU info. Writes to ``sys.stdout`` by default.
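        Examples
        --------
        A short sketch of both modes of use.
        >>> hdulist.info()                      # print the summary to stdout
        >>> rows = hdulist.info(output=False)   # or collect it as a list of tuples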
"""
if output is None:
output = sys.stdout
if self._file is None:
name = '(No file associated with this HDUList)'
else:
name = self._file.name
results = ['Filename: %s' % name,
'No. Name Type Cards Dimensions Format']
format = '%-3d %-10s %-11s %5d %-10s %s %s'
default = ('', '', 0, (), '', '')
for idx, hdu in enumerate(self):
summary = hdu._summary()
if len(summary) < len(default):
summary += default[len(summary):]
summary = (idx,) + summary
if output:
results.append(format % summary)
else:
results.append(summary)
if output:
output.write('\n'.join(results))
output.write('\n')
output.flush()
else:
return results[2:]
def filename(self):
"""
Return the file name associated with the HDUList object if one exists.
Otherwise returns None.
Returns
-------
filename : a string containing the file name associated with the
HDUList object if an association exists. Otherwise returns
None.
"""
if self._file is not None:
if hasattr(self._file, 'name'):
return self._file.name
return None
@classmethod
def _readfrom(cls, fileobj=None, data=None, mode=None,
memmap=None, save_backup=False, **kwargs):
"""
Provides the implementations from HDUList.fromfile and
HDUList.fromstring, both of which wrap this method, as their
implementations are largely the same.
"""
if fileobj is not None:
if not isinstance(fileobj, _File):
# instantiate a FITS file object (ffo)
ffo = _File(fileobj, mode=mode, memmap=memmap)
else:
ffo = fileobj
# The pyfits mode is determined by the _File initializer if the
# supplied mode was None
mode = ffo.mode
hdulist = cls(file=ffo)
else:
if mode is None:
# The default mode
mode = 'readonly'
hdulist = cls()
# This method is currently only called from HDUList.fromstring and
# HDUList.fromfile. If fileobj is None then this must be the
# fromstring case; the data type of ``data`` will be checked in the
# _BaseHDU.fromstring call.
hdulist._save_backup = save_backup
saved_compression_enabled = compressed.COMPRESSION_ENABLED
try:
if ('disable_image_compression' in kwargs and
kwargs['disable_image_compression']):
compressed.COMPRESSION_ENABLED = False
# read all HDUs
while True:
try:
if fileobj is not None:
if ffo.writeonly:
# Output stream--not interested in reading/parsing
# the HDUs--just writing to the output file
return hdulist
try:
hdu = _BaseHDU.readfrom(ffo, **kwargs)
except EOFError:
break
except IOError:
if ffo.writeonly:
break
else:
raise
else:
if not data:
break
hdu = _BaseHDU.fromstring(data)
data = data[hdu._data_offset + hdu._data_size:]
hdulist.append(hdu)
hdu._new = False
if 'checksum' in kwargs:
hdu._output_checksum = kwargs['checksum']
                # check in case there is extra space after the last HDU or
                # a corrupted HDU
except (VerifyError, ValueError) as exc:
warnings.warn(
'Error validating header for HDU #%d (note: PyFITS '
'uses zero-based indexing).\n%s\n'
'There may be extra bytes after the last HDU or the '
'file is corrupted.' %
(len(hdulist), indent(str(exc))), VerifyWarning)
del exc
break
            # If we're trying to read only and no header units were found,
            # raise an exception
if mode in ('readonly', 'denywrite') and len(hdulist) == 0:
raise IOError('Empty or corrupt FITS file')
# initialize/reset attributes to be used in "update/append" mode
hdulist._resize = False
hdulist._truncate = False
finally:
compressed.COMPRESSION_ENABLED = saved_compression_enabled
return hdulist
def _verify(self, option='warn'):
text = ''
errs = _ErrList([], unit='HDU')
# the first (0th) element must be a primary HDU
if len(self) > 0 and (not isinstance(self[0], PrimaryHDU)) and \
(not isinstance(self[0], _NonstandardHDU)):
err_text = "HDUList's 0th element is not a primary HDU."
fix_text = 'Fixed by inserting one as 0th HDU.'
def fix(self=self):
self.insert(0, PrimaryHDU())
err = self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix)
errs.append(err)
if len(self) > 1 and ('EXTEND' not in self[0].header or
self[0].header['EXTEND'] is not True):
err_text = ('Primary HDU does not contain an EXTEND keyword '
'equal to T even though there are extension HDUs.')
fix_text = 'Fixed by inserting or updating the EXTEND keyword.'
def fix(header=self[0].header):
naxis = header['NAXIS']
if naxis == 0:
after = 'NAXIS'
else:
after = 'NAXIS' + str(naxis)
header.set('EXTEND', value=True, after=after)
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix))
# each element calls their own verify
for idx, hdu in enumerate(self):
if idx > 0 and (not isinstance(hdu, ExtensionHDU)):
err_text = ("HDUList's element %s is not an extension HDU." %
str(idx))
err = self.run_option(option, err_text=err_text, fixable=False)
errs.append(err)
else:
result = hdu._verify(option)
if result:
errs.append(result)
return errs
def _flush_update(self):
"""Implements flushing changes to a file in update mode."""
for hdu in self:
            # Need to call _prewriteto() for each HDU first to determine if
            # resizing will be necessary
hdu._prewriteto(checksum=hdu._output_checksum, inplace=True)
try:
self._wasresized()
# if the HDUList is resized, need to write out the entire contents of
# the hdulist to the file.
if self._resize or self._file.compression:
self._flush_resize()
else:
# if not resized, update in place
for hdu in self:
hdu._writeto(self._file, inplace=True)
# reset the modification attributes after updating
for hdu in self:
hdu._header._modified = False
finally:
for hdu in self:
hdu._postwriteto()
def _flush_resize(self):
"""
        Implements flushing changes in update mode when parts of one or more
        HDUs need to be resized.
"""
old_name = self._file.name
old_memmap = self._file.memmap
name = _tmp_name(old_name)
if not self._file.file_like:
old_mode = os.stat(old_name).st_mode
# The underlying file is an actual file object. The HDUList is
# resized, so we need to write it to a tmp file, delete the
# original file, and rename the tmp file to the original file.
if self._file.compression == 'gzip':
new_file = gzip.GzipFile(name, mode='ab+')
elif self._file.compression == 'bzip2':
new_file = bz2.BZ2File(name, mode='w')
else:
new_file = name
hdulist = self.fromfile(new_file, mode='append')
for hdu in self:
hdu._writeto(hdulist._file, inplace=True, copy=True)
if sys.platform.startswith('win'):
                # Collect a list of open mmaps to the data; this will be used
                # later. See below.
mmaps = [(idx, _get_array_mmap(hdu.data), hdu.data)
for idx, hdu in enumerate(self) if hdu._has_data]
hdulist._file.close()
self._file.close()
if sys.platform.startswith('win'):
# Close all open mmaps to the data. This is only necessary on
# Windows, which will not allow a file to be renamed or deleted
# until all handles to that file have been closed.
for idx, mmap, arr in mmaps:
if mmap is not None:
mmap.close()
os.remove(self._file.name)
# reopen the renamed new file with "update" mode
os.rename(name, old_name)
os.chmod(old_name, old_mode)
if isinstance(new_file, gzip.GzipFile):
old_file = gzip.GzipFile(old_name, mode='rb+')
else:
old_file = old_name
ffo = _File(old_file, mode='update', memmap=old_memmap)
self._file = ffo
for hdu in self:
# Need to update the _file attribute and close any open mmaps
# on each HDU
if hdu._has_data and _get_array_mmap(hdu.data) is not None:
del hdu.data
hdu._file = ffo
if sys.platform.startswith('win'):
# On Windows, all the original data mmaps were closed above.
# However, it's possible that the user still has references to
# the old data which would no longer work (possibly even cause
# a segfault if they try to access it). This replaces the
# buffers used by the original arrays with the buffers of mmap
# arrays created from the new file. This seems to work, but
# it's a flaming hack and carries no guarantees that it won't
# lead to odd behavior in practice. Better to just not keep
# references to data from files that had to be resized upon
# flushing (on Windows--again, this is no problem on Linux).
for idx, mmap, arr in mmaps:
if mmap is not None:
arr.data = self[idx].data.data
del mmaps # Just to be sure
else:
# The underlying file is not an actual file object but a
# file-like object, so we can't write out to a new file and
# rename it. Instead we must update the file-like object in
# place: write everything to a temporary file, truncate the
# file-like object, then copy the contents of the temporary
# file back into the now-empty file-like object.
self.writeto(name)
hdulist = self.fromfile(name)
ffo = self._file
ffo.truncate(0)
ffo.seek(0)
for hdu in hdulist:
hdu._writeto(ffo, inplace=True, copy=True)
# Close the temporary file and delete it.
hdulist.close()
os.remove(hdulist._file.name)
# reset the resize attributes after updating
self._resize = False
self._truncate = False
for hdu in self:
hdu._header._modified = False
hdu._new = False
hdu._file = ffo
def _wasresized(self, verbose=False):
"""
Determine if any changes to the HDUList will require a file resize
when flushing the file.
Side effect: sets the object's _resize attribute.
"""
if not self._resize:
# determine if any of the HDUs has been resized
for hdu in self:
# Header:
nbytes = len(str(hdu._header))
if nbytes != (hdu._data_offset - hdu._header_offset):
self._resize = True
self._truncate = False
if verbose:
print_('One or more headers have been resized.')
break
# Data:
if not hdu._has_data:
continue
nbytes = hdu.size
nbytes = nbytes + _pad_length(nbytes)
if nbytes != hdu._data_size:
self._resize = True
self._truncate = False
if verbose:
print_('One or more data areas have been resized.')
break
if self._truncate:
try:
self._file.truncate(hdu._data_offset + hdu._data_size)
except IOError:
self._resize = True
self._truncate = False
return self._resize
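# --- Illustrative usage sketch (not part of this module) ----------------------
# A minimal example of the update-mode flow that _flush_update(), _flush_resize()
# and _wasresized() implement, expressed through the public astropy.io.fits API
# (the maintained successor of this code). The path and header keyword below are
# hypothetical placeholders.
def _demo_update_mode(path='example.fits'):
    from astropy.io import fits

    # Opening in 'update' mode allows in-place edits; flush() runs the resize
    # detection shown above and rewrites the whole file only if something grew.
    with fits.open(path, mode='update') as hdulist:
        hdulist[0].header['OBSERVER'] = 'E. Hubble'
        hdulist.flush()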
|
|
# Copyright (c) 2014 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
"""
This module provides a client class for ACL.
"""
import copy
import json
import logging
import uuid
import sys
from baidubce import bce_base_client
from baidubce.auth import bce_v1_signer
from baidubce.http import bce_http_client
from baidubce.http import handler
from baidubce.http import http_methods
from baidubce import utils
from baidubce.utils import required
from baidubce import compat
if sys.version < '3':
    # sys.setdefaultencoding is removed by site.py; reload restores it (Py2 only)
    reload(sys)
    sys.setdefaultencoding('utf-8')
_logger = logging.getLogger(__name__)
class AclClient(bce_base_client.BceBaseClient):
"""
ACL base sdk client
"""
prefix = b'/v1'
def __init__(self, config=None):
bce_base_client.BceBaseClient.__init__(self, config)
def _merge_config(self, config=None):
"""
:param config:
:type config: baidubce.BceClientConfiguration
:return:
"""
if config is None:
return self.config
else:
new_config = copy.copy(self.config)
new_config.merge_non_none_values(config)
return new_config
def _send_request(self, http_method, path,
body=None, headers=None, params=None,
config=None, body_parser=None):
config = self._merge_config(config)
if body_parser is None:
body_parser = handler.parse_json
if headers is None:
headers = {b'Accept': b'*/*', b'Content-Type':
b'application/json;charset=utf-8'}
return bce_http_client.send_request(
config, bce_v1_signer.sign, [handler.parse_error, body_parser],
http_method, path, body, headers, params)
@required(vpc_id=(bytes, str))
def list_acl_entrys(self, vpc_id, config=None):
"""
Get the detailed acl information for a specific vpc.
:param vpc_id:
the vpc id
:type vpc_id: string
:param config:
:type config: baidubce.BceClientConfiguration
:return:
:rtype baidubce.bce_response.BceResponse
"""
path = utils.append_uri(self.prefix, 'acl')
params = {}
params[b'vpcId'] = vpc_id
return self._send_request(http_methods.GET, path,
params=params, config=config)
@required(rule_list=list)
def create_acl(self, rule_list, client_token=None, config=None):
"""
Create acl rules with the specified options.
:param rule_list:
a list contains acl rules.
https://cloud.baidu.com/doc/VPC/API.html#AclRuleRequest
The elements of the list are AclRuleRequest
:type rule_list: list
AclRuleRequest{
:param protocol:
The parameter specifies which protocol the acl rule works on
:value: "all" or "tcp" or "udp" or "icmp"
:type protocol: string
:param sourceIpAddress:
Source ip address which the rule applied to
:type sourceIpAddress: string
:param destinationIpAddress:
Destination ip address which the rule applied to
:type destinationIpAddress: string
:param sourcePort:
Port used by source ip address
:value 1-65535
:type sourcePort: string
:param destinationPort:
Port used by destination ip address
:value 1-65535
:type destinationPort:string
:param position:
Priority of the rule
:value 1-5000, unique. The smaller the value, the higher the priority
:type position: Integer
:param direction:
The rule is an ingress or an egress rule
:value: "ingress" or "egress"
:type direction: string
:param action:
The rule is allowed or denied
:value "allow" or "deny"
:type action:string
:param description(Optional):
The option param to describe the acl rule.
:type description: string
}
:param client_token:
If the clientToken is not specified by the user,
a random String generated by the default algorithm will be used.
:type client_token: string
:param config:
:type config: baidubce.BceClientConfiguration
:return:
:rtype baidubce.bce_response.BceResponse
"""
path = utils.append_uri(self.prefix, 'acl', 'rule')
params = {}
if client_token is None:
params[b'clientToken'] = generate_client_token()
else:
params[b'clientToken'] = client_token
body = {
'aclRules': rule_list
}
return self._send_request(http_methods.POST, path,
body=json.dumps(body), params=params,
config=config)
@required(subnet_id=(bytes, str))
def list_subnet_acl(self, subnet_id, marker=None, max_keys=None, config=None):
"""
Return a list of acl rules of the specified subnet.
:param subnet_id:
the id of the subnet which the acl is applied to
:type subnet_id: string
:param marker:
The optional parameter marker specified in the original
request to specify where in the results to begin listing.
If the marker is not specified, the listing will begin
from the first result.
:type marker: string
:param max_keys:
The optional parameter to specify the max number of
list results to return.
The default value is 1000.
:type max_keys: int
:param config:
:type config: baidubce.BceClientConfiguration
:return:
:rtype baidubce.bce_response.BceResponse
"""
path = utils.append_uri(self.prefix, 'acl', 'rule')
params = {}
if marker is not None:
params[b'marker'] = marker
if max_keys is not None:
params[b'maxKeys'] = max_keys
params[b'subnetId'] = subnet_id
return self._send_request(http_methods.GET, path,
params=params, config=config)
@required(acl_rule_id=(bytes, str))
def delete_acl(self, acl_rule_id, client_token=None, config=None):
"""
Delete the specific acl rule.
:param acl_rule_id:
The id of the specified acl.
:type acl_rule_id: string
:param client_token:
If the clientToken is not specified by the user, a random String
generated by the default algorithm will be used.
:type client_token: string
:param config:
:type config: baidubce.BceClientConfiguration
:return:
:rtype baidubce.bce_response.BceResponse
"""
path = utils.append_uri(self.prefix, 'acl', 'rule', acl_rule_id)
params = {}
if client_token is None:
params[b'clientToken'] = generate_client_token()
else:
params[b'clientToken'] = client_token
return self._send_request(http_methods.DELETE, path,
params=params, config=config)
@required(acl_rule_id=(bytes, str))
def update_acl(self, acl_rule_id, description=None,
protocol=None, source_ip_address=None,
destination_ip_address=None, source_port=None,
destination_port=None,
position=None, action=None,
client_token=None, config=None):
"""
Modify the specified attributes of the acl rule owned by the user.
:param acl_rule_id
id of the acl to be modified
:type acl_rule_id:string
:param description:
The option param to describe the acl rule.
:type description: string
:param protocol:
The parameter specifies which protocol the acl rule works on
:value: "all" or "tcp" or "udp" or "icmp"
:type protocol: string
:param source_ip_address:
source ip address which the rule applied to
:type source_ip_address: string
:param destination_ip_address:
destination ip address which the rule applied to
:type destination_ip_address: string
:param source_port:
port used by source ip address
:value 1-65535
:type source_port: string
:param destination_port:
port used by destination ip address
:value 1-65535
:type destination_port:string
:param position:
priority of the rule
:value 1-5000, unique. The smaller the value, the higher the priority
:type position: Integer
:param action:
the rule is allowed or denied
:value "allow" or "deny"
:type action:string
:param client_token:
If the clientToken is not specified by the user, a random
String generated by the default algorithm will be used.
:type client_token: string
:param config:
:type config: baidubce.BceClientConfiguration
:return:
:rtype baidubce.bce_response.BceResponse
"""
path = utils.append_uri(self.prefix, 'acl', 'rule', acl_rule_id)
params = {}
if client_token is None:
params[b'clientToken'] = generate_client_token()
else:
params[b'clientToken'] = client_token
body = {}
if description is not None:
body['description'] = compat.convert_to_string(description)
if protocol is not None:
body['protocol'] = compat.convert_to_string(protocol)
if source_ip_address is not None:
body['sourceIpAddress'] = \
compat.convert_to_string(source_ip_address)
if destination_ip_address is not None:
body['destinationIpAddress'] = \
compat.convert_to_string(destination_ip_address)
if source_port is not None:
body['sourcePort'] = compat.convert_to_string(source_port)
if destination_port is not None:
body['destinationPort'] = \
compat.convert_to_string(destination_port)
if position is not None:
body['position'] = position
if action is not None:
body['action'] = compat.convert_to_string(action)
return self._send_request(http_methods.PUT, path, json.dumps(body),
params=params, config=config)
def generate_client_token_by_uuid():
"""
The default method to generate the random string for client_token
if the optional parameter client_token is not specified by the user.
:return:
:rtype string
"""
return str(uuid.uuid4())
generate_client_token = generate_client_token_by_uuid
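# --- Hedged usage sketch (not part of the SDK) ---------------------------------
# Shows how AclClient might be driven. The endpoint, keys and vpc id below are
# hypothetical placeholders; BceClientConfiguration and BceCredentials are the
# standard baidubce configuration classes.
if __name__ == '__main__':
    from baidubce.bce_client_configuration import BceClientConfiguration
    from baidubce.auth.bce_credentials import BceCredentials

    example_config = BceClientConfiguration(
        credentials=BceCredentials(b'your-access-key', b'your-secret-key'),
        endpoint=b'bcc.bj.baidubce.com')  # placeholder endpoint
    acl_client = AclClient(example_config)
    # List the acl entries of a (placeholder) VPC.
    print(acl_client.list_acl_entrys(vpc_id=b'vpc-xxxxxxxx'))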
|
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by ExopyHqcLegacy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Tasks to used to load a file in memory.
"""
import os
import errno
import logging
import numbers
import warnings
from inspect import cleandoc
from collections import OrderedDict
#: Protection against numpy deprecation message in h5py
warnings.filterwarnings("ignore", category=FutureWarning, module="h5py")
import numpy
import h5py
from atom.api import Unicode, Enum, Value, Bool, Int, Typed, List, set_default
from exopy.tasks.api import SimpleTask, validators
from exopy.utils.atom_util import ordered_dict_from_pref, ordered_dict_to_pref
from exopy.utils.traceback import format_exc
class SaveTask(SimpleTask):
""" Save the specified entries either in a CSV file or an array. The file
is closed when the expected number of lines has been reached.
Wait for any parallel operation before execution.
Notes
-----
Currently only support saving floats.
"""
#: Kind of object in which to save the data.
saving_target = Enum('File', 'Array', 'File and array').tag(pref=True)
#: Folder in which to save the data.
folder = Unicode('{default_path}').tag(pref=True)
#: Name of the file in which to write the data.
filename = Unicode().tag(pref=True)
#: Currently opened file object. (File mode)
file_object = Value()
#: Opening mode to use when saving to a file.
file_mode = Enum('New', 'Add')
#: Header to write at the top of the file.
header = Unicode().tag(pref=True)
#: Numpy array in which data are stored (Array mode)
array = Value() # Array
#: Size of the data to be saved. (Evaluated at runtime)
array_size = Unicode().tag(pref=True)
#: Computed size of the data (post evaluation)
array_length = Int()
#: Index of the current line.
line_index = Int(0)
#: Values to save as an ordered dictionary.
saved_values = Typed(OrderedDict, ()).tag(pref=(ordered_dict_to_pref,
ordered_dict_from_pref))
#: Flag indicating whether or not initialisation has been performed.
initialized = Bool(False)
wait = set_default({'activated': True}) # Wait on all pools by default.
def perform(self):
""" Collect all data and write them to array or file according to mode.
On first call initialise the system by opening file and/or array. Close
file when the expected number of lines has been written.
"""
# Initialisation.
if not self.initialized:
self.line_index = 0
size_str = self.array_size
if size_str:
self.array_length = self.format_and_eval_string(size_str)
else:
self.array_length = -1
if self.saving_target != 'Array':
full_folder_path = self.format_string(self.folder)
filename = self.format_string(self.filename)
full_path = os.path.join(full_folder_path, filename)
mode = 'wb' if self.file_mode == 'New' else 'ab'
try:
self.file_object = open(full_path, mode)
except IOError as e:
log = logging.getLogger()
mes = ('In {}, failed to open the specified '
'file {}').format(self.name, e)
log.error(mes)
self.root.should_stop.set()
self.root.resources['files'][full_path] = self.file_object
if self.header:
h = self.format_string(self.header)
for line in h.split('\n'):
self.file_object.write(('# ' + line +
'\n').encode('utf-8'))
labels = [self.format_string(s) for s in self.saved_values]
self.file_object.write(('\t'.join(labels) +
'\n').encode('utf-8'))
self.file_object.flush()
if self.saving_target != 'File':
# TODO add more flexibility on the dtype (possible complex
# values)
dtype = numpy.dtype({'names': [self.format_string(s)
for s in self.saved_values],
'formats': ['f8']*len(self.saved_values)})
self.array = numpy.empty((self.array_length,),
dtype=dtype)
self.write_in_database('array', self.array)
self.initialized = True
# Writing
values = tuple(self.format_and_eval_string(s)
for s in self.saved_values.values())
if self.saving_target != 'Array':
new_line = '\t'.join([str(val) for val in values]) + '\n'
self.file_object.write(new_line.encode('utf-8'))
self.file_object.flush()
if self.saving_target != 'File':
self.array[self.line_index] = tuple(values)
self.line_index += 1
# Closing
if self.line_index == self.array_length:
if self.file_object:
self.file_object.close()
self.initialized = False
def check(self, *args, **kwargs):
"""Check that the provided parameters make sense.
"""
err_path = self.get_error_path()
test, traceback = super(SaveTask, self).check(*args, **kwargs)
if self.saving_target != 'Array':
try:
full_folder_path = self.format_string(self.folder)
except Exception as e:
mess = 'Failed to format the folder path: {}'
traceback[err_path] = mess.format(e)
return False, traceback
try:
filename = self.format_string(self.filename)
except Exception as e:
mess = 'Failed to format the filename: {}'
traceback[err_path] = mess.format(e)
return False, traceback
full_path = os.path.join(full_folder_path, filename)
overwrite = False
if self.file_mode == 'New' and os.path.isfile(full_path):
overwrite = True
traceback[err_path + '-file'] = \
('File already exists, running the measure will '
'overwrite it.')
try:
f = open(full_path, 'ab')
f.close()
if self.file_mode == 'New' and not overwrite:
os.remove(full_path)
except Exception as e:
mess = 'Failed to open the specified file: {}'
traceback[err_path] = mess.format(e)
return False, traceback
try:
self.format_string(self.header)
except Exception as e:
mess = 'Failed to format the header: {}'
traceback[err_path] = mess.format(e)
return False, traceback
if self.array_size:
try:
self.format_and_eval_string(self.array_size)
except Exception as e:
mess = 'Failed to compute the array size: {}'
traceback[err_path] = mess.format(e)
return False, traceback
elif self.saving_target != 'File':
traceback[err_path] = 'A size for the array must be provided.'
return False, traceback
labels = list()
for i, (l, v) in enumerate(self.saved_values.items()):
try:
labels.append(self.format_string(l))
except Exception:
traceback[err_path + '-label_' + str(i)] = \
'Failed to evaluate label {}:\n{}'.format(l, format_exc())
test = False
try:
self.format_and_eval_string(v)
except Exception:
traceback[err_path + '-entry_' + str(i)] = \
'Failed to evaluate entry {}:\n{}'.format(v, format_exc())
test = False
if not test:
return test, traceback
if len(set(labels)) != len(self.saved_values):
traceback[err_path] = "All labels must be different."
return False, traceback
if self.saving_target != 'File':
data = [numpy.array([0.0, 1.0]) for s in self.saved_values]
names = str(','.join([s for s in labels]))
final_arr = numpy.rec.fromarrays(data, names=names)
self.write_in_database('array', final_arr)
return test, traceback
def _post_setattr_saving_target(self, old, new):
"""Add the array in the database if using it.
"""
if new != 'File':
self.database_entries = {'array': numpy.array([1.0])}
else:
self.database_entries = {}
class SaveFileTask(SimpleTask):
""" Save the specified entries in a CSV file.
Wait for any parallel operation before execution.
Notes
-----
Currently only support saving floats and arrays of floats (record arrays
or simple arrays).
"""
#: Folder in which to save the data.
folder = Unicode('{default_path}').tag(pref=True, fmt=True)
#: Name of the file in which to write the data.
filename = Unicode().tag(pref=True, fmt=True)
#: Currently opened file object. (File mode)
file_object = Value()
#: Header to write at the top of the file.
header = Unicode().tag(pref=True, fmt=True)
#: Values to save as an ordered dictionary.
saved_values = Typed(OrderedDict, ()).tag(pref=(ordered_dict_to_pref,
ordered_dict_from_pref))
#: Flag indicating whether or not initialisation has been performed.
initialized = Bool(False)
#: Column indices identified as arrays. Used to save 2D arrays in
#: concatenated columns.
array_values = Value()
#: Shapes of identified arrays.
array_dims = Value()
database_entries = set_default({'file': None})
wait = set_default({'activated': True}) # Wait on all pools by default.
def perform(self):
""" Collect all data and write them to file.
"""
# Initialisation.
if not self.initialized:
full_folder_path = self.format_string(self.folder)
filename = self.format_string(self.filename)
full_path = os.path.join(full_folder_path, filename)
try:
self.file_object = open(full_path, 'wb')
except IOError:
log = logging.getLogger()
msg = "In {}, failed to open the specified file."
log.exception(msg.format(self.name))
self.root.should_stop.set()
self.root.resources['files'][full_path] = self.file_object
if self.header:
h = self.format_string(self.header)
for line in h.split('\n'):
self.file_object.write(('# ' + line +
'\n').encode('utf-8'))
labels = []
self.array_values = list()
self.array_dims = list()
for i, (l, v) in enumerate(self.saved_values.items()):
label = self.format_string(l)
value = self.format_and_eval_string(v)
if isinstance(value, numpy.ndarray):
names = value.dtype.names
self.array_values.append(i)
self.array_dims.append(value.ndim)
if names:
labels.extend([label + '_' + m for m in names])
else:
labels.append(label)
else:
labels.append(label)
self.file_object.write(('\t'.join(labels) + '\n').encode('utf-8'))
self.file_object.flush()
self.initialized = True
shapes_1D = set()
shapes_2D = set()
values = []
for i, v in enumerate(self.saved_values.values()):
value = self.format_and_eval_string(v)
values.append(value)
if i in self.array_values: # if we deal with an array_type value
if len(value.shape) == 1:
shapes_1D.add(value.shape)
elif len(value.shape) == 2:
shapes_2D.add(value.shape)
else:
log = logging.getLogger()
msg = ("In {}, impossible to save arrays exceeding two "
"dimension. Save file in HDF5 format.")
log.error(msg.format(self.name))
self.root.should_stop.set()
if shapes_1D:
if len(shapes_1D) > 1:
log = logging.getLogger()
msg = ("In {}, impossible to save simultaneously 1D-arrays of "
"different sizes. Save file in HDF5 format.")
log.error(msg.format(self.name))
self.root.should_stop.set()
else:
length = shapes_1D.pop()
if shapes_2D:
if len(shapes_2D) > 1:
log = logging.getLogger()
msg = ("In {}, impossible to save simultaneously 2D-arrays of "
"different sizes. Save file in HDF5 format.")
log.error(msg.format(self.name))
self.root.should_stop.set()
elif shapes_1D:
    # shapes_2D is a set and is not subscriptable; compare the 1D
    # length against the first dimension of the (single) 2D shape.
    shape = shapes_2D.pop()
    if length[0] != shape[0]:
        log = logging.getLogger()
        msg = ("In {}, 1D-arrays and 2D-arrays could not be "
               "broadcast together. Save file in HDF5 format.")
        log.error(msg.format(self.name))
        self.root.should_stop.set()
else:
shape = shapes_2D.pop()
if not self.array_values:
new_line = '\t'.join([str(val) for val in values]) + '\n'
self.file_object.write(new_line.encode('utf-8'))
self.file_object.flush()
else:
columns = []
if not (2 in self.array_dims):
for i, val in enumerate(values):
if i in self.array_values:
if val.dtype.names:
columns.extend([val[m] for m in val.dtype.names])
else:
columns.append(val)
else:
columns.append(numpy.ones(length)*val)
else:
for i, val in enumerate(values):
if i in self.array_values:
if val.ndim == 1:
val_2D = numpy.array([val]).T
ones = numpy.ones((1, shape[1]))
val = numpy.multiply(val_2D, ones)
else:
val = numpy.ones(shape[0]*shape[1])*val
columns.append(val.reshape((shape[0]*shape[1])))
array_to_save = numpy.rec.fromarrays(columns)
numpy.savetxt(self.file_object, array_to_save, delimiter='\t')
self.file_object.flush()
def check(self, *args, **kwargs):
"""Check that given parameters are meaningful
"""
err_path = self.get_error_path()
test, traceback = super(SaveFileTask, self).check(*args, **kwargs)
try:
full_folder_path = self.format_string(self.folder)
filename = self.format_string(self.filename)
except Exception:
return test, traceback
full_path = os.path.join(full_folder_path, filename)
overwrite = False
if os.path.isfile(full_path):
overwrite = True
traceback[err_path + '-file'] = \
cleandoc('''File already exists, running the measure will
overwrite it.''')
try:
f = open(full_path, 'ab')
f.close()
if not overwrite:
os.remove(full_path)
except Exception as e:
mess = 'Failed to open the specified file: {}'
traceback[err_path] = mess.format(e)
return False, traceback
labels = set()
for i, (l, v) in enumerate(self.saved_values.items()):
try:
labels.add(self.format_string(l))
except Exception:
traceback[err_path + '-label_' + str(i)] = \
'Failed to evaluate label {}:\n{}'.format(l, format_exc())
test = False
try:
self.format_and_eval_string(v)
except Exception:
traceback[err_path + '-entry_' + str(i)] = \
'Failed to evaluate entry {}:\n{}'.format(v, format_exc())
test = False
if not test:
return test, traceback
if len(labels) != len(self.saved_values):
traceback[err_path] = "All labels must be different."
return False, traceback
return test, traceback
class _HDF5File(h5py.File):
"""Resize the datasets before closing the file
Sets the compression with a boolean
"""
def close(self):
for dataset in self.keys():
oldshape = self[dataset].shape
newshape = (self.attrs['count_calls'], ) + oldshape[1:]
self[dataset].resize(newshape)
super(_HDF5File, self).close()
def create_dataset(self, name, shape, maximumshape, datatype, compress):
f = super(_HDF5File, self)
if compress != 'None':
f.create_dataset(name, shape, maxshape=maximumshape,
dtype=datatype, compression=compress)
else:
f.create_dataset(name, shape, maxshape=maximumshape,
dtype=datatype)
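# --- Illustrative sketch (not part of the task set) -----------------------------
# Demonstrates with plain h5py the resizable-dataset pattern that _HDF5File and
# SaveFileHDF5Task rely on: create datasets with an unlimited first axis, grow
# them in chunks as calls accumulate, then trim them to the real call count.
# The file name and values are hypothetical placeholders.
def _demo_resizable_dataset(path='demo.h5', calls_estimation=10):
    with h5py.File(path, 'w') as f:
        dset = f.create_dataset('signal', (calls_estimation,),
                                maxshape=(None,), dtype='float64')
        f.attrs['count_calls'] = 0
        for value in (0.1, 0.2, 0.3):
            count = f.attrs['count_calls']
            if count == dset.shape[0]:
                # Grow by another chunk, as perform() does above.
                dset.resize((dset.shape[0] + calls_estimation,))
            dset[count] = value
            f.attrs['count_calls'] = count + 1
        # Trim to the number of calls actually performed, as _HDF5File.close does.
        dset.resize((int(f.attrs['count_calls']),))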
VAL_REAL = validators.Feval(types=numbers.Real)
class SaveFileHDF5Task(SimpleTask):
""" Save the specified entries in a HDF5 file.
Wait for any parallel operation before execution.
"""
#: Folder in which to save the data.
folder = Unicode('{default_path}').tag(pref=True, fmt=True)
#: Name of the file in which to write the data.
filename = Unicode().tag(pref=True, fmt=True)
#: Currently opened file object. (File mode)
file_object = Value()
#: Header to write at the top of the file.
header = Unicode().tag(pref=True, fmt=True)
#: Values to save as an ordered dictionary.
saved_values = Typed(OrderedDict, ()).tag(pref=(ordered_dict_to_pref,
ordered_dict_from_pref))
#: Data type (float16, float32, etc.)
datatype = Enum('float16', 'float32', 'float64').tag(pref=True)
#: Compression type of the data in the HDF5 file
compression = Enum('None', 'gzip').tag(pref=True)
#: Estimation of the number of calls of this task during the measure.
#: This helps h5py to chunk the file appropriately
calls_estimation = Unicode('1').tag(pref=True, feval=VAL_REAL)
#: Flag indicating whether or not initialisation has been performed.
initialized = Bool(False)
database_entries = set_default({'file': None})
wait = set_default({'activated': True}) # Wait on all pools by default.
def perform(self):
""" Collect all data and write them to file.
"""
calls_estimation = self.format_and_eval_string(self.calls_estimation)
# Initialisation.
if not self.initialized:
self._formatted_labels = []
full_folder_path = self.format_string(self.folder)
filename = self.format_string(self.filename)
full_path = os.path.join(full_folder_path, filename)
try:
self.file_object = _HDF5File(full_path, 'w')
except IOError:
log = logging.getLogger()
msg = "In {}, failed to open the specified file."
log.exception(msg.format(self.name))
self.root.should_stop.set()
self.root.resources['files'][full_path] = self.file_object
f = self.file_object
for l, v in self.saved_values.items():
label = self.format_string(l)
self._formatted_labels.append(label)
value = self.format_and_eval_string(v)
if isinstance(value, numpy.ndarray):
names = value.dtype.names
if names:
for m in names:
f.create_dataset(label + '_' + m,
(calls_estimation,) + value.shape,
(None, ) + value.shape,
self.datatype,
self.compression)
else:
f.create_dataset(label,
(calls_estimation,) + value.shape,
(None, ) + value.shape,
self.datatype,
self.compression)
else:
f.create_dataset(label, (calls_estimation,), (None,),
self.datatype, self.compression)
f.attrs['header'] = self.format_string(self.header)
f.attrs['count_calls'] = 0
f.flush()
self.initialized = True
f = self.file_object
count_calls = f.attrs['count_calls']
if not (count_calls % calls_estimation):
for dataset in f.keys():
oldshape = f[dataset].shape
newshape = (oldshape[0] + calls_estimation, ) + oldshape[1:]
f[dataset].resize(newshape)
labels = self._formatted_labels
for i, v in enumerate(self.saved_values.values()):
value = self.format_and_eval_string(v)
if isinstance(value, numpy.ndarray):
names = value.dtype.names
if names:
for m in names:
f[labels[i] + '_' + m][count_calls] = value[m]
else:
f[labels[i]][count_calls] = value
else:
f[labels[i]][count_calls] = value
f.attrs['count_calls'] = count_calls + 1
f.flush()
def check(self, *args, **kwargs):
"""Check that all teh parameters are correct.
"""
err_path = self.get_error_path()
test, traceback = super(SaveFileHDF5Task, self).check(*args, **kwargs)
try:
full_folder_path = self.format_string(self.folder)
filename = self.format_string(self.filename)
except Exception:
return test, traceback
full_path = os.path.join(full_folder_path, filename)
overwrite = False
if os.path.isfile(full_path):
overwrite = True
traceback[err_path + '-file'] = \
cleandoc('''File already exists, running the measure will
overwrite it.''')
try:
f = open(full_path, 'ab')
f.close()
if not overwrite:
os.remove(full_path)
except Exception as e:
mess = 'Failed to open the specified file: {}'
traceback[err_path] = mess.format(e)
return False, traceback
labels = set()
for i, (l, v) in enumerate(self.saved_values.items()):
try:
labels.add(self.format_string(l))
except Exception:
traceback[err_path + '-label_' + str(i)] = \
'Failed to evaluate label {}:\n{}'.format(l, format_exc())
test = False
try:
self.format_and_eval_string(v)
except Exception:
traceback[err_path + '-entry_' + str(i)] = \
'Failed to evaluate entry {}:\n{}'.format(v, format_exc())
test = False
if not test:
return test, traceback
if len(labels) != len(self.saved_values):
traceback[err_path] = "All labels must be different."
return False, traceback
return test, traceback
#: List of the formatted names of the entries.
_formatted_labels = List()
ARR_VAL = validators.Feval(types=numpy.ndarray)
class SaveArrayTask(SimpleTask):
"""Save the specified array either in a CSV file or as a .npy binary file.
Wait for any parallel operation before execution.
"""
#: Folder in which to save the data.
folder = Unicode().tag(pref=True, fmt=True)
#: Name of the file in which to write the data.
filename = Unicode().tag(pref=True, fmt=True)
#: Header to write at the top of the file.
header = Unicode().tag(pref=True, fmt=True)
#: Name of the array to save in the database.
target_array = Unicode().tag(pref=True, feval=ARR_VAL)
#: Flag indicating whether to save as csv or .npy.
mode = Enum('Text file', 'Binary file').tag(pref=True)
wait = set_default({'activated': True}) # Wait on all pools by default.
def perform(self):
""" Save array to file.
"""
array_to_save = self.format_and_eval_string(self.target_array)
assert isinstance(array_to_save, numpy.ndarray), 'Wrong type returned.'
full_folder_path = self.format_string(self.folder)
filename = self.format_string(self.filename)
full_path = os.path.join(full_folder_path, filename)
# Create folder if it does not exists.
try:
os.makedirs(full_folder_path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
if self.mode == 'Text file':
try:
file_object = open(full_path, 'wb')
except IOError:
msg = "In {}, failed to open the specified file"
log = logging.getLogger()
log.exception(msg.format(self.name))
raise
if self.header:
h = self.format_string(self.header)
for line in h.split('\n'):
file_object.write(('# ' + line + '\n').encode('utf-8'))
if array_to_save.dtype.names:
names = '\t'.join(array_to_save.dtype.names) + '\n'
file_object.write(names.encode('utf-8'))
numpy.savetxt(file_object, array_to_save, delimiter='\t')
file_object.close()
else:
try:
file_object = open(full_path, 'wb')
file_object.close()
except IOError:
msg = "In {}, failed to open the specified file."
log = logging.getLogger()
log.exception(msg.format(self.name))
self.root.should_stop.set()
return
numpy.save(full_path, array_to_save)
def check(self, *args, **kwargs):
"""Check folder path and filename.
"""
err_path = self.get_error_path()
test, traceback = super(SaveArrayTask, self).check(*args, **kwargs)
if self.mode == 'Binary file':
if len(self.filename) > 3 and self.filename[-4] == '.'\
and self.filename[-3:] != 'npy':
self.filename = self.filename[:-4] + '.npy'
msg = ("The extension of the file will be replaced by '.npy' "
"in task {}").format(self.name)
traceback[err_path + '-file_ext'] = msg
if self.header:
traceback[err_path + '-header'] =\
'Cannot write a header when saving in binary mode.'
try:
full_folder_path = self.format_string(self.folder)
filename = self.format_string(self.filename)
except Exception:
return test, traceback
full_path = os.path.join(full_folder_path, filename)
overwrite = False
if os.path.isfile(full_path):
overwrite = True
traceback[err_path + '-file'] = \
cleandoc('''File already exists, running the measure will
overwrite it.''')
try:
f = open(full_path, 'ab')
f.close()
if not overwrite:
os.remove(full_path)
except Exception as e:
mess = 'Failed to open the specified file: {}'
traceback[err_path] = mess.format(e)
return False, traceback
return test, traceback
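# --- Illustrative sketch (not part of the task set) -----------------------------
# The column-stacking trick used in SaveFileTask.perform above, in isolation:
# scalar entries are broadcast to full-length columns with numpy.ones, the
# columns are packed into a record array, and numpy.savetxt writes them as
# tab-separated rows. The file name and values are hypothetical placeholders.
def _demo_columns_to_text(path='demo.dat'):
    freq = numpy.linspace(1.0, 2.0, 5)      # a 1D "measured" column
    power = -30.0                           # a scalar entry
    columns = [freq, numpy.ones(len(freq)) * power]
    array_to_save = numpy.rec.fromarrays(columns, names='freq,power')
    with open(path, 'wb') as f:
        f.write(b'freq\tpower\n')
        numpy.savetxt(f, array_to_save, delimiter='\t')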
|
|
"""Support for Huawei LTE routers."""
from __future__ import annotations
from collections import defaultdict
from collections.abc import Callable
from contextlib import suppress
from datetime import timedelta
import logging
import time
from typing import Any, cast
import attr
from huawei_lte_api.AuthorizedConnection import AuthorizedConnection
from huawei_lte_api.Client import Client
from huawei_lte_api.Connection import Connection
from huawei_lte_api.exceptions import (
ResponseErrorException,
ResponseErrorLoginRequiredException,
ResponseErrorNotSupportedException,
)
from requests.exceptions import Timeout
from url_normalize import url_normalize
import voluptuous as vol
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.device_tracker.const import (
DOMAIN as DEVICE_TRACKER_DOMAIN,
)
from homeassistant.components.notify import DOMAIN as NOTIFY_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
ATTR_MODEL,
ATTR_SW_VERSION,
CONF_MAC,
CONF_NAME,
CONF_PASSWORD,
CONF_RECIPIENT,
CONF_URL,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import (
config_validation as cv,
device_registry as dr,
discovery,
entity_registry,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect, dispatcher_send
from homeassistant.helpers.entity import DeviceInfo, Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import ConfigType
from .const import (
ADMIN_SERVICES,
ALL_KEYS,
ATTR_UNIQUE_ID,
CONF_UNAUTHENTICATED_MODE,
CONNECTION_TIMEOUT,
DEFAULT_DEVICE_NAME,
DEFAULT_NOTIFY_SERVICE_NAME,
DOMAIN,
KEY_DEVICE_BASIC_INFORMATION,
KEY_DEVICE_INFORMATION,
KEY_DEVICE_SIGNAL,
KEY_DIALUP_MOBILE_DATASWITCH,
KEY_LAN_HOST_INFO,
KEY_MONITORING_CHECK_NOTIFICATIONS,
KEY_MONITORING_MONTH_STATISTICS,
KEY_MONITORING_STATUS,
KEY_MONITORING_TRAFFIC_STATISTICS,
KEY_NET_CURRENT_PLMN,
KEY_NET_NET_MODE,
KEY_SMS_SMS_COUNT,
KEY_WLAN_HOST_LIST,
KEY_WLAN_WIFI_FEATURE_SWITCH,
NOTIFY_SUPPRESS_TIMEOUT,
SERVICE_CLEAR_TRAFFIC_STATISTICS,
SERVICE_REBOOT,
SERVICE_RESUME_INTEGRATION,
SERVICE_SUSPEND_INTEGRATION,
UPDATE_SIGNAL,
)
from .utils import get_device_macs
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=10)
NOTIFY_SCHEMA = vol.Any(
None,
vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_RECIPIENT): vol.Any(
None, vol.All(cv.ensure_list, [cv.string])
),
}
),
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_URL): cv.url,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(NOTIFY_DOMAIN): NOTIFY_SCHEMA,
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
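# Hedged illustration (not part of the integration): an example of the structure
# that CONFIG_SCHEMA accepts once Home Assistant has parsed the YAML
# configuration. The URL, credentials and recipient are hypothetical
# placeholders.
_EXAMPLE_YAML_AS_DICT = {
    DOMAIN: [
        {
            CONF_URL: "http://192.168.8.1/",
            CONF_USERNAME: "admin",
            CONF_PASSWORD: "secret",
            NOTIFY_DOMAIN: {CONF_RECIPIENT: ["+15551234567"]},
        }
    ]
}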
SERVICE_SCHEMA = vol.Schema({vol.Optional(CONF_URL): cv.url})
CONFIG_ENTRY_PLATFORMS = (
BINARY_SENSOR_DOMAIN,
DEVICE_TRACKER_DOMAIN,
SENSOR_DOMAIN,
SWITCH_DOMAIN,
)
@attr.s
class Router:
"""Class for router state."""
hass: HomeAssistant = attr.ib()
config_entry: ConfigEntry = attr.ib()
connection: Connection = attr.ib()
url: str = attr.ib()
data: dict[str, Any] = attr.ib(init=False, factory=dict)
subscriptions: dict[str, set[str]] = attr.ib(
init=False,
factory=lambda: defaultdict(set, ((x, {"initial_scan"}) for x in ALL_KEYS)),
)
inflight_gets: set[str] = attr.ib(init=False, factory=set)
client: Client
suspended = attr.ib(init=False, default=False)
notify_last_attempt: float = attr.ib(init=False, default=-1)
def __attrs_post_init__(self) -> None:
"""Set up internal state on init."""
self.client = Client(self.connection)
@property
def device_name(self) -> str:
"""Get router device name."""
for key, item in (
(KEY_DEVICE_BASIC_INFORMATION, "devicename"),
(KEY_DEVICE_INFORMATION, "DeviceName"),
):
with suppress(KeyError, TypeError):
return cast(str, self.data[key][item])
return DEFAULT_DEVICE_NAME
@property
def device_identifiers(self) -> set[tuple[str, str]]:
"""Get router identifiers for device registry."""
assert self.config_entry.unique_id is not None
return {(DOMAIN, self.config_entry.unique_id)}
@property
def device_connections(self) -> set[tuple[str, str]]:
"""Get router connections for device registry."""
return {
(dr.CONNECTION_NETWORK_MAC, x) for x in self.config_entry.data[CONF_MAC]
}
def _get_data(self, key: str, func: Callable[[], Any]) -> None:
if not self.subscriptions.get(key):
return
if key in self.inflight_gets:
_LOGGER.debug("Skipping already inflight get for %s", key)
return
self.inflight_gets.add(key)
_LOGGER.debug("Getting %s for subscribers %s", key, self.subscriptions[key])
try:
self.data[key] = func()
except ResponseErrorLoginRequiredException:
if isinstance(self.connection, AuthorizedConnection):
_LOGGER.debug("Trying to authorize again")
if self.connection.enforce_authorized_connection():
_LOGGER.debug(
"success, %s will be updated by a future periodic run",
key,
)
else:
_LOGGER.debug("failed")
return
_LOGGER.info(
"%s requires authorization, excluding from future updates", key
)
self.subscriptions.pop(key)
except ResponseErrorException as exc:
if not isinstance(
exc, ResponseErrorNotSupportedException
) and exc.code not in (
# additional codes treated as unsupported
-1,
100006,
):
raise
_LOGGER.info(
"%s apparently not supported by device, excluding from future updates",
key,
)
self.subscriptions.pop(key)
except Timeout:
grace_left = (
self.notify_last_attempt - time.monotonic() + NOTIFY_SUPPRESS_TIMEOUT
)
if grace_left > 0:
_LOGGER.debug(
"%s timed out, %.1fs notify timeout suppress grace remaining",
key,
grace_left,
exc_info=True,
)
else:
raise
finally:
self.inflight_gets.discard(key)
_LOGGER.debug("%s=%s", key, self.data.get(key))
def update(self) -> None:
"""Update router data."""
if self.suspended:
_LOGGER.debug("Integration suspended, not updating data")
return
self._get_data(KEY_DEVICE_INFORMATION, self.client.device.information)
if self.data.get(KEY_DEVICE_INFORMATION):
# Full information includes everything in basic
self.subscriptions.pop(KEY_DEVICE_BASIC_INFORMATION, None)
self._get_data(
KEY_DEVICE_BASIC_INFORMATION, self.client.device.basic_information
)
self._get_data(KEY_DEVICE_SIGNAL, self.client.device.signal)
self._get_data(
KEY_DIALUP_MOBILE_DATASWITCH, self.client.dial_up.mobile_dataswitch
)
self._get_data(
KEY_MONITORING_MONTH_STATISTICS, self.client.monitoring.month_statistics
)
self._get_data(
KEY_MONITORING_CHECK_NOTIFICATIONS,
self.client.monitoring.check_notifications,
)
self._get_data(KEY_MONITORING_STATUS, self.client.monitoring.status)
self._get_data(
KEY_MONITORING_TRAFFIC_STATISTICS, self.client.monitoring.traffic_statistics
)
self._get_data(KEY_NET_CURRENT_PLMN, self.client.net.current_plmn)
self._get_data(KEY_NET_NET_MODE, self.client.net.net_mode)
self._get_data(KEY_SMS_SMS_COUNT, self.client.sms.sms_count)
self._get_data(KEY_LAN_HOST_INFO, self.client.lan.host_info)
if self.data.get(KEY_LAN_HOST_INFO):
# LAN host info includes everything in WLAN host list
self.subscriptions.pop(KEY_WLAN_HOST_LIST, None)
self._get_data(KEY_WLAN_HOST_LIST, self.client.wlan.host_list)
self._get_data(
KEY_WLAN_WIFI_FEATURE_SWITCH, self.client.wlan.wifi_feature_switch
)
dispatcher_send(self.hass, UPDATE_SIGNAL, self.config_entry.unique_id)
def logout(self) -> None:
"""Log out router session."""
if not isinstance(self.connection, AuthorizedConnection):
return
try:
self.client.user.logout()
except ResponseErrorNotSupportedException:
_LOGGER.debug("Logout not supported by device", exc_info=True)
except ResponseErrorLoginRequiredException:
_LOGGER.debug("Logout not supported when not logged in", exc_info=True)
except Exception: # pylint: disable=broad-except
_LOGGER.warning("Logout error", exc_info=True)
def cleanup(self, *_: Any) -> None:
"""Clean up resources."""
self.subscriptions.clear()
self.logout()
@attr.s
class HuaweiLteData:
"""Shared state."""
hass_config: ConfigType = attr.ib()
# Our YAML config, keyed by router URL
config: dict[str, dict[str, Any]] = attr.ib()
routers: dict[str, Router] = attr.ib(init=False, factory=dict)
async def async_setup_entry( # noqa: C901
hass: HomeAssistant, entry: ConfigEntry
) -> bool:
"""Set up Huawei LTE component from config entry."""
url = entry.data[CONF_URL]
# Override settings from YAML config, but only if they're changed in it
# Old values are stored as *_from_yaml in the config entry
if yaml_config := hass.data[DOMAIN].config.get(url):
# Config values
new_data = {}
for key in CONF_USERNAME, CONF_PASSWORD:
if key in yaml_config:
value = yaml_config[key]
if value != entry.data.get(f"{key}_from_yaml"):
new_data[f"{key}_from_yaml"] = value
new_data[key] = value
# Options
new_options = {}
yaml_recipient = yaml_config.get(NOTIFY_DOMAIN, {}).get(CONF_RECIPIENT)
if yaml_recipient is not None and yaml_recipient != entry.options.get(
f"{CONF_RECIPIENT}_from_yaml"
):
new_options[f"{CONF_RECIPIENT}_from_yaml"] = yaml_recipient
new_options[CONF_RECIPIENT] = yaml_recipient
yaml_notify_name = yaml_config.get(NOTIFY_DOMAIN, {}).get(CONF_NAME)
if yaml_notify_name is not None and yaml_notify_name != entry.options.get(
f"{CONF_NAME}_from_yaml"
):
new_options[f"{CONF_NAME}_from_yaml"] = yaml_notify_name
new_options[CONF_NAME] = yaml_notify_name
# Update entry if overrides were found
if new_data or new_options:
hass.config_entries.async_update_entry(
entry,
data={**entry.data, **new_data},
options={**entry.options, **new_options},
)
def get_connection() -> Connection:
"""Set up a connection."""
if entry.options.get(CONF_UNAUTHENTICATED_MODE):
_LOGGER.debug("Connecting in unauthenticated mode, reduced feature set")
connection = Connection(url, timeout=CONNECTION_TIMEOUT)
else:
_LOGGER.debug("Connecting in authenticated mode, full feature set")
username = entry.data.get(CONF_USERNAME) or ""
password = entry.data.get(CONF_PASSWORD) or ""
connection = AuthorizedConnection(
url, username=username, password=password, timeout=CONNECTION_TIMEOUT
)
return connection
try:
connection = await hass.async_add_executor_job(get_connection)
except Timeout as ex:
raise ConfigEntryNotReady from ex
# Set up router
router = Router(hass, entry, connection, url)
# Do initial data update
await hass.async_add_executor_job(router.update)
# Check that we found required information
router_info = router.data.get(KEY_DEVICE_INFORMATION)
if not entry.unique_id:
# Transitional from < 2021.8: update None config entry and entity unique ids
if router_info and (serial_number := router_info.get("SerialNumber")):
hass.config_entries.async_update_entry(entry, unique_id=serial_number)
ent_reg = entity_registry.async_get(hass)
for entity_entry in entity_registry.async_entries_for_config_entry(
ent_reg, entry.entry_id
):
if not entity_entry.unique_id.startswith("None-"):
continue
new_unique_id = (
f"{serial_number}-{entity_entry.unique_id.split('-', 1)[1]}"
)
ent_reg.async_update_entity(
entity_entry.entity_id, new_unique_id=new_unique_id
)
else:
await hass.async_add_executor_job(router.cleanup)
msg = (
"Could not resolve serial number to use as unique id for router at %s"
", setup failed"
)
if not entry.data.get(CONF_PASSWORD):
msg += (
". Try setting up credentials for the router for one startup, "
"unauthenticated mode can be enabled after that in integration "
"settings"
)
_LOGGER.error(msg, url)
return False
# Store reference to router
hass.data[DOMAIN].routers[entry.unique_id] = router
# Clear all subscriptions, enabled entities will push back theirs
router.subscriptions.clear()
# Update device MAC addresses on record. These can change due to toggling between
# authenticated and unauthenticated modes, or likely also when enabling/disabling
# SSIDs in the router config.
try:
wlan_settings = await hass.async_add_executor_job(
router.client.wlan.multi_basic_settings
)
except Exception: # pylint: disable=broad-except
# Assume not supported, or authentication required but in unauthenticated mode
wlan_settings = {}
macs = get_device_macs(router_info or {}, wlan_settings)
# Be careful not to overwrite a previous, more complete set with a partial one
if macs and (not entry.data[CONF_MAC] or (router_info and wlan_settings)):
new_data = dict(entry.data)
new_data[CONF_MAC] = macs
hass.config_entries.async_update_entry(entry, data=new_data)
# Set up device registry
if router.device_identifiers or router.device_connections:
device_info = DeviceInfo(
connections=router.device_connections,
identifiers=router.device_identifiers,
name=router.device_name,
manufacturer="Huawei",
)
sw_version = None
if router_info:
sw_version = router_info.get("SoftwareVersion")
if router_info.get("DeviceName"):
device_info[ATTR_MODEL] = router_info["DeviceName"]
if not sw_version and router.data.get(KEY_DEVICE_BASIC_INFORMATION):
sw_version = router.data[KEY_DEVICE_BASIC_INFORMATION].get(
"SoftwareVersion"
)
if sw_version:
device_info[ATTR_SW_VERSION] = sw_version
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
**device_info,
)
# Forward config entry setup to platforms
hass.config_entries.async_setup_platforms(entry, CONFIG_ENTRY_PLATFORMS)
# Notify doesn't support config entry setup yet, load with discovery for now
await discovery.async_load_platform(
hass,
NOTIFY_DOMAIN,
DOMAIN,
{
ATTR_UNIQUE_ID: entry.unique_id,
CONF_NAME: entry.options.get(CONF_NAME, DEFAULT_NOTIFY_SERVICE_NAME),
CONF_RECIPIENT: entry.options.get(CONF_RECIPIENT),
},
hass.data[DOMAIN].hass_config,
)
def _update_router(*_: Any) -> None:
"""
Update router data.
Separate passthrough function because lambdas don't work with track_time_interval.
"""
router.update()
# Set up periodic update
entry.async_on_unload(
async_track_time_interval(hass, _update_router, SCAN_INTERVAL)
)
# Clean up at end
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, router.cleanup)
)
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload config entry."""
# Forward config entry unload to platforms
await hass.config_entries.async_unload_platforms(
config_entry, CONFIG_ENTRY_PLATFORMS
)
# Forget about the router and invoke its cleanup
router = hass.data[DOMAIN].routers.pop(config_entry.unique_id)
await hass.async_add_executor_job(router.cleanup)
return True
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up Huawei LTE component."""
# dicttoxml (used by huawei-lte-api) has uselessly verbose INFO level.
# https://github.com/quandyfactory/dicttoxml/issues/60
logging.getLogger("dicttoxml").setLevel(logging.WARNING)
# Arrange our YAML config to dict with normalized URLs as keys
domain_config: dict[str, dict[str, Any]] = {}
if DOMAIN not in hass.data:
hass.data[DOMAIN] = HuaweiLteData(hass_config=config, config=domain_config)
for router_config in config.get(DOMAIN, []):
domain_config[url_normalize(router_config.pop(CONF_URL))] = router_config
def service_handler(service: ServiceCall) -> None:
"""
Apply a service.
We key this using the router URL instead of its unique id / serial number,
because the latter is not available anywhere in the UI.
"""
routers = hass.data[DOMAIN].routers
if url := service.data.get(CONF_URL):
router = next(
(router for router in routers.values() if router.url == url), None
)
elif not routers:
_LOGGER.error("%s: no routers configured", service.service)
return
elif len(routers) == 1:
router = next(iter(routers.values()))
else:
_LOGGER.error(
"%s: more than one router configured, must specify one of URLs %s",
service.service,
sorted(router.url for router in routers.values()),
)
return
if not router:
_LOGGER.error("%s: router %s unavailable", service.service, url)
return
if service.service == SERVICE_CLEAR_TRAFFIC_STATISTICS:
if router.suspended:
_LOGGER.debug("%s: ignored, integration suspended", service.service)
return
result = router.client.monitoring.set_clear_traffic()
_LOGGER.debug("%s: %s", service.service, result)
elif service.service == SERVICE_REBOOT:
if router.suspended:
_LOGGER.debug("%s: ignored, integration suspended", service.service)
return
result = router.client.device.reboot()
_LOGGER.debug("%s: %s", service.service, result)
elif service.service == SERVICE_RESUME_INTEGRATION:
# Login will be handled automatically on demand
router.suspended = False
_LOGGER.debug("%s: %s", service.service, "done")
elif service.service == SERVICE_SUSPEND_INTEGRATION:
router.logout()
router.suspended = True
_LOGGER.debug("%s: %s", service.service, "done")
else:
_LOGGER.error("%s: unsupported service", service.service)
for service in ADMIN_SERVICES:
hass.helpers.service.async_register_admin_service(
DOMAIN,
service,
service_handler,
schema=SERVICE_SCHEMA,
)
for url, router_config in domain_config.items():
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_URL: url,
CONF_USERNAME: router_config.get(CONF_USERNAME),
CONF_PASSWORD: router_config.get(CONF_PASSWORD),
},
)
)
return True
async def async_migrate_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Migrate config entry to new version."""
if config_entry.version == 1:
options = dict(config_entry.options)
recipient = options.get(CONF_RECIPIENT)
if isinstance(recipient, str):
options[CONF_RECIPIENT] = [x.strip() for x in recipient.split(",")]
config_entry.version = 2
hass.config_entries.async_update_entry(config_entry, options=options)
_LOGGER.info("Migrated config entry to version %d", config_entry.version)
if config_entry.version == 2:
config_entry.version = 3
data = dict(config_entry.data)
data[CONF_MAC] = []
hass.config_entries.async_update_entry(config_entry, data=data)
_LOGGER.info("Migrated config entry to version %d", config_entry.version)
return True
@attr.s
class HuaweiLteBaseEntity(Entity):
"""Huawei LTE entity base class."""
router: Router = attr.ib()
_available: bool = attr.ib(init=False, default=True)
_unsub_handlers: list[Callable] = attr.ib(init=False, factory=list)
@property
def _entity_name(self) -> str:
raise NotImplementedError
@property
def _device_unique_id(self) -> str:
"""Return unique ID for entity within a router."""
raise NotImplementedError
@property
def unique_id(self) -> str:
"""Return unique ID for entity."""
return f"{self.router.config_entry.unique_id}-{self._device_unique_id}"
@property
def name(self) -> str:
"""Return entity name."""
return f"Huawei {self.router.device_name} {self._entity_name}"
@property
def available(self) -> bool:
"""Return whether the entity is available."""
return self._available
@property
def should_poll(self) -> bool:
"""Huawei LTE entities report their state without polling."""
return False
@property
def device_info(self) -> DeviceInfo:
"""Get info for matching with parent router."""
return DeviceInfo(
connections=self.router.device_connections,
identifiers=self.router.device_identifiers,
)
async def async_update(self) -> None:
"""Update state."""
raise NotImplementedError
async def async_added_to_hass(self) -> None:
"""Connect to update signals."""
self._unsub_handlers.append(
async_dispatcher_connect(self.hass, UPDATE_SIGNAL, self._async_maybe_update)
)
async def _async_maybe_update(self, config_entry_unique_id: str) -> None:
"""Update state if the update signal comes from our router."""
if config_entry_unique_id == self.router.config_entry.unique_id:
self.async_schedule_update_ha_state(True)
async def async_will_remove_from_hass(self) -> None:
"""Invoke unsubscription handlers."""
for unsub in self._unsub_handlers:
unsub()
self._unsub_handlers.clear()
|
|
from __future__ import unicode_literals
import json
from django.contrib import admin
from django.conf.urls import patterns, url
from django.core import urlresolvers
from django.template.defaultfilters import capfirst
from django.utils.translation import ugettext, ugettext_lazy as _
# from vesper.apps import site
# from vds.apps import site
# from vesper.views import ModelAdmin
from vesper.layouts import Tab, Fieldset, Field, Button, Inline, Layout, FormHelper
from vesper.views import ModelAdminView, TabularModelAdminInline, FormAdminView, TabularFormAdminInline, TemplateAdminView
from .models import ListData, GlobalA, GlobalB, NestedA, NestedB1, NestedC1
from .forms import CreateShipmentForm, CreatePackageForm, FieldsForm
class PageAdminView(TemplateAdminView):
admin = None
template_name = 'views/base.html'
class CreatePackageInline(TabularFormAdminInline):
"""
"""
name = 'packages'
form_class = CreatePackageForm
form_layout = Layout(
Field('length'),
Field('width'),
Field('height'),
Field('weight')
)
class ExtraViewAdmin(FormAdminView):
"""
"""
form_class = CreateShipmentForm
form_layout = Layout(
Fieldset('Fields',
Field('channel'),
Field('shipment_method'),
Field('shipment_number'),
Field('parcels', css_class='vds-hide'),
Field('reference'),
Field('print_labels')
),
Fieldset('Extra',
Inline('packages'),
css_id='fieldset-extra'
)
)
inlines = [
CreatePackageInline
]
def get_page_header_title(self, request, obj):
if obj:
return str(obj)
else:
return 'New object'
def get_form_initial(self):
"""
"""
initial = {
'reference': self.object.name,
}
return initial
def get_form_kwargs(self, **kwargs):
"""
"""
kwargs = super(ExtraViewAdmin, self).get_form_kwargs()
kwargs.update({
'choices_channel': [(1, 'Channel 1'), (2, 'Channel 2')],
'choices_shipment_method': [(2, 'Shipment Method A'), (100, 'Shipment Method B')]
})
return kwargs
def form_valid(self, form):
"""
"""
print(form.cleaned_data)
if form.cleaned_data['shipment_method'] == '2':
print('start here')
# if len(form.cleaned_data['packages']) != 1:
# print 'error'
form.add_error('shipment_method', 'We need a error')
return self.form_invalid(form=form)
class ListDataAdmin(ModelAdminView):
detail_layout = [
Tab('General',
Fieldset('Name',
Field('name'),
Field('date'),
css_class='vds-size--1-of-2'
),
Fieldset('Status',
Field('boolean'),
Field('select'),
css_class='vds-size--1-of-2'
)
),
Tab('Info',
Fieldset('Time',
Field('datetime'),
Field('decimal'),
Field('description'),
),
),
]
views = [
url(r'page/$', PageAdminView, name='page'),
url(r'^(.+)/action/create-shipment/$', ExtraViewAdmin, name='action_shipment'),
]
def get_detail_actions(self, request, obj):
actions = []
if obj:
actions.append(
Button('create-shipment', 'Mark as Paid', self.get_url_reverse('action_shipment', obj.pk), icon='envelope')
)
return actions
# def get_detail_header(self, request, obj):
# header = super(ListDataAdmin, self).get_detail_header(request, obj)
# if obj:
# header.title = obj.pk
# header.subtitle = 'WeStockLots - Website'
# header.title = 'WO20180012002'
# header.icon = 'pro'
# return header
class NestedC1InlineAdmin(TabularModelAdminInline):
model = NestedC1
class NestedB1InlineAdmin(TabularModelAdminInline):
model = NestedB1
inlines = [
NestedC1InlineAdmin
]
class NestedAAdmin(ModelAdminView):
inlines = [
NestedB1InlineAdmin
]
detail_layout = [
Tab('General',
Fieldset('Name',
Field('name'),
Field('global_a'),
Field('decimal'),
Field('boolean'),
),
Fieldset('Inline',
Inline('NestedB1InlineAdmin')
)
)
]
# Field Test
admin.site.register(ListData, ListDataAdmin)
# Relation Test
admin.site.register(GlobalA)
admin.site.register(GlobalB)
admin.site.register(NestedA, NestedAAdmin)
|
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import abs, sum, sin, cos, asarray, arange, pi, exp, log, sqrt
from scipy.optimize import rosen
from .go_benchmark import Benchmark
class Rana(Benchmark):
r"""
Rana objective function.
This class defines the Rana [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Rana}}(x) = \sum_{i=1}^{n} \left[x_{i}
\sin\left(\sqrt{\lvert{x_{1} - x_{i} + 1}\rvert}\right)
\cos\left(\sqrt{\lvert{x_{1} + x_{i} + 1}\rvert}\right) +
\left(x_{1} + 1\right) \sin\left(\sqrt{\lvert{x_{1} + x_{i} +
1}\rvert}\right) \cos\left(\sqrt{\lvert{x_{1} - x_{i} +
1}\rvert}\right)\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[-500.0, 500.0]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x_i) = -928.5478` for
:math:`x = [-300.3376, 500]`.
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: homemade global minimum here.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-500.000001] * self.N,
[500.000001] * self.N))
self.global_optimum = [[-300.3376, 500.]]
self.fglob = -500.8021602966615
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
t1 = sqrt(abs(x[1:] + x[: -1] + 1))
t2 = sqrt(abs(x[1:] - x[: -1] + 1))
v = (x[1:] + 1) * cos(t2) * sin(t1) + x[:-1] * cos(t1) * sin(t2)
return sum(v)
class Rastrigin(Benchmark):
r"""
Rastrigin objective function.
This class defines the Rastrigin [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
        f_{\text{Rastrigin}}(x) = 10n + \sum_{i=1}^n \left[ x_i^2
- 10 \cos(2\pi x_i) \right]
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-5.12, 5.12]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.12] * self.N, [5.12] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
return 10.0 * self.N + sum(x ** 2.0 - 10.0 * cos(2.0 * pi * x))
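# Illustrative usage sketch (not part of the original benchmark suite): every
# Benchmark subclass in this module is exercised the same way -- construct it,
# then call ``fun`` with a point inside ``_bounds``; at the documented optimum
# the value should match ``fglob``.
def _example_benchmark_usage():
    bench = Rastrigin(dimensions=3)
    x0 = asarray(bench.global_optimum[0])
    assert abs(bench.fun(x0) - bench.fglob) < 1e-12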
class Ratkowsky01(Benchmark):
"""
Ratkowsky objective function.
.. [1] https://www.itl.nist.gov/div898/strd/nls/data/ratkowsky3.shtml
"""
# TODO, this is a NIST regression standard dataset
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0., 1., 0., 0.1],
[1000, 20., 3., 6.]))
self.global_optimum = [[6.996415127e2, 5.2771253025, 7.5962938329e-1,
1.2792483859]]
self.fglob = 8.786404908e3
self.a = asarray([16.08, 33.83, 65.80, 97.20, 191.55, 326.20, 386.87,
520.53, 590.03, 651.92, 724.93, 699.56, 689.96,
637.56, 717.41])
self.b = arange(1, 16.)
def fun(self, x, *args):
self.nfev += 1
vec = x[0] / ((1 + exp(x[1] - x[2] * self.b)) ** (1 / x[3]))
return sum((self.a - vec) ** 2)
class Ratkowsky02(Benchmark):
r"""
Ratkowsky02 objective function.
This class defines the Ratkowsky 2 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
        f_{\text{Ratkowsky02}}(x) = \sum_{m=1}^{9} \left(a_m
        - \frac{x_1}{1 + e^{x_2 - b_m x_3}}\right)^2
where
.. math::
\begin{cases}
a=[8.93, 10.8, 18.59, 22.33, 39.35, 56.11, 61.73, 64.62, 67.08]\\
b=[9., 14., 21., 28., 42., 57., 63., 70., 79.]\\
\end{cases}
Here :math:`x_1 \in [1, 100]`, :math:`x_2 \in [0.1, 5]` and
:math:`x_3 \in [0.01, 0.5]`
*Global optimum*: :math:`f(x) = 8.0565229338` for
:math:`x = [7.2462237576e1, 2.6180768402, 6.7359200066e-2]`
.. [1] https://www.itl.nist.gov/div898/strd/nls/data/ratkowsky2.shtml
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([10, 0.5, 0.01],
[200, 5., 0.5]))
self.global_optimum = [[7.2462237576e1, 2.6180768402, 6.7359200066e-2]]
self.fglob = 8.0565229338
self.a = asarray([8.93, 10.8, 18.59, 22.33, 39.35, 56.11, 61.73, 64.62,
67.08])
self.b = asarray([9., 14., 21., 28., 42., 57., 63., 70., 79.])
def fun(self, x, *args):
self.nfev += 1
vec = x[0] / (1 + exp(x[1] - x[2] * self.b))
return sum((self.a - vec) ** 2)
class Ripple01(Benchmark):
r"""
Ripple 1 objective function.
This class defines the Ripple 1 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Ripple01}}(x) = \sum_{i=1}^2 -e^{-2 \log 2
(\frac{x_i-0.1}{0.8})^2} \left[\sin^6(5 \pi x_i)
+ 0.1\cos^2(500 \pi x_i) \right]
with :math:`x_i \in [0, 1]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -2.2` for :math:`x_i = 0.1` for
:math:`i = 1, 2`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))
self.global_optimum = [[0.1 for _ in range(self.N)]]
self.fglob = -2.2
def fun(self, x, *args):
self.nfev += 1
u = -2.0 * log(2.0) * ((x - 0.1) / 0.8) ** 2.0
v = sin(5.0 * pi * x) ** 6.0 + 0.1 * cos(500.0 * pi * x) ** 2.0
return sum(-exp(u) * v)
class Ripple25(Benchmark):
r"""
Ripple 25 objective function.
This class defines the Ripple 25 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Ripple25}}(x) = \sum_{i=1}^2 -e^{-2
\log 2 (\frac{x_i-0.1}{0.8})^2}
\left[\sin^6(5 \pi x_i) \right]
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [0, 1]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = -2` for :math:`x_i = 0.1` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))
self.global_optimum = [[0.1 for _ in range(self.N)]]
self.fglob = -2.0
def fun(self, x, *args):
self.nfev += 1
u = -2.0 * log(2.0) * ((x - 0.1) / 0.8) ** 2.0
v = sin(5.0 * pi * x) ** 6.0
return sum(-exp(u) * v)
class Rosenbrock(Benchmark):
r"""
Rosenbrock objective function.
This class defines the Rosenbrock [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Rosenbrock}}(x) = \sum_{i=1}^{n-1} [100(x_i^2
- x_{i+1})^2 + (x_i - 1)^2]
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-5, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 1` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-30.] * self.N, [30.0] * self.N))
self.custom_bounds = [(-2, 2), (-2, 2)]
self.global_optimum = [[1 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
return rosen(x)
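# Note (illustrative): scipy.optimize.rosen evaluates the same sum as the
# docstring formula, e.g. rosen(asarray([1.0, 1.0])) == 0.0, matching fglob.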
class RosenbrockModified(Benchmark):
r"""
Modified Rosenbrock objective function.
This class defines the Modified Rosenbrock [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{RosenbrockModified}}(x) = 74 + 100(x_2 - x_1^2)^2
+ (1 - x_1)^2 - 400 e^{-\frac{(x_1+1)^2 + (x_2 + 1)^2}{0.1}}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-2, 2]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 34.04024310` for
:math:`x = [-0.90955374, -0.95057172]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: We have different global minimum compared to Jamil #106. This is
possibly because of the (1-x) term is using the wrong parameter.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-2.0] * self.N, [2.0] * self.N))
self.custom_bounds = ([-1.0, 0.5], [-1.0, 1.0])
self.global_optimum = [[-0.90955374, -0.95057172]]
self.fglob = 34.040243106640844
def fun(self, x, *args):
self.nfev += 1
a = 74 + 100. * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2
a -= 400 * exp(-((x[0] + 1.) ** 2 + (x[1] + 1.) ** 2) / 0.1)
return a
class RotatedEllipse01(Benchmark):
r"""
Rotated Ellipse 1 objective function.
This class defines the Rotated Ellipse 1 [1]_ global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\text{RotatedEllipse01}}(x) = 7x_1^2 - 6 \sqrt{3} x_1x_2 + 13x_2^2
with :math:`x_i \in [-500, 500]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-500.0] * self.N,
[500.0] * self.N))
self.custom_bounds = ([-2.0, 2.0], [-2.0, 2.0])
self.global_optimum = [[0.0, 0.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return (7.0 * x[0] ** 2.0 - 6.0 * sqrt(3) * x[0] * x[1]
+ 13 * x[1] ** 2.0)
class RotatedEllipse02(Benchmark):
r"""
Rotated Ellipse 2 objective function.
This class defines the Rotated Ellipse 2 [1]_ global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\text{RotatedEllipse02}}(x) = x_1^2 - x_1 x_2 + x_2^2
with :math:`x_i \in [-500, 500]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-500.0] * self.N,
[500.0] * self.N))
self.custom_bounds = ([-2.0, 2.0], [-2.0, 2.0])
self.global_optimum = [[0.0, 0.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return x[0] ** 2.0 - x[0] * x[1] + x[1] ** 2.0
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""## Placeholders
TensorFlow provides a placeholder operation that must be fed with data
on execution. For more info, see the section on [Feeding
data](../../how_tos/reading_data/index.md#feeding).
@@placeholder
@@placeholder_with_default
For feeding `SparseTensor`s, which are a composite type, there is a
convenience function:
@@sparse_placeholder
## Readers
TensorFlow provides a set of Reader classes for reading data formats.
For more information on inputs and readers, see [Reading
data](../../how_tos/reading_data/index.md).
@@ReaderBase
@@TextLineReader
@@WholeFileReader
@@IdentityReader
@@TFRecordReader
@@FixedLengthRecordReader
## Converting
TensorFlow provides several operations that you can use to convert various data
formats into tensors.
@@decode_csv
@@decode_raw
- - -
### Example protocol buffer
TensorFlow's [recommended format for training
examples](../../how_tos/reading_data/index.md#standard-tensorflow-format)
is serialized `Example` protocol buffers, [described
here](https://www.tensorflow.org/code/tensorflow/core/example/example.proto).
They contain `Features`, [described
here](https://www.tensorflow.org/code/tensorflow/core/example/feature.proto).
@@VarLenFeature
@@FixedLenFeature
@@FixedLenSequenceFeature
@@parse_example
@@parse_single_example
@@parse_tensor
@@decode_json_example
## Queues
TensorFlow provides several implementations of 'Queues', which are
structures within the TensorFlow computation graph to stage pipelines
of tensors together. The following sections describe the basic Queue interface
and some implementations. To see an example use, see [Threading and
Queues](../../how_tos/threading_and_queues/index.md).
@@QueueBase
@@FIFOQueue
@@PaddingFIFOQueue
@@RandomShuffleQueue
@@PriorityQueue
## Dealing with the filesystem
@@matching_files
@@read_file
## Input pipeline
TensorFlow functions for setting up an input-prefetching pipeline.
Please see the [reading data how-to](../../how_tos/reading_data/index.md)
for context.
### Beginning of an input pipeline
The "producer" functions add a queue to the graph and a corresponding
`QueueRunner` for running the subgraph that fills that queue.
@@match_filenames_once
@@limit_epochs
@@input_producer
@@range_input_producer
@@slice_input_producer
@@string_input_producer
### Batching at the end of an input pipeline
These functions add a queue to the graph to assemble a batch of
examples, with possible shuffling. They also add a `QueueRunner` for
running the subgraph that fills that queue.
Use [`batch`](#batch) or [`batch_join`](#batch_join) for batching
examples that have already been well shuffled. Use
[`shuffle_batch`](#shuffle_batch) or
[`shuffle_batch_join`](#shuffle_batch_join) for examples that would
benefit from additional shuffling.
Use [`batch`](#batch) or [`shuffle_batch`](#shuffle_batch) if you want a
single thread producing examples to batch, or if you have a
single subgraph producing examples but you want to run it in *N* threads
(where you increase *N* until it can keep the queue full). Use
[`batch_join`](#batch_join) or [`shuffle_batch_join`](#shuffle_batch_join)
if you have *N* different subgraphs producing examples to batch and you
want them run by *N* threads.
@@batch
@@batch_join
@@shuffle_batch
@@shuffle_batch_join
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import gen_io_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_io_ops import *
# pylint: enable=wildcard-import
# pylint: disable=protected-access
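# A minimal sketch of how the pieces documented above are typically composed
# into an input pipeline (assuming the public ``tf.*`` aliases of the symbols
# listed in the module docstring; "data*.csv" is a hypothetical file pattern):
#
#   filename_queue = tf.train.string_input_producer(
#       tf.train.match_filenames_once("data*.csv"))
#   reader = tf.TextLineReader(skip_header_lines=1)
#   key, value = reader.read(filename_queue)
#   col1, col2 = tf.decode_csv(value, record_defaults=[[0.0], [0.0]])
#   feature_batch = tf.train.shuffle_batch(
#       [col1, col2], batch_size=32, capacity=1000, min_after_dequeue=500)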
def _save(filename, tensor_names, tensors, tensor_slices=None, name="save"):
"""Save a list of tensors to a file with given names.
Example usage without slice info:
Save("/foo/bar", ["w", "b"], [w, b])
Example usage with slices:
Save("/foo/bar", ["w", "w"], [slice0, slice1],
tensor_slices=["4 10 0,2:-", "4 10 2,2:-"])
Args:
filename: the file name of the sstable.
tensor_names: a list of strings.
tensors: the list of tensors to be saved.
tensor_slices: Optional list of strings to specify the shape and slices of
a larger virtual tensor that each tensor is a part of. If not specified
each tensor is saved as a full slice.
name: string. Optional name for the op.
Requires:
The length of tensors should match the size of tensor_names and of
tensor_slices.
Returns:
An Operation that saves the tensors.
"""
if tensor_slices is None:
return gen_io_ops._save(filename, tensor_names, tensors, name=name)
else:
return gen_io_ops._save_slices(filename, tensor_names, tensor_slices,
tensors, name=name)
def _restore_slice(file_pattern, tensor_name, shape_and_slice, tensor_type,
name="restore_slice", preferred_shard=-1):
"""Restore a tensor slice from a set of files with a given pattern.
Example usage:
RestoreSlice("/foo/bar-?????-of-?????", "w", "10 10 0,2:-", DT_FLOAT)
Args:
file_pattern: the file pattern used to match a set of checkpoint files.
tensor_name: the name of the tensor to restore.
shape_and_slice: the shape-and-slice spec of the slice.
tensor_type: the type of the tensor to restore.
name: string. Optional name for the op.
preferred_shard: Int. Optional shard to open first in the checkpoint file.
Returns:
A tensor of type "tensor_type".
"""
base_type = dtypes.as_dtype(tensor_type).base_dtype
return gen_io_ops._restore_slice(
file_pattern, tensor_name, shape_and_slice, base_type,
preferred_shard, name=name)
ops.RegisterShape("Restore")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("RestoreSlice")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Save")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SaveSlices")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ShardedFilename")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ShardedFilespec")(common_shapes.call_cpp_shape_fn)
class ReaderBase(object):
"""Base class for different Reader types, that produce a record every step.
Conceptually, Readers convert string 'work units' into records (key,
value pairs). Typically the 'work units' are filenames and the
records are extracted from the contents of those files. We want a
single record produced per step, but a work unit can correspond to
many records.
Therefore we introduce some decoupling using a queue. The queue
contains the work units and the Reader dequeues from the queue when
  it is asked to produce a record (via Read()) but has already finished
  the last work unit.
"""
def __init__(self, reader_ref, supports_serialize=False):
"""Creates a new ReaderBase.
Args:
reader_ref: The operation that implements the reader.
supports_serialize: True if the reader implementation can
serialize its state.
"""
self._reader_ref = reader_ref
self._supports_serialize = supports_serialize
@property
def reader_ref(self):
"""Op that implements the reader."""
return self._reader_ref
def read(self, queue, name=None):
"""Returns the next record (key, value pair) produced by a reader.
Will dequeue a work unit from queue if necessary (e.g. when the
Reader needs to start reading from a new file since it has
finished with the previous file).
Args:
queue: A Queue or a mutable string Tensor representing a handle
to a Queue, with string work items.
name: A name for the operation (optional).
Returns:
A tuple of Tensors (key, value).
key: A string scalar Tensor.
value: A string scalar Tensor.
"""
if isinstance(queue, ops.Tensor):
queue_ref = queue
else:
queue_ref = queue.queue_ref
return gen_io_ops._reader_read(self._reader_ref, queue_ref, name=name)
def read_up_to(self, queue, num_records, # pylint: disable=invalid-name
name=None):
"""Returns up to num_records (key, value pairs) produced by a reader.
Will dequeue a work unit from queue if necessary (e.g., when the
Reader needs to start reading from a new file since it has
finished with the previous file).
    It may return fewer than num_records even before the last batch.
Args:
queue: A Queue or a mutable string Tensor representing a handle
to a Queue, with string work items.
num_records: Number of records to read.
name: A name for the operation (optional).
Returns:
A tuple of Tensors (keys, values).
keys: A 1-D string Tensor.
values: A 1-D string Tensor.
"""
if isinstance(queue, ops.Tensor):
queue_ref = queue
else:
queue_ref = queue.queue_ref
return gen_io_ops._reader_read_up_to(self._reader_ref,
queue_ref,
num_records,
name=name)
def num_records_produced(self, name=None):
"""Returns the number of records this reader has produced.
This is the same as the number of Read executions that have
succeeded.
Args:
name: A name for the operation (optional).
Returns:
An int64 Tensor.
"""
return gen_io_ops._reader_num_records_produced(self._reader_ref, name=name)
def num_work_units_completed(self, name=None):
"""Returns the number of work units this reader has finished processing.
Args:
name: A name for the operation (optional).
Returns:
An int64 Tensor.
"""
return gen_io_ops._reader_num_work_units_completed(self._reader_ref,
name=name)
def serialize_state(self, name=None):
"""Produce a string tensor that encodes the state of a reader.
Not all Readers support being serialized, so this can produce an
Unimplemented error.
Args:
name: A name for the operation (optional).
Returns:
A string Tensor.
"""
return gen_io_ops._reader_serialize_state(self._reader_ref, name=name)
def restore_state(self, state, name=None):
"""Restore a reader to a previously saved state.
Not all Readers support being restored, so this can produce an
Unimplemented error.
Args:
state: A string Tensor.
Result of a SerializeState of a Reader with matching type.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
return gen_io_ops._reader_restore_state(self._reader_ref, state, name=name)
@property
def supports_serialize(self):
"""Whether the Reader implementation can serialize its state."""
return self._supports_serialize
def reset(self, name=None):
"""Restore a reader to its initial clean state.
Args:
name: A name for the operation (optional).
Returns:
The created Operation.
"""
return gen_io_ops._reader_reset(self._reader_ref, name=name)
ops.NotDifferentiable("ReaderRead")
ops.NotDifferentiable("ReaderReadUpTo")
ops.NotDifferentiable("ReaderNumRecordsProduced")
ops.NotDifferentiable("ReaderNumWorkUnitsCompleted")
ops.NotDifferentiable("ReaderSerializeState")
ops.NotDifferentiable("ReaderRestoreState")
ops.NotDifferentiable("ReaderReset")
class WholeFileReader(ReaderBase):
"""A Reader that outputs the entire contents of a file as a value.
To use, enqueue filenames in a Queue. The output of Read will
be a filename (key) and the contents of that file (value).
See ReaderBase for supported methods.
"""
def __init__(self, name=None):
"""Create a WholeFileReader.
Args:
name: A name for the operation (optional).
"""
rr = gen_io_ops._whole_file_reader(name=name)
super(WholeFileReader, self).__init__(rr, supports_serialize=True)
ops.NotDifferentiable("WholeFileReader")
class TextLineReader(ReaderBase):
"""A Reader that outputs the lines of a file delimited by newlines.
Newlines are stripped from the output.
See ReaderBase for supported methods.
"""
# TODO(josh11b): Support serializing and restoring state.
def __init__(self, skip_header_lines=None, name=None):
"""Create a TextLineReader.
Args:
skip_header_lines: An optional int. Defaults to 0. Number of lines
to skip from the beginning of every file.
name: A name for the operation (optional).
"""
rr = gen_io_ops._text_line_reader(skip_header_lines=skip_header_lines,
name=name)
super(TextLineReader, self).__init__(rr)
ops.NotDifferentiable("TextLineReader")
class FixedLengthRecordReader(ReaderBase):
"""A Reader that outputs fixed-length records from a file.
See ReaderBase for supported methods.
"""
# TODO(josh11b): Support serializing and restoring state.
def __init__(self, record_bytes, header_bytes=None, footer_bytes=None,
name=None):
"""Create a FixedLengthRecordReader.
Args:
record_bytes: An int.
header_bytes: An optional int. Defaults to 0.
footer_bytes: An optional int. Defaults to 0.
name: A name for the operation (optional).
"""
rr = gen_io_ops._fixed_length_record_reader(
record_bytes=record_bytes, header_bytes=header_bytes,
footer_bytes=footer_bytes, name=name)
super(FixedLengthRecordReader, self).__init__(rr)
ops.NotDifferentiable("FixedLengthRecordReader")
class TFRecordReader(ReaderBase):
"""A Reader that outputs the records from a TFRecords file.
See ReaderBase for supported methods.
"""
# TODO(josh11b): Support serializing and restoring state.
def __init__(self, name=None, options=None):
"""Create a TFRecordReader.
Args:
name: A name for the operation (optional).
options: A TFRecordOptions object (optional).
"""
compression_type = python_io.TFRecordOptions.get_compression_type_string(
options)
rr = gen_io_ops._tf_record_reader(
name=name, compression_type=compression_type)
super(TFRecordReader, self).__init__(rr)
ops.NotDifferentiable("TFRecordReader")
class IdentityReader(ReaderBase):
"""A Reader that outputs the queued work as both the key and value.
To use, enqueue strings in a Queue. Read will take the front
work string and output (work, work).
See ReaderBase for supported methods.
"""
def __init__(self, name=None):
"""Create a IdentityReader.
Args:
name: A name for the operation (optional).
"""
rr = gen_io_ops._identity_reader(name=name)
super(IdentityReader, self).__init__(rr, supports_serialize=True)
ops.NotDifferentiable("IdentityReader")
ops.RegisterShape("FixedLengthRecordReader")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IdentityReader")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("TextLineReader")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("WholeFileReader")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("TFRecordReader")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ReaderNumRecordsProduced")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ReaderNumWorkUnitsCompleted")(
common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ReaderSerializeState")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ReaderRead")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ReaderReadUpTo")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ReaderReset")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ReaderRestoreState")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ReadFile")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("MatchingFiles")(common_shapes.call_cpp_shape_fn)
|
|
#!/usr/bin/python
import pprint
MLAB_ORG_DOMAIN = 'measurement-lab.org'
def breakdown(host_index, v4prefix):
octet_list = v4prefix.split('.')
assert(len(octet_list) == 4)
net_prefix = ".".join(octet_list[0:3])
net_offset = int(octet_list[3])
mlab_offset = net_offset + ((host_index - 1) * 13) + 9
return (net_prefix, net_offset, mlab_offset)
def pl_interface(host_index, v4prefix):
(net_prefix, net_offset, mlab_offset) = breakdown(host_index, v4prefix)
interface={}
interface['type'] = 'ipv4'
interface['method'] = 'static'
interface['network'] = v4prefix
interface['dns1'] = '8.8.8.8'
interface['dns2'] = '8.8.4.4'
interface['netmask'] = '255.255.255.192'
interface['is_primary'] = True
interface['gateway'] = '%s.%d' % (net_prefix, net_offset + 1)
interface['broadcast'] = '%s.%d' % (net_prefix, net_offset + 63)
interface['ip'] = '%s.%d' % (net_prefix, mlab_offset)
return interface
def split_prefix(v4prefix):
octets = v4prefix.split('.')
assert(len(octets) == 4)
net_prefix = ".".join(octets[0:3])
net_offset = int(octets[3])
return (net_prefix, net_offset)
def ml_site_ipv4(v4prefix, index):
net_prefix, net_offset = split_prefix(v4prefix)
return '%s.%d' % (net_prefix, net_offset + index)
def pl_v6_iplist(host_index, v6prefix, last_octet):
mlab_offset = last_octet + ((host_index - 1) * 13) + 9
ret = []
for ip in range(mlab_offset + 1, mlab_offset + 13):
ret.append(v6prefix+str(ip))
return ret
def pl_v6_primary(host_index, v6prefix, last_octet):
mlab_offset = last_octet + ((host_index - 1) * 13) + 9
return v6prefix+str(mlab_offset)
def pl_iplist(host_index, v4prefix):
(net_prefix, net_offset, mlab_offset) = breakdown(host_index, v4prefix)
ret = []
for ip in range(mlab_offset + 1, mlab_offset + 13):
ret.append('%s.%s' % (net_prefix,ip))
return ret
def pl_dracip(host_index, v4prefix):
(net_prefix, net_offset, mlab_offset) = breakdown(host_index, v4prefix)
return '%s.%d' % (net_prefix, net_offset+3+host_index)
def pl_v6gw(v6prefix, v6gw=None):
return v6prefix + "1" if v6gw is None else v6gw
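# Illustrative sketch only (not part of the original module): shows the
# per-node addressing arithmetic above for a hypothetical /26 prefix. Each
# node owns a 13-address block; its primary IP sits 9 addresses past the
# block start and is followed by 12 secondary IPs for slices.
def _example_addressing():
    assert breakdown(1, '192.168.10.0') == ('192.168.10', 0, 9)
    assert breakdown(2, '192.168.10.0') == ('192.168.10', 0, 22)
    assert pl_interface(1, '192.168.10.0')['ip'] == '192.168.10.9'
    assert pl_iplist(1, '192.168.10.0') == [
        '192.168.10.%d' % i for i in range(10, 22)]
    assert pl_dracip(1, '192.168.10.0') == '192.168.10.4'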
class Location(dict):
def __init__(self, city, country, lat, lon, **kwargs):
self['city'] = city
self['country'] = country
self['latitude'] = lat
self['longitude'] = lon
super(Location, self).__init__(**kwargs)
class Network(dict):
"""The Network() object encapsulates the IP and network objects for the IPv4
and IPv6 settings for an M-Lab site.
Network() constructor expects two parameters:
v4 - string, a /26 IPv4 prefix i.e. 192.168.10.0
v6 - string, a /64 IPv6 prefix i.e. 2604:ca00:f000::
v6gw - optional, string, a specific gateway other than the
default, <prefix>::1
Attributes:
Network['v4'] is a NetworkIPv4() object
Network['v6'] is a NetworkIPv6() object
"""
legacy_network_remap = None
def __str__(self):
return pprint.pformat(self)
def __init__(self, **kwargs):
if 'v4' not in kwargs:
raise Exception("'v4' is a mandatory argument. i.e. 64.9.225.128")
if 'v6' not in kwargs:
msg = "'v6' is a mandatory argument. i.e. 2604:ca00:f000::"
raise Exception(msg)
if 'v6gw' not in kwargs:
kwargs['v6gw'] = None
kwargs['v4'] = NetworkIPv4(prefix=kwargs['v4'])
# Allow disabling IPv6
if kwargs['v6'] is not None:
kwargs['v6'] = NetworkIPv6(prefix=kwargs['v6'],
last_octet=kwargs['v4'].last(),
v6gw=kwargs['v6gw'])
super(Network, self).__init__(**kwargs)
class NetworkIPv6(dict):
"""The NetworkIPv6() object encapsulates operations for IPv6 network
configuration on M-Lab sites and nodes. It has built-in methods for
extracting per-node attributes and managing IP assignment to slices.
NetworkIPv6() constructor expects these parameters:
prefix - string, a /64 IPv6 prefix i.e. 2604:ca00:f000::
        last_octet - string, the last octet of the IPv4 site prefix. This
value is used to offset all addresses. Should be one of 0,
64, 128, or 192.
v6gw - a specific gateway, if None, defaults to <prefix>::1
"""
def __str__(self):
return pprint.pformat(self)
def __init__(self, **kwargs):
if 'prefix' not in kwargs:
msg = "'prefix' is a mandatory argument. i.e. 2604:ca00:f000::"
raise Exception(msg)
if 'last_octet' not in kwargs:
msg ="'last_octet' is a mandatory argument. i.e. if v4 "
msg+="prefix is 192.168.10.64 then last_octet is 64"
raise Exception(msg)
if 'v6gw' not in kwargs:
raise Exception("'v6gw' is a mandatory argument. Can be None.")
super(NetworkIPv6, self).__init__(**kwargs)
def ipv6_defaultgw(self):
""" Returns the IPv6 gateway as calculated from prefix & v6gw """
return pl_v6gw(self['prefix'], self['v6gw'])
def ipv6addr(self, host_index):
""" Returns the host IPv6 address for the host_index node; host_index
should be less than 4, the maximum number of nodes at a site."""
return pl_v6_primary(host_index, self['prefix'],
int(self['last_octet']))
def ipv6addr_secondaries(self, index):
""" Returns a list of 12 IPv6 addresses assigned to given host_index """
# NOTE: the natural, sorted order is re-ordered according to
# legacy_network_remap if present.
ipv6_list = pl_v6_iplist(index, self['prefix'], int(self['last_octet']))
if ( Network.legacy_network_remap is not None and
self['name'] in Network.legacy_network_remap and
index in Network.legacy_network_remap[self['name']] ):
site = self['name']
index_list = Network.legacy_network_remap[site][index].split(",")
re_order = [ ipv6_list[int(i)] for i in index_list ]
return re_order
return ipv6_list
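# Note on legacy_network_remap (illustrative values): it is a nested dict of
#   {site_name: {host_index: "comma,separated,indices"}}
# giving, per node, the order in which the naturally-sorted secondary IPs are
# reassigned. For example, with
#   Network.legacy_network_remap = {'nuq01': {1: '11,10,9,8,7,6,5,4,3,2,1,0'}}
# ipv6addr_secondaries(1) above (and NetworkIPv4.iplist(1) below) would return
# the twelve addresses for node 1 of site 'nuq01' in reverse order.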
class NetworkIPv4(dict):
"""The NetworkIPv4() object encapsulates the IP and network settings for an
M-Lab site. It has built-in methods for extracting per-node attributes and
managing IP assignment to slices.
NetworkIPv4() constructor expects these parameters:
prefix - string, a /26 IPv4 prefix i.e. 192.168.10.0
"""
def __str__(self):
return pprint.pformat(self)
def __init__(self, **kwargs):
if 'prefix' not in kwargs:
msg="'prefix' is a mandatory argument. i.e. 192.168.10.0"
raise Exception(msg)
super(NetworkIPv4, self).__init__(**kwargs)
def interface(self, index):
""" Returns the myPLC interface definition for the given host index"""
return pl_interface(index, self['prefix'])
def iplist(self, index):
""" Returns a list of 12 IPv4 addresses for the given host index """
ip_list = pl_iplist(index, self['prefix'])
if (Network.legacy_network_remap is not None and
self['name'] in Network.legacy_network_remap and
index in Network.legacy_network_remap[self['name']] ):
site = self['name']
index_list = Network.legacy_network_remap[site][index].split(",")
re_order = [ ip_list[int(i)] for i in index_list ]
return re_order
return ip_list
def drac(self, index):
""" Returns the IPv4 address reserved for the DRAC interface"""
return pl_dracip(index, self['prefix'])
def last(self):
""" Returns the last octet of 'prefix' """
l = self['prefix'].split('.')[3]
return int(l)
class Site(dict):
"""Site() - represents an M-Lab site. Also wraps the creation of site
Node()s and PCU()s.
Site() constructor expects:
name - a short name for a site, i.e. nuq01
net - a Network() object consisting of at least an IPv4 prefix
Optional:
count - the number of nodes at the site (default: 3)
nodegroup - the nodegroup with which to associate new nodes at
this site. (default: MeasurementLab)
users - a list of people to add as PI's for a new site.
(default: Stephen Stuart)
        login_base_prefix - a constant prefix to prepend to 'name'
(default: mlab).
"""
def __str__(self):
return pprint.pformat(self)
def __init__(self, **kwargs):
if 'name' not in kwargs:
raise Exception("'name' is a mandatory argument. i.e. nuq01, lga02")
if 'net' not in kwargs:
raise Exception("'net' is a mandatory argument.")
if 'count' not in kwargs:
kwargs['count'] = 3
if 'nodegroup' not in kwargs:
kwargs['nodegroup'] = 'MeasurementLab'
if 'users' not in kwargs:
kwargs['users'] = [ ("Stephen","Stuart","[email protected]") ]
if 'login_base_prefix' not in kwargs:
kwargs['login_base_prefix'] = 'mlab'
if 'location' not in kwargs:
kwargs['location'] = None
if 'arch' not in kwargs:
kwargs['arch'] = ''
if kwargs['net'] is not None:
if kwargs['net']['v4'] is not None:
kwargs['net']['v4']['name'] = kwargs['name']
if kwargs['net']['v6'] is not None:
kwargs['net']['v6']['name'] = kwargs['name']
else:
# 'net' is None only if there are no nodes for this site
assert(kwargs['count'] == 0)
if 'login_base' not in kwargs:
kwargs['login_base'] = '%s%s' % (kwargs['login_base_prefix'],
kwargs['name'])
kwargs['sitename'] = 'MLab - %s' % kwargs['name'].upper()
if 'nodes' not in kwargs:
kwargs['nodes'] = {}
for i in range(1,kwargs['count']+1):
p = PCU(name=kwargs['name'], index=i, net=kwargs['net'])
exclude_ipv6=True
if ( 'exclude' not in kwargs or
('exclude' in kwargs and i not in kwargs['exclude'])
):
exclude_ipv6=False
n = Node(name=kwargs['name'], index=i, net=kwargs['net'],
pcu=p, nodegroup=kwargs['nodegroup'],
arch=kwargs['arch'], exclude_ipv6=exclude_ipv6,
login_base=kwargs['login_base'])
kwargs['nodes'][n.hostname()] = n
super(Site, self).__init__(**kwargs)
def ipv4(self, index=0):
return ml_site_ipv4(self['net']['v4']['prefix'], index)
def makesite(name, v4prefix, v6prefix, city, country,
latitude, longitude, user_list, **kwargs):
v6gw=None # use default
if 'v6gw' in kwargs: # but, if provided
v6gw=kwargs['v6gw'] # save for Network() object below
del kwargs['v6gw'] # and don't pass to Site()
location=None
if city is not None and latitude is not None and longitude is not None:
# NOTE: only create Location() if city, lat, and long are specified.
location = Location(city, country, round(latitude, 4) , round(longitude, 4))
    # Don't allow a site to be created without this info -- unless it's a test site.
    elif name.find("0t") < 0:
        raise Exception("city, latitude and/or longitude were not specified")
return Site(name=name,
net=Network(v4=v4prefix, v6=v6prefix, v6gw=v6gw),
location=location,
users=user_list,
**kwargs)
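# Illustrative sketch only (values are hypothetical; the argument shapes follow
# the docstrings above):
def _example_makesite():
    site = makesite('nuq01', '192.168.10.0', '2604:ca00:f000::',
                    'Mountain View', 'US', 37.3861, -122.0839,
                    [('Jane', 'Doe', '[email protected]')])
    node = site['nodes']['mlab1.nuq01.measurement-lab.org']
    assert node.ipv4() == '192.168.10.9'
    assert node.ipv6() == '2604:ca00:f000::9'
    assert node['pcu'].ipv4() == '192.168.10.4'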
class PCU(dict):
""" PCU() - represents an M-Lab PCU at a parent Site()
PCU() constructor expects:
name - the site name, i.e. nuq01
net - the NetworkIPv4() object representing the parent site.
index - the host index for the machine this PCU is associated with.
Optional:
username - drac username (default: admin)
password - drac password (default: changeme)
model - drac model (default: DRAC)
"""
    def __str__(self):
        # fields() returns a dict; format it so __str__ returns a string.
        return pprint.pformat(self.fields())
def __init__(self, **kwargs):
if 'name' not in kwargs:
raise Exception("'name' is a mandatory argument. i.e. nuq01")
if 'net' not in kwargs:
raise Exception("'net' is a mandatory argument.")
if 'index' not in kwargs:
raise Exception("'index' is a mandatory argument. i.e. 1,2,3")
if 'username' not in kwargs:
kwargs['username'] = 'admin'
if 'password' not in kwargs:
kwargs['password'] = 'changeme'
if 'model' not in kwargs:
kwargs['model'] = 'DRAC'
super(PCU, self).__init__(**kwargs)
def hostname(self):
"""Returns the hostname for this DRAC."""
return '.'.join((self.recordname(), MLAB_ORG_DOMAIN))
def recordname(self):
"""Returns the DRAC resource record, e.g. hostname without domain."""
return "mlab%dd.%s" % (self['index'], self['name'])
def ipv4(self):
"""Returns the DRAC IPv4 address."""
return self['net']['v4'].drac(self['index'])
def fields(self):
"""Returns a dict() with the PCU values for use by myplc AddPCU() """
return { 'username': self['username'],
'password': self["password"], # password is updated later.
'model' : self['model'],
'ip' : self['net']['v4'].drac(self['index']),
'hostname': self.hostname() }
class Node(dict):
""" Node() - represents an M-Lab node at a parent Site().
Node() constructor expects these parameters:
name - the site name, i.e. nuq01
index - the host index for the machine this PCU is associated with.
net - the Network() object representing the parent site, will contain
both ipv4 & ipv6 information (if present).
exclude_ipv6 - whether or not to exclude ipv6 from the configuration
"""
def __str__(self):
return str({ 'interface' : self.interface(),
'iplist' : self.iplist(),
'iplistv6' : self.iplistv6(),
'pcu' : self['pcu'].fields()})
def __init__(self, **kwargs):
if 'name' not in kwargs:
raise Exception("'name' is a mandatory argument. i.e. FQDN")
if 'index' not in kwargs:
raise Exception("'index' is a mandatory argument. i.e. 1,2,3")
if 'net' not in kwargs:
raise Exception("'net' is a mandatory argument.")
if 'exclude_ipv6' not in kwargs:
raise Exception("'exclude_ipv6' is a mandatory argument.")
if 'login_base' not in kwargs:
kwargs['login_base'] = 'mlab%s' % kwargs['name']
kwargs['slicelist'] = []
kwargs['ipv6_secondary'] = []
super(Node, self).__init__(**kwargs)
def interface(self):
return self['net']['v4'].interface(self['index'])
def iplist(self):
return self['net']['v4'].iplist(self['index'])
def iplistv6(self):
return self['net']['v6'].ipv6addr_secondaries(self['index'])
def v4gw(self):
return self['net']['v4'].interface(self['index'])['gateway']
def v6gw(self):
return self['net']['v6'].ipv6_defaultgw()
def hostname(self):
"""Returns the Node FQDN."""
return '%s.%s' % (self.recordname(), MLAB_ORG_DOMAIN)
def recordname(self, decoration=''):
"""Returns the Node resource record, e.g. hostname without domain."""
return 'mlab%d%s.%s' % (self['index'], decoration, self['name'])
def ipv4(self):
"""Returns the Node primary IPv4 address."""
return self['net']['v4'].interface(self['index'])['ip']
def ipv6(self):
"""Returns the Node primary IPv6 address, if enabled."""
if self.ipv6_is_enabled():
return self['net']['v6'].ipv6addr(self['index'])
else:
return ''
def v6interface_tags(self):
secondary_list = self['net']['v6'].ipv6addr_secondaries(self['index'])
goal = {
"ipv6_defaultgw" : self['net']['v6'].ipv6_defaultgw(),
"ipv6addr" : self['net']['v6'].ipv6addr(self['index']),
"ipv6addr_secondaries" : " ".join(secondary_list)
}
# TODO: secondaries should be added after slices with ipv6 addresses
# are added, right?
return goal
def addslice(self, slicename):
if slicename not in self['slicelist']:
self['slicelist'].append(slicename)
def ipv6_is_enabled(self):
return (self['net'] is not None and self['net']['v6'] is not None)
def get_interface_attr(self, slice_obj):
""" Used to construct the Interface() object for this node in myplc """
attr = None
if slice_obj['index'] is None:
return None
ip_index = int(slice_obj['index'])
v4ip=self.iplist()[ip_index]
v4gw=self.v4gw()
v6ip=""
v6gw=""
ip_addresses = v4ip
# update values when the node and the slice have ipv6 enabled
if ( self.ipv6_is_enabled() and
slice_obj.ipv6_is_enabled(self.hostname())):
v6ip=self.iplistv6()[ip_index]
v6gw=self.v6gw()
ip_addresses = v4ip + "," + v6ip
if self['nodegroup'] in ['MeasurementLabLXC']:
ipv6_is_enabled = slice_obj.ipv6_is_enabled(self.hostname())
ipv6init = "yes" if ipv6_is_enabled else "no"
attr = Attr(self.hostname(),
interface=repr({'bridge':'public0',
'DEVICE':'eth0',
'BOOTPROTO':'static',
'ONBOOT':'yes',
'DNS1' : '8.8.8.8',
'DNS2' : '8.8.4.4',
'PRIMARY' : 'yes',
'NETMASK' : '255.255.255.192',
'IPADDR' : v4ip,
'GATEWAY' : v4gw,
'IPV6INIT' : ipv6init,
'IPV6ADDR' : v6ip,
'IPV6_DEFAULTGW' : v6gw,}))
elif self['nodegroup'] in ['MeasurementLab', 'MeasurementLabK32',
'MeasurementLabCentos']:
attr = Attr(self.hostname(), ip_addresses=ip_addresses)
else:
raise Exception("unknown nodegroup: %s" % self['nodegroup'])
return attr
class Attr(dict):
"""Attr() are attributes of a slice, i.e. a key=value pair.
Slice attributes apply key=value pairs to some context. Possible contexts
are 'all nodes', 'only a specific node', 'only a specific nodegroup'.
Attr() constructor expects one argument, and a key=value pair.
arg[0] - the context for this slice attribute; may be one of:
None - which represents all hosts
<hostname> - a hostname recognized by having a '.'
<nodegroup> - a nodegroup name, recognized by not having a '.'
key=value - the key and value are not arbitrary. The key must be one
of a pre-defined set of recognized keys defined by the
PlanetLab api. The value for a given key, should be a valid
value. Though, no type checking is performed here.
"""
def __init__(self, *args, **kwargs):
if len(args) != 1:
raise Exception(("The first argument should be the name "+
"of a NodeGroup, hostname, or None"))
if type(args[0]) == type(None):
kwargs['attrtype'] = 'all'
kwargs['all'] = True
if type(args[0]) == str:
if '.' in args[0]:
kwargs['attrtype'] = 'hostname'
kwargs['hostname'] = args[0]
else:
kwargs['attrtype'] = 'nodegroup'
kwargs['nodegroup'] = args[0]
super(Attr, self).__init__(**kwargs)
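# Illustrative examples of the three Attr() contexts (the hostname and values
# are hypothetical; the keys shown are the ones used elsewhere in this file):
#
#   Attr(None, ip_addresses='192.168.10.10')         -> attrtype 'all'
#   Attr('mlab1.nuq01.measurement-lab.org',
#        ip_addresses='192.168.10.10')               -> attrtype 'hostname'
#   Attr('MeasurementLab', interface=repr({...}))    -> attrtype 'nodegroup'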
class Slice(dict):
""" Slice() - represents an M-Lab slice. Provides an interface for passing
additional slice attributes, and associating IP addresses.
Slice() constructor expects the following parameters:
name - the slice name, i.e. 'iupui_ndt', or 'mlab_neubot'
Optional:
index - int, the index in the 12-slots for slices with IPv4 addresses.
attrs - [], a list of Attr() objects with attributes for this slice.
users - [], a list of three-tuples with (first, last, email) of users
that should already exist and be associated with this slice
use_initscript - bool, default is False. If True, use the
mlab_generic_initscript for this slice. The initscript
sets up the slice yum repos on first-creation to
automatically install a custom rpm package. In particular,
the rpm package should be named the same as the slicename.
For slice iupui_ndt, the initscript installs the custom
package called "iupui_ndt-*.rpm" automatically.
CAUTION: this attribute is applied per-node. Also, only to
nodes on which plsync is called explicitly.
ipv6 - how to enable IPv6 for this slice. Options are:
"all" - add IPv6 addres to all nodes
[] - a list of abbreviated hostnames and/or sites, i.e.,
['mlab1.nuq01', 'sea03', 'mia01', 'mlab3.den04', ...]
            None - do not enable IPv6 addressing anywhere.
"""
def __str__(self):
return "\n%s \n\t %s" % (self['name'], pprint.pformat(self))
def __init__(self, **kwargs):
if 'name' not in kwargs:
raise Exception(("The first argument should be the name "+
"of a NodeGroup, hostname, or None"))
if 'users' not in kwargs:
if 'index' in kwargs:
raise Exception("An indexed slice must have a users list")
else:
# NOTE: but non-indexed slices don't need a users list.
kwargs['users'] = None
if 'index' not in kwargs:
kwargs['index'] = None
if 'use_initscript' not in kwargs:
kwargs['use_initscript'] = False
if 'ipv6' not in kwargs:
# None means ipv6 is OFF by default
kwargs['ipv6'] = None
else:
if type(kwargs['ipv6']) == str:
kwargs['ipv6'] = "all"
elif type(kwargs['ipv6']) == type([]):
ipv6_hosts = []
for host in kwargs['ipv6']:
# This is a node (e.g., mlab1.sea02)
if len(host.split('.')) == 2:
ipv6_hosts.append('%s.%s' % (host, MLAB_ORG_DOMAIN))
# This is a site (e.g., sea02)
elif len(host.split('.')) == 1:
for node in ['mlab1', 'mlab2', 'mlab3', 'mlab4']:
ipv6_hosts.append('%s.%s.%s' % (node, host,
MLAB_ORG_DOMAIN))
# We don't know what this is. Raise an error.
else:
raise Exception("Unrecognized node/site for ipv6"+
"parameter: %s" % host)
kwargs['ipv6'] = ipv6_hosts
else:
raise Exception("Unrecognized type for ipv6 parameter: %s" %
type(kwargs['ipv6']))
if 'rsync_modules' not in kwargs:
kwargs['rsync_modules'] = []
if 'attrs' not in kwargs:
kwargs['attrs'] = []
kwargs['network_list'] = []
super(Slice, self).__init__(**kwargs)
def dnsname(self):
"""Returns the slice name in DNS form, e.g. group_name to name.group"""
fields = self['name'].split('_')
# When a name has multiple '_', rejoin all parts after the group name.
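        # Examples (slice names are illustrative): 'iupui_ndt' -> 'ndt.iupui';
        # with multiple underscores, 'gt_partner_tool' -> 'partner.tool.gt'.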
return '.'.join(fields[1:] + fields[:1])
def hostname(self, node):
"""Returns the FQDN for a slice on the given node."""
return '.'.join((self.dnsname(), node.hostname()))
def recordname(self, server, decoration=''):
"""Returns the Slice resource record, e.g. hostname without domain."""
return '%s.%s' % (self.dnsname(), server.recordname(decoration))
def sitename(self, server, decoration=''):
"""Returns the FQDN for a slice, without the machine name."""
return '%s%s.%s' % (self.dnsname(), decoration, server['name'])
def ipv4(self, node):
"""Returns the IPv4 address for the slice on the given node."""
return node.iplist()[self['index']]
def ipv6(self, node):
"""Returns the IPv6 address for the slice on the given node."""
if (node.ipv6_is_enabled() and self.ipv6_is_enabled(node.hostname())):
return node.iplistv6()[self['index']]
else:
return ""
def add_node_address(self, node):
self['network_list'].append((node.hostname(), node))
def ipv6_is_enabled(self, hostname):
return ((isinstance(self['ipv6'], list) and hostname in self['ipv6']) or
(isinstance(self['ipv6'], str) and "all" == self['ipv6']) )
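# A minimal sketch of a Slice definition (the user tuple is hypothetical;
# 'iupui_ndt' is the example slice name used in the docstring above):
#
#   ndt = Slice(name='iupui_ndt', index=0, ipv6="all",
#               users=[('Jane', 'Doe', '[email protected]')])
#   ndt.dnsname()        ->  'ndt.iupui'
#   ndt.ipv4(node)       ->  node.iplist()[0], the slice's IPv4 on that node
#   ndt.hostname(node)   ->  'ndt.iupui.' + node.hostname()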
|
|
from bika.lims.browser import BrowserView
from bika.lims.interfaces import IAnalysis
from bika.lims.interfaces import IFieldIcons
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t, isnumber
from bika.lims import logger
from bika.lims.utils import to_utf8
from Products.Archetypes.config import REFERENCE_CATALOG
from Products.CMFCore.utils import getToolByName
from Products.PythonScripts.standard import html_quote
from bika.lims.utils.analysis import format_numeric_result
from zope.component import adapts
from zope.component import getAdapters
from zope.interface import implements
import json
import math
import plone.protect
class CalculationResultAlerts(object):
"""This uses IAnalysis.ResultOutOfRange on values in request.
To validate results at ajax calculation time, make more adapters like this
one, from IFieldIcons. Any existing IAnalysis/IFieldIcon adapters
(AnalysisOutOfRange) have already been called.
"""
adapts(IAnalysis)
implements(IFieldIcons)
def __init__(self, context):
self.context = context
def __call__(self, result=None, specification=None, **kwargs):
workflow = getToolByName(self.context, 'portal_workflow')
astate = workflow.getInfoFor(self.context, 'review_state')
if astate == 'retracted':
return {}
result = self.context.getResult() if result is None else result
alerts = {}
path = '++resource++bika.lims.images'
uid = self.context.UID()
try:
indet = result.startswith("<") or result.startswith(">")
except AttributeError:
indet = False
if indet:
alert = {'field': 'Result',
'icon': path + '/exclamation.png',
'msg': t(_("Indeterminate result"))}
if uid in alerts:
alerts[uid].append(alert)
else:
alerts[uid] = [alert, ]
return alerts
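# A sketch of an additional validation adapter following the pattern described
# in the docstring above (class name, threshold and message are hypothetical;
# registering it as a named adapter happens in ZCML and is not shown):
#
#   class NegativeResultAlert(object):
#       adapts(IAnalysis)
#       implements(IFieldIcons)
#
#       def __init__(self, context):
#           self.context = context
#
#       def __call__(self, result=None, **kwargs):
#           result = self.context.getResult() if result is None else result
#           try:
#               negative = float(result) < 0
#           except (TypeError, ValueError):
#               return {}
#           if not negative:
#               return {}
#           alert = {'field': 'Result',
#                    'icon': '++resource++bika.lims.images/exclamation.png',
#                    'msg': t(_("Negative result"))}
#           return {self.context.UID(): [alert]}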
class ajaxCalculateAnalysisEntry(BrowserView):
""" This view is called by javascript when an analysis' result or interim
field value is entered. Returns a JSON dictionary, or None if no
action is required or possible.
"""
def __init__(self, context, request):
self.context = context
self.request = request
def calculate(self, uid=None):
analysis = self.analyses[uid]
form_result = self.current_results[uid]['result']
service = analysis.getService()
calculation = service.getCalculation()
if analysis.portal_type == 'ReferenceAnalysis':
deps = {}
else:
deps = {}
for dep in analysis.getDependencies():
deps[dep.UID()] = dep
path = '++resource++bika.lims.images'
mapping = {}
# values to be returned to form for this UID
Result = {'uid': uid, 'result': form_result, 'result_str': self.value}
try:
Result['result'] = float(form_result)
except:
if form_result == "0/0":
Result['result'] = ""
if calculation:
'''
            We first need to create the map of available parameters
            according to the interims, analyses and wildcards:
params = {
<as-1-keyword> : <analysis_result>,
<as-1-keyword>.<wildcard-1> : <wildcard_1_value>,
<as-1-keyword>.<wildcard-2> : <wildcard_2_value>,
<interim-1> : <interim_result>,
...
}
'''
# Get dependent analyses results and wildcard values to the
# mapping. If dependent analysis without result found,
# break and abort calculation
unsatisfied = False
for dependency_uid, dependency in deps.items():
if dependency_uid in self.ignore_uids:
unsatisfied = True
break
# LIMS-1769. Allow to use LDL and UDL in calculations.
# https://jira.bikalabs.com/browse/LIMS-1769
analysisvalues = {}
if dependency_uid in self.current_results:
analysisvalues = self.current_results[dependency_uid]
else:
# Retrieve the result and DLs from the analysis
analysisvalues = {
'keyword': dependency.getKeyword(),
'result': dependency.getResult(),
'ldl': dependency.getLowerDetectionLimit(),
'udl': dependency.getUpperDetectionLimit(),
'belowldl': dependency.isBelowLowerDetectionLimit(),
'aboveudl': dependency.isAboveUpperDetectionLimit(),
}
                if analysisvalues['result'] == '':
                    unsatisfied = True
                    break
key = analysisvalues.get('keyword',dependency.getService().getKeyword())
# Analysis result
# All result mappings must be float, or they are ignored.
try:
mapping[key] = float(analysisvalues.get('result'))
mapping['%s.%s' % (key, 'RESULT')] = float(analysisvalues.get('result'))
mapping['%s.%s' % (key, 'LDL')] = float(analysisvalues.get('ldl'))
mapping['%s.%s' % (key, 'UDL')] = float(analysisvalues.get('udl'))
mapping['%s.%s' % (key, 'BELOWLDL')] = int(analysisvalues.get('belowldl'))
mapping['%s.%s' % (key, 'ABOVEUDL')] = int(analysisvalues.get('aboveudl'))
except:
# If not floatable, then abort!
unsatisfied = True
break
if unsatisfied:
# unsatisfied means that one or more result on which we depend
# is blank or unavailable, so we set blank result and abort.
self.results.append({'uid': uid,
'result': '',
'formatted_result': ''})
return None
# Add all interims to mapping
for i_uid, i_data in self.item_data.items():
for i in i_data:
# if this interim belongs to current analysis and is blank,
# return an empty result for this analysis.
if i_uid == uid and i['value'] == '':
self.results.append({'uid': uid,
'result': '',
'formatted_result': ''})
return None
# All interims must be float, or they are ignored.
try:
i['value'] = float(i['value'])
except:
pass
# all interims are ServiceKeyword.InterimKeyword
if i_uid in deps:
key = "%s.%s" % (deps[i_uid].getService().getKeyword(),
i['keyword'])
mapping[key] = i['value']
# this analysis' interims get extra reference
# without service keyword prefix
if uid == i_uid:
mapping[i['keyword']] = i['value']
            # Grab values for hidden InterimFields, but only for the current calculation;
# we can't allow non-floats through here till we change the eval's
# interpolation
hidden_fields = []
c_fields = calculation.getInterimFields()
s_fields = service.getInterimFields()
for field in c_fields:
if field.get('hidden', False):
hidden_fields.append(field['keyword'])
try:
mapping[field['keyword']] = float(field['value'])
except ValueError:
pass
# also grab stickier defaults from AnalysisService
for field in s_fields:
if field['keyword'] in hidden_fields:
try:
mapping[field['keyword']] = float(field['value'])
except ValueError:
pass
# convert formula to a valid python string, ready for interpolation
formula = calculation.getMinifiedFormula()
formula = formula.replace('[', '%(').replace(']', ')f')
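            # Illustrative example (hypothetical analysis keywords): a stored
            # formula "[Ca] + [Mg]" becomes the template "%(Ca)f + %(Mg)f";
            # with mapping = {'Ca': 1.5, 'Mg': 2.0} the first eval() below
            # interpolates it to the string "1.500000 + 2.000000" and the
            # second eval() computes the numeric result 3.5.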
try:
formula = eval("'%s'%%mapping" % formula,
{"__builtins__": None,
'math': math,
'context': self.context},
{'mapping': mapping})
# calculate
result = eval(formula)
Result['result'] = result
self.current_results[uid]['result'] = result
except TypeError as e:
# non-numeric arguments in interim mapping?
alert = {'field': 'Result',
'icon': path + '/exclamation.png',
'msg': "{0}: {1} ({2}) ".format(
t(_("Type Error")),
html_quote(str(e.args[0])),
formula)}
if uid in self.alerts:
self.alerts[uid].append(alert)
else:
self.alerts[uid] = [alert, ]
except ZeroDivisionError as e:
Result['result'] = '0/0'
Result['formatted_result'] = '0/0'
self.current_results[uid]['result'] = '0/0'
self.results.append(Result)
alert = {'field': 'Result',
'icon': path + '/exclamation.png',
'msg': "{0}: {1} ({2}) ".format(
t(_("Division by zero")),
html_quote(str(e.args[0])),
formula)}
if uid in self.alerts:
self.alerts[uid].append(alert)
else:
self.alerts[uid] = [alert, ]
return None
except KeyError as e:
alert = {'field': 'Result',
'icon': path + '/exclamation.png',
'msg': "{0}: {1} ({2}) ".format(
t(_("Key Error")),
html_quote(str(e.args[0])),
formula)}
if uid in self.alerts:
self.alerts[uid].append(alert)
else:
self.alerts[uid] = [alert, ]
if analysis.portal_type == 'ReferenceAnalysis':
            # The analysis is a Control or Blank. We might use the
            # reference results instead of other specs.
uid = analysis.getServiceUID()
specs = analysis.aq_parent.getResultsRangeDict().get(uid, {})
else:
# Get the specs directly from the analysis. The getResultsRange
# function already takes care about which are the specs to be used:
# AR, client or lab.
specs = analysis.getResultsRange()
# format result
belowmin = False
abovemax = False
hidemin = specs.get('hidemin', '')
hidemax = specs.get('hidemax', '')
if Result.get('result', ''):
fresult = Result['result']
try:
belowmin = hidemin and fresult < float(hidemin) or False
except ValueError:
belowmin = False
pass
try:
abovemax = hidemax and fresult > float(hidemax) or False
except ValueError:
abovemax = False
pass
if belowmin is True:
Result['formatted_result'] = '< %s' % hidemin
elif abovemax is True:
Result['formatted_result'] = '> %s' % hidemax
else:
try:
Result['formatted_result'] = format_numeric_result(analysis,
Result['result'])
except ValueError:
# non-float
Result['formatted_result'] = Result['result']
# calculate Dry Matter result
# if parent is not an AR, it's never going to be calculable
dm = hasattr(analysis.aq_parent, 'getReportDryMatter') and \
analysis.aq_parent.getReportDryMatter() and \
analysis.getService().getReportDryMatter()
if dm:
dry_service = self.context.bika_setup.getDryMatterService()
# get the UID of the DryMatter Analysis from our parent AR
dry_analysis = [a for a in
analysis.aq_parent.getAnalyses(full_objects=True)
if a.getService().UID() == dry_service.UID()]
if dry_analysis:
dry_analysis = dry_analysis[0]
dry_uid = dry_analysis.UID()
# get the current DryMatter analysis result from the form
if dry_uid in self.current_results:
try:
dry_result = float(self.current_results[dry_uid])
except:
dm = False
else:
try:
dry_result = float(dry_analysis.getResult())
except:
dm = False
else:
dm = False
Result['dry_result'] = dm and dry_result and \
'%.2f' % ((Result['result'] / dry_result) * 100) or ''
self.results.append(Result)
# if App.config.getConfiguration().debug_mode:
# logger.info("calc.py: %s->%s %s" % (analysis.aq_parent.id,
# analysis.id,
# Result))
# LIMS-1808 Uncertainty calculation on DL
# https://jira.bikalabs.com/browse/LIMS-1808
flres = Result.get('result', None)
if flres and isnumber(flres):
flres = float(flres)
anvals = self.current_results[uid]
isldl = anvals.get('isldl', False)
isudl = anvals.get('isudl', False)
ldl = anvals.get('ldl',0)
udl = anvals.get('udl',0)
ldl = float(ldl) if isnumber(ldl) else 0
udl = float(udl) if isnumber(udl) else 10000000
belowldl = (isldl or flres < ldl)
aboveudl = (isudl or flres > udl)
unc = '' if (belowldl or aboveudl) else analysis.getUncertainty(Result.get('result'))
if not (belowldl or aboveudl):
self.uncertainties.append({'uid': uid, 'uncertainty': unc})
        # maybe a service that depends on us must be recalculated.
if analysis.portal_type == 'ReferenceAnalysis':
dependents = []
else:
dependents = analysis.getDependents()
if dependents:
for dependent in dependents:
dependent_uid = dependent.UID()
# ignore analyses that no longer exist.
if dependent_uid in self.ignore_uids or \
dependent_uid not in self.analyses:
continue
self.calculate(dependent_uid)
# These self.alerts are just for the json return.
# we're placing the entire form's results in kwargs.
adapters = getAdapters((analysis, ), IFieldIcons)
for name, adapter in adapters:
alerts = adapter(result=Result['result'], form_results=self.current_results)
if alerts:
if analysis.UID() in self.alerts:
self.alerts[analysis.UID()].extend(alerts[analysis.UID()])
else:
self.alerts[analysis.UID()] = alerts[analysis.UID()]
def __call__(self):
self.rc = getToolByName(self.context, REFERENCE_CATALOG)
plone.protect.CheckAuthenticator(self.request)
plone.protect.PostOnly(self.request)
self.spec = self.request.get('specification', None)
# information about the triggering element
uid = self.request.get('uid')
self.field = self.request.get('field')
self.value = self.request.get('value')
self.current_results = json.loads(self.request.get('results'))
form_results = json.loads(self.request.get('results'))
self.item_data = json.loads(self.request.get('item_data'))
        # these get sent back to the javascript
self.alerts = {}
self.uncertainties = []
self.results = []
self.services = {}
self.analyses = {}
# ignore these analyses if objects no longer exist
self.ignore_uids = []
for analysis_uid, result in self.current_results.items():
analysis = self.rc.lookupObject(analysis_uid)
if not analysis:
self.ignore_uids.append(analysis_uid)
continue
self.analyses[analysis_uid] = analysis
if uid not in self.ignore_uids:
self.calculate(uid)
results = []
for result in self.results:
if result['uid'] in form_results.keys() and \
result['result'] != form_results[result['uid']]:
results.append(result)
return json.dumps({'alerts': self.alerts,
'uncertainties': self.uncertainties,
'results': results})
class ajaxGetMethodCalculation(BrowserView):
""" Returns the calculation assigned to the defined method.
uid: unique identifier of the method
"""
def __call__(self):
plone.protect.CheckAuthenticator(self.request)
calcdict = {}
uc = getToolByName(self, 'uid_catalog')
method = uc(UID=self.request.get("uid", '0'))
if method and len(method) == 1:
calc = method[0].getObject().getCalculation()
if calc:
calcdict = {'uid': calc.UID(),
'title': calc.Title()}
return json.dumps(calcdict)
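# Illustrative note (not part of the original code): ajaxCalculateAnalysisEntry
# returns a JSON payload shaped roughly like the following, where 'alerts'
# maps analysis UIDs to the alert dicts produced by the IFieldIcons adapters
# (their exact keys depend on the adapter) and 'results' carries the entries
# built above ('uid', 'result', 'formatted_result', 'dry_result', ...):
#
#   {"alerts": {"<analysis_uid>": [{...}]},
#    "uncertainties": [{"uid": "<analysis_uid>", "uncertainty": "0.1"}],
#    "results": [{"uid": "<analysis_uid>", "result": 5.0,
#                 "formatted_result": "5.00", "dry_result": ""}]}
#
# ajaxGetMethodCalculation returns either {} or
# {"uid": "<calculation_uid>", "title": "<calculation title>"}.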
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ocw import dataset as ds
import datetime
import numpy as np
import numpy.ma as ma
import scipy.interpolate
import scipy.ndimage
from scipy.ndimage import map_coordinates
import netCDF4
import logging
logger = logging.getLogger(__name__)
def temporal_subset(month_start, month_end, target_dataset, average_each_year=False):
""" Temporally subset data given month_index.
:param month_start: An integer for beginning month (Jan=1)
:type month_start: :class:`int`
:param month_end: An integer for ending month (Jan=1)
:type month_end: :class:`int`
:param target_dataset: Dataset object that needs temporal subsetting
:type target_dataset: Open Climate Workbench Dataset Object
:param average_each_year: If True, output dataset is averaged for each year
    :type average_each_year: :class:`bool`
:returns: A temporal subset OCW Dataset
:rtype: Open Climate Workbench Dataset Object
"""
if month_start > month_end:
month_index = range(month_start,13)
month_index.extend(range(1, month_end+1))
else:
month_index = range(month_start, month_end+1)
dates = target_dataset.times
months = np.array([d.month for d in dates])
time_index = []
for m_value in month_index:
time_index = np.append(time_index, np.where(months == m_value)[0])
if m_value == month_index[0]:
time_index_first = np.min(np.where(months == m_value)[0])
if m_value == month_index[-1]:
time_index_last = np.max(np.where(months == m_value)[0])
time_index = np.sort(time_index)
time_index = time_index[np.where((time_index >= time_index_first) & (time_index <= time_index_last))]
time_index = list(time_index)
new_dataset = ds.Dataset(target_dataset.lats,
target_dataset.lons,
target_dataset.times[time_index],
target_dataset.values[time_index,:],
variable=target_dataset.variable,
units=target_dataset.units,
name=target_dataset.name)
if average_each_year:
nmonth = len(month_index)
ntime = new_dataset.times.size
nyear = ntime/nmonth
averaged_time = []
ny, nx = target_dataset.values.shape[1:]
averaged_values =ma.zeros([nyear, ny, nx])
for iyear in np.arange(nyear):
# centered time index of the season between month_start and month_end in each year
center_index = int(nmonth/2)+iyear*nmonth
if nmonth == 1:
center_index = iyear
averaged_time.append(new_dataset.times[center_index])
averaged_values[iyear,:] = ma.average(new_dataset.values[nmonth*iyear:nmonth*iyear+nmonth,:], axis=0)
new_dataset = ds.Dataset(target_dataset.lats,
target_dataset.lons,
np.array(averaged_time),
averaged_values,
variable=target_dataset.variable,
units=target_dataset.units,
name=target_dataset.name)
return new_dataset
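# Illustrative sketch (not part of the original module): calling
# temporal_subset() on a small synthetic Dataset. The ds.Dataset constructor
# arguments mirror the positional order used elsewhere in this file
# (lats, lons, times, values); the toy grid and values are made up.
def _temporal_subset_example():
    lats = np.array([-10., -5., 0., 5.])
    lons = np.array([0., 5., 10., 15.])
    times = np.array([datetime.datetime(2000, month, 1) for month in range(1, 13)])
    values = ma.zeros([len(times), len(lats), len(lons)])
    toy = ds.Dataset(lats, lons, times, values,
                     variable='tas', units='K', name='toy')
    # Keep June through August; average_each_year collapses each JJA
    # season into a single (seasonal-mean) time step.
    return temporal_subset(6, 8, toy, average_each_year=True)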
def temporal_rebin(target_dataset, temporal_resolution):
""" Rebin a Dataset to a new temporal resolution
:param target_dataset: Dataset object that needs temporal rebinned
:type target_dataset: :class:`dataset.Dataset`
:param temporal_resolution: The new temporal bin size
:type temporal_resolution: :class:`datetime.timedelta`
:returns: A new temporally rebinned Dataset
:rtype: :class:`dataset.Dataset`
"""
# Decode the temporal resolution into a string format that
# _rcmes_calc_average_on_new_time_unit_K() can understand
day_count = temporal_resolution.days
time_unit = None
if day_count == 1:
time_unit = 'daily'
elif day_count > 1 and day_count <= 31:
time_unit = 'monthly'
elif day_count > 31 and day_count <= 366:
time_unit = 'annual'
else:
time_unit = 'full'
masked_values = target_dataset.values.view(ma.MaskedArray)
binned_values, binned_dates = _rcmes_calc_average_on_new_time_unit_K(masked_values, target_dataset.times, time_unit)
binned_dates = np.array(binned_dates)
new_dataset = ds.Dataset(target_dataset.lats,
target_dataset.lons,
binned_dates,
binned_values,
variable=target_dataset.variable,
units=target_dataset.units,
name=target_dataset.name,
origin=target_dataset.origin)
return new_dataset
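# Illustrative sketch (not part of the original module): the timedelta passed
# to temporal_rebin() is only used through its .days attribute, so a delta of
# 1 day selects 'daily', 2-31 days 'monthly', 32-366 days 'annual', and
# anything longer 'full'. The daily_dataset argument is assumed to be a
# Dataset with daily time steps.
def _temporal_rebin_example(daily_dataset):
    monthly = temporal_rebin(daily_dataset, datetime.timedelta(days=30))
    annual = temporal_rebin(daily_dataset, datetime.timedelta(days=365))
    return monthly, annual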
def spatial_regrid(target_dataset, new_latitudes, new_longitudes):
""" Regrid a Dataset using the new latitudes and longitudes
:param target_dataset: Dataset object that needs spatially regridded
:type target_dataset: :class:`dataset.Dataset`
:param new_latitudes: Array of latitudes
:type new_latitudes: :class:`numpy.ndarray`
:param new_longitudes: Array of longitudes
:type new_longitudes: :class:`numpy.ndarray`
:returns: A new spatially regridded Dataset
:rtype: :class:`dataset.Dataset`
"""
# Create grids of the given lats and lons for the underlying API
# NOTE: np.meshgrid() requires inputs (x, y) and returns data
# of shape(y|lat|rows, x|lon|columns). So we pass in lons, lats
# and get back data.shape(lats, lons)
if target_dataset.lons.ndim ==1 and target_dataset.lats.ndim ==1:
lons, lats = np.meshgrid(target_dataset.lons, target_dataset.lats)
else:
lons = target_dataset.lons
lats = target_dataset.lats
if new_longitudes.ndim ==1 and new_latitudes.ndim ==1:
new_lons, new_lats = np.meshgrid(new_longitudes, new_latitudes)
else:
new_lons = new_longitudes
new_lats = new_latitudes
# Make masked array of shape (times, new_latitudes,new_longitudes)
new_values = ma.zeros([len(target_dataset.times),
new_lats.shape[0],
new_lons.shape[1]])
# Convert all lats and lons into Numpy Masked Arrays
lats = ma.array(lats)
lons = ma.array(lons)
new_lats = ma.array(new_lats)
new_lons = ma.array(new_lons)
target_values = ma.array(target_dataset.values)
# Call _rcmes_spatial_regrid on each time slice
for i in range(len(target_dataset.times)):
new_values[i] = _rcmes_spatial_regrid(target_values[i],
lats,
lons,
new_lats,
new_lons)
# TODO:
# This will call down to the _congrid() function and the lat and lon
# axis will be adjusted with the time axis being held constant
# Create a new Dataset Object to return using new data
regridded_dataset = ds.Dataset(new_latitudes,
new_longitudes,
target_dataset.times,
new_values,
variable=target_dataset.variable,
units=target_dataset.units,
name=target_dataset.name,
origin=target_dataset.origin)
return regridded_dataset
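# Illustrative sketch (not part of the original module): regridding onto a
# coarser 1-degree grid. new_latitudes and new_longitudes can be plain 1-D
# arrays because spatial_regrid() builds the 2-D meshes itself; the target
# dataset argument is assumed to be an already-loaded Dataset.
def _spatial_regrid_example(target_dataset):
    new_lats = np.arange(-45., 46., 1.)
    new_lons = np.arange(-90., 91., 1.)
    return spatial_regrid(target_dataset, new_lats, new_lons)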
def ensemble(datasets):
"""
Generate a single dataset which is the mean of the input datasets
    An ensemble dataset combines the input datasets, assuming they all have
    the same shape, dimensions, and units.
:param datasets: Datasets to be used to compose the ensemble dataset from.
All Datasets must be the same shape.
:type datasets: :class:`list` of :class:`dataset.Dataset`
:returns: New Dataset with a name of 'Dataset Ensemble'
:rtype: :class:`dataset.Dataset`
"""
_check_dataset_shapes(datasets)
dataset_values = [dataset.values for dataset in datasets]
ensemble_values = ma.mean(dataset_values, axis=0)
# Build new dataset object from the input datasets and the ensemble values and return it
ensemble_dataset = ds.Dataset(datasets[0].lats,
datasets[0].lons,
datasets[0].times,
ensemble_values,
units=datasets[0].units,
name="Dataset Ensemble")
return ensemble_dataset
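# Illustrative sketch (not part of the original module): an ensemble mean of
# several model runs. The runs are first regridded onto a common grid so
# that _check_dataset_shapes() does not raise; the argument names are made up.
def _ensemble_example(model_runs, common_lats, common_lons):
    regridded = [spatial_regrid(run, common_lats, common_lons)
                 for run in model_runs]
    return ensemble(regridded)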
def subset(subregion, target_dataset, subregion_name=None):
'''Subset given dataset(s) with subregion information
:param subregion: The Bounds with which to subset the target Dataset.
:type subregion: :class:`dataset.Bounds`
:param target_dataset: The Dataset object to subset.
:type target_dataset: :class:`dataset.Dataset`
:param subregion_name: The subset-ed Dataset name
:type subregion_name: :mod:`string`
:returns: The subset-ed Dataset object
:rtype: :class:`dataset.Dataset`
:raises: ValueError
'''
if not subregion.start:
subregion.start = target_dataset.times[0]
subregion.end = target_dataset.times[-1]
# Ensure that the subregion information is well formed
_are_bounds_contained_by_dataset(subregion, target_dataset)
# Get subregion indices into subregion data
dataset_slices = _get_subregion_slice_indices(subregion, target_dataset)
if not subregion_name:
subregion_name = target_dataset.name
# Slice the values array with our calculated slice indices
if target_dataset.values.ndim == 2:
subset_values = ma.zeros([len(target_dataset.values[
dataset_slices["lat_start"]:dataset_slices["lat_end"]]),
len(target_dataset.values[
dataset_slices["lon_start"]:dataset_slices["lon_end"]])])
subset_values = target_dataset.values[
dataset_slices["lat_start"]:dataset_slices["lat_end"] + 1,
dataset_slices["lon_start"]:dataset_slices["lon_end"] + 1]
elif target_dataset.values.ndim == 3:
subset_values = ma.zeros([len(target_dataset.values[
dataset_slices["time_start"]:dataset_slices["time_end"]]),
len(target_dataset.values[
dataset_slices["lat_start"]:dataset_slices["lat_end"]]),
len(target_dataset.values[
dataset_slices["lon_start"]:dataset_slices["lon_end"]])])
subset_values = target_dataset.values[
dataset_slices["time_start"]:dataset_slices["time_end"] + 1,
dataset_slices["lat_start"]:dataset_slices["lat_end"] + 1,
dataset_slices["lon_start"]:dataset_slices["lon_end"] + 1]
# Build new dataset with subset information
return ds.Dataset(
# Slice the lats array with our calculated slice indices
target_dataset.lats[dataset_slices["lat_start"]:
dataset_slices["lat_end"] + 1],
# Slice the lons array with our calculated slice indices
target_dataset.lons[dataset_slices["lon_start"]:
dataset_slices["lon_end"] + 1],
# Slice the times array with our calculated slice indices
target_dataset.times[dataset_slices["time_start"]:
dataset_slices["time_end"]+ 1],
# Slice the values array with our calculated slice indices
subset_values,
variable=target_dataset.variable,
units=target_dataset.units,
name=subregion_name,
origin=target_dataset.origin
)
def safe_subset(subregion, target_dataset, subregion_name=None):
'''Safely subset given dataset with subregion information
A standard subset requires that the provided subregion be entirely contained
    within the dataset's bounds. `safe_subset` returns the overlap of the
subregion and dataset without returning an error.
:param subregion: The Bounds with which to subset the target Dataset.
:type subregion: :class:`dataset.Bounds`
:param target_dataset: The Dataset object to subset.
:type target_dataset: :class:`dataset.Dataset`
:param subregion_name: The subset-ed Dataset name
:type subregion_name: :mod:`string`
:returns: The subset-ed Dataset object
:rtype: :class:`dataset.Dataset`
'''
lat_min, lat_max, lon_min, lon_max = target_dataset.spatial_boundaries()
start, end = target_dataset.time_range()
if subregion.lat_min < lat_min:
subregion.lat_min = lat_min
if subregion.lat_max > lat_max:
subregion.lat_max = lat_max
if subregion.lon_min < lon_min:
subregion.lon_min = lon_min
if subregion.lon_max > lon_max:
subregion.lon_max = lon_max
if subregion.start:
if subregion.start < start:
subregion.start = start
if subregion.end:
if subregion.end > end:
subregion.end = end
return subset(subregion, target_dataset, subregion_name)
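# Illustrative sketch (assumption, not in the original module): subsetting
# with a Bounds object. The keyword names mirror the attributes read above
# (lat_min, lat_max, lon_min, lon_max, start, end), but the exact ds.Bounds
# constructor signature is an assumption about the ocw API.
def _safe_subset_example(target_dataset):
    tropics = ds.Bounds(lat_min=-10., lat_max=10., lon_min=20., lon_max=40.,
                        start=datetime.datetime(2000, 1, 1),
                        end=datetime.datetime(2005, 12, 31))
    # safe_subset() clips the bounds to the dataset instead of raising.
    return safe_subset(tropics, target_dataset, subregion_name='tropics')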
def normalize_dataset_datetimes(dataset, timestep):
''' Normalize Dataset datetime values.
Force daily to an hour time value of 00:00:00.
Force monthly data to the first of the month at midnight.
:param dataset: The Dataset which will have its time value normalized.
:type dataset: :class:`dataset.Dataset`
:param timestep: The timestep of the Dataset's values. Either 'daily' or
'monthly'.
:type timestep: :mod:`string`
:returns: A new Dataset with normalized datetime values.
:rtype: :class:`dataset.Dataset`
'''
new_times = _rcmes_normalize_datetimes(dataset.times, timestep)
return ds.Dataset(
dataset.lats,
dataset.lons,
np.array(new_times),
dataset.values,
variable=dataset.variable,
units=dataset.units,
name=dataset.name,
origin=dataset.origin
)
def write_netcdf(dataset, path, compress=True):
''' Write a dataset to a NetCDF file.
:param dataset: The dataset to write.
:type dataset: :class:`dataset.Dataset`
:param path: The output file path.
:type path: :mod:`string`
'''
out_file = netCDF4.Dataset(path, 'w', format='NETCDF4')
    # Set attribute lengths
lat_len = len(dataset.lats)
lon_len = len(dataset.lons)
time_len = len(dataset.times)
# Create attribute dimensions
lat_dim = out_file.createDimension('lat', lat_len)
lon_dim = out_file.createDimension('lon', lon_len)
time_dim = out_file.createDimension('time', time_len)
# Create variables
lats = out_file.createVariable('lat', 'f8', ('lat',), zlib=compress)
lons = out_file.createVariable('lon', 'f8', ('lon',), zlib=compress)
times = out_file.createVariable('time', 'f8', ('time',), zlib=compress)
var_name = dataset.variable if dataset.variable else 'var'
values = out_file.createVariable(var_name,
'f8',
('time', 'lat', 'lon'),
zlib=compress)
# Set the time variable units
# We don't deal with hourly/minutely/anything-less-than-a-day data so
# we can safely stick with a 'days since' offset here. Note that the
# NetCDF4 helper date2num doesn't support 'months' or 'years' instead
# of days.
times.units = "days since %s" % dataset.times[0]
# Store the dataset's values
lats[:] = dataset.lats
lons[:] = dataset.lons
times[:] = netCDF4.date2num(dataset.times, times.units)
values[:] = dataset.values
values.units = dataset.units
out_file.close()
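# Illustrative sketch (not part of the original module): writing a Dataset to
# disk and reading the stored variable back with netCDF4. The output path is
# made up; write_netcdf() creates variables named 'lat', 'lon', 'time' and
# one named after dataset.variable (or 'var' when that is unset).
def _write_netcdf_example(dataset, path='/tmp/ocw_example.nc'):
    write_netcdf(dataset, path, compress=True)
    nc = netCDF4.Dataset(path)
    try:
        var_name = dataset.variable if dataset.variable else 'var'
        stored = nc.variables[var_name][:]
    finally:
        nc.close()
    return stored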
def write_netcdf_multiple_datasets_with_subregions(ref_dataset, ref_name,
model_dataset_array, model_names,
path,
subregions = None, subregion_array = None,
ref_subregion_mean = None, ref_subregion_std = None,
model_subregion_mean = None, model_subregion_std = None):
    # Write multiple reference and model datasets and their subregional means
    # and standard deviations to a NetCDF file.
    # TODO: replace this comment block with a proper docstring.
out_file = netCDF4.Dataset(path, 'w', format='NETCDF4')
dataset = ref_dataset
    # Set attribute lengths
nobs = 1
nmodel = len(model_dataset_array)
lat_len = len(dataset.lats)
lon_len = len(dataset.lons)
time_len = len(dataset.times)
    if subregions is not None:
nsubregion = len(subregions)
# Create attribute dimensions
lat_dim = out_file.createDimension('y', lat_len)
lon_dim = out_file.createDimension('x', lon_len)
time_dim = out_file.createDimension('time', time_len)
# Create variables and store the values
lats = out_file.createVariable('lat', 'f8', ('y'))
lats[:] = dataset.lats
lons = out_file.createVariable('lon', 'f8', ('x'))
lons[:] = dataset.lons
times = out_file.createVariable('time', 'f8', ('time',))
times.units = "days since %s" % dataset.times[0]
times[:] = netCDF4.date2num(dataset.times, times.units)
#mask_array = np.zeros([time_len, lat_len, lon_len])
#for iobs in np.arange(nobs):
# index = np.where(ref_dataset_array[iobs].values.mask[:] == True)
# mask_array[index] = 1
out_file.createVariable(ref_name, 'f8', ('time','y','x'))
out_file.variables[ref_name][:] = ref_dataset.values
out_file.variables[ref_name].units = ref_dataset.units
for imodel in np.arange(nmodel):
out_file.createVariable(model_names[imodel], 'f8', ('time','y','x'))
#out_file.variables[model_names[imodel]][:] = ma.array(model_dataset_array[imodel].values, mask = mask_array)
out_file.variables[model_names[imodel]][:] = model_dataset_array[imodel].values
out_file.variables[model_names[imodel]].units = model_dataset_array[imodel].units
    if subregions is not None:
out_file.createVariable('subregion_array', 'i4', ('y','x'))
out_file.variables['subregion_array'][:] = subregion_array[:]
nsubregion = len(subregions)
out_file.createDimension('nsubregion', nsubregion)
out_file.createDimension('nobs', nobs)
out_file.createDimension('nmodel', nmodel)
out_file.createVariable('obs_subregion_mean', 'f8', ('nobs','time','nsubregion'))
out_file.variables['obs_subregion_mean'][:] = ref_subregion_mean[:]
out_file.createVariable('obs_subregion_std', 'f8', ('nobs','time','nsubregion'))
out_file.variables['obs_subregion_std'][:] = ref_subregion_std[:]
out_file.createVariable('model_subregion_mean', 'f8', ('nmodel','time','nsubregion'))
out_file.variables['model_subregion_mean'][:] = model_subregion_mean[:]
out_file.createVariable('model_subregion_std', 'f8', ('nmodel','time','nsubregion'))
out_file.variables['model_subregion_std'][:] = model_subregion_std[:]
out_file.close()
def water_flux_unit_conversion(dataset):
''' Convert water flux variables units as necessary
Convert full SI units water flux units to more common units.
:param dataset: The dataset to convert.
:type dataset: :class:`dataset.Dataset`
:returns: A Dataset with values converted to new units.
:rtype: :class:`dataset.Dataset`
'''
water_flux_variables = ['pr', 'prec','evspsbl', 'mrro', 'swe']
variable = dataset.variable.lower()
if any(sub_string in variable for sub_string in water_flux_variables):
dataset_units = dataset.units.lower()
        if 'swe' in variable:
            if any(unit in dataset_units for unit in ['m', 'meter']):
                dataset.values = 1.e3 * dataset.values
                dataset.units = 'mm'
else:
if any(unit in dataset_units
for unit in ['kg m-2 s-1', 'mm s-1', 'mm/sec']):
dataset.values = 86400. * dataset.values
dataset.units = 'mm/day'
return dataset
def temperature_unit_conversion(dataset):
''' Convert temperature units as necessary
    Automatically convert Celsius to Kelvin in the given dataset.
:param dataset: The dataset for which units should be updated.
    :type dataset: :class:`dataset.Dataset`
:returns: The dataset with (potentially) updated units.
:rtype: :class:`dataset.Dataset`
'''
    temperature_variables = ['temp', 'tas', 'tasmax', 'tasmin', 'T']
variable = dataset.variable.lower()
if any(sub_string in variable for sub_string in temperature_variables):
dataset_units = dataset.units.lower()
if dataset_units == 'c':
dataset.values = 273.15 + dataset.values
dataset.units = 'K'
return dataset
def variable_unit_conversion(dataset):
''' Convert water flux or temperature variables units as necessary
For water flux variables, convert full SI units water flux units to more common units.
    For temperature, convert Celsius to Kelvin.
:param dataset: The dataset to convert.
:type dataset: :class:`dataset.Dataset`
:returns: A Dataset with values converted to new units.
:rtype: :class:`dataset.Dataset`
'''
dataset = water_flux_unit_conversion(dataset)
dataset = temperature_unit_conversion(dataset)
return dataset
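# Illustrative sketch (not part of the original module): the Celsius-to-Kelvin
# branch applied to a toy dataset. The Dataset construction mirrors the
# constructor calls used elsewhere in this file and the 2x2 grid is made up.
def _unit_conversion_example():
    lats = np.array([0., 1.])
    lons = np.array([0., 1.])
    times = np.array([datetime.datetime(2000, 1, 1)])
    values = ma.ones([1, 2, 2]) * 20.          # 20 degrees Celsius everywhere
    toy = ds.Dataset(lats, lons, times, values,
                     variable='tas', units='C', name='toy')
    toy = variable_unit_conversion(toy)
    # toy.units is now 'K' and toy.values holds 293.15
    return toy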
def _rcmes_normalize_datetimes(datetimes, timestep):
""" Normalize Dataset datetime values.
Force daily to an hour time value of 00:00:00.
Force monthly data to the first of the month at midnight.
:param datetimes: The datetimes to normalize.
:type datetimes: List of `datetime` values.
:param timestep: The flag for how to normalize the datetimes.
:type timestep: String
"""
normalDatetimes = []
if timestep.lower() == 'monthly':
for inputDatetime in datetimes:
if inputDatetime.day != 1:
# Clean the inputDatetime
inputDatetimeString = inputDatetime.strftime('%Y%m%d')
normalInputDatetimeString = inputDatetimeString[:6] + '01'
inputDatetime = datetime.datetime.strptime(normalInputDatetimeString, '%Y%m%d')
normalDatetimes.append(inputDatetime)
elif timestep.lower() == 'daily':
for inputDatetime in datetimes:
if inputDatetime.hour != 0 or inputDatetime.minute != 0 or inputDatetime.second != 0:
datetimeString = inputDatetime.strftime('%Y%m%d%H%M%S')
normalDatetimeString = datetimeString[:8] + '000000'
inputDatetime = datetime.datetime.strptime(normalDatetimeString, '%Y%m%d%H%M%S')
normalDatetimes.append(inputDatetime)
return normalDatetimes
def mask_missing_data(dataset_array):
''' Check missing values in observation and model datasets.
If any of dataset in dataset_array has missing values at a grid point,
the values at the grid point in all other datasets are masked.
:param dataset_array: an array of OCW datasets
'''
mask_array = np.zeros(dataset_array[0].values.shape)
for dataset in dataset_array:
index = np.where(dataset.values.mask == True)
if index[0].size >0:
mask_array[index] = 1
masked_array = []
for dataset in dataset_array:
dataset.values = ma.array(dataset.values, mask=mask_array)
masked_array.append(dataset)
    return masked_array
def _rcmes_spatial_regrid(spatial_values, lat, lon, lat2, lon2, order=1):
'''
Spatial regrid from one set of lat,lon values onto a new set (lat2,lon2)
:param spatial_values: Values in a spatial grid that need to be regridded
:type spatial_values: 2d masked numpy array. shape (latitude, longitude)
:param lat: Grid of latitude values which map to the spatial values
:type lat: 2d numpy array. shape(latitudes, longitudes)
:param lon: Grid of longitude values which map to the spatial values
:type lon: 2d numpy array. shape(latitudes, longitudes)
:param lat2: Grid of NEW latitude values to regrid the spatial_values onto
:type lat2: 2d numpy array. shape(latitudes, longitudes)
:param lon2: Grid of NEW longitude values to regrid the spatial_values onto
:type lon2: 2d numpy array. shape(latitudes, longitudes)
:param order: Interpolation order flag. 1=bi-linear, 3=cubic spline
:type order: [optional] Integer
:returns: 2d masked numpy array with shape(len(lat2), len(lon2))
    :rtype: 2d masked numpy array
'''
nlat = spatial_values.shape[0]
nlon = spatial_values.shape[1]
#print nlat, nlon, "lats, lons - incoming dataset"
nlat2 = lat2.shape[0]
nlon2 = lon2.shape[1]
#print nlat2, nlon2, "NEW lats, lons - for the new grid output"
# To make our lives easier down the road, let's
# turn these into arrays of x & y coords
loni = lon2.ravel()
lati = lat2.ravel()
loni = loni.copy() # NB. it won't run unless you do this...
lati = lati.copy()
# Now, we'll set points outside the boundaries to lie along an edge
loni[loni > lon.max()] = lon.max()
loni[loni < lon.min()] = lon.min()
# To deal with the "hard" break, we'll have to treat y differently,
# so we're just setting the min here...
lati[lati > lat.max()] = lat.max()
lati[lati < lat.min()] = lat.min()
    # We need to convert these to (float) indices
# (xi should range from 0 to (nx - 1), etc)
loni = (nlon - 1) * (loni - lon.min()) / (lon.max() - lon.min())
# Deal with the "hard" break in the y-direction
lati = (nlat - 1) * (lati - lat.min()) / (lat.max() - lat.min())
"""
TODO: Review this docstring and see if it still holds true.
NOTE: This function doesn't use MDI currently. These are legacy comments
Notes on dealing with MDI when regridding data.
Method adopted here:
Use bilinear interpolation of data by default (but user can specify other order using order=... in call)
Perform bilinear interpolation of data, and of mask.
To be conservative, new grid point which contained some missing data on the old grid is set to missing data.
-this is achieved by looking for any non-zero interpolated mask values.
To avoid issues with bilinear interpolation producing strong gradients leading into the MDI,
set values at MDI points to mean data value so little gradient visible = not ideal, but acceptable for now.
Set values in MDI so that similar to surroundings so don't produce large gradients when interpolating
Preserve MDI mask, by only changing data part of masked array object.
"""
for shift in (-1, 1):
for axis in (0, 1):
q_shifted = np.roll(spatial_values, shift=shift, axis=axis)
idx = ~q_shifted.mask * spatial_values.mask
spatial_values.data[idx] = q_shifted[idx]
# Now we actually interpolate
# map_coordinates does cubic interpolation by default,
    # use "order=1" to perform bilinear interpolation instead...
regridded_values = map_coordinates(spatial_values, [lati, loni], order=order)
regridded_values = regridded_values.reshape([nlat2, nlon2])
# Set values to missing data outside of original domain
regridded_values = ma.masked_array(regridded_values, mask=np.logical_or(np.logical_or(lat2 >= lat.max(),
lat2 <= lat.min()),
np.logical_or(lon2 <= lon.min(),
lon2 >= lon.max())))
# Make second map using nearest neighbour interpolation -use this to determine locations with MDI and mask these
qmdi = np.zeros_like(spatial_values)
qmdi[spatial_values.mask == True] = 1.
qmdi[spatial_values.mask == False] = 0.
qmdi_r = map_coordinates(qmdi, [lati, loni], order=order)
qmdi_r = qmdi_r.reshape([nlat2, nlon2])
mdimask = (qmdi_r != 0.0)
    # Combine missing data mask with the outside-domain mask defined above.
regridded_values.mask = np.logical_or(mdimask, regridded_values.mask)
return regridded_values
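# Illustrative sketch (not part of the original module): the key step above is
# that map_coordinates() samples an array at fractional *index* positions, so
# the target lats/lons are first rescaled into the source grid's index range
# 0..(n - 1). The toy below bilinearly samples a 3x3 field at its centre.
def _map_coordinates_example():
    field = np.arange(9, dtype=float).reshape(3, 3)
    rows = np.array([1.0])     # fractional row index
    cols = np.array([1.0])     # fractional column index
    return map_coordinates(field, [rows, cols], order=1)   # -> array([4.0])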
def _rcmes_create_mask_using_threshold(masked_array, threshold=0.5):
'''Mask an array if percent of values missing data is above a threshold.
For each value along an axis, if the proportion of steps that are missing
data is above ``threshold`` then the value is marked as missing data.
    .. note:: The 0th axis is currently always used.
:param masked_array: Masked array of data
:type masked_array: Numpy Masked Array
:param threshold: (optional) Threshold proportion above which a value is
marked as missing data.
:type threshold: Float
:returns: A Numpy array describing the mask for masked_array.
'''
# try, except used as some model files don't have a full mask, but a single bool
# the except catches this situation and deals with it appropriately.
try:
nT = masked_array.mask.shape[0]
# For each pixel, count how many times are masked.
nMasked = masked_array.mask[:, :, :].sum(axis=0)
# Define new mask as when a pixel has over a defined threshold ratio of masked data
# e.g. if the threshold is 75%, and there are 10 times,
# then a pixel will be masked if more than 5 times are masked.
mymask = nMasked > (nT * threshold)
    except (AttributeError, IndexError):
mymask = np.zeros_like(masked_array.data[0, :, :])
return mymask
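# Illustrative sketch (not part of the original module): with a threshold of
# 0.5 and four time steps, a grid cell is flagged once more than two of its
# time steps are masked.
def _threshold_mask_example():
    data = ma.array(np.ones([4, 2, 2]))
    data[0:3, 0, 0] = ma.masked      # cell (0, 0): 3 of 4 steps missing
    data[0, 1, 1] = ma.masked        # cell (1, 1): 1 of 4 steps missing
    mask = _rcmes_create_mask_using_threshold(data, threshold=0.5)
    # mask[0, 0] is True, every other cell is False
    return mask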
def _rcmes_calc_average_on_new_time_unit_K(data, dates, unit):
""" Rebin 3d array and list of dates using the provided unit parameter
:param data: Input data that needs to be averaged
:type data: 3D masked numpy array of shape (times, lats, lons)
:param dates: List of dates that correspond to the given data values
:type dates: Python datetime objects
:param unit: Time unit to average the data into
:type unit: String matching one of these values : full | annual | monthly | daily
:returns: meanstorem, newTimesList
:rtype: 3D numpy masked array the same shape as the input array, list of python datetime objects
"""
# Check if the user-selected temporal grid is valid. If not, EXIT
    acceptable = unit in ('full', 'annual', 'monthly', 'daily')
    if not acceptable:
        print 'Error: unknown unit type selected for time averaging: EXIT'
        return -1, -1
# Calculate arrays of: annual timeseries: year (2007,2007),
# monthly time series: year-month (200701,200702),
# daily timeseries: year-month-day (20070101,20070102)
# depending on user-selected averaging period.
# Year list
if unit=='annual':
timeunits = np.array([int(d.strftime("%Y")) for d in dates])
unique_times = np.unique(timeunits)
# YearMonth format list
if unit=='monthly':
timeunits = np.array([int(d.strftime("%Y%m")) for d in dates])
unique_times = np.unique(timeunits)
# YearMonthDay format list
if unit=='daily':
timeunits = np.array([int(d.strftime("%Y%m%d")) for d in dates])
unique_times = np.unique(timeunits)
# TODO: add pentad setting using Julian days?
# Full list: a special case
if unit == 'full':
        # Calculating the mean over the entire time range: i.e., annual-mean climatology
timeunits = []
for i in np.arange(len(dates)):
timeunits.append(999) # i.e. we just want the same value for all times.
timeunits = np.array(timeunits, dtype=int)
unique_times = np.unique(timeunits)
# empty list to store new times
newTimesList = []
# Decide whether or not you need to do any time averaging.
# i.e. if data are already on required time unit then just pass data through and
# calculate and return representative datetimes.
processing_required = True
if len(timeunits)==(len(unique_times)):
processing_required = False
# 1D data arrays, i.e. time series
if data.ndim==1:
# Create array to store the resulting data
meanstore = np.zeros(len(unique_times))
# Calculate the means across each unique time unit
i=0
for myunit in unique_times:
if processing_required:
datam=ma.masked_array(data,timeunits!=myunit)
meanstore[i] = ma.average(datam)
# construct new times list
yyyy, mm, dd = _create_new_year_month_day(myunit, dates)
newTimesList.append(datetime.datetime(yyyy,mm,dd,0,0,0,0))
i = i+1
# 3D data arrays
if data.ndim==3:
# Create array to store the resulting data
meanstore = np.zeros([len(unique_times),data.shape[1],data.shape[2]])
# Calculate the means across each unique time unit
i=0
datamask_store = []
for myunit in unique_times:
if processing_required:
mask = np.zeros_like(data)
mask[timeunits!=myunit,:,:] = 1.0
# Calculate missing data mask within each time unit...
datamask_at_this_timeunit = np.zeros_like(data)
datamask_at_this_timeunit[:]= _rcmes_create_mask_using_threshold(data[timeunits==myunit,:,:],threshold=0.75)
# Store results for masking later
datamask_store.append(datamask_at_this_timeunit[0])
# Calculate means for each pixel in this time unit, ignoring missing data (using masked array).
datam = ma.masked_array(data,np.logical_or(mask,datamask_at_this_timeunit))
meanstore[i,:,:] = ma.average(datam,axis=0)
# construct new times list
yyyy, mm, dd = _create_new_year_month_day(myunit, dates)
newTimesList.append(datetime.datetime(yyyy,mm,dd))
i += 1
if not processing_required:
meanstorem = data
if processing_required:
# Create masked array (using missing data mask defined above)
datamask_store = np.array(datamask_store)
meanstorem = ma.masked_array(meanstore, datamask_store)
return meanstorem, newTimesList
def _create_new_year_month_day(time_unit, dates):
smyunit = str(time_unit)
if len(smyunit)==4: # YYYY
yyyy = int(smyunit[0:4])
mm = 1
dd = 1
elif len(smyunit)==6: # YYYYMM
yyyy = int(smyunit[0:4])
mm = int(smyunit[4:6])
dd = 1
elif len(smyunit)==8: # YYYYMMDD
yyyy = int(smyunit[0:4])
mm = int(smyunit[4:6])
dd = int(smyunit[6:8])
elif len(smyunit)==3: # Full time range
# Need to set an appropriate time representing the mid-point of the entire time span
dt = dates[-1]-dates[0]
halfway = dates[0]+(dt/2)
yyyy = int(halfway.year)
mm = int(halfway.month)
dd = int(halfway.day)
return (yyyy, mm, dd)
def _congrid(a, newdims, method='linear', centre=False, minusone=False):
'''
This function is from http://wiki.scipy.org/Cookbook/Rebinning - Example 3
It has been refactored and changed a bit, but the original functionality
has been preserved.
Arbitrary resampling of source array to new dimension sizes.
Currently only supports maintaining the same number of dimensions.
To use 1-D arrays, first promote them to shape (x,1).
Uses the same parameters and creates the same co-ordinate lookup points
    as IDL's congrid routine, which apparently originally came from a VAX/VMS
routine of the same name.
method:
neighbour - closest value from original data
nearest and linear - uses n x 1-D interpolations using
scipy.interpolate.interp1d
(see Numerical Recipes for validity of use of n 1-D interpolations)
spline - uses ndimage.map_coordinates
centre:
True - interpolation points are at the centres of the bins
False - points are at the front edge of the bin
minusone:
For example- inarray.shape = (i,j) & new dimensions = (x,y)
False - inarray is resampled by factors of (i/x) * (j/y)
True - inarray is resampled by(i-1)/(x-1) * (j-1)/(y-1)
This prevents extrapolation one element beyond bounds of input array.
'''
if not a.dtype in [np.float64, np.float32]:
a = np.cast[float](a)
# this will merely take the True/False input and convert it to an array(1) or array(0)
m1 = np.cast[int](minusone)
# this also casts the True False input into a floating point number of 0.5 or 0.0
ofs = np.cast[int](centre) * 0.5
old = np.array( a.shape )
ndims = len( a.shape )
if len( newdims ) != ndims:
print "[congrid] dimensions error. " \
"This routine currently only supports " \
"rebinning to the same number of dimensions."
return None
newdims = np.asarray( newdims, dtype=float )
dimlist = []
if method == 'neighbour':
newa = _congrid_neighbor(a, newdims, m1, ofs)
elif method in ['nearest','linear']:
# calculate new dims
for i in range( ndims ):
base = np.arange( newdims[i] )
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
# specify old dims
olddims = [np.arange(i, dtype = np.float) for i in list( a.shape )]
# first interpolation - for ndims = any
mint = scipy.interpolate.interp1d( olddims[-1], a, kind=method )
newa = mint( dimlist[-1] )
trorder = [ndims - 1] + range( ndims - 1 )
for i in range( ndims - 2, -1, -1 ):
newa = newa.transpose( trorder )
mint = scipy.interpolate.interp1d( olddims[i], newa, kind=method )
newa = mint( dimlist[i] )
if ndims > 1:
# need one more transpose to return to original dimensions
newa = newa.transpose( trorder )
return newa
elif method in ['spline']:
oslices = [ slice(0, j) for j in old ]
oldcoords = np.ogrid[oslices]
nslices = [ slice(0, j) for j in list(newdims) ]
newcoords = np.mgrid[nslices]
newcoords_dims = range(np.rank(newcoords))
#make first index last
newcoords_dims.append(newcoords_dims.pop(0))
newcoords_tr = newcoords.transpose(newcoords_dims)
# makes a view that affects newcoords
newcoords_tr += ofs
deltas = (np.asarray(old) - m1) / (newdims - m1)
newcoords_tr *= deltas
newcoords_tr -= ofs
newa = scipy.ndimage.map_coordinates(a, newcoords)
return newa
else:
print "Congrid error: Unrecognized interpolation type.\n", \
"Currently only \'neighbour\', \'nearest\',\'linear\',", \
"and \'spline\' are supported."
return None
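# Illustrative usage sketch (not part of the original module): resampling a
# 4x4 field onto a 7x7 grid with the 'linear' method. minusone=True keeps the
# new coordinate lookups within the source index range (0..3 here), so every
# other new sample falls exactly on a source point.
def _congrid_example():
    field = np.arange(16, dtype=float).reshape(4, 4)
    return _congrid(field, (7, 7), method='linear', minusone=True)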
def _check_dataset_shapes(datasets):
""" If the datasets are not the same shape throw a ValueError Exception
:param datasets: OCW Datasets to check for a consistent shape
:type datasets: List of OCW Dataset Objects
:raises: ValueError
"""
dataset_shape = None
for dataset in datasets:
        if dataset_shape is None:
            dataset_shape = dataset.values.shape
        elif dataset.values.shape != dataset_shape:
            msg = "%s != %s" % (dataset.values.shape, dataset_shape)
            raise ValueError("Input datasets must be the same shape for an ensemble :: %s" % msg)
def _congrid_neighbor(values, new_dims, minus_one, offset):
""" Use the nearest neighbor to create a new array
:param values: Array of values that need to be interpolated
:type values: Numpy ndarray
    :param new_dims: New dimension sizes to resample the values onto
    :type new_dims: sequence of int
    :param minus_one: Flag matching _congrid's minusone parameter (0 or 1)
    :param offset: Bin-centre offset matching _congrid's centre parameter
    :returns: A new array resampled with nearest-neighbour lookups
    :rtype: Numpy ndarray
"""
ndims = len( values.shape )
dimlist = []
old_dims = np.array( values.shape )
for i in range( ndims ):
base = np.indices(new_dims)[i]
dimlist.append( (old_dims[i] - minus_one) / (new_dims[i] - minus_one) \
* (base + offset) - offset )
cd = np.array( dimlist ).round().astype(int)
new_values = values[list( cd )]
return new_values
def _are_bounds_contained_by_dataset(bounds, dataset):
'''Check if a Dataset fully contains a bounds.
:param bounds: The Bounds object to check.
:type bounds: Bounds
:param dataset: The Dataset that should be fully contain the
Bounds
:type dataset: Dataset
:returns: True if the Bounds are contained by the Dataset, Raises
a ValueError otherwise
'''
lat_min, lat_max, lon_min, lon_max = dataset.spatial_boundaries()
start, end = dataset.time_range()
errors = []
# TODO: THIS IS TERRIBLY inefficent and we need to use a geometry lib instead in the future
if not lat_min <= bounds.lat_min <= lat_max:
error = "bounds.lat_min: %s is not between lat_min: %s and lat_max: %s" % (bounds.lat_min, lat_min, lat_max)
errors.append(error)
if not lat_min <= bounds.lat_max <= lat_max:
error = "bounds.lat_max: %s is not between lat_min: %s and lat_max: %s" % (bounds.lat_max, lat_min, lat_max)
errors.append(error)
if not lon_min <= bounds.lon_min <= lon_max:
error = "bounds.lon_min: %s is not between lon_min: %s and lon_max: %s" % (bounds.lon_min, lon_min, lon_max)
errors.append(error)
if not lon_min <= bounds.lon_max <= lon_max:
error = "bounds.lon_max: %s is not between lon_min: %s and lon_max: %s" % (bounds.lon_max, lon_min, lon_max)
errors.append(error)
if not start <= bounds.start <= end:
error = "bounds.start: %s is not between start: %s and end: %s" % (bounds.start, start, end)
errors.append(error)
if not start <= bounds.end <= end:
error = "bounds.end: %s is not between start: %s and end: %s" % (bounds.end, start, end)
errors.append(error)
if len(errors) == 0:
return True
else:
error_message = '\n'.join(errors)
raise ValueError(error_message)
def _get_subregion_slice_indices(subregion, target_dataset):
'''Get the indices for slicing Dataset values to generate the subregion.
:param subregion: The Bounds that specify the subset of the Dataset
that should be extracted.
:type subregion: Bounds
:param target_dataset: The Dataset to subset.
:type target_dataset: Dataset
:returns: The indices to slice the Datasets arrays as a Dictionary.
'''
latStart = min(np.nonzero(target_dataset.lats >= subregion.lat_min)[0])
latEnd = max(np.nonzero(target_dataset.lats <= subregion.lat_max)[0])
lonStart = min(np.nonzero(target_dataset.lons >= subregion.lon_min)[0])
lonEnd = max(np.nonzero(target_dataset.lons <= subregion.lon_max)[0])
timeStart = min(np.nonzero(target_dataset.times >= subregion.start)[0])
timeEnd = max(np.nonzero(target_dataset.times <= subregion.end)[0])
return {
"lat_start" : latStart,
"lat_end" : latEnd,
"lon_start" : lonStart,
"lon_end" : lonEnd,
"time_start" : timeStart,
"time_end" : timeEnd
}
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
%prog demo
Illustrate three different types of alignments.
- Pairwise sequence alignment, aka, "dot plot"
- Read alignment, similar to the visualization of a BAM file
- Optical map alignment, matchings between restriction fragments
"""
import sys
from bisect import bisect
from random import choice, randint
from jcvi.utils.iter import pairwise
from jcvi.utils.range import range_overlap
from jcvi.graphics.chromosome import Chromosome, HorizontalChromosome
from jcvi.graphics.glyph import BaseGlyph, GeneGlyph
from jcvi.graphics.base import FancyArrow, Rectangle, plt, savefig, normalize_axes
from jcvi.apps.base import OptionParser
class BaseAlign (object):
def __init__(self, fig, xywh, xpad=0, ypad=0, xmax=100):
x, y, w, h = xywh
self.ax = fig.add_axes(xywh)
self.sax = fig.add_axes([x + xpad * w, y + ypad * h,
(1 - 2 * xpad) * w, (1 - 2 * ypad) * h])
self.amax = self.bmax = xmax
self.a = [(1, xmax)]
self.b = [(1, xmax)]
self.apatch = self.bpatch = None
self.apatchcolor = self.bpatchcolor = 'darkslategrey'
self.xpad = xpad
self.ypad = ypad
self.canvas = 1 - 2 * xpad
def convert(self, pos, xmax):
return self.xpad + pos * self.canvas / xmax
def invert(self, a, b):
self.a = [(1, a), (a, b), (b, self.amax)]
self.b = [(1, a), (b, a), (b, self.bmax)]
self.apatch = (self.convert(a, self.amax),
self.convert(b, self.amax))
self.bpatch = (self.convert(a, self.bmax),
self.convert(b, self.bmax))
self.bpatchcolor = 'y'
def delete(self, a, b):
self.bmax -= b - a
self.a = [(1, a), (b, self.amax)]
self.b = [(1, a), (a, self.bmax)]
self.apatch = (self.convert(a, self.amax),
self.convert(b, self.amax))
def duplicate(self, a, b, gap=0):
self.bmax += b - a + gap
self.a = [(1, b), (a, self.amax)]
self.b = [(1, b), (b + gap, self.bmax)]
self.apatch = (self.convert(a, self.amax),
self.convert(b, self.amax))
self.bpatch = (self.convert(a, self.bmax),
self.convert(b, self.bmax),
self.convert(b + gap, self.bmax),
self.convert(2 * b - a + gap, self.bmax))
self.bpatchcolor = 'tomato'
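# Illustrative sketch (not part of the original script): convert() maps a
# sequence coordinate in [0, xmax] to an axes fraction,
# xpad + pos * (1 - 2 * xpad) / xmax, which is how the patch tuples built by
# invert()/delete()/duplicate() line up with the chromosomes drawn later.
def _convert_example():
    xpad, xmax = .15, 100          # PairwiseAlign defaults below
    canvas = 1 - 2 * xpad
    pos = 30
    return xpad + pos * canvas / xmax    # 0.36 for these numbers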
class PairwiseAlign (BaseAlign):
def __init__(self, fig, xywh, xpad=.15, ypad=.15):
super(PairwiseAlign, self).__init__(fig, xywh, xpad, ypad)
def draw(self, width=.03):
HorizontalChromosome(self.ax, self.xpad, 1 - self.xpad,
self.ypad - .05, height=width * 1.5,
patch=self.apatch, lw=2)
Chromosome(self.ax, self.xpad - .05, self.ypad, 1 - self.ypad,
width=width, patch=self.bpatch,
patchcolor=self.bpatchcolor, lw=2)
for a, b in zip(self.a, self.b):
self.sax.plot(a, b, "-", color="darkslategrey", lw=2)
self.sax.set_xticklabels([])
self.sax.set_yticklabels([])
self.sax.set_xlim((1, self.amax))
self.sax.set_ylim((1, self.bmax))
normalize_axes(self.ax)
class ReadAlign (BaseAlign):
def __init__(self, fig, xywh, xpad=.05, ypad=.2, readlen=6, gap=3):
super(ReadAlign, self).__init__(fig, xywh, xpad, ypad)
self.readlen = readlen
self.gap = gap
self.reads = []
self.ymax = 12
self.ntracks = 0
self.layout(1, self.amax)
def layout(self, start, end, maxtracks=8):
readrange = 2 * self.readlen + self.gap
end -= readrange
assert start < end, "end must be > start + readlen"
reads = []
for x in xrange(100):
pos = randint(start, end)
reads.append(PairedRead(pos, readlen=self.readlen, gap=self.gap))
reads, ntracks = self.arrange(reads, self.ntracks, maxtracks=maxtracks)
self.reads += reads
self.ntracks += ntracks
def arrange(self, reads, ntracks, maxtracks=8):
track_ends = [0]
reads.sort(key=lambda x: x.start)
for r in reads:
m = min(track_ends)
mi = track_ends.index(m)
if r.start > m + .005:
track_ends[mi] = r.end
else:
if len(track_ends) >= maxtracks:
continue
track_ends.append(r.end)
mi = len(track_ends) - 1
r.set_y(ntracks + mi)
ntracks = len(track_ends)
reads = [x for x in reads if x.y is not None]
return reads, ntracks
def remove(self, a, b, maxtracks=0):
self.reads = [r for r in self.reads \
if not (a <= r.start <= b and a <= r.end <= b \
and r.y >= maxtracks)]
def draw(self, width=.03):
HorizontalChromosome(self.ax, self.xpad, 1 - self.xpad,
self.ypad - width / 2, height=width * 1.5,
patch=self.apatch, lw=2)
for r in self.reads:
r.draw(self.sax)
self.sax.set_xlim((1, self.amax))
self.sax.set_ylim((-1, self.ymax))
normalize_axes(self.ax)
self.sax.set_axis_off()
def highlight(self, a, b):
self.apatch = (self.convert(a, self.amax),
self.convert(b, self.amax))
self.sax.plot((a, a), (-1, self.ntracks), "m-", lw=2)
self.sax.plot((b, b), (-1, self.ntracks), "m-", lw=2)
def invert(self, a, b):
reads = []
for r in self.reads:
r.set_y(None)
keep = True
if r.start < a < r.end or r.start < b < r.end:
adist, bdist = abs(a - r.mid), abs(b - r.mid)
flipr = r.r2 if adist > bdist else r.r1
flipr.x1 = a + b - flipr.x1
flipr.x2 = a + b - flipr.x2
flipr.color = 'y'
if adist > self.gap and bdist > self.gap:
keep = False
if keep:
reads.append(r)
self.reads, self.ntracks = self.arrange(reads, 0)
self.highlight(a, b)
def delete(self, a, b):
self.remove(a, b)
for r in self.reads:
r.breakpoint(a, 'g', 'lightgrey')
r.breakpoint(b, 'lightgrey', 'g')
self.highlight(a, b)
def duplicate(self, a, b):
self.layout(1, self.amax, maxtracks=4)
self.remove(1, a, maxtracks=6)
self.remove(b, self.amax, maxtracks=6)
for r in self.reads:
r.paint(a, b, 'tomato')
r.breakpoint(a, 'k', 'tomato')
r.breakpoint(b, 'tomato', 'k')
r.breakpoint(a, 'lightgrey', 'tomato', ystart=6)
r.breakpoint(b, 'tomato', 'lightgrey', ystart=6)
self.highlight(a, b)
class OpticalMapAlign (BaseAlign):
def __init__(self, fig, xywh, xpad=.05, ypad=.3):
super(OpticalMapAlign, self).__init__(fig, xywh, xpad, ypad)
om = self.from_silico()
self.om1 = OpticalMapTrack(self.sax, om)
self.om2 = OpticalMapTrack(self.sax, om, ystart=-3, color='orange')
def from_silico(self, filename="Ecoli.silico", nfrags=25):
fp = open(filename)
fp.next()
ar = [0] + [int(x) for x in fp.next().split()]
sizes = [] # Only retain frags beyond certain size
for a, b in pairwise(ar):
size = b - a
if size < max(ar[:nfrags]) / 100:
continue
sizes.append(size)
sizes = [choice(sizes) for x in xrange(nfrags)]
return sizes
def draw(self):
self.om1.draw()
self.om2.draw()
self.sax.set_xlim(0, self.om1.amax)
self.sax.set_ylim(-8, 8)
normalize_axes(self.ax)
self.sax.set_axis_off()
def invert(self, a, b):
ai, bi = self.om2.invert(a, b)
self.om1.highlight(ai, bi, 'lightslategrey')
self.om2.highlight(ai, bi, 'y', arrow_inverse=True)
def delete(self, a, b):
ai, bi = self.om2.delete(a, b)
self.om1.highlight(ai, bi, 'lightslategrey')
self.om2.highlight(ai, bi, None)
def duplicate(self, a, b):
(ai, bi), (ci, di) = self.om1.duplicate(a, b)
(ai, bi), (ci, di) = self.om2.duplicate(a, b)
self.om1.highlight(ai, bi, None)
self.om1.highlight(ci, di, 'lightslategrey')
self.om2.highlight(ai, bi, 'tomato')
self.om2.highlight(ci, di, 'tomato')
class OpticalMapTrack (BaseGlyph):
def __init__(self, ax, sizes, ystart=0, color='darkslategrey',
height=1, wiggle=3):
super(OpticalMapTrack, self).__init__(ax)
self.ax = ax
self.sizes = sizes[:]
self.ystart = ystart
self.height = height
self.color = color
self.wiggle = wiggle
self.make_wiggles()
def draw(self):
ar = self.ar
pad = self.pad
pads = 0
for (a, b), w, color in zip(pairwise(ar), self.wiggles, self.colors):
yf = self.ystart + w * 1. / self.wiggle
if color:
p = Rectangle((a + pads, yf), b - a, self.height, color=color)
self.append(p)
pads += pad
self.add_patches()
def get_endpoints(self, a, b, xmax=100):
ar = self.ar
a, b = max(ar) * a / xmax, max(ar) * b / xmax
return bisect(ar, a) - 1, bisect(ar, b)
def invert(self, a, b):
ai, bi = self.get_endpoints(a, b)
bb = self.sizes[ai:bi]
self.sizes = self.sizes[:ai] + bb[::-1] + self.sizes[bi:]
return ai, bi
def delete(self, a, b):
return self.get_endpoints(a, b)
def duplicate(self, a, b):
ai, bi = self.get_endpoints(a, b)
ai += self.wiggle / 2
bi += self.wiggle / 2
ci, di = ai - self.wiggle, ai
bb = self.sizes[ai:bi]
bs = len(bb)
self.sizes = self.sizes[:ci] + bb + self.sizes[ci:]
self.make_wiggles()
return (ci, ci + bs), (di + bs, di + 2 * bs)
def highlight(self, ai, bi, color, arrow_inverse=False):
self.colors[ai:bi] = [color] * (bi - ai)
ar = self.ar
a, b = ar[ai], ar[bi]
a += self.pad * (ai - 1)
b += self.pad * (bi - 1)
if self.ystart < 0:
yy = self.ystart - 2
shape = 'left'
else:
yy = self.ystart + 4
shape = 'right'
if arrow_inverse:
a, b = b, a
shape = 'right' if shape == 'left' else 'left'
if not color:
return
p = FancyArrow(a, yy, b - a, 0, fc=color, lw=0, shape=shape,
length_includes_head=True, width=1,
head_length=abs(b - a) * .15, head_width=3)
self.ax.add_patch(p)
@property
def amax(self):
return sum(self.sizes) + (self.length - 1) * self.pad
@property
def length(self):
return len(self.sizes)
@property
def ar(self):
cumsizes = [0]
for a in self.sizes:
cumsizes.append(cumsizes[-1] + a)
return cumsizes
def make_wiggles(self):
ar = [self.wiggle / 2 + 1]
while len(ar) <= self.length:
ar += range(self.wiggle, 0, -1)
self.wiggles = ar[:self.length]
self.colors = [self.color] * self.length
ar = self.ar
self.pad = max(ar) / 100
class SingleRead (object):
def __init__(self, start, readlen, sign=1):
self.x1 = start
self.x2 = start + sign * readlen
self.y = None
self.color = 'k'
self.broken = None
@property
def sign(self):
return 1 if self.x2 >= self.x1 else -1
@property
def start(self):
return min(self.x1, self.x2)
@property
def end(self):
return max(self.x1, self.x2)
@property
def span(self):
return self.end - self.start + 1
def draw(self, ax, height=.6):
if self.broken is None:
GeneGlyph(ax, self.x1, self.x2, self.y, height, tip=2,
color=self.color, gradient=True)
else:
a, lcolor, rcolor = self.broken
if self.sign < 0:
lcolor, rcolor = rcolor, lcolor
GeneGlyph(ax, self.x1, a, self.y, height, tip=0,
color=lcolor, gradient=True)
GeneGlyph(ax, a, self.x2, self.y, height, tip=2,
color=rcolor, gradient=True)
def breakpoint(self, a, lcolor, rcolor):
if a > self.end:
self.color = lcolor
elif a < self.start:
self.color = rcolor
else:
self.broken = (a, lcolor, rcolor)
class PairedRead (object):
def __init__(self, start, readlen, gap):
self.r1 = SingleRead(start, readlen)
self.r2 = SingleRead(start + gap + 2 * readlen, readlen, sign=-1)
self.color = 'k'
self.y = None
@property
def start(self):
return min(self.r1.start, self.r2.start)
@property
def end(self):
return max(self.r1.end, self.r2.end)
@property
def i1(self):
return min(self.r1.end, self.r2.end)
@property
def i2(self):
return max(self.r1.start, self.r2.start)
@property
def mid(self):
return (self.start + self.end) * .5
def set_y(self, y):
self.y = y
self.r1.y = self.r2.y = y
def draw(self, ax):
self.r1.draw(ax)
self.r2.draw(ax)
ax.plot((self.i1, self.i2), (self.y, self.y), "-",
color=self.color)
def paint(self, a, b, color):
if range_overlap((0, self.start + 1 , self.end - 1),
(0, a, b)):
self.r1.color = self.r2.color = self.color = color
def breakpoint(self, a, lcolor, rcolor, ystart=0):
if not self.start < a < self.end:
return
if self.y < ystart:
return
self.color = lcolor if a > self.mid else rcolor
self.r1.breakpoint(a, lcolor, rcolor)
self.r2.breakpoint(a, lcolor, rcolor)
def main():
p = OptionParser(__doc__)
opts, args, iopts = p.set_image_options(figsize="9x7")
if len(args) != 1:
sys.exit(not p.print_help())
mode, = args
assert mode == "demo"
a, b = 30, 70
pad = .08
w = .31
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
# Row separators
yy = 1 - pad
for i in xrange(3):
root.plot((0, 1), (yy, yy), "-", lw=2, color="lightgray")
yy -= w
# Row headers
xx = pad * .6
yy = 1 - pad - .5 * w
for title in ("Inversion", "Indel", "Duplication"):
root.text(xx, yy, title, ha="center", va="center")
yy -= w
# Column headers
xx = pad + .5 * w
yy = 1 - pad / 2
for title in ("Assembly alignment", "Read alignment", "Optical map alignment"):
root.text(xx, yy, title, ha="center", va="center")
xx += w
p = PairwiseAlign(fig, [pad, 2 * w, w, w])
p.invert(a, b)
p.draw()
p = PairwiseAlign(fig, [pad, w, w, w])
p.delete(a, b)
p.draw()
p = PairwiseAlign(fig, [pad, 0, w, w])
p.duplicate(a, b, gap=5)
p.draw()
p = ReadAlign(fig, [pad + w, 2 * w, w, w])
p.invert(a, b)
p.draw()
p = ReadAlign(fig, [pad + w, w, w, w])
p.delete(a, b)
p.draw()
p = ReadAlign(fig, [pad + w, 0, w, w])
p.duplicate(a, b)
p.draw()
p = OpticalMapAlign(fig, [pad + 2 * w, 2 * w, w, w])
p.invert(a, b)
p.draw()
p = OpticalMapAlign(fig, [pad + 2 * w, w, w, w])
p.delete(a, b)
p.draw()
p = OpticalMapAlign(fig, [pad + 2 * w, 0, w, w])
p.duplicate(a, b)
p.draw()
normalize_axes(root)
image_name = mode + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
if __name__ == '__main__':
main()
|
|
import sys
import socket
import select
import threading
import struct
import time
class S5Req:
def __init__(self,buf):
self.ver, self.cmd, self.rsv, self.atyp = struct.unpack("BBBB", buf[0:4])
self.dst_addr = None
self.dst_port = None
def parse_port(self,buf):
if len(buf) < 2:
return False
port = struct.unpack("H",buf[0:2])[0]
self.dst_port = socket.ntohs(int(port))
return True
def parse_ipv4(self,buf):
if len(buf) < 6:
return False
self.dst_addr = socket.inet_ntoa(buf[0:4])
if self.parse_port(buf[4:]):
return True
return False
def parse_domain_name(self,buf):
buf_size = len(buf)
if buf_size < 1:
return False
name_len = struct.unpack("B",buf[0:1])[0]
if name_len+3 != buf_size:
return False
self.dst_addr = buf[1:name_len+1]
if self.parse_port(buf[1+name_len:]):
return True
return False
def parse_netloc(self,buf):
if self.atyp == 3:
return self.parse_domain_name(buf)
if self.atyp == 1:
return self.parse_ipv4(buf)
return False
class S5Resp:
def __init__(self):
self.ver = 5
self.rep = 1
self.rsv = 0
self.atyp = 1
self.bnd_addr = None
self.bnd_port = None
def pack(self):
addr = 0
port = 0
if self.bnd_addr:
addr = struct.unpack("I",socket.inet_aton(self.bnd_addr))[0]
if self.bnd_port:
port = socket.htons(self.bnd_port)
buf = struct.pack("BBBBIH",self.ver, self.rep, self.rsv, self.atyp,addr,port)
return buf
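# Illustrative sketch (not part of the original module): the byte layout the
# classes above implement (SOCKS5, RFC 1928). A CONNECT request for
# 1.2.3.4:80 is VER=5, CMD=1, RSV=0, ATYP=1 followed by the 4 address bytes
# and the port in network byte order; S5Req/parse_netloc decode exactly
# these fields.
def _socks5_request_example():
    header = struct.pack("BBBB", 5, 1, 0, 1)
    netloc = socket.inet_aton("1.2.3.4") + struct.pack("!H", 80)
    req = S5Req(header)
    req.parse_netloc(netloc)
    return req.dst_addr, req.dst_port    # ('1.2.3.4', 80)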
class Socks5Error(Exception):
pass
class Socks5Thread(threading.Thread):
wait = 8.0
buf_size = 1024*4
def __init__(self,s,ip,port):
self.s = s
self.dst_s = None
self.ip = ip
self.port = port
threading.Thread.__init__(self)
def run(self):
resp = S5Resp()
try:
buf = self.s.recv(255)
if not buf:
raise socket.error
self.s.send("\x05\x00")
buf = self.s.recv(4)
if not buf or len(buf) != 4:
raise socket.error
req = S5Req(buf)
if req.ver != 5:
resp.rep = 1
raise Socks5Error
if req.cmd != 1:
resp.rep = 7
raise Socks5Error
if req.atyp != 1 and req.atyp != 3:
resp.rep = 8
raise Socks5Error
count = 255
if req.atyp == 1:
count = 6
buf = self.s.recv(count)
if not buf:
raise socket.error
if not req.parse_netloc(buf):
resp.rep = 1
raise Socks5Error
if req.atyp == 3:
try:
addr = socket.gethostbyname(req.dst_addr)
except socket.error:
resp.rep = 4
raise Socks5Error
else:
addr = req.dst_addr
self.dst_s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
try:
self.dst_s.connect((addr,req.dst_port))
except socket.error:
resp.rep = 4
raise Socks5Error
addr,port = self.dst_s.getsockname()
resp.rep = 0
            resp.bnd_addr = addr
            resp.bnd_port = port
self.s.send(resp.pack())
self.forward_loop()
except Socks5Error:
self.s.send(resp.pack())
except socket.error:
pass
finally:
if self.s:
self.s.close()
if self.dst_s:
self.dst_s.close()
def forward_loop(self):
while 1:
r,w,x = select.select([self.s,self.dst_s],[],[],self.wait)
if not r:
continue
for s in r:
if s is self.s:
buf = self.s.recv(self.buf_size)
if not buf:
raise socket.error
self.dst_s.send(buf)
if s is self.dst_s:
buf = self.dst_s.recv(self.buf_size)
if not buf:
raise socket.error
self.s.send(buf)
time.sleep(0.01)
class Socks5(threading.Thread):
def __init__(self,ip="0.0.0.0",port=8080):
self.ip = ip
self.port = port
self.s = None
threading.Thread.__init__(self)
def run(self):
try:
self.s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.s.bind((self.ip,self.port))
self.s.listen(5)
except socket.error, msg:
print msg
if self.s:
self.s.close()
self.s = None
return False
while 1:
try:
conn, addr = self.s.accept()
except socket.error, msg:
print msg
self.s.close()
self.s = None
return False
thread = Socks5Thread(conn,addr[0],addr[1])
thread.start()
return True
def main():
ip_addr = "0.0.0.0"
port = 4444
try:
ip_addr = sys.argv[1]
port = int(sys.argv[2])
except IndexError:
pass
s5 = Socks5(ip_addr,port)
s5.start()
if __name__=='__main__':
main()
|
|
from ConfigParser import SafeConfigParser
import json
from feedgen.feed import FeedGenerator
from bs4 import BeautifulSoup
import threading
import operator
import datetime
import requests
import pickle
import pytz
import time
import os
# Initialize conf
cust_path = os.path.dirname(os.path.realpath(__file__))
parser = SafeConfigParser()
parser.read(cust_path + '/technowatch.conf')
# Initialize feed
fg = FeedGenerator()
# Initialize app
if parser.get('wsgi', 'activated') == "True":
from flask import Flask
app = Flask(__name__)
def upload():
from ftplib import FTP
print "Uploading ..."
ftp = FTP(parser.get('ftp', 'host'),
parser.get('ftp', 'user'),
parser.get('ftp', 'pass'))
ftp.cwd(parser.get('ftp', 'path'))
fg.rss_file(cust_path + '/static/' + parser.get('ftp', 'filename'))
ftp.storbinary("STOR " + parser.get('ftp', 'filename'),
open(cust_path + '/static/' + parser.get('ftp', 'filename'), 'r'))
ftp.close()
print "Uploaded ..."
def build():
global fg
fg = FeedGenerator()
fg.title(parser.get('technowatch', 'name'))
fg.language('en')
fg.description(parser.get('technowatch', 'name'))
fg.link(href=parser.get('technowatch', 'link'), rel='alternate')
# Cleaning stories if too much
if len(known_stories) > int(parser.get('technowatch', 'cache_max')):
clean()
# Sorting stories by crawled date
for item in sorted(known_stories.values(), key=operator.itemgetter('crawledDate'), reverse=True):
fe = fg.add_entry()
fe.link(href=item['url'], rel='alternate')
fe.title("[" + item['type'] + "] " + item['title'])
fe.category({
'label': item['type'],
'term': item['type']
})
fe.author({'name': item['by']})
fe.description(item['desc'])
fe.pubdate(item['crawledDate'])
    # Cache known stories so the feed can be rebuilt across restarts
pickle.dump(known_stories, open(cust_path + "/technowatch.data", "wb"))
if parser.get('wsgi', 'activated') == "True":
fg.rss_file(cust_path + '/static/rss.xml')
if parser.get('ftp', 'activated') == "True":
upload()
# Initialize global variable
try:
known_stories = pickle.load(open(cust_path + "/technowatch.data", "rb"))
except IOError:
known_stories = {}
build()
def check_dribble():
rebuild = False
    # requesting Dribbble + bs
html_doc = requests.get('https://dribbble.com/shots?list=animated').content
soup = BeautifulSoup(html_doc)
for li in soup.find_all('li', {'class': 'group'}):
try:
if li.get('id') is not None:
key = "drib-" + li.get('id')
if key not in known_stories:
link = "https://dribbble.com" + li.find("a", {'class': 'dribbble-link'}).get('href')
img = li.find("noscript").find("img").get('src').replace('_teaser', '')
item = {'title': li.find('strong').get_text(),
'url': link,
'by': li.find("a", {"class": 'url'}).get('title'),
'crawledDate': datetime.datetime.now().replace(tzinfo=pytz.utc),
'type': "dribble",
'key': key,
'desc': li.find('strong').get_text() + "<br />"
"<a href='" + link + "'>"
"<img src='" + img + "' />"
"</a>"}
known_stories[key] = item
rebuild = True
except:
print "Dribble bugged..."
return rebuild
def check_githubtrend():
rebuild = False
# requesting github + bs
html_doc = requests.get('https://github.com/trending').content
soup = BeautifulSoup(html_doc)
for li in soup.find_all('li', {'class': 'repo-list-item'}):
try:
title = li.find("h3", {'class': 'repo-list-name'}).a.get('href')
desc = li.find("p", {'class': 'repo-list-description'})
desc = desc.get_text() if desc is not None else ""
lang = li.find("p", {'class': 'repo-list-meta'}).get_text().split('\n')[1]
if title not in known_stories:
item = {'title': "[" + lang.replace(" ", "") + "] " + title,
'url': "https://github.com" + title,
'by': title.split("/")[1],
'crawledDate': datetime.datetime.now().replace(tzinfo=pytz.utc),
'type': "github",
'key': title,
'desc': desc}
known_stories[title] = item
rebuild = True
except:
print "Github bugged ..."
return rebuild
def check_producthunt():
rebuild = False
    # requesting Product Hunt + bs
html_doc = requests.get('http://www.producthunt.com/').content
soup = BeautifulSoup(html_doc)
for li in soup.find_all('li', {'data-react-class': 'PostItem'})[:10]:
try:
j = json.loads(li.get('data-react-props'))
key = "ph-" + str(j['id'])
if key not in known_stories:
item = {'title': j['name'],
'url': "http://www.producthunt.com" + j['shortened_url'],
'by': 'no one',
'crawledDate': datetime.datetime.now().replace(tzinfo=pytz.utc),
'type': "producthunt",
'key': key,
'desc': j['tagline']}
known_stories[key] = item
rebuild = True
except:
print "Product Hunt bugged..."
return rebuild
def check_hackernews():
rebuild = False
# API request for top stories
noise = int(parser.get('technowatch', 'hackernews_noise'))
top = requests.get('https://hacker-news.firebaseio.com/v0/topstories.json').json()[:noise]
for story in top:
if story not in known_stories:
# Getting and storing new top story information
item = requests.get('https://hacker-news.firebaseio.com/v0/item/' + str(story) + '.json').json()
item['crawledDate'] = datetime.datetime.now().replace(tzinfo=pytz.utc)
item['type'] = "hacker-news"
item['key'] = story
item['desc'] = item['title'] + " <br /> " + item['url']
known_stories[story] = item
rebuild = True
return rebuild
def check_news():
rebuild = False
    # Check every source for new stories
for check in (check_hackernews, check_githubtrend, check_producthunt, check_dribble):
rebuild = True if check() else rebuild
if rebuild:
# If new stories, rebuilding feed
build()
def clean():
left = len(known_stories) - int(parser.get('technowatch', 'cache_min'))
for item in sorted(known_stories.values(), key=operator.itemgetter('crawledDate')):
del known_stories[item['key']]
left -= 1
if left == 0:
return
def threaded():
while True:
check_news()
time.sleep(int(parser.get('technowatch', 'refresh')))
if parser.get('wsgi', 'activated') == "True":
@app.route('/')
def show_rss():
# Simply return cached RSS
return app.send_static_file('rss.xml')
if __name__ == '__main__':
thread = threading.Thread(None, threaded, None)
thread.start()
if parser.get('wsgi', 'activated') == "True":
app.run(host=parser.get('wsgi', 'host'), port=int(parser.get('wsgi', 'port')))
|
|
"""
Contains a CLICommand that implements ping test functionality.
Uses the following from :py:class:`swiftly.cli.context.CLIContext`:
=============== ====================================================
client_manager For connecting to Swift.
concurrency The number of concurrent actions that can be
performed.
io_manager For directing output.
limit The maximum number of Swift nodes to output
information about.
object_ring An instance of swift.common.ring.ring.Ring if you
want a report based on Swift nodes with implied
usage during the ping test.
ping_begin The first time.time() when the entire ping test
began.
ping_begin_last The time.time() the last ping task started.
ping_count The number of objects to use.
ping_verbose True if you want a full ping report rather than just
the overall time.
threshold Defines the threshold for the threshold node report.
This is the multiplier over the average request
time.
=============== ====================================================
"""
"""
Copyright 2011-2013 Gregory Holt
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import six
from six import moves
import collections
from six.moves import StringIO
import time
import traceback
import uuid
from swiftly.concurrency import Concurrency
from swiftly.cli.command import CLICommand, ReturnCode
try:
from eventlet import sleep
except ImportError:
sleep = time.sleep
def _cli_ping_status(context, heading, ident, status, reason, headers,
contents):
if headers:
ident = headers.get('x-trans-id') or ident
if hasattr(contents, 'read'):
contents.read()
if status and status // 100 != 2:
raise ReturnCode(
'with %s: %s %s %s' % (heading, status, reason, ident))
now = time.time()
if context.ping_verbose:
with context.io_manager.with_stdout() as fp:
fp.write(
'% 6.02fs %s %s\n' %
(now - context.ping_begin_last, heading, ident))
fp.flush()
context.ping_begin_last = now
def _cli_ping_objects(context, heading, conc, container, objects, func,
results):
begin = time.time()
for obj in objects:
for (exc_type, exc_value, exc_tb, result) in \
six.itervalues(conc.get_results()):
if exc_value:
with context.io_manager.with_stderr() as fp:
fp.write(str(exc_value))
fp.write('\n')
fp.flush()
conc.spawn(obj, func, context, results, container, obj)
conc.join()
for (exc_type, exc_value, exc_tb, result) in \
six.itervalues(conc.get_results()):
if exc_value:
with context.io_manager.with_stderr() as fp:
fp.write(str(exc_value))
fp.write('\n')
fp.flush()
elapsed = time.time() - begin
_cli_ping_status(
context,
'object %s x%d at %d concurrency, %.02f/s' %
(heading, len(objects), conc.concurrency, len(objects) / elapsed),
None, None, None, None, None)
overall = results.get('overall')
if overall:
overall = sorted(overall, key=lambda x: x[0])
results['overall'] = overall
if context.ping_verbose or context.graphite:
best = overall[0][0]
worst = overall[-1][0]
            # The middle element of the sorted list is the median; the
            # arithmetic average is the mean.
            median = overall[len(overall) // 2][0]
            mean = sum(x[0] for x in overall) / len(overall)
threshold = max(2, mean * 2)
slows = 0
for x in overall:
if x[0] > 2 and x[0] > threshold:
slows += 1
slow_percentage = 100.0 * slows / len(overall)
with context.io_manager.with_stdout() as fp:
if context.ping_verbose:
fp.write(
' best %.02fs, worst %.02fs, mean %.02fs, '
'median %.02fs\n %d slower than 2s and twice '
'the mean, %.02f%%\n' % (
best, worst, mean, median, slows, slow_percentage))
fp.flush()
if context.graphite:
fp.write(
'%s.%s.slow_percentage %.02f %d\n' % (
context.graphite, heading, slow_percentage,
time.time()))
def _cli_ping_object_put(context, results, container, obj):
with context.client_manager.with_client() as client:
begin = time.time()
try:
status, reason, headers, contents = client.put_object(
container, obj, StringIO('swiftly-ping'))
except Exception:
raise ReturnCode(
'putting object %r: %s' % (obj, traceback.format_exc()))
if status // 100 != 2:
raise ReturnCode(
'putting object %r: %s %s %s' %
(obj, status, reason, headers.get('x-trans-id') or '-'))
elapsed = time.time() - begin
results['overall'].append((elapsed, headers.get('x-trans-id') or obj))
if context.object_ring:
for node in context.object_ring.get_nodes(
client.get_account_hash(), container, obj)[1]:
results[node['ip']].append(
(elapsed, headers.get('x-trans-id') or obj))
def _cli_ping_object_get(context, results, container, obj):
with context.client_manager.with_client() as client:
begin = time.time()
try:
status, reason, headers, contents = client.get_object(
container, obj, stream=False)
except Exception:
raise ReturnCode(
'getting object %r: %s' % (obj, traceback.format_exc()))
if status // 100 != 2:
raise ReturnCode(
'getting object %r: %s %s %s' %
(obj, status, reason, headers.get('x-trans-id') or '-'))
elapsed = time.time() - begin
results['overall'].append((elapsed, headers.get('x-trans-id') or obj))
if context.object_ring:
for node in context.object_ring.get_nodes(
client.get_account_hash(), container, obj)[1]:
results[node['ip']].append(
(elapsed, headers.get('x-trans-id') or obj))
def _cli_ping_object_delete(context, results, container, obj):
with context.client_manager.with_client() as client:
begin = time.time()
try:
status, reason, headers, contents = client.delete_object(
container, obj)
except Exception:
raise ReturnCode(
'deleting object %r: %s' % (obj, traceback.format_exc()))
if status // 100 != 2 and status != 404:
raise ReturnCode(
'deleting object %r: %s %s %s' %
(obj, status, reason, headers.get('x-trans-id') or '-'))
elapsed = time.time() - begin
results['overall'].append((elapsed, headers.get('x-trans-id') or obj))
if context.object_ring:
for node in context.object_ring.get_nodes(
client.get_account_hash(), container, obj)[1]:
results[node['ip']].append(
(elapsed, headers.get('x-trans-id') or obj))
def _cli_ping_ring_report(context, timings_dict, label):
timings_dict.pop('overall', None) # Not currently used in this function
if not timings_dict:
return
worsts = {}
for ip, timings in six.iteritems(timings_dict):
worst = [0, None]
for timing in timings:
if timing[0] > worst[0]:
worst = timing
worsts[ip] = worst
with context.io_manager.with_stdout() as fp:
fp.write(
'Worst %s times for up to %d nodes with implied usage:\n' %
(label, context.limit))
for ip, (elapsed, xid) in sorted(
six.iteritems(worsts), key=lambda x: x[1][0],
reverse=True)[:context.limit]:
fp.write(' %20s % 6.02fs %s\n' % (ip, elapsed, xid))
fp.flush()
with context.io_manager.with_stdout() as fp:
averages = {}
for ip, timings in six.iteritems(timings_dict):
averages[ip] = sum(t[0] for t in timings) / len(timings)
fp.write(
'Average %s times for up to %d nodes with implied usage:\n' %
(label, context.limit))
for ip, elapsed in sorted(
six.iteritems(averages), key=lambda x: x[1],
reverse=True)[:context.limit]:
fp.write(' %20s % 6.02fs\n' % (ip, elapsed))
fp.flush()
total = 0.0
count = 0
for ip, timings in six.iteritems(timings_dict):
total += sum(t[0] for t in timings)
count += len(timings)
threshold = total / count * context.threshold
counts = collections.defaultdict(lambda: 0)
for ip, timings in six.iteritems(timings_dict):
for t in timings:
if t[0] > threshold:
counts[ip] += 1
with context.io_manager.with_stdout() as fp:
fp.write(
'Count of %s times past (average * %d) for up to %d nodes with '
'implied usage:\n' % (label, context.threshold, context.limit))
for ip, count in sorted(
six.iteritems(counts), key=lambda x: x[1],
reverse=True)[:context.limit]:
fp.write(' %20s % 6d\n' % (ip, count))
fp.flush()
percentages = {}
for ip, count in six.iteritems(counts):
percentages[ip] = (
100.0 * count / len(timings_dict[ip]),
count, len(timings_dict[ip]))
with context.io_manager.with_stdout() as fp:
fp.write(
'Percentage of %s times past (average * %d) for up to %d nodes '
'with implied usage:\n' %
(label, context.threshold, context.limit))
for ip, percentage in sorted(
six.iteritems(percentages), key=lambda x: x[1][0],
reverse=True)[:context.limit]:
fp.write(
' %20s % 6.02f%% %d of %d\n' %
(ip, percentage[0], percentage[1], percentage[2]))
fp.flush()
def cli_ping(context, prefix):
"""
Performs a ping test.
See :py:mod:`swiftly.cli.ping` for context usage information.
See :py:class:`CLIPing` for more information.
:param context: The :py:class:`swiftly.cli.context.CLIContext` to
use.
:param prefix: The container name prefix to use. Default:
swiftly-ping
"""
if not prefix:
prefix = 'swiftly-ping'
ping_ring_object_puts = collections.defaultdict(lambda: [])
ping_ring_object_gets = collections.defaultdict(lambda: [])
ping_ring_object_deletes = collections.defaultdict(lambda: [])
context.ping_begin = context.ping_begin_last = time.time()
container = prefix + '-' + uuid.uuid4().hex
objects = [uuid.uuid4().hex for x in moves.range(context.ping_count)]
conc = Concurrency(context.concurrency)
with context.client_manager.with_client() as client:
client.auth()
_cli_ping_status(context, 'auth', '-', None, None, None, None)
_cli_ping_status(context, 'account head', '-', *client.head_account())
_cli_ping_status(
context, 'container put', '-', *client.put_container(container))
if _cli_ping_objects(
context, 'put', conc, container, objects, _cli_ping_object_put,
ping_ring_object_puts):
with context.io_manager.with_stderr() as fp:
fp.write(
'ERROR put objects did not complete successfully due to '
'previous error; but continuing\n')
fp.flush()
if _cli_ping_objects(
context, 'get', conc, container, objects, _cli_ping_object_get,
ping_ring_object_gets):
with context.io_manager.with_stderr() as fp:
fp.write(
'ERROR get objects did not complete successfully due to '
'previous error; but continuing\n')
fp.flush()
if _cli_ping_objects(
context, 'delete', conc, container, objects,
_cli_ping_object_delete, ping_ring_object_deletes):
with context.io_manager.with_stderr() as fp:
fp.write(
'ERROR delete objects did not complete successfully due to '
'previous error; but continuing\n')
fp.flush()
for attempt in moves.range(5):
if attempt:
sleep(2**attempt)
with context.client_manager.with_client() as client:
try:
_cli_ping_status(
context, 'container delete', '-',
*client.delete_container(container))
break
except ReturnCode as err:
with context.io_manager.with_stderr() as fp:
fp.write(str(err))
fp.write('\n')
fp.flush()
else:
with context.io_manager.with_stderr() as fp:
fp.write(
'ERROR could not confirm deletion of container due to '
'previous error; but continuing\n')
fp.flush()
end = time.time()
with context.io_manager.with_stdout() as fp:
if context.graphite:
fp.write(
'%s.ping_overall %.02f %d\n' % (
context.graphite, end - context.ping_begin, time.time()))
if context.ping_verbose:
fp.write('% 6.02fs total\n' % (end - context.ping_begin))
elif not context.graphite:
fp.write('%.02fs\n' % (end - context.ping_begin))
fp.flush()
ping_ring_overall = collections.defaultdict(lambda: [])
_cli_ping_ring_report(context, ping_ring_object_puts, 'PUT')
for ip, timings in six.iteritems(ping_ring_object_puts):
ping_ring_overall[ip].extend(timings)
_cli_ping_ring_report(context, ping_ring_object_gets, 'GET')
for ip, timings in six.iteritems(ping_ring_object_gets):
ping_ring_overall[ip].extend(timings)
_cli_ping_ring_report(context, ping_ring_object_deletes, 'DELETE')
for ip, timings in six.iteritems(ping_ring_object_deletes):
ping_ring_overall[ip].extend(timings)
_cli_ping_ring_report(context, ping_ring_overall, 'overall')
class CLIPing(CLICommand):
"""
A CLICommand that implements ping test functionality.
See the output of ``swiftly help ping`` for more information.
"""
def __init__(self, cli):
super(CLIPing, self).__init__(
cli, 'ping', max_args=1, usage="""
Usage: %prog [main_options] ping [options] [path]
For help on [main_options] run %prog with no args.
Runs a ping test against the account.
The [path] will be used as a prefix to the random container name used (default:
swiftly-ping).""".strip())
self.option_parser.add_option(
'-v', '--verbose', dest='ping_verbose', action='store_true',
help='Outputs additional information as ping works.')
self.option_parser.add_option(
'-c', '--count', dest='ping_count', default=1,
help='Count of objects to create; default 1.')
self.option_parser.add_option(
'-o', '--object-ring', dest='object_ring',
help='The current object ring of the cluster being pinged. This '
'will enable output of which nodes are involved in the '
'object requests and their implied behavior. Use of this '
'also requires the main Swift code is installed and '
'importable.')
self.option_parser.add_option(
'-l', '--limit', dest='limit',
help='Limits the node output tables to LIMIT nodes.')
self.option_parser.add_option(
'-t', '--threshold', dest='threshold',
help='Changes the threshold for the final (average * x) reports. '
'This will define the value of x, defaults to 2.')
self.option_parser.add_option(
'-g', '--graphite', dest='graphite', metavar='PREFIX',
help='Switches to "graphite" output. The output will be lines of '
'"PREFIX.metric value timestamp" suitable for piping to '
'graphite (through netcat or something similar).')
def __call__(self, args):
options, args, context = self.parse_args_and_create_context(args)
context.ping_count = int(options.ping_count or 1)
context.ping_verbose = options.ping_verbose
context.object_ring = None
if options.object_ring:
import swift.common.ring.ring
context.object_ring = swift.common.ring.ring.Ring(
options.object_ring)
context.limit = int(options.limit or 10)
context.threshold = int(options.threshold or 2)
context.graphite = options.graphite
prefix = args.pop(0) if args else 'swiftly-ping'
return cli_ping(context, prefix)
|
|
# Copyright (c) 2015-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from cloudify import exceptions as cfy_exc
from vcloud_server_plugin import vdc
import vcloud_plugin_common
from tests.unittests import test_mock_base
class ServerPluginVdcMockTestCase(test_mock_base.TestBase):
def test_creation_validation(self):
"""check validation for vdc operations"""
fake_client = self.generate_client()
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
fake_ctx = self.generate_node_context_with_current_ctx(
properties={}
)
# no vdc name
with self.assertRaises(cfy_exc.NonRecoverableError):
vdc.creation_validation(ctx=fake_ctx, vca_client=None)
            # name exists, but someone has already created this vdc
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'name': 'not_existed'
}
)
fake_client.get_vdc = mock.MagicMock(
return_value=mock.MagicMock()
)
with self.assertRaises(cfy_exc.NonRecoverableError):
vdc.creation_validation(ctx=fake_ctx, vca_client=None)
fake_client.get_vdc.assert_called_with('not_existed')
            # everything fine
fake_client.get_vdc = mock.MagicMock(return_value=None)
vdc.creation_validation(ctx=fake_ctx, vca_client=None)
            # external resource but without a resource_id
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'use_external_resource': True
}
)
with self.assertRaises(cfy_exc.NonRecoverableError):
vdc.creation_validation(ctx=fake_ctx, vca_client=None)
            # use a nonexistent vdc
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'use_external_resource': True,
'resource_id': 'not_existed'
}
)
fake_client.get_vdc = mock.MagicMock(return_value=None)
with self.assertRaises(cfy_exc.NonRecoverableError):
vdc.creation_validation(ctx=fake_ctx, vca_client=None)
fake_client.get_vdc.assert_called_with('not_existed')
            # everything exists
fake_client.get_vdc = mock.MagicMock(
return_value=mock.MagicMock()
)
vdc.creation_validation(ctx=fake_ctx, vca_client=None)
fake_client.get_vdc.assert_called_with('not_existed')
def test_create(self):
"""check vdc creation operation"""
fake_client = self.generate_client()
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
            # trying to create a new vdc on a subscription account
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
}
}
)
with self.assertRaises(cfy_exc.NonRecoverableError):
vdc.create(ctx=fake_ctx, vca_client=None)
            # use the ondemand service type
            # use an external resource whose vdc does not exist
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'service_type':
vcloud_plugin_common.ONDEMAND_SERVICE_TYPE
},
'use_external_resource': True,
'resource_id': 'not_existed'
}
)
fake_client.get_vdc = mock.MagicMock(return_value=None)
with self.assertRaises(cfy_exc.NonRecoverableError):
vdc.create(ctx=fake_ctx, vca_client=None)
fake_client.get_vdc.assert_called_with('not_existed')
            # successful create with an external resource
fake_client.get_vdc = mock.MagicMock(
return_value=mock.MagicMock()
)
vdc.create(ctx=fake_ctx, vca_client=None)
# no name for vdc
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'service_type':
vcloud_plugin_common.ONDEMAND_SERVICE_TYPE
},
'use_external_resource': False,
}
)
with self.assertRaises(cfy_exc.NonRecoverableError):
vdc.create(ctx=fake_ctx, vca_client=None)
# create new vdc for deployment
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'service_type':
vcloud_plugin_common.ONDEMAND_SERVICE_TYPE
},
'use_external_resource': False,
'name': "something"
}
)
# no task returned
fake_client.create_vdc = mock.MagicMock(
return_value=None
)
with self.assertRaises(cfy_exc.NonRecoverableError):
vdc.create(ctx=fake_ctx, vca_client=None)
# everything fine
fake_client.create_vdc = mock.MagicMock(
return_value=self.generate_task(
vcloud_plugin_common.TASK_STATUS_SUCCESS
)
)
vdc.create(ctx=fake_ctx, vca_client=None)
def test_delete(self):
"""check vdc deletion operation"""
fake_client = self.generate_client()
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
            # external resource
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'service_type':
vcloud_plugin_common.ONDEMAND_SERVICE_TYPE
},
'use_external_resource': True,
'resource_id': 'not_existed'
}
)
vdc.delete(ctx=fake_ctx, vca_client=None)
            # delete_vdc reports a failure
fake_client.delete_vdc = mock.MagicMock(
return_value=(False, None)
)
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'service_type':
vcloud_plugin_common.ONDEMAND_SERVICE_TYPE
},
'use_external_resource': False,
'name': "something"
},
runtime_properties={
vdc.VDC_NAME: "something"
}
)
with self.assertRaises(cfy_exc.NonRecoverableError):
vdc.delete(ctx=fake_ctx, vca_client=None)
fake_client.delete_vdc.assert_called_with("something")
self.assertTrue(
vdc.VDC_NAME in fake_ctx.instance.runtime_properties
)
            # successful delete
fake_client.delete_vdc = mock.MagicMock(
return_value=(True, self.generate_task(
vcloud_plugin_common.TASK_STATUS_SUCCESS
))
)
vdc.delete(ctx=fake_ctx, vca_client=None)
self.assertFalse(
vdc.VDC_NAME in fake_ctx.instance.runtime_properties
)
|
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main
from functools import partial
from qiita_client.testing import PluginTestCase
from qiita_client import ArtifactInfo
import os
from os import remove
from os.path import exists, isdir, join
from shutil import rmtree, copyfile
from tempfile import TemporaryDirectory
from qp_shogun import plugin
from tempfile import mkdtemp
from json import dumps
from biom import Table
import numpy as np
from io import StringIO
from qp_shogun.shogun.utils import (
get_dbs, get_dbs_list, generate_shogun_dflt_params,
import_shogun_biom, shogun_db_functional_parser, shogun_parse_module_table,
shogun_parse_enzyme_table, shogun_parse_pathway_table)
from qp_shogun.shogun.shogun import (
generate_shogun_align_commands, _format_params,
generate_shogun_assign_taxonomy_commands, generate_fna_file,
generate_shogun_functional_commands, generate_shogun_redist_commands,
shogun)
SHOGUN_PARAMS = {
'Database': 'database', 'Aligner tool': 'aligner',
'Number of threads': 'threads'}
class ShogunTests(PluginTestCase):
def setUp(self):
plugin("https://localhost:21174", 'register', 'ignored')
out_dir = mkdtemp()
self.maxDiff = None
self.out_dir = out_dir
self.db_path = os.environ["QC_SHOGUN_DB_DP"]
self.params = {
'Database': join(self.db_path, 'rep82'),
'Aligner tool': 'bowtie2',
'Number of threads': 5
}
self._clean_up_files = []
self._clean_up_files.append(out_dir)
self.enzymes = ('K00001\t'
'"1. Oxidoreductases"\t'
'"1.1 Acting on the CH-OH group of donors"\t'
'"1.1.1 With NAD+ or NADP+ as acceptor"\t'
'"1.1.1.1 alcohol dehydrogenase"\n'
'K00002\t'
'"1. Oxidoreductases"\t'
'"1.1 Acting on the CH-OH group of donors"\t'
'"1.1.1 With NAD+ or NADP+ as acceptor"\t'
'"1.1.1.2 alcohol dehydrogenase (NADP+)"\n'
'K00003\t'
'"1. Oxidoreductases"\t'
'"1.1 Acting on the CH-OH group of donors"\t'
'"1.1.1 With NAD+ or NADP+ as acceptor"\t'
'"1.1.1.3 homoserine dehydrogenase"')
self.enz_md = {
'K00001': {'taxonomy': ['1. Oxidoreductases',
'1.1 Acting on the CH-OH group of donors',
'1.1.1 With NAD+ or NADP+ as acceptor',
'1.1.1.1 alcohol dehydrogenase']},
'K00002': {'taxonomy': ['1. Oxidoreductases',
'1.1 Acting on the CH-OH group of donors',
'1.1.1 With NAD+ or NADP+ as acceptor',
'1.1.1.2 alcohol dehydrogenase (NADP+)']},
'K00003': {'taxonomy': ['1. Oxidoreductases',
'1.1 Acting on the CH-OH group of donors',
'1.1.1 With NAD+ or NADP+ as acceptor',
'1.1.1.3 homoserine dehydrogenase']}}
self.modules = (
'K00003\t'
'"Pathway module"\t'
'"Nucleotide and amino acid metabolism"\t'
'"Cysteine and methionine metabolism"\t'
'"M00017 Methionine biosynthesis,'
' apartate => homoserine => methionine [PATH:map00270]"\n'
'K00003\t'
'"Pathway module"\t'
'"Nucleotide and amino acid metabolism"\t'
'"Serine and threonine metabolism"\t'
'"M00018 Threonine biosynthesis, '
'apartate => homoserine => threonine [PATH:map00260]"\n'
'K00133\t'
'"Pathway module"\t'
'"Nucleotide and amino acid metabolism"\t'
'"Cysteine and methionine metabolism"\t'
'"M00017 Methionine biosynthesis,'
' apartate => homoserine => methionine [PATH:map00270]"')
self.mod_md = {
'M00017': {'taxonomy': ['Pathway module',
'Nucleotide and amino acid metabolism',
'Cysteine and methionine metabolism',
'Methionine biosynthesis,' +
' apartate => homoserine => ' +
'methionine [PATH:map00270]']},
'M00018': {'taxonomy': ['Pathway module',
'Nucleotide and amino acid metabolism',
'Serine and threonine metabolism',
'Threonine biosynthesis,' +
' apartate => homoserine => ' +
'threonine [PATH:map00260]']}}
self.pathways = ('K00271\t'
'"Enzymes"\t'
'"1. Oxidoreductases"\t'
'"1.4 Acting on the CH-NH2 group of donors"\t'
'"1.4.1 With NAD+ or NADP+ as acceptor"\t'
'"1.4.1.23 valine dehydrogenase (NAD+)"\n'
'K00272\t'
'"Enzymes"\t'
'"1. Oxidoreductases"\t'
'"1.4 Acting on the CH-NH2 group of donors"\t'
'"1.4.3 With oxygen as acceptor"\t'
'"1.4.3.1 D-aspartate oxidase"\n'
'K00273\t'
'"Enzymes"\t'
'"1. Oxidoreductases"\t'
'"1.4 Acting on the CH-NH2 group of donors"\t'
'"1.4.3 With oxygen as acceptor"\t'
'"1.4.3.3 D-amino-acid oxidase"')
self.path_md = {
'1.4.1 With NAD+ or NADP+ as acceptor': {
'taxonomy': ['Enzymes',
'1. Oxidoreductases',
'1.4 Acting on the CH-NH2 group of donors']},
'1.4.3 With oxygen as acceptor': {
'taxonomy': ['Enzymes',
'1. Oxidoreductases',
'1.4 Acting on the CH-NH2 group of donors']}}
def tearDown(self):
for fp in self._clean_up_files:
if exists(fp):
if isdir(fp):
rmtree(fp)
else:
remove(fp)
def test_get_dbs(self):
db_path = self.db_path
obs = get_dbs(db_path)
exp = {'rep82': join(db_path, 'rep82')}
self.assertEqual(obs, exp)
def test_get_dbs_list(self):
db_path = self.db_path
obs = get_dbs_list(db_path)
exp = join(join('"'+db_path, 'rep82')+'"')
self.assertEqual(obs, exp)
def test_generate_shogun_dflt_params(self):
obs = generate_shogun_dflt_params()
exp = {
'rep82_bowtie2': {
'Database': join(self.db_path, 'rep82'),
'Aligner tool': 'bowtie2',
'Number of threads': 5},
'rep82_utree': {
'Database': join(self.db_path, 'rep82'),
'Aligner tool': 'utree',
'Number of threads': 5},
# 'rep82_burst': {
# 'Database': join(self.db_path, 'rep82'),
# 'Aligner tool': 'burst',
# 'Number of threads': 5}
}
self.assertEqual(obs, exp)
def test_generate_fna_file(self):
out_dir = self.out_dir
with TemporaryDirectory(dir=out_dir, prefix='shogun_') as fp:
sample = [
('s1', 'SKB8.640193', 'support_files/kd_test_1_R1.fastq.gz',
'support_files/kd_test_1_R2.fastq.gz')
]
exp = join(fp, 'combined.fna')
obs = generate_fna_file(fp, sample)
self.assertEqual(obs, exp)
# test with only forward
with TemporaryDirectory(dir=out_dir, prefix='shogun_') as fp:
sample = [
('s1', 'SKB8.640193', 'support_files/kd_test_1_R1.fastq.gz',
None)
]
exp = join(fp, 'combined.fna')
obs = generate_fna_file(fp, sample)
self.assertEqual(obs, exp)
def test_shogun_db_functional_parser(self):
db_path = self.params['Database']
func_prefix = 'function/ko'
exp = {
'enzyme': join(db_path, '%s-enzyme-annotations.txt' % func_prefix),
'module': join(db_path, '%s-module-annotations.txt' % func_prefix),
'pathway': join(db_path, '%s-pathway-annotations.txt'
% func_prefix)}
obs = shogun_db_functional_parser(db_path)
self.assertEqual(obs, exp)
def test_shogun_parse_enzyme_table(self):
out_table = shogun_parse_enzyme_table(StringIO(self.enzymes))
self.assertDictEqual(self.enz_md, out_table)
def test_shogun_parse_module_table(self):
out_table = shogun_parse_module_table(StringIO(self.modules))
self.assertDictEqual(self.mod_md, out_table)
def test_shogun_parse_pathway_table(self):
out_table = shogun_parse_pathway_table(StringIO(self.pathways))
self.assertDictEqual(self.path_md, out_table)
def test_import_shogun_biom(self):
shogun_table = ('#OTU ID\t1450\t2563\n'
'k__Archaea\t26\t25\n'
'k__Archaea;p__Crenarchaeota\t3\t5\n'
'k__Archaea;p__Crenarchaeota;c__Thermoprotei\t1\t25\n')
exp_biom = Table(np.array([[26, 25],
[3, 5],
[1, 25]]),
['k__Archaea',
'k__Archaea;p__Crenarchaeota',
'k__Archaea;p__Crenarchaeota;c__Thermoprotei'],
['1450',
'2563'])
obs_biom = import_shogun_biom(StringIO(shogun_table))
self.assertEqual(exp_biom, obs_biom)
tax_metadata = {'k__Archaea': {
'taxonomy': ['k__Archaea']},
'k__Archaea;p__Crenarchaeota': {
'taxonomy': ['k__Archaea',
'p__Crenarchaeota']},
'k__Archaea;p__Crenarchaeota;c__Thermoprotei': {
'taxonomy': ['k__Archaea',
'p__Crenarchaeota',
'c__Thermoprotei']}}
exp_biom_tax = Table(np.array([[26, 25],
[3, 5],
[1, 25]]),
['k__Archaea',
'k__Archaea;p__Crenarchaeota',
'k__Archaea;p__Crenarchaeota;c__Thermoprotei'],
['1450',
'2563'])
exp_biom_tax.add_metadata(tax_metadata, axis='observation')
obs_biom_tax = import_shogun_biom(
StringIO(shogun_table), names_to_taxonomy=True)
self.assertEqual(exp_biom_tax, obs_biom_tax)
# test modules
module_table = ('#MODULE ID\t1450\t2563\n'
'M00017\t26\t25\n'
'M00018\t3\t5\n')
exp_m_biom = Table(np.array([[26, 25],
[3, 5]]),
['M00017', 'M00018'],
['1450', '2563'])
exp_m_biom.add_metadata(self.mod_md, axis='observation')
obs_m_biom = import_shogun_biom(
StringIO(module_table), annotation_table=StringIO(self.modules),
annotation_type='module')
self.assertEqual(exp_m_biom, obs_m_biom)
# test pathways
path_table = ('#PATHWAY ID\t1450\t2563\n'
'1.4.1 With NAD+ or NADP+ as acceptor\t26\t25\n'
'1.4.3 With oxygen as acceptor\t3\t5\n')
exp_p_biom = Table(np.array([[26, 25],
[3, 5]]),
['1.4.1 With NAD+ or NADP+ as acceptor',
'1.4.3 With oxygen as acceptor'],
['1450', '2563'])
exp_p_biom.add_metadata(self.path_md, axis='observation')
obs_p_biom = import_shogun_biom(
StringIO(path_table), annotation_table=StringIO(self.pathways),
annotation_type='pathway')
self.assertEqual(exp_p_biom, obs_p_biom)
# test enzymes
enzyme_table = ('#KEGG ID\t1450\t2563\n'
'K00001\t26\t25\n'
'K00002\t3\t5\n'
'K00003\t1\t25\n')
exp_e_biom = Table(np.array([[26, 25],
[3, 5],
[1, 25]]),
['K00001',
'K00002',
'K00003'],
['1450', '2563'])
exp_e_biom.add_metadata(self.enz_md, axis='observation')
obs_e_biom = import_shogun_biom(
StringIO(enzyme_table), annotation_table=StringIO(self.enzymes),
annotation_type='enzyme')
self.assertEqual(exp_e_biom, obs_e_biom)
# test empty
empty_table = ('#KEGG ID\t1450\t2563\n')
exp_empty_biom = Table(np.zeros((0, 2)),
[],
['1450', '2563'])
obs_empty_biom = import_shogun_biom(
StringIO(empty_table), annotation_table=StringIO(self.enzymes),
annotation_type='enzyme')
self.assertEqual(exp_empty_biom, obs_empty_biom)
def test_format_shogun_params(self):
obs = _format_params(self.params, SHOGUN_PARAMS)
exp = {
'database': join(self.db_path, 'rep82'),
'aligner': 'bowtie2',
'threads': 5
}
self.assertEqual(obs, exp)
def test_generate_shogun_align_commands(self):
out_dir = self.out_dir
with TemporaryDirectory(dir=out_dir, prefix='shogun_') as temp_dir:
exp_cmd = [
('shogun align --aligner bowtie2 --threads 5 '
'--database %srep82 --input %s/combined.fna '
'--output %s') %
(self.db_path, temp_dir, temp_dir)
]
params = _format_params(self.params, SHOGUN_PARAMS)
obs_cmd = generate_shogun_align_commands(
join(temp_dir, 'combined.fna'), temp_dir, params)
self.assertEqual(obs_cmd, exp_cmd)
def test_generate_shogun_assign_taxonomy_commands(self):
out_dir = self.out_dir
with TemporaryDirectory(dir=out_dir, prefix='shogun_') as temp_dir:
exp_cmd = [
('shogun assign-taxonomy --aligner bowtie2 '
'--database %srep82 --input %s/alignment.bowtie2.sam '
'--output %s/profile.tsv') %
(self.db_path, temp_dir, temp_dir)
]
exp_output_fp = join(temp_dir, 'profile.tsv')
params = _format_params(self.params, SHOGUN_PARAMS)
obs_cmd, obs_output_fp = generate_shogun_assign_taxonomy_commands(
temp_dir, params)
self.assertEqual(obs_cmd, exp_cmd)
self.assertEqual(obs_output_fp, exp_output_fp)
def test_generate_shogun_functional_commands(self):
out_dir = self.out_dir
with TemporaryDirectory(dir=out_dir, prefix='shogun_') as temp_dir:
exp_cmd = [
('shogun functional '
'--database %srep82 --input %s '
'--output %s --level species') %
(self.db_path, join(temp_dir, 'profile.tsv'),
join(temp_dir, 'functional'))
]
profile_dir = join(temp_dir, 'profile.tsv')
params = _format_params(self.params, SHOGUN_PARAMS)
obs_cmd, output = generate_shogun_functional_commands(
profile_dir, temp_dir, params, 'species')
self.assertEqual(obs_cmd, exp_cmd)
def test_generate_shogun_redist_commands(self):
out_dir = self.out_dir
with TemporaryDirectory(dir=out_dir, prefix='shogun_') as temp_dir:
exp_cmd = [
('shogun redistribute '
'--database %srep82 --level species --input %s '
'--output %s') %
(self.db_path, join(temp_dir, 'profile.tsv'),
join(temp_dir, 'profile.redist.species.tsv'))
]
profile_dir = join(temp_dir, 'profile.tsv')
params = _format_params(self.params, SHOGUN_PARAMS)
obs_cmd, output = generate_shogun_redist_commands(
profile_dir, temp_dir, params, 'species')
self.assertEqual(obs_cmd, exp_cmd)
# Testing shogun with bowtie2
def _helper_shogun_bowtie(self):
# generating filepaths
in_dir = mkdtemp()
self._clean_up_files.append(in_dir)
fp1_1 = join(in_dir, 'S22205_S104_L001_R1_001.fastq.gz')
fp1_2 = join(in_dir, 'S22205_S104_L001_R2_001.fastq.gz')
fp2_1 = join(in_dir, 'S22282_S102_L001_R1_001.fastq.gz')
fp2_2 = join(in_dir, 'S22282_S102_L001_R2_001.fastq.gz')
copyfile('support_files/S22205_S104_L001_R1_001.fastq.gz', fp1_1)
copyfile('support_files/S22205_S104_L001_R2_001.fastq.gz', fp1_2)
copyfile('support_files/S22282_S102_L001_R1_001.fastq.gz', fp2_1)
copyfile('support_files/S22282_S102_L001_R2_001.fastq.gz', fp2_2)
return fp1_1, fp1_2, fp2_1, fp2_2
def test_shogun_bt2(self):
# inserting new prep template
prep_info_dict = {
'SKB8.640193': {'run_prefix': 'S22205_S104'},
'SKD8.640184': {'run_prefix': 'S22282_S102'}}
data = {'prep_info': dumps(prep_info_dict),
# magic #1 = testing study
'study': 1,
'data_type': 'Metagenomic'}
pid = self.qclient.post('/apitest/prep_template/', data=data)['prep']
# inserting artifacts
fp1_1, fp1_2, fp2_1, fp2_2 = self._helper_shogun_bowtie()
data = {
'filepaths': dumps([
(fp1_1, 'raw_forward_seqs'),
(fp1_2, 'raw_reverse_seqs'),
(fp2_1, 'raw_forward_seqs'),
(fp2_2, 'raw_reverse_seqs')]),
'type': "per_sample_FASTQ",
'name': "Test Shogun artifact",
'prep': pid}
aid = self.qclient.post('/apitest/artifact/', data=data)['artifact']
self.params['input'] = aid
data = {'user': '[email protected]',
'command': dumps(['qp-shogun', '0.1.5', 'Shogun']),
'status': 'running',
'parameters': dumps(self.params)}
jid = self.qclient.post('/apitest/processing_job/', data=data)['job']
out_dir = mkdtemp()
self._clean_up_files.append(out_dir)
success, ainfo, msg = shogun(self.qclient, jid, self.params, out_dir)
self.assertEqual("", msg)
self.assertTrue(success)
        # we are expecting 4 artifacts in total
pout_dir = partial(join, out_dir)
self.assertCountEqual(ainfo, [
ArtifactInfo('Shogun Alignment Profile', 'BIOM',
[(pout_dir('otu_table.alignment.profile.biom'),
'biom')]),
ArtifactInfo('Taxonomic Predictions - phylum', 'BIOM',
[(pout_dir('otu_table.redist.phylum.biom'),
'biom')]),
ArtifactInfo('Taxonomic Predictions - genus', 'BIOM',
[(pout_dir('otu_table.redist.genus.biom'),
'biom')]),
ArtifactInfo('Taxonomic Predictions - species', 'BIOM',
[(pout_dir('otu_table.redist.species.biom'),
'biom')])])
def test_shogun_burst(self):
# inserting new prep template
prep_info_dict = {
'SKB8.640193': {'run_prefix': 'S22205_S104'},
'SKD8.640184': {'run_prefix': 'S22282_S102'}}
data = {'prep_info': dumps(prep_info_dict),
# magic #1 = testing study
'study': 1,
'data_type': 'Metagenomic'}
pid = self.qclient.post('/apitest/prep_template/', data=data)['prep']
# inserting artifacts
fp1_1, fp1_2, fp2_1, fp2_2 = self._helper_shogun_bowtie()
data = {
'filepaths': dumps([
(fp1_1, 'raw_forward_seqs'),
(fp1_2, 'raw_reverse_seqs'),
(fp2_1, 'raw_forward_seqs'),
(fp2_2, 'raw_reverse_seqs')]),
'type': "per_sample_FASTQ",
'name': "Test Shogun artifact",
'prep': pid}
aid = self.qclient.post('/apitest/artifact/', data=data)['artifact']
self.params['input'] = aid
self.params['Aligner tool'] = 'burst'
data = {'user': '[email protected]',
'command': dumps(['qp-shogun', '0.1.5', 'Shogun']),
'status': 'running',
'parameters': dumps(self.params)}
jid = self.qclient.post('/apitest/processing_job/', data=data)['job']
out_dir = mkdtemp()
self._clean_up_files.append(out_dir)
success, ainfo, msg = shogun(self.qclient, jid, self.params, out_dir)
self.assertEqual("", msg)
self.assertTrue(success)
        # we are expecting 4 artifacts in total
pout_dir = partial(join, out_dir)
self.assertCountEqual(ainfo, [
ArtifactInfo('Shogun Alignment Profile', 'BIOM',
[(pout_dir('otu_table.alignment.profile.biom'),
'biom')]),
ArtifactInfo('Taxonomic Predictions - phylum', 'BIOM',
[(pout_dir('otu_table.redist.phylum.biom'),
'biom')]),
ArtifactInfo('Taxonomic Predictions - genus', 'BIOM',
[(pout_dir('otu_table.redist.genus.biom'),
'biom')]),
ArtifactInfo('Taxonomic Predictions - species', 'BIOM',
[(pout_dir('otu_table.redist.species.biom'),
'biom')])])
def test_shogun_utree(self):
# inserting new prep template
prep_info_dict = {
'SKB8.640193': {'run_prefix': 'S22205_S104'},
'SKD8.640184': {'run_prefix': 'S22282_S102'}}
data = {'prep_info': dumps(prep_info_dict),
# magic #1 = testing study
'study': 1,
'data_type': 'Metagenomic'}
pid = self.qclient.post('/apitest/prep_template/', data=data)['prep']
# inserting artifacts
fp1_1, fp1_2, fp2_1, fp2_2 = self._helper_shogun_bowtie()
data = {
'filepaths': dumps([
(fp1_1, 'raw_forward_seqs'),
(fp1_2, 'raw_reverse_seqs'),
(fp2_1, 'raw_forward_seqs'),
(fp2_2, 'raw_reverse_seqs')]),
'type': "per_sample_FASTQ",
'name': "Test Shogun artifact",
'prep': pid}
aid = self.qclient.post('/apitest/artifact/', data=data)['artifact']
self.params['input'] = aid
self.params['Aligner tool'] = 'utree'
data = {'user': '[email protected]',
'command': dumps(['qp-shogun', '0.1.5', 'Shogun']),
'status': 'running',
'parameters': dumps(self.params)}
jid = self.qclient.post('/apitest/processing_job/', data=data)['job']
out_dir = mkdtemp()
self._clean_up_files.append(out_dir)
success, ainfo, msg = shogun(self.qclient, jid, self.params, out_dir)
self.assertEqual("", msg)
self.assertTrue(success)
        # we are expecting 4 artifacts in total
pout_dir = partial(join, out_dir)
self.assertCountEqual(ainfo, [
ArtifactInfo('Shogun Alignment Profile', 'BIOM',
[(pout_dir('otu_table.alignment.profile.biom'),
'biom')]),
ArtifactInfo('Taxonomic Predictions - phylum', 'BIOM',
[(pout_dir('otu_table.redist.phylum.biom'),
'biom')]),
ArtifactInfo('Taxonomic Predictions - genus', 'BIOM',
[(pout_dir('otu_table.redist.genus.biom'),
'biom')]),
ArtifactInfo('Taxonomic Predictions - species', 'BIOM',
[(pout_dir('otu_table.redist.species.biom'),
'biom')])])
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
import argparse
import logging
import csv
import os
import random
import six
import sys
this_dir = os.path.realpath(os.path.dirname(__file__))
sys.path.append(os.path.realpath(os.path.join(os.pardir, os.pardir)))
from geodata.address_expansions.abbreviations import abbreviate
from geodata.address_expansions.gazetteers import street_types_gazetteer, toponym_abbreviations_gazetteer
from geodata.address_formatting.formatter import AddressFormatter
from geodata.addresses.components import AddressComponents
from geodata.countries.constants import Countries
from geodata.openaddresses.formatter import *
from geodata.encoding import safe_decode
from geodata.i18n.languages import get_country_languages
from geodata.math.sampling import cdf, weighted_choice
from geodata.places.config import place_config
from geodata.text.utils import is_numeric, is_numeric_strict
from geodata.csv_utils import unicode_csv_reader
OPENADDRESSES_UK_FORMAT_DATA_TAGGED_FILENAME = 'uk_openaddresses_formatted_addresses_tagged.tsv'
OPENADDRESSES_UK_FORMAT_DATA_FILENAME = 'uk_openaddresses_formatted_addresses.tsv'
class OpenAddressesUKFormatter(object):
field_map = {
'pao': AddressFormatter.HOUSE_NUMBER,
'street.name': AddressFormatter.ROAD,
'town.name': AddressFormatter.CITY,
'postcode.name': AddressFormatter.POSTCODE
}
def __init__(self):
self.formatter = AddressFormatter()
component_validators = {
AddressFormatter.HOUSE_NUMBER: OpenAddressesFormatter.validators.validate_house_number,
AddressFormatter.ROAD: OpenAddressesFormatter.validators.validate_street,
AddressFormatter.POSTCODE: OpenAddressesFormatter.validators.validate_postcode,
}
cldr_country_probability = 0.3
address_only_probability = 0.4
drop_address_probability = 0.6
drop_address_and_postcode_probability = 0.1
@classmethod
def cleanup_number(cls, num, strip_commas=False):
num = num.strip()
if strip_commas:
num = num.replace(six.u(','), six.u(''))
try:
num_int = int(num)
except (ValueError, TypeError):
try:
num_float = float(num)
leading_zeros = 0
for c in num:
if c == six.u('0'):
leading_zeros += 1
else:
break
num = safe_decode(int(num_float))
if leading_zeros:
num = six.u('{}{}').format(six.u('0') * leading_zeros, num)
except (ValueError, TypeError):
pass
return num
def fix_component_encodings(self, components):
return {k: ftfy.fix_encoding(safe_decode(v)) for k, v in six.iteritems(components)}
def formatted_addresses(self, path, tag_components=True):
country = Countries.UNITED_KINGDOM
candidate_languages = get_country_languages(country).items()
f = open(path)
reader = unicode_csv_reader(f)
headers = reader.next()
header_indices = {i: self.field_map[k] for i, k in enumerate(headers) if k in self.field_map}
for row in reader:
components = {}
for i, key in six.iteritems(header_indices):
value = row[i].strip()
if not value:
continue
if not_applicable_regex.match(value) or null_regex.match(value) or unknown_regex.match(value):
continue
value = value.strip(', -')
validator = self.component_validators.get(key, None)
if validator is not None and not validator(value):
continue
if value:
components[key] = value
if components:
components = self.fix_component_encodings(components)
language = AddressComponents.address_language(components, candidate_languages)
street = components.get(AddressFormatter.ROAD, None)
if street is not None:
street = street.strip()
street = AddressComponents.cleaned_name(street)
if AddressComponents.street_name_is_valid(street):
street = abbreviate(street_types_gazetteer, street, language)
components[AddressFormatter.ROAD] = street
else:
components.pop(AddressFormatter.ROAD)
street = None
house_number = components.get(AddressFormatter.HOUSE_NUMBER, None)
if house_number:
house_number = self.cleanup_number(house_number, strip_commas=True)
if house_number is not None:
components[AddressFormatter.HOUSE_NUMBER] = house_number
postcode = components.get(AddressFormatter.POSTCODE, None)
# If there's a postcode, we can still use just the city/state/postcode, otherwise discard
if not street or (street and house_number and (street.lower() == house_number.lower())):
if not postcode:
continue
components = AddressComponents.drop_address(components)
country_name = AddressComponents.cldr_country_name(country, language)
if country_name:
components[AddressFormatter.COUNTRY] = country_name
for component_key in AddressFormatter.BOUNDARY_COMPONENTS:
component = components.get(component_key, None)
if component is not None:
component = abbreviate(toponym_abbreviations_gazetteer, component, language)
component = AddressComponents.name_hyphens(component)
components[component_key] = component
AddressComponents.replace_names(components)
AddressComponents.prune_duplicate_names(components)
AddressComponents.remove_numeric_boundary_names(components)
AddressComponents.add_house_number_phrase(components, language, country=country)
# Component dropout
components = place_config.dropout_components(components, country=country)
formatted = self.formatter.format_address(components, country, language=language,
minimal_only=False, tag_components=tag_components)
yield (language, country, formatted)
if random.random() < self.address_only_probability and street:
address_only_components = AddressComponents.drop_places(components)
address_only_components = AddressComponents.drop_postcode(address_only_components)
formatted = self.formatter.format_address(address_only_components, country, language=language,
minimal_only=False, tag_components=tag_components)
yield (language, country, formatted)
rand_val = random.random()
if street and house_number and rand_val < self.drop_address_probability:
components = AddressComponents.drop_address(components)
if rand_val < self.drop_address_and_postcode_probability:
components = AddressComponents.drop_postcode(components)
if components and (len(components) > 1):
formatted = self.formatter.format_address(components, country, language=language,
minimal_only=False, tag_components=tag_components)
yield (language, country, formatted)
def build_training_data(self, infile, out_dir, tag_components=True):
if tag_components:
formatted_tagged_file = open(os.path.join(out_dir, OPENADDRESSES_UK_FORMAT_DATA_TAGGED_FILENAME), 'w')
writer = csv.writer(formatted_tagged_file, 'tsv_no_quote')
else:
formatted_tagged_file = open(os.path.join(out_dir, OPENADDRESSES_UK_FORMAT_DATA_FILENAME), 'w')
writer = csv.writer(formatted_tagged_file, 'tsv_no_quote')
i = 0
for language, country, formatted_address in self.formatted_addresses(infile, tag_components=tag_components):
if tag_components:
row = (language, country, formatted_address)
else:
row = (formatted_address,)
writer.writerow(row)
i += 1
if i % 1000 == 0 and i > 0:
print('did {} formatted addresses'.format(i))
if __name__ == '__main__':
# Handle argument parsing here
parser = argparse.ArgumentParser()
parser.add_argument('sources', nargs='*')
parser.add_argument('-i', '--openaddresses-uk-file',
help='Path to OpenAddresses UK addresses.csv file')
parser.add_argument('-f', '--format',
action='store_true',
default=False,
help='Save formatted addresses (slow)')
parser.add_argument('-u', '--untagged',
action='store_true',
default=False,
help='Save untagged formatted addresses (slow)')
parser.add_argument('-o', '--out-dir',
default=os.getcwd(),
help='Output directory')
args = parser.parse_args()
if args.openaddresses_uk_file and args.format:
oa_formatter = OpenAddressesUKFormatter()
oa_formatter.build_training_data(args.openaddresses_uk_file, args.out_dir, tag_components=not args.untagged)
|
|
# -*- coding: utf-8 -*-
"""
.. _tut-events-vs-annotations:
Parsing events from raw data
============================
This tutorial describes how to read experimental events from raw recordings,
and how to convert between the two different representations of events within
MNE-Python (Events arrays and Annotations objects).
In the :ref:`introductory tutorial <overview-tut-events-section>` we saw an
example of reading experimental events from a :term:`"STIM" channel <stim
channel>`; here we'll discuss :term:`events` and :term:`annotations` more
broadly, give more detailed information about reading from STIM channels, and
give an example of reading events that are in a marker file or included in
the data file as an embedded array. The tutorials :ref:`tut-event-arrays` and
:ref:`tut-annotate-raw` discuss how to plot, combine, load, save, and
export :term:`events` and `~mne.Annotations` (respectively), and the
latter tutorial also covers interactive annotation of `~mne.io.Raw`
objects.
We'll begin by loading the Python modules we need, and loading the same
:ref:`example data <sample-dataset>` we used in the :ref:`introductory tutorial
<tut-overview>`, but to save memory we'll crop the `~mne.io.Raw` object
to just 60 seconds before loading it into RAM:
"""
import os
import numpy as np
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(tmax=60).load_data()
###############################################################################
# The Events and Annotations data structures
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Generally speaking, both the Events and `~mne.Annotations` data
# structures serve the same purpose: they provide a mapping between times
# during an EEG/MEG recording and a description of what happened at those
# times. In other words, they associate a *when* with a *what*. The main
# differences are:
#
# 1. **Units**: the Events data structure represents the *when* in terms of
# samples, whereas the `~mne.Annotations` data structure represents
# the *when* in seconds.
# 2. **Limits on the description**: the Events data structure represents the
# *what* as an integer "Event ID" code, whereas the `~mne.Annotations` data
# structure represents the *what* as a string.
# 3. **How duration is encoded**: Events in an Event array do not have a
# duration (though it is possible to represent duration with pairs of
# onset/offset events within an Events array), whereas each element of an
# `~mne.Annotations` object necessarily includes a duration (though
# the duration can be zero if an instantaneous event is desired).
# 4. **Internal representation**: Events are stored as an ordinary
# :class:`NumPy array <numpy.ndarray>`, whereas `~mne.Annotations` is
# a :class:`list`-like class defined in MNE-Python.
#
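# As a minimal illustration (the values below are invented and are not part of
# the sample dataset), an Events array is just an integer array with one row
# per event, whereas an `~mne.Annotations` object stores onsets and durations
# in seconds along with string descriptions:

example_events = np.array([[1000, 0, 1],    # [sample, previous value, event ID]
                           [2000, 0, 2]])
example_annot = mne.Annotations(onset=[1.0, 2.0],      # in seconds
                                duration=[0.0, 0.0],   # instantaneous events
                                description=['stim/A', 'stim/B'])
print(example_events)
print(example_annot)

###############################################################################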
#
# .. _stim-channel-defined:
#
# What is a STIM channel?
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# A :term:`stim channel` (short for "stimulus channel") is a channel that does
# not receive signals from an EEG, MEG, or other sensor. Instead, STIM channels
# record voltages (usually short, rectangular DC pulses of fixed magnitudes
# sent from the experiment-controlling computer) that are time-locked to
# experimental events, such as the onset of a stimulus or a button-press
# response by the subject (those pulses are sometimes called `TTL`_ pulses,
# event pulses, trigger signals, or just "triggers"). In other cases, these
# pulses may not be strictly time-locked to an experimental event, but instead
# may occur in between trials to indicate the type of stimulus (or experimental
# condition) that is about to occur on the upcoming trial.
#
# The DC pulses may be all on one STIM channel (in which case different
# experimental events or trial types are encoded as different voltage
# magnitudes), or they may be spread across several channels, in which case the
# channel(s) on which the pulse(s) occur can be used to encode different events
# or conditions. Even on systems with multiple STIM channels, there is often
# one channel that records a weighted sum of the other STIM channels, in such a
# way that voltage levels on that channel can be unambiguously decoded as
# particular event types. On older Neuromag systems (such as that used to
# record the sample data) this "summation channel" was typically ``STI 014``;
# on newer systems it is more commonly ``STI101``. You can see the STIM
# channels in the raw data file here:
raw.copy().pick_types(meg=False, stim=True).plot(start=3, duration=6)
###############################################################################
# You can see that ``STI 014`` (the summation channel) contains pulses of
# different magnitudes whereas pulses on other channels have consistent
# magnitudes. You can also see that every time there is a pulse on one of the
# other STIM channels, there is a corresponding pulse on ``STI 014``.
#
# .. TODO: somewhere in prev. section, link out to a table of which systems
# have STIM channels vs. which have marker files or embedded event arrays
# (once such a table has been created).
#
#
# Converting a STIM channel signal to an Events array
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# If your data has events recorded on a STIM channel, you can convert them into
# an events array using `~mne.find_events`. The sample number of the onset
# (or offset) of each pulse is recorded as the event time, the pulse magnitudes
# are converted into integers, and these pairs of sample numbers plus integer
# codes are stored in :class:`NumPy arrays <numpy.ndarray>` (usually called
# "the events array" or just "the events"). In its simplest form, the function
# requires only the `~mne.io.Raw` object, and the name of the channel(s)
# from which to read events:
events = mne.find_events(raw, stim_channel='STI 014')
print(events[:5]) # show the first 5
###############################################################################
# .. sidebar:: The middle column of the Events array
#
# MNE-Python events are actually *three* values: in between the sample
# number and the integer event code is a value indicating what the event
# code was on the immediately preceding sample. In practice, that value is
# almost always ``0``, but it can be used to detect the *endpoint* of an
# event whose duration is longer than one sample. See the documentation of
# `~mne.find_events` for more details.
#
# If you don't provide the name of a STIM channel, `~mne.find_events`
# will first look for MNE-Python :ref:`config variables <tut-configure-mne>`
# for variables ``MNE_STIM_CHANNEL``, ``MNE_STIM_CHANNEL_1``, etc. If those are
# not found, channels ``STI 014`` and ``STI101`` are tried, followed by the
# first channel with type "STIM" present in ``raw.ch_names``. If you regularly
# work with data from several different MEG systems with different STIM channel
# names, setting the ``MNE_STIM_CHANNEL`` config variable may not be very
# useful, but for researchers whose data is all from a single system it can be
# a time-saver to configure that variable once and then forget about it.
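#
# For example, a one-time configuration might look like this (a minimal
# sketch; note that `mne.set_config` persists the value in your MNE-Python
# configuration file):
mne.set_config('MNE_STIM_CHANNEL', 'STI 014')
print(mne.get_config('MNE_STIM_CHANNEL'))
###############################################################################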
#
# `~mne.find_events` has several options, including options for aligning
# events to the onset or offset of the STIM channel pulses, setting the minimum
# pulse duration, and handling of consecutive pulses (with no return to zero
# between them). For example, you can effectively encode event duration by
# passing ``output='step'`` to `~mne.find_events`; see the documentation
# of `~mne.find_events` for details. More information on working with
# events arrays (including how to plot, combine, load, and save event arrays)
# can be found in the tutorial :ref:`tut-event-arrays`.
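#
# For instance, here is a hedged sketch combining several of the options
# mentioned above (the parameter values are arbitrary and only for
# illustration):
events_step = mne.find_events(raw, stim_channel='STI 014', output='step',
                              consecutive=True, min_duration=0.002)
print(events_step[:5])
###############################################################################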
#
#
# Reading embedded events as Annotations
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Some EEG/MEG systems generate files where events are stored in a separate
# data array rather than as pulses on one or more STIM channels. For example,
# the EEGLAB format stores events as a collection of arrays in the :file:`.set`
# file. When reading those files, MNE-Python will automatically convert the
# stored events into an `~mne.Annotations` object and store it as the
# :attr:`~mne.io.Raw.annotations` attribute of the `~mne.io.Raw` object:
testing_data_folder = mne.datasets.testing.data_path()
eeglab_raw_file = os.path.join(testing_data_folder, 'EEGLAB', 'test_raw.set')
eeglab_raw = mne.io.read_raw_eeglab(eeglab_raw_file)
print(eeglab_raw.annotations)
###############################################################################
# The core data within an `~mne.Annotations` object is accessible
# through three of its attributes: ``onset``, ``duration``, and
# ``description``. Here we can see that there were 154 events stored in the
# EEGLAB file, that they all had a duration of zero seconds, that there were
# two different types of events, and that the first event occurred about
# 1 second after the recording began:
print(len(eeglab_raw.annotations))
print(set(eeglab_raw.annotations.duration))
print(set(eeglab_raw.annotations.description))
print(eeglab_raw.annotations.onset[0])
###############################################################################
# More information on working with `~mne.Annotations` objects, including
# how to add annotations to `~mne.io.Raw` objects interactively, and how
# to plot, concatenate, load, save, and export `~mne.Annotations`
# objects can be found in the tutorial :ref:`tut-annotate-raw`.
#
#
# Converting between Events arrays and Annotations objects
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Once your experimental events are read into MNE-Python (as either an Events
# array or an `~mne.Annotations` object), you can easily convert between
# the two formats as needed. You might do this because, e.g., an Events array
# is needed for epoching continuous data, or because you want to take advantage
# of the "annotation-aware" capability of some functions, which automatically
# omit spans of data if they overlap with certain annotations.
#
# To convert an `~mne.Annotations` object to an Events array, use the
# function `mne.events_from_annotations` on the `~mne.io.Raw` file
# containing the annotations. This function will assign an integer Event ID to
# each unique element of ``raw.annotations.description``, and will return the
# mapping of descriptions to integer Event IDs along with the derived Event
# array. By default, one event will be created at the onset of each annotation;
# this can be modified via the ``chunk_duration`` parameter of
# `~mne.events_from_annotations` to create equally spaced events within
# each annotation span (see :ref:`chunk-duration`, below, or see
# :ref:`fixed-length-events` for direct creation of an Events array of
# equally-spaced events).
events_from_annot, event_dict = mne.events_from_annotations(eeglab_raw)
print(event_dict)
print(events_from_annot[:5])
###############################################################################
# If you want to control which integers are mapped to each unique description
# value, you can pass a :class:`dict` specifying the mapping as the
# ``event_id`` parameter of `~mne.events_from_annotations`; this
# :class:`dict` will be returned unmodified as the ``event_dict``.
#
# Note that this ``event_dict`` can be used when creating `~mne.Epochs` from
# `~mne.io.Raw` objects, as demonstrated in the tutorial
# :ref:`tut-epochs-class`.
custom_mapping = {'rt': 77, 'square': 42}
(events_from_annot,
event_dict) = mne.events_from_annotations(eeglab_raw, event_id=custom_mapping)
print(event_dict)
print(events_from_annot[:5])
###############################################################################
# To make the opposite conversion (from an Events array to an
# `~mne.Annotations` object), you can create a mapping from integer
# Event ID to string descriptions, use `~mne.annotations_from_events`
# to construct the `~mne.Annotations` object, and call the
# `~mne.io.Raw.set_annotations` method to add the annotations to the
# `~mne.io.Raw` object.
#
# Because the :ref:`sample data <sample-dataset>` was recorded on a Neuromag
# system (where sample numbering starts when the acquisition system is
# initiated, not when the *recording* is initiated), we also need to pass in
# the ``orig_time`` parameter so that the onsets are properly aligned relative
# to the start of recording:
mapping = {1: 'auditory/left', 2: 'auditory/right', 3: 'visual/left',
4: 'visual/right', 5: 'smiley', 32: 'buttonpress'}
annot_from_events = mne.annotations_from_events(
events=events, event_desc=mapping, sfreq=raw.info['sfreq'],
orig_time=raw.info['meas_date'])
raw.set_annotations(annot_from_events)
###############################################################################
# Now, the annotations will appear automatically when plotting the raw data,
# and will be color-coded by their label value:
raw.plot(start=5, duration=5)
###############################################################################
# .. _`chunk-duration`:
#
# Making multiple events per annotation
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# As mentioned above, you can generate equally-spaced events from an
# `~mne.Annotations` object using the ``chunk_duration`` parameter of
# `~mne.events_from_annotations`. For example, suppose we have an
# annotation in our `~mne.io.Raw` object indicating when the subject was
# in REM sleep, and we want to perform a resting-state analysis on those spans
# of data. We can create an Events array with a series of equally-spaced events
# within each "REM" span, and then use those events to generate (potentially
# overlapping) epochs that we can analyze further.
# create the REM annotations
rem_annot = mne.Annotations(onset=[5, 41],
duration=[16, 11],
description=['REM'] * 2)
raw.set_annotations(rem_annot)
(rem_events,
rem_event_dict) = mne.events_from_annotations(raw, chunk_duration=1.5)
###############################################################################
# Now we can check that our events indeed fall in the ranges 5-21 seconds and
# 41-52 seconds, and are ~1.5 seconds apart (modulo some jitter due to the
# sampling frequency). Here are the event times rounded to the nearest
# millisecond:
print(np.round((rem_events[:, 0] - raw.first_samp) / raw.info['sfreq'], 3))
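###############################################################################
# As a minimal, hedged sketch (the epoch limits below are chosen only for
# illustration), those chunked events could then be used to cut fixed-length
# epochs out of the "REM" spans for further analysis:
rem_epochs = mne.Epochs(raw, rem_events, event_id=rem_event_dict, tmin=0,
                        tmax=1.5, baseline=None, preload=True)
print(rem_epochs)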
###############################################################################
# Other examples of resting-state analysis can be found in the online
# documentation for `~mne.make_fixed_length_events`, such as
# :ref:`ex-envelope-correlation`.
#
# .. LINKS
#
# .. _`TTL`: https://en.wikipedia.org/wiki/Transistor%E2%80%93transistor_logic
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from copy import copy
import os
import pytest
import tempfile
from subprocess import call, check_call
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.impala_cluster import ImpalaCluster
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfLocal
from tests.common.test_dimensions import (
create_exec_option_dimension,
create_exec_option_dimension_from_dict,
create_uncompressed_text_dimension)
from tests.util.calculation_util import get_random_id
from tests.util.filesystem_utils import get_fs_path, IS_S3
from tests.verifiers.metric_verifier import MetricVerifier
class TestUdfBase(ImpalaTestSuite):
"""
Base class with utility functions for testing UDFs.
"""
def _check_mem_limit_exception(self, e):
"""Return without error if the exception is MEM_LIMIT_EXCEEDED, re-raise 'e'
in all other cases."""
if 'Memory limit exceeded' in str(e):
return
raise e
def _run_query_all_impalads(self, exec_options, query, expected):
impala_cluster = ImpalaCluster()
for impalad in impala_cluster.impalads:
client = impalad.service.create_beeswax_client()
result = self.execute_query_expect_success(client, query, exec_options)
assert result.data == expected
def _load_functions(self, template, vector, database, location):
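    """Instantiates the given DDL template with 'database' and 'location', splits
    it into individual statements on ';', and executes each statement with the
    exec options from 'vector', asserting that each one succeeds."""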
queries = template.format(database=database, location=location)
# Split queries and remove empty lines
queries = [q for q in queries.split(';') if q.strip()]
exec_options = vector.get_value('exec_option')
for query in queries:
if query.strip() == '': continue
result = self.execute_query_expect_success(self.client, query, exec_options)
assert result is not None
# Create sample UDA functions in {database} from library {location}
create_sample_udas_template = """
create aggregate function {database}.test_count(int) returns bigint
location '{location}' update_fn='CountUpdate';
create aggregate function {database}.hll(int) returns string
location '{location}' update_fn='HllUpdate';
create aggregate function {database}.sum_small_decimal(decimal(9,2))
returns decimal(9,2) location '{location}' update_fn='SumSmallDecimalUpdate';
"""
# Create test UDA functions in {database} from library {location}
create_test_udas_template = """
create aggregate function {database}.trunc_sum(double)
returns bigint intermediate double location '{location}'
update_fn='TruncSumUpdate' merge_fn='TruncSumMerge'
serialize_fn='TruncSumSerialize' finalize_fn='TruncSumFinalize';
create aggregate function {database}.arg_is_const(int, int)
returns boolean location '{location}'
init_fn='ArgIsConstInit' update_fn='ArgIsConstUpdate' merge_fn='ArgIsConstMerge';
create aggregate function {database}.toggle_null(int)
returns int location '{location}'
update_fn='ToggleNullUpdate' merge_fn='ToggleNullMerge';
create aggregate function {database}.count_nulls(bigint)
returns bigint location '{location}'
update_fn='CountNullsUpdate' merge_fn='CountNullsMerge';
create aggregate function {database}.agg_intermediate(int)
returns bigint intermediate string location '{location}'
init_fn='AggIntermediateInit' update_fn='AggIntermediateUpdate'
merge_fn='AggIntermediateMerge' finalize_fn='AggIntermediateFinalize';
create aggregate function {database}.agg_decimal_intermediate(decimal(2,1), int)
returns decimal(6,5) intermediate decimal(4,3) location '{location}'
init_fn='AggDecimalIntermediateInit' update_fn='AggDecimalIntermediateUpdate'
merge_fn='AggDecimalIntermediateMerge' finalize_fn='AggDecimalIntermediateFinalize';
create aggregate function {database}.agg_string_intermediate(decimal(20,10), bigint, string)
returns decimal(20,0) intermediate string location '{location}'
init_fn='AggStringIntermediateInit' update_fn='AggStringIntermediateUpdate'
merge_fn='AggStringIntermediateMerge' finalize_fn='AggStringIntermediateFinalize';
create aggregate function {database}.char_intermediate_sum(int) returns int
intermediate char(10) LOCATION '{location}' update_fn='AggCharIntermediateUpdate'
init_fn='AggCharIntermediateInit' merge_fn='AggCharIntermediateMerge'
serialize_fn='AggCharIntermediateSerialize' finalize_fn='AggCharIntermediateFinalize';
"""
# Create test UDF functions in {database} from library {location}
create_udfs_template = """
create function {database}.identity(boolean) returns boolean
location '{location}' symbol='Identity';
create function {database}.identity(tinyint) returns tinyint
location '{location}' symbol='Identity';
create function {database}.identity(smallint) returns smallint
location '{location}' symbol='Identity';
create function {database}.identity(int) returns int
location '{location}' symbol='Identity';
create function {database}.identity(bigint) returns bigint
location '{location}' symbol='Identity';
create function {database}.identity(float) returns float
location '{location}' symbol='Identity';
create function {database}.identity(double) returns double
location '{location}' symbol='Identity';
create function {database}.identity(string) returns string
location '{location}'
symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_9StringValE';
create function {database}.identity(timestamp) returns timestamp
location '{location}'
symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_12TimestampValE';
create function {database}.identity(decimal(9,0)) returns decimal(9,0)
location '{location}'
symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_10DecimalValE';
create function {database}.identity(decimal(18,1)) returns decimal(18,1)
location '{location}'
symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_10DecimalValE';
create function {database}.identity(decimal(38,10)) returns decimal(38,10)
location '{location}'
symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_10DecimalValE';
create function {database}.all_types_fn(
string, boolean, tinyint, smallint, int, bigint, float, double, decimal(2,0))
returns int
location '{location}' symbol='AllTypes';
create function {database}.no_args() returns string
location '{location}'
symbol='_Z6NoArgsPN10impala_udf15FunctionContextE';
create function {database}.var_and(boolean...) returns boolean
location '{location}' symbol='VarAnd';
create function {database}.var_sum(int...) returns int
location '{location}' symbol='VarSum';
create function {database}.var_sum(double...) returns double
location '{location}' symbol='VarSum';
create function {database}.var_sum(string...) returns int
location '{location}' symbol='VarSum';
create function {database}.var_sum(decimal(4,2)...) returns decimal(18,2)
location '{location}' symbol='VarSum';
create function {database}.var_sum_multiply(double, int...) returns double
location '{location}'
symbol='_Z14VarSumMultiplyPN10impala_udf15FunctionContextERKNS_9DoubleValEiPKNS_6IntValE';
create function {database}.var_sum_multiply2(double, int...) returns double
location '{location}'
symbol='_Z15VarSumMultiply2PN10impala_udf15FunctionContextERKNS_9DoubleValEiPKNS_6IntValE';
create function {database}.xpow(double, double) returns double
location '{location}'
symbol='_ZN6impala13MathFunctions3PowEPN10impala_udf15FunctionContextERKNS1_9DoubleValES6_';
create function {database}.to_lower(string) returns string
location '{location}'
symbol='_Z7ToLowerPN10impala_udf15FunctionContextERKNS_9StringValE';
create function {database}.to_upper(string) returns string
location '{location}'
symbol='_Z7ToUpperPN10impala_udf15FunctionContextERKNS_9StringValE';
create function {database}.constant_timestamp() returns timestamp
location '{location}' symbol='ConstantTimestamp';
create function {database}.validate_arg_type(string) returns boolean
location '{location}' symbol='ValidateArgType';
create function {database}.count_rows() returns bigint
location '{location}' symbol='Count' prepare_fn='CountPrepare' close_fn='CountClose';
create function {database}.constant_arg(int) returns int
location '{location}' symbol='ConstantArg' prepare_fn='ConstantArgPrepare' close_fn='ConstantArgClose';
create function {database}.validate_open(int) returns boolean
location '{location}' symbol='ValidateOpen'
prepare_fn='ValidateOpenPrepare' close_fn='ValidateOpenClose';
create function {database}.mem_test(bigint) returns bigint
location '{location}' symbol='MemTest'
prepare_fn='MemTestPrepare' close_fn='MemTestClose';
create function {database}.mem_test_leaks(bigint) returns bigint
location '{location}' symbol='MemTest'
prepare_fn='MemTestPrepare';
-- Regression test for IMPALA-1475
create function {database}.unmangled_symbol() returns bigint
location '{location}' symbol='UnmangledSymbol';
create function {database}.four_args(int, int, int, int) returns int
location '{location}' symbol='FourArgs';
create function {database}.five_args(int, int, int, int, int) returns int
location '{location}' symbol='FiveArgs';
create function {database}.six_args(int, int, int, int, int, int) returns int
location '{location}' symbol='SixArgs';
create function {database}.seven_args(int, int, int, int, int, int, int) returns int
location '{location}' symbol='SevenArgs';
create function {database}.eight_args(int, int, int, int, int, int, int, int) returns int
location '{location}' symbol='EightArgs';
create function {database}.twenty_args(int, int, int, int, int, int, int, int, int, int,
int, int, int, int, int, int, int, int, int, int) returns int
location '{location}' symbol='TwentyArgs';
create function {database}.twenty_one_args(int, int, int, int, int, int, int, int, int, int,
int, int, int, int, int, int, int, int, int, int, int) returns int
location '{location}' symbol='TwentyOneArgs';
"""
class TestUdfExecution(TestUdfBase):
"""Test execution of UDFs with a combination of different query options."""
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestUdfExecution, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
create_exec_option_dimension_from_dict({"disable_codegen" : [False, True],
"disable_codegen_rows_threshold" : [0],
"exec_single_node_rows_threshold" : [0,100],
"enable_expr_rewrites" : [False, True]}))
# There is no reason to run these tests using all dimensions.
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
def test_native_functions(self, vector, unique_database):
enable_expr_rewrites = vector.get_value('exec_option')['enable_expr_rewrites']
self._load_functions(
self.create_udfs_template, vector, unique_database,
get_fs_path('/test-warehouse/libTestUdfs.so'))
self._load_functions(
self.create_sample_udas_template, vector, unique_database,
get_fs_path('/test-warehouse/libudasample.so'))
self._load_functions(
self.create_test_udas_template, vector, unique_database,
get_fs_path('/test-warehouse/libTestUdas.so'))
self.run_test_case('QueryTest/udf', vector, use_db=unique_database)
if not vector.get_value('exec_option')['disable_codegen']:
self.run_test_case('QueryTest/udf-codegen-required', vector, use_db=unique_database)
self.run_test_case('QueryTest/uda', vector, use_db=unique_database)
self.run_test_case('QueryTest/udf-init-close', vector, use_db=unique_database)
# Some tests assume no expr rewrites.
if enable_expr_rewrites:
self.run_test_case('QueryTest/udf-init-close-deterministic', vector,
use_db=unique_database)
else:
self.run_test_case('QueryTest/udf-no-expr-rewrite', vector,
use_db=unique_database)
def test_ir_functions(self, vector, unique_database):
if vector.get_value('exec_option')['disable_codegen']:
# IR functions require codegen to be enabled.
return
enable_expr_rewrites = vector.get_value('exec_option')['enable_expr_rewrites']
self._load_functions(
self.create_udfs_template, vector, unique_database,
get_fs_path('/test-warehouse/test-udfs.ll'))
self.run_test_case('QueryTest/udf', vector, use_db=unique_database)
self.run_test_case('QueryTest/udf-init-close', vector, use_db=unique_database)
# Some tests assume determinism or non-determinism, which depends on expr rewrites.
if enable_expr_rewrites:
self.run_test_case('QueryTest/udf-init-close-deterministic', vector,
use_db=unique_database)
else:
self.run_test_case('QueryTest/udf-no-expr-rewrite', vector, use_db=unique_database)
def test_java_udfs(self, vector, unique_database):
self.run_test_case('QueryTest/load-java-udfs', vector, use_db=unique_database)
self.run_test_case('QueryTest/java-udf', vector, use_db=unique_database)
def test_udf_errors(self, vector, unique_database):
    # Only run with codegen disabled to force the interpretation path to be taken.
# Aim to exercise two failure cases:
# 1. too many arguments
# 2. IR UDF
fd, dir_name = tempfile.mkstemp()
hdfs_path = get_fs_path("/test-warehouse/{0}_bad_udf.ll".format(unique_database))
try:
with open(dir_name, "w") as f:
f.write("Hello World")
check_call(["hadoop", "fs", "-put", "-f", f.name, hdfs_path])
if vector.get_value('exec_option')['disable_codegen']:
self.run_test_case('QueryTest/udf-errors', vector, use_db=unique_database)
finally:
if os.path.exists(f.name):
os.remove(f.name)
call(["hadoop", "fs", "-rm", "-f", hdfs_path])
os.close(fd)
# Run serially because this will blow the process limit, potentially causing other
# queries to fail
@pytest.mark.execute_serially
def test_mem_limits(self, vector, unique_database):
# Set the mem_limit and buffer_pool_limit high enough that the query makes it through
# admission control and a simple scan can run.
vector = copy(vector)
vector.get_value('exec_option')['mem_limit'] = '1mb'
vector.get_value('exec_option')['buffer_pool_limit'] = '32kb'
try:
self.run_test_case('QueryTest/udf-mem-limit', vector, use_db=unique_database)
assert False, "Query was expected to fail"
except ImpalaBeeswaxException, e:
self._check_mem_limit_exception(e)
try:
self.run_test_case('QueryTest/uda-mem-limit', vector, use_db=unique_database)
assert False, "Query was expected to fail"
except ImpalaBeeswaxException, e:
self._check_mem_limit_exception(e)
# It takes a long time for Impala to free up memory after this test, especially if
# ASAN is enabled. Verify that all fragments finish executing before moving on to the
# next test to make sure that the next test is not affected.
for impalad in ImpalaCluster().impalads:
verifier = MetricVerifier(impalad.service)
verifier.wait_for_metric("impala-server.num-fragments-in-flight", 0)
verifier.verify_num_unused_buffers()
def test_udf_constant_folding(self, vector, unique_database):
"""Test that constant folding of UDFs is handled correctly. Uses count_rows(),
which returns a unique value every time it is evaluated in the same thread."""
exec_options = copy(vector.get_value('exec_option'))
# Execute on a single node so that all counter values will be unique.
exec_options["num_nodes"] = 1
create_fn_query = """create function {database}.count_rows() returns bigint
location '{location}' symbol='Count' prepare_fn='CountPrepare'
close_fn='CountClose'"""
self._load_functions(create_fn_query, vector, unique_database,
get_fs_path('/test-warehouse/libTestUdfs.so'))
# Only one distinct value if the expression is constant folded, otherwise one
# value per row in alltypes
expected_ndv = 1 if exec_options['enable_expr_rewrites'] else 7300
# Test fully constant expression, evaluated in FE.
query = "select `{0}`.count_rows() from functional.alltypes".format(unique_database)
result = self.execute_query_expect_success(self.client, query, exec_options)
actual_ndv = len(set(result.data))
assert actual_ndv == expected_ndv
# Test constant argument to a non-constant expr. The argument value can be
# cached in the backend.
query = """select concat(cast(`{0}`.count_rows() as string), '-', string_col)
from functional.alltypes""".format(unique_database)
result = self.execute_query_expect_success(self.client, query, exec_options)
actual_ndv = len(set(value.split("-")[0] for value in result.data))
assert actual_ndv == expected_ndv
class TestUdfTargeted(TestUdfBase):
"""Targeted UDF tests that don't need to be run under the full combination of
exec options."""
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestUdfTargeted, cls).add_test_dimensions()
# There is no reason to run these tests using all dimensions.
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
def test_udf_invalid_symbol(self, vector, unique_database):
""" IMPALA-1642: Impala crashes if the symbol for a Hive UDF doesn't exist
Crashing is non-deterministic so we run the UDF several times."""
src_udf_path = os.path.join(
os.environ['IMPALA_HOME'], 'testdata/udfs/impala-hive-udfs.jar')
tgt_udf_path = get_fs_path(
'/test-warehouse/{0}.db/impala-hive-udfs.jar'.format(unique_database))
drop_fn_stmt = (
"drop function if exists `{0}`.fn_invalid_symbol(STRING)".format(unique_database))
create_fn_stmt = (
"create function `{0}`.fn_invalid_symbol(STRING) returns "
"STRING LOCATION '{1}' SYMBOL='not.a.Symbol'".format(
unique_database, tgt_udf_path))
query = "select `{0}`.fn_invalid_symbol('test')".format(unique_database)
check_call(["hadoop", "fs", "-put", "-f", src_udf_path, tgt_udf_path])
self.client.execute(drop_fn_stmt)
self.client.execute(create_fn_stmt)
for _ in xrange(5):
ex = self.execute_query_expect_failure(self.client, query)
assert "Unable to find class" in str(ex)
self.client.execute(drop_fn_stmt)
@SkipIfLocal.multiple_impalad
def test_hive_udfs_missing_jar(self, vector, unique_database):
""" IMPALA-2365: Impalad shouldn't crash if the udf jar isn't present
on HDFS"""
# Copy hive-exec.jar to a temporary file
jar_path = get_fs_path("/test-warehouse/{0}.db/".format(unique_database)
+ get_random_id(5) + ".jar")
hive_jar = get_fs_path("/test-warehouse/hive-exec.jar")
check_call(["hadoop", "fs", "-cp", hive_jar, jar_path])
drop_fn_stmt = (
"drop function if exists "
"`{0}`.`pi_missing_jar`()".format(unique_database))
create_fn_stmt = (
"create function `{0}`.`pi_missing_jar`() returns double location '{1}' "
"symbol='org.apache.hadoop.hive.ql.udf.UDFPI'".format(unique_database, jar_path))
cluster = ImpalaCluster()
impalad = cluster.get_any_impalad()
client = impalad.service.create_beeswax_client()
# Create and drop functions with sync_ddl to make sure they are reflected
# in every impalad.
exec_option = copy(vector.get_value('exec_option'))
exec_option['sync_ddl'] = 1
self.execute_query_expect_success(client, drop_fn_stmt, exec_option)
self.execute_query_expect_success(client, create_fn_stmt, exec_option)
# Delete the udf jar
check_call(["hadoop", "fs", "-rm", jar_path])
different_impalad = cluster.get_different_impalad(impalad)
client = different_impalad.service.create_beeswax_client()
# Run a query using the udf from an impalad other than the one
# we used to create the function. This is to bypass loading from
# the cache
try:
self.execute_query_using_client(
client, "select `{0}`.`pi_missing_jar`()".format(unique_database), vector)
assert False, "Query expected to fail"
except ImpalaBeeswaxException, e:
assert "Failed to get file info" in str(e)
def test_libs_with_same_filenames(self, vector, unique_database):
self.run_test_case('QueryTest/libs_with_same_filenames', vector, use_db=unique_database)
def test_udf_update_via_drop(self, vector, unique_database):
"""Test updating the UDF binary without restarting Impala. Dropping
the function should remove the binary from the local cache."""
# Run with sync_ddl to guarantee the drop is processed by all impalads.
exec_options = copy(vector.get_value('exec_option'))
exec_options['sync_ddl'] = 1
old_udf = os.path.join(
os.environ['IMPALA_HOME'], 'testdata/udfs/impala-hive-udfs.jar')
new_udf = os.path.join(
os.environ['IMPALA_HOME'], 'tests/test-hive-udfs/target/test-hive-udfs-1.0.jar')
udf_dst = get_fs_path(
'/test-warehouse/{0}.db/impala-hive-udfs.jar'.format(unique_database))
drop_fn_stmt = (
'drop function if exists `{0}`.`udf_update_test_drop`()'.format(unique_database))
create_fn_stmt = (
"create function `{0}`.`udf_update_test_drop`() returns string LOCATION '{1}' "
"SYMBOL='org.apache.impala.TestUpdateUdf'".format(unique_database, udf_dst))
query_stmt = "select `{0}`.`udf_update_test_drop`()".format(unique_database)
# Put the old UDF binary on HDFS, make the UDF in Impala and run it.
check_call(["hadoop", "fs", "-put", "-f", old_udf, udf_dst])
self.execute_query_expect_success(self.client, drop_fn_stmt, exec_options)
self.execute_query_expect_success(self.client, create_fn_stmt, exec_options)
self._run_query_all_impalads(exec_options, query_stmt, ["Old UDF"])
# Update the binary, drop and create the function again. The new binary should
# be running.
check_call(["hadoop", "fs", "-put", "-f", new_udf, udf_dst])
self.execute_query_expect_success(self.client, drop_fn_stmt, exec_options)
self.execute_query_expect_success(self.client, create_fn_stmt, exec_options)
self._run_query_all_impalads(exec_options, query_stmt, ["New UDF"])
def test_udf_update_via_create(self, vector, unique_database):
"""Test updating the UDF binary without restarting Impala. Creating a new function
from the library should refresh the cache."""
# Run with sync_ddl to guarantee the create is processed by all impalads.
exec_options = copy(vector.get_value('exec_option'))
exec_options['sync_ddl'] = 1
old_udf = os.path.join(
os.environ['IMPALA_HOME'], 'testdata/udfs/impala-hive-udfs.jar')
new_udf = os.path.join(
os.environ['IMPALA_HOME'], 'tests/test-hive-udfs/target/test-hive-udfs-1.0.jar')
udf_dst = get_fs_path(
'/test-warehouse/{0}.db/impala-hive-udfs.jar'.format(unique_database))
old_function_name = "udf_update_test_create1"
new_function_name = "udf_update_test_create2"
drop_fn_template = 'drop function if exists `{0}`.`{{0}}`()'.format(unique_database)
self.execute_query_expect_success(
self.client, drop_fn_template.format(old_function_name), exec_options)
self.execute_query_expect_success(
self.client, drop_fn_template.format(new_function_name), exec_options)
create_fn_template = (
"create function `{0}`.`{{0}}`() returns string LOCATION '{1}' "
"SYMBOL='org.apache.impala.TestUpdateUdf'".format(unique_database, udf_dst))
query_template = "select `{0}`.`{{0}}`()".format(unique_database)
# Put the old UDF binary on HDFS, make the UDF in Impala and run it.
check_call(["hadoop", "fs", "-put", "-f", old_udf, udf_dst])
self.execute_query_expect_success(
self.client, create_fn_template.format(old_function_name), exec_options)
self._run_query_all_impalads(
exec_options, query_template.format(old_function_name), ["Old UDF"])
# Update the binary, and create a new function using the binary. The new binary
# should be running.
check_call(["hadoop", "fs", "-put", "-f", new_udf, udf_dst])
self.execute_query_expect_success(
self.client, create_fn_template.format(new_function_name), exec_options)
self._run_query_all_impalads(
exec_options, query_template.format(new_function_name), ["New UDF"])
# The old function should use the new library now
self._run_query_all_impalads(
exec_options, query_template.format(old_function_name), ["New UDF"])
def test_drop_function_while_running(self, vector, unique_database):
self.client.execute("drop function if exists `{0}`.drop_while_running(BIGINT)"
.format(unique_database))
self.client.execute(
"create function `{0}`.drop_while_running(BIGINT) returns "
"BIGINT LOCATION '{1}' SYMBOL='Identity'".format(
unique_database,
get_fs_path('/test-warehouse/libTestUdfs.so')))
query = ("select `{0}`.drop_while_running(l_orderkey) from tpch.lineitem limit 10000"
.format(unique_database))
# Run this query asynchronously.
handle = self.execute_query_async(query, vector.get_value('exec_option'),
table_format=vector.get_value('table_format'))
# Fetch some rows from the async query to make sure the UDF is being used
results = self.client.fetch(query, handle, 1)
assert results.success
assert len(results.data) == 1
# Drop the function while the original query is running.
self.client.execute(
"drop function `{0}`.drop_while_running(BIGINT)".format(unique_database))
# Fetch the rest of the rows, this should still be able to run the UDF
results = self.client.fetch(query, handle, -1)
assert results.success
assert len(results.data) == 9999
|
|
######################################################################################
# Date: 2016/June/03
#
# Module: manager_color.py
#
# VERSION: 1.0
#
# AUTHOR: Miguel Ibarra ([email protected]) ed. Matt Thoburn ([email protected])
#
# DESCRIPTION: This module contains a class to manage colors for use in plotting
#
#######################################################################################
import palettable
import matplotlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import palettable.tableau as tb
import palettable.wesanderson as wes
import palettable.cubehelix as cubhx
import palettable.colorbrewer.sequential as seq
import palettable.colorbrewer.diverging as div
import palettable.colorbrewer.qualitative as qual
class colorHandler:
"""class to handle colors used in graphing"""
def __init__(self,pal,col):
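        """Initialize the color handler.
        :param pal: name of the palette family; one of "diverging", "qualitative",
            "sequential", "cubehelix", "tableau" or "wesanderson".
        :param col: name of the specific palettable palette within that family,
            e.g. "Tableau_10".
        """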
#All color palettes in palettable
palettes={
"diverging": {
"BrBG_10": div.BrBG_10,
"BrBG_11": div.BrBG_11,
"BrBG_3": div.BrBG_3,
"BrBG_4": div.BrBG_4,
"BrBG_5": div.BrBG_5,
"BrBG_6": div.BrBG_6,
"BrBG_7": div.BrBG_7,
"BrBG_8": div.BrBG_8,
"BrBG_9": div.BrBG_9,
"PRGn_10": div.PRGn_10,
"PRGn_11": div.PRGn_11,
"PRGn_3": div.PRGn_3,
"PRGn_4": div.PRGn_4,
"PRGn_5": div.PRGn_5,
"PRGn_6": div.PRGn_6,
"PRGn_7": div.PRGn_7,
"PRGn_8": div.PRGn_8,
"PRGn_9": div.PRGn_9,
"PiYG_10": div.PiYG_10,
"PiYG_11": div.PiYG_11,
"PiYG_3": div.PiYG_3,
"PiYG_4": div.PiYG_4,
"PiYG_5": div.PiYG_5,
"PiYG_6": div.PiYG_6,
"PiYG_7": div.PiYG_7,
"PiYG_8": div.PiYG_8,
"PiYG_9": div.PiYG_9,
"PuOr_10": div.PuOr_10,
"PuOr_11": div.PuOr_11,
"PuOr_3": div.PuOr_3,
"PuOr_4": div.PuOr_4,
"PuOr_5": div.PuOr_5,
"PuOr_6": div.PuOr_6,
"PuOr_7": div.PuOr_7,
"PuOr_8": div.PuOr_8,
"PuOr_9": div.PuOr_9,
"RdBu_10": div.RdBu_10,
"RdBu_11": div.RdBu_11,
"RdBu_3": div.RdBu_3,
"RdBu_4": div.RdBu_4,
"RdBu_5": div.RdBu_5,
"RdBu_6": div.RdBu_6,
"RdBu_7": div.RdBu_7,
"RdBu_8": div.RdBu_8,
"RdBu_9": div.RdBu_9,
"RdGy_10": div.RdGy_10,
"RdGy_11": div.RdGy_11,
"RdGy_3": div.RdGy_3,
"RdGy_4": div.RdGy_4,
"RdGy_5": div.RdGy_5,
"RdGy_6": div.RdGy_6,
"RdGy_7": div.RdGy_7,
"RdGy_8": div.RdGy_8,
"RdGy_9": div.RdGy_9,
"RdYlBu_10": div.RdYlBu_10,
"RdYlBu_11": div.RdYlBu_11,
"RdYlBu_3": div.RdYlBu_3,
"RdYlBu_4": div.RdYlBu_4,
"RdYlBu_5": div.RdYlBu_5,
"RdYlBu_6": div.RdYlBu_6,
"RdYlBu_7": div.RdYlBu_7,
"RdYlBu_8": div.RdYlBu_8,
"RdYlBu_9": div.RdYlBu_9,
"RdYlGn_10": div.RdYlGn_10,
"RdYlGn_11": div.RdYlGn_11,
"RdYlGn_3": div.RdYlGn_3,
"RdYlGn_4": div.RdYlGn_4,
"RdYlGn_5": div.RdYlGn_5,
"RdYlGn_6": div.RdYlGn_6,
"RdYlGn_7": div.RdYlGn_7,
"RdYlGn_8": div.RdYlGn_8,
"RdYlGn_9": div.RdYlGn_9,
"Spectral_10": div.Spectral_10,
"Spectral_11": div.Spectral_11,
"Spectral_3": div.Spectral_3,
"Spectral_4": div.Spectral_4,
"Spectral_5": div.Spectral_5,
"Spectral_6": div.Spectral_6,
"Spectral_7": div.Spectral_7,
"Spectral_8": div.Spectral_8,
"Spectral_9": div.Spectral_9
},
"qualitative": {
"Accent_3": qual.Accent_3,
"Accent_4": qual.Accent_4,
"Accent_5": qual.Accent_5,
"Accent_6": qual.Accent_6,
"Accent_7": qual.Accent_7,
"Accent_8": qual.Accent_8,
"Dark2_3": qual.Dark2_3,
"Dark2_4": qual.Dark2_4,
"Dark2_5": qual.Dark2_5,
"Dark2_6": qual.Dark2_6,
"Dark2_7": qual.Dark2_7,
"Dark2_8": qual.Dark2_8,
"Paired_10": qual.Paired_10,
"Paired_11": qual.Paired_11,
"Paired_12": qual.Paired_12,
"Paired_3": qual.Paired_3,
"Paired_4": qual.Paired_4,
"Paired_5": qual.Paired_5,
"Paired_6": qual.Paired_6,
"Paired_7": qual.Paired_7,
"Paired_8": qual.Paired_8,
"Paired_9": qual.Paired_9,
"Pastel1_3": qual.Pastel1_3,
"Pastel1_4": qual.Pastel1_4,
"Pastel1_5": qual.Pastel1_5,
"Pastel1_6": qual.Pastel1_6,
"Pastel1_7": qual.Pastel1_7,
"Pastel1_8": qual.Pastel1_8,
"Pastel1_9": qual.Pastel1_9,
"Pastel2_3": qual.Pastel2_3,
"Pastel2_4": qual.Pastel2_4,
"Pastel2_5": qual.Pastel2_5,
"Pastel2_6": qual.Pastel2_6,
"Pastel2_7": qual.Pastel2_7,
"Pastel2_8": qual.Pastel2_8,
"Set1_3": qual.Set1_3,
"Set1_4": qual.Set1_4,
"Set1_5": qual.Set1_5,
"Set1_6": qual.Set1_6,
"Set1_7": qual.Set1_7,
"Set1_8": qual.Set1_8,
"Set1_9": qual.Set1_9,
"Set2_3": qual.Set2_3,
"Set2_4": qual.Set2_4,
"Set2_5": qual.Set2_5,
"Set2_6": qual.Set2_6,
"Set2_7": qual.Set2_7,
"Set2_8": qual.Set2_8,
"Set3_10": qual.Set3_10,
"Set3_11": qual.Set3_11,
"Set3_12": qual.Set3_12,
"Set3_3": qual.Set3_3,
"Set3_4": qual.Set3_4,
"Set3_5": qual.Set3_5,
"Set3_6": qual.Set3_6,
"Set3_7": qual.Set3_7,
"Set3_8": qual.Set3_8,
"Set3_9": qual.Set3_9
},
"sequential": {
"Blues_3": seq.Blues_3,
"Blues_4": seq.Blues_4,
"Blues_5": seq.Blues_5,
"Blues_6": seq.Blues_6,
"Blues_7": seq.Blues_7,
"Blues_8": seq.Blues_8,
"Blues_9": seq.Blues_9,
"BuGn_3": seq.BuGn_3,
"BuGn_4": seq.BuGn_4,
"BuGn_5": seq.BuGn_5,
"BuGn_6": seq.BuGn_6,
"BuGn_7": seq.BuGn_7,
"BuGn_8": seq.BuGn_8,
"BuGn_9": seq.BuGn_9,
"BuPu_3": seq.BuPu_3,
"BuPu_4": seq.BuPu_4,
"BuPu_5": seq.BuPu_5,
"BuPu_6": seq.BuPu_6,
"BuPu_7": seq.BuPu_7,
"BuPu_8": seq.BuPu_8,
"BuPu_9": seq.BuPu_9,
"GnBu_3": seq.GnBu_3,
"GnBu_4": seq.GnBu_4,
"GnBu_5": seq.GnBu_5,
"GnBu_6": seq.GnBu_6,
"GnBu_7": seq.GnBu_7,
"GnBu_8": seq.GnBu_8,
"GnBu_9": seq.GnBu_9,
"Greens_3": seq.Greens_3,
"Greens_4": seq.Greens_4,
"Greens_5": seq.Greens_5,
"Greens_6": seq.Greens_6,
"Greens_7": seq.Greens_7,
"Greens_8": seq.Greens_8,
"Greens_9": seq.Greens_9,
"Greys_3": seq.Greys_3,
"Greys_4": seq.Greys_4,
"Greys_5": seq.Greys_5,
"Greys_6": seq.Greys_6,
"Greys_7": seq.Greys_7,
"Greys_8": seq.Greys_8,
"Greys_9": seq.Greys_9,
"OrRd_3": seq.OrRd_3,
"OrRd_4": seq.OrRd_4,
"OrRd_5": seq.OrRd_5,
"OrRd_6": seq.OrRd_6,
"OrRd_7": seq.OrRd_7,
"OrRd_8": seq.OrRd_8,
"OrRd_9": seq.OrRd_9,
"Oranges_3": seq.Oranges_3,
"Oranges_4": seq.Oranges_4,
"Oranges_5": seq.Oranges_5,
"Oranges_6": seq.Oranges_6,
"Oranges_7": seq.Oranges_7,
"Oranges_8": seq.Oranges_8,
"Oranges_9": seq.Oranges_9,
"PuBuGn_3": seq.PuBuGn_3,
"PuBuGn_4": seq.PuBuGn_4,
"PuBuGn_5": seq.PuBuGn_5,
"PuBuGn_6": seq.PuBuGn_6,
"PuBuGn_7": seq.PuBuGn_7,
"PuBuGn_8": seq.PuBuGn_8,
"PuBuGn_9": seq.PuBuGn_9,
"PuBu_3": seq.PuBu_3,
"PuBu_4": seq.PuBu_4,
"PuBu_5": seq.PuBu_5,
"PuBu_6": seq.PuBu_6,
"PuBu_7": seq.PuBu_7,
"PuBu_8": seq.PuBu_8,
"PuBu_9": seq.PuBu_9,
"PuRd_3": seq.PuRd_3,
"PuRd_4": seq.PuRd_4,
"PuRd_5": seq.PuRd_5,
"PuRd_6": seq.PuRd_6,
"PuRd_7": seq.PuRd_7,
"PuRd_8": seq.PuRd_8,
"PuRd_9": seq.PuRd_9,
"Purples_3": seq.Purples_3,
"Purples_4": seq.Purples_4,
"Purples_5": seq.Purples_5,
"Purples_6": seq.Purples_6,
"Purples_7": seq.Purples_7,
"Purples_8": seq.Purples_8,
"Purples_9": seq.Purples_9,
"RdPu_3": seq.RdPu_3,
"RdPu_4": seq.RdPu_4,
"RdPu_5": seq.RdPu_5,
"RdPu_6": seq.RdPu_6,
"RdPu_7": seq.RdPu_7,
"RdPu_8": seq.RdPu_8,
"RdPu_9": seq.RdPu_9,
"Reds_3": seq.Reds_3,
"Reds_4": seq.Reds_4,
"Reds_5": seq.Reds_5,
"Reds_6": seq.Reds_6,
"Reds_7": seq.Reds_7,
"Reds_8": seq.Reds_8,
"Reds_9": seq.Reds_9,
"YlGnBu_3": seq.YlGnBu_3,
"YlGnBu_4": seq.YlGnBu_4,
"YlGnBu_5": seq.YlGnBu_5,
"YlGnBu_6": seq.YlGnBu_6,
"YlGnBu_7": seq.YlGnBu_7,
"YlGnBu_8": seq.YlGnBu_8,
"YlGnBu_9": seq.YlGnBu_9,
"YlGn_3": seq.YlGn_3,
"YlGn_4": seq.YlGn_4,
"YlGn_5": seq.YlGn_5,
"YlGn_6": seq.YlGn_6,
"YlGn_7": seq.YlGn_7,
"YlGn_8": seq.YlGn_8,
"YlGn_9": seq.YlGn_9,
"YlOrBr_3": seq.YlOrBr_3,
"YlOrBr_4": seq.YlOrBr_4,
"YlOrBr_5": seq.YlOrBr_5,
"YlOrBr_6": seq.YlOrBr_6,
"YlOrBr_7": seq.YlOrBr_7,
"YlOrBr_8": seq.YlOrBr_8,
"YlOrBr_9": seq.YlOrBr_9,
"YlOrRd_3": seq.YlOrRd_3,
"YlOrRd_4": seq.YlOrRd_4,
"YlOrRd_5": seq.YlOrRd_5,
"YlOrRd_6": seq.YlOrRd_6,
"YlOrRd_7": seq.YlOrRd_7,
"YlOrRd_8": seq.YlOrRd_8,
"YlOrRd_9": seq.YlOrRd_9
},
"cubehelix": {
"classic_16": cubhx.classic_16,
"cubehelix1_16": cubhx.cubehelix1_16,
"cubehelix2_16": cubhx.cubehelix2_16,
"cubehelix3_16": cubhx.cubehelix3_16,
"jim_special_16": cubhx.jim_special_16,
"perceptual_rainbow_16": cubhx.perceptual_rainbow_16,
"purple_16": cubhx.purple_16,
"red_16": cubhx.red_16
},
"tableau": {
"BlueRed_12": tb.BlueRed_12,
"BlueRed_6": tb.BlueRed_6,
"ColorBlind_10": tb.ColorBlind_10,
"Gray_5": tb.Gray_5,
"GreenOrange_12": tb.GreenOrange_12,
"GreenOrange_6": tb.GreenOrange_6,
"PurpleGray_12": tb.PurpleGray_12,
"PurpleGray_6": tb.PurpleGray_6,
"TableauLight_10": tb.TableauLight_10,
"TableauMedium_10": tb.TableauMedium_10,
"Tableau_10": tb.Tableau_10,
"Tableau_20": tb.Tableau_20,
"TrafficLight_9": tb.TrafficLight_9
},
"wesanderson": {
"Aquatic1_5": wes.Aquatic1_5,
"Aquatic2_5": wes.Aquatic2_5,
"Aquatic3_5": wes.Aquatic3_5,
"Cavalcanti_5": wes.Cavalcanti_5,
"Chevalier_4": wes.Chevalier_4,
"Darjeeling1_4": wes.Darjeeling1_4,
"Darjeeling2_5": wes.Darjeeling2_5,
"Darjeeling3_5": wes.Darjeeling3_5,
"FantasticFox1_5": wes.FantasticFox1_5,
"FantasticFox2_5": wes.FantasticFox2_5,
"GrandBudapest1_4": wes.GrandBudapest1_4,
"GrandBudapest2_4": wes.GrandBudapest2_4,
"GrandBudapest3_6": wes.GrandBudapest3_6,
"GrandBudapest4_5": wes.GrandBudapest4_5,
"GrandBudapest5_5": wes.GrandBudapest5_5,
"Margot1_5": wes.Margot1_5,
"Margot2_4": wes.Margot2_4,
"Margot3_4": wes.Margot3_4,
"Mendl_4": wes.Mendl_4,
"Moonrise1_5": wes.Moonrise1_5,
"Moonrise2_4": wes.Moonrise2_4,
"Moonrise3_4": wes.Moonrise3_4,
"Moonrise4_5": wes.Moonrise4_5,
"Moonrise5_6": wes.Moonrise5_6,
"Moonrise6_5": wes.Moonrise6_5,
"Moonrise7_5": wes.Moonrise7_5,
"Royal1_4": wes.Royal1_4,
"Royal2_5": wes.Royal2_5,
"Royal3_5": wes.Royal3_5,
"Zissou_5": wes.Zissou_5
}}
#Select one palette
self.palette = palettes[pal][col]
#Name of the palette
self.name = self.palette.name
#One of the "diverging", "qualitative", "sequential"
self.type = self.palette.type
        #Number of colors in the palette
self.number = self.palette.number
#Colors RGB(0-255)
self.colors = self.palette.colors
#Hex colors ("#A912F4")
self.hex_colors = self.palette.hex_colors
#mpl_colors as (0-1) python default
self.mpl_colors = self.palette.mpl_colors
        #A continuous, interpolated matplotlib colormap
self.mpl_colormap = self.palette.mpl_colormap
#Methods for palette
        #Gets a matplotlib colormap and passes custom keyword arguments to
        #LinearSegmentedColormap.from_list
self.get_mpl_colormap = self.palette.get_mpl_colormap
#Show the defined colors of the palette in the IPython Notebook.
#Requires ipythonblocks to be installed.
self.show_as_blocks = self.palette.show_as_blocks
#Show the defined colors of the palette in the IPython Notebook.
#Requires matplotlib to be installed.
self.show_discrete_image = self.palette.show_discrete_image
#Show the continuous, interpolated palette in the IPython Notebook.
#Requires matplotlib to be installed.
self.show_continuous_image = self.palette.show_continuous_image
#Save an image of the defined colors of palette to a file.
#Requires matplotlib to be installed.
self.save_discrete_image = self.palette.save_discrete_image
#Save an image of the continuous, interpolated palette to a file.
#Requires matplotlib to be installed.
self.save_continuous_image = self.palette.save_continuous_image
def chompColors(self,start,end):
"""
        Chomps colors from the original palette.
        Useful when you just want a specific part of the palette.
        :Arguments:
            :type start: int
            :param start: Position to start the subsetting
            :type end: int
            :param end: Position to end the subsetting
"""
# Subsetting colors as python default
self.mpl_colors = self.mpl_colors[int(start):int(end)]
# Re calculate new colormap based on the colors.
self.mpl_colormap = matplotlib.colors.LinearSegmentedColormap.from_list(
colors=self.mpl_colors,
name='subseted')
def getColorsCmapPalette(self,elements):
"""
Gets a list of colors for a given list of elements
:Arguments:
:type elements: list
:param elements: list of elements to get colors from.
:Returns:
            :rtype colorList: list
            :return colorList: list of colors.
"""
        #Creates an array of evenly spaced positions ranging from 0-1
colPosition = np.arange(0,1,1.0/len(elements))
#Get an array of positions in the colormap
colPosition = np.array([x+(1.0/(len(elements)*2)) for x in colPosition])
#Get list of colors out of the positions
colorList = self.mpl_colormap(colPosition)
#Return colorList
return colorList
def getColors(self,design,groups):
"""
        Get colors based on a design file
        :Arguments:
            :type design: pandas.DataFrame
            :param design: A design file.
            :type groups: string.
            :param groups: the name of the column on the design file that contains the groups
        :Returns:
            :rtype design: pandas.DataFrame
            :return design: Copy of the original design file with a new column with the colors
            :rtype ugColors: dictionary
            :return ugColors: dictionary with unique groups and their respective colors
                            (useful for legends)
            :rtype combName: string
            :return combName: Column on the design that contains the combinations
"""
        #Getting the columns we are interested in and dropping rows with missing data
if len(groups):
self.design = design.loc[:,groups].dropna(axis=0)
else:
self.design = pd.DataFrame(design.index.values,index=design.index,columns=["samples"])
self.design.index.name = "sampleID"
#Getting groups that exists in the design file after dropping
groups = self.design.columns.values
        #Creating the name of the combination column
self.combName = "_".join(groups)
#Creating combinations
self.design.loc[:,self.combName] = self.design.apply(lambda x: "_".join(map(str,
x[groups].values)),axis=1)
#Getting uniq combinations
uGroups = list(set(self.design[self.combName].values))
#Remove the nan in the selected group column
#Get colours
if self.combName == "samples":
colorsInPalette = self.getColorByIndex(dataFrame=self.design,color_index=0)
elif len(uGroups) > self.palette.number:
colorsInPalette = self.getColorsCmapPalette(uGroups)
else:
colorsInPalette = self.palette.mpl_colors[:len(uGroups)]
#Get colors for each group
self.ugColors = {group:color for group,color in zip(uGroups,colorsInPalette)}
#Creating color for each combination
self.design.loc[:,"colors"] = self.design[self.combName].apply(lambda x: self.ugColors[x])
        # Treat column "samples" as its own group
if "samples" in self.design.columns:
self.design.drop("samples",axis=1,inplace=True)
self.design.loc[:,"samples"] = ["samples"] * len(self.design.index)
#if only one group then use samples as ugroup
if "samples" in self.design.columns:
self.ugColors={"samples":list(set(colorsInPalette))[0]}
#Creates a list of colors from the given dictionary works for groups and not groups
if "samples" in self.design.columns:
self.list_colors = [self.ugColors["samples"]] * len(self.design.index)
else:
self.list_colors = [self.ugColors[group] for group in self.design[self.combName].values]
#Returning design
return self.design,self.ugColors,self.combName
def getColorsByGroup(self,design,group,uGroup):
"""
Gets a list of colors for groups
:Arguments:
:type design: pandas.DataFrame
:param design: A design file.
:type group: string.
:param group: Name of the column on the design that contains the groups
:type uGroup: list.
:param uGroup: List of the unique groups on the design file
:Returns:
            :rtype colors: list
            :return colors: list of colors (per group) for each one of the indexes
                            on the design.
            :rtype ugColors: dictionary
            :return ugColors: Dictionary with group:color
"""
        #Design file, name of the column in the design that has the groups (treatment),
        #and the unique groups.
        # Calculates colors from a cmap if the number of groups is bigger
        # than the default number of colors on the palette.
if len(uGroup) > self.palette.number:
colorsInPalette = self.getColorsCmapPalette(uGroup)
else:
colorsInPalette = self.palette.mpl_colors[:len(uGroup)]
#Creates a dictionary of group:color
ugColors = {grp:color for grp,color in zip(uGroup,colorsInPalette)}
#Creates a list of colors from the given dictionary
colors = [ugColors[group] for val,group in zip(design.index.values,
design[group].values)]
#Return list of colours and dictionary of colors
return colors,ugColors
def getColorByIndex(self,dataFrame,color_index=0):
"""
        This function gets colors for each index of a given dataFrame
:Arguments:
:type dataFrame: pandas.DataFrame
:param dataFrame: Any given dataframe
:type color_index: int.
:param color_index: color of a palette that you want to give to
every element on the dataframe.
:Returns:
:rtype colors: list
            :return colors: list of colors for each index (all elements in
                            the dataFrame have the same color)
"""
#Get indexes
indexes = dataFrame.index.values
#Set the color that you want for everything
colors = [self.palette.mpl_colors[color_index]] * len(indexes)
#Return list of colors
return colors
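if __name__ == "__main__":
    # Minimal usage sketch (the palette family and name below are chosen
    # arbitrarily for demonstration): build a handler and inspect the basic
    # palette attributes exposed by the class.
    demo = colorHandler(pal="tableau", col="Tableau_10")
    print(demo.name, demo.type, demo.number)
    print(demo.hex_colors)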
|
|
from __future__ import unicode_literals
import datetime
from xml.dom import minidom
try:
import pytz
except ImportError:
pytz = None
from django.contrib.syndication import views
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils.feedgenerator import rfc2822_date, rfc3339_date
from django.utils import timezone
from .models import Entry
TZ = timezone.get_default_timezone()
class FeedTestCase(TestCase):
fixtures = ['feeddata.json']
def setUp(self):
# Django cannot deal with very old dates when pytz isn't installed.
if pytz is None:
old_entry = Entry.objects.get(pk=1)
old_entry.updated = datetime.datetime(1980, 1, 1, 12, 30)
old_entry.published = datetime.datetime(1986, 9, 25, 20, 15, 00)
old_entry.save()
def assertChildNodes(self, elem, expected):
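        """Assert that the set of child node names of 'elem' equals the 'expected' set."""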
actual = set(n.nodeName for n in elem.childNodes)
expected = set(expected)
self.assertEqual(actual, expected)
def assertChildNodeContent(self, elem, expected):
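        """Assert that, for each tag/text pair in 'expected', the first element
        with that tag name under 'elem' has exactly that text content."""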
for k, v in expected.items():
self.assertEqual(
elem.getElementsByTagName(k)[0].firstChild.wholeText, v)
def assertCategories(self, elem, expected):
self.assertEqual(set(i.firstChild.wholeText for i in elem.childNodes if i.nodeName == 'category'), set(expected))
######################################
# Feed view
######################################
class SyndicationFeedTest(FeedTestCase):
"""
Tests for the high-level syndication feed framework.
"""
urls = 'syndication.urls'
def test_rss2_feed(self):
"""
Test the structure and content of feeds generated by Rss201rev2Feed.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '2.0')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
# Find the last build date
d = Entry.objects.latest('published').published
last_build_date = rfc2822_date(timezone.make_aware(d, TZ))
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
self.assertChildNodeContent(chan, {
'title': 'My blog',
'description': 'A more thorough description of my blog.',
'link': 'http://example.com/blog/',
'language': 'en',
'lastBuildDate': last_build_date,
#'atom:link': '',
'ttl': '600',
'copyright': 'Copyright (c) 2007, Sally Smith',
})
self.assertCategories(chan, ['python', 'django'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss2/'
)
# Find the pubdate of the first feed item
d = Entry.objects.get(pk=1).published
pub_date = rfc2822_date(timezone.make_aware(d, TZ))
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
'guid': 'http://example.com/blog/1/',
'pubDate': pub_date,
'author': '[email protected] (Sally Smith)',
})
self.assertCategories(items[0], ['python', 'testing'])
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])
# Assert that <guid> does not have any 'isPermaLink' attribute
self.assertIsNone(item.getElementsByTagName(
'guid')[0].attributes.get('isPermaLink'))
def test_rss2_feed_guid_permalink_false(self):
"""
Test if the 'isPermaLink' attribute of <guid> element of an item
in the RSS feed is 'false'.
"""
response = self.client.get(
'/syndication/rss2/guid_ispermalink_false/')
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName(
'rss')[0].getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
for item in items:
self.assertEqual(
item.getElementsByTagName('guid')[0].attributes.get(
'isPermaLink').value, "false")
def test_rss2_feed_guid_permalink_true(self):
"""
Test if the 'isPermaLink' attribute of <guid> element of an item
in the RSS feed is 'true'.
"""
response = self.client.get(
'/syndication/rss2/guid_ispermalink_true/')
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName(
'rss')[0].getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
for item in items:
self.assertEqual(
item.getElementsByTagName('guid')[0].attributes.get(
'isPermaLink').value, "true")
def test_rss091_feed(self):
"""
Test the structure and content of feeds generated by RssUserland091Feed.
"""
response = self.client.get('/syndication/rss091/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '0.91')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
self.assertCategories(chan, ['python', 'django'])
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss091/'
)
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
})
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description'])
self.assertCategories(item, [])
def test_atom_feed(self):
"""
Test the structure and content of feeds generated by Atom1Feed.
"""
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author'])
for link in feed.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/')
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertChildNodes(entry, [
'title',
'link',
'id',
'summary',
'category',
'updated',
'published',
'rights',
'author',
])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_atom_feed_published_and_updated_elements(self):
"""
Test that the published and updated elements are not
the same and now adhere to RFC 4287.
"""
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
entries = feed.getElementsByTagName('entry')
published = entries[0].getElementsByTagName('published')[0].firstChild.wholeText
updated = entries[0].getElementsByTagName('updated')[0].firstChild.wholeText
self.assertNotEqual(published, updated)
def test_latest_post_date(self):
"""
Test that both the published and updated dates are
considered when determining the latest post date.
"""
# this feed has a `published` element with the latest date
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.latest('published').published
latest_published = rfc3339_date(timezone.make_aware(d, TZ))
self.assertEqual(updated, latest_published)
# this feed has an `updated` element with the latest date
response = self.client.get('/syndication/latest/')
feed = minidom.parseString(response.content).firstChild
updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.exclude(pk=5).latest('updated').updated
latest_updated = rfc3339_date(timezone.make_aware(d, TZ))
self.assertEqual(updated, latest_updated)
def test_custom_feed_generator(self):
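        """
        Test that a custom feed generator class can add its own attributes and
        elements to the feed and its entries.
        """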
response = self.client.get('/syndication/custom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('django'), 'rocks')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'spam', 'rights', 'category', 'author'])
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertEqual(entry.getAttribute('bacon'), 'yum')
self.assertChildNodes(entry, [
'title',
'link',
'id',
'summary',
'ministry',
'rights',
'author',
'updated',
'published',
'category',
])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_title_escaping(self):
"""
Tests that titles are escaped correctly in RSS feeds.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
for item in doc.getElementsByTagName('item'):
link = item.getElementsByTagName('link')[0]
if link.firstChild.wholeText == 'http://example.com/blog/4/':
title = item.getElementsByTagName('title')[0]
self.assertEqual(title.firstChild.wholeText, 'A & B < C > D')
def test_naive_datetime_conversion(self):
"""
Test that datetimes are correctly converted to the local time zone.
"""
# Naive date times passed in get converted to the local time zone, so
        # check the received zone offset against the local offset.
response = self.client.get('/syndication/naive-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.latest('published').published
latest = rfc3339_date(timezone.make_aware(d, TZ))
self.assertEqual(updated, latest)
def test_aware_datetime_conversion(self):
"""
Test that datetimes with timezones don't get trodden on.
"""
response = self.client.get('/syndication/aware-dates/')
doc = minidom.parseString(response.content)
published = doc.getElementsByTagName('published')[0].firstChild.wholeText
self.assertEqual(published[-6:], '+00:42')
def test_feed_last_modified_time(self):
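        """
        Test that the Last-Modified header is derived from the latest item
        date and omitted when the feed provides no item_pubdate.
        """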
response = self.client.get('/syndication/naive-dates/')
self.assertEqual(response['Last-Modified'], 'Tue, 26 Mar 2013 01:00:00 GMT')
# No last-modified when feed has no item_pubdate
response = self.client.get('/syndication/no_pubdate/')
self.assertFalse(response.has_header('Last-Modified'))
def test_feed_url(self):
"""
Test that the feed_url can be overridden.
"""
response = self.client.get('/syndication/feedurl/')
doc = minidom.parseString(response.content)
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/')
def test_secure_urls(self):
"""
Test URLs are prefixed with https:// when feed is requested over HTTPS.
"""
response = self.client.get('/syndication/rss2/', **{
'wsgi.url_scheme': 'https',
})
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName('channel')[0]
self.assertEqual(
chan.getElementsByTagName('link')[0].firstChild.wholeText[0:5],
'https'
)
atom_link = chan.getElementsByTagName('atom:link')[0]
self.assertEqual(atom_link.getAttribute('href')[0:5], 'https')
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href')[0:5], 'https')
def test_item_link_error(self):
"""
        Test that an ImproperlyConfigured exception is raised if no link can be
        found for the item(s).
"""
self.assertRaises(ImproperlyConfigured,
self.client.get,
'/syndication/articles/')
def test_template_feed(self):
"""
Test that the item title and description can be overridden with
templates.
"""
response = self.client.get('/syndication/template/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertChildNodeContent(items[0], {
'title': 'Title in your templates: My first entry',
'description': 'Description in your templates: My first entry',
'link': 'http://example.com/blog/1/',
})
def test_template_context_feed(self):
"""
Test that custom context data can be passed to templates for title
and description.
"""
response = self.client.get('/syndication/template_context/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertChildNodeContent(items[0], {
'title': 'My first entry (foo is bar)',
'description': 'My first entry (foo is bar)',
})
def test_add_domain(self):
"""
Test add_domain() prefixes domains onto the correct URLs.
"""
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value', True),
'https://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', 'http://djangoproject.com/doc/'),
'http://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'https://djangoproject.com/doc/'),
'https://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'mailto:[email protected]'),
'mailto:[email protected]'
)
self.assertEqual(
views.add_domain('example.com', '//example.com/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
import random
from monkeygod import models
def test_avatar(session):
"""Gravatar URL should be generated"""
m1 = models.Monkey(
name='monkey1',
age=10,
email='[email protected]'
)
session.add(m1)
session.commit()
avatar = m1.avatar(128)
expected = (
'http://www.gravatar.com/avatar/90cab8a06b72c3ea49d7a09192b43166'
)
assert avatar[0:len(expected)] == expected
def test_is_friend(session):
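    """Database test to ensure friendship checks work in both directions"""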
m1 = models.Monkey(
name='monkey1',
age=10,
email='[email protected]'
)
m2 = models.Monkey(
name='monkey2',
age=20,
email='[email protected]'
)
m3 = models.Monkey(
name='monkey3',
age=30,
email='[email protected]'
)
session.add_all([m1, m2, m3])
session.commit()
m1.friends.append(m2)
m2.friends.append(m1)
session.add_all([m1, m2])
session.commit()
assert m1.is_friend(m2) is True
assert m2.is_friend(m1) is True
assert m2.is_friend(m3) is False
assert m3.is_friend(m2) is False
def test_friends(session):
"""Database test to ensure a monkey can add/delete friends"""
m1 = models.Monkey(
name='monkey1',
age=10,
email='[email protected]'
)
m2 = models.Monkey(
name='monkey2',
age=20,
email='[email protected]'
)
session.add_all([m1, m2])
session.commit()
assert m1.is_friend(m2) is False, 'Monkeys are not friends initially'
assert m2.is_friend(m1) is False, 'Monkeys are not friends initially'
assert m1.delete_friend(m2) is False, 'Removing non-existing friend fails'
    assert m1.add_friend(m1) is False, "Can't add self to friends"
assert m1.add_friend(m2) is True, 'Adding friend succeeds'
session.add_all([m1, m2])
session.commit()
assert m1.friends.count() == 1, 'Monkey has 1 friend'
assert m2.friends.count() == 1, 'Friendship is bidirectional'
assert m1.is_friend(m2) is True, 'Friend is the correct one'
assert m2.is_friend(m1) is True, 'Second monkey has the correct friend too'
    assert m1.add_friend(m2) is False, "Can't add an existing friend again"
assert m1.delete_friend(m2) is True, 'Deleting friend works correctly'
session.add_all([m1, m2])
session.commit()
assert m1.friends.count() == 0, 'Monkey again has no friends'
assert m2.friends.count() == 0, 'Deleting friends is bidirectional'
assert m1.is_friend(m2) is False, 'Monkeys are not friends anymore'
assert m2.is_friend(m1) is False, 'Monkeys are not friends anymore'
def test_many_friends(session):
"""Database test to ensure a monkey can have more than one friend"""
m1 = models.Monkey(
name='monkey1',
age=10,
email='[email protected]'
)
m2 = models.Monkey(
name='monkey2',
age=20,
email='[email protected]'
)
m3 = models.Monkey(
name='monkey3',
age=30,
email='[email protected]'
)
session.add_all([m1, m2, m3])
session.commit()
m1.add_friend(m2)
assert m1.add_friend(m3) is True, 'Monkey can have more than 1 friend'
session.add_all([m1, m2, m3])
session.commit()
    assert m1.friends.count() == 2, 'Monkey1 has more than 1 friend'
assert m2.friends.count() == 1, 'Friends added bidirectionally'
assert m3.friends.count() == 1, 'Friends added bidirectionally'
assert m2.is_friend(m3) is False, 'Two other monkeys are not friends'
def test_best_friends(session):
"""Database test to ensure best friend logic works correctly"""
m1 = models.Monkey(
name='monkey1',
age=10,
email='[email protected]'
)
m2 = models.Monkey(
name='monkey2',
age=20,
email='[email protected]'
)
m3 = models.Monkey(
name='monkey3',
age=30,
email='[email protected]'
)
session.add_all([m1, m2, m3])
session.commit()
assert m1.best_friend is None, 'Monkey has no best friend initially'
assert m2.best_friend is None, 'Monkey has no best friend initially'
assert m3.best_friend is None, 'Monkey has no best friend initially'
    assert m1.add_best_friend(m1) is False, "Can't add self as best friend"
assert m1.add_best_friend(m3) is True, 'Can add other monkeys as bf'
assert m2.add_best_friend(m3) is True, (
'Multiple monkeys can consider one monkey best friend'
)
session.add_all([m1, m2, m3])
session.commit()
assert m1.best_friend == m3, 'Monkey has correct best friend'
assert m3.best_friend_of.count() == 2, (
'Monkey3 is considered best friend of multiple monkeys'
)
assert m3.best_friend is None, 'Best friend is not bidirectional'
assert m1.add_best_friend(m2) is True, 'Can change best friend'
m2.best_friend = None
session.add_all([m1, m2, m3])
session.commit()
assert m1.best_friend == m2, 'Changed best friend successfully'
assert m2.best_friend is None, 'Removing best friend succeeds'
assert m1.delete_friend(m2) is True, 'Can delete friend who is also best'
session.add_all([m1, m2, m3])
session.commit()
assert m1.best_friend is None, 'Deleting from friends also clears best'
def test_friends_without_best(session):
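    """Database test to ensure friends_without_best() returns all friends except the best friend"""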
m1 = models.Monkey(
name='monkey1',
age=10,
email='[email protected]'
)
m2 = models.Monkey(
name='monkey2',
age=20,
email='[email protected]'
)
m3 = models.Monkey(
name='monkey3',
age=30,
email='[email protected]'
)
session.add_all([m1, m2, m3])
session.commit()
m1.add_friend(m2)
m1.add_best_friend(m3)
session.add_all([m1, m2, m3])
session.commit()
no_bf_friends = m1.friends_without_best()
for friend in no_bf_friends:
assert m1.best_friend != friend
assert (m1.friends.count() - no_bf_friends.count()) == 1, (
'All friends but best'
)
assert m2.friends.count() == m2.friends_without_best().count(), (
'Without best friend lists are the same'
)
def test_non_friends(session):
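    """Database test to ensure non_friends() lists monkeys that are not yet friends"""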
m1 = models.Monkey(
name='monkey1',
age=10,
email='[email protected]'
)
m2 = models.Monkey(
name='monkey2',
age=20,
email='[email protected]'
)
m3 = models.Monkey(
name='monkey3',
age=30,
email='[email protected]'
)
session.add_all([m1, m2, m3])
session.commit()
m1.add_friend(m2)
session.add_all([m1, m2])
session.commit()
others = m1.non_friends()
assert others.count() == 1, 'Lists one not added friend'
for monkey in others:
assert not m1.is_friend(monkey), 'Monkeys are not friends'
import threading
import Queue
import logging
WAIT_QUEUE_TIMEOUT = 2 # in seconds
class WikipediaPreProcessor(threading.Thread):
    '''The WikipediaPreProcessor class preprocesses article text, removing markup tags,
       unnecessary links and other metadata so that only the plain text is kept
    '''
'''constructor
@param input_queue if the preprocessor is used as a thread, articles are read from this queue [optional]
@param output_queue if the preprocessor is used as a thread, articles are written to this queue [optional]
'''
def __init__(self, input_queue=None, output_queue=None):
threading.Thread.__init__(self)
self._input_queue = input_queue
self._output_queue = output_queue
self._end = False
        self._REMOVE_PAIRS = (
(u'{{', u'}}'),
(u'=====', u'====='),
(u'====', u'===='),
(u'===', u'==='),
(u'==', u'=='),
(u'[http', u']'),
(u'[File:', u']'),
(u'[Category:', u']')
)
        # html tags can end with /> or </tag-name> and need to be handled separately
        # <!-- --> comments as well, since they can span multiple lines
'''processes a single article and cleans the text
@param article a dictionary with a text field that contains the text (will be modified to hold the new text)
@return the article with clean text only containing valid links
'''
def process(self, article):
new_text = ''
next_line_starts_in_comment = False
line_starts_in_comment = False
next_line_in_tag = 0
line_in_tag = 0
html_tags = []
# iterate over lines
lines = article['text'].strip().split('\n')
for line in lines:
line = line.strip()
            # reset html tags on an empty line (probably a tag was not properly closed somewhere)
if len(line) == 0:
html_tags = []
            # STEP 1 - remove wiki bold/italic quote markup and unescape angle bracket entities
line = line.replace("'''", "")
line = line.replace("''", "")
line = line.replace('<', '<')
line = line.replace('>', '>')
            # keep the numeric content of <sub> tags preceded by an uppercase letter (chemical formulas); the tags themselves are removed
index = 1
while line.find('<sub>', index) != -1:
index = line.find('<sub>', index)+5
letter_before = line[index-6]
end = line.find('</sub>', index)
content = line[index:end]
# check if content is numeric and letter is uppercase
if content.isdigit() and letter_before == letter_before.upper():
line = line[:index-5] + content + line[end+6:]
index = index-5
# check if the line starts in a comment
line_starts_in_comment = next_line_starts_in_comment
next_line_starts_in_comment = False
# check if the line starts in a tag
line_in_tag = next_line_in_tag
next_line_in_tag = 0
# STEP 2 - remove comments
while line.find('<!--') != -1 or line_starts_in_comment:
start = 0
if not line_starts_in_comment:
start = line.find('<!--')
line_starts_in_comment = False
end = line.find('-->')
if end == -1:
next_line_starts_in_comment = True
line = line[0:start]
else:
# a problem occurred, just ignore the line
if start > end:
line = ''
else:
line = line[0:start] + line[end+3:]
# STEP 3 - remove html tags
index = 0
outer_start_tag = line.find('<')
# if the line already starts within an html tag
if len(html_tags) != 0:
outer_start_tag = 0
while line.find('<', index) != -1:
start = False
index = line.find('<', index)+1
                # if the '<' is the last character in the line, just remove it
if index == len(line):
line = line[:-1]
else:
end_tag = line.find('>', index)
if end_tag == -1:
line = line[:line.find('<', index)]
else:
# if tag is a closing tag
if line[index] == '/':
                            # this check is necessary as some invalid closing tags appear on Wikipedia - nothing to be done about that
if len(html_tags) != 0:
html_tags.pop()
# this is the outermost html tag
if len(html_tags) == 0:
line = line[:outer_start_tag] + line[end_tag+1:]
# start with next tag
outer_start_tag = line.find('<')
index = 0
# not a closing tag
else:
                            # a self-closing tag (ending in '/>'), just remove it
if line[end_tag-1] == '/':
line = line[0:index-1] + line[end_tag+1:]
index-= 1
# if this was the outermost tag, start from the next tag
if index == outer_start_tag:
outer_start_tag = line.find('<')
# a normal tag is simply pushed to the stack
else:
tag_name = line[index:end_tag]
                                # ignore unclosed br tags
if tag_name != 'br':
html_tags.append(line[index:end_tag])
# TODO: refactor
if len(html_tags) > 0:
# there is an opening tag somewhere
if line.find('<') != -1:
line = line[:line.find('<')]
else: # everything is already within a tag
line = ''
# STEP 4 - remove invalid lines
# STEP 9 - strip the line
line = line.strip(' *\r\n')
# remove link-only lines
if len(line) > 4 and line.find('[[') == 0 and line.find('[[', 1) == -1 and line.find(']]') == len(line)-2:
line = ''
# simply ignore too short lines and those that start with an incorrect token
if len(line) > 4 and line[0:2] != ' |' and line[0] != '|' and line[0] != '!':
# STEP 5 - remove incorrect links
line = self._remove_incorrect_links(line)
# STEP 6 - remove pairs
for pair in self._REMOVE_PAIRS:
line = self._remove_pairs(line, pair)
# STEP 7 - remove end of line if { in it
line = self._remove_box(line)
                # STEP 8 - remove empty brackets and double spaces that remained
line = self._remove_empty_brackets(line)
# append the cleaned line to the new text
if len(line) > 0:
new_text += line + '\n'
# set the cleaned text in the article and return it
article['text'] = new_text
return article
    '''the main thread method - should not be called directly, use start() instead
'''
def run(self):
while not self._end:
try:
# retrieve a new article from the queue
article = self._input_queue.get(True, WAIT_QUEUE_TIMEOUT)
# ignore redirects
if article['type'] == 'article':
# process the article
logging.info('preprocessing article %s' % (article['title'].encode('ascii', 'ignore')))
self.process(article)
# add the cleaned article to the output queue
self._output_queue.put(article)
# mark the task as done
self._input_queue.task_done()
except Queue.Empty:
pass
'''ends the thread
'''
def end(self):
self._end = True
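    '''removes text enclosed by a start/end marker pair, handling nested pairs recursively
    @param line the line to clean
    @param pair a (start marker, end marker) tuple, e.g. (u'{{', u'}}')
    @return the line with all matching pairs and their content removed
    '''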
def _remove_pairs(self, line, pair):
length = len(pair[0])
start = line.find(pair[0])
end = line.find(pair[1], start+length)
while start != -1 and end != -1 and start < end:
inner = line.find(pair[0], start+length)
if inner != -1 and inner < end: # there is an inner pair, remove first
line = line[0:start] + self._remove_pairs(line[start+length:], pair)
else: # remove pair itself
line = line[0:start] + line[end+len(pair[1]):]
start = line.find(pair[0])
end = line.find(pair[1], start + length)
return line
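    '''removes wiki links that contain a colon (e.g. namespace or interwiki links), keeping only the label text where possible
    @param line the line to clean
    @return the line with invalid links removed, or an empty string if a link is not closed properly
    '''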
def _remove_incorrect_links(self, line):
# iterate over all links
next_link = line.find('[[')
while next_link != -1:
following_link = line.find('[[', next_link+2)
next_colon = line.find(':', next_link)
next_separator = line.find('|', next_link)
next_end = line.find(']]', next_link)
# the next link is invalid if it contains a colon
if next_colon != -1 and (next_colon < next_end and (following_link == -1 or following_link > next_end or following_link > next_colon)):
# remove the opening link target
remove_characters_start = 2
# if there is a separator in the invalid link
if next_separator != -1 and (next_separator < next_end and (following_link == -1 or following_link > next_end or following_link > next_separator)):
# remove everything until the separator
remove_characters_start = (next_separator-next_link)+1
# find matching end brackets
# if there are inner links
if following_link != -1 and following_link < next_end:
# count inner links
inner_link_counter = 0
next_inner_link = following_link
while next_inner_link == -1 or next_inner_link < next_end:
inner_link_counter+= 1
next_inner_link = line.find('[[', next_inner_link+2)
next_end = line.find(']]', next_end+2)
# if a link is not closed, do not parse this line
if next_end == -1:
return ''
# find matching end brackets
end_link = next_end
#while inner_link_counter > 0:
# tmp = line.find(']]', end_link)
# # something went completely wrong here, ignore this line
# if tmp == -1:
# return ''
# else:
# end_link = line.find(']]', end_link+2)
# inner_link_counter-= 1
# if there is no inner_link
else:
end_link = next_end
# remove the ending tag first
line = line[:end_link] + line[end_link+2:]
# then remove the beginning of the link
line = line[:next_link] + line[next_link + remove_characters_start:]
# start at the removed link position
next_link = line.find('[[', next_link)
# if the link is valid
else:
# just continue to the next link
next_link = following_link
return line
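    '''truncates the line at the first opening curly bracket (infobox/template residue)
    @param line the line to clean
    @return the truncated line
    '''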
def _remove_box(self, line):
if line.find('{') != -1:
line = line[0:line.find('{')]
return line
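    '''removes empty brackets and normalizes whitespace left over by the previous steps
    @param line the line to clean
    @return the cleaned line
    '''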
def _remove_empty_brackets(self, line):
line = line.replace('()', '')
line = line.replace('[]', '')
line = line.replace(' ', ' ')
line = line.replace(' .', '.')
line = line.replace(' ,', ',')
line = line.replace(' :', ':')
line = line.replace(' !', '!')
line = line.replace(' ', ' ')
return line
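# Usage sketch: a minimal illustration of wiring the preprocessor into the
# queue-based pipeline described in the class docstring. The article fields
# ('type', 'title', 'text') match what run() and process() expect; the sample
# text and the __main__ wiring below are illustrative assumptions only, not
# part of any production pipeline.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    input_queue = Queue.Queue()
    output_queue = Queue.Queue()
    preprocessor = WikipediaPreProcessor(input_queue, output_queue)
    preprocessor.start()
    input_queue.put({
        'type': 'article',
        'title': u'Example',
        'text': u"'''Example''' is a <!-- comment --> test {{Infobox}} line.\n",
    })
    input_queue.join()    # blocks until task_done() has been called for the article
    preprocessor.end()    # tell the worker loop to stop after its current queue timeout
    preprocessor.join()
    print(output_queue.get()['text'])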