repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
girving/tensorflow | tensorflow/python/framework/errors_test.py | 16 | 5501 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.errors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import pickle
import warnings
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class ErrorsTest(test.TestCase):
def _CountReferences(self, typeof):
"""Count number of references to objects of type |typeof|."""
objs = gc.get_objects()
ref_count = 0
for o in objs:
try:
if isinstance(o, typeof):
ref_count += 1
# Certain versions of Python keep a weakref to deleted objects.
except ReferenceError:
pass
return ref_count
def testUniqueClassForEachErrorCode(self):
for error_code, exc_type in [
(errors.CANCELLED, errors_impl.CancelledError),
(errors.UNKNOWN, errors_impl.UnknownError),
(errors.INVALID_ARGUMENT, errors_impl.InvalidArgumentError),
(errors.DEADLINE_EXCEEDED, errors_impl.DeadlineExceededError),
(errors.NOT_FOUND, errors_impl.NotFoundError),
(errors.ALREADY_EXISTS, errors_impl.AlreadyExistsError),
(errors.PERMISSION_DENIED, errors_impl.PermissionDeniedError),
(errors.UNAUTHENTICATED, errors_impl.UnauthenticatedError),
(errors.RESOURCE_EXHAUSTED, errors_impl.ResourceExhaustedError),
(errors.FAILED_PRECONDITION, errors_impl.FailedPreconditionError),
(errors.ABORTED, errors_impl.AbortedError),
(errors.OUT_OF_RANGE, errors_impl.OutOfRangeError),
(errors.UNIMPLEMENTED, errors_impl.UnimplementedError),
(errors.INTERNAL, errors_impl.InternalError),
(errors.UNAVAILABLE, errors_impl.UnavailableError),
(errors.DATA_LOSS, errors_impl.DataLossError),
]:
# pylint: disable=protected-access
self.assertTrue(
isinstance(
errors_impl._make_specific_exception(None, None, None,
error_code), exc_type))
# pylint: enable=protected-access
def testKnownErrorClassForEachErrorCodeInProto(self):
for error_code in error_codes_pb2.Code.values():
# pylint: disable=line-too-long
if error_code in (
error_codes_pb2.OK, error_codes_pb2.
DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_
):
continue
# pylint: enable=line-too-long
with warnings.catch_warnings(record=True) as w:
# pylint: disable=protected-access
exc = errors_impl._make_specific_exception(None, None, None, error_code)
# pylint: enable=protected-access
self.assertEqual(0, len(w)) # No warning is raised.
self.assertTrue(isinstance(exc, errors_impl.OpError))
self.assertTrue(errors_impl.OpError in exc.__class__.__bases__)
def testUnknownErrorCodeCausesWarning(self):
with warnings.catch_warnings(record=True) as w:
# pylint: disable=protected-access
exc = errors_impl._make_specific_exception(None, None, None, 37)
# pylint: enable=protected-access
self.assertEqual(1, len(w))
self.assertTrue("Unknown error code: 37" in str(w[0].message))
self.assertTrue(isinstance(exc, errors_impl.OpError))
def testStatusDoesNotLeak(self):
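# Trigger a DeleteFile call that is expected to fail so the status object is
# exercised, then verify that no ScopedTFStatus instances survive collection.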
try:
with errors.raise_exception_on_not_ok_status() as status:
pywrap_tensorflow.DeleteFile(
compat.as_bytes("/DOES_NOT_EXIST/"), status)
except:
pass
gc.collect()
self.assertEqual(0, self._CountReferences(c_api_util.ScopedTFStatus))
def testPickleable(self):
for error_code in [
errors.CANCELLED,
errors.UNKNOWN,
errors.INVALID_ARGUMENT,
errors.DEADLINE_EXCEEDED,
errors.NOT_FOUND,
errors.ALREADY_EXISTS,
errors.PERMISSION_DENIED,
errors.UNAUTHENTICATED,
errors.RESOURCE_EXHAUSTED,
errors.FAILED_PRECONDITION,
errors.ABORTED,
errors.OUT_OF_RANGE,
errors.UNIMPLEMENTED,
errors.INTERNAL,
errors.UNAVAILABLE,
errors.DATA_LOSS,
]:
# pylint: disable=protected-access
exc = errors_impl._make_specific_exception(None, None, None, error_code)
# pylint: enable=protected-access
unpickled = pickle.loads(pickle.dumps(exc))
self.assertEqual(exc.node_def, unpickled.node_def)
self.assertEqual(exc.op, unpickled.op)
self.assertEqual(exc.message, unpickled.message)
self.assertEqual(exc.error_code, unpickled.error_code)
if __name__ == "__main__":
test.main()
| apache-2.0 |
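The test file above checks two behaviours of `errors_impl`: every canonical error code maps to its own `OpError` subclass, and those exceptions survive a pickle round trip. The sketch below mirrors that pattern in plain Python without importing TensorFlow; `OpError`, `CODE_TO_EXCEPTION`, and `make_specific_exception` here are hypothetical stand-ins for the private `errors_impl._make_specific_exception` machinery the tests exercise, not TensorFlow's actual implementation.

```python
import pickle
import warnings


class OpError(Exception):
    """Minimal stand-in for errors_impl.OpError (illustrative only)."""

    def __init__(self, node_def, op, message, error_code):
        super(OpError, self).__init__(message)
        self.node_def = node_def
        self.op = op
        self.message = message
        self.error_code = error_code

    def __reduce__(self):
        # Keep subclasses picklable with their full constructor arguments.
        return (self.__class__,
                (self.node_def, self.op, self.message, self.error_code))


class CancelledError(OpError):
    pass


class UnknownError(OpError):
    pass


# Hypothetical mapping from integer error codes to exception classes,
# mirroring the code/class pairs checked in testUniqueClassForEachErrorCode.
CANCELLED, UNKNOWN = 1, 2
CODE_TO_EXCEPTION = {CANCELLED: CancelledError, UNKNOWN: UnknownError}


def make_specific_exception(node_def, op, message, error_code):
    """Return an exception for error_code, warning on unknown codes."""
    try:
        return CODE_TO_EXCEPTION[error_code](node_def, op, message, error_code)
    except KeyError:
        warnings.warn("Unknown error code: %d" % error_code)
        return UnknownError(node_def, op, message, error_code)


if __name__ == "__main__":
    exc = make_specific_exception(None, "my_op", "cancelled", CANCELLED)
    assert isinstance(exc, CancelledError)
    # The pickle round trip preserves the same fields the TensorFlow test checks.
    clone = pickle.loads(pickle.dumps(exc))
    assert clone.op == exc.op and clone.message == exc.message
    assert clone.node_def == exc.node_def and clone.error_code == exc.error_code
```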
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/state/__init__.py | 1 | 98243 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container describes IS MT-ISN state.
"""
__slots__ = ("_path_helper", "_extmethods", "__type")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-isn",
"state",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/state/type (identityref)
YANG Description: The type of TLV being described. The type of TLV is
expressed as a canonical name.
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/state/type (identityref)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: The type of TLV being described. The type of TLV is
expressed as a canonical name.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with identityref""",
"defined-type": "openconfig-network-instance:identityref",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'AREA_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:AREA_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IIS_NEIGHBORS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IIS_NEIGHBORS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'INSTANCE_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:INSTANCE_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'AUTHENTICATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:AUTHENTICATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'PURGE_OI': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:PURGE_OI': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'EXTENDED_IS_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_ALIAS_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_ALIAS_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_INTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'NLPID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:NLPID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_EXTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'EXTENDED_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'DYNAMIC_NAME': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:DYNAMIC_NAME': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_ISN': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_ISN': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MULTI_TOPOLOGY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MULTI_TOPOLOGY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ROUTER_CAPABILITIES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ROUTER_CAPABILITIES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}},), is_leaf=True, yang_name="type", 
parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
type = __builtin__.property(_get_type)
_pyangbind_elements = OrderedDict([("type", type)])
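# --- Hypothetical usage sketch (added commentary, not generated code) ---------
# As the class docstring notes, each member of this container is exposed as a
# class variable with its YANG type; here the only leaf is the read-only
# identityref "type". An assumed read/populate flow could look like:
#
#   s = state()
#   s._path()              # ["network-instances", ..., "mt-isn", "state"]
#   s.type                 # getter-only property; the leaf is config false
#   s._set_type("MT_ISN")  # backends populate the leaf directly; the value
#                          # must be one of the identity keys listed above
# -------------------------------------------------------------------------------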
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container describes IS MT-ISN state.
"""
__slots__ = ("_path_helper", "_extmethods", "__type")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-isn",
"state",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/state/type (identityref)
YANG Description: The type of TLV being described. The type of TLV is
expressed as a canonical name.
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/state/type (identityref)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: The type of TLV being described. The type of TLV is
expressed as a canonical name.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with identityref""",
"defined-type": "openconfig-network-instance:identityref",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'AREA_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:AREA_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IIS_NEIGHBORS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IIS_NEIGHBORS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'INSTANCE_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:INSTANCE_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'AUTHENTICATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:AUTHENTICATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'PURGE_OI': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:PURGE_OI': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'EXTENDED_IS_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_ALIAS_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_ALIAS_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_INTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'NLPID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:NLPID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_EXTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'EXTENDED_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'DYNAMIC_NAME': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:DYNAMIC_NAME': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_ISN': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_ISN': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MULTI_TOPOLOGY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MULTI_TOPOLOGY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ROUTER_CAPABILITIES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ROUTER_CAPABILITIES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}},), is_leaf=True, yang_name="type", 
parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
type = __builtin__.property(_get_type)
_pyangbind_elements = OrderedDict([("type", type)])
| apache-2.0 |
joshbohde/scikit-learn | examples/document_clustering.py | 1 | 3008 | """
===============================================
Clustering text documents using MiniBatchKmeans
===============================================
This is an example showing how scikit-learn can be used to cluster
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
"""
print __doc__
# Author: Peter Prettenhofer <[email protected]>
# License: Simplified BSD
from time import time
import logging
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import Vectorizer
from sklearn import metrics
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import randindex
from sklearn.preprocessing import Normalizer
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
# categories = None
print "Loading 20 newsgroups dataset for categories:"
print categories
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42)
filenames = np.concatenate((data_train.filenames, data_test.filenames))
target_names = set(data_train.target_names + data_test.target_names)
print "%d documents" % len(filenames)
print "%d categories" % len(target_names)
print
# merge the train and test targets into a single label array
labels = np.concatenate((data_train.target, data_test.target))
true_k = np.unique(labels).shape[0]
print "Extracting features from the training dataset using a sparse vectorizer"
t0 = time()
vectorizer = Vectorizer(max_features=10000)
X = vectorizer.fit_transform((open(f).read() for f in filenames))
X = Normalizer(norm="l2", copy=False).transform(X)
print "done in %fs" % (time() - t0)
print "n_samples: %d, n_features: %d" % X.shape
print
###############################################################################
# Now sparse MiniBatchKmeans
print "_" * 80
mbkm = MiniBatchKMeans(init="random", k=true_k, max_iter=10, random_state=13,
chunk_size=1000, tol=0.0, n_init=1)
print "Clustering sparse data with %s" % str(mbkm)
print
t0 = time()
mbkm.fit(X)
print "done in %0.3fs" % (time() - t0)
ri = randindex(labels, mbkm.labels_)
vmeasure = metrics.v_measure_score(labels, mbkm.labels_)
print "Homogeneity: %0.3f" % metrics.homogeneity_score(labels, mbkm.labels_)
print "Completeness: %0.3f" % metrics.completeness_score(labels, mbkm.labels_)
print "V-measure: %0.3f" % vmeasure
print "Rand-Index: %.3f" % ri
print
| bsd-3-clause |
direvus/ansible | lib/ansible/plugins/action/netconf.py | 19 | 3542 | #
# Copyright 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import sys
from ansible.plugins.action.normal import ActionModule as _ActionModule
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
if self._play_context.connection not in ['netconf', 'local'] and self._task.action == 'netconf_config':
return {'failed': True, 'msg': 'Connection type %s is not valid for netconf_config module. '
'Valid connection type is netconf or local (deprecated)' % self._play_context.connection}
elif self._play_context.connection not in ['netconf'] and self._task.action != 'netconf_config':
return {'failed': True, 'msg': 'Connection type %s is not valid for %s module. '
'Valid connection type is netconf.' % (self._play_context.connection, self._task.action)}
if self._play_context.connection == 'local' and self._task.action == 'netconf_config':
args = self._task.args
pc = copy.deepcopy(self._play_context)
pc.connection = 'netconf'
pc.port = int(args.get('port') or self._play_context.port or 830)
pc.remote_user = args.get('username') or self._play_context.connection_user
pc.password = args.get('password') or self._play_context.password
pc.private_key_file = args.get('ssh_keyfile') or self._play_context.private_key_file
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
timeout = args.get('timeout')
command_timeout = int(timeout) if timeout else connection.get_option('persistent_command_timeout')
connection.set_options(direct={'persistent_command_timeout': command_timeout, 'look_for_keys': args.get('look_for_keys'),
'hostkey_verify': args.get('hostkey_verify'),
'allow_agent': args.get('allow_agent')})
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
return super(ActionModule, self).run(task_vars=task_vars)
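# Illustrative usage sketch (not part of the original plugin): a playbook task
# of the kind this action plugin intercepts when run with connection=local.
# The host, credentials and the 'xml' parameter name are assumptions and may
# differ between Ansible/module versions.
#
#   - name: push configuration over netconf (deprecated local connection)
#     netconf_config:
#       xml: "{{ lookup('file', 'candidate.xml') }}"
#       port: 830
#       username: admin
#       password: secret
#       hostkey_verify: no
#     connection: local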
| gpl-3.0 |
idaholab/raven | framework/UI/SensitivityView.py | 2 | 21972 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A view widget for visualizing the sensitivity coefficients of each locally
constructed model of the data.
"""
#For future compatibility with Python 3
from __future__ import division, print_function, absolute_import
#End compatibility block for Python 3
try:
from PySide import QtCore as qtc
from PySide import QtGui as qtg
from PySide import QtGui as qtw
from PySide import QtSvg as qts
except ImportError as e:
from PySide2 import QtCore as qtc
from PySide2 import QtGui as qtg
from PySide2 import QtWidgets as qtw
from PySide2 import QtSvg as qts
from .BaseTopologicalView import BaseTopologicalView
import math
import numpy as np
## TODO: Fix the fonts
## TODO: Make scale make sense
## TODO: Place labels better
class SensitivityView(BaseTopologicalView):
"""
A view widget for visualizing the sensitivity coefficients of each locally
constructed model of the data.
"""
def __init__(self, parent=None, amsc=None, title=None):
""" Initialization method that can optionally specify the parent widget,
an AMSC object to reference, and a title for this widget.
@ In, parent, an optional QWidget that will be the parent of this widget
@ In, amsc, an optional AMSC_Object specifying the underlying data
object for this widget to use.
@ In, title, an optional string specifying the title of this widget.
"""
super(SensitivityView, self).__init__(parent,amsc,title)
def Reinitialize(self, parent=None, amsc=None, title=None):
""" Reinitialization method that resets this widget and can optionally
specify the parent widget, an AMSC object to reference, and a title for
this widget.
@ In, parent, an optional QWidget that will be the parent of this widget
@ In, amsc, an optional AMSC_Object specifying the underlying data
object for this widget to use.
@ In, title, an optional string specifying the title of this widget.
"""
# Try to apply a new layout, if one already exists then make sure to grab
# it for updating
if self.layout() is None:
self.setLayout(qtw.QVBoxLayout())
layout = self.layout()
self.clearLayout(layout)
self.padding = 2
## General Graphics View/Scene setup
self.scene = qtw.QGraphicsScene()
self.scene.setSceneRect(0,0,100,100)
self.gView = qtw.QGraphicsView(self.scene)
self.gView.setRenderHints(qtg.QPainter.Antialiasing |
qtg.QPainter.SmoothPixmapTransform)
self.gView.setHorizontalScrollBarPolicy(qtc.Qt.ScrollBarAlwaysOff)
self.gView.setVerticalScrollBarPolicy(qtc.Qt.ScrollBarAlwaysOff)
self.font = qtg.QFont('sans-serif', 12)
## Defining the right click menu
self.rightClickMenu = qtw.QMenu()
self.shapeMenu = qtw.QMenu('Layout')
self.shapeGroup = qtw.QActionGroup(self.shapeMenu)
self.rightClickMenu.addMenu(self.shapeMenu)
shapeActions = []
shapeActions.append(self.shapeMenu.addAction('Horizontal Bar'))
shapeActions.append(self.shapeMenu.addAction('Radial'))
for act in shapeActions:
act.setCheckable(True)
self.shapeGroup.addAction(act)
shapeActions[0].setChecked(True)
self.shapeGroup.triggered.connect(self.updateScene)
## Ba da ba ba ba I'm lovin' it
self.valueMenu = qtw.QMenu('Value to Display')
self.valueGroup = qtw.QActionGroup(self.valueMenu)
self.rightClickMenu.addMenu(self.valueMenu)
valueActions = []
valueActions.append(self.valueMenu.addAction('Linear coefficients'))
valueActions.append(self.valueMenu.addAction('Pearson correlation'))
valueActions.append(self.valueMenu.addAction('Spearman rank correlation'))
for act in valueActions:
act.setCheckable(True)
self.valueGroup.addAction(act)
valueActions[0].setChecked(True)
self.valueGroup.triggered.connect(self.updateScene)
self.showLabelsAction = self.rightClickMenu.addAction('Show Labels')
self.showLabelsAction.setCheckable(True)
self.showLabelsAction.setChecked(True)
self.showLabelsAction.triggered.connect(self.updateScene)
self.showNumberAction = self.rightClickMenu.addAction('Show Numeric Values')
self.showNumberAction.setCheckable(True)
self.showNumberAction.setChecked(True)
self.showNumberAction.triggered.connect(self.updateScene)
self.bundledAction = self.rightClickMenu.addAction('Bundled on Dimension')
self.bundledAction.setCheckable(True)
self.bundledAction.setChecked(False)
self.bundledAction.triggered.connect(self.updateScene)
self.signedAction = self.rightClickMenu.addAction('Signed')
self.signedAction.setCheckable(True)
self.signedAction.setChecked(True)
self.signedAction.triggered.connect(self.updateScene)
self.fillAction = self.rightClickMenu.addAction('Fill viewport')
self.fillAction.setCheckable(True)
self.fillAction.setChecked(True)
self.fillAction.triggered.connect(self.updateScene)
captureAction = self.rightClickMenu.addAction('Capture')
captureAction.triggered.connect(self.saveImage)
self.gView.scale(self.gView.width()/self.scene.width(),
self.gView.height()/self.scene.height())
layout.addWidget(self.gView)
self.updateScene()
def saveImage(self, filename=None):
"""
Saves the current display of this view to a static image by loading a
file dialog box.
@ In, filename, string, optional parameter specifying where this image
will be saved. If None, then a dialog box will prompt the user for a
name and location.
@ Out, None
"""
if filename is None:
dialog = qtw.QFileDialog(self)
dialog.setFileMode(qtw.QFileDialog.AnyFile)
dialog.setAcceptMode(qtw.QFileDialog.AcceptSave)
dialog.exec_()
if dialog.result() == qtw.QFileDialog.Accepted:
filename = dialog.selectedFiles()[0]
else:
return
self.scene.clearSelection()
self.scene.setSceneRect(self.scene.itemsBoundingRect())
if filename.endswith('.svg'):
svgGen = qts.QSvgGenerator()
svgGen.setFileName(filename)
svgGen.setSize(self.scene.sceneRect().size().toSize())
svgGen.setViewBox(self.scene.sceneRect())
svgGen.setTitle("Screen capture of " + self.__class__.__name__)
svgGen.setDescription("Generated from RAVEN.")
painter = qtg.QPainter (svgGen)
else:
image = qtg.QImage(self.scene.sceneRect().size().toSize(), qtg.QImage.Format_ARGB32)
image.fill(qtc.Qt.transparent)
painter = qtg.QPainter(image)
self.scene.render(painter)
if not filename.endswith('.svg'):
image.save(filename,quality=100)
del painter
def contextMenuEvent(self,event):
""" An event handler triggered when the user right-clicks on this view that
will force the context menu to appear.
@ In, event, a QContextMenuEvent specifying the context of this event.
"""
self.rightClickMenu.popup(event.globalPos())
def resizeEvent(self,event):
""" An event handler triggered when the user resizes this view.
@ In, event, a QResizeEvent specifying the context of this event.
"""
super(SensitivityView, self).resizeEvent(event)
self.gView.scale(self.gView.width()/self.scene.width(),
self.gView.height()/self.scene.height())
self.updateScene()
def selectionChanged(self):
""" An event handler triggered when the user changes the selection of the
data.
"""
self.updateScene()
def persistenceChanged(self):
""" An event handler triggered when the user changes the persistence setting
of the data.
"""
self.updateScene()
def modelsChanged(self):
""" An event handler triggered when the user requests a new set of local
models.
"""
self.updateScene()
def layoutRadialScene(self):
""" A convenience method for drawing the sensitivity scene in radial fashion
"""
self.scene.clear()
width = self.scene.width()
height = self.scene.height()
minDim = min([width,height])-24. # 12 point font * 2 for top and bottom,
## width is a bit harder...
centerX = width/2.
centerY = height/2.
radius = minDim/2.
axisPen = qtg.QPen(qtc.Qt.black)
self.scene.addEllipse(centerX - radius, centerY - radius, minDim, \
minDim, axisPen)
names = self.amsc.GetNames()[:-1]
for i,name in enumerate(names):
if len(names) <= 2:
theta = 3*math.pi*float(i)/2.
else:
theta = 2*math.pi*float(i)/float(len(names))
endX = radius*math.cos(theta)+centerX
endY = radius*math.sin(theta)+centerY
self.scene.addLine(centerX,centerY,endX,endY,axisPen)
if self.showLabelsAction.isChecked():
txtItem = self.scene.addSimpleText(name,self.font)
txtItem.setPos(endX,endY)
txtItem.setFlag(qtw.QGraphicsItem.ItemIsMovable)
txtItem.setFlag(qtw.QGraphicsItem.ItemIsSelectable)
txtItem.setFlag(qtw.QGraphicsItem.ItemIgnoresTransformations)
selection = self.amsc.GetSelectedSegments()
colorMap = self.amsc.GetColors()
if self.valueGroup.checkedAction().text() == 'Linear coefficients':
fits = self.amsc.SegmentFitCoefficients()
elif self.valueGroup.checkedAction().text() == 'Pearson correlation':
fits = self.amsc.SegmentPearsonCoefficients()
elif self.valueGroup.checkedAction().text() == 'Spearman rank correlation':
fits = self.amsc.SegmentSpearmanCoefficients()
## Check if they selected any extrema
if selection is None or len(selection) == 0:
selection = []
selectedExts = self.amsc.GetSelectedExtrema()
allSegments = self.amsc.GetCurrentLabels()
for minMaxPair in allSegments:
for extIdx in selectedExts:
if extIdx in minMaxPair:
selection.append(minMaxPair)
## Okay, well then we will just plot everything we have for the current
## level
if len(selection) == 0:
selection = allSegments
if self.valueGroup.checkedAction().text() == 'Linear coefficients':
maxValue = 0
for extPair in selection:
if maxValue < max(map(abs,fits[extPair])):
maxValue = max(map(abs,fits[extPair]))
else:
maxValue = 1
for extPair in selection:
myColor = colorMap[extPair]
myPen = qtg.QPen(qtg.QColor('#000000'))
brushColor = qtg.QColor(myColor)
brushColor.setAlpha(127)
myBrush = qtg.QBrush(brushColor)
myPoly = qtg.QPolygonF()
for i,val in enumerate(map(abs,fits[extPair])):
if len(names) <= 2:
theta = 3*math.pi*float(i)/2.
else:
theta = 2*math.pi*float(i)/float(len(names))
dimX = (val/maxValue)*radius*math.cos(theta)+centerX
dimY = (val/maxValue)*radius*math.sin(theta)+centerY
myPoly.append(qtc.QPointF(dimX,dimY))
if len(names) <= 2:
myPoly.append(qtc.QPointF(centerX,centerY))
self.scene.addPolygon(myPoly,myPen,myBrush)
def layoutBarScene(self):
""" A convenience method for drawing the sensitivity scene in bar fashion.
"""
self.scene.clear()
width = self.scene.width()
height = self.scene.height()
plotWidth = width - 2*self.padding
plotHeight = height - 2*self.padding
maxExtent = plotWidth
axisPen = qtg.QPen(qtc.Qt.black)
names = self.amsc.GetNames()[:-1]
selection = self.amsc.GetSelectedSegments()
colorMap = self.amsc.GetColors()
if self.valueGroup.checkedAction().text() == 'Linear coefficients':
fits = self.amsc.SegmentFitCoefficients()
elif self.valueGroup.checkedAction().text() == 'Pearson correlation':
fits = self.amsc.SegmentPearsonCoefficients()
elif self.valueGroup.checkedAction().text() == 'Spearman rank correlation':
fits = self.amsc.SegmentSpearmanCoefficients()
## Check if they selected any extrema
if selection is None or len(selection) == 0:
selection = []
selectedExts = self.amsc.GetSelectedExtrema()
allSegments = self.amsc.GetCurrentLabels()
for minMaxPair in allSegments:
for extIdx in selectedExts:
if extIdx in minMaxPair:
selection.append(minMaxPair)
## Okay, well then we will just plot everything we have for the current
## level
if len(selection) == 0:
selection = allSegments
if self.valueGroup.checkedAction().text() == 'Linear coefficients':
maxValue = 0
for extPair in selection:
if maxValue < max(map(abs,fits[extPair])):
maxValue = max(map(abs,fits[extPair]))
else:
maxValue = 1
if self.bundledAction.isChecked():
axisHeight = plotHeight/float(len(names))
axisWidth = plotWidth/float(len(names))
for j,extPair in enumerate(selection):
myColor = colorMap[extPair]
myPen = qtg.QPen(qtg.QColor('#000000'))
brushColor = qtg.QColor(myColor)
brushColor.setAlpha(127)
myBrush = qtg.QBrush(brushColor)
for i,val in enumerate(fits[extPair]):
absVal = abs(val)
barExtent = (absVal/maxValue)*maxExtent
if self.signedAction.isChecked():
x = self.padding + maxExtent/2.
if val > 0:
w = barExtent/2.
else:
w = -barExtent/2.
else:
x = self.padding
w = barExtent
y = (height-self.padding) - i*axisHeight \
- j*axisHeight/float(len(selection))
h = -axisHeight / float(len(selection))
if self.showNumberAction.isChecked():
numTxtItem = self.scene.addSimpleText('%.3g' % val, self.font)
numTxtItem.setFlag(qtw.QGraphicsItem.ItemIgnoresTransformations)
fm = qtg.QFontMetrics(numTxtItem.font())
fontWidth = fm.width(numTxtItem.text())
numTxtItem.setPos(self.padding+maxExtent-fontWidth,y+h)
numTxtItem.setFlag(qtw.QGraphicsItem.ItemIsMovable)
numTxtItem.setFlag(qtw.QGraphicsItem.ItemIsSelectable)
numTxtItem.setZValue(2)
myRect = self.scene.addRect(x,y,w,h,myPen,myBrush)
myRect.setToolTip(str(val))
myRect.setAcceptHoverEvents(True)
for i,name in enumerate(names):
x = self.padding
y = height - self.padding - i/float(len(names))*plotHeight
w = plotWidth
h = -axisHeight
if self.showLabelsAction.isChecked():
txtItem = self.scene.addSimpleText(name,self.font)
txtItem.setFlag(qtw.QGraphicsItem.ItemIgnoresTransformations)
fm = qtg.QFontMetrics(txtItem.font())
fontHeight = fm.height()
fontWidth = fm.width(txtItem.text())
txtItem.setPos(self.padding-fontWidth,y+h + (axisHeight-fontHeight)/2.)
txtItem.setFlag(qtw.QGraphicsItem.ItemIsMovable)
txtItem.setFlag(qtw.QGraphicsItem.ItemIsSelectable)
txtItem.setZValue(2)
myRect = self.scene.addRect(x,y,w,h,axisPen)
myRect.setZValue(2) # Any value greater than 1 should work to draw on top
else:
if len(selection) > 0:
axisHeight = plotHeight/float(len(selection))
axisWidth = plotWidth/float(len(selection))
dimCount = len(names)
self.font.setPointSizeF(np.clip(axisHeight/float(dimCount)-2*self.padding,2,18))
for j,extPair in enumerate(selection):
myColor = colorMap[extPair]
myPen = qtg.QPen(qtg.QColor('#000000'))
brushColor = qtg.QColor(myColor)
brushColor.setAlpha(127)
myBrush = qtg.QBrush(brushColor)
for i,val in enumerate(fits[extPair]):
absVal = abs(val)
name = names[i]
barExtent = (absVal/maxValue)*maxExtent
if self.signedAction.isChecked():
x = self.padding + maxExtent/2.
if val > 0:
w = barExtent/2.
else:
w = -barExtent/2.
else:
x = self.padding
w = barExtent
y = (height-self.padding) - j*axisHeight \
- i*axisHeight/float(dimCount)
h = -axisHeight / float(dimCount)
if self.showLabelsAction.isChecked():
txtItem = self.scene.addSimpleText(name,self.font)
## this line can be useful for text sizing, although we cannot
## rotate the text if we ignore the transformations.
# txtItem.setFlag(qtw.QGraphicsItem.ItemIgnoresTransformations)
fm = qtg.QFontMetrics(txtItem.font())
fontHeight = fm.boundingRect(txtItem.text()).height()
fontWidth = fm.boundingRect(txtItem.text()).width()
txtItem.setPos(self.padding,y+0.5*(h-fontHeight))
txtItem.setFlag(qtw.QGraphicsItem.ItemIsMovable)
txtItem.setFlag(qtw.QGraphicsItem.ItemIsSelectable)
txtItem.setZValue(2)
if self.showNumberAction.isChecked():
numTxtItem = self.scene.addSimpleText('%.3g' % val, self.font)
## this line can be useful for text sizing, although we cannot
## rotate the text if we ignore the transformations.
# numTxtItem.setFlag(qtw.QGraphicsItem.ItemIgnoresTransformations)
fm = qtg.QFontMetrics(numTxtItem.font())
fontWidth = fm.boundingRect(numTxtItem.text()).width()
fontHeight = fm.boundingRect(numTxtItem.text()).height()
numTxtItem.setPos(self.padding+maxExtent-fontWidth,y+0.5*(h-fontHeight))
numTxtItem.setFlag(qtw.QGraphicsItem.ItemIsMovable)
numTxtItem.setFlag(qtw.QGraphicsItem.ItemIsSelectable)
numTxtItem.setZValue(2)
myRect = self.scene.addRect(x,y,w,h,myPen,myBrush)
myRect.setToolTip(str(val))
myRect.setAcceptHoverEvents(True)
x = self.padding
y = (height-self.padding) - j*axisHeight
w = maxExtent
h = -axisHeight
myRect = self.scene.addRect(x,y,w,h,axisPen)
myRect.setZValue(2) # Any value greater than 1 should work to draw on top
if self.signedAction.isChecked():
axisPen = qtg.QPen(qtc.Qt.black)
axisPen.setWidthF(.5)
x = self.padding + maxExtent/2.
y = self.padding
h = plotHeight
self.scene.addLine(x,y,x,y+h,axisPen)
def updateScene(self):
""" A method for drawing the scene of this view.
"""
if not self.amsc.FitsSynced():
self.scene.setSceneRect(0,0,self.gView.width(),self.gView.height())
self.scene.clear()
txtItem = self.scene.addSimpleText('Rebuild Local Models',self.font)
txtItem.setPos(self.padding,self.padding)
txtItem.setFlag(qtw.QGraphicsItem.ItemIgnoresTransformations)
else:
if self.fillAction.isChecked():
self.scene.setSceneRect(0,0,100*float(self.gView.width())/float(self.gView.height()),100)
else:
self.scene.setSceneRect(0,0,100,100)
if self.shapeGroup.checkedAction().text() == 'Radial':
self.bundledAction.setEnabled(False)
self.signedAction.setEnabled(False)
self.showNumberAction.setEnabled(False)
self.fillAction.setEnabled(False)
self.layoutRadialScene()
else:
self.bundledAction.setEnabled(True)
self.signedAction.setEnabled(True)
self.showNumberAction.setEnabled(True)
self.fillAction.setEnabled(True)
self.layoutBarScene()
self.gView.fitInView(self.scene.sceneRect(),qtc.Qt.KeepAspectRatio)
self.scene.changed.connect(self.scene.invalidate)
def test(self):
"""
A test function for performing operations on this class that need to be
automatically tested such as simulating mouse and keyboard events, and
other internal operations. For this class in particular, we will test:
- Building of the models (which allows for the actual display of
information on this view)
        - Cycling through all permutations of the display features, which
          include the radial/bar layouts, the bundling of dimensions or segments
of data, the display of signed or unsigned information, and whether
the plot fills the viewport or maintains a square aspect ratio.
- Setting the selection of data and ensuring this view updates.
        - Saving the buffer of this view in both svg and png formats.
- Triggering of the resize event.
@ In, None
@ Out, None
"""
self.amsc.BuildModels()
for action in self.shapeGroup.actions():
action.setChecked(True)
for value in self.valueGroup.actions():
value.setChecked(True)
self.amsc.ClearSelection()
self.signedAction.setChecked(True)
self.bundledAction.setChecked(True)
self.fillAction.setChecked(True)
self.updateScene()
self.signedAction.setChecked(False)
self.bundledAction.setChecked(False)
self.fillAction.setChecked(False)
self.updateScene()
pair = list(self.amsc.GetCurrentLabels())[0]
self.amsc.SetSelection([pair,pair[0],pair[1]])
self.updateScene()
self.saveImage(self.windowTitle()+'.svg')
self.saveImage(self.windowTitle()+'.png')
self.resizeEvent(qtg.QResizeEvent(qtc.QSize(1,1),qtc.QSize(100,100)))
super(SensitivityView, self).test()
| apache-2.0 |
knoguchi/kenix-scm | server/lib/boto/dynamodb2/layer1.py | 11 | 81650 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from binascii import crc32
try:
import json
except ImportError:
import simplejson as json
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.dynamodb2 import exceptions
class DynamoDBConnection(AWSQueryConnection):
"""
Amazon DynamoDB **Overview**
This is the Amazon DynamoDB API Reference. This guide provides
descriptions and samples of the Amazon DynamoDB API.
"""
APIVersion = "2012-08-10"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "dynamodb.us-east-1.amazonaws.com"
ServiceName = "DynamoDB"
TargetPrefix = "DynamoDB_20120810"
ResponseError = JSONResponseError
_faults = {
"ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
"LimitExceededException": exceptions.LimitExceededException,
"ConditionalCheckFailedException": exceptions.ConditionalCheckFailedException,
"ResourceInUseException": exceptions.ResourceInUseException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InternalServerError": exceptions.InternalServerError,
"ItemCollectionSizeLimitExceededException": exceptions.ItemCollectionSizeLimitExceededException,
"ValidationException": exceptions.ValidationException,
}
NumberRetries = 10
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
validate_checksums = kwargs.pop('validate_checksums', True)
if not region:
region_name = boto.config.get('DynamoDB', 'region',
self.DefaultRegionName)
for reg in boto.dynamodb2.regions():
if reg.name == region_name:
region = reg
break
# Only set host if it isn't manually overwritten
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(DynamoDBConnection, self).__init__(**kwargs)
self.region = region
self._validate_checksums = boto.config.getbool(
'DynamoDB', 'validate_checksums', validate_checksums)
self.throughput_exceeded_events = 0
def _required_auth_capability(self):
return ['hmac-v4']
def batch_get_item(self, request_items, return_consumed_capacity=None):
"""
The BatchGetItem operation returns the attributes of one or
more items from one or more tables. You identify requested
items by primary key.
A single operation can retrieve up to 1 MB of data, which can
comprise as many as 100 items. BatchGetItem will return a
partial result if the response size limit is exceeded, the
table's provisioned throughput is exceeded, or an internal
processing failure occurs. If a partial result is returned,
the operation returns a value for UnprocessedKeys . You can
use this value to retry the operation starting with the next
item to get.
For example, if you ask to retrieve 100 items, but each
individual item is 50 KB in size, the system returns 20 items
(1 MB) and an appropriate UnprocessedKeys value so you can get
the next page of results. If desired, your application can
include its own logic to assemble the pages of results into
one dataset.
If no items can be processed because of insufficient
provisioned throughput on each of the tables involved in the
request, BatchGetItem throws
ProvisionedThroughputExceededException .
By default, BatchGetItem performs eventually consistent reads
on every table in the request. If you want strongly consistent
reads instead, you can set ConsistentRead to `True` for any or
all tables.
In order to minimize response latency, BatchGetItem fetches
items in parallel.
When designing your application, keep in mind that Amazon
DynamoDB does not return attributes in any particular order.
To help parse the response by item, include the primary key
values for the items in your request in the AttributesToGet
parameter.
If a requested item does not exist, it is not returned in the
result. Requests for nonexistent items consume the minimum
read capacity units according to the type of read. For more
information, see `Capacity Units Calculations`_ in the Amazon
DynamoDB Developer Guide.
:type request_items: map
:param request_items:
A map of one or more table names and, for each table, the corresponding
primary keys for the items to retrieve. Each table name can be
invoked only once.
Each element in the map consists of the following:
+ Keys - An array of primary key attribute values that define specific
items in the table.
+ AttributesToGet - One or more attributes to be retrieved from the
table or index. By default, all attributes are returned. If a
specified attribute is not found, it does not appear in the result.
+ ConsistentRead - If `True`, a strongly consistent read is used; if
`False` (the default), an eventually consistent read is used.
:type return_consumed_capacity: string
:param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
included in the response; if set to `NONE` (the default),
ConsumedCapacity is not included.
"""
params = {'RequestItems': request_items, }
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
return self.make_request(action='BatchGetItem',
body=json.dumps(params))
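    # Illustrative sketch (not part of the original boto source): a possible
    # request_items structure for batch_get_item using the low-level
    # attribute-value format. The table name 'users' and the key attribute
    # 'username' are made-up placeholders.
    #
    #   conn = DynamoDBConnection()
    #   result = conn.batch_get_item(request_items={
    #       'users': {
    #           'Keys': [
    #               {'username': {'S': 'alice'}},
    #               {'username': {'S': 'bob'}},
    #           ],
    #           'ConsistentRead': True,
    #       }
    #   })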
def batch_write_item(self, request_items, return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
The BatchWriteItem operation puts or deletes multiple items in
one or more tables. A single call to BatchWriteItem can write
up to 1 MB of data, which can comprise as many as 25 put or
delete requests. Individual items to be written can be as
large as 64 KB.
BatchWriteItem cannot update items. To update items, use the
UpdateItem API.
The individual PutItem and DeleteItem operations specified in
BatchWriteItem are atomic; however BatchWriteItem as a whole
is not. If any requested operations fail because the table's
provisioned throughput is exceeded or an internal processing
failure occurs, the failed operations are returned in the
UnprocessedItems response parameter. You can investigate and
optionally resend the requests. Typically, you would call
BatchWriteItem in a loop. Each iteration would check for
unprocessed items and submit a new BatchWriteItem request with
those unprocessed items until all items have been processed.
To write one item, you can use the PutItem operation; to
delete one item, you can use the DeleteItem operation.
With BatchWriteItem , you can efficiently write or delete
large amounts of data, such as from Amazon Elastic MapReduce
(EMR), or copy data from another database into Amazon
DynamoDB. In order to improve performance with these large-
scale operations, BatchWriteItem does not behave in the same
        way as individual PutItem and DeleteItem calls would. For
example, you cannot specify conditions on individual put and
delete requests, and BatchWriteItem does not return deleted
items in the response.
If you use a programming language that supports concurrency,
such as Java, you can use threads to write items in parallel.
Your application must include the necessary logic to manage
the threads.
With languages that don't support threading, such as PHP,
BatchWriteItem will write or delete the specified items one at
a time. In both situations, BatchWriteItem provides an
alternative where the API performs the specified put and
delete operations in parallel, giving you the power of the
thread pool approach without having to introduce complexity
into your application.
Parallel processing reduces latency, but each specified put
and delete request consumes the same number of write capacity
units whether it is processed in parallel or not. Delete
operations on nonexistent items consume one write capacity
unit.
If one or more of the following is true, Amazon DynamoDB
rejects the entire batch write operation:
+ One or more tables specified in the BatchWriteItem request
does not exist.
+ Primary key attributes specified on an item in the request
do not match those in the corresponding table's primary key
schema.
+ You try to perform multiple operations on the same item in
the same BatchWriteItem request. For example, you cannot put
and delete the same item in the same BatchWriteItem request.
+ The total request size exceeds 1 MB.
+ Any individual item in a batch exceeds 64 KB.
:type request_items: map
:param request_items:
A map of one or more table names and, for each table, a list of
operations to be performed ( DeleteRequest or PutRequest ). Each
element in the map consists of the following:
+ DeleteRequest - Perform a DeleteItem operation on the specified item.
The item to be deleted is identified by a Key subelement:
+ Key - A map of primary key attribute values that uniquely identify
the item. Each entry in this map consists of an attribute name and
an attribute value.
+ PutRequest - Perform a PutItem operation on the specified item. The
item to be put is identified by an Item subelement:
+ Item - A map of attributes and their values. Each entry in this map
consists of an attribute name and an attribute value. Attribute
values must not be null; string and binary type attributes must
have lengths greater than zero; and set type attributes must not be
empty. Requests that contain empty values will be rejected with a
ValidationException . If you specify any attributes that are part
of an index key, then the data types for those attributes must
match those of the schema in the table's attribute definition.
:type return_consumed_capacity: string
:param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
included in the response; if set to `NONE` (the default),
ConsumedCapacity is not included.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, statistics
about item collections, if any, that were modified during the
operation are returned in the response. If set to `NONE` (the
default), no statistics are returned..
"""
params = {'RequestItems': request_items, }
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics is not None:
params['ReturnItemCollectionMetrics'] = return_item_collection_metrics
return self.make_request(action='BatchWriteItem',
body=json.dumps(params))
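    # Illustrative sketch (assumed table and attribute names): one PutRequest
    # and one DeleteRequest batched against a hypothetical 'users' table.
    #
    #   conn.batch_write_item(request_items={
    #       'users': [
    #           {'PutRequest': {'Item': {'username': {'S': 'carol'},
    #                                    'age': {'N': '30'}}}},
    #           {'DeleteRequest': {'Key': {'username': {'S': 'bob'}}}},
    #       ]
    #   })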
def create_table(self, attribute_definitions, table_name, key_schema,
provisioned_throughput, local_secondary_indexes=None,
global_secondary_indexes=None):
"""
The CreateTable operation adds a new table to your account. In
an AWS account, table names must be unique within each region.
That is, you can have two tables with same name if you create
the tables in different regions.
CreateTable is an asynchronous operation. Upon receiving a
CreateTable request, Amazon DynamoDB immediately returns a
response with a TableStatus of `CREATING`. After the table is
created, Amazon DynamoDB sets the TableStatus to `ACTIVE`. You
can perform read and write operations only on an `ACTIVE`
table.
If you want to create multiple tables with local secondary
indexes on them, you must create them sequentially. Only one
table with local secondary indexes can be in the `CREATING`
state at any given time.
You can use the DescribeTable API to check the table status.
:type attribute_definitions: list
:param attribute_definitions: An array of attributes that describe the
key schema for the table and indexes.
:type table_name: string
:param table_name: The name of the table to create.
:type key_schema: list
:param key_schema: Specifies the attributes that make up the primary
key for the table. The attributes in KeySchema must also be defined
in the AttributeDefinitions array. For more information, see `Data
Model`_ in the Amazon DynamoDB Developer Guide.
Each KeySchemaElement in the array is composed of:
+ AttributeName - The name of this key attribute.
+ KeyType - Determines whether the key attribute is `HASH` or `RANGE`.
For a primary key that consists of a hash attribute, you must specify
exactly one element with a KeyType of `HASH`.
For a primary key that consists of hash and range attributes, you must
specify exactly two elements, in this order: The first element must
have a KeyType of `HASH`, and the second element must have a
KeyType of `RANGE`.
For more information, see `Specifying the Primary Key`_ in the Amazon
DynamoDB Developer Guide.
:type local_secondary_indexes: list
:param local_secondary_indexes:
One or more secondary indexes (the maximum is five) to be created on
the table. Each index is scoped to a given hash key value. There is
a 10 gigabyte size limit per hash key; otherwise, the size of a
local secondary index is unconstrained.
Each secondary index in the array includes the following:
+ IndexName - The name of the secondary index. Must be unique only for
this table.
+ KeySchema - Specifies the key schema for the index. The key schema
must begin with the same hash key attribute as the table.
+ Projection - Specifies attributes that are copied (projected) from
the table into the index. These are in addition to the primary key
attributes and index key attributes, which are automatically
projected. Each attribute specification is composed of:
+ ProjectionType - One of the following:
+ `KEYS_ONLY` - Only the index and primary keys are projected into the
index.
+ `INCLUDE` - Only the specified table attributes are projected into
the index. The list of projected attributes are in NonKeyAttributes
.
+ `ALL` - All of the table attributes are projected into the index.
+ NonKeyAttributes - A list of one or more non-key attribute names that
are projected into the index. The total count of attributes
specified in NonKeyAttributes , summed across all of the local
secondary indexes, must not exceed 20. If you project the same
attribute into two different indexes, this counts as two distinct
attributes when determining the total.
:type global_secondary_indexes: list
:param global_secondary_indexes:
:type provisioned_throughput: dict
:param provisioned_throughput: The provisioned throughput settings for
the specified table. The settings can be modified using the
UpdateTable operation.
For current minimum and maximum provisioned throughput values, see
`Limits`_ in the Amazon DynamoDB Developer Guide.
"""
params = {
'AttributeDefinitions': attribute_definitions,
'TableName': table_name,
'KeySchema': key_schema,
'ProvisionedThroughput': provisioned_throughput,
}
if local_secondary_indexes is not None:
params['LocalSecondaryIndexes'] = local_secondary_indexes
if global_secondary_indexes is not None:
params['GlobalSecondaryIndexes'] = global_secondary_indexes
return self.make_request(action='CreateTable',
body=json.dumps(params))
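    # Illustrative sketch (assumed names and throughput values): creating a
    # table with a hash + range primary key.
    #
    #   conn.create_table(
    #       table_name='users',
    #       attribute_definitions=[
    #           {'AttributeName': 'username', 'AttributeType': 'S'},
    #           {'AttributeName': 'signup_date', 'AttributeType': 'N'},
    #       ],
    #       key_schema=[
    #           {'AttributeName': 'username', 'KeyType': 'HASH'},
    #           {'AttributeName': 'signup_date', 'KeyType': 'RANGE'},
    #       ],
    #       provisioned_throughput={'ReadCapacityUnits': 5,
    #                               'WriteCapacityUnits': 5},
    #   )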
def delete_item(self, table_name, key, expected=None, return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Deletes a single item in a table by primary key. You can
perform a conditional delete operation that deletes the item
if it exists, or if it has an expected attribute value.
In addition to deleting an item, you can also return the
item's attribute values in the same operation, using the
ReturnValues parameter.
Unless you specify conditions, the DeleteItem is an idempotent
operation; running it multiple times on the same item or
attribute does not result in an error response.
Conditional deletes are useful for only deleting items if
specific conditions are met. If those conditions are met,
Amazon DynamoDB performs the delete. Otherwise, the item is
not deleted.
:type table_name: string
:param table_name: The name of the table from which to delete the item.
:type key: map
:param key: A map of attribute names to AttributeValue objects,
representing the primary key of the item to delete.
:type expected: map
:param expected: A map of attribute/condition pairs. This is the
conditional block for the DeleteItem operation. All the conditions
must be met for the operation to succeed.
Expected allows you to provide an attribute name, and whether or not
Amazon DynamoDB should check to see if the attribute value already
exists; or if the attribute value exists and has a particular value
before changing it.
Each item in Expected represents an attribute name for Amazon DynamoDB
to check, along with the following:
+ Value - The attribute value for Amazon DynamoDB to check.
+ Exists - Causes Amazon DynamoDB to evaluate the value before
attempting a conditional operation:
+ If Exists is `True`, Amazon DynamoDB will check to see if that
attribute value already exists in the table. If it is found, then
the operation succeeds. If it is not found, the operation fails
with a ConditionalCheckFailedException .
+ If Exists is `False`, Amazon DynamoDB assumes that the attribute
value does not exist in the table. If in fact the value does not
exist, then the assumption is valid and the operation succeeds. If
the value is found, despite the assumption that it does not exist,
the operation fails with a ConditionalCheckFailedException .
The default setting for Exists is `True`. If you supply a Value all by
itself, Amazon DynamoDB assumes the attribute exists: You don't
have to set Exists to `True`, because it is implied. Amazon
DynamoDB returns a ValidationException if:
+ Exists is `True` but there is no Value to check. (You expect a value
to exist, but don't specify what that value is.)
+ Exists is `False` but you also specify a Value . (You cannot expect
an attribute to have a value, while also expecting it not to
exist.)
If you specify more than one condition for Exists , then all of the
conditions must evaluate to true. (In other words, the conditions
are ANDed together.) Otherwise, the conditional operation will
fail.
:type return_values: string
:param return_values:
Use ReturnValues if you want to get the item attributes as they
appeared before they were deleted. For DeleteItem , the valid
values are:
+ `NONE` - If ReturnValues is not specified, or if its value is `NONE`,
then nothing is returned. (This is the default for ReturnValues .)
+ `ALL_OLD` - The content of the old item is returned.
:type return_consumed_capacity: string
:param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
included in the response; if set to `NONE` (the default),
ConsumedCapacity is not included.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, statistics
about item collections, if any, that were modified during the
operation are returned in the response. If set to `NONE` (the
default), no statistics are returned..
"""
params = {'TableName': table_name, 'Key': key, }
if expected is not None:
params['Expected'] = expected
if return_values is not None:
params['ReturnValues'] = return_values
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics is not None:
params['ReturnItemCollectionMetrics'] = return_item_collection_metrics
return self.make_request(action='DeleteItem',
body=json.dumps(params))
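    # Illustrative sketch (assumed names): a conditional delete that succeeds
    # only if the 'active' attribute currently equals the number 0, and that
    # returns the old item's attributes.
    #
    #   conn.delete_item(
    #       table_name='users',
    #       key={'username': {'S': 'bob'}},
    #       expected={'active': {'Value': {'N': '0'}}},
    #       return_values='ALL_OLD',
    #   )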
def delete_table(self, table_name):
"""
The DeleteTable operation deletes a table and all of its
items. After a DeleteTable request, the specified table is in
the `DELETING` state until Amazon DynamoDB completes the
deletion. If the table is in the `ACTIVE` state, you can
delete it. If a table is in `CREATING` or `UPDATING` states,
then Amazon DynamoDB returns a ResourceInUseException . If the
specified table does not exist, Amazon DynamoDB returns a
ResourceNotFoundException . If table is already in the
`DELETING` state, no error is returned.
Amazon DynamoDB might continue to accept data read and write
operations, such as GetItem and PutItem , on a table in the
`DELETING` state until the table deletion is complete.
When you delete a table, any local secondary indexes on that
table are also deleted.
Use the DescribeTable API to check the status of the table.
:type table_name: string
:param table_name: The name of the table to delete.
"""
params = {'TableName': table_name, }
return self.make_request(action='DeleteTable',
body=json.dumps(params))
def describe_table(self, table_name):
"""
Returns information about the table, including the current
status of the table, when it was created, the primary key
schema, and any indexes on the table.
:type table_name: string
:param table_name: The name of the table to describe.
"""
params = {'TableName': table_name, }
return self.make_request(action='DescribeTable',
body=json.dumps(params))
def get_item(self, table_name, key, attributes_to_get=None,
consistent_read=None, return_consumed_capacity=None):
"""
The GetItem operation returns a set of attributes for the item
with the given primary key. If there is no matching item,
GetItem does not return any data.
GetItem provides an eventually consistent read by default. If
your application requires a strongly consistent read, set
ConsistentRead to `True`. Although a strongly consistent read
might take more time than an eventually consistent read, it
always returns the last updated value.
:type table_name: string
:param table_name: The name of the table containing the requested item.
:type key: map
:param key: A map of attribute names to AttributeValue objects,
representing the primary key of the item to retrieve.
:type attributes_to_get: list
:param attributes_to_get: The names of one or more attributes to
retrieve. If no attribute names are specified, then all attributes
will be returned. If any of the requested attributes are not found,
they will not appear in the result.
:type consistent_read: boolean
:param consistent_read: If set to `True`, then the operation uses
strongly consistent reads; otherwise, eventually consistent reads
are used.
:type return_consumed_capacity: string
:param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
included in the response; if set to `NONE` (the default),
ConsumedCapacity is not included.
"""
params = {'TableName': table_name, 'Key': key, }
if attributes_to_get is not None:
params['AttributesToGet'] = attributes_to_get
if consistent_read is not None:
params['ConsistentRead'] = consistent_read
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
return self.make_request(action='GetItem',
body=json.dumps(params))
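    # Illustrative usage sketch (hypothetical table and attribute names;
    # `conn` is assumed to be an instance of this connection class): a
    # strongly consistent read using the low-level AttributeValue maps
    # described above.
    #
    #     item = conn.get_item(
    #         table_name='users',
    #         key={'user_id': {'S': 'u-42'}},
    #         attributes_to_get=['user_id', 'email'],
    #         consistent_read=True)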
def list_tables(self, exclusive_start_table_name=None, limit=None):
"""
Returns an array of all the tables associated with the current
account and endpoint.
:type exclusive_start_table_name: string
:param exclusive_start_table_name: The name of the table that starts
the list. If you already ran a ListTables operation and received a
LastEvaluatedTableName value in the response, use that value here
to continue the list.
:type limit: integer
:param limit: A maximum number of table names to return.
"""
params = {}
if exclusive_start_table_name is not None:
params['ExclusiveStartTableName'] = exclusive_start_table_name
if limit is not None:
params['Limit'] = limit
return self.make_request(action='ListTables',
body=json.dumps(params))
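    # Illustrative pagination sketch (`conn` is assumed to be an instance of
    # this connection class): keep requesting pages until no
    # LastEvaluatedTableName comes back. The response field names follow the
    # DynamoDB ListTables API.
    #
    #     tables, start = [], None
    #     while True:
    #         page = conn.list_tables(exclusive_start_table_name=start,
    #                                 limit=10)
    #         tables.extend(page.get('TableNames', []))
    #         start = page.get('LastEvaluatedTableName')
    #         if start is None:
    #             break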
def put_item(self, table_name, item, expected=None, return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Creates a new item, or replaces an old item with a new item.
If an item already exists in the specified table with the same
primary key, the new item completely replaces the existing
item. You can perform a conditional put (insert a new item if
one with the specified primary key doesn't exist), or replace
an existing item if it has certain attribute values.
In addition to putting an item, you can also return the item's
attribute values in the same operation, using the ReturnValues
parameter.
When you add an item, the primary key attribute(s) are the
only required attributes. Attribute values cannot be null.
String and binary type attributes must have lengths greater
than zero. Set type attributes cannot be empty. Requests with
empty values will be rejected with a ValidationException .
You can request that PutItem return either a copy of the old
item (before the update) or a copy of the new item (after the
update). For more information, see the ReturnValues
description.
To prevent a new item from replacing an existing item, use a
conditional put operation with Exists set to `False` for the
primary key attribute, or attributes.
For more information about using this API, see `Working with
Items`_ in the Amazon DynamoDB Developer Guide.
:type table_name: string
:param table_name: The name of the table to contain the item.
:type item: map
:param item: A map of attribute name/value pairs, one for each
attribute. Only the primary key attributes are required; you can
optionally provide other attribute name-value pairs for the item.
If you specify any attributes that are part of an index key, then the
data types for those attributes must match those of the schema in
the table's attribute definition.
For more information about primary keys, see `Primary Key`_ in the
Amazon DynamoDB Developer Guide.
Each element in the Item map is an AttributeValue object.
:type expected: map
:param expected: A map of attribute/condition pairs. This is the
conditional block for the PutItem operation. All the conditions
must be met for the operation to succeed.
Expected allows you to provide an attribute name, and whether or not
Amazon DynamoDB should check to see if the attribute value already
exists; or if the attribute value exists and has a particular value
before changing it.
Each item in Expected represents an attribute name for Amazon DynamoDB
to check, along with the following:
+ Value - The attribute value for Amazon DynamoDB to check.
+ Exists - Causes Amazon DynamoDB to evaluate the value before
attempting a conditional operation:
+ If Exists is `True`, Amazon DynamoDB will check to see if that
attribute value already exists in the table. If it is found, then
the operation succeeds. If it is not found, the operation fails
with a ConditionalCheckFailedException .
+ If Exists is `False`, Amazon DynamoDB assumes that the attribute
value does not exist in the table. If in fact the value does not
exist, then the assumption is valid and the operation succeeds. If
the value is found, despite the assumption that it does not exist,
the operation fails with a ConditionalCheckFailedException .
The default setting for Exists is `True`. If you supply a Value all by
itself, Amazon DynamoDB assumes the attribute exists: You don't
have to set Exists to `True`, because it is implied. Amazon
DynamoDB returns a ValidationException if:
+ Exists is `True` but there is no Value to check. (You expect a value
to exist, but don't specify what that value is.)
+ Exists is `False` but you also specify a Value . (You cannot expect
an attribute to have a value, while also expecting it not to
exist.)
If you specify more than one condition for Exists , then all of the
conditions must evaluate to true. (In other words, the conditions
are ANDed together.) Otherwise, the conditional operation will
fail.
:type return_values: string
:param return_values:
Use ReturnValues if you want to get the item attributes as they
appeared before they were updated with the PutItem request. For
PutItem , the valid values are:
+ `NONE` - If ReturnValues is not specified, or if its value is `NONE`,
then nothing is returned. (This is the default for ReturnValues .)
+ `ALL_OLD` - If PutItem overwrote an attribute name-value pair, then
the content of the old item is returned.
:type return_consumed_capacity: string
:param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
included in the response; if set to `NONE` (the default),
ConsumedCapacity is not included.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, statistics
about item collections, if any, that were modified during the
operation are returned in the response. If set to `NONE` (the
            default), no statistics are returned.
"""
params = {'TableName': table_name, 'Item': item, }
if expected is not None:
params['Expected'] = expected
if return_values is not None:
params['ReturnValues'] = return_values
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics is not None:
params['ReturnItemCollectionMetrics'] = return_item_collection_metrics
return self.make_request(action='PutItem',
body=json.dumps(params))
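    # Illustrative usage sketch (hypothetical names; `conn` is assumed to be
    # an instance of this connection class): a conditional put that only
    # creates the item if no item with this primary key exists yet, using
    # Exists=False as described above.
    #
    #     conn.put_item(
    #         table_name='users',
    #         item={'user_id': {'S': 'u-42'},
    #               'email': {'S': '[email protected]'}},
    #         expected={'user_id': {'Exists': False}})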
def query(self, table_name, index_name=None, select=None,
attributes_to_get=None, limit=None, consistent_read=None,
key_conditions=None, scan_index_forward=None,
exclusive_start_key=None, return_consumed_capacity=None):
"""
A Query operation directly accesses items from a table using
the table primary key, or from an index using the index key.
You must provide a specific hash key value. You can narrow the
scope of the query by using comparison operators on the range
key value, or on the index key. You can use the
ScanIndexForward parameter to get results in forward or
reverse order, by range key or by index key.
Queries that do not return results consume the minimum read
capacity units according to the type of read.
If the total number of items meeting the query criteria
exceeds the result set size limit of 1 MB, the query stops and
results are returned to the user with a LastEvaluatedKey to
continue the query in a subsequent operation. Unlike a Scan
        operation, a Query operation never returns both an empty result
        set and a LastEvaluatedKey . The LastEvaluatedKey is only provided
if the results exceed 1 MB, or if you have used Limit .
To request a strongly consistent result, set ConsistentRead to
true.
:type table_name: string
:param table_name: The name of the table containing the requested
items.
:type index_name: string
:param index_name: The name of an index on the table to query.
:type select: string
:param select: The attributes to be returned in the result. You can
retrieve all item attributes, specific item attributes, the count
of matching items, or in the case of an index, some or all of the
attributes projected into the index.
+ `ALL_ATTRIBUTES`: Returns all of the item attributes. For a table,
this is the default. For an index, this mode causes Amazon DynamoDB
to fetch the full item from the table for each matching item in the
index. If the index is configured to project all item attributes,
the matching items will not be fetched from the table. Fetching
items from the table incurs additional throughput cost and latency.
+ `ALL_PROJECTED_ATTRIBUTES`: Allowed only when querying an index.
Retrieves all attributes which have been projected into the index.
If the index is configured to project all attributes, this is
equivalent to specifying ALL_ATTRIBUTES .
+ `COUNT`: Returns the number of matching items, rather than the
matching items themselves.
+ `SPECIFIC_ATTRIBUTES` : Returns only the attributes listed in
AttributesToGet . This is equivalent to specifying AttributesToGet
without specifying any value for Select . If you are querying an
index and request only attributes that are projected into that
index, the operation will read only the index and not the table. If
any of the requested attributes are not projected into the index,
Amazon DynamoDB will need to fetch each matching item from the
table. This extra fetching incurs additional throughput cost and
latency.
When neither Select nor AttributesToGet are specified, Amazon DynamoDB
defaults to `ALL_ATTRIBUTES` when accessing a table, and
`ALL_PROJECTED_ATTRIBUTES` when accessing an index. You cannot use
both Select and AttributesToGet together in a single request,
unless the value for Select is `SPECIFIC_ATTRIBUTES`. (This usage
is equivalent to specifying AttributesToGet without any value for
Select .)
:type attributes_to_get: list
:param attributes_to_get: The names of one or more attributes to
retrieve. If no attribute names are specified, then all attributes
will be returned. If any of the requested attributes are not found,
they will not appear in the result.
If you are querying an index and request only attributes that are
projected into that index, the operation will read only the index
and not the table. If any of the requested attributes are not
projected into the index, Amazon DynamoDB will need to fetch each
matching item from the table. This extra fetching incurs additional
throughput cost and latency.
You cannot use both AttributesToGet and Select together in a Query
request, unless the value for Select is `SPECIFIC_ATTRIBUTES`.
(This usage is equivalent to specifying AttributesToGet without any
value for Select .)
:type limit: integer
:param limit: The maximum number of items to evaluate (not necessarily
the number of matching items). If Amazon DynamoDB processes the
number of items up to the limit while processing the results, it
stops the operation and returns the matching values up to that
point, and a LastEvaluatedKey to apply in a subsequent operation,
so that you can pick up where you left off. Also, if the processed
data set size exceeds 1 MB before Amazon DynamoDB reaches this
limit, it stops the operation and returns the matching values up to
the limit, and a LastEvaluatedKey to apply in a subsequent
operation to continue the operation. For more information see
`Query and Scan`_ in the Amazon DynamoDB Developer Guide.
:type consistent_read: boolean
:param consistent_read: If set to `True`, then the operation uses
strongly consistent reads; otherwise, eventually consistent reads
are used.
:type key_conditions: map
:param key_conditions:
The selection criteria for the query.
For a query on a table, you can only have conditions on the table
primary key attributes. You must specify the hash key attribute
name and value as an `EQ` condition. You can optionally specify a
second condition, referring to the range key attribute.
For a query on a secondary index, you can only have conditions on the
index key attributes. You must specify the index hash attribute
name and value as an EQ condition. You can optionally specify a
second condition, referring to the index key range attribute.
Multiple conditions are evaluated using "AND"; in other words, all of
the conditions must be met in order for an item to appear in the
            results.
Each KeyConditions element consists of an attribute name to compare,
along with the following:
+ AttributeValueList - One or more values to evaluate against the
supplied attribute. This list contains exactly one value, except
for a `BETWEEN` or `IN` comparison, in which case the list contains
two values. For type Number, value comparisons are numeric. String
value comparisons for greater than, equals, or less than are based
on ASCII character code values. For example, `a` is greater than
`A`, and `aa` is greater than `B`. For a list of code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For Binary, Amazon DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
evaluating query expressions.
+ ComparisonOperator - A comparator for evaluating attributes. For
example, equals, greater than, less than, etc. Valid comparison
operators for Query: `EQ | LE | LT | GE | GT | BEGINS_WITH |
BETWEEN` For information on specifying data types in JSON, see
`JSON Data Format`_ in the Amazon DynamoDB Developer Guide. The
following are descriptions of each comparison operator.
+ `EQ` : Equal. AttributeValueList can contain only one AttributeValue
of type String, Number, or Binary (not a set). If an item contains
an AttributeValue of a different type than the one specified in the
request, the value does not match. For example, `{"S":"6"}` does
not equal `{"N":"6"}`. Also, `{"N":"6"}` does not equal
`{"NS":["6", "2", "1"]}`.
+ `LE` : Less than or equal. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set). If an
item contains an AttributeValue of a different type than the one
specified in the request, the value does not match. For example,
`{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
compare to `{"NS":["6", "2", "1"]}`.
+ `LT` : Less than. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set). If an
item contains an AttributeValue of a different type than the one
specified in the request, the value does not match. For example,
`{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
compare to `{"NS":["6", "2", "1"]}`.
+ `GE` : Greater than or equal. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set). If an
item contains an AttributeValue of a different type than the one
specified in the request, the value does not match. For example,
`{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
compare to `{"NS":["6", "2", "1"]}`.
+ `GT` : Greater than. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set). If an
item contains an AttributeValue of a different type than the one
specified in the request, the value does not match. For example,
`{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
compare to `{"NS":["6", "2", "1"]}`.
+ `BEGINS_WITH` : checks for a prefix. AttributeValueList can contain
only one AttributeValue of type String or Binary (not a Number or a
set). The target attribute of the comparison must be a String or
Binary (not a Number or a set).
+ `BETWEEN` : Greater than or equal to the first value, and less than
or equal to the second value. AttributeValueList must contain two
AttributeValue elements of the same type, either String, Number, or
Binary (not a set). A target attribute matches if the target value
is greater than, or equal to, the first element and less than, or
equal to, the second element. If an item contains an AttributeValue
of a different type than the one specified in the request, the
value does not match. For example, `{"S":"6"}` does not compare to
`{"N":"6"}`. Also, `{"N":"6"}` does not compare to `{"NS":["6",
"2", "1"]}`
:type scan_index_forward: boolean
:param scan_index_forward: Specifies ascending (true) or descending
(false) traversal of the index. Amazon DynamoDB returns results
reflecting the requested order determined by the range key. If the
data type is Number, the results are returned in numeric order. For
String, the results are returned in order of ASCII character code
values. For Binary, Amazon DynamoDB treats each byte of the binary
data as unsigned when it compares binary values.
If ScanIndexForward is not specified, the results are returned in
ascending order.
:type exclusive_start_key: map
:param exclusive_start_key: The primary key of the first item that this
operation will evaluate. Use the value that was returned for
LastEvaluatedKey in the previous operation.
The data type for ExclusiveStartKey must be String, Number or Binary.
No set data types are allowed.
:type return_consumed_capacity: string
:param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
included in the response; if set to `NONE` (the default),
ConsumedCapacity is not included.
"""
params = {'TableName': table_name, }
if index_name is not None:
params['IndexName'] = index_name
if select is not None:
params['Select'] = select
if attributes_to_get is not None:
params['AttributesToGet'] = attributes_to_get
if limit is not None:
params['Limit'] = limit
if consistent_read is not None:
params['ConsistentRead'] = consistent_read
if key_conditions is not None:
params['KeyConditions'] = key_conditions
if scan_index_forward is not None:
params['ScanIndexForward'] = scan_index_forward
if exclusive_start_key is not None:
params['ExclusiveStartKey'] = exclusive_start_key
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
return self.make_request(action='Query',
body=json.dumps(params))
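    # Illustrative usage sketch (hypothetical table and attributes; `conn` is
    # assumed to be an instance of this connection class): an EQ condition on
    # the hash key plus a BETWEEN condition on the range key, newest items
    # first.
    #
    #     results = conn.query(
    #         table_name='events',
    #         key_conditions={
    #             'device_id': {'AttributeValueList': [{'S': 'sensor-1'}],
    #                           'ComparisonOperator': 'EQ'},
    #             'timestamp': {'AttributeValueList': [{'N': '1000'},
    #                                                  {'N': '2000'}],
    #                           'ComparisonOperator': 'BETWEEN'},
    #         },
    #         scan_index_forward=False,
    #         limit=25)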
def scan(self, table_name, attributes_to_get=None, limit=None,
select=None, scan_filter=None, exclusive_start_key=None,
return_consumed_capacity=None, total_segments=None,
segment=None):
"""
The Scan operation returns one or more items and item
attributes by accessing every item in the table. To have
Amazon DynamoDB return fewer items, you can provide a
ScanFilter .
If the total number of scanned items exceeds the maximum data
set size limit of 1 MB, the scan stops and results are
returned to the user with a LastEvaluatedKey to continue the
scan in a subsequent operation. The results also include the
number of items exceeding the limit. A scan can result in no
table data meeting the filter criteria.
The result set is eventually consistent.
By default, Scan operations proceed sequentially; however, for
faster performance on large tables, applications can request a
parallel Scan by specifying the Segment and TotalSegments
parameters. For more information, see `Parallel Scan`_ in the
Amazon DynamoDB Developer Guide.
:type table_name: string
:param table_name: The name of the table containing the requested
items.
:type attributes_to_get: list
:param attributes_to_get: The names of one or more attributes to
retrieve. If no attribute names are specified, then all attributes
will be returned. If any of the requested attributes are not found,
they will not appear in the result.
:type limit: integer
:param limit: The maximum number of items to evaluate (not necessarily
the number of matching items). If Amazon DynamoDB processes the
number of items up to the limit while processing the results, it
stops the operation and returns the matching values up to that
point, and a LastEvaluatedKey to apply in a subsequent operation,
so that you can pick up where you left off. Also, if the processed
data set size exceeds 1 MB before Amazon DynamoDB reaches this
limit, it stops the operation and returns the matching values up to
the limit, and a LastEvaluatedKey to apply in a subsequent
operation to continue the operation. For more information see
`Query and Scan`_ in the Amazon DynamoDB Developer Guide.
:type select: string
:param select: The attributes to be returned in the result. You can
retrieve all item attributes, specific item attributes, the count
of matching items, or in the case of an index, some or all of the
attributes projected into the index.
+ `ALL_ATTRIBUTES`: Returns all of the item attributes. For a table,
this is the default. For an index, this mode causes Amazon DynamoDB
to fetch the full item from the table for each matching item in the
index. If the index is configured to project all item attributes,
the matching items will not be fetched from the table. Fetching
items from the table incurs additional throughput cost and latency.
+ `ALL_PROJECTED_ATTRIBUTES`: Retrieves all attributes which have been
projected into the index. If the index is configured to project all
attributes, this is equivalent to specifying ALL_ATTRIBUTES .
+ `COUNT`: Returns the number of matching items, rather than the
matching items themselves.
+ `SPECIFIC_ATTRIBUTES` : Returns only the attributes listed in
AttributesToGet . This is equivalent to specifying AttributesToGet
without specifying any value for Select . If you are querying an
index and request only attributes that are projected into that
index, the operation will read only the index and not the table. If
any of the requested attributes are not projected into the index,
Amazon DynamoDB will need to fetch each matching item from the
table. This extra fetching incurs additional throughput cost and
latency.
When neither Select nor AttributesToGet are specified, Amazon DynamoDB
defaults to `ALL_ATTRIBUTES` when accessing a table, and
`ALL_PROJECTED_ATTRIBUTES` when accessing an index. You cannot use
both Select and AttributesToGet together in a single request,
unless the value for Select is `SPECIFIC_ATTRIBUTES`. (This usage
is equivalent to specifying AttributesToGet without any value for
Select .)
:type scan_filter: map
:param scan_filter:
Evaluates the scan results and returns only the desired values.
Multiple conditions are treated as "AND" operations: all conditions
must be met to be included in the results.
        Each ScanFilter element consists of an attribute name to compare,
along with the following:
+ AttributeValueList - One or more values to evaluate against the
supplied attribute. This list contains exactly one value, except
for a `BETWEEN` or `IN` comparison, in which case the list contains
two values. For type Number, value comparisons are numeric. String
value comparisons for greater than, equals, or less than are based
on ASCII character code values. For example, `a` is greater than
`A`, and `aa` is greater than `B`. For a list of code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For Binary, Amazon DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
evaluating query expressions.
+ ComparisonOperator - A comparator for evaluating attributes. For
example, equals, greater than, less than, etc. Valid comparison
operators for Scan: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL
| CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` For
information on specifying data types in JSON, see `JSON Data
Format`_ in the Amazon DynamoDB Developer Guide. The following are
descriptions of each comparison operator.
+ `EQ` : Equal. AttributeValueList can contain only one AttributeValue
of type String, Number, or Binary (not a set). If an item contains
an AttributeValue of a different type than the one specified in the
request, the value does not match. For example, `{"S":"6"}` does
not equal `{"N":"6"}`. Also, `{"N":"6"}` does not equal
`{"NS":["6", "2", "1"]}`.
+ `NE` : Not equal. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set). If an
item contains an AttributeValue of a different type than the one
specified in the request, the value does not match. For example,
`{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
equal `{"NS":["6", "2", "1"]}`.
+ `LE` : Less than or equal. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set). If an
item contains an AttributeValue of a different type than the one
specified in the request, the value does not match. For example,
`{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
compare to `{"NS":["6", "2", "1"]}`.
+ `LT` : Less than. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set). If an
item contains an AttributeValue of a different type than the one
specified in the request, the value does not match. For example,
`{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
compare to `{"NS":["6", "2", "1"]}`.
+ `GE` : Greater than or equal. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set). If an
item contains an AttributeValue of a different type than the one
specified in the request, the value does not match. For example,
`{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
compare to `{"NS":["6", "2", "1"]}`.
+ `GT` : Greater than. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set). If an
item contains an AttributeValue of a different type than the one
specified in the request, the value does not match. For example,
`{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
compare to `{"NS":["6", "2", "1"]}`.
+ `NOT_NULL` : The attribute exists.
+ `NULL` : The attribute does not exist.
+ `CONTAINS` : checks for a subsequence, or value in a set.
AttributeValueList can contain only one AttributeValue of type
String, Number, or Binary (not a set). If the target attribute of
the comparison is a String, then the operation checks for a
substring match. If the target attribute of the comparison is
Binary, then the operation looks for a subsequence of the target
that matches the input. If the target attribute of the comparison
is a set ("SS", "NS", or "BS"), then the operation checks for a
member of the set (not as a substring).
+ `NOT_CONTAINS` : checks for absence of a subsequence, or absence of a
value in a set. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set). If
the target attribute of the comparison is a String, then the
operation checks for the absence of a substring match. If the
target attribute of the comparison is Binary, then the operation
checks for the absence of a subsequence of the target that matches
the input. If the target attribute of the comparison is a set
("SS", "NS", or "BS"), then the operation checks for the absence of
a member of the set (not as a substring).
+ `BEGINS_WITH` : checks for a prefix. AttributeValueList can contain
only one AttributeValue of type String or Binary (not a Number or a
set). The target attribute of the comparison must be a String or
Binary (not a Number or a set).
+ `IN` : checks for exact matches. AttributeValueList can contain more
than one AttributeValue of type String, Number, or Binary (not a
set). The target attribute of the comparison must be of the same
type and exact value to match. A String never matches a String set.
+ `BETWEEN` : Greater than or equal to the first value, and less than
or equal to the second value. AttributeValueList must contain two
AttributeValue elements of the same type, either String, Number, or
Binary (not a set). A target attribute matches if the target value
is greater than, or equal to, the first element and less than, or
equal to, the second element. If an item contains an AttributeValue
of a different type than the one specified in the request, the
value does not match. For example, `{"S":"6"}` does not compare to
`{"N":"6"}`. Also, `{"N":"6"}` does not compare to `{"NS":["6",
"2", "1"]}`
:type exclusive_start_key: map
:param exclusive_start_key: The primary key of the first item that this
operation will evaluate. Use the value that was returned for
LastEvaluatedKey in the previous operation.
The data type for ExclusiveStartKey must be String, Number or Binary.
No set data types are allowed.
In a parallel scan, a Scan request that includes ExclusiveStartKey must
specify the same segment whose previous Scan returned the
corresponding value of LastEvaluatedKey .
:type return_consumed_capacity: string
:param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
included in the response; if set to `NONE` (the default),
ConsumedCapacity is not included.
:type total_segments: integer
:param total_segments: For a parallel Scan request, TotalSegments
represents the total number of segments into which the Scan
operation will be divided. The value of TotalSegments corresponds
to the number of application workers that will perform the parallel
scan. For example, if you want to scan a table using four
application threads, you would specify a TotalSegments value of 4.
The value for TotalSegments must be greater than or equal to 1, and
less than or equal to 4096. If you specify a TotalSegments value of
1, the Scan will be sequential rather than parallel.
If you specify TotalSegments , you must also specify Segment .
:type segment: integer
:param segment: For a parallel Scan request, Segment identifies an
individual segment to be scanned by an application worker.
Segment IDs are zero-based, so the first segment is always 0. For
example, if you want to scan a table using four application
threads, the first thread would specify a Segment value of 0, the
second thread would specify 1, and so on.
The value of LastEvaluatedKey returned from a parallel Scan request
must be used as ExclusiveStartKey with the same Segment ID in a
subsequent Scan operation.
The value for Segment must be greater than or equal to 0, and less than
the value provided for TotalSegments .
If you specify Segment , you must also specify TotalSegments .
"""
params = {'TableName': table_name, }
if attributes_to_get is not None:
params['AttributesToGet'] = attributes_to_get
if limit is not None:
params['Limit'] = limit
if select is not None:
params['Select'] = select
if scan_filter is not None:
params['ScanFilter'] = scan_filter
if exclusive_start_key is not None:
params['ExclusiveStartKey'] = exclusive_start_key
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if total_segments is not None:
params['TotalSegments'] = total_segments
if segment is not None:
params['Segment'] = segment
return self.make_request(action='Scan',
body=json.dumps(params))
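    # Illustrative parallel-scan sketch (hypothetical names; `conn` is assumed
    # to be an instance of this connection class): worker 0 of 4 scans its
    # segment, filtering on a non-key attribute, and would continue with the
    # returned LastEvaluatedKey as exclusive_start_key until it is absent.
    #
    #     page = conn.scan(
    #         table_name='events',
    #         scan_filter={'status': {'AttributeValueList': [{'S': 'error'}],
    #                                 'ComparisonOperator': 'EQ'}},
    #         total_segments=4,
    #         segment=0)
    #     next_key = page.get('LastEvaluatedKey')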
def update_item(self, table_name, key, attribute_updates=None,
expected=None, return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
Edits an existing item's attributes, or inserts a new item if
it does not already exist. You can put, delete, or add
attribute values. You can also perform a conditional update
(insert a new attribute name-value pair if it doesn't exist,
or replace an existing name-value pair if it has certain
expected attribute values).
In addition to updating an item, you can also return the
item's attribute values in the same operation, using the
ReturnValues parameter.
:type table_name: string
:param table_name: The name of the table containing the item to update.
:type key: map
:param key: The primary key that defines the item. Each element
consists of an attribute name and a value for that attribute.
:type attribute_updates: map
:param attribute_updates: The names of attributes to be modified, the
action to perform on each, and the new value for each. If you are
updating an attribute that is an index key attribute for any
indexes on that table, the attribute type must match the index key
type defined in the AttributesDefinition of the table description.
You can use UpdateItem to update any non-key attributes.
Attribute values cannot be null. String and binary type attributes must
have lengths greater than zero. Set type attributes must not be
empty. Requests with empty values will be rejected with a
ValidationException .
Each AttributeUpdates element consists of an attribute name to modify,
along with the following:
+ Value - The new value, if applicable, for this attribute.
+ Action - Specifies how to perform the update. Valid values for Action
are `PUT`, `DELETE`, and `ADD`. The behavior depends on whether the
specified primary key already exists in the table. **If an item
with the specified Key is found in the table:**
+ `PUT` - Adds the specified attribute to the item. If the attribute
already exists, it is replaced by the new value.
+ `DELETE` - If no value is specified, the attribute and its value are
removed from the item. The data type of the specified value must
match the existing value's data type. If a set of values is
specified, then those values are subtracted from the old set. For
example, if the attribute value was the set `[a,b,c]` and the
DELETE action specified `[a,c]`, then the final attribute value
would be `[b]`. Specifying an empty set is an error.
+ `ADD` - If the attribute does not already exist, then the attribute
and its values are added to the item. If the attribute does exist,
then the behavior of `ADD` depends on the data type of the
attribute:
+ If the existing attribute is a number, and if Value is also a number,
then the Value is mathematically added to the existing attribute.
If Value is a negative number, then it is subtracted from the
existing attribute. If you use `ADD` to increment or decrement a
number value for an item that doesn't exist before the update,
Amazon DynamoDB uses 0 as the initial value. In addition, if you
use `ADD` to update an existing item, and intend to increment or
decrement an attribute value which does not yet exist, Amazon
DynamoDB uses `0` as the initial value. For example, suppose that
the item you want to update does not yet have an attribute named
itemcount , but you decide to `ADD` the number `3` to this
attribute anyway, even though it currently does not exist. Amazon
DynamoDB will create the itemcount attribute, set its initial value
to `0`, and finally add `3` to it. The result will be a new
itemcount attribute in the item, with a value of `3`.
+ If the existing data type is a set, and if the Value is also a set,
then the Value is added to the existing set. (This is a set
operation, not mathematical addition.) For example, if the
attribute value was the set `[1,2]`, and the `ADD` action specified
`[3]`, then the final attribute value would be `[1,2,3]`. An error
occurs if an Add action is specified for a set attribute and the
attribute type specified does not match the existing set type. Both
sets must have the same primitive data type. For example, if the
existing data type is a set of strings, the Value must also be a
set of strings. The same holds true for number sets and binary
sets.
This action is only valid for an existing attribute whose data type is
number or is a set. Do not use `ADD` for any other data types.
**If no item with the specified Key is found:**
+ `PUT` - Amazon DynamoDB creates a new item with the specified primary
key, and then adds the attribute.
+ `DELETE` - Nothing happens; there is no attribute to delete.
+ `ADD` - Amazon DynamoDB creates an item with the supplied primary key
and number (or set of numbers) for the attribute value. The only
data types allowed are number and number set; no other data types
can be specified.
If you specify any attributes that are part of an index key, then the
data types for those attributes must match those of the schema in
the table's attribute definition.
:type expected: map
:param expected: A map of attribute/condition pairs. This is the
conditional block for the UpdateItem operation. All the conditions
must be met for the operation to succeed.
Expected allows you to provide an attribute name, and whether or not
Amazon DynamoDB should check to see if the attribute value already
exists; or if the attribute value exists and has a particular value
before changing it.
Each item in Expected represents an attribute name for Amazon DynamoDB
to check, along with the following:
+ Value - The attribute value for Amazon DynamoDB to check.
+ Exists - Causes Amazon DynamoDB to evaluate the value before
attempting a conditional operation:
+ If Exists is `True`, Amazon DynamoDB will check to see if that
attribute value already exists in the table. If it is found, then
the operation succeeds. If it is not found, the operation fails
with a ConditionalCheckFailedException .
+ If Exists is `False`, Amazon DynamoDB assumes that the attribute
value does not exist in the table. If in fact the value does not
exist, then the assumption is valid and the operation succeeds. If
the value is found, despite the assumption that it does not exist,
the operation fails with a ConditionalCheckFailedException .
The default setting for Exists is `True`. If you supply a Value all by
itself, Amazon DynamoDB assumes the attribute exists: You don't
have to set Exists to `True`, because it is implied. Amazon
DynamoDB returns a ValidationException if:
+ Exists is `True` but there is no Value to check. (You expect a value
to exist, but don't specify what that value is.)
+ Exists is `False` but you also specify a Value . (You cannot expect
an attribute to have a value, while also expecting it not to
exist.)
If you specify more than one condition for Exists , then all of the
conditions must evaluate to true. (In other words, the conditions
are ANDed together.) Otherwise, the conditional operation will
fail.
:type return_values: string
:param return_values:
Use ReturnValues if you want to get the item attributes as they
appeared either before or after they were updated. For UpdateItem ,
the valid values are:
+ `NONE` - If ReturnValues is not specified, or if its value is `NONE`,
then nothing is returned. (This is the default for ReturnValues .)
+ `ALL_OLD` - If UpdateItem overwrote an attribute name-value pair,
then the content of the old item is returned.
+ `UPDATED_OLD` - The old versions of only the updated attributes are
returned.
+ `ALL_NEW` - All of the attributes of the new version of the item are
returned.
+ `UPDATED_NEW` - The new versions of only the updated attributes are
returned.
:type return_consumed_capacity: string
:param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
included in the response; if set to `NONE` (the default),
ConsumedCapacity is not included.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, statistics
about item collections, if any, that were modified during the
operation are returned in the response. If set to `NONE` (the
            default), no statistics are returned.
"""
params = {'TableName': table_name, 'Key': key, }
if attribute_updates is not None:
params['AttributeUpdates'] = attribute_updates
if expected is not None:
params['Expected'] = expected
if return_values is not None:
params['ReturnValues'] = return_values
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics is not None:
params['ReturnItemCollectionMetrics'] = return_item_collection_metrics
return self.make_request(action='UpdateItem',
body=json.dumps(params))
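    # Illustrative usage sketch (hypothetical names; `conn` is assumed to be
    # an instance of this connection class): an atomic counter increment using
    # the ADD action described above; DynamoDB starts from 0 when the
    # attribute does not exist yet.
    #
    #     updated = conn.update_item(
    #         table_name='pages',
    #         key={'page_id': {'S': 'home'}},
    #         attribute_updates={'view_count': {'Value': {'N': '1'},
    #                                           'Action': 'ADD'}},
    #         return_values='UPDATED_NEW')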
def update_table(self, table_name, provisioned_throughput=None,
global_secondary_index_updates=None):
"""
Updates the provisioned throughput for the given table.
Setting the throughput for a table helps you manage
performance and is part of the provisioned throughput feature
of Amazon DynamoDB.
The provisioned throughput values can be upgraded or
downgraded based on the maximums and minimums listed in the
`Limits`_ section in the Amazon DynamoDB Developer Guide.
The table must be in the `ACTIVE` state for this operation to
succeed. UpdateTable is an asynchronous operation; while
executing the operation, the table is in the `UPDATING` state.
While the table is in the `UPDATING` state, the table still
has the provisioned throughput from before the call. The new
provisioned throughput setting is in effect only when the
table returns to the `ACTIVE` state after the UpdateTable
operation.
You cannot add, modify or delete local secondary indexes using
UpdateTable . Local secondary indexes can only be defined at
table creation time.
:type table_name: string
:param table_name: The name of the table to be updated.
:type provisioned_throughput: dict
:param provisioned_throughput: The provisioned throughput settings for
the specified table. The settings can be modified using the
UpdateTable operation.
For current minimum and maximum provisioned throughput values, see
`Limits`_ in the Amazon DynamoDB Developer Guide.
:type global_secondary_index_updates: list
:param global_secondary_index_updates:
"""
params = {'TableName': table_name, }
if provisioned_throughput is not None:
params['ProvisionedThroughput'] = provisioned_throughput
if global_secondary_index_updates is not None:
params['GlobalSecondaryIndexUpdates'] = global_secondary_index_updates
return self.make_request(action='UpdateTable',
body=json.dumps(params))
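    # Illustrative usage sketch (hypothetical values; `conn` is assumed to be
    # an instance of this connection class): raising a table's provisioned
    # throughput. The shape of the map follows the DynamoDB UpdateTable API.
    #
    #     conn.update_table(
    #         table_name='events',
    #         provisioned_throughput={'ReadCapacityUnits': 10,
    #                                 'WriteCapacityUnits': 5})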
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.host,
'Content-Type': 'application/x-amz-json-1.0',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body, host=self.host)
response = self._mexe(http_request, sender=None,
override_num_retries=self.NumberRetries,
retry_handler=self._retry_handler)
response_body = response.read()
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
def _retry_handler(self, response, i, next_sleep):
status = None
boto.log.debug("Saw HTTP status: %s" % response.status)
if response.status == 400:
response_body = response.read()
boto.log.debug(response_body)
            data = json.loads(response_body)
            # Default to an empty string so a response without a '__type'
            # key cannot make the substring checks below raise a TypeError.
            fault_type = data.get('__type', '')
            if 'ProvisionedThroughputExceededException' in fault_type:
self.throughput_exceeded_events += 1
msg = "%s, retry attempt %s" % (
'ProvisionedThroughputExceededException',
i
)
next_sleep = self._exponential_time(i)
i += 1
status = (msg, i, next_sleep)
if i == self.NumberRetries:
# If this was our last retry attempt, raise
# a specific error saying that the throughput
# was exceeded.
raise exceptions.ProvisionedThroughputExceededException(
response.status, response.reason, data)
            elif 'ConditionalCheckFailedException' in fault_type:
raise exceptions.ConditionalCheckFailedException(
response.status, response.reason, data)
            elif 'ValidationException' in fault_type:
raise exceptions.ValidationException(
response.status, response.reason, data)
else:
raise self.ResponseError(response.status, response.reason,
data)
        expected_crc32 = response.getheader('x-amz-crc32')
        if self._validate_checksums and expected_crc32 is not None:
            # Read the body once; calling response.read() a second time would
            # return an empty string and the checksum would be computed over
            # nothing.
            body = response.read()
            boto.log.debug('Validating crc32 checksum for body: %s', body)
            actual_crc32 = crc32(body) & 0xffffffff
expected_crc32 = int(expected_crc32)
if actual_crc32 != expected_crc32:
msg = ("The calculated checksum %s did not match the expected "
"checksum %s" % (actual_crc32, expected_crc32))
status = (msg, i + 1, self._exponential_time(i))
return status
def _exponential_time(self, i):
if i == 0:
next_sleep = 0
else:
next_sleep = 0.05 * (2 ** i)
return next_sleep
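    # Worked example of the backoff schedule above: successive retry attempts
    # (i = 0, 1, 2, 3, 4) sleep 0, 0.1, 0.2, 0.4 and 0.8 seconds respectively
    # (0.05 * 2**i for i > 0), doubling on every retry after the first.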
| apache-2.0 |
jordiclariana/ansible | lib/ansible/plugins/lookup/lastpass.py | 122 | 2519 | # (c) 2016, Andrew Zenk <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from subprocess import Popen, PIPE
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LPassException(AnsibleError):
pass
class LPass(object):
def __init__(self, path='lpass'):
self._cli_path = path
@property
def cli_path(self):
return self._cli_path
@property
def logged_in(self):
out, err = self._run(self._build_args("logout"), stdin="n\n", expected_rc=1)
return err.startswith("Are you sure you would like to log out?")
def _run(self, args, stdin=None, expected_rc=0):
p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
out, err = p.communicate(stdin)
rc = p.wait()
if rc != expected_rc:
raise LPassException(err)
return out, err
def _build_args(self, command, args=None):
if args is None:
args = []
args = [command] + args
args += ["--color=never"]
return args
def get_field(self, key, field):
if field in ['username', 'password', 'url', 'notes', 'id', 'name']:
out, err = self._run(self._build_args("show", ["--{0}".format(field), key]))
else:
out, err = self._run(self._build_args("show", ["--field={0}".format(field), key]))
return out.strip()
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
lp = LPass()
if not lp.logged_in:
raise AnsibleError("Not logged into lastpass: please run 'lpass login' first")
field = kwargs.get('field', 'password')
values = []
for term in terms:
values.append(lp.get_field(term, field))
return values
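    # Illustrative usage sketch (hypothetical entry name 'prod-db'; assumes
    # the lpass CLI is installed and already logged in): the LPass helper can
    # also be exercised directly, outside of a playbook lookup call.
    #
    #     lp = LPass()
    #     if lp.logged_in:
    #         password = lp.get_field('prod-db', 'password')
    #         username = lp.get_field('prod-db', 'username')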
| gpl-3.0 |
MortimerGoro/servo | components/script/dom/bindings/codegen/parser/tests/test_securecontext_extended_attribute.py | 113 | 16702 | import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
[SecureContext]
interface TestSecureContextOnInterface {
const octet TEST_CONSTANT = 0;
readonly attribute byte testAttribute;
void testMethod(byte foo);
};
partial interface TestSecureContextOnInterface {
const octet TEST_CONSTANT_2 = 0;
readonly attribute byte testAttribute2;
void testMethod2(byte foo);
};
""")
results = parser.finish()
harness.check(len(results[0].members), 6, "TestSecureContextOnInterface should have six members")
harness.ok(results[0].getExtendedAttribute("SecureContext"),
"Interface should have [SecureContext] extended attribute")
harness.ok(results[0].members[0].getExtendedAttribute("SecureContext"),
"[SecureContext] should propagate from interface to constant members")
harness.ok(results[0].members[1].getExtendedAttribute("SecureContext"),
"[SecureContext] should propagate from interface to attribute members")
harness.ok(results[0].members[2].getExtendedAttribute("SecureContext"),
"[SecureContext] should propagate from interface to method members")
harness.ok(results[0].members[3].getExtendedAttribute("SecureContext"),
"[SecureContext] should propagate from interface to constant members from partial interface")
harness.ok(results[0].members[4].getExtendedAttribute("SecureContext"),
"[SecureContext] should propagate from interface to attribute members from partial interface")
harness.ok(results[0].members[5].getExtendedAttribute("SecureContext"),
"[SecureContext] should propagate from interface to method members from partial interface")
# Same thing, but with the partial interface specified first:
parser = parser.reset()
parser.parse("""
partial interface TestSecureContextOnInterfaceAfterPartialInterface {
const octet TEST_CONSTANT_2 = 0;
readonly attribute byte testAttribute2;
void testMethod2(byte foo);
};
[SecureContext]
interface TestSecureContextOnInterfaceAfterPartialInterface {
const octet TEST_CONSTANT = 0;
readonly attribute byte testAttribute;
void testMethod(byte foo);
};
""")
results = parser.finish()
harness.check(len(results[1].members), 6, "TestSecureContextOnInterfaceAfterPartialInterface should have six members")
harness.ok(results[1].getExtendedAttribute("SecureContext"),
"Interface should have [SecureContext] extended attribute")
harness.ok(results[1].members[0].getExtendedAttribute("SecureContext"),
"[SecureContext] should propagate from interface to constant members")
harness.ok(results[1].members[1].getExtendedAttribute("SecureContext"),
"[SecureContext] should propagate from interface to attribute members")
harness.ok(results[1].members[2].getExtendedAttribute("SecureContext"),
"[SecureContext] should propagate from interface to method members")
harness.ok(results[1].members[3].getExtendedAttribute("SecureContext"),
"[SecureContext] should propagate from interface to constant members from partial interface")
harness.ok(results[1].members[4].getExtendedAttribute("SecureContext"),
"[SecureContext] should propagate from interface to attribute members from partial interface")
harness.ok(results[1].members[5].getExtendedAttribute("SecureContext"),
"[SecureContext] should propagate from interface to method members from partial interface")
parser = parser.reset()
parser.parse("""
interface TestSecureContextOnPartialInterface {
const octet TEST_CONSTANT = 0;
readonly attribute byte testAttribute;
void testMethod(byte foo);
};
[SecureContext]
partial interface TestSecureContextOnPartialInterface {
const octet TEST_CONSTANT_2 = 0;
readonly attribute byte testAttribute2;
void testMethod2(byte foo);
};
""")
results = parser.finish()
harness.check(len(results[0].members), 6, "TestSecureContextOnPartialInterface should have six members")
harness.ok(results[0].getExtendedAttribute("SecureContext") is None,
"[SecureContext] should not propagate from a partial interface to the interface")
harness.ok(results[0].members[0].getExtendedAttribute("SecureContext") is None,
"[SecureContext] should not propagate from a partial interface to the interface's constant members")
harness.ok(results[0].members[1].getExtendedAttribute("SecureContext") is None,
"[SecureContext] should not propagate from a partial interface to the interface's attribute members")
harness.ok(results[0].members[2].getExtendedAttribute("SecureContext") is None,
"[SecureContext] should not propagate from a partial interface to the interface's method members")
harness.ok(results[0].members[3].getExtendedAttribute("SecureContext"),
"Constant members from [SecureContext] partial interface should be [SecureContext]")
harness.ok(results[0].members[4].getExtendedAttribute("SecureContext"),
"Attribute members from [SecureContext] partial interface should be [SecureContext]")
harness.ok(results[0].members[5].getExtendedAttribute("SecureContext"),
"Method members from [SecureContext] partial interface should be [SecureContext]")
parser = parser.reset()
parser.parse("""
interface TestSecureContextOnInterfaceMembers {
const octet TEST_NON_SECURE_CONSTANT_1 = 0;
[SecureContext]
const octet TEST_SECURE_CONSTANT = 1;
const octet TEST_NON_SECURE_CONSTANT_2 = 2;
readonly attribute byte testNonSecureAttribute1;
[SecureContext]
readonly attribute byte testSecureAttribute;
readonly attribute byte testNonSecureAttribute2;
void testNonSecureMethod1(byte foo);
[SecureContext]
void testSecureMethod(byte foo);
void testNonSecureMethod2(byte foo);
};
""")
results = parser.finish()
harness.check(len(results[0].members), 9, "TestSecureContextOnInterfaceMembers should have nine members")
harness.ok(results[0].getExtendedAttribute("SecureContext") is None,
"[SecureContext] on members should not propagate up to the interface")
harness.ok(results[0].members[0].getExtendedAttribute("SecureContext") is None,
"Constant should not have [SecureContext] extended attribute")
harness.ok(results[0].members[1].getExtendedAttribute("SecureContext"),
"Constant should have [SecureContext] extended attribute")
harness.ok(results[0].members[2].getExtendedAttribute("SecureContext") is None,
"Constant should not have [SecureContext] extended attribute")
harness.ok(results[0].members[3].getExtendedAttribute("SecureContext") is None,
"Attribute should not have [SecureContext] extended attribute")
harness.ok(results[0].members[4].getExtendedAttribute("SecureContext"),
"Attribute should have [SecureContext] extended attribute")
harness.ok(results[0].members[5].getExtendedAttribute("SecureContext") is None,
"Attribute should not have [SecureContext] extended attribute")
harness.ok(results[0].members[6].getExtendedAttribute("SecureContext") is None,
"Method should not have [SecureContext] extended attribute")
harness.ok(results[0].members[7].getExtendedAttribute("SecureContext"),
"Method should have [SecureContext] extended attribute")
harness.ok(results[0].members[8].getExtendedAttribute("SecureContext") is None,
"Method should not have [SecureContext] extended attribute")
parser = parser.reset()
parser.parse("""
interface TestSecureContextOnPartialInterfaceMembers {
};
partial interface TestSecureContextOnPartialInterfaceMembers {
const octet TEST_NON_SECURE_CONSTANT_1 = 0;
[SecureContext]
const octet TEST_SECURE_CONSTANT = 1;
const octet TEST_NON_SECURE_CONSTANT_2 = 2;
readonly attribute byte testNonSecureAttribute1;
[SecureContext]
readonly attribute byte testSecureAttribute;
readonly attribute byte testNonSecureAttribute2;
void testNonSecureMethod1(byte foo);
[SecureContext]
void testSecureMethod(byte foo);
void testNonSecureMethod2(byte foo);
};
""")
results = parser.finish()
harness.check(len(results[0].members), 9, "TestSecureContextOnPartialInterfaceMembers should have nine members")
harness.ok(results[0].members[0].getExtendedAttribute("SecureContext") is None,
"Constant from partial interface should not have [SecureContext] extended attribute")
harness.ok(results[0].members[1].getExtendedAttribute("SecureContext"),
"Constant from partial interface should have [SecureContext] extended attribute")
harness.ok(results[0].members[2].getExtendedAttribute("SecureContext") is None,
"Constant from partial interface should not have [SecureContext] extended attribute")
harness.ok(results[0].members[3].getExtendedAttribute("SecureContext") is None,
"Attribute from partial interface should not have [SecureContext] extended attribute")
harness.ok(results[0].members[4].getExtendedAttribute("SecureContext"),
"Attribute from partial interface should have [SecureContext] extended attribute")
harness.ok(results[0].members[5].getExtendedAttribute("SecureContext") is None,
"Attribute from partial interface should not have [SecureContext] extended attribute")
harness.ok(results[0].members[6].getExtendedAttribute("SecureContext") is None,
"Method from partial interface should not have [SecureContext] extended attribute")
harness.ok(results[0].members[7].getExtendedAttribute("SecureContext"),
"Method from partial interface should have [SecureContext] extended attribute")
harness.ok(results[0].members[8].getExtendedAttribute("SecureContext") is None,
"Method from partial interface should not have [SecureContext] extended attribute")
parser = parser.reset()
threw = False
try:
parser.parse("""
[SecureContext=something]
interface TestSecureContextTakesNoValue1 {
const octet TEST_SECURE_CONSTANT = 0;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "[SecureContext] must take no arguments (testing on interface)")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestSecureContextForOverloads1 {
[SecureContext]
void testSecureMethod(byte foo);
};
partial interface TestSecureContextForOverloads1 {
void testSecureMethod(byte foo, byte bar);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "If [SecureContext] appears on an overloaded operation, then it MUST appear on all overloads")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestSecureContextForOverloads2 {
[SecureContext]
void testSecureMethod(byte foo);
};
partial interface TestSecureContextForOverloads2 {
[SecureContext]
void testSecureMethod(byte foo, byte bar);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(not threw, "[SecureContext] can appear on an overloaded operation if it appears on all overloads")
parser = parser.reset()
threw = False
try:
parser.parse("""
[SecureContext]
interface TestSecureContextOnInterfaceAndMember {
[SecureContext]
void testSecureMethod(byte foo);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "[SecureContext] must not appear on an interface and interface member")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestSecureContextOnPartialInterfaceAndMember {
};
[SecureContext]
partial interface TestSecureContextOnPartialInterfaceAndMember {
[SecureContext]
void testSecureMethod(byte foo);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "[SecureContext] must not appear on a partial interface and one of the partial interface's member's")
parser = parser.reset()
threw = False
try:
parser.parse("""
[SecureContext]
interface TestSecureContextOnInterfaceAndPartialInterfaceMember {
};
partial interface TestSecureContextOnInterfaceAndPartialInterfaceMember {
[SecureContext]
void testSecureMethod(byte foo);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "[SecureContext] must not appear on an interface and one of its partial interface's member's")
parser = parser.reset()
threw = False
try:
parser.parse("""
[SecureContext]
interface TestSecureContextOnInheritedInterface {
};
interface TestSecureContextNotOnInheritingInterface : TestSecureContextOnInheritedInterface {
void testSecureMethod(byte foo);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "[SecureContext] must appear on interfaces that inherit from another [SecureContext] interface")
# Test 'implements'. The behavior tested here may have to change depending
# on the resolution of https://github.com/heycam/webidl/issues/118
parser = parser.reset()
parser.parse("""
[SecureContext]
interface TestSecureContextInterfaceThatImplementsNonSecureContextInterface {
const octet TEST_CONSTANT = 0;
};
interface TestNonSecureContextInterface {
const octet TEST_CONSTANT_2 = 0;
readonly attribute byte testAttribute2;
void testMethod2(byte foo);
};
TestSecureContextInterfaceThatImplementsNonSecureContextInterface implements TestNonSecureContextInterface;
""")
results = parser.finish()
    harness.check(len(results[0].members), 4, "TestSecureContextInterfaceThatImplementsNonSecureContextInterface should have four members")
harness.ok(results[0].getExtendedAttribute("SecureContext"),
"Interface should have [SecureContext] extended attribute")
harness.ok(results[0].members[0].getExtendedAttribute("SecureContext"),
"[SecureContext] should propagate from interface to constant members even when other members are copied from a non-[SecureContext] interface")
harness.ok(results[0].members[1].getExtendedAttribute("SecureContext") is None,
"Constants copied from non-[SecureContext] interface should not be [SecureContext]")
harness.ok(results[0].members[2].getExtendedAttribute("SecureContext") is None,
"Attributes copied from non-[SecureContext] interface should not be [SecureContext]")
harness.ok(results[0].members[3].getExtendedAttribute("SecureContext") is None,
"Methods copied from non-[SecureContext] interface should not be [SecureContext]")
# Test SecureContext and NoInterfaceObject
parser = parser.reset()
parser.parse("""
[NoInterfaceObject, SecureContext]
interface TestSecureContextNoInterfaceObject {
void testSecureMethod(byte foo);
};
""")
results = parser.finish()
harness.check(len(results[0].members), 1, "TestSecureContextNoInterfaceObject should have only one member")
harness.ok(results[0].getExtendedAttribute("SecureContext"),
"Interface should have [SecureContext] extended attribute")
harness.ok(results[0].members[0].getExtendedAttribute("SecureContext"),
"Interface member should have [SecureContext] extended attribute")
| mpl-2.0 |
sysadmin75/ansible | lib/ansible/playbook/attribute.py | 68 | 4639 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from copy import copy, deepcopy
_CONTAINERS = frozenset(('list', 'dict', 'set'))
class Attribute:
def __init__(
self,
isa=None,
private=False,
default=None,
required=False,
listof=None,
priority=0,
class_type=None,
always_post_validate=False,
inherit=True,
alias=None,
extend=False,
prepend=False,
static=False,
):
"""
:class:`Attribute` specifies constraints for attributes of objects which
derive from playbook data. The attributes of the object are basically
a schema for the yaml playbook.
:kwarg isa: The type of the attribute. Allowable values are a string
representation of any yaml basic datatype, python class, or percent.
(Enforced at post-validation time).
:kwarg private: Not used at runtime. The docs playbook keyword dumper uses it to determine
that a keyword should not be documented. mpdehaan had plans to remove attributes marked
private from the ds so they would not have been available at all.
:kwarg default: Default value if unspecified in the YAML document.
:kwarg required: Whether or not the YAML document must contain this field.
If the attribute is None when post-validated, an error will be raised.
:kwarg listof: If isa is set to "list", this can optionally be set to
ensure that all elements in the list are of the given type. Valid
values here are the same as those for isa.
:kwarg priority: The order in which the fields should be parsed. Generally
this does not need to be set, it is for rare situations where another
field depends on the fact that another field was parsed first.
:kwarg class_type: If isa is set to "class", this can be optionally set to
a class (not a string name). The YAML data for this field will be
passed to the __init__ method of that class during post validation and
the field will be an instance of that class.
:kwarg always_post_validate: Controls whether a field should be post
validated or not (default: False).
:kwarg inherit: A boolean value, which controls whether the object
containing this field should attempt to inherit the value from its
parent object if the local value is None.
:kwarg alias: An alias to use for the attribute name, for situations where
the attribute name may conflict with a Python reserved word.
"""
self.isa = isa
self.private = private
self.default = default
self.required = required
self.listof = listof
self.priority = priority
self.class_type = class_type
self.always_post_validate = always_post_validate
self.inherit = inherit
self.alias = alias
self.extend = extend
self.prepend = prepend
self.static = static
if default is not None and self.isa in _CONTAINERS and not callable(default):
raise TypeError('defaults for FieldAttribute may not be mutable, please provide a callable instead')
def __eq__(self, other):
return other.priority == self.priority
def __ne__(self, other):
return other.priority != self.priority
# NB: higher priority numbers sort first
def __lt__(self, other):
return other.priority < self.priority
def __gt__(self, other):
return other.priority > self.priority
def __le__(self, other):
return other.priority <= self.priority
def __ge__(self, other):
return other.priority >= self.priority
class FieldAttribute(Attribute):
pass
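# A small illustrative sketch of declaring field descriptors (the field names
# below are invented for this example and are not taken from Ansible itself):
if __name__ == '__main__':
    name_attr = FieldAttribute(isa='string', required=True)
    port_attr = FieldAttribute(isa='int', default=22)
    # Container defaults must be callables: FieldAttribute(isa='list', default=[])
    # would raise TypeError, so a factory such as `list` is passed instead.
    tags_attr = FieldAttribute(isa='list', listof='string', default=list)
    print(name_attr.required, port_attr.default, tags_attr.default is list)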
| gpl-3.0 |
iansealy/projecteuler | 71.py | 1 | 1392 | #!/usr/bin/env python
"""This script solves the Project Euler problem "Ordered fractions". The
problem is: By listing the set of reduced proper fractions for d <= 1,000,000
in ascending order of size, find the numerator of the fraction immediately to
the left of 3/7.
"""
from __future__ import division
import argparse
def main(args):
"""Ordered fractions"""
# Constants
RIGHT_FRAC = [3, 7]
left_frac = [2, 7]
for d in range(2, args.limit + 1):
start_n = int(left_frac[0] / left_frac[1] * d)
end_n = int(RIGHT_FRAC[0] / RIGHT_FRAC[1] * d) + 1
for n in range(start_n, end_n + 1):
if compare_fractions([n, d], RIGHT_FRAC) >= 0:
continue
if compare_fractions([n, d], left_frac) > 0:
left_frac = [n, d]
print(left_frac[0])
def compare_fractions(frac1, frac2):
"""Compare two fractions"""
big_numer1 = frac1[0] * frac2[1]
big_numer2 = frac2[0] * frac1[1]
if big_numer1 > big_numer2:
return(1)
elif big_numer1 < big_numer2:
return(-1)
else:
return(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Ordered fractions')
parser.add_argument(
'limit', metavar='LIMIT', type=int, default=1000000, nargs='?',
help='The maximum denominator')
args = parser.parse_args()
main(args)
| gpl-3.0 |
scottcunningham/ansible | v1/ansible/runner/connection_plugins/fireball.py | 110 | 4841 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import json
import os
import base64
from ansible.callbacks import vvv
from ansible import utils
from ansible import errors
from ansible import constants
HAVE_ZMQ=False
try:
import zmq
HAVE_ZMQ=True
except ImportError:
pass
class Connection(object):
''' ZeroMQ accelerated connection '''
def __init__(self, runner, host, port, *args, **kwargs):
self.runner = runner
self.has_pipelining = False
# attempt to work around shared-memory funness
if getattr(self.runner, 'aes_keys', None):
utils.AES_KEYS = self.runner.aes_keys
self.host = host
self.key = utils.key_for_hostname(host)
self.context = None
self.socket = None
if port is None:
self.port = constants.ZEROMQ_PORT
else:
self.port = port
self.become_methods_supported=[]
def connect(self):
''' activates the connection object '''
if not HAVE_ZMQ:
raise errors.AnsibleError("zmq is not installed")
# this is rough/temporary and will likely be optimized later ...
self.context = zmq.Context()
socket = self.context.socket(zmq.REQ)
addr = "tcp://%s:%s" % (self.host, self.port)
socket.connect(addr)
self.socket = socket
return self
def exec_command(self, cmd, tmp_path, become_user, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the remote host '''
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
vvv("EXEC COMMAND %s" % cmd)
if self.runner.become and sudoable:
raise errors.AnsibleError(
"When using fireball, do not specify sudo or su to run your tasks. " +
"Instead sudo the fireball action with sudo. " +
"Task will communicate with the fireball already running in sudo mode."
)
data = dict(
mode='command',
cmd=cmd,
tmp_path=tmp_path,
executable=executable,
)
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
self.socket.send(data)
response = self.socket.recv()
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr',''))
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
if not os.path.exists(in_path):
raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
data = file(in_path).read()
data = base64.b64encode(data)
data = dict(mode='put', data=data, out_path=out_path)
# TODO: support chunked file transfer
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
self.socket.send(data)
response = self.socket.recv()
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
# no meaningful response needed for this
def fetch_file(self, in_path, out_path):
''' save a remote file to the specified path '''
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
data = dict(mode='fetch', in_path=in_path)
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
self.socket.send(data)
response = self.socket.recv()
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
response = response['data']
response = base64.b64decode(response)
fh = open(out_path, "w")
fh.write(response)
fh.close()
def close(self):
''' terminate the connection '''
# Be a good citizen
try:
self.socket.close()
self.context.term()
except:
pass
| gpl-3.0 |
davidcusatis/horizon | openstack_dashboard/dashboards/project/instances/console.py | 27 | 2562 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import logging
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
import six
from horizon import exceptions
from novaclient import exceptions as nova_exception
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
CONSOLES = OrderedDict([('VNC', api.nova.server_vnc_console),
('SPICE', api.nova.server_spice_console),
('RDP', api.nova.server_rdp_console),
('SERIAL', api.nova.server_serial_console)])
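# When console_type is 'AUTO', get_console() below tries the console types in
# the order given above and returns the first one the instance supports.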
def get_console(request, console_type, instance):
"""Get a tuple of console url and console type."""
if console_type == 'AUTO':
check_consoles = CONSOLES
else:
try:
check_consoles = {console_type: CONSOLES[console_type]}
except KeyError:
msg = _('Console type "%s" not supported.') % console_type
raise exceptions.NotAvailable(msg)
# Ugly workaround due novaclient API change from 2.17 to 2.18.
try:
httpnotimplemented = nova_exception.HttpNotImplemented
except AttributeError:
httpnotimplemented = nova_exception.HTTPNotImplemented
for con_type, api_call in six.iteritems(check_consoles):
try:
console = api_call(request, instance.id)
# If not supported, don't log it to avoid lot of errors in case
# of AUTO.
except httpnotimplemented:
continue
except Exception:
LOG.debug('Console not available', exc_info=True)
continue
if con_type == 'SERIAL':
console_url = console.url
else:
console_url = "%s&%s(%s)" % (
console.url,
urlencode({'title': getattr(instance, "name", "")}),
instance.id)
return (con_type, console_url)
raise exceptions.NotAvailable(_('No available console found.'))
| apache-2.0 |
pycomlink/pycomlink | pycomlink/util/temporal.py | 2 | 2643 | import pandas as pd
def aggregate_df_onto_DatetimeIndex(
df, new_index, method, label="right", new_index_tz="utc"
):
"""
Aggregate a DataFrame or Series using a given DatetimeIndex
Parameters
----------
df : pandas.DataFrame
The dataframe that should be reindexed
new_index : pandas.DatetimeIndex
The time stamp index on which `df` should be aggregated
method : numpy function
The function to be used for aggregation via
`DataFrame.groupby('new_time_ix').agg(method)`
label : str {'right', 'left'}, optional
Which side of the aggregated period to take the label for the new
index from
new_index_tz : str, optional
Defaults to 'utc'. Note that if `new_index` already has time zone
information, this kwarg is ignored
Returns
-------
df_reindexed : pandas.DataFrame
"""
if label == "right":
fill_method = "bfill"
elif label == "left":
fill_method = "ffill"
else:
raise NotImplementedError('`label` must be "left" or "right"')
# Make sure we work with a DataFrame and make a copy of it
df_temp = pd.DataFrame(df).copy()
# Generate DataFrame with desired DatetimeIndex as data,
# which will later be reindexed by DatetimeIndex of original DataFrame
df_new_t = pd.DataFrame(index=new_index, data={"time": new_index})
# Update time zone info if there is none
if not df_new_t.index.tzinfo:
df_new_t.index = df_new_t.index.tz_localize(new_index_tz)
# Crop both time series to make them cover the same period.
    # This prevents the ffill or bfill from running outside the
    # range of the new index, which would produce a wrong result for the
    # end point of the time series in the aggregated result
t_start = max(df_temp.index.min(), df_new_t.index.min())
t_stop = min(df_temp.index.max(), df_new_t.index.max())
df_new_t = df_new_t.loc[t_start:t_stop]
df_temp = df_temp.loc[t_start:t_stop]
    # Reindex to get the forward-filled or backward-filled time stamp of the
    # new index, which can be used for aggregation in the next step
df_new_t = df_new_t.reindex(df_temp.index, method=fill_method)
# Aggregate data onto new DatetimeIndex
df_temp["new_time_ix"] = df_new_t.time
df_reindexed = df_temp.groupby("new_time_ix").agg(method)
# Update name and timezone of new index
df_reindexed.index.name = df_temp.index.name
if not df_reindexed.index.tzinfo:
df_reindexed.index = df_reindexed.index.tz_localize("UTC").tz_convert(
df_temp.index.tzinfo
)
return df_reindexed
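# A minimal usage sketch (illustrative only; the 1-minute input series and the
# 5-minute target index below are invented for this example and are not part
# of pycomlink itself):
if __name__ == "__main__":
    import numpy as np

    minutely_index = pd.date_range("2020-01-01", periods=60, freq="1min", tz="utc")
    df_minutely = pd.DataFrame(index=minutely_index, data={"rain_rate": np.arange(60.0)})
    five_min_index = pd.date_range("2020-01-01", periods=12, freq="5min", tz="utc")
    # Aggregate the 1-minute values onto the 5-minute index, labelling each
    # aggregated period by its right edge and averaging the values
    df_5min = aggregate_df_onto_DatetimeIndex(
        df=df_minutely, new_index=five_min_index, method=np.mean, label="right"
    )
    print(df_5min)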
| bsd-3-clause |
gacarrillor/QGIS | python/plugins/processing/algs/qgis/HubDistanceLines.py | 26 | 6712 | # -*- coding: utf-8 -*-
"""
***************************************************************************
HubDistanceLines.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsField,
QgsGeometry,
QgsDistanceArea,
QgsFeature,
QgsFeatureSink,
QgsFeatureRequest,
QgsWkbTypes,
QgsUnitTypes,
QgsProcessing,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterField,
QgsProcessingParameterEnum,
QgsProcessingParameterFeatureSink,
QgsProcessingException,
QgsSpatialIndex)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from math import sqrt
class HubDistanceLines(QgisAlgorithm):
INPUT = 'INPUT'
HUBS = 'HUBS'
FIELD = 'FIELD'
UNIT = 'UNIT'
OUTPUT = 'OUTPUT'
LAYER_UNITS = 'LAYER_UNITS'
UNITS = [QgsUnitTypes.DistanceMeters,
QgsUnitTypes.DistanceFeet,
QgsUnitTypes.DistanceMiles,
QgsUnitTypes.DistanceKilometers,
LAYER_UNITS
]
def group(self):
return self.tr('Vector analysis')
def groupId(self):
return 'vectoranalysis'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.units = [self.tr('Meters'),
self.tr('Feet'),
self.tr('Miles'),
self.tr('Kilometers'),
self.tr('Layer units')]
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Source points layer')))
self.addParameter(QgsProcessingParameterFeatureSource(self.HUBS,
self.tr('Destination hubs layer')))
self.addParameter(QgsProcessingParameterField(self.FIELD,
self.tr('Hub layer name attribute'), parentLayerParameterName=self.HUBS))
self.addParameter(QgsProcessingParameterEnum(self.UNIT,
self.tr('Measurement unit'), self.units))
self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Hub distance'), QgsProcessing.TypeVectorLine))
def name(self):
return 'distancetonearesthublinetohub'
def displayName(self):
return self.tr('Distance to nearest hub (line to hub)')
def processAlgorithm(self, parameters, context, feedback):
if parameters[self.INPUT] == parameters[self.HUBS]:
raise QgsProcessingException(
self.tr('Same layer given for both hubs and spokes'))
point_source = self.parameterAsSource(parameters, self.INPUT, context)
if point_source is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
hub_source = self.parameterAsSource(parameters, self.HUBS, context)
if hub_source is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.HUBS))
fieldName = self.parameterAsString(parameters, self.FIELD, context)
units = self.UNITS[self.parameterAsEnum(parameters, self.UNIT, context)]
fields = point_source.fields()
fields.append(QgsField('HubName', QVariant.String))
fields.append(QgsField('HubDist', QVariant.Double))
(sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
fields, QgsWkbTypes.LineString, point_source.sourceCrs())
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
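        # Spatial index over the hub features (reprojected to the point layer
        # CRS), used below for fast nearest-neighbour lookups.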
index = QgsSpatialIndex(hub_source.getFeatures(QgsFeatureRequest().setSubsetOfAttributes([]).setDestinationCrs(point_source.sourceCrs(), context.transformContext())))
distance = QgsDistanceArea()
distance.setSourceCrs(point_source.sourceCrs(), context.transformContext())
distance.setEllipsoid(context.ellipsoid())
# Scan source points, find nearest hub, and write to output file
features = point_source.getFeatures()
total = 100.0 / point_source.featureCount() if point_source.featureCount() else 0
for current, f in enumerate(features):
if feedback.isCanceled():
break
if not f.hasGeometry():
sink.addFeature(f, QgsFeatureSink.FastInsert)
continue
src = f.geometry().boundingBox().center()
neighbors = index.nearestNeighbor(src, 1)
ft = next(hub_source.getFeatures(QgsFeatureRequest().setFilterFid(neighbors[0]).setSubsetOfAttributes([fieldName], hub_source.fields()).setDestinationCrs(point_source.sourceCrs(), context.transformContext())))
closest = ft.geometry().boundingBox().center()
hubDist = distance.measureLine(src, closest)
if units != self.LAYER_UNITS:
hub_dist_in_desired_units = distance.convertLengthMeasurement(hubDist, units)
else:
hub_dist_in_desired_units = hubDist
attributes = f.attributes()
attributes.append(ft[fieldName])
attributes.append(hub_dist_in_desired_units)
feat = QgsFeature()
feat.setAttributes(attributes)
feat.setGeometry(QgsGeometry.fromPolylineXY([src, closest]))
sink.addFeature(feat, QgsFeatureSink.FastInsert)
feedback.setProgress(int(current * total))
return {self.OUTPUT: dest_id}
| gpl-2.0 |
updownlife/multipleK | dependencies/biopython-1.65/Scripts/xbbtools/xbb_search.py | 1 | 5183 | #!/usr/bin/env python
# Created: Sun Dec 3 13:38:52 2000
# Last changed: Time-stamp: <01/09/04 09:51:21 thomas>
# [email protected], http://www.cbs.dtu.dk/thomas
# File: xbb_search.py
import os
import re
import sys
sys.path.insert(0, '.')
try:
from Tkinter import * # Python 2
except ImportError:
from tkinter import * # Python 3
try:
import tkColorChooser as colorchooser # Python 2
except ImportError:
from tkinter import colorchooser # Python 3
from Bio.Data.IUPACData import ambiguous_dna_values
from Bio.Seq import reverse_complement
class DNAsearch:
def __init__(self):
self.init_alphabet()
self.sequence = ''
def init_alphabet(self):
self.alphabet = ambiguous_dna_values
other = ''.join(self.alphabet)
self.alphabet['N'] = self.alphabet['N'] + other
for key in self.alphabet:
if key == 'N':
continue
if key in self.alphabet[key]:
continue
self.alphabet[key] = self.alphabet[key] + key
def SetSeq(self, seq):
self.sequence = seq
def SetPattern(self, pattern):
self.pattern = pattern
self.rx_pattern = self.IUPAC2regex(pattern)
self.rx = re.compile(self.rx_pattern)
def IUPAC2regex(self, s):
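        # Expand IUPAC ambiguity codes into regex character classes, e.g.
        # "AR" becomes "A[AGR]" (R = A or G; the ambiguity letter itself is
        # kept in the class so it also matches literally).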
rx = ''
for i in s:
r = self.alphabet.get(i, i)
if len(r) > 1:
rx = '%s[%s]' % (rx, r)
else:
rx += r
return rx
def _Search(self, start=0):
pos = self.rx.search(self.sequence, start)
return pos
def Search(self, start=0):
pos = self.rx.search(self.sequence, start)
if pos:
return pos.start()
else:
return -1
def SearchAll(self):
pos = -1
positions = []
while True:
m = self._Search(pos + 1)
if not m:
break
pos = m.start()
if pos == -1:
break
positions.append(pos)
return positions
class XDNAsearch(Toplevel, DNAsearch):
def __init__(self, seq='', master=None, highlight=0):
DNAsearch.__init__(self)
self.master = master
self.highlight = highlight
self.colors = []
self.init_graphics()
self.sequence = seq
self.cur_pos = 0
def init_graphics(self):
Toplevel.__init__(self, self.master)
self.frame = Frame(self)
self.frame.pack(fill=BOTH, expand=1)
self.search_entry = Entry(self.frame)
self.search_entry.pack(fill=BOTH, expand=1)
f2 = Frame(self.frame)
f2.pack(side=TOP, fill=BOTH, expand=1)
f = f2
self.forward = Button(f, text='Search +', command=self.do_search)
self.forward.pack(side=LEFT)
self.forward = Button(f, text='Search -',
command=lambda x=self.do_search: x(other_strand=1))
self.forward.pack(side=LEFT)
self.cancel = Button(f, text='Cancel', command=self.exit)
self.cancel.pack(side=LEFT)
self.current_color = 'cyan'
self.colorb = Button(f, text='Color', command=self.change_color, foreground=self.current_color)
self.colorb.pack(side=LEFT)
self.config_color(self.current_color)
def config_color(self, color=None):
if not self.highlight:
return
if not color:
try:
color = colorchooser.askcolor()[1]
except:
color = 'cyan'
self.current_color = color
self.current_tag = 'searched_%s' % self.current_color
self.master.tag_config(self.current_tag, background=self.current_color)
self.master.tag_config(self.current_tag + 'R', background=self.current_color, underline=1)
self.colors.append(color)
def change_color(self):
self.config_color()
self.colorb.configure(foreground=self.current_color)
self.colorb.update()
def get_pattern(self):
pattern = self.search_entry.get()
return pattern
def do_search(self, other_strand=0):
pattern = self.get_pattern()
if other_strand:
pattern = reverse_complement(pattern)
self.SetPattern(pattern)
pos = self.Search(self.cur_pos)
self.cur_pos = pos + 1
w = self.master
if pos != -1:
if self.highlight:
start, stop = pos, pos + len(self.pattern)
if other_strand:
w.tag_add(self.current_tag + 'R', '1.%d' % start, '1.%s' % stop)
else:
w.tag_add(self.current_tag, '1.%d' % start, '1.%s' % stop)
w.see('1.%d' % start)
def exit(self):
for c in self.colors:
self.master.tag_remove('searched_%s' % c, 1.0, END)
self.master.tag_remove('searched_%sR' % c, 1.0, END)
self.destroy()
del(self)
def showcolor(self):
pass
if __name__ == '__main__':
seq = 'ATGGTGTGTGTGTACGATCGCCCCCCCCAGTCGATCGATGCATCGTA'
win = Tk()
xtest = XDNAsearch(seq=seq, master=win)
win.mainloop()
| gpl-2.0 |
wandec/grr | parsers/linux_file_parser_test.py | 5 | 5155 | #!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Unit test for the linux file parser."""
import os
import StringIO
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import parsers
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
from grr.parsers import linux_file_parser
class LinuxFileParserTest(test_lib.GRRBaseTest):
"""Test parsing of linux files."""
def testPasswdParser(self):
"""Ensure we can extract users from a passwd file."""
parser = linux_file_parser.PasswdParser()
dat = """
user1:x:1000:1000:User1 Name,,,:/home/user1:/bin/bash
user2:x:1001:1001:User2 Name,,,:/home/user2:/bin/bash
"""
out = list(parser.Parse(None, StringIO.StringIO(dat), None))
self.assertEqual(len(out), 2)
    self.assertTrue(isinstance(out[0], rdfvalue.KnowledgeBaseUser))
    self.assertTrue(isinstance(out[1], rdfvalue.KnowledgeBaseUser))
self.assertEqual(out[0].username, "user1")
self.assertEqual(out[0].full_name, "User1 Name,,,")
dat = """
user1:x:1000:1000:User1 Name,,,:/home/user1:/bin/bash
user2:x:1001:1001:User2 Name,,,:/home/user
"""
parser = linux_file_parser.PasswdParser()
self.assertRaises(parsers.ParseError,
list, parser.Parse(None, StringIO.StringIO(dat), None))
def testPasswdBufferParser(self):
"""Ensure we can extract users from a passwd file."""
parser = linux_file_parser.PasswdBufferParser()
buf1 = rdfvalue.BufferReference(data="user1:x:1000:1000:User1"
" Name,,,:/home/user1:/bin/bash\n")
buf2 = rdfvalue.BufferReference(data="user2:x:1000:1000:User2"
" Name,,,:/home/user2:/bin/bash\n")
ff_result = rdfvalue.FileFinderResult(matches=[buf1, buf2])
out = list(parser.Parse(ff_result, None))
self.assertEqual(len(out), 2)
    self.assertTrue(isinstance(out[0], rdfvalue.KnowledgeBaseUser))
    self.assertTrue(isinstance(out[1], rdfvalue.KnowledgeBaseUser))
self.assertEqual(out[0].username, "user1")
self.assertEqual(out[0].full_name, "User1 Name,,,")
def testNetgroupParser(self):
"""Ensure we can extract users from a netgroup file."""
parser = linux_file_parser.NetgroupParser()
dat = u"""group1 (-,user1,) (-,user2,) (-,user3,)
#group1 comment
group2 (-,user4,) (-,user2,)
super_group (-,user5,) (-,user6,) (-,文德文,) group1 group2
super_group2 (-,user7,) super_group
super_group3 (-,user5,) (-,user6,) group1 group2
"""
dat_fd = StringIO.StringIO(dat)
config_lib.CONFIG.Set("Artifacts.netgroup_user_blacklist", ["user2",
"user3"])
out = list(parser.Parse(None, dat_fd, None))
users = []
for result in out:
if isinstance(result, rdfvalue.Anomaly):
self.assertTrue(utils.SmartUnicode(u"文德文") in result.symptom)
else:
users.append(result)
self.assertItemsEqual([x.username for x in users],
[u"user1", u"user4", u"user5", u"user6", u"user7"])
dat_fd.seek(0)
config_lib.CONFIG.Set("Artifacts.netgroup_filter_regexes",
[r"^super_group3$"])
out = list(parser.Parse(None, dat_fd, None))
self.assertItemsEqual([x.username for x in out],
[u"user5", u"user6"])
def testNetgroupBufferParser(self):
"""Ensure we can extract users from a netgroup file."""
parser = linux_file_parser.NetgroupBufferParser()
buf1 = rdfvalue.BufferReference(data="group1 (-,user1,) (-,user2,) "
"(-,user3,)\n")
buf2 = rdfvalue.BufferReference(data="super_group3 (-,user5,) (-,user6,)"
" group1 group2\n")
ff_result = rdfvalue.FileFinderResult(matches=[buf1, buf2])
config_lib.CONFIG.Set("Artifacts.netgroup_user_blacklist", ["user2",
"user3"])
out = list(parser.Parse(ff_result, None))
self.assertItemsEqual([x.username for x in out],
[u"user1", u"user5", u"user6"])
def testNetgroupParserBadInput(self):
parser = linux_file_parser.NetgroupParser()
dat = """group1 (-,user1,) (-,user2,) (-,user3,)
#group1 comment
group2 user4 (-user2,)
super_group (-,,user5,) (-user6,) group1 group2
super_group2 (-,user7,) super_group
"""
self.assertRaises(parsers.ParseError,
list, parser.Parse(None, StringIO.StringIO(dat), None))
def testWtmpParser(self):
"""Test parsing of wtmp file."""
parser = linux_file_parser.LinuxWtmpParser()
path = os.path.join(self.base_path, "wtmp")
with open(path, "rb") as wtmp_fd:
out = list(parser.Parse(None, wtmp_fd, None))
self.assertEqual(len(out), 3)
self.assertItemsEqual(["%s:%d" % (x.username, x.last_logon) for x in out],
["user1:1296552099000000",
"user2:1296552102000000",
"user3:1296569997000000"])
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
| apache-2.0 |
mldbai/mldb | testing/MLDBFB-724_classifier_exp_segfault_test.py | 1 | 2094 | #
# MLDB-xxx-explain.py
# Mich, 2016-12-07
# This file is part of MLDB. Copyright 2016 mldb.ai inc. All rights reserved.
#
from mldb import mldb, MldbUnitTest, ResponseException
class Mldbfb724ClassifierExpSegfaultTest(MldbUnitTest): # noqa
def test_it(self):
ds = mldb.create_dataset({'id' : 'ds', 'type' : 'sparse.mutable'})
ds.commit()
ds = mldb.create_dataset({'id' : '_inception', 'type' : 'sparse.mutable'})
ds.commit()
#
query = """SELECT {_inception.* EXCLUDING(image_url)} AS features,
ds.cei AS label
FROM _inception
INNER JOIN ds ON _inception.image_url=ds.image_url"""
mldb.log(query)
mldb.log(mldb.query(query))
mldb.put_async("/v1/procedures/trainer", {
"type": "classifier.experiment",
"params": {
"experimentName": "exp_",
"mode": "boolean",
"inputData": query,
'datasetFolds': [{'trainingWhere': 'rowHash() % 10 != 0',
'testingWhere': 'rowHash() % 10 = 0'}],
"algorithm": "my_bbdt",
"configuration": {
"my_bbdt": {
"type": "bagging",
"verbosity": 3,
"weak_learner": {
"type": "boosting",
"verbosity": 3,
"weak_learner": {
"type": "decision_tree",
"verbosity": 0,
"max_depth": 10,
"random_feature_propn": 1
},
"min_iter": 5,
"max_iter": 30
},
"num_bags": 1
}
},
'modelFileUrlPattern': 'file://$runid.cls',
'evalTrain': False
}
})
if __name__ == '__main__':
mldb.run_tests()
| apache-2.0 |
mandeepdhami/nova | nova/api/openstack/compute/plugins/v3/virtual_interfaces.py | 30 | 2733 | # Copyright (C) 2011 Midokura KK
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The virtual interfaces extension."""
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import network
ALIAS = 'os-virtual-interfaces'
authorize = extensions.os_compute_authorizer(ALIAS)
def _translate_vif_summary_view(_context, vif):
"""Maps keys for VIF summary view."""
d = {}
d['id'] = vif['uuid']
d['mac_address'] = vif['address']
return d
class ServerVirtualInterfaceController(wsgi.Controller):
"""The instance VIF API controller for the OpenStack API.
"""
def __init__(self):
self.compute_api = compute.API(skip_policy_check=True)
self.network_api = network.API(skip_policy_check=True)
super(ServerVirtualInterfaceController, self).__init__()
def _items(self, req, server_id, entity_maker):
"""Returns a list of VIFs, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
instance = common.get_instance(self.compute_api, context, server_id)
vifs = self.network_api.get_vifs_by_instance(context, instance)
limited_list = common.limited(vifs, req)
res = [entity_maker(context, vif) for vif in limited_list]
return {'virtual_interfaces': res}
@extensions.expected_errors((404))
def index(self, req, server_id):
"""Returns the list of VIFs for a given instance."""
return self._items(req, server_id,
entity_maker=_translate_vif_summary_view)
class VirtualInterfaces(extensions.V3APIExtensionBase):
"""Virtual interface support."""
name = "VirtualInterfaces"
alias = ALIAS
version = 1
def get_resources(self):
resources = []
res = extensions.ResourceExtension(
ALIAS,
controller=ServerVirtualInterfaceController(),
parent=dict(member_name='server', collection_name='servers'))
resources.append(res)
return resources
def get_controller_extensions(self):
return []
| apache-2.0 |
sdkottegoda/andes | modules/andes-core/common/templating.py | 25 | 3016 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
class Parser:
def __init__(self, **kwargs):
self.output = ""
self.environ = {"out": self.parse}
for k, v in kwargs.items():
self.environ[k] = v
self.text = ""
self.level = 0
self.line = None
def action(self, actor):
text = self.text
self.text = ""
actor(text)
def out(self, text):
self.output += text
def prefix_lines(self, text):
return "%s%s" % ("\n"*(self.line - 1 - text.count("\n")), text)
def evaluate(self, text):
self.out(str(eval(self.prefix_lines(text), self.environ, self.environ)))
def execute(self, text):
exec self.prefix_lines(text) in self.environ, self.environ
def parse(self, input):
old_line = self.line
try:
state = self.start
self.line = 1
for ch in input:
state = state(ch)
if ch == "\n":
self.line += 1
if state == self.start:
self.action(self.out)
elif state == self.alnum:
self.action(self.evaluate)
else:
raise ParseError()
finally:
self.line = old_line
def start(self, ch):
if ch == "$":
return self.dollar
else:
self.text += ch
return self.start
def dollar(self, ch):
if ch == "$":
self.text += "$"
return self.start
elif ch == "(":
self.action(self.out)
return self.expression
elif ch == "{":
self.action(self.out)
return self.block
else:
self.action(self.out)
self.text += ch
return self.alnum
def alnum(self, ch):
if ch.isalnum():
self.text += ch
return self.alnum
else:
self.action(self.evaluate)
self.text += ch
return self.start
def match(self, ch, start, end):
if ch == start:
self.level += 1
if ch == end:
self.level -= 1
def block(self, ch):
if not self.level and ch == "}":
self.action(self.execute)
return self.start
else:
self.match(ch, "{", "}")
self.text += ch
return self.block
def expression(self, ch):
if not self.level and ch == ")":
self.action(self.evaluate)
return self.start
else:
self.match(ch, "(", ")")
self.text += ch
return self.expression
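# A minimal usage sketch (illustrative only; the template string and the
# 'greeting' variable are invented for this example). "$(...)" spans are
# evaluated as expressions, "${...}" spans are executed as statements and
# "$$" emits a literal "$":
if __name__ == "__main__":
    p = Parser(greeting="Hello")
    p.parse("${name = 'world'}$(greeting), $name! Price: $$5\n")
    print p.output # prints: Hello, world! Price: $5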
| apache-2.0 |
wbsavage/shinken | test/test_properties.py | 1 | 7769 | #!/usr/bin/env python
# Copyright (C) 2009-2012:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
Test shinken.property
"""
import unittest
import __import_shinken
import shinken.property
from shinken.property import none_object
from shinken_test import *
class PropertyTests:
"""Common tests for all property classes"""
def setUp(self):
pass
def test_no_default_value(self):
p = self.prop_class()
self.assertIs(p.default, none_object)
self.assertFalse(p.has_default)
self.assertTrue(p.required)
def test_default_value(self):
default_value = object()
p = self.prop_class(default=default_value)
self.assertIs(p.default, default_value)
self.assertTrue(p.has_default)
self.assertFalse(p.required)
def test_fill_brok(self):
p = self.prop_class()
self.assertNotIn('full_status', p.fill_brok)
p = self.prop_class(default='0', fill_brok=['full_status'])
self.assertIn('full_status', p.fill_brok)
def test_unused(self):
p = self.prop_class()
self.assertFalse(p.unused)
#ShinkenTest, unittest.TestCase
class TestBoolProp(PropertyTests, ShinkenTest, unittest.TestCase):
"""Test the BoolProp class"""
prop_class = shinken.property.BoolProp
def test_pythonize(self):
p = self.prop_class()
# allowed strings for `True`
self.assertEqual(p.pythonize("1"), True)
self.assertEqual(p.pythonize("yes"), True)
self.assertEqual(p.pythonize("true"), True)
self.assertEqual(p.pythonize("on"), True)
# allowed strings for `False`
self.assertEqual(p.pythonize("0"), False)
self.assertEqual(p.pythonize("no"), False)
self.assertEqual(p.pythonize("false"), False)
self.assertEqual(p.pythonize("off"), False)
class TestIntegerProp(PropertyTests, ShinkenTest, unittest.TestCase):
"""Test the IntegerProp class"""
prop_class = shinken.property.IntegerProp
def test_pythonize(self):
p = self.prop_class()
self.assertEqual(p.pythonize("1"), 1)
self.assertEqual(p.pythonize("0"), 0)
self.assertEqual(p.pythonize("1000.33"), 1000)
class TestFloatProp(PropertyTests, ShinkenTest, unittest.TestCase):
"""Test the FloatProp class"""
prop_class = shinken.property.FloatProp
def test_pythonize(self):
p = self.prop_class()
self.assertEqual(p.pythonize("1"), 1.0)
self.assertEqual(p.pythonize("0"), 0.0)
self.assertEqual(p.pythonize("1000.33"), 1000.33)
class TestStringProp(PropertyTests, ShinkenTest, unittest.TestCase):
"""Test the StringProp class"""
prop_class = shinken.property.StringProp
def test_pythonize(self):
p = self.prop_class()
self.assertEqual(p.pythonize("1"), "1")
self.assertEqual(p.pythonize("yes"), "yes")
self.assertEqual(p.pythonize("0"), "0")
self.assertEqual(p.pythonize("no"), "no")
class TestCharProp(PropertyTests, ShinkenTest, unittest.TestCase):
"""Test the CharProp class"""
prop_class = shinken.property.CharProp
def test_pythonize(self):
p = self.prop_class()
self.assertEqual(p.pythonize("c"), "c")
self.assertEqual(p.pythonize("cxxxx"), "c")
        # this raises IndexError. Is this intended?
## self.assertEqual(p.pythonize(""), "")
class TestPathProp(TestStringProp):
"""Test the PathProp class"""
prop_class = shinken.property.PathProp
# As of now, PathProp is a subclass of StringProp without any
# relevant change. So no further tests are implemented here.
class TestConfigPathProp(TestStringProp):
"""Test the ConfigPathProp class"""
prop_class = shinken.property.ConfigPathProp
# As of now, ConfigPathProp is a subclass of StringProp without
# any relevant change. So no further tests are implemented here.
class TestListProp(PropertyTests, ShinkenTest, unittest.TestCase):
"""Test the ListProp class"""
prop_class = shinken.property.ListProp
def test_pythonize(self):
p = self.prop_class()
self.assertEqual(p.pythonize(""), [])
self.assertEqual(p.pythonize("1,2,3"), ["1", "2", "3"])
class TestLogLevelProp(PropertyTests, ShinkenTest, unittest.TestCase):
"""Test the LogLevelProp class"""
prop_class = shinken.property.LogLevelProp
def test_pythonize(self):
p = self.prop_class()
self.assertEqual(p.pythonize("NOTSET"), 0)
self.assertEqual(p.pythonize("DEBUG"), 10)
self.assertEqual(p.pythonize("INFO"), 20)
self.assertEqual(p.pythonize("WARN"), 30)
self.assertEqual(p.pythonize("WARNING"), 30)
self.assertEqual(p.pythonize("ERROR"), 40)
## 'FATAL' is not defined in std-module `logging._levelNames`
#self.assertEqual(p.pythonize("FATAL"), 50)
self.assertEqual(p.pythonize("CRITICAL"), 50)
## :todo: fix DictProp error if no `elts_prop` are passed
## class TestDictProp(PropertyTests, ShinkenTest, unittest.TestCase):
## """Test the DictProp class"""
##
## prop_class = shinken.property.DictProp
##
## def test_pythonize(self):
## p = self.prop_class()
## self.assertEqual(p.pythonize(""), "")
class TestAddrProp(PropertyTests, ShinkenTest, unittest.TestCase):
"""Test the AddrProp class"""
prop_class = shinken.property.AddrProp
def test_pythonize_with_IPv4_addr(self):
p = self.prop_class()
self.assertEqual(p.pythonize("192.168.10.11:445"),
{'address': "192.168.10.11",
'port': 445})
# no colon, no port
self.assertEqual(p.pythonize("192.168.10.11"),
{'address': "192.168.10.11"})
# colon but no port number
self.assertRaises(ValueError, p.pythonize, "192.168.10.11:")
# only colon, no addr, no port number
self.assertRaises(ValueError, p.pythonize, ":")
# no address, only port number
self.assertEqual(p.pythonize(":445"),
{'address': "",
'port': 445})
def test_pythonize_with_hostname(self):
p = self.prop_class()
self.assertEqual(p.pythonize("host_123:445"),
{'address': "host_123",
'port': 445})
# no colon, no port
self.assertEqual(p.pythonize("host_123"),
{'address': "host_123"})
# colon but no port number
self.assertRaises(ValueError, p.pythonize, "host_123:")
# only colon, no addr, no port number
self.assertRaises(ValueError, p.pythonize, ":")
# no address, only port number
self.assertEqual(p.pythonize(":445"),
{'address': "",
'port': 445})
    # :fixme: IPv6 addresses are not tested since they are not parsed
    # correctly
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
mattharrison/pygrametl | pygrametl/steps.py | 1 | 20832 | """This module contains classes for making "steps" in an ETL flow.
Steps can be connected such that a row flows from step to step and
each step does something with the row.
"""
# Copyright (c) 2009, 2010, Christian Thomsen ([email protected])
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pygrametl
__author__ = "Christian Thomsen"
__maintainer__ = "Christian Thomsen"
__version__ = '0.1.1.0'
__all__ = ['Step', 'SourceStep', 'MappingStep', 'ValueMappingStep',
'PrintStep', 'DimensionStep', 'SCDimensionStep', 'RenamingStep',
'GarbageStep', 'ConditionalStep', 'CopyStep',
'connectsteps']
def connectsteps(*steps):
"""Set a.next = b, b.next = c, etc. when given the steps a, b, c, ..."""
for i in range(len(steps) - 1):
steps[i].next = steps[i+1]
class Step(object):
"""The basic class for steps in an ETL flow."""
__steps = {}
def __init__(self, worker=None, next=None, name=None):
"""Arguments:
- worker: A function f(row) that performs the Step's operation.
If None, self.defaultworker is used. Default: None
- next: The default next step to use. This should be 1) an instance
of a Step, 2) the name of a Step, or 3) None.
             If it is a name, the next step will be looked up dynamically
each time. If it is None, no default step will exist and rows
will not be passed on. Default: None
- name: A name for the Step instance. This is used when another
             Step (implicitly or explicitly) passes on rows. If two instances
have the same name, the name is mapped to the instance that was
created the latest. Default: None
"""
if name is not None:
self.__class__.__steps[name] = self
self.__name = name
self.__redirected = False
self.__row = None
self.worker = (worker or self.defaultworker)
self.next = next
def process(self, row):
"""Perform the Step's operation on the given row.
If the row is not explicitly redirected (see _redirect), it will
        be passed on to the next step if this has been set.
"""
self.__redirected = False
self.__row = row
self.worker(row)
self.__row = None
if self.next is None or self.__redirected:
return
self._inject(row, self.next)
def _redirect(self, target):
"""Redirect the current row to the given target.
The target is either an instance of Step or the name of a Step
instance.
"""
self.__redirected = True
self._inject(self.__row, target)
def _inject(self, row, target=None):
"""Give a row to another Step before the current row is passed on.
The target is either 1) an instance of Step, 2) the name of a Step
instance, or 3) None. If None, the next default Step is used
and must be defined.
"""
if target is None:
target = self.next
if isinstance(target, Step):
target.process(row)
else:
self.__class__.__steps[target].process(row)
def __call__(self, row):
self.process(row)
def name(self):
"""Return the name of the Step instance"""
return self.__name
@classmethod
def getstep(cls, name):
"""Return the Step instance with the given name"""
return cls.__steps.get(name)
def defaultworker(self, row):
"""Perform the Step's operation on the given row.
Inheriting classes should implement this method.
"""
pass
class SourceStep(Step):
"""A Step that iterates over a data source and gives each row to the
next step. The start method must be called.
"""
def __init__(self, source, next=None, name=None):
"""Arguments:
- source: The data source. Must be iterable.
- next: The default next step to use. This should be 1) an instance
of a Step, 2) the name of a Step, or 3) None.
             If it is a name, the next step will be looked up dynamically
each time. If it is None, no default step will exist and rows
will not be passed on. Default: None
- name: A name for the Step instance. This is used when another
             Step (implicitly or explicitly) passes on rows. If two instances
have the same name, the name is mapped to the instance that was
created the latest. Default: None
"""
Step.__init__(self, None, next, name)
self.source = source
def start(self):
"""Start the iteration of the source's rows and pass them on."""
for row in self.source:
self.process(row)
class MappingStep(Step):
"""A Step that applies functions to attributes in rows."""
def __init__(self, targets, requiretargets=True, next=None, name=None):
"""Argument:
- targets: A sequence of (name, function) pairs. For each element,
row[name] is set to function(row[name]) for each row given to the
step.
- requiretargets: A flag that decides if a KeyError should be raised
if a name from targets does not exist in a row. If True, a
KeyError is raised, if False the missing attribute is ignored and
not set. Default: True
- next: The default next step to use. This should be 1) an instance
of a Step, 2) the name of a Step, or 3) None.
             If it is a name, the next step will be looked up dynamically
each time. If it is None, no default step will exist and rows
will not be passed on. Default: None
- name: A name for the Step instance. This is used when another
             Step (implicitly or explicitly) passes on rows. If two instances
have the same name, the name is mapped to the instance that was
created the latest. Default: None
"""
Step.__init__(self, None, next, name)
self.targets = targets
self.requiretargets = requiretargets
def defaultworker(self, row):
for (element, function) in self.targets:
if element in row:
row[element] = function(row[element])
elif self.requiretargets:
raise KeyError, "%s not found in row" % (element,)
class ValueMappingStep(Step):
"""A Step that Maps values to other values (e.g., DK -> Denmark)"""
def __init__(self, outputatt, inputatt, mapping, requireinput=True,
defaultvalue=None, next=None, name=None):
"""Arguments:
- outputatt: The attribute to write the mapped value to in each row.
- inputatt: The attribute to map.
- mapping: A dict with the mapping itself.
- requireinput: A flag that decides if a KeyError should be raised
if inputatt does not exist in a given row. If True, a KeyError
             will be raised when the attribute is missing. If False,
             the outputatt will be set to defaultvalue. Default: True
- defaultvalue: The default value to use when the mapping cannot be
done. Default: None
- next: The default next step to use. This should be 1) an instance
of a Step, 2) the name of a Step, or 3) None.
             If it is a name, the next step will be looked up dynamically
each time. If it is None, no default step will exist and rows
will not be passed on. Default: None
- name: A name for the Step instance. This is used when another
             Step (implicitly or explicitly) passes on rows. If two instances
have the same name, the name is mapped to the instance that was
created the latest. Default: None
"""
Step.__init__(self, None, next, name)
self.outputatt = outputatt
self.inputatt = inputatt
self.mapping = mapping
self.defaultvalue = defaultvalue
self.requireinput = requireinput
def defaultworker(self, row):
if self.inputatt in row:
row[self.outputatt] = self.mapping.get(row[self.inputatt],
self.defaultvalue)
elif not self.requireinput:
            row[self.outputatt] = self.defaultvalue
        else:
            raise KeyError, "%s not found in row" % (self.inputatt,)
class PrintStep(Step):
"""A Step that prints each given row."""
def __init__(self, next=None, name=None):
"""Arguments:
- next: The default next step to use. This should be 1) an instance
of a Step, 2) the name of a Step, or 3) None.
             If it is a name, the next step will be looked up dynamically
each time. If it is None, no default step will exist and rows
will not be passed on. Default: None
- name: A name for the Step instance. This is used when another
             Step (implicitly or explicitly) passes on rows. If two instances
have the same name, the name is mapped to the instance that was
created the latest. Default: None
"""
Step.__init__(self, None, next, name)
def defaultworker(self, row):
print(row)
class DimensionStep(Step):
"""A Step that performs ensure(row) on a given dimension for each row."""
def __init__(self, dimension, keyfield=None, next=None, name=None):
"""Arguments:
- dimension: the Dimension object to call ensure on.
- keyfield: the name of the attribute that in each row is set to
hold the key value for the dimension member
- next: The default next step to use. This should be 1) an instance
of a Step, 2) the name of a Step, or 3) None.
             If it is a name, the next step will be looked up dynamically
each time. If it is None, no default step will exist and rows
will not be passed on. Default: None
- name: A name for the Step instance. This is used when another
             Step (implicitly or explicitly) passes on rows. If two instances
have the same name, the name is mapped to the instance that was
created the latest. Default: None
"""
Step.__init__(self, None, next, name)
self.dimension = dimension
self.keyfield = keyfield
def defaultworker(self, row):
key = self.dimension.ensure(row)
if self.keyfield is not None:
row[self.keyfield] = key
class SCDimensionStep(Step):
"""A Step that performs scdensure(row) on a given dimension for each row."""
def __init__(self, dimension, next=None, name=None):
"""Arguments:
           - dimension: the Dimension object to call scdensure on.
- next: The default next step to use. This should be 1) an instance
of a Step, 2) the name of a Step, or 3) None.
If it is a name, the next step will be looked up dynamically
each time. If it is None, no default step will exist and rows
will not be passed on. Default: None
- name: A name for the Step instance. This is used when another
Step (implicitly or explicitly) passes on rows. If two instances
have the same name, the name is mapped to the instance that was
created last. Default: None
"""
Step.__init__(self, None, next, name)
self.dimension = dimension
def defaultworker(self, row):
self.dimension.scdensure(row)
class RenamingFromToStep(Step):
"""A Step that performs renamings of attributes in rows."""
def __init__(self, renaming, next=None, name=None):
"""Arguments:
- renaming: A dict with pairs (oldname, newname) which will
be used by pygrametl.renamefromto to do the renaming
- next: The default next step to use. This should be 1) an instance
of a Step, 2) the name of a Step, or 3) None.
If it is a name, the next step will be looked up dynamically
each time. If it is None, no default step will exist and rows
will not be passed on. Default: None
- name: A name for the Step instance. This is used when another
Step (implicitly or explicitly) passes on rows. If two instances
have the same name, the name is mapped to the instance that was
created last. Default: None
"""
Step.__init__(self, None, next, name)
self.renaming = renaming
def defaultworker(self, row):
pygrametl.renamefromto(row, self.renaming)
RenamingStep = RenamingFromToStep # for backwards compat.
class RenamingToFromStep(RenamingFromToStep):
def defaultworker(self, row):
pygrametl.renametofrom(row, self.renaming)
class GarbageStep(Step):
""" A Step that does nothing. Rows are neither modified nor passed on."""
def __init__(self, name=None):
"""Argument:
- name: A name for the Step instance. This is used when another
Step (implicitly or explicitly) passes on rows. If two instances
have the same name, the name is mapped to the instance that was
created last. Default: None
"""
Step.__init__(self, None, None, name)
def process(self, row):
return
class ConditionalStep(Step):
"""A Step that redirects rows based on a condition."""
def __init__(self, condition, whentrue, whenfalse=None, name=None):
"""Arguments:
- condition: A function f(row) that is evaluated for each row.
- whentrue: The next step to use if the condition evaluates to a
true value. This argument should be 1) an instance of a Step,
2) the name of a Step, or 3) None.
If it is a name, the next step will be looked up dynamically
each time. If it is None, no default step will exist and rows
will not be passed on.
- whenfalse: The Step that rows are sent to when the condition
evaluates to a false value. If None, the rows are silently
discarded. Default: None
- name: A name for the Step instance. This is used when another
Step (implicitly or explicitly) passes on rows. If two instances
have the same name, the name is mapped to the instance that was
created last. Default: None
"""
Step.__init__(self, None, whentrue, name)
self.whenfalse = whenfalse
self.condition = condition
self.__nowhere = GarbageStep()
def defaultworker(self, row):
if not self.condition(row):
if self.whenfalse is None:
self._redirect(self.__nowhere)
else:
self._redirect(self.whenfalse)
# else process will pass on the row to self.next (the whentrue step)
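# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical routing example: rows with a positive 'amount' are
# passed on to the whentrue step, all other rows are redirected to whenfalse.
# PrintStep is used for both targets only for illustration.
def _example_conditionalstep():
    accepted = PrintStep(name='accepted')
    rejected = PrintStep(name='rejected')
    router = ConditionalStep(condition=lambda row: row['amount'] > 0,
                             whentrue=accepted, whenfalse=rejected)
    router.process({'amount': 42})    # printed by 'accepted'
    router.process({'amount': -1})    # printed by 'rejected'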
class CopyStep(Step):
"""A Step that copies each row and passes on the copy and the original"""
def __init__(self, originaldest, copydest, deepcopy=False, name=None):
"""Arguments:
- originaldest: The Step each given row is passed on to.
This argument should be 1) an instance of a Step,
2) the name of a Step, or 3) None.
If it is a name, the next step will be looked up dynamically
each time. If it is None, no default step will exist and rows
will not be passed on.
- copydest: The Step a copy of each given row is passed on to.
This argument can be 1) an instance of a Step or 2) the name
of a step.
- name: A name for the Step instance. This is used when another
Step (implicitly or explicitly) passes on rows. If two instances
have the same name, the name is mapped to the instance that was
created last. Default: None
- deepcopy: Decides if the copy should be deep or not.
Default: False
"""
Step.__init__(self, None, originaldest, name)
if copydest is None:
raise ValueError, 'copydest is None'
self.copydest = copydest
import copy
if deepcopy:
self.copyfunc = copy.deepcopy
else:
self.copyfunc = copy.copy
def defaultworker(self, row):
copy = self.copyfunc(row)
self._inject(copy, self.copydest)
# process will pass on row to originaldest = self.next
# For aggregations. Experimental.
class AggregatedRow(dict):
pass
class AggregatingStep(Step):
def __init__(self, aggregator=None, finalizer=None, next=None, name=None):
Step.__init__(self, aggregator, next, name)
self.finalizer = finalizer or self.defaultfinalizer
def process(self, row):
if isinstance(row, AggregatedRow):
self.finalizer(row)
if self.next is not None:
Step._inject(self, row, self.next)
else:
self.worker(row)
def defaultworker(self, row):
pass
def defaultfinalizer(self, row):
pass
class SumAggregator(AggregatingStep):
def __init__(self, field, next=None, name=None):
AggregatingStep.__init__(self, None, None, next, name)
self.sum = 0
self.field = field
def defaultworker(self, row):
self.sum += row[self.field]
def defaultfinalizer(self, row):
row[self.field] = self.sum
self.sum = 0
class AvgAggregator(AggregatingStep):
def __init__(self, field, next=None, name=None):
AggregatingStep.__init__(self, None, None, next, name)
self.sum = 0
self.cnt = 0
self.field = field
def defaultworker(self, row):
self.sum += row[self.field]
self.cnt += 1
def defaultfinalizer(self, row):
if self.cnt > 0:
row[self.field] = self.sum / float(self.cnt)
else:
row[self.field] = 0
self.sum = 0
self.cnt = 0
class MaxAggregator(AggregatingStep):
def __init__(self, field, next=None, name=None):
AggregatingStep.__init__(self, None, None, next, name)
self.max = None
self.field = field
def defaultworker(self, row):
if self.max is None or row[self.field] > self.max:
self.max = row[self.field]
def defaultfinalizer(self, row):
row[self.field] = self.max
self.max = None
class MinAggregator(AggregatingStep):
def __init__(self, field, next=None, name=None):
AggregatingStep.__init__(self, None, None, next, name)
self.min = None
self.field = field
def defaultworker(self, row):
if self.min is None or row[self.field] < self.min:
self.min = row[self.field]
def defaultfinalizer(self, row):
row[self.field] = self.min
self.min = None
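# --- Illustrative usage sketch (not part of the original module) ---
# The aggregating steps above accumulate values from ordinary rows and only
# write the aggregate back (and pass the row on) when they receive an
# AggregatedRow. The field name 'price' and the values are made up for
# illustration.
def _example_sumaggregator():
    summer = SumAggregator('price', next=PrintStep())
    for price in (10, 20, 30):
        summer.process({'price': price})   # accumulated, not passed on
    total = AggregatedRow(price=None)
    summer.process(total)                  # finalized: total['price'] == 60
    return total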
| bsd-2-clause |
afilipovich/graphite-web | webapp/graphite/metrics/views.py | 1 | 9842 | """Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import traceback
from django.http import HttpResponse, HttpResponseBadRequest
from django.conf import settings
from graphite.account.models import Profile
from graphite.util import getProfile, getProfileByUsername, defaultUser, json
from graphite.logger import log
from graphite.storage import STORE
from graphite.metrics.search import searcher
from graphite.carbonlink import CarbonLink
import fnmatch, os
try:
import cPickle as pickle
except ImportError:
import pickle
def index_json(request):
jsonp = request.REQUEST.get('jsonp', False)
matches = []
for root, dirs, files in os.walk(settings.WHISPER_DIR):
root = root.replace(settings.WHISPER_DIR, '')
for basename in files:
if fnmatch.fnmatch(basename, '*.wsp'):
matches.append(os.path.join(root, basename))
matches = [ m.replace('.wsp','').replace('/', '.') for m in sorted(matches) ]
if jsonp:
return HttpResponse("%s(%s)" % (jsonp, json.dumps(matches)), mimetype='text/javascript')
else:
return HttpResponse(json.dumps(matches), mimetype='application/json')
def search_view(request):
try:
query = str( request.REQUEST['query'] )
except:
return HttpResponseBadRequest(content="Missing required parameter 'query'", mimetype="text/plain")
search_request = {
'query' : query,
'max_results' : int( request.REQUEST.get('max_results', 25) ),
'keep_query_pattern' : int(request.REQUEST.get('keep_query_pattern', 0)),
}
#if not search_request['query'].endswith('*'):
# search_request['query'] += '*'
results = sorted(searcher.search(**search_request))
result_data = json.dumps( dict(metrics=results) )
return HttpResponse(result_data, mimetype='application/json')
def find_view(request):
"View for finding metrics matching a given pattern"
profile = getProfile(request)
format = request.REQUEST.get('format', 'treejson')
local_only = int( request.REQUEST.get('local', 0) )
wildcards = int( request.REQUEST.get('wildcards', 0) )
fromTime = int( request.REQUEST.get('from', -1) )
untilTime = int( request.REQUEST.get('until', -1) )
if fromTime == -1:
fromTime = None
if untilTime == -1:
untilTime = None
automatic_variants = int( request.REQUEST.get('automatic_variants', 0) )
try:
query = str( request.REQUEST['query'] )
except:
return HttpResponseBadRequest(content="Missing required parameter 'query'", mimetype="text/plain")
if '.' in query:
base_path = query.rsplit('.', 1)[0] + '.'
else:
base_path = ''
if format == 'completer':
query = query.replace('..', '*.')
if not query.endswith('*'):
query += '*'
if automatic_variants:
query_parts = query.split('.')
for i,part in enumerate(query_parts):
if ',' in part and '{' not in part:
query_parts[i] = '{%s}' % part
query = '.'.join(query_parts)
try:
matches = list( STORE.find(query, fromTime, untilTime, local=local_only) )
except:
log.exception()
raise
log.info('find_view query=%s local_only=%s matches=%d' % (query, local_only, len(matches)))
matches.sort(key=lambda node: node.name)
log.info("received remote find request: pattern=%s from=%s until=%s local_only=%s format=%s matches=%d" % (query, fromTime, untilTime, local_only, format, len(matches)))
if format == 'treejson':
content = tree_json(matches, base_path, wildcards=profile.advancedUI or wildcards)
response = HttpResponse(content, mimetype='application/json')
elif format == 'pickle':
content = pickle_nodes(matches)
response = HttpResponse(content, mimetype='application/pickle')
elif format == 'completer':
results = []
for node in matches:
node_info = dict(path=node.path, name=node.name, is_leaf=str(int(node.is_leaf)))
if not node.is_leaf:
node_info['path'] += '.'
results.append(node_info)
if len(results) > 1 and wildcards:
wildcardNode = {'name' : '*'}
results.append(wildcardNode)
content = json.dumps({ 'metrics' : results })
response = HttpResponse(content, mimetype='application/json')
else:
return HttpResponseBadRequest(content="Invalid value for 'format' parameter", mimetype="text/plain")
response['Pragma'] = 'no-cache'
response['Cache-Control'] = 'no-cache'
return response
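# --- Illustrative request sketch (not part of the original module) ---
# It assumes the project's urls.py exposes find_view under /metrics/find/
# (the usual graphite-web routing, which is not shown in this file); only the
# query-string parameters are taken from the view above.
def _example_find_request():
    from django.test import Client
    return Client().get('/metrics/find/', {
        'query': 'carbon.agents.*',   # required
        'format': 'completer',        # or 'treejson' (default) / 'pickle'
        'wildcards': 1,
    })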
def list_view(request):
hosts = []
store = STORE
try:
query = str( request.REQUEST['query'] )
except:
return HttpResponseBadRequest(content="Missing required parameter 'query'", mimetype="text/plain")
query_items = query.split('.')
for t in list( store.find('*') ):
for env in list( store.find('%s.*' % t.path) ):
for app in list( store.find('%s.*' % env.path) ):
for dc in list( store.find('%s.*' % app.path) ):
for id in list( store.find('%s.*' % dc.path) ):
is_in = True
for q in query_items:
if q in id.path:
is_in = True
else:
is_in = False
break
if is_in:
hosts.append({'path': id.path})
hosts.sort(key=lambda node: node['path'])
content = json.dumps({ 'metrics' : hosts })
response = HttpResponse(content, mimetype='application/json')
response['Pragma'] = 'no-cache'
response['Cache-Control'] = 'no-cache'
return response
def expand_view(request):
"View for expanding a pattern into matching metric paths"
local_only = int( request.REQUEST.get('local', 0) )
group_by_expr = int( request.REQUEST.get('groupByExpr', 0) )
leaves_only = int( request.REQUEST.get('leavesOnly', 0) )
results = {}
for query in request.REQUEST.getlist('query'):
results[query] = set()
for node in STORE.find(query, local=local_only):
if node.is_leaf or not leaves_only:
results[query].add( node.path )
# Convert our results to sorted lists because sets aren't json-friendly
if group_by_expr:
for query, matches in results.items():
results[query] = sorted(matches)
else:
results = sorted( reduce(set.union, results.values(), set()) )
result = {
'results' : results
}
response = HttpResponse(json.dumps(result), mimetype='application/json')
response['Pragma'] = 'no-cache'
response['Cache-Control'] = 'no-cache'
return response
def get_metadata_view(request):
key = request.REQUEST['key']
metrics = request.REQUEST.getlist('metric')
results = {}
for metric in metrics:
try:
results[metric] = CarbonLink.get_metadata(metric, key)
except:
log.exception()
results[metric] = dict(error="Unexpected error occurred in CarbonLink.get_metadata(%s, %s)" % (metric, key))
return HttpResponse(json.dumps(results), mimetype='application/json')
def set_metadata_view(request):
results = {}
if request.method == 'GET':
metric = request.GET['metric']
key = request.GET['key']
value = request.GET['value']
try:
results[metric] = CarbonLink.set_metadata(metric, key, value)
except:
log.exception()
results[metric] = dict(error="Unexpected error occurred in CarbonLink.set_metadata(%s, %s)" % (metric, key))
elif request.method == 'POST':
if request.META.get('CONTENT_TYPE') == 'application/json':
operations = json.loads( request.raw_post_data )
else:
operations = json.loads( request.POST['operations'] )
for op in operations:
metric = None
try:
metric, key, value = op['metric'], op['key'], op['value']
results[metric] = CarbonLink.set_metadata(metric, key, value)
except:
log.exception()
if metric:
results[metric] = dict(error="Unexpected error occurred in bulk CarbonLink.set_metadata(%s)" % metric)
else:
results = dict(error="Invalid request method")
return HttpResponse(json.dumps(results), mimetype='application/json')
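# --- Illustrative payload sketch (not part of the original module) ---
# A bulk POST to the view above with Content-Type 'application/json' (or an
# 'operations' form field) carries a list of operations, each with 'metric',
# 'key' and 'value'; the metric names and values here are made up for
# illustration.
_EXAMPLE_SET_METADATA_OPERATIONS = [
    {'metric': 'carbon.agents.host1.cpuUsage', 'key': 'unit', 'value': '%'},
    {'metric': 'carbon.agents.host1.memUsage', 'key': 'unit', 'value': 'MB'},
]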
def tree_json(nodes, base_path, wildcards=False):
results = []
branchNode = {
'allowChildren': 1,
'expandable': 1,
'leaf': 0,
}
leafNode = {
'allowChildren': 0,
'expandable': 0,
'leaf': 1,
}
#Add a wildcard node if appropriate
if len(nodes) > 1 and wildcards:
wildcardNode = {'text' : '*', 'id' : base_path + '*'}
if any(not n.is_leaf for n in nodes):
wildcardNode.update(branchNode)
else:
wildcardNode.update(leafNode)
results.append(wildcardNode)
found = set()
results_leaf = []
results_branch = []
for node in nodes: #Now let's add the matching children
if node.name in found:
continue
found.add(node.name)
resultNode = {
'text' : str(node.name),
'id' : base_path + str(node.name),
}
if node.is_leaf:
resultNode.update(leafNode)
results_leaf.append(resultNode)
else:
resultNode.update(branchNode)
results_branch.append(resultNode)
results.extend(results_branch)
results.extend(results_leaf)
return json.dumps(results)
def pickle_nodes(nodes):
nodes_info = []
for node in nodes:
info = dict(path=node.path, is_leaf=node.is_leaf)
if node.is_leaf:
info['intervals'] = node.intervals
nodes_info.append(info)
return pickle.dumps(nodes_info, protocol=-1)
def any(iterable): #python2.4 compatibility
for i in iterable:
if i:
return True
return False
| apache-2.0 |
inasafe/inasafe | safe_extras/raven/transport/base.py | 12 | 1219 | """
raven.transport.base
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
# Helper for external transports
has_newstyle_transports = True
class Transport(object):
"""
All transport implementations need to subclass this class
You must implement a send method (or an async_send method if
sub-classing AsyncTransport).
"""
is_async = False
scheme = []
def send(self, url, data, headers):
"""
You need to override this to do something with the actual
data. Usually - this is sending to a server
"""
raise NotImplementedError
class AsyncTransport(Transport):
"""
All asynchronous transport implementations should subclass this
class.
You must implement a async_send method.
"""
is_async = True
def async_send(self, url, data, headers, success_cb, error_cb):
"""
Override this method for asynchronous transports. Call
`success_cb()` if the send succeeds or `error_cb(exception)`
if the send fails.
"""
raise NotImplementedError
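# --- Illustrative subclass sketch (not part of the original module) ---
# A minimal synchronous transport that only records what would have been
# sent; the class name, the 'memory' scheme and the 'messages' attribute are
# made up for illustration.
class _ExampleMemoryTransport(Transport):
    scheme = ['memory']

    def __init__(self, *args, **kwargs):
        self.messages = []

    def send(self, url, data, headers):
        # A real transport would deliver 'data' to 'url' with 'headers' here.
        self.messages.append((url, data, headers))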
| gpl-3.0 |
florianholzapfel/home-assistant | homeassistant/components/cover/tellduslive.py | 16 | 1213 | """
Support for Tellstick covers using Tellstick Net.
This platform uses the Telldus Live online service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover.tellduslive/
"""
import logging
from homeassistant.components.cover import CoverDevice
from homeassistant.components.tellduslive import TelldusLiveEntity
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup covers."""
if discovery_info is None:
return
add_devices(TelldusLiveCover(hass, cover) for cover in discovery_info)
class TelldusLiveCover(TelldusLiveEntity, CoverDevice):
"""Representation of a cover."""
@property
def is_closed(self):
"""Return the current position of the cover."""
return self.device.is_down
def close_cover(self, **kwargs):
"""Close the cover."""
self.device.down()
self.changed()
def open_cover(self, **kwargs):
"""Open the cover."""
self.device.up()
self.changed()
def stop_cover(self, **kwargs):
"""Stop the cover."""
self.device.stop()
self.changed()
| mit |
mattrobenolt/AutobahnTestSuite | autobahntestsuite/autobahntestsuite/caseset.py | 4 | 5558 | ###############################################################################
##
## Copyright (C) 2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ("CaseSet",)
import re
class CaseSet:
def __init__(self, CaseSetName, CaseBaseName, Cases, CaseCategories, CaseSubCategories):
self.CaseSetName = CaseSetName
self.CaseBaseName = CaseBaseName
self.Cases = Cases
self.CaseCategories = CaseCategories
self.CaseSubCategories = CaseSubCategories
## Index:
## "1.2.3" => Index (1-based) of Case1_2_3 in Cases
##
self.CasesIndices = {}
i = 1
for c in self.Cases:
self.CasesIndices[self.caseClasstoId(c)] = i
i += 1
## Index:
## "1.2.3" => Case1_2_3
##
self.CasesById = {}
for c in self.Cases:
self.CasesById[self.caseClasstoId(c)] = c
def caseClasstoId(self, klass):
"""
Class1_2_3 => '1.2.3'
"""
l = len(self.CaseBaseName)
return '.'.join(klass.__name__[l:].split("_"))
def caseClasstoIdTuple(self, klass):
"""
Class1_2_3 => (1, 2, 3)
"""
l = len(self.CaseBaseName)
return tuple([int(x) for x in klass.__name__[l:].split("_")])
def caseIdtoIdTuple(self, id):
"""
'1.2.3' => (1, 2, 3)
"""
return tuple([int(x) for x in id.split('.')])
def caseIdTupletoId(self, idt):
"""
(1, 2, 3) => '1.2.3'
"""
return '.'.join([str(x) for x in list(idt)])
def caseClassToPrettyDescription(self, klass):
"""
Truncates the rest of the description after the first HTML tag
and coalesces whitespace
"""
return ' '.join(klass.DESCRIPTION.split('<')[0].split())
def resolveCasePatternList(self, patterns):
"""
Return list of test cases that match against a list of case patterns.
"""
specCases = []
for c in patterns:
if c.find('*') >= 0:
s = c.replace('.', '\.').replace('*', '.*')
p = re.compile(s)
t = []
for x in self.CasesIndices.keys():
if p.match(x):
t.append(self.caseIdtoIdTuple(x))
for h in sorted(t):
specCases.append(self.caseIdTupletoId(h))
else:
specCases.append(c)
return specCases
def parseSpecCases(self, spec):
"""
Return list of test cases that match against case patterns, minus exclude patterns.
"""
specCases = self.resolveCasePatternList(spec["cases"])
if spec.has_key("exclude-cases"):
excludeCases = self.resolveCasePatternList(spec["exclude-cases"])
else:
excludeCases = []
c = list(set(specCases) - set(excludeCases))
cases = [self.caseIdTupletoId(y) for y in sorted([self.caseIdtoIdTuple(x) for x in c])]
return cases
def parseExcludeAgentCases(self, spec):
"""
Parses "exclude-agent-cases" from the spec into a list of pairs
of agent pattern and case pattern list.
"""
if spec.has_key("exclude-agent-cases"):
ee = spec["exclude-agent-cases"]
pats1 = []
for e in ee:
s1 = "^" + e.replace('.', '\.').replace('*', '.*') + "$"
p1 = re.compile(s1)
pats2 = []
for z in ee[e]:
s2 = "^" + z.replace('.', '\.').replace('*', '.*') + "$"
p2 = re.compile(s2)
pats2.append(p2)
pats1.append((p1, pats2))
return pats1
else:
return []
def checkAgentCaseExclude(self, patterns, agent, case):
"""
Check if we should exclude a specific case for given agent.
"""
for p in patterns:
if p[0].match(agent):
for pp in p[1]:
if pp.match(case):
return True
return False
def getCasesByAgent(self, spec):
caseIds = self.parseSpecCases(spec)
epats = self.parseExcludeAgentCases(spec)
res = []
for server in spec['testees']:
agent = server['name']
res2 = []
for caseId in caseIds:
if not self.checkAgentCaseExclude(epats, agent, caseId):
res2.append(self.CasesById[caseId])
if len(res2) > 0:
o = {}
o['name'] = str(server['name'])
o['url'] = str(server['url'])
o['auth'] = server.get('auth', None)
o['cases'] = res2
res.append(o)
return res
def generateCasesByTestee(self, spec):
caseIds = self.parseSpecCases(spec)
epats = self.parseExcludeAgentCases(spec)
res = {}
for obj in spec['testees']:
testee = obj['name']
res[testee] = []
for caseId in caseIds:
if not self.checkAgentCaseExclude(epats, testee, caseId):
res[testee].append(self.CasesById[caseId])
return res
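# --- Illustrative spec sketch (not part of the original module) ---
# The keys below are the ones parseSpecCases, parseExcludeAgentCases and
# getCasesByAgent read; the concrete case ids, agent pattern and URL are made
# up for illustration.
_EXAMPLE_SPEC = {
    "cases": ["1.*", "2.1.1"],
    "exclude-cases": ["1.2.*"],
    "exclude-agent-cases": {"AutobahnTestee*": ["2.1.1"]},
    "testees": [{"name": "AutobahnTestee", "url": "ws://127.0.0.1:9001"}],
}
# A CaseSet instance built for a concrete case module would then yield the
# per-testee work list via caseset.getCasesByAgent(_EXAMPLE_SPEC).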
| apache-2.0 |
icomms/wqmanager | reportlab/graphics/charts/barcharts.py | 4 | 53674 | #Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/barcharts.py
__version__=''' $Id: barcharts.py 3604 2009-11-27 16:35:29Z meitham $ '''
__doc__="""This module defines a variety of Bar Chart components.
The basic flavors are stacked and side-by-side, available in horizontal and
vertical versions.
"""
import copy
from reportlab.lib import colors
from reportlab.lib.validators import isNumber, isColor, isColorOrNone, isString,\
isListOfStrings, SequenceOf, isBoolean, isNoneOrShape, isStringOrNone,\
NoneOr, isListOfNumbersOrNone
from reportlab.graphics.widgets.markers import uSymbol2Symbol, isSymbol
from reportlab.lib.formatters import Formatter
from reportlab.lib.attrmap import AttrMap, AttrMapValue
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection, PropHolder
from reportlab.graphics.shapes import Line, Rect, Group, Drawing, NotImplementedError
from reportlab.graphics.charts.axes import XCategoryAxis, YValueAxis, YCategoryAxis, XValueAxis
from reportlab.graphics.charts.textlabels import BarChartLabel, NA_Label, NoneOrInstanceOfNA_Label
from reportlab.graphics.charts.areas import PlotArea
from reportlab.graphics.charts.legends import _objStr
class BarChartProperties(PropHolder):
_attrMap = AttrMap(
strokeColor = AttrMapValue(isColorOrNone, desc='Color of the bar border.'),
fillColor = AttrMapValue(isColorOrNone, desc='Color of the bar interior area.'),
strokeWidth = AttrMapValue(isNumber, desc='Width of the bar border.'),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array of a line.'),
symbol = AttrMapValue(None, desc='A widget to be used instead of a normal bar.',advancedUsage=1),
name = AttrMapValue(isString, desc='Text to be associated with a bar (eg seriesname)'),
swatchMarker = AttrMapValue(NoneOr(isSymbol), desc="None or makeMarker('Diamond') ...",advancedUsage=1),
)
def __init__(self):
self.strokeColor = None
self.fillColor = colors.blue
self.strokeWidth = 0.5
self.symbol = None
self.strokeDashArray = None
# Bar chart classes.
class BarChart(PlotArea):
"Abstract base class, unusable by itself."
_attrMap = AttrMap(BASE=PlotArea,
useAbsolute = AttrMapValue(isNumber, desc='Flag to use absolute spacing values.',advancedUsage=1),
barWidth = AttrMapValue(isNumber, desc='The width of an individual bar.'),
groupSpacing = AttrMapValue(isNumber, desc='Width between groups of bars.'),
barSpacing = AttrMapValue(isNumber, desc='Width between individual bars.'),
bars = AttrMapValue(None, desc='Handle of the individual bars.'),
valueAxis = AttrMapValue(None, desc='Handle of the value axis.'),
categoryAxis = AttrMapValue(None, desc='Handle of the category axis.'),
data = AttrMapValue(None, desc='Data to be plotted, list of (lists of) numbers.'),
barLabels = AttrMapValue(None, desc='Handle to the list of bar labels.'),
barLabelFormat = AttrMapValue(None, desc='Formatting string or function used for bar labels.'),
barLabelCallOut = AttrMapValue(None, desc='Callout function(label)\nlabel._callOutInfo = (self,g,rowNo,colNo,x,y,width,height,x00,y00,x0,y0)',advancedUsage=1),
barLabelArray = AttrMapValue(None, desc='explicit array of bar label values, must match size of data if present.'),
reversePlotOrder = AttrMapValue(isBoolean, desc='If true, reverse common category plot order.',advancedUsage=1),
naLabel = AttrMapValue(NoneOrInstanceOfNA_Label, desc='Label to use for N/A values.',advancedUsage=1),
annotations = AttrMapValue(None, desc='list of callables, will be called with self, xscale, yscale.'),
)
def makeSwatchSample(self, rowNo, x, y, width, height):
baseStyle = self.bars
styleIdx = rowNo % len(baseStyle)
style = baseStyle[styleIdx]
strokeColor = getattr(style, 'strokeColor', getattr(baseStyle,'strokeColor',None))
fillColor = getattr(style, 'fillColor', getattr(baseStyle,'fillColor',None))
strokeDashArray = getattr(style, 'strokeDashArray', getattr(baseStyle,'strokeDashArray',None))
strokeWidth = getattr(style, 'strokeWidth', getattr(baseStyle, 'strokeWidth', None))
swatchMarker = getattr(style, 'swatchMarker', getattr(baseStyle, 'swatchMarker',None))
if swatchMarker:
return uSymbol2Symbol(swatchMarker,x+width/2.,y+height/2.,fillColor)
return Rect(x,y,width,height,strokeWidth=strokeWidth,strokeColor=strokeColor,
strokeDashArray=strokeDashArray,fillColor=fillColor)
def getSeriesName(self,i,default=None):
'''return series name i or default'''
return _objStr(getattr(self.bars[i],'name',default))
def __init__(self):
assert self.__class__.__name__ not in ('BarChart','BarChart3D'), 'Abstract Class %s Instantiated' % self.__class__.__name__
if self._flipXY:
self.categoryAxis = YCategoryAxis()
self.valueAxis = XValueAxis()
else:
self.categoryAxis = XCategoryAxis()
self.valueAxis = YValueAxis()
PlotArea.__init__(self)
self.barSpacing = 0
self.reversePlotOrder = 0
# this defines two series of 3 points. Just an example.
self.data = [(100,110,120,130),
(70, 80, 85, 90)]
# control bar spacing. If useAbsolute = 1 then
# the next parameters are in points; otherwise
# they are 'proportions' and are normalized to
# fit the available space. Half a barSpacing
# is allocated at the beginning and end of the
# chart.
self.useAbsolute = 0 #- not done yet
self.barWidth = 10
self.groupSpacing = 5
self.barSpacing = 0
self.barLabels = TypedPropertyCollection(BarChartLabel)
self.barLabels.boxAnchor = 'c'
self.barLabels.textAnchor = 'middle'
self.barLabelFormat = None
self.barLabelArray = None
# this says whether the origin is inside or outside
# the bar - +10 means put the origin ten points
# above the tip of the bar if value > 0, or ten
# points inside if bar value < 0. This is different
# to label dx/dy which are not dependent on the
# sign of the data.
self.barLabels.nudge = 0
# if you have multiple series, by default they butt
# together.
# we really need some well-designed default lists of
# colors e.g. from Tufte. These will be used in a
# cycle to set the fill color of each series.
self.bars = TypedPropertyCollection(BarChartProperties)
self.bars.strokeWidth = 1
self.bars.strokeColor = colors.black
self.bars.strokeDashArray = None
self.bars[0].fillColor = colors.red
self.bars[1].fillColor = colors.green
self.bars[2].fillColor = colors.blue
self.naLabel = None #NA_Label()
def demo(self):
"""Shows basic use of a bar chart"""
if self.__class__.__name__=='BarChart':
raise NotImplementedError, 'Abstract Class BarChart has no demo'
drawing = Drawing(200, 100)
bc = self.__class__()
drawing.add(bc)
return drawing
def _getConfigureData(self):
cA = self.categoryAxis
data = self.data
if cA.style not in ('parallel','parallel_3d'):
_data = data
data = max(map(len,_data))*[0]
for d in _data:
for i in xrange(len(d)):
data[i] = data[i] + (d[i] or 0)
data = list(_data) + [data]
self._configureData = data
def _getMinMax(self):
'''Attempt to return the data range'''
self._getConfigureData()
self.valueAxis._setRange(self._configureData)
return self.valueAxis._valueMin, self.valueAxis._valueMax
def _drawBegin(self,org,length):
'''Position and configure value axis, return crossing value'''
vA = self.valueAxis
vA.setPosition(self.x, self.y, length)
self._getConfigureData()
vA.configure(self._configureData)
# if zero is in chart, put the other axis there, otherwise use low
crossesAt = vA.scale(0)
if crossesAt > org+length or crossesAt<org:
crossesAt = org
return crossesAt
def _drawFinish(self):
'''finalize the drawing of a barchart'''
cA = self.categoryAxis
vA = self.valueAxis
cA.configure(self._configureData)
self.calcBarPositions()
g = Group()
g.add(self.makeBackground())
cAdgl = getattr(cA,'drawGridLast',False)
vAdgl = getattr(vA,'drawGridLast',False)
if not cAdgl: cA.makeGrid(g,parent=self, dim=vA.getGridDims)
if not vAdgl: vA.makeGrid(g,parent=self, dim=cA.getGridDims)
g.add(self.makeBars())
g.add(cA)
g.add(vA)
if cAdgl: cA.makeGrid(g,parent=self, dim=vA.getGridDims)
if vAdgl: vA.makeGrid(g,parent=self, dim=cA.getGridDims)
for a in getattr(self,'annotations',()): g.add(a(self,cA.scale,vA.scale))
del self._configureData
return g
def calcBarPositions(self):
"""Works out where they go. default vertical.
Sets an attribute _barPositions which is a list of
lists of (x, y, width, height) matching the data.
"""
flipXY = self._flipXY
if flipXY:
org = self.y
else:
org = self.x
cA = self.categoryAxis
cScale = cA.scale
data = self.data
seriesCount = self._seriesCount = len(data)
self._rowLength = rowLength = max(map(len,data))
groupSpacing, barSpacing, barWidth = self.groupSpacing, self.barSpacing, self.barWidth
style = self.categoryAxis.style
if style=='parallel':
groupWidth = groupSpacing+(seriesCount*barWidth)+(seriesCount-1)*barSpacing
bGap = barWidth+barSpacing
else:
accum = rowLength*[0]
groupWidth = groupSpacing+barWidth
bGap = 0
self._groupWidth = groupWidth
useAbsolute = self.useAbsolute
if useAbsolute:
# bar dimensions are absolute
normFactor = 1.0
else:
# bar dimensions are normalized to fit. How wide
# notionally is one group of bars?
availWidth = cScale(0)[1]
normFactor = availWidth/float(groupWidth)
if self.debug:
print '%d series, %d points per series' % (seriesCount, self._rowLength)
print 'width = %d group + (%d bars * %d barWidth) + (%d gaps * %d interBar) = %d total' % (
groupSpacing, seriesCount, barWidth,
seriesCount-1, barSpacing, groupWidth)
# 'Baseline' correction...
vA = self.valueAxis
vScale = vA.scale
vm, vM = vA._valueMin, vA._valueMax
if vm <= 0 <= vM:
baseLine = vScale(0)
elif 0 < vm:
baseLine = vScale(vm)
elif vM < 0:
baseLine = vScale(vM)
self._baseLine = baseLine
COLUMNS = range(max(map(len,data)))
if useAbsolute:
_cScale = cA._scale
self._normFactor = normFactor
width = self.barWidth*normFactor
self._barPositions = []
reversePlotOrder = self.reversePlotOrder
for rowNo in range(seriesCount):
barRow = []
if reversePlotOrder:
xVal = seriesCount-1 - rowNo
else:
xVal = rowNo
xVal = 0.5*groupSpacing+xVal*bGap
for colNo in COLUMNS:
datum = data[rowNo][colNo]
# Ufff...
if useAbsolute:
x = groupWidth*_cScale(colNo) + xVal + org
else:
(g, gW) = cScale(colNo)
x = g + normFactor*xVal
if datum is None:
height = None
y = baseLine
else:
if style not in ('parallel','parallel_3d'):
y = vScale(accum[colNo])
if y<baseLine: y = baseLine
accum[colNo] = accum[colNo] + datum
datum = accum[colNo]
else:
y = baseLine
height = vScale(datum) - y
if -1e-8<height<=1e-8:
height = 1e-8
if datum<-1e-8: height = -1e-8
barRow.append(flipXY and (y,x,height,width) or (x, y, width, height))
self._barPositions.append(barRow)
def _getLabelText(self, rowNo, colNo):
'''return formatted label text'''
labelFmt = self.barLabelFormat
if labelFmt is None:
labelText = None
elif labelFmt == 'values':
labelText = self.barLabelArray[rowNo][colNo]
elif type(labelFmt) is str:
labelText = labelFmt % self.data[rowNo][colNo]
elif callable(labelFmt):
labelText = labelFmt(self.data[rowNo][colNo])
else:
msg = "Unknown formatter type %s, expected string or function" % labelFmt
raise Exception, msg
return labelText
def _labelXY(self,label,x,y,width,height):
'Compute x, y for a label'
nudge = label.nudge
bt = getattr(label,'boxTarget','normal')
anti = bt=='anti'
if anti: nudge = -nudge
pm = value = height
if anti: value = 0
a = x + 0.5*width
nudge = (height>=0 and 1 or -1)*nudge
if bt=='hi':
if value>=0:
b = y + value + nudge
else:
b = y - nudge
pm = -pm
elif bt=='lo':
if value<=0:
b = y + value + nudge
else:
b = y - nudge
pm = -pm
else:
b = y + value + nudge
label._pmv = pm #the plus minus val
return a,b,pm
def _addBarLabel(self, g, rowNo, colNo, x, y, width, height):
text = self._getLabelText(rowNo,colNo)
if text:
self._addLabel(text, self.barLabels[(rowNo, colNo)], g, rowNo, colNo, x, y, width, height)
def _addNABarLabel(self, g, rowNo, colNo, x, y, width, height):
na = self.naLabel
if na and na.text:
na = copy.copy(na)
v = self.valueAxis._valueMax<=0 and -1e-8 or 1e-8
if width is None: width = v
if height is None: height = v
self._addLabel(na.text, na, g, rowNo, colNo, x, y, width, height)
def _addLabel(self, text, label, g, rowNo, colNo, x, y, width, height):
if label.visible:
labelWidth = stringWidth(text, label.fontName, label.fontSize)
flipXY = self._flipXY
if flipXY:
y0, x0, pm = self._labelXY(label,y,x,height,width)
else:
x0, y0, pm = self._labelXY(label,x,y,width,height)
fixedEnd = getattr(label,'fixedEnd', None)
if fixedEnd is not None:
v = fixedEnd._getValue(self,pm)
x00, y00 = x0, y0
if flipXY:
x0 = v
else:
y0 = v
else:
if flipXY:
x00 = x0
y00 = y+height/2.0
else:
x00 = x+width/2.0
y00 = y0
fixedStart = getattr(label,'fixedStart', None)
if fixedStart is not None:
v = fixedStart._getValue(self,pm)
if flipXY:
x00 = v
else:
y00 = v
if pm<0:
if flipXY:
dx = -2*label.dx
dy = 0
else:
dy = -2*label.dy
dx = 0
else:
dy = dx = 0
label.setOrigin(x0+dx, y0+dy)
label.setText(text)
sC, sW = label.lineStrokeColor, label.lineStrokeWidth
if sC and sW: g.insert(0,Line(x00,y00,x0,y0, strokeColor=sC, strokeWidth=sW))
g.add(label)
alx = getattr(self,'barLabelCallOut',None)
if alx:
label._callOutInfo = (self,g,rowNo,colNo,x,y,width,height,x00,y00,x0,y0)
alx(label)
del label._callOutInfo
def _makeBar(self,g,x,y,width,height,rowNo,style):
r = Rect(x, y, width, height)
r.strokeWidth = style.strokeWidth
r.fillColor = style.fillColor
r.strokeColor = style.strokeColor
if style.strokeDashArray:
r.strokeDashArray = style.strokeDashArray
g.add(r)
def _makeBars(self,g,lg):
lenData = len(self.data)
bars = self.bars
for rowNo in range(lenData):
row = self._barPositions[rowNo]
styleCount = len(bars)
styleIdx = rowNo % styleCount
rowStyle = bars[styleIdx]
for colNo in range(len(row)):
barPos = row[colNo]
style = bars.has_key((styleIdx,colNo)) and bars[(styleIdx,colNo)] or rowStyle
(x, y, width, height) = barPos
if None in (width,height):
self._addNABarLabel(lg,rowNo,colNo,x,y,width,height)
continue
# Draw a rectangular symbol for each data item,
# or a normal colored rectangle.
symbol = None
if hasattr(style, 'symbol'):
symbol = copy.deepcopy(style.symbol)
elif hasattr(self.bars, 'symbol'):
symbol = self.bars.symbol
if symbol:
symbol.x = x
symbol.y = y
symbol.width = width
symbol.height = height
g.add(symbol)
elif abs(width)>1e-7 and abs(height)>=1e-7 and (style.fillColor is not None or style.strokeColor is not None):
self._makeBar(g,x,y,width,height,rowNo,style)
self._addBarLabel(lg,rowNo,colNo,x,y,width,height)
def makeBars(self):
g = Group()
lg = Group()
self._makeBars(g,lg)
g.add(lg)
return g
def _desiredCategoryAxisLength(self):
'''for dynamically computing the desired category axis length'''
style = self.categoryAxis.style
data = self.data
n = len(data)
m = max(map(len,data))
if style=='parallel':
groupWidth = (n-1)*self.barSpacing+n*self.barWidth
else:
groupWidth = self.barWidth
return m*(self.groupSpacing+groupWidth)
def draw(self):
cA, vA = self.categoryAxis, self.valueAxis
if vA: ovAjA, vA.joinAxis = vA.joinAxis, cA
if cA: ocAjA, cA.joinAxis = cA.joinAxis, vA
if self._flipXY:
cA.setPosition(self._drawBegin(self.x,self.width), self.y, self.height)
else:
cA.setPosition(self.x, self._drawBegin(self.y,self.height), self.width)
return self._drawFinish()
class VerticalBarChart(BarChart):
"Vertical bar chart with multiple side-by-side bars."
_flipXY = 0
class HorizontalBarChart(BarChart):
"Horizontal bar chart with multiple side-by-side bars."
_flipXY = 1
class _FakeGroup:
def __init__(self, cmp=None):
self._data = []
self._cmp = cmp
def add(self,what):
self._data.append(what)
def value(self):
return self._data
def sort(self):
self._data.sort(self._cmp)
class BarChart3D(BarChart):
_attrMap = AttrMap(BASE=BarChart,
theta_x = AttrMapValue(isNumber, desc='dx/dz'),
theta_y = AttrMapValue(isNumber, desc='dy/dz'),
zDepth = AttrMapValue(isNumber, desc='depth of an individual series'),
zSpace = AttrMapValue(isNumber, desc='z gap around series'),
)
theta_x = .5
theta_y = .5
zDepth = None
zSpace = None
def calcBarPositions(self):
BarChart.calcBarPositions(self)
seriesCount = self._seriesCount
zDepth = self.zDepth
if zDepth is None: zDepth = self.barWidth
zSpace = self.zSpace
if zSpace is None: zSpace = self.barSpacing
if self.categoryAxis.style=='parallel_3d':
_3d_depth = seriesCount*zDepth+(seriesCount+1)*zSpace
else:
_3d_depth = zDepth + 2*zSpace
_3d_depth *= self._normFactor
self._3d_dx = self.theta_x*_3d_depth
self._3d_dy = self.theta_y*_3d_depth
def _calc_z0(self,rowNo):
zDepth = self.zDepth
if zDepth is None: zDepth = self.barWidth
zSpace = self.zSpace
if zSpace is None: zSpace = self.barSpacing
if self.categoryAxis.style=='parallel_3d':
z0 = self._normFactor*(rowNo*(zDepth+zSpace)+zSpace)
else:
z0 = self._normFactor*zSpace
return z0
def _makeBar(self,g,x,y,width,height,rowNo,style):
zDepth = self.zDepth
if zDepth is None: zDepth = self.barWidth
zSpace = self.zSpace
if zSpace is None: zSpace = self.barSpacing
z0 = self._calc_z0(rowNo)
z1 = z0 + zDepth*self._normFactor
if width<0:
x += width
width = -width
x += z0*self.theta_x
y += z0*self.theta_y
if self._flipXY:
y += zSpace
else:
x += zSpace
g.add((0,z0,z1,x,y,width,height,rowNo,style))
def _addBarLabel(self, g, rowNo, colNo, x, y, width, height):
z0 = self._calc_z0(rowNo)
zSpace = self.zSpace
if zSpace is None: zSpace = self.barSpacing
z1 = z0
x += z0*self.theta_x
y += z0*self.theta_y
if self._flipXY:
y += zSpace
else:
x += zSpace
g.add((1,z0,z1,x,y,width,height,rowNo,colNo))
def makeBars(self):
from utils3d import _draw_3d_bar
fg = _FakeGroup(cmp=self._cmpZ)
self._makeBars(fg,fg)
fg.sort()
g = Group()
theta_x = self.theta_x
theta_y = self.theta_y
if self.categoryAxis.style == 'stacked':
fg_value=fg.value().reverse()
for t in fg.value():
if t[0]==0:
z0,z1,x,y,width,height,rowNo,style = t[1:]
dz = z1 - z0
_draw_3d_bar(g, x, x+width, y, y+height, dz*theta_x, dz*theta_y,
fillColor=style.fillColor, fillColorShaded=None,
strokeColor=style.strokeColor, strokeWidth=style.strokeWidth,
shading=0.45)
for t in fg.value():
if t[0]==1:
z0,z1,x,y,width,height,rowNo,colNo = t[1:]
BarChart._addBarLabel(self,g,rowNo,colNo,x,y,width,height)
return g
class VerticalBarChart3D(BarChart3D,VerticalBarChart):
_cmpZ=lambda self,a,b:cmp((-a[1],a[3],a[0],-a[4]),(-b[1],b[3],b[0],-b[4]))
class HorizontalBarChart3D(BarChart3D,HorizontalBarChart):
_cmpZ = lambda self,a,b: cmp((-a[1],a[4],a[0],-a[3]),(-b[1],b[4],b[0],-b[3])) #t, z0, z1, x, y = a[:5]
# Vertical samples.
def sampleV0a():
"A slightly pathologic bar chart with only TWO data items."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'ne'
bc.categoryAxis.labels.dx = 8
bc.categoryAxis.labels.dy = -2
bc.categoryAxis.labels.angle = 30
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV0b():
"A pathologic bar chart with only ONE data item."
drawing = Drawing(400, 200)
data = [(42,)]
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 50
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'ne'
bc.categoryAxis.labels.dx = 8
bc.categoryAxis.labels.dy = -2
bc.categoryAxis.labels.angle = 30
bc.categoryAxis.categoryNames = ['Jan-99']
drawing.add(bc)
return drawing
def sampleV0c():
"A really pathologic bar chart with NO data items at all!"
drawing = Drawing(400, 200)
data = [()]
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'ne'
bc.categoryAxis.labels.dx = 8
bc.categoryAxis.labels.dy = -2
bc.categoryAxis.categoryNames = []
drawing.add(bc)
return drawing
def sampleV1():
"Sample of multi-series bar chart."
drawing = Drawing(400, 200)
data = [
(13, 5, 20, 22, 37, 45, 19, 4),
(14, 6, 21, 23, 38, 46, 20, 5)
]
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'ne'
bc.categoryAxis.labels.dx = 8
bc.categoryAxis.labels.dy = -2
bc.categoryAxis.labels.angle = 30
catNames = 'Jan Feb Mar Apr May Jun Jul Aug'.split(' ')
catNames = map(lambda n:n+'-99', catNames)
bc.categoryAxis.categoryNames = catNames
drawing.add(bc)
return drawing
def sampleV2a():
"Sample of multi-series bar chart."
data = [(2.4, -5.7, 2, 5, 9.2),
(0.6, -4.9, -3, 4, 6.8)
]
labels = ("Q3 2000", "Year to Date", "12 months",
"Annualised\n3 years", "Since 07.10.99")
drawing = Drawing(400, 200)
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 120
bc.width = 300
bc.data = data
bc.barSpacing = 0
bc.groupSpacing = 10
bc.barWidth = 10
bc.valueAxis.valueMin = -15
bc.valueAxis.valueMax = +15
bc.valueAxis.valueStep = 5
bc.valueAxis.labels.fontName = 'Helvetica'
bc.valueAxis.labels.fontSize = 8
bc.valueAxis.labels.boxAnchor = 'n' # irrelevant (becomes 'c')
bc.valueAxis.labels.textAnchor = 'middle'
bc.categoryAxis.categoryNames = labels
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 8
bc.categoryAxis.labels.dy = -60
drawing.add(bc)
return drawing
def sampleV2b():
"Sample of multi-series bar chart."
data = [(2.4, -5.7, 2, 5, 9.2),
(0.6, -4.9, -3, 4, 6.8)
]
labels = ("Q3 2000", "Year to Date", "12 months",
"Annualised\n3 years", "Since 07.10.99")
drawing = Drawing(400, 200)
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 120
bc.width = 300
bc.data = data
bc.barSpacing = 5
bc.groupSpacing = 10
bc.barWidth = 10
bc.valueAxis.valueMin = -15
bc.valueAxis.valueMax = +15
bc.valueAxis.valueStep = 5
bc.valueAxis.labels.fontName = 'Helvetica'
bc.valueAxis.labels.fontSize = 8
bc.valueAxis.labels.boxAnchor = 'n' # irrelevant (becomes 'c')
bc.valueAxis.labels.textAnchor = 'middle'
bc.categoryAxis.categoryNames = labels
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 8
bc.categoryAxis.labels.dy = -60
drawing.add(bc)
return drawing
def sampleV2c():
"Sample of multi-series bar chart."
data = [(2.4, -5.7, 2, 5, 9.99),
(0.6, -4.9, -3, 4, 9.99)
]
labels = ("Q3 2000", "Year to Date", "12 months",
"Annualised\n3 years", "Since 07.10.99")
drawing = Drawing(400, 200)
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 120
bc.width = 300
bc.data = data
bc.barSpacing = 2
bc.groupSpacing = 10
bc.barWidth = 10
bc.valueAxis.valueMin = -15
bc.valueAxis.valueMax = +15
bc.valueAxis.valueStep = 5
bc.valueAxis.labels.fontName = 'Helvetica'
bc.valueAxis.labels.fontSize = 8
bc.categoryAxis.categoryNames = labels
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 8
bc.valueAxis.labels.boxAnchor = 'n'
bc.valueAxis.labels.textAnchor = 'middle'
bc.categoryAxis.labels.dy = -60
bc.barLabels.nudge = 10
bc.barLabelFormat = '%0.2f'
bc.barLabels.dx = 0
bc.barLabels.dy = 0
bc.barLabels.boxAnchor = 'n' # irrelevant (becomes 'c')
bc.barLabels.fontName = 'Helvetica'
bc.barLabels.fontSize = 6
drawing.add(bc)
return drawing
def sampleV3():
"Faked horizontal bar chart using a vertical real one (deprecated)."
names = ("UK Equities", "US Equities", "European Equities", "Japanese Equities",
"Pacific (ex Japan) Equities", "Emerging Markets Equities",
"UK Bonds", "Overseas Bonds", "UK Index-Linked", "Cash")
series1 = (-1.5, 0.3, 0.5, 1.0, 0.8, 0.7, 0.4, 0.1, 1.0, 0.3)
series2 = (0.0, 0.33, 0.55, 1.1, 0.88, 0.77, 0.44, 0.11, 1.10, 0.33)
assert len(names) == len(series1), "bad data"
assert len(names) == len(series2), "bad data"
drawing = Drawing(400, 200)
bc = VerticalBarChart()
bc.x = 0
bc.y = 0
bc.height = 100
bc.width = 150
bc.data = (series1,)
bc.bars.fillColor = colors.green
bc.barLabelFormat = '%0.2f'
bc.barLabels.dx = 0
bc.barLabels.dy = 0
bc.barLabels.boxAnchor = 'w' # irrelevant (becomes 'c')
bc.barLabels.angle = 90
bc.barLabels.fontName = 'Helvetica'
bc.barLabels.fontSize = 6
bc.barLabels.nudge = 10
bc.valueAxis.visible = 0
bc.valueAxis.valueMin = -2
bc.valueAxis.valueMax = +2
bc.valueAxis.valueStep = 1
bc.categoryAxis.tickUp = 0
bc.categoryAxis.tickDown = 0
bc.categoryAxis.categoryNames = names
bc.categoryAxis.labels.angle = 90
bc.categoryAxis.labels.boxAnchor = 'w'
bc.categoryAxis.labels.dx = 0
bc.categoryAxis.labels.dy = -125
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 6
g = Group(bc)
g.translate(100, 175)
g.rotate(-90)
drawing.add(g)
return drawing
def sampleV4a():
"A bar chart showing value axis region starting at *exactly* zero."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV4b():
"A bar chart showing value axis region starting *below* zero."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = -10
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV4c():
"A bar chart showing value axis region staring *above* zero."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 10
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV4d():
"A bar chart showing value axis region entirely *below* zero."
drawing = Drawing(400, 200)
data = [(-13, -20)]
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = -30
bc.valueAxis.valueMax = -10
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
###
##dataSample5 = [(10, 20), (20, 30), (30, 40), (40, 50), (50, 60)]
##dataSample5 = [(10, 60), (20, 50), (30, 40), (40, 30), (50, 20)]
dataSample5 = [(10, 60), (20, 50), (30, 40), (40, 30)]
def sampleV5a():
"A simple bar chart with no expressed spacing attributes."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV5b():
"A simple bar chart with proportional spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 0
bc.barWidth = 40
bc.groupSpacing = 20
bc.barSpacing = 10
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV5c1():
"Make sampe simple bar chart but with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 40
bc.groupSpacing = 0
bc.barSpacing = 0
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV5c2():
"Make sampe simple bar chart but with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 40
bc.groupSpacing = 20
bc.barSpacing = 0
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV5c3():
"Make sampe simple bar chart but with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 40
bc.groupSpacing = 0
bc.barSpacing = 10
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleV5c4():
"Make sampe simple bar chart but with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 40
bc.groupSpacing = 20
bc.barSpacing = 10
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'n'
bc.categoryAxis.labels.dy = -5
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
# Horizontal samples
def sampleH0a():
"Make a slightly pathologic bar chart with only TWO data items."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'se'
bc.categoryAxis.labels.angle = 30
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH0b():
"Make a pathologic bar chart with only ONE data item."
drawing = Drawing(400, 200)
data = [(42,)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 50
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'se'
bc.categoryAxis.labels.angle = 30
bc.categoryAxis.categoryNames = ['Jan-99']
drawing.add(bc)
return drawing
def sampleH0c():
"Make a really pathologic bar chart with NO data items at all!"
drawing = Drawing(400, 200)
data = [()]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'se'
bc.categoryAxis.labels.angle = 30
bc.categoryAxis.categoryNames = []
drawing.add(bc)
return drawing
def sampleH1():
"Sample of multi-series bar chart."
drawing = Drawing(400, 200)
data = [
(13, 5, 20, 22, 37, 45, 19, 4),
(14, 6, 21, 23, 38, 46, 20, 5)
]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
catNames = 'Jan Feb Mar Apr May Jun Jul Aug'.split(' ')
catNames = map(lambda n:n+'-99', catNames)
bc.categoryAxis.categoryNames = catNames
drawing.add(bc, 'barchart')
return drawing
def sampleH2a():
"Sample of multi-series bar chart."
data = [(2.4, -5.7, 2, 5, 9.2),
(0.6, -4.9, -3, 4, 6.8)
]
labels = ("Q3 2000", "Year to Date", "12 months",
"Annualised\n3 years", "Since 07.10.99")
drawing = Drawing(400, 200)
bc = HorizontalBarChart()
bc.x = 80
bc.y = 50
bc.height = 120
bc.width = 300
bc.data = data
bc.barSpacing = 0
bc.groupSpacing = 10
bc.barWidth = 10
bc.valueAxis.valueMin = -15
bc.valueAxis.valueMax = +15
bc.valueAxis.valueStep = 5
bc.valueAxis.labels.fontName = 'Helvetica'
bc.valueAxis.labels.fontSize = 8
bc.valueAxis.labels.boxAnchor = 'n' # irrelevant (becomes 'c')
bc.valueAxis.labels.textAnchor = 'middle'
bc.valueAxis.configure(bc.data)
bc.categoryAxis.categoryNames = labels
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 8
bc.categoryAxis.labels.dx = -150
drawing.add(bc)
return drawing
def sampleH2b():
"Sample of multi-series bar chart."
data = [(2.4, -5.7, 2, 5, 9.2),
(0.6, -4.9, -3, 4, 6.8)
]
labels = ("Q3 2000", "Year to Date", "12 months",
"Annualised\n3 years", "Since 07.10.99")
drawing = Drawing(400, 200)
bc = HorizontalBarChart()
bc.x = 80
bc.y = 50
bc.height = 120
bc.width = 300
bc.data = data
bc.barSpacing = 5
bc.groupSpacing = 10
bc.barWidth = 10
bc.valueAxis.valueMin = -15
bc.valueAxis.valueMax = +15
bc.valueAxis.valueStep = 5
bc.valueAxis.labels.fontName = 'Helvetica'
bc.valueAxis.labels.fontSize = 8
bc.valueAxis.labels.boxAnchor = 'n' # irrelevant (becomes 'c')
bc.valueAxis.labels.textAnchor = 'middle'
bc.categoryAxis.categoryNames = labels
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 8
bc.categoryAxis.labels.dx = -150
drawing.add(bc)
return drawing
def sampleH2c():
"Sample of multi-series bar chart."
data = [(2.4, -5.7, 2, 5, 9.99),
(0.6, -4.9, -3, 4, 9.99)
]
labels = ("Q3 2000", "Year to Date", "12 months",
"Annualised\n3 years", "Since 07.10.99")
drawing = Drawing(400, 200)
bc = HorizontalBarChart()
bc.x = 80
bc.y = 50
bc.height = 120
bc.width = 300
bc.data = data
bc.barSpacing = 2
bc.groupSpacing = 10
bc.barWidth = 10
bc.valueAxis.valueMin = -15
bc.valueAxis.valueMax = +15
bc.valueAxis.valueStep = 5
bc.valueAxis.labels.fontName = 'Helvetica'
bc.valueAxis.labels.fontSize = 8
bc.valueAxis.labels.boxAnchor = 'n'
bc.valueAxis.labels.textAnchor = 'middle'
bc.categoryAxis.categoryNames = labels
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 8
bc.categoryAxis.labels.dx = -150
bc.barLabels.nudge = 10
bc.barLabelFormat = '%0.2f'
bc.barLabels.dx = 0
bc.barLabels.dy = 0
bc.barLabels.boxAnchor = 'n' # irrelevant (becomes 'c')
bc.barLabels.fontName = 'Helvetica'
bc.barLabels.fontSize = 6
drawing.add(bc)
return drawing
def sampleH3():
"A really horizontal bar chart (compared to the equivalent faked one)."
names = ("UK Equities", "US Equities", "European Equities", "Japanese Equities",
"Pacific (ex Japan) Equities", "Emerging Markets Equities",
"UK Bonds", "Overseas Bonds", "UK Index-Linked", "Cash")
series1 = (-1.5, 0.3, 0.5, 1.0, 0.8, 0.7, 0.4, 0.1, 1.0, 0.3)
series2 = (0.0, 0.33, 0.55, 1.1, 0.88, 0.77, 0.44, 0.11, 1.10, 0.33)
assert len(names) == len(series1), "bad data"
assert len(names) == len(series2), "bad data"
drawing = Drawing(400, 200)
bc = HorizontalBarChart()
bc.x = 100
bc.y = 20
bc.height = 150
bc.width = 250
bc.data = (series1,)
bc.bars.fillColor = colors.green
bc.barLabelFormat = '%0.2f'
bc.barLabels.dx = 0
bc.barLabels.dy = 0
bc.barLabels.boxAnchor = 'w' # irrelevant (becomes 'c')
bc.barLabels.fontName = 'Helvetica'
bc.barLabels.fontSize = 6
bc.barLabels.nudge = 10
bc.valueAxis.visible = 0
bc.valueAxis.valueMin = -2
bc.valueAxis.valueMax = +2
bc.valueAxis.valueStep = 1
bc.categoryAxis.tickLeft = 0
bc.categoryAxis.tickRight = 0
bc.categoryAxis.categoryNames = names
bc.categoryAxis.labels.boxAnchor = 'w'
bc.categoryAxis.labels.dx = -170
bc.categoryAxis.labels.fontName = 'Helvetica'
bc.categoryAxis.labels.fontSize = 6
g = Group(bc)
drawing.add(g)
return drawing
def sampleH4a():
"A bar chart showing value axis region starting at *exactly* zero."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH4b():
"A bar chart showing value axis region starting *below* zero."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = -10
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH4c():
"A bar chart showing value axis region starting *above* zero."
drawing = Drawing(400, 200)
data = [(13, 20)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 10
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH4d():
"A bar chart showing value axis region entirely *below* zero."
drawing = Drawing(400, 200)
data = [(-13, -20)]
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = -30
bc.valueAxis.valueMax = -10
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
dataSample5 = [(10, 60), (20, 50), (30, 40), (40, 30)]
def sampleH5a():
"A simple bar chart with no expressed spacing attributes."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH5b():
"A simple bar chart with proportional spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 0
bc.barWidth = 40
bc.groupSpacing = 20
bc.barSpacing = 10
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH5c1():
"A simple bar chart with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 10
bc.groupSpacing = 0
bc.barSpacing = 0
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH5c2():
"Simple bar chart with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 10
bc.groupSpacing = 20
bc.barSpacing = 0
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH5c3():
"Simple bar chart with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 20
bc.height = 155
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 10
bc.groupSpacing = 0
bc.barSpacing = 2
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleH5c4():
"Simple bar chart with absolute spacing."
drawing = Drawing(400, 200)
data = dataSample5
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 10
bc.groupSpacing = 20
bc.barSpacing = 10
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
drawing.add(bc)
return drawing
def sampleSymbol1():
"Simple bar chart using symbol attribute."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.barWidth = 10
bc.groupSpacing = 15
bc.barSpacing = 3
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
from reportlab.graphics.widgets.grids import ShadedRect
sym1 = ShadedRect()
sym1.fillColorStart = colors.black
sym1.fillColorEnd = colors.blue
sym1.orientation = 'horizontal'
sym1.strokeWidth = 0
sym2 = ShadedRect()
sym2.fillColorStart = colors.black
sym2.fillColorEnd = colors.pink
sym2.orientation = 'horizontal'
sym2.strokeWidth = 0
sym3 = ShadedRect()
sym3.fillColorStart = colors.blue
sym3.fillColorEnd = colors.white
sym3.orientation = 'vertical'
sym3.cylinderMode = 1
sym3.strokeWidth = 0
bc.bars.symbol = sym1
bc.bars[2].symbol = sym2
bc.bars[3].symbol = sym3
drawing.add(bc)
return drawing
def sampleStacked1():
"Simple bar chart using symbol attribute."
drawing = Drawing(400, 200)
data = dataSample5
bc = VerticalBarChart()
bc.categoryAxis.style = 'stacked'
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = data
bc.strokeColor = colors.black
bc.barWidth = 10
bc.groupSpacing = 15
bc.valueAxis.valueMin = 0
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
from reportlab.graphics.widgets.grids import ShadedRect
bc.bars.symbol = ShadedRect()
bc.bars.symbol.fillColorStart = colors.red
bc.bars.symbol.fillColorEnd = colors.white
bc.bars.symbol.orientation = 'vertical'
bc.bars.symbol.cylinderMode = 1
bc.bars.symbol.strokeWidth = 0
bc.bars[1].symbol = ShadedRect()
bc.bars[1].symbol.fillColorStart = colors.magenta
bc.bars[1].symbol.fillColorEnd = colors.white
bc.bars[1].symbol.orientation = 'vertical'
bc.bars[1].symbol.cylinderMode = 1
bc.bars[1].symbol.strokeWidth = 0
bc.bars[2].symbol = ShadedRect()
bc.bars[2].symbol.fillColorStart = colors.green
bc.bars[2].symbol.fillColorEnd = colors.white
bc.bars[2].symbol.orientation = 'vertical'
bc.bars[2].symbol.cylinderMode = 1
bc.bars[2].symbol.strokeWidth = 0
bc.bars[3].symbol = ShadedRect()
bc.bars[3].symbol.fillColorStart = colors.blue
bc.bars[3].symbol.fillColorEnd = colors.white
bc.bars[3].symbol.orientation = 'vertical'
bc.bars[3].symbol.cylinderMode = 1
bc.bars[3].symbol.strokeWidth = 0
drawing.add(bc)
return drawing
#class version of function sampleH5c4 above
class SampleH5c4(Drawing):
"Simple bar chart with absolute spacing."
def __init__(self,width=400,height=200,*args,**kw):
Drawing.__init__(self, width, height, *args, **kw)
bc = HorizontalBarChart()
bc.x = 50
bc.y = 50
bc.height = 125
bc.width = 300
bc.data = dataSample5
bc.strokeColor = colors.black
bc.useAbsolute = 1
bc.barWidth = 10
bc.groupSpacing = 20
bc.barSpacing = 10
bc.valueAxis.valueMin = 0
bc.valueAxis.valueMax = 60
bc.valueAxis.valueStep = 15
bc.categoryAxis.labels.boxAnchor = 'e'
bc.categoryAxis.categoryNames = ['Ying', 'Yang']
self.add(bc,name='HBC')
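# --- Illustrative usage sketch (not part of the original samples) -----------
# Assuming the standard ReportLab renderers are available, any of the sample
# drawings above can be exported to a file; the file names are hypothetical.
#
#   from reportlab.graphics import renderPDF, renderPM
#   d = sampleH1()
#   renderPDF.drawToFile(d, 'sampleH1.pdf')            # vector/PDF output
#   renderPM.drawToFile(d, 'sampleH1.png', fmt='PNG')  # bitmap output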
| bsd-3-clause |
provaleks/o8 | addons/account_analytic_plans/__openerp__.py | 264 | 3114 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Multiple Analytic Plans',
'version': '1.0',
'category': 'Accounting & Finance',
'description': """
This module allows the use of several analytic plans according to the general journal.
=======================================================================================

Here, multiple analytic lines are created when the invoice or the entries
are confirmed.

For example, you can define the following analytic structure:
--------------------------------------------------------------
  * **Projects**
      * Project 1
          + SubProj 1.1
          + SubProj 1.2
      * Project 2
  * **Salesman**
      * Eric
      * Fabien

Here, we have two plans: Projects and Salesman. An invoice line must be able to write
analytic entries in the two plans: SubProj 1.1 and Fabien. The amount can also be split.

The following example is for an invoice that touches the two subprojects and is assigned to one salesman:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

**Plan1:**
    * SubProject 1.1 : 50%
    * SubProject 1.2 : 50%

**Plan2:**
    * Eric : 100%

So when this invoice line is confirmed, it will generate 3 analytic lines for one account entry.
The analytic plan validates the minimum and maximum percentage at the time of creation of the distribution models.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/accounting',
'depends': ['account', 'account_analytic_default'],
'data': [
'security/account_analytic_plan_security.xml',
'security/ir.model.access.csv',
'account_analytic_plans_view.xml',
'account_analytic_plans_report.xml',
'wizard/analytic_plan_create_model_view.xml',
'wizard/account_crossovered_analytic_view.xml',
'views/report_crossoveredanalyticplans.xml',
'views/account_analytic_plans.xml',
],
'demo': [],
'test': ['test/acount_analytic_plans_report.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
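# Illustrative sketch (not part of the module): applying the distribution from
# the description above to a hypothetical invoice line of 1000.0 yields three
# analytic lines for a single account entry.
#
#   plan1 = {'SubProj 1.1': 0.5, 'SubProj 1.2': 0.5}   # plan "Projects"
#   plan2 = {'Eric': 1.0}                              # plan "Salesman"
#   amount = 1000.0
#   lines = [(account, amount * share)
#            for plan in (plan1, plan2)
#            for account, share in plan.items()]
#   # -> [('SubProj 1.1', 500.0), ('SubProj 1.2', 500.0), ('Eric', 1000.0)]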
| agpl-3.0 |
AndyPeterman/mrmc | lib/libUPnP/Neptune/Extras/Scripts/GenErrorMap.py | 265 | 4805 | #! /usr/bin/python
import os
import os.path
import re
import sys
ErrorPattern = re.compile('([A-Z]{3}_ERROR_[A-Z_0-9]+)\s+=?\s*\(?([A-Z_0-9-][A-Z_0-9-+ ]+[A-Z_0-9])')
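# Matches result-code definitions such as (hypothetical example):
#   NPT_ERROR_OUT_OF_MEMORY = (NPT_ERROR_BASE_GENERAL - 0)
# capturing the symbol name and the value expression assigned to it.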
FilePatternH = re.compile('^.*\.h$')
FilePatternC = re.compile('^.*\.(c|cpp)$')
Errors = {}
Codes = {}
ERROR_MAP_HEADER = """/*****************************************************************
|
| Neptune - Result Code Map
|
| This file is automatically generated by a script, do not edit!
|
| Copyright (c) 2002-2010, Axiomatic Systems, LLC.
| All rights reserved.
|
| Redistribution and use in source and binary forms, with or without
| modification, are permitted provided that the following conditions are met:
| * Redistributions of source code must retain the above copyright
| notice, this list of conditions and the following disclaimer.
| * Redistributions in binary form must reproduce the above copyright
| notice, this list of conditions and the following disclaimer in the
| documentation and/or other materials provided with the distribution.
| * Neither the name of Axiomatic Systems nor the
| names of its contributors may be used to endorse or promote products
| derived from this software without specific prior written permission.
|
| THIS SOFTWARE IS PROVIDED BY AXIOMATIC SYSTEMS ''AS IS'' AND ANY
| EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
| WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
| DISCLAIMED. IN NO EVENT SHALL AXIOMATIC SYSTEMS BE LIABLE FOR ANY
| DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
| (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
| LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
| ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
| SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
****************************************************************/
/*----------------------------------------------------------------------
| includes
+---------------------------------------------------------------------*/
#include "Neptune.h"
/*----------------------------------------------------------------------
| NPT_ResultText
+---------------------------------------------------------------------*/
const char*
NPT_ResultText(NPT_Result result)
{
switch (result) {
case NPT_SUCCESS: return "SUCCESS";
case NPT_FAILURE: return "FAILURE";
"""
ERROR_MAP_FOOTER = """
default: return "UNKNOWN";
}
}
"""
def ResolveErrors():
keep_going = True
while keep_going:
keep_going = False
for key in Errors.keys():
value = Errors[key]
if type(value) is str:
elements = [x.strip() for x in value.split('-')]
if len(elements[0]) == 0:
first = 0
else:
first = elements[0]
if Errors.has_key(first):
first = Errors[first]
if not type(first) is str:
second = int(elements[1])
Errors[key] = first-second
keep_going = True
def AnalyzeErrorCodes(file):
input = open(file)
for line in input.readlines():
m = ErrorPattern.search(line)
if m:
Errors[m.group(1)] = m.group(2)
input.close()
def ScanErrorCodes(top):
print ERROR_MAP_HEADER
for root, dirs, files in os.walk(top):
for file in files:
if FilePatternH.match(file):
AnalyzeErrorCodes(os.path.join(root, file))
ResolveErrors()
for key in Errors:
#print key,"==>",Errors[key]
if (key.find("ERROR_BASE") > 1): continue
if Codes.has_key(Errors[key]):
raise "duplicate error code: "+ str(key) +" --> " + str(Errors[key]) + "=" + Codes[Errors[key]]
Codes[Errors[key]] = key
sorted_keys = Codes.keys()
sorted_keys.sort()
sorted_keys.reverse()
last = 0
for code in sorted_keys:
#if code != last-1:
# print
print ' case %s: return "%s";' % (Codes[code],Codes[code])
last = code
print ERROR_MAP_FOOTER
####################################################
# main
####################################################
sys.argv.reverse()
sys.argv.pop()
action = None
top = None
while len(sys.argv):
arg = sys.argv.pop()
if top == None:
top = arg
else:
raise "unexpected argument " + arg
if top is None:
print "GenErrorMap.py <directory-root>"
sys.exit(1)
ScanErrorCodes(top)
| gpl-2.0 |
lepture/flask-storage | flask_storage/_base.py | 4 | 3064 | # coding: utf-8
"""
flask_storage._base
~~~~~~~~~~~~~~~~~~~~
Common utilities for flask storage.
:copyright: (c) 2013 Hsiaoming Yang.
"""
import os
import logging
from werkzeug import FileStorage
from ._compat import urljoin
__all__ = (
'TEXT', 'DOCUMENTS', 'IMAGES', 'AUDIO', 'DATA', 'SCRIPTS',
'ARCHIVES', 'EXECUTABLES', 'BaseStorage',
'UploadNotAllowed', 'UploadFileExists', 'make_request'
)
log = logging.getLogger('flask_storage')
TEXT = ('txt',)
DOCUMENTS = (
'rtf', 'odf', 'ods', 'gnumeric', 'abw',
'doc', 'docx', 'xls', 'xlsx'
)
# This contains basic image types that are viewable by most browsers
IMAGES = ('jpg', 'jpe', 'jpeg', 'png', 'gif', 'svg', 'bmp')
# This contains audio file types
AUDIO = ('wav', 'mp3', 'aac', 'ogg', 'oga', 'flac')
# This is for structured data files
DATA = ('csv', 'ini', 'json', 'plist', 'xml', 'yaml', 'yml')
# This contains various types of scripts
SCRIPTS = ('py', 'js', 'rb', 'sh', 'pl', 'php')
# This contains archive and compression formats
ARCHIVES = ('gz', 'bz2', 'zip', 'tar', 'tgz', 'txz', '7z')
# This contains shared libraries and executable files
EXECUTABLES = ('so', 'exe', 'dll')
class BaseStorage(object):
def __init__(self, name='base', extensions=None, config=None):
self.name = name
self.config = config
self.extensions = extensions or IMAGES
def url(self, filename):
"""Generate the url for a filename.
:param filename: filename for generating the url.
"""
urlbase = self.config.get('base_url')
return urljoin(urlbase, filename)
def extension_allowed(self, extname):
if not self.extensions:
return True
return extname in self.extensions
def check(self, storage):
"""
Check if the storage can be saved.
:param storage: The storage to be saved.
This function should be called every time you want to save a storage::
save a storage::
class DemoStorage(BaseStorage):
def save(self, storage, filename):
# check storage before saving it
self.check(storage)
"""
if not isinstance(storage, FileStorage):
raise TypeError('storage must be a werkzeug.FileStorage')
_, extname = os.path.splitext(storage.filename)
ext = extname.lower()[1:]
if not self.extension_allowed(ext):
raise UploadNotAllowed('Extension not allowed')
def exists(self, filename):
raise NotImplementedError
def read(self, filename):
raise NotImplementedError
def write(self, filename, body, headers=None):
raise NotImplementedError
def delete(self, filename):
raise NotImplementedError
def save(self, storage, filename):
raise NotImplementedError
class UploadNotAllowed(Exception):
"""This exception is raised if the upload was not allowed."""
class UploadFileExists(Exception):
"""This exception is raised when the uploaded file exits."""
| bsd-3-clause |
davidmueller13/L900_3.9_Experiment | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
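# note: this recurrence smooths toward recent values; it is not a true
# arithmetic mean of all samples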
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
jenalgit/django | tests/utils_tests/test_tree.py | 429 | 1951 | import copy
import unittest
from django.utils.tree import Node
class NodeTests(unittest.TestCase):
def setUp(self):
self.node1_children = [('a', 1), ('b', 2)]
self.node1 = Node(self.node1_children)
self.node2 = Node()
def test_str(self):
self.assertEqual(str(self.node1), "(DEFAULT: ('a', 1), ('b', 2))")
self.assertEqual(str(self.node2), "(DEFAULT: )")
def test_repr(self):
self.assertEqual(repr(self.node1),
"<Node: (DEFAULT: ('a', 1), ('b', 2))>")
self.assertEqual(repr(self.node2), "<Node: (DEFAULT: )>")
def test_len(self):
self.assertEqual(len(self.node1), 2)
self.assertEqual(len(self.node2), 0)
def test_bool(self):
self.assertTrue(self.node1)
self.assertFalse(self.node2)
def test_contains(self):
self.assertIn(('a', 1), self.node1)
self.assertNotIn(('a', 1), self.node2)
def test_add(self):
# start with the same children of node1 then add an item
node3 = Node(self.node1_children)
node3_added_child = ('c', 3)
# add() returns the added data
self.assertEqual(node3.add(node3_added_child, Node.default),
node3_added_child)
# we added exactly one item, len() should reflect that
self.assertEqual(len(self.node1) + 1, len(node3))
self.assertEqual(str(node3), "(DEFAULT: ('a', 1), ('b', 2), ('c', 3))")
def test_negate(self):
# negated is False by default
self.assertFalse(self.node1.negated)
self.node1.negate()
self.assertTrue(self.node1.negated)
self.node1.negate()
self.assertFalse(self.node1.negated)
def test_deepcopy(self):
node4 = copy.copy(self.node1)
node5 = copy.deepcopy(self.node1)
self.assertIs(self.node1.children, node4.children)
self.assertIsNot(self.node1.children, node5.children)
| bsd-3-clause |
techdragon/django | tests/forms_tests/tests/test_input_formats.py | 26 | 39313 | from datetime import date, datetime, time
from django import forms
from django.test import SimpleTestCase, override_settings
from django.utils.translation import activate, deactivate
@override_settings(TIME_INPUT_FORMATS=["%I:%M:%S %p", "%I:%M %p"], USE_L10N=True)
class LocalizedTimeTests(SimpleTestCase):
def setUp(self):
# nl/formats.py has customized TIME_INPUT_FORMATS:
# ['%H:%M:%S', '%H.%M:%S', '%H.%M', '%H:%M']
activate('nl')
def tearDown(self):
deactivate()
def test_timeField(self):
"TimeFields can parse dates in the default format"
f = forms.TimeField()
# Parse a time in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('1:30:05 PM')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30:05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip
text = f.widget.format_value(result)
self.assertEqual(text, '13:30:05')
# Parse a time in a valid, but non-default format, get a parsed result
result = f.clean('13:30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:00")
# ISO formats are accepted, even if not specified in formats.py
result = f.clean('13:30:05.000155')
self.assertEqual(result, time(13, 30, 5, 155))
def test_localized_timeField(self):
"Localized TimeFields act as unlocalized widgets"
f = forms.TimeField(localize=True)
# Parse a time in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('1:30:05 PM')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30:05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, '13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:00")
def test_timeField_with_inputformat(self):
"TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"])
# Parse a time in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('1:30:05 PM')
with self.assertRaises(forms.ValidationError):
f.clean('13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30.05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:00")
def test_localized_timeField_with_inputformat(self):
"Localized TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"], localize=True)
# Parse a time in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('1:30:05 PM')
with self.assertRaises(forms.ValidationError):
f.clean('13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30.05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:00")
@override_settings(TIME_INPUT_FORMATS=["%I:%M:%S %p", "%I:%M %p"])
class CustomTimeInputFormatsTests(SimpleTestCase):
def test_timeField(self):
"TimeFields can parse dates in the default format"
f = forms.TimeField()
# Parse a time in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30:05 PM')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip
text = f.widget.format_value(result)
self.assertEqual(text, '01:30:05 PM')
# Parse a time in a valid, but non-default format, get a parsed result
result = f.clean('1:30 PM')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:00 PM")
def test_localized_timeField(self):
"Localized TimeFields act as unlocalized widgets"
f = forms.TimeField(localize=True)
# Parse a time in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30:05 PM')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, '01:30:05 PM')
# Parse a time in a valid format, get a parsed result
result = f.clean('01:30 PM')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:00 PM")
def test_timeField_with_inputformat(self):
"TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"])
# Parse a time in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('1:30:05 PM')
with self.assertRaises(forms.ValidationError):
f.clean('13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30.05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:05 PM")
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:00 PM")
def test_localized_timeField_with_inputformat(self):
"Localized TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"], localize=True)
# Parse a time in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('1:30:05 PM')
with self.assertRaises(forms.ValidationError):
f.clean('13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30.05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:05 PM")
# Parse a time in a valid format, get a parsed result
result = f.clean('13.30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:00 PM")
class SimpleTimeFormatTests(SimpleTestCase):
def test_timeField(self):
"TimeFields can parse dates in the default format"
f = forms.TimeField()
# Parse a time in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('1:30:05 PM')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30:05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid, but non-default format, get a parsed result
result = f.clean('13:30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:00")
def test_localized_timeField(self):
"Localized TimeFields in a non-localized environment act as unlocalized widgets"
f = forms.TimeField(localize=True)
# Parse a time in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('1:30:05 PM')
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30:05')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean('13:30')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:00")
def test_timeField_with_inputformat(self):
"TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%I:%M:%S %p", "%I:%M %p"])
# Parse a time in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30:05 PM')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30 PM')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:00")
def test_localized_timeField_with_inputformat(self):
"Localized TimeFields with manually specified input formats can accept those formats"
f = forms.TimeField(input_formats=["%I:%M:%S %p", "%I:%M %p"], localize=True)
# Parse a time in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('13:30:05')
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30:05 PM')
self.assertEqual(result, time(13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean('1:30 PM')
self.assertEqual(result, time(13, 30, 0))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "13:30:00")
@override_settings(DATE_INPUT_FORMATS=["%d/%m/%Y", "%d-%m-%Y"], USE_L10N=True)
class LocalizedDateTests(SimpleTestCase):
def setUp(self):
activate('de')
def tearDown(self):
deactivate()
def test_dateField(self):
"DateFields can parse dates in the default format"
f = forms.DateField()
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('21/12/2010')
# ISO formats are accepted, even if not specified in formats.py
self.assertEqual(f.clean('2010-12-21'), date(2010, 12, 21))
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip
text = f.widget.format_value(result)
self.assertEqual(text, '21.12.2010')
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('21.12.10')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
def test_localized_dateField(self):
"Localized DateFields act as unlocalized widgets"
f = forms.DateField(localize=True)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('21/12/2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, '21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.10')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
def test_dateField_with_inputformat(self):
"DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"])
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('2010-12-21')
with self.assertRaises(forms.ValidationError):
f.clean('21/12/2010')
with self.assertRaises(forms.ValidationError):
f.clean('21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
def test_localized_dateField_with_inputformat(self):
"Localized DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"], localize=True)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('2010-12-21')
with self.assertRaises(forms.ValidationError):
f.clean('21/12/2010')
with self.assertRaises(forms.ValidationError):
f.clean('21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
@override_settings(DATE_INPUT_FORMATS=["%d.%m.%Y", "%d-%m-%Y"])
class CustomDateInputFormatsTests(SimpleTestCase):
def test_dateField(self):
"DateFields can parse dates in the default format"
f = forms.DateField()
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip
text = f.widget.format_value(result)
self.assertEqual(text, '21.12.2010')
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('21-12-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
def test_localized_dateField(self):
"Localized DateFields act as unlocalized widgets"
f = forms.DateField(localize=True)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, '21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('21-12-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
def test_dateField_with_inputformat(self):
"DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"])
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('21.12.2010')
with self.assertRaises(forms.ValidationError):
f.clean('2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
def test_localized_dateField_with_inputformat(self):
"Localized DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%m.%d.%Y", "%m-%d-%Y"], localize=True)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('21.12.2010')
with self.assertRaises(forms.ValidationError):
f.clean('2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010")
class SimpleDateFormatTests(SimpleTestCase):
def test_dateField(self):
"DateFields can parse dates in the default format"
f = forms.DateField()
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('2010-12-21')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21")
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('12/21/2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21")
def test_localized_dateField(self):
"Localized DateFields in a non-localized environment act as unlocalized widgets"
f = forms.DateField(localize=True)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('2010-12-21')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21")
# Parse a date in a valid format, get a parsed result
result = f.clean('12/21/2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21")
def test_dateField_with_inputformat(self):
"DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%d.%m.%Y", "%d-%m-%Y"])
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21")
# Parse a date in a valid format, get a parsed result
result = f.clean('21-12-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21")
def test_localized_dateField_with_inputformat(self):
"Localized DateFields with manually specified input formats can accept those formats"
f = forms.DateField(input_formats=["%d.%m.%Y", "%d-%m-%Y"], localize=True)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('2010-12-21')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21")
# Parse a date in a valid format, get a parsed result
result = f.clean('21-12-2010')
self.assertEqual(result, date(2010, 12, 21))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21")
@override_settings(DATETIME_INPUT_FORMATS=["%I:%M:%S %p %d/%m/%Y", "%I:%M %p %d-%m-%Y"], USE_L10N=True)
class LocalizedDateTimeTests(SimpleTestCase):
def setUp(self):
activate('de')
def tearDown(self):
deactivate()
def test_dateTimeField(self):
"DateTimeFields can parse dates in the default format"
f = forms.DateTimeField()
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('1:30:05 PM 21/12/2010')
# ISO formats are accepted, even if not specified in formats.py
self.assertEqual(f.clean('2010-12-21 13:30:05'), datetime(2010, 12, 21, 13, 30, 5))
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip
text = f.widget.format_value(result)
self.assertEqual(text, '21.12.2010 13:30:05')
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('21.12.2010 13:30')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010 13:30:00")
def test_localized_dateTimeField(self):
"Localized DateTimeFields act as unlocalized widgets"
f = forms.DateTimeField(localize=True)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('1:30:05 PM 21/12/2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, '21.12.2010 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('21.12.2010 13:30')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010 13:30:00")
def test_dateTimeField_with_inputformat(self):
"DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%H.%M.%S %m.%d.%Y", "%H.%M %m-%d-%Y"])
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('2010-12-21 13:30:05 13:30:05')
with self.assertRaises(forms.ValidationError):
f.clean('1:30:05 PM 21/12/2010')
with self.assertRaises(forms.ValidationError):
f.clean('13:30:05 21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('13.30.05 12.21.2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean('13.30 12-21-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010 13:30:00")
def test_localized_dateTimeField_with_inputformat(self):
"Localized DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%H.%M.%S %m.%d.%Y", "%H.%M %m-%d-%Y"], localize=True)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('2010-12-21 13:30:05')
with self.assertRaises(forms.ValidationError):
f.clean('1:30:05 PM 21/12/2010')
with self.assertRaises(forms.ValidationError):
f.clean('13:30:05 21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('13.30.05 12.21.2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean('13.30 12-21-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "21.12.2010 13:30:00")
@override_settings(DATETIME_INPUT_FORMATS=["%I:%M:%S %p %d/%m/%Y", "%I:%M %p %d-%m-%Y"])
class CustomDateTimeInputFormatsTests(SimpleTestCase):
def test_dateTimeField(self):
"DateTimeFields can parse dates in the default format"
f = forms.DateTimeField()
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30:05 PM 21/12/2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip
text = f.widget.format_value(result)
self.assertEqual(text, '01:30:05 PM 21/12/2010')
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('1:30 PM 21-12-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:00 PM 21/12/2010")
def test_localized_dateTimeField(self):
"Localized DateTimeFields act as unlocalized widgets"
f = forms.DateTimeField(localize=True)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30:05 PM 21/12/2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, '01:30:05 PM 21/12/2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30 PM 21-12-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:00 PM 21/12/2010")
def test_dateTimeField_with_inputformat(self):
"DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%m.%d.%Y %H:%M:%S", "%m-%d-%Y %H:%M"])
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('13:30:05 21.12.2010')
with self.assertRaises(forms.ValidationError):
f.clean('2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:05 PM 21/12/2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010 13:30')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:00 PM 21/12/2010")
def test_localized_dateTimeField_with_inputformat(self):
"Localized DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%m.%d.%Y %H:%M:%S", "%m-%d-%Y %H:%M"], localize=True)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('13:30:05 21.12.2010')
with self.assertRaises(forms.ValidationError):
f.clean('2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('12.21.2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:05 PM 21/12/2010")
# Parse a date in a valid format, get a parsed result
result = f.clean('12-21-2010 13:30')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:00 PM 21/12/2010")
class SimpleDateTimeFormatTests(SimpleTestCase):
def test_dateTimeField(self):
"DateTimeFields can parse dates in the default format"
f = forms.DateTimeField()
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('13:30:05 21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('2010-12-21 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
# Parse a date in a valid, but non-default format, get a parsed result
result = f.clean('12/21/2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
def test_localized_dateTimeField(self):
"Localized DateTimeFields in a non-localized environment act as unlocalized widgets"
f = forms.DateTimeField(localize=True)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('13:30:05 21.12.2010')
# Parse a date in a valid format, get a parsed result
result = f.clean('2010-12-21 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean('12/21/2010 13:30:05')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
def test_dateTimeField_with_inputformat(self):
"DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%I:%M:%S %p %d.%m.%Y", "%I:%M %p %d-%m-%Y"])
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30:05 PM 21.12.2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30 PM 21-12-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21 13:30:00")
def test_localized_dateTimeField_with_inputformat(self):
"Localized DateTimeFields with manually specified input formats can accept those formats"
f = forms.DateTimeField(input_formats=["%I:%M:%S %p %d.%m.%Y", "%I:%M %p %d-%m-%Y"], localize=True)
# Parse a date in an unaccepted format; get an error
with self.assertRaises(forms.ValidationError):
f.clean('2010-12-21 13:30:05')
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30:05 PM 21.12.2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30, 5))
# Check that the parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21 13:30:05")
# Parse a date in a valid format, get a parsed result
result = f.clean('1:30 PM 21-12-2010')
self.assertEqual(result, datetime(2010, 12, 21, 13, 30))
# Check that the parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "2010-12-21 13:30:00")
| bsd-3-clause |
HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/ThirdParty/Twisted/twisted/test/test_stringtransport.py | 33 | 10694 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.test.proto_helpers}.
"""
from zope.interface.verify import verifyObject
from twisted.internet.interfaces import (ITransport, IPushProducer, IConsumer,
IReactorTCP, IReactorSSL, IReactorUNIX, IAddress, IListeningPort,
IConnector)
from twisted.internet.address import IPv4Address
from twisted.trial.unittest import TestCase
from twisted.test.proto_helpers import (StringTransport, MemoryReactor,
RaisingMemoryReactor)
from twisted.internet.protocol import ClientFactory, Factory
class StringTransportTests(TestCase):
"""
Tests for L{twisted.test.proto_helpers.StringTransport}.
"""
def setUp(self):
self.transport = StringTransport()
def test_interfaces(self):
"""
L{StringTransport} instances provide L{ITransport}, L{IPushProducer},
and L{IConsumer}.
"""
self.assertTrue(verifyObject(ITransport, self.transport))
self.assertTrue(verifyObject(IPushProducer, self.transport))
self.assertTrue(verifyObject(IConsumer, self.transport))
def test_registerProducer(self):
"""
L{StringTransport.registerProducer} records the arguments supplied to
it as instance attributes.
"""
producer = object()
streaming = object()
self.transport.registerProducer(producer, streaming)
self.assertIdentical(self.transport.producer, producer)
self.assertIdentical(self.transport.streaming, streaming)
def test_disallowedRegisterProducer(self):
"""
L{StringTransport.registerProducer} raises L{RuntimeError} if a
producer is already registered.
"""
producer = object()
self.transport.registerProducer(producer, True)
self.assertRaises(
RuntimeError, self.transport.registerProducer, object(), False)
self.assertIdentical(self.transport.producer, producer)
self.assertTrue(self.transport.streaming)
def test_unregisterProducer(self):
"""
L{StringTransport.unregisterProducer} causes the transport to forget
about the registered producer and makes it possible to register a new
one.
"""
oldProducer = object()
newProducer = object()
self.transport.registerProducer(oldProducer, False)
self.transport.unregisterProducer()
self.assertIdentical(self.transport.producer, None)
self.transport.registerProducer(newProducer, True)
self.assertIdentical(self.transport.producer, newProducer)
self.assertTrue(self.transport.streaming)
def test_invalidUnregisterProducer(self):
"""
L{StringTransport.unregisterProducer} raises L{RuntimeError} if called
when no producer is registered.
"""
self.assertRaises(RuntimeError, self.transport.unregisterProducer)
def test_initialProducerState(self):
"""
L{StringTransport.producerState} is initially C{'producing'}.
"""
self.assertEqual(self.transport.producerState, 'producing')
def test_pauseProducing(self):
"""
L{StringTransport.pauseProducing} changes the C{producerState} of the
transport to C{'paused'}.
"""
self.transport.pauseProducing()
self.assertEqual(self.transport.producerState, 'paused')
def test_resumeProducing(self):
"""
L{StringTransport.resumeProducing} changes the C{producerState} of the
transport to C{'producing'}.
"""
self.transport.pauseProducing()
self.transport.resumeProducing()
self.assertEqual(self.transport.producerState, 'producing')
def test_stopProducing(self):
"""
L{StringTransport.stopProducing} changes the C{'producerState'} of the
transport to C{'stopped'}.
"""
self.transport.stopProducing()
self.assertEqual(self.transport.producerState, 'stopped')
def test_stoppedTransportCannotPause(self):
"""
L{StringTransport.pauseProducing} raises L{RuntimeError} if the
transport has been stopped.
"""
self.transport.stopProducing()
self.assertRaises(RuntimeError, self.transport.pauseProducing)
def test_stoppedTransportCannotResume(self):
"""
L{StringTransport.resumeProducing} raises L{RuntimeError} if the
transport has been stopped.
"""
self.transport.stopProducing()
self.assertRaises(RuntimeError, self.transport.resumeProducing)
def test_disconnectingTransportCannotPause(self):
"""
L{StringTransport.pauseProducing} raises L{RuntimeError} if the
transport is being disconnected.
"""
self.transport.loseConnection()
self.assertRaises(RuntimeError, self.transport.pauseProducing)
def test_disconnectingTransportCannotResume(self):
"""
L{StringTransport.resumeProducing} raises L{RuntimeError} if the
transport is being disconnected.
"""
self.transport.loseConnection()
self.assertRaises(RuntimeError, self.transport.resumeProducing)
def test_loseConnectionSetsDisconnecting(self):
"""
L{StringTransport.loseConnection} toggles the C{disconnecting} instance
variable to C{True}.
"""
self.assertFalse(self.transport.disconnecting)
self.transport.loseConnection()
self.assertTrue(self.transport.disconnecting)
def test_specifiedHostAddress(self):
"""
If a host address is passed to L{StringTransport.__init__}, that
value is returned from L{StringTransport.getHost}.
"""
address = object()
self.assertIdentical(StringTransport(address).getHost(), address)
def test_specifiedPeerAddress(self):
"""
If a peer address is passed to L{StringTransport.__init__}, that
value is returned from L{StringTransport.getPeer}.
"""
address = object()
self.assertIdentical(
StringTransport(peerAddress=address).getPeer(), address)
def test_defaultHostAddress(self):
"""
If no host address is passed to L{StringTransport.__init__}, an
L{IPv4Address} is returned from L{StringTransport.getHost}.
"""
address = StringTransport().getHost()
self.assertIsInstance(address, IPv4Address)
def test_defaultPeerAddress(self):
"""
If no peer address is passed to L{StringTransport.__init__}, an
L{IPv4Address} is returned from L{StringTransport.getPeer}.
"""
address = StringTransport().getPeer()
self.assertIsInstance(address, IPv4Address)
class ReactorTests(TestCase):
"""
Tests for L{MemoryReactor} and L{RaisingMemoryReactor}.
"""
def test_memoryReactorProvides(self):
"""
L{MemoryReactor} provides all of the attributes described by the
interfaces it advertises.
"""
memoryReactor = MemoryReactor()
verifyObject(IReactorTCP, memoryReactor)
verifyObject(IReactorSSL, memoryReactor)
verifyObject(IReactorUNIX, memoryReactor)
def test_raisingReactorProvides(self):
"""
L{RaisingMemoryReactor} provides all of the attributes described by the
interfaces it advertises.
"""
raisingReactor = RaisingMemoryReactor()
verifyObject(IReactorTCP, raisingReactor)
verifyObject(IReactorSSL, raisingReactor)
verifyObject(IReactorUNIX, raisingReactor)
def test_connectDestination(self):
"""
L{MemoryReactor.connectTCP}, L{MemoryReactor.connectSSL}, and
L{MemoryReactor.connectUNIX} will return an L{IConnector} whose
C{getDestination} method returns an L{IAddress} with attributes which
reflect the values passed.
"""
memoryReactor = MemoryReactor()
for connector in [memoryReactor.connectTCP(
"test.example.com", 8321, ClientFactory()),
memoryReactor.connectSSL(
"test.example.com", 8321, ClientFactory(),
None)]:
verifyObject(IConnector, connector)
address = connector.getDestination()
verifyObject(IAddress, address)
self.assertEqual(address.host, "test.example.com")
self.assertEqual(address.port, 8321)
connector = memoryReactor.connectUNIX("/fake/path", ClientFactory())
verifyObject(IConnector, connector)
address = connector.getDestination()
verifyObject(IAddress, address)
self.assertEqual(address.name, "/fake/path")
def test_listenDefaultHost(self):
"""
L{MemoryReactor.listenTCP}, L{MemoryReactor.listenSSL} and
L{MemoryReactor.listenUNIX} will return an L{IListeningPort} whose
C{getHost} method returns an L{IAddress}; C{listenTCP} and C{listenSSL}
will have a default host of C{'0.0.0.0'}, and a port that reflects the
value passed, and C{listenUNIX} will have a name that reflects the path
passed.
"""
memoryReactor = MemoryReactor()
for port in [memoryReactor.listenTCP(8242, Factory()),
memoryReactor.listenSSL(8242, Factory(), None)]:
verifyObject(IListeningPort, port)
address = port.getHost()
verifyObject(IAddress, address)
self.assertEqual(address.host, '0.0.0.0')
self.assertEqual(address.port, 8242)
port = memoryReactor.listenUNIX("/path/to/socket", Factory())
verifyObject(IListeningPort, port)
address = port.getHost()
verifyObject(IAddress, address)
self.assertEqual(address.name, "/path/to/socket")
def test_readers(self):
"""
Adding, removing, and listing readers works.
"""
reader = object()
reactor = MemoryReactor()
reactor.addReader(reader)
reactor.addReader(reader)
self.assertEqual(reactor.getReaders(), [reader])
reactor.removeReader(reader)
self.assertEqual(reactor.getReaders(), [])
def test_writers(self):
"""
Adding, removing, and listing writers works.
"""
writer = object()
reactor = MemoryReactor()
reactor.addWriter(writer)
reactor.addWriter(writer)
self.assertEqual(reactor.getWriters(), [writer])
reactor.removeWriter(writer)
self.assertEqual(reactor.getWriters(), [])
| gpl-3.0 |
bazzinotti/mitmproxy | libmproxy/exceptions.py | 2 | 1097 | """
We try to be very hygienic regarding the exceptions we throw:
Every Exception mitmproxy raises shall be a subclass of ProxyException.
See also: http://lucumr.pocoo.org/2014/10/16/on-error-handling/
"""
from __future__ import (absolute_import, print_function, division)
class ProxyException(Exception):
"""
Base class for all exceptions thrown by libmproxy.
"""
def __init__(self, message=None):
super(ProxyException, self).__init__(message)
class ProtocolException(ProxyException):
pass
class TlsProtocolException(ProtocolException):
pass
class ClientHandshakeException(TlsProtocolException):
def __init__(self, message, server):
super(ClientHandshakeException, self).__init__(message)
self.server = server
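# Illustrative sketch added for this document (not part of mitmproxy): the
# hierarchy described in the module docstring lets callers catch the common
# base classes instead of every concrete subclass.  The helper name below is
# an assumption made purely for demonstration.
def _example_describe_error(err):
    """Return a short, human readable label for a libmproxy exception."""
    if isinstance(err, ProtocolException):
        return "protocol error: %s" % err
    if isinstance(err, ProxyException):
        return "proxy error: %s" % err
    return "unexpected error: %s" % err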
class Socks5ProtocolException(ProtocolException):
pass
class HttpProtocolException(ProtocolException):
pass
class ServerException(ProxyException):
pass
class ContentViewException(ProxyException):
pass
class ReplayException(ProxyException):
pass
class ScriptException(ProxyException):
pass | mit |
Nu3001/external_chromium_org | chrome/common/extensions/docs/server2/samples_data_source_test.py | 25 | 1064 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import unittest
from samples_data_source import SamplesDataSource
from servlet import Request
class SamplesDataSourceTest(unittest.TestCase):
def setUp(self):
self._base_path = os.path.join(sys.path[0],
'test_data',
'samples_data_source')
def _ReadLocalFile(self, filename):
with open(os.path.join(self._base_path, filename), 'r') as f:
return f.read()
def _FakeGet(self, key):
return json.loads(self._ReadLocalFile(key))
def testFilterSamples(self):
sds = SamplesDataSource({}, {}, 'fake_path', '.', Request.ForTest('/'))
sds.get = self._FakeGet
self.assertEquals(json.loads(self._ReadLocalFile('expected.json')),
sds.FilterSamples('samples.json', 'bobaloo'))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
hrjn/scikit-learn | sklearn/externals/joblib/func_inspect.py | 46 | 13254 | """
My own variation on function-specific inspect-like features.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
from itertools import islice
import inspect
import warnings
import re
import os
from ._compat import _basestring
from .logger import pformat
from ._memory_helpers import open_py_source
from ._compat import PY3_OR_LATER
def get_func_code(func):
""" Attempts to retrieve a reliable function code hash.
The reason we don't use inspect.getsource is that it caches the
source, whereas we want this to be modified on the fly when the
function is modified.
Returns
-------
func_code: string
The function code
source_file: string
The path to the file in which the function is defined.
first_line: int
The first line of the code in the source file.
Notes
------
This function does a bit more magic than inspect, and is thus
more robust.
"""
source_file = None
try:
code = func.__code__
source_file = code.co_filename
if not os.path.exists(source_file):
# Use inspect for lambda functions and functions defined in an
# interactive shell, or in doctests
source_code = ''.join(inspect.getsourcelines(func)[0])
line_no = 1
if source_file.startswith('<doctest '):
source_file, line_no = re.match(
'\<doctest (.*\.rst)\[(.*)\]\>', source_file).groups()
line_no = int(line_no)
source_file = '<doctest %s>' % source_file
return source_code, source_file, line_no
# Try to retrieve the source code.
with open_py_source(source_file) as source_file_obj:
first_line = code.co_firstlineno
# All the lines after the function definition:
source_lines = list(islice(source_file_obj, first_line - 1, None))
return ''.join(inspect.getblock(source_lines)), source_file, first_line
except:
# If the source code fails, we use the hash. This is fragile and
# might change from one session to another.
if hasattr(func, '__code__'):
# Python 3.X
return str(func.__code__.__hash__()), source_file, -1
else:
# Weird objects like numpy ufunc don't have __code__
# This is fragile, as quite often the id of the object is
# in the repr, so it might not persist across sessions,
# however it will work for ufuncs.
return repr(func), source_file, -1
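# Illustrative sketch added for this document (not part of joblib): the triple
# returned by get_func_code is what lets a cache notice that a function's
# source changed between sessions.  The helper name is an assumption.
def _example_has_function_changed(func, previous_code):
    """Return True if the source of `func` differs from a stored copy."""
    func_code, _source_file, _first_line = get_func_code(func)
    return func_code != previous_code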
def _clean_win_chars(string):
"""Windows cannot encode some characters in filename."""
import urllib
if hasattr(urllib, 'quote'):
quote = urllib.quote
else:
# In Python 3, quote is elsewhere
import urllib.parse
quote = urllib.parse.quote
for char in ('<', '>', '!', ':', '\\'):
string = string.replace(char, quote(char))
return string
def get_func_name(func, resolv_alias=True, win_characters=True):
""" Return the function import path (as a list of module names), and
a name for the function.
Parameters
----------
func: callable
The func to inspect
resolv_alias: boolean, optional
If true, possible local aliases are indicated.
win_characters: boolean, optional
If true, substitute special characters using urllib.quote
This is useful in Windows, as it cannot encode some filenames
"""
if hasattr(func, '__module__'):
module = func.__module__
else:
try:
module = inspect.getmodule(func)
except TypeError:
if hasattr(func, '__class__'):
module = func.__class__.__module__
else:
module = 'unknown'
if module is None:
# Happens in doctests, eg
module = ''
if module == '__main__':
try:
filename = os.path.abspath(inspect.getsourcefile(func))
except:
filename = None
if filename is not None:
# mangling of full path to filename
parts = filename.split(os.sep)
if parts[-1].startswith('<ipython-input'):
# function is defined in an IPython session. The filename
# will change with every new kernel instance. This hack
# always returns the same filename
parts[-1] = '__ipython-input__'
filename = '-'.join(parts)
if filename.endswith('.py'):
filename = filename[:-3]
module = module + '-' + filename
module = module.split('.')
if hasattr(func, 'func_name'):
name = func.func_name
elif hasattr(func, '__name__'):
name = func.__name__
else:
name = 'unknown'
# Hack to detect functions not defined at the module-level
if resolv_alias:
# TODO: Maybe add a warning here?
if hasattr(func, 'func_globals') and name in func.func_globals:
if not func.func_globals[name] is func:
name = '%s-alias' % name
if inspect.ismethod(func):
# We need to add the name of the class
if hasattr(func, 'im_class'):
klass = func.im_class
module.append(klass.__name__)
if os.name == 'nt' and win_characters:
# Stupid windows can't encode certain characters in filenames
name = _clean_win_chars(name)
module = [_clean_win_chars(s) for s in module]
return module, name
def getfullargspec(func):
"""Compatibility function to provide inspect.getfullargspec in Python 2
This should be rewritten using a backport of Python 3 signature
once we drop support for Python 2.6. We went for a simpler
approach at the time of writing because signature uses OrderedDict
which is not available in Python 2.6.
"""
try:
return inspect.getfullargspec(func)
except AttributeError:
arg_spec = inspect.getargspec(func)
import collections
tuple_fields = ('args varargs varkw defaults kwonlyargs '
'kwonlydefaults annotations')
tuple_type = collections.namedtuple('FullArgSpec', tuple_fields)
return tuple_type(args=arg_spec.args,
varargs=arg_spec.varargs,
varkw=arg_spec.keywords,
defaults=arg_spec.defaults,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
def _signature_str(function_name, arg_spec):
"""Helper function to output a function signature"""
# inspect.formatargspec can not deal with the same
# number of arguments in python 2 and 3
arg_spec_for_format = arg_spec[:7 if PY3_OR_LATER else 4]
arg_spec_str = inspect.formatargspec(*arg_spec_for_format)
return '{}{}'.format(function_name, arg_spec_str)
def _function_called_str(function_name, args, kwargs):
"""Helper function to output a function call"""
template_str = '{0}({1}, {2})'
args_str = repr(args)[1:-1]
kwargs_str = ', '.join('%s=%s' % (k, v)
for k, v in kwargs.items())
return template_str.format(function_name, args_str,
kwargs_str)
def filter_args(func, ignore_lst, args=(), kwargs=dict()):
""" Filters the given args and kwargs using a list of arguments to
ignore, and a function specification.
Parameters
----------
func: callable
Function giving the argument specification
ignore_lst: list of strings
List of arguments to ignore (either a name of an argument
in the function spec, or '*', or '**')
*args: list
Positional arguments passed to the function.
**kwargs: dict
Keyword arguments passed to the function
Returns
-------
filtered_args: list
List of filtered positional and keyword arguments.
"""
args = list(args)
if isinstance(ignore_lst, _basestring):
# Catch a common mistake
raise ValueError(
'ignore_lst must be a list of parameters to ignore '
'%s (type %s) was given' % (ignore_lst, type(ignore_lst)))
# Special case for functools.partial objects
if (not inspect.ismethod(func) and not inspect.isfunction(func)):
if ignore_lst:
warnings.warn('Cannot inspect object %s, ignore list will '
'not work.' % func, stacklevel=2)
return {'*': args, '**': kwargs}
arg_spec = getfullargspec(func)
arg_names = arg_spec.args + arg_spec.kwonlyargs
arg_defaults = arg_spec.defaults or ()
arg_defaults = arg_defaults + tuple(arg_spec.kwonlydefaults[k]
for k in arg_spec.kwonlyargs)
arg_varargs = arg_spec.varargs
arg_varkw = arg_spec.varkw
if inspect.ismethod(func):
# First argument is 'self', it has been removed by Python
# we need to add it back:
args = [func.__self__, ] + args
# XXX: Maybe I need an inspect.isbuiltin to detect C-level methods, such
# as on ndarrays.
_, name = get_func_name(func, resolv_alias=False)
arg_dict = dict()
arg_position = -1
for arg_position, arg_name in enumerate(arg_names):
if arg_position < len(args):
# Positional argument or keyword argument given as positional
if arg_name not in arg_spec.kwonlyargs:
arg_dict[arg_name] = args[arg_position]
else:
raise ValueError(
"Keyword-only parameter '%s' was passed as "
'positional parameter for %s:\n'
' %s was called.'
% (arg_name,
_signature_str(name, arg_spec),
_function_called_str(name, args, kwargs))
)
else:
position = arg_position - len(arg_names)
if arg_name in kwargs:
arg_dict[arg_name] = kwargs.pop(arg_name)
else:
try:
arg_dict[arg_name] = arg_defaults[position]
except (IndexError, KeyError):
# Missing argument
raise ValueError(
'Wrong number of arguments for %s:\n'
' %s was called.'
% (_signature_str(name, arg_spec),
_function_called_str(name, args, kwargs))
)
varkwargs = dict()
for arg_name, arg_value in sorted(kwargs.items()):
if arg_name in arg_dict:
arg_dict[arg_name] = arg_value
elif arg_varkw is not None:
varkwargs[arg_name] = arg_value
else:
raise TypeError("Ignore list for %s() contains an unexpected "
"keyword argument '%s'" % (name, arg_name))
if arg_varkw is not None:
arg_dict['**'] = varkwargs
if arg_varargs is not None:
varargs = args[arg_position + 1:]
arg_dict['*'] = varargs
# Now remove the arguments to be ignored
for item in ignore_lst:
if item in arg_dict:
arg_dict.pop(item)
else:
raise ValueError("Ignore list: argument '%s' is not defined for "
"function %s"
% (item,
_signature_str(name, arg_spec))
)
# XXX: Return a sorted list of pairs?
return arg_dict
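# Illustrative sketch added for this document (not part of joblib): a minimal
# example of the argument dictionary filter_args builds; the sample function
# and the ignored 'verbose' parameter are assumptions.
def _example_filter_args_usage():
    def compute(a, b, verbose=False):
        return a + b
    # 'verbose' is dropped from the result, leaving {'a': 1, 'b': 2}.
    return filter_args(compute, ['verbose'], args=(1, 2),
                       kwargs=dict(verbose=True))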
def _format_arg(arg):
formatted_arg = pformat(arg, indent=2)
if len(formatted_arg) > 1500:
formatted_arg = '%s...' % formatted_arg[:700]
return formatted_arg
def format_signature(func, *args, **kwargs):
# XXX: Should this use inspect.formatargvalues/formatargspec?
module, name = get_func_name(func)
module = [m for m in module if m]
if module:
module.append(name)
module_path = '.'.join(module)
else:
module_path = name
arg_str = list()
previous_length = 0
for arg in args:
formatted_arg = _format_arg(arg)
if previous_length > 80:
formatted_arg = '\n%s' % formatted_arg
previous_length = len(formatted_arg)
arg_str.append(formatted_arg)
arg_str.extend(['%s=%s' % (v, _format_arg(i)) for v, i in kwargs.items()])
arg_str = ', '.join(arg_str)
signature = '%s(%s)' % (name, arg_str)
return module_path, signature
def format_call(func, args, kwargs, object_name="Memory"):
""" Returns a nicely formatted statement displaying the function
call with the given arguments.
"""
path, signature = format_signature(func, *args, **kwargs)
msg = '%s\n[%s] Calling %s...\n%s' % (80 * '_', object_name,
path, signature)
return msg
# XXX: Not using logging framework
# self.debug(msg)
| bsd-3-clause |
bdero/edx-platform | lms/djangoapps/verify_student/tests/test_integration.py | 2 | 4457 | """
Integration tests of the payment flow, including course mode selection.
"""
from lxml.html import soupparser
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.tests.factories import UserFactory
from course_modes.tests.factories import CourseModeFactory
from courseware.tests.tests import TEST_DATA_MONGO_MODULESTORE
from verify_student.models import SoftwareSecurePhotoVerification
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class TestProfEdVerification(ModuleStoreTestCase):
"""
Integration test for professional ed verification, including course mode selection.
"""
# Choose an uncommon number for the price so we can search for it on the page
MIN_PRICE = 1438
def setUp(self):
self.user = UserFactory.create(username="rusty", password="test")
self.client.login(username="rusty", password="test")
course = CourseFactory.create(org='Robot', number='999', display_name='Test Course')
self.course_key = course.id
CourseModeFactory(
mode_slug="professional",
course_id=self.course_key,
min_price=self.MIN_PRICE,
suggested_prices=''
)
self.urls = {
'course_modes_choose': reverse(
'course_modes_choose',
args=[unicode(self.course_key)]
),
'verify_show_student_requirements': reverse(
'verify_student_show_requirements',
args=[unicode(self.course_key)]
),
'verify_student_verify': reverse(
'verify_student_verify',
args=[unicode(self.course_key)]
),
'verify_student_verified': reverse(
'verify_student_verified',
args=[unicode(self.course_key)]
) + "?upgrade=False",
}
def test_new_user_flow(self):
# Go to the course mode page, expecting a redirect
# to the show requirements page
# because this is a professional ed course
# (otherwise, the student would have the option to choose their track)
resp = self.client.get(self.urls['course_modes_choose'], follow=True)
self.assertRedirects(resp, self.urls['verify_show_student_requirements'])
# On the show requirements page, verify that there's a link to the verify page
# (this is the only action the user is allowed to take)
self.assertContains(resp, self.urls['verify_student_verify'])
# Simulate the user clicking the button by following the link
# to the verified page.
# Since there are no suggested prices for professional ed,
# expect that only one price is displayed.
resp = self.client.get(self.urls['verify_student_verify'])
self.assertEqual(self._prices_on_page(resp.content), [self.MIN_PRICE])
def test_already_verified_user_flow(self):
# Simulate the user already being verified
self._verify_student()
# Go to the course mode page, expecting a redirect to the
# verified (past tense!) page.
resp = self.client.get(self.urls['course_modes_choose'], follow=True)
self.assertRedirects(resp, self.urls['verify_student_verified'])
# Since this is a professional ed course, expect that only
# one price is shown.
self.assertContains(resp, "Your Course Total is $")
self.assertContains(resp, str(self.MIN_PRICE))
# On the verified page, expect that there's a link to payment page
self.assertContains(resp, '/shoppingcart/payment_fake')
def _prices_on_page(self, page_content):
""" Retrieve the available prices on the verify page. """
html = soupparser.fromstring(page_content)
xpath_sel = '//li[@class="field contribution-option"]/span[@class="label-value"]/text()'
return [int(price) for price in html.xpath(xpath_sel)]
def _verify_student(self):
""" Simulate that the student's identity has already been verified. """
attempt = SoftwareSecurePhotoVerification.objects.create(user=self.user)
attempt.mark_ready()
attempt.submit()
attempt.approve()
| agpl-3.0 |
GenericStudent/home-assistant | homeassistant/components/notion/__init__.py | 6 | 8987 | """Support for Notion."""
import asyncio
from datetime import timedelta
import logging
from aionotion import async_get_client
from aionotion.errors import InvalidCredentialsError, NotionError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import (
aiohttp_client,
config_validation as cv,
device_registry as dr,
)
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import DATA_COORDINATOR, DOMAIN
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["binary_sensor", "sensor"]
ATTR_SYSTEM_MODE = "system_mode"
ATTR_SYSTEM_NAME = "system_name"
DEFAULT_ATTRIBUTION = "Data provided by Notion"
DEFAULT_SCAN_INTERVAL = timedelta(minutes=1)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
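# Illustrative configuration.yaml entry matching CONFIG_SCHEMA above (a comment
# added for this document; the credentials are placeholders and the top-level
# key follows the component name):
#
#   notion:
#     username: someone@example.com
#     password: !secret notion_password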
async def async_setup(hass: HomeAssistant, config: dict) -> bool:
"""Set up the Notion component."""
hass.data[DOMAIN] = {DATA_COORDINATOR: {}}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_USERNAME: conf[CONF_USERNAME],
CONF_PASSWORD: conf[CONF_PASSWORD],
},
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Notion as a config entry."""
if not entry.unique_id:
hass.config_entries.async_update_entry(
entry, unique_id=entry.data[CONF_USERNAME]
)
session = aiohttp_client.async_get_clientsession(hass)
try:
client = await async_get_client(
entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD], session
)
except InvalidCredentialsError:
_LOGGER.error("Invalid username and/or password")
return False
except NotionError as err:
_LOGGER.error("Config entry failed: %s", err)
raise ConfigEntryNotReady from err
async def async_update():
"""Get the latest data from the Notion API."""
data = {"bridges": {}, "sensors": {}, "tasks": {}}
tasks = {
"bridges": client.bridge.async_all(),
"sensors": client.sensor.async_all(),
"tasks": client.task.async_all(),
}
results = await asyncio.gather(*tasks.values(), return_exceptions=True)
for attr, result in zip(tasks, results):
if isinstance(result, NotionError):
raise UpdateFailed(
f"There was a Notion error while updating {attr}: {result}"
)
if isinstance(result, Exception):
raise UpdateFailed(
f"There was an unknown error while updating {attr}: {result}"
)
for item in result:
if attr == "bridges" and item["id"] not in data["bridges"]:
# If a new bridge is discovered, register it:
hass.async_create_task(async_register_new_bridge(hass, item, entry))
data[attr][item["id"]] = item
return data
coordinator = hass.data[DOMAIN][DATA_COORDINATOR][
entry.entry_id
] = DataUpdateCoordinator(
hass,
_LOGGER,
name=entry.data[CONF_USERNAME],
update_interval=DEFAULT_SCAN_INTERVAL,
update_method=async_update,
)
await coordinator.async_refresh()
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a Notion config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN][DATA_COORDINATOR].pop(entry.entry_id)
return unload_ok
async def async_register_new_bridge(
hass: HomeAssistant, bridge: dict, entry: ConfigEntry
):
"""Register a new bridge."""
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, bridge["hardware_id"])},
manufacturer="Silicon Labs",
model=bridge["hardware_revision"],
name=bridge["name"] or bridge["id"],
sw_version=bridge["firmware_version"]["wifi"],
)
class NotionEntity(CoordinatorEntity):
"""Define a base Notion entity."""
def __init__(
self,
coordinator: DataUpdateCoordinator,
task_id: str,
sensor_id: str,
bridge_id: str,
system_id: str,
name: str,
device_class: str,
):
"""Initialize the entity."""
super().__init__(coordinator)
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._bridge_id = bridge_id
self._device_class = device_class
self._name = name
self._sensor_id = sensor_id
self._state = None
self._system_id = system_id
self._task_id = task_id
@property
def available(self) -> bool:
"""Return True if entity is available."""
return (
self.coordinator.last_update_success
and self._task_id in self.coordinator.data["tasks"]
)
@property
def device_class(self) -> str:
"""Return the device class."""
return self._device_class
@property
def device_state_attributes(self) -> dict:
"""Return the state attributes."""
return self._attrs
@property
def device_info(self) -> dict:
"""Return device registry information for this entity."""
bridge = self.coordinator.data["bridges"].get(self._bridge_id, {})
sensor = self.coordinator.data["sensors"][self._sensor_id]
return {
"identifiers": {(DOMAIN, sensor["hardware_id"])},
"manufacturer": "Silicon Labs",
"model": sensor["hardware_revision"],
"name": sensor["name"],
"sw_version": sensor["firmware_version"],
"via_device": (DOMAIN, bridge.get("hardware_id")),
}
@property
def name(self) -> str:
"""Return the name of the entity."""
sensor = self.coordinator.data["sensors"][self._sensor_id]
return f'{sensor["name"]}: {self._name}'
@property
def unique_id(self) -> str:
"""Return a unique, unchanging string that represents this entity."""
task = self.coordinator.data["tasks"][self._task_id]
return f'{self._sensor_id}_{task["task_type"]}'
async def _async_update_bridge_id(self) -> None:
"""Update the entity's bridge ID if it has changed.
Sensors can move to other bridges based on signal strength, etc.
"""
sensor = self.coordinator.data["sensors"][self._sensor_id]
# If the sensor's bridge ID is the same as what we had before or if it points
# to a bridge that doesn't exist (which can happen due to a Notion API bug),
# return immediately:
if (
self._bridge_id == sensor["bridge"]["id"]
or sensor["bridge"]["id"] not in self.coordinator.data["bridges"]
):
return
self._bridge_id = sensor["bridge"]["id"]
device_registry = await dr.async_get_registry(self.hass)
bridge = self.coordinator.data["bridges"][self._bridge_id]
bridge_device = device_registry.async_get_device(
{DOMAIN: bridge["hardware_id"]}, set()
)
this_device = device_registry.async_get_device(
{DOMAIN: sensor["hardware_id"]}, set()
)
device_registry.async_update_device(
this_device.id, via_device_id=bridge_device.id
)
@callback
def _async_update_from_latest_data(self) -> None:
"""Update the entity from the latest data."""
raise NotImplementedError
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
@callback
def update():
"""Update the state."""
self._async_update_from_latest_data()
self.async_write_ha_state()
self.async_on_remove(self.coordinator.async_add_listener(update))
self._async_update_from_latest_data()
| apache-2.0 |
jporradre/Useful-scripts | xls_to_csv.py | 1 | 1752 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Copyright (C) 2016 Juan Pablo Orradre
#
# xls_to_csv.py : Python script useful to convert an Excel file to a CSV file.
#
# Use: python xls_to_csv.py source_xls_file destination_csv_file
#
# Notes:
# - Converts an Excel file to a CSV file.
# - If the excel file has multiple worksheets, only the first worksheet is converted.
# - Uses unicodecsv, so it will handle Unicode characters.
# - Uses a recent version of xlrd, so it should handle old .xls and new .xlsx equally well.
# - Based on script from http://www.penwatch.net/cms/?p=484
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import xlrd
import unicodecsv
if len(sys.argv) < 3:
print("ERROR - You must pass as parameters source xls filename and destiny csv filename respectively")
exit(1)
xls_filename = sys.argv[1]
csv_filename = sys.argv[2]
wb = xlrd.open_workbook(xls_filename)
sh = wb.sheet_by_index(0)
fh = open(csv_filename,"wb")
csv_out = unicodecsv.writer(fh, encoding='utf-8')
for row_number in xrange (sh.nrows):
csv_out.writerow(sh.row_values(row_number))
fh.close()
| gpl-3.0 |
emperrors/fetchLinuxIDC | wx_py/panel/LeftWorkSpace.py | 1 | 4770 | #! /usr/bin/python
# -*- coding: gb2312 -*-
# import wx_py.WorkSpace as WorkSpace
from WorkSpace import *
from FolderItem import *
from ArrowCallBack import *
from CommonDefs import *
import wx
MAX_FOLDER_ITEM_COUNT = 7
class LeftWorkSpace(WorkSpace, ArrowCallBack):
def __init__(self, parent, id, pos, size, style, bk_color):
super(LeftWorkSpace, self).__init__(parent, id, pos, size, style, bk_color)
# self.CallBackBlender = ArrowCallBack()
self.GlobalIndex = -1
        # The most recently added item, kept for comparison when a new one is appended
self.LatestFolderItem = None
self.FolderItemDict = {
}
self.Bind(event=wx.EVT_LEFT_UP,
handler=self.__on_left_key_up,
source=None,
id=wx.ID_ANY,
id2=wx.ID_ANY)
# self.TestFolderItem = FolderItem(parent=self, id=wx.ID_ANY,
# pos=(0, 0),
# size=(self.GetSize().x, 40),
# style=wx.TAB_TRAVERSAL,
# bk_color="#00ffbb")
# self.TestFolderItem.set_arrow_callback(self.CallBackBlender)
#
# self.TestFolderItem2 = FolderItem(parent=self, id=wx.ID_ANY,
# pos=(0, self.TestFolderItem.GetPosition().y +
# self.TestFolderItem.GetSize().y + 1),
# size=(self.GetSize().x, 40),
# style=wx.TAB_TRAVERSAL,
# bk_color="#00ffbb")
# self.TestFolderItem2.set_arrow_callback(self.CallBackBlender)
#
# self.TestFolderItem3 = FolderItem(parent=self, id=wx.ID_ANY,
# pos=(0, self.TestFolderItem2.GetPosition().y +
# self.TestFolderItem2.GetSize().y + 1),
# size=(self.GetSize().x, 40),
# style=wx.TAB_TRAVERSAL,
# bk_color="#00ffbb")
# self.TestFolderItem3.set_arrow_callback(self.CallBackBlender)
def __on_left_key_up(self, event):
print("aaa")
self.__append_folder_item()
def __append_folder_item(self):
# print(self.TestFolderItem.get_normal_image_height())
if len(self.FolderItemDict) >= MAX_FOLDER_ITEM_COUNT:
print("Pool filled up")
return
self.GlobalIndex += 1
new_pos_y = 0
if self.LatestFolderItem is not None:
new_pos_y = self.LatestFolderItem.GetPosition().y + \
self.LatestFolderItem.GetSize().y + \
GAP_HEIGHT
self.LatestFolderItem = FolderItem(parent=self,
id=wx.ID_ANY,
pos=(0, new_pos_y),
size=(self.GetSize().x, 20),
style=wx.TAB_TRAVERSAL,
bk_color="#00ffbb",
text_id=self.GlobalIndex)
new_folder_item = self.LatestFolderItem
# newFolderItem.set_text("%d" % self.GlobalIndex)
# newFolderItem.set_arrow_callback(self.CallBackBlender)
self.FolderItemDict[self.GlobalIndex] = new_folder_item
def print_call(self):
# super(LeftWorkSpace, self).print_call()
print("LeftWorkSpace::ArrowCallBack print_call called")
def mov_after_text_id(self, text_id=-1, delta_dis=0):
# super(LeftWorkSpace, self).mov_after_text_id(text_id, move_up)
print("LeftWorkSpace::ArrowCallBack mov_after_text_id called")
folder_item = self.FolderItemDict.get(text_id, None)
if folder_item is None:
return
self.__common_move_distance(text_id, delta_dis)
def remove_item(self, text_id=-1, delta_dis=0):
# super(LeftWorkSpace, self).remove_item(text_id)
# self.FolderItemDict.pop(text_id)
self.FolderItemDict[text_id].Destroy()
del self.FolderItemDict[text_id]
self.__common_move_distance(text_id, delta_dis)
# keys = list(self.FolderItemDict.keys())
# for key, value in keys:
# if text_id is key:
# del self.FolderItemDict[key]
def __common_move_distance(self, text_id, delta_dis):
for key, value in self.FolderItemDict.items():
if key > text_id:
tmp_pos = value.GetPosition()
value.Move(tmp_pos.x, tmp_pos.y + delta_dis)
| gpl-3.0 |
tfiedor/perun | perun/cli_groups/config_cli.py | 1 | 7641 | """Group of CLI commands used for manipulation with config"""
import click
import perun.logic.commands as commands
import perun.utils.cli_helpers as cli_helpers
import perun.utils.log as perun_log
from perun.utils.exceptions import NotPerunRepositoryException, MissingConfigSectionException, \
ExternalEditorErrorException
__author__ = 'Tomas Fiedor'
@click.group()
@click.option('--local', '-l', 'store_type', flag_value='local',
help='Sets the local config, i.e. ``.perun/local.yml``, as the source config.')
@click.option('--shared', '-h', 'store_type', flag_value='shared',
help='Sets the shared config, i.e. ``shared.yml.``, as the source config')
@click.option('--nearest', '-n', 'store_type', flag_value='recursive', default=True,
help='Sets the nearest suitable config as the source config. The'
' lookup strategy can differ for ``set`` and '
'``get``/``edit``.')
@click.pass_context
def config(ctx, **kwargs):
"""Manages the stored local and shared configuration.
Perun supports two external configurations:
1. ``local.yml``: the local configuration stored in ``.perun``
directory, containing the keys such as specification of wrapped
repository or job matrix used for quick generation of profiles (run
``perun run matrix --help`` or refer to :doc:`jobs` for information
how to construct the job matrix).
2. ``shared.yml``: the global configuration shared by all perun
instances, containing shared keys, such as text editor, formatting
string, etc.
    The syntax of the ``<key>`` in most operations consists of sections
separated by dots, e.g. ``vcs.type`` specifies ``type`` key in ``vcs``
section. The lookup of the ``<key>`` can be performed in three modes,
``--local``, ``--shared`` and ``--nearest``, locating or setting the
``<key>`` in local, shared or nearest configuration respectively (e.g. when
one is trying to get some key, there may be nested perun instances that do
not contain the given key). By default, perun operates in the nearest
config mode.
Refer to :doc:`config` for full description of configurations and
:ref:`config-types` for full list of configuration options.
E.g. using the following one can retrieve the type of the nearest perun
instance wrapper:
.. code-block:: bash
$ perun config get vcs.type
vcs.type: git
"""
ctx.obj = kwargs
@config.command('get')
@click.argument('key', required=True, metavar='<key>',
callback=cli_helpers.config_key_validation_callback)
@click.pass_context
def config_get(ctx, key):
"""Looks up the given ``<key>`` within the configuration hierarchy and returns
the stored value.
    The syntax of the ``<key>`` consists of sections separated by dots, e.g.
``vcs.type`` specifies ``type`` key in ``vcs`` section. The lookup of the
``<key>`` can be performed in three modes, ``--local``, ``--shared`` and
``--nearest``, locating the ``<key>`` in local, shared or nearest
configuration respectively (e.g. when one is trying to get some key, there
may be nested perun instances that do not contain the given key). By
default, perun operates in the nearest config mode.
Refer to :doc:`config` for full description of configurations and
:ref:`config-types` for full list of configuration options.
E.g. using the following can retrieve the type of the nearest perun
wrapper:
.. code-block:: bash
$ perun config get vcs.type
vcs.type: git
$ perun config --shared get general.editor
general.editor: vim
"""
try:
commands.config_get(ctx.obj['store_type'], key)
except MissingConfigSectionException as mcs_err:
perun_log.error("error while getting key '{}': {}".format(
key, str(mcs_err)
))
@config.command('set')
@click.argument('key', required=True, metavar='<key>',
callback=cli_helpers.config_key_validation_callback)
@click.argument('value', required=True, metavar='<value>')
@click.pass_context
def config_set(ctx, key, value):
"""Sets the value of the ``<key>`` to the given ``<value>`` in the target
configuration file.
    The syntax of the ``<key>`` consists of sections separated by dots, e.g.
``vcs.type`` specifies ``type`` key in ``vcs`` section. Perun sets the
``<key>`` in three modes, ``--local``, ``--shared`` and ``--nearest``,
which sets the ``<key>`` in local, shared or nearest configuration
respectively (e.g. when one is trying to get some key, there may be nested
perun instances that do not contain the given key). By default, perun will
operate in the nearest config mode.
The ``<value>`` is arbitrary depending on the key.
Refer to :doc:`config` for full description of configurations and
:ref:`config-types` for full list of configuration options and their
values.
E.g. using the following can set the log format for nearest perun instance
wrapper:
.. code-block:: bash
$ perun config set format.shortlog "| %source% | %collector% |"
format.shortlog: | %source% | %collector% |
"""
commands.config_set(ctx.obj['store_type'], key, value)
@config.command('edit')
@click.pass_context
def config_edit(ctx):
"""Edits the configuration file in the external editor.
    The used editor is specified by the :ckey:`general.editor` option,
    looked up in the nearest perun configuration.
Refer to :doc:`config` for full description of configurations and
:ref:`config-types` for full list of configuration options.
"""
try:
commands.config_edit(ctx.obj['store_type'])
except (ExternalEditorErrorException, MissingConfigSectionException) as editor_exception:
perun_log.error("could not invoke external editor: {}".format(str(editor_exception)))
@config.command('reset')
@click.argument('config_template', required=False, default='master',
metavar='<template>')
@click.pass_context
def config_reset(ctx, config_template):
"""Resets the configuration file to a sane default.
If we are resetting the local configuration file we can specify a <template> that
will be used to generate a predefined set of options. Currently we support the following:
    1. **user** configuration is meant for beginner users who have no experience with Perun and
have not read the documentation thoroughly. This contains a basic preconfiguration that should
be applicable for most of the projects---data are collected by :ref:`collectors-time` and are
automatically registered in the Perun after successful run. The performance is checked using
the :ref:`degradation-method-aat`. Missing profiling info will be looked up automatically.
    2. **developer** configuration is meant for advanced users who have some understanding of
profiling and/or Perun. Fair amount of options are up to the user, such as the collection of
the data and the commands that will be profiled.
3. **master** configuration is meant for experienced users. The configuration will be mostly
empty.
See :ref:`config-templates` to learn more about predefined configuration options.
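    For example, to reset the local configuration to the bare **master** template
    (an illustration consistent with the options described above):
    .. code-block:: bash
        $ perun config --local reset master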
"""
try:
commands.config_reset(ctx.obj['store_type'], config_template)
except NotPerunRepositoryException as npre:
perun_log.error("could not reset the {} configuration: {}".format(
ctx.obj['store_type'], str(npre)
))
| gpl-3.0 |
GAXUSXX/G935FGaXusKernel3 | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
| gpl-2.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/debug_toolbar/panels/profiling.py | 4 | 4988 | from __future__ import absolute_import, division, unicode_literals
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
from debug_toolbar.panels import Panel
from debug_toolbar import settings as dt_settings
import cProfile
from pstats import Stats
from colorsys import hsv_to_rgb
import os
class DjangoDebugToolbarStats(Stats):
__root = None
def get_root_func(self):
if self.__root is None:
for func, (cc, nc, tt, ct, callers) in self.stats.items():
if len(callers) == 0:
self.__root = func
break
return self.__root
class FunctionCall(object):
def __init__(self, statobj, func, depth=0, stats=None,
id=0, parent_ids=[], hsv=(0, 0.5, 1)):
self.statobj = statobj
self.func = func
if stats:
self.stats = stats
else:
self.stats = statobj.stats[func][:4]
self.depth = depth
self.id = id
self.parent_ids = parent_ids
self.hsv = hsv
def parent_classes(self):
return self.parent_classes
def background(self):
r, g, b = hsv_to_rgb(*self.hsv)
return 'rgb(%f%%,%f%%,%f%%)' % (r * 100, g * 100, b * 100)
def func_std_string(self): # match what old profile produced
func_name = self.func
if func_name[:2] == ('~', 0):
# special case for built-in functions
name = func_name[2]
if name.startswith('<') and name.endswith('>'):
return '{%s}' % name[1:-1]
else:
return name
else:
file_name, line_num, method = self.func
idx = file_name.find('/site-packages/')
if idx > -1:
file_name = file_name[(idx + 14):]
file_path, file_name = file_name.rsplit(os.sep, 1)
return mark_safe(
'<span class="djdt-path">{0}/</span>'
'<span class="djdt-file">{1}</span>'
' in <span class="djdt-func">{3}</span>'
'(<span class="djdt-lineno">{2}</span>)'.format(
file_path,
file_name,
line_num,
method))
def subfuncs(self):
i = 0
h, s, v = self.hsv
count = len(self.statobj.all_callees[self.func])
for func, stats in self.statobj.all_callees[self.func].items():
i += 1
h1 = h + (i / count) / (self.depth + 1)
if stats[3] == 0:
s1 = 0
else:
s1 = s * (stats[3] / self.stats[3])
yield FunctionCall(self.statobj,
func,
self.depth + 1,
stats=stats,
id=str(self.id) + '_' + str(i),
parent_ids=self.parent_ids + [self.id],
hsv=(h1, s1, 1))
def count(self):
return self.stats[1]
def tottime(self):
return self.stats[2]
def cumtime(self):
cc, nc, tt, ct = self.stats
return self.stats[3]
def tottime_per_call(self):
cc, nc, tt, ct = self.stats
if nc == 0:
return 0
return tt / nc
def cumtime_per_call(self):
cc, nc, tt, ct = self.stats
if cc == 0:
return 0
return ct / cc
def indent(self):
return 16 * self.depth
class ProfilingPanel(Panel):
"""
Panel that displays profiling information.
"""
title = _("Profiling")
template = 'debug_toolbar/panels/profiling.html'
def process_view(self, request, view_func, view_args, view_kwargs):
self.profiler = cProfile.Profile()
args = (request,) + view_args
return self.profiler.runcall(view_func, *args, **view_kwargs)
def add_node(self, func_list, func, max_depth, cum_time=0.1):
func_list.append(func)
func.has_subfuncs = False
if func.depth < max_depth:
for subfunc in func.subfuncs():
if subfunc.stats[3] >= cum_time:
func.has_subfuncs = True
self.add_node(func_list, subfunc, max_depth, cum_time=cum_time)
def process_response(self, request, response):
if not hasattr(self, 'profiler'):
return None
# Could be delayed until the panel content is requested (perf. optim.)
self.profiler.create_stats()
self.stats = DjangoDebugToolbarStats(self.profiler)
self.stats.calc_callees()
root = FunctionCall(self.stats, self.stats.get_root_func(), depth=0)
func_list = []
self.add_node(func_list,
root,
dt_settings.CONFIG['PROFILER_MAX_DEPTH'],
root.stats[3] / 8)
self.record_stats({'func_list': func_list})
| agpl-3.0 |
philoniare/horizon | horizon/loaders.py | 14 | 2355 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Wrapper for loading templates from "templates" directories in panel modules.
"""
import io
import os
import django
from django.conf import settings
from django.template.base import TemplateDoesNotExist # noqa
if django.get_version() >= '1.8':
from django.template.engine import Engine
from django.template.loaders.base import Loader as tLoaderCls
else:
from django.template.loader import BaseLoader as tLoaderCls # noqa
from django.utils._os import safe_join # noqa
# Set up a cache of the panel directories to search.
panel_template_dirs = {}
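# Illustrative note added for this document: panels register entries shaped
# roughly like panel_template_dirs['mydash/mypanel'] = '/panel/templates', so
# a name such as 'mydash/mypanel/index.html' is resolved by the loader below
# to '/panel/templates/mypanel/index.html' (the example paths are assumptions).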
class TemplateLoader(tLoaderCls):
is_usable = True
def get_template_sources(self, template_name):
bits = template_name.split('/', 2)
if len(bits) == 3:
dash_name, panel_name, remainder = bits
key = os.path.join(dash_name, panel_name)
if key in panel_template_dirs:
template_dir = panel_template_dirs[key]
try:
yield safe_join(template_dir, panel_name, remainder)
except UnicodeDecodeError:
# The template dir name wasn't valid UTF-8.
raise
except ValueError:
# The joined path was located outside of template_dir.
pass
def load_template_source(self, template_name, template_dirs=None):
for path in self.get_template_sources(template_name):
try:
with io.open(path, encoding=settings.FILE_CHARSET) as file:
return (file.read(), path)
except IOError:
pass
raise TemplateDoesNotExist(template_name)
if django.get_version() >= '1.8':
e = Engine()
_loader = TemplateLoader(e)
else:
_loader = TemplateLoader()
| apache-2.0 |
balloob/home-assistant | homeassistant/components/vallox/__init__.py | 26 | 8917 | """Support for Vallox ventilation units."""
from datetime import timedelta
import ipaddress
import logging
from vallox_websocket_api import PROFILE as VALLOX_PROFILE, Vallox
from vallox_websocket_api.constants import vlxDevConstants
from vallox_websocket_api.exceptions import ValloxApiException
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
_LOGGER = logging.getLogger(__name__)
DOMAIN = "vallox"
DEFAULT_NAME = "Vallox"
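# Illustrative configuration.yaml entry for this component (a comment added
# for this document; host and name are placeholder values validated by
# CONFIG_SCHEMA below):
#
#   vallox:
#     host: 192.168.1.10
#     name: Ventilation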
SIGNAL_VALLOX_STATE_UPDATE = "vallox_state_update"
SCAN_INTERVAL = timedelta(seconds=60)
# Various metric keys that are reused between profiles.
METRIC_KEY_MODE = "A_CYC_MODE"
METRIC_KEY_PROFILE_FAN_SPEED_HOME = "A_CYC_HOME_SPEED_SETTING"
METRIC_KEY_PROFILE_FAN_SPEED_AWAY = "A_CYC_AWAY_SPEED_SETTING"
METRIC_KEY_PROFILE_FAN_SPEED_BOOST = "A_CYC_BOOST_SPEED_SETTING"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): vol.All(ipaddress.ip_address, cv.string),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
# pylint: disable=no-member
PROFILE_TO_STR_SETTABLE = {
VALLOX_PROFILE.HOME: "Home",
VALLOX_PROFILE.AWAY: "Away",
VALLOX_PROFILE.BOOST: "Boost",
VALLOX_PROFILE.FIREPLACE: "Fireplace",
}
STR_TO_PROFILE = {v: k for (k, v) in PROFILE_TO_STR_SETTABLE.items()}
# pylint: disable=no-member
PROFILE_TO_STR_REPORTABLE = {
**{VALLOX_PROFILE.NONE: "None", VALLOX_PROFILE.EXTRA: "Extra"},
**PROFILE_TO_STR_SETTABLE,
}
ATTR_PROFILE = "profile"
ATTR_PROFILE_FAN_SPEED = "fan_speed"
SERVICE_SCHEMA_SET_PROFILE = vol.Schema(
{vol.Required(ATTR_PROFILE): vol.All(cv.string, vol.In(STR_TO_PROFILE))}
)
SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED = vol.Schema(
{
vol.Required(ATTR_PROFILE_FAN_SPEED): vol.All(
vol.Coerce(int), vol.Clamp(min=0, max=100)
)
}
)
SERVICE_SET_PROFILE = "set_profile"
SERVICE_SET_PROFILE_FAN_SPEED_HOME = "set_profile_fan_speed_home"
SERVICE_SET_PROFILE_FAN_SPEED_AWAY = "set_profile_fan_speed_away"
SERVICE_SET_PROFILE_FAN_SPEED_BOOST = "set_profile_fan_speed_boost"
SERVICE_TO_METHOD = {
SERVICE_SET_PROFILE: {
"method": "async_set_profile",
"schema": SERVICE_SCHEMA_SET_PROFILE,
},
SERVICE_SET_PROFILE_FAN_SPEED_HOME: {
"method": "async_set_profile_fan_speed_home",
"schema": SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
},
SERVICE_SET_PROFILE_FAN_SPEED_AWAY: {
"method": "async_set_profile_fan_speed_away",
"schema": SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
},
SERVICE_SET_PROFILE_FAN_SPEED_BOOST: {
"method": "async_set_profile_fan_speed_boost",
"schema": SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
},
}
DEFAULT_FAN_SPEED_HOME = 50
DEFAULT_FAN_SPEED_AWAY = 25
DEFAULT_FAN_SPEED_BOOST = 65
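# Illustrative service call matching SERVICE_SCHEMA_SET_PROFILE above (a
# comment added for this document; the chosen profile is just an example):
#
#   service: vallox.set_profile
#   data:
#     profile: Boost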
async def async_setup(hass, config):
"""Set up the client and boot the platforms."""
conf = config[DOMAIN]
host = conf.get(CONF_HOST)
name = conf.get(CONF_NAME)
client = Vallox(host)
state_proxy = ValloxStateProxy(hass, client)
service_handler = ValloxServiceHandler(client, state_proxy)
hass.data[DOMAIN] = {"client": client, "state_proxy": state_proxy, "name": name}
for vallox_service in SERVICE_TO_METHOD:
schema = SERVICE_TO_METHOD[vallox_service]["schema"]
hass.services.async_register(
DOMAIN, vallox_service, service_handler.async_handle, schema=schema
)
# The vallox hardware expects quite strict timings for websocket
# requests. Timings that machines with less processing power, like
# Raspberries, cannot live up to during the busy start phase of Home
    # Assistant. Hence, async_add_entities() for fan and sensor in respective
# code will be called with update_before_add=False to intentionally delay
# the first request, increasing chance that it is issued only when the
# machine is less busy again.
hass.async_create_task(async_load_platform(hass, "sensor", DOMAIN, {}, config))
hass.async_create_task(async_load_platform(hass, "fan", DOMAIN, {}, config))
async_track_time_interval(hass, state_proxy.async_update, SCAN_INTERVAL)
return True
class ValloxStateProxy:
"""Helper class to reduce websocket API calls."""
def __init__(self, hass, client):
"""Initialize the proxy."""
self._hass = hass
self._client = client
self._metric_cache = {}
self._profile = None
self._valid = False
def fetch_metric(self, metric_key):
"""Return cached state value."""
_LOGGER.debug("Fetching metric key: %s", metric_key)
if not self._valid:
raise OSError("Device state out of sync.")
if metric_key not in vlxDevConstants.__dict__:
raise KeyError(f"Unknown metric key: {metric_key}")
return self._metric_cache[metric_key]
def get_profile(self):
"""Return cached profile value."""
_LOGGER.debug("Returning profile")
if not self._valid:
raise OSError("Device state out of sync.")
return PROFILE_TO_STR_REPORTABLE[self._profile]
async def async_update(self, event_time):
"""Fetch state update."""
_LOGGER.debug("Updating Vallox state cache")
try:
self._metric_cache = await self._client.fetch_metrics()
self._profile = await self._client.get_profile()
self._valid = True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error during state cache update: %s", err)
self._valid = False
async_dispatcher_send(self._hass, SIGNAL_VALLOX_STATE_UPDATE)
class ValloxServiceHandler:
"""Services implementation."""
def __init__(self, client, state_proxy):
"""Initialize the proxy."""
self._client = client
self._state_proxy = state_proxy
async def async_set_profile(self, profile: str = "Home") -> bool:
"""Set the ventilation profile."""
_LOGGER.debug("Setting ventilation profile to: %s", profile)
try:
await self._client.set_profile(STR_TO_PROFILE[profile])
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting ventilation profile: %s", err)
return False
async def async_set_profile_fan_speed_home(
self, fan_speed: int = DEFAULT_FAN_SPEED_HOME
) -> bool:
"""Set the fan speed in percent for the Home profile."""
_LOGGER.debug("Setting Home fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_HOME: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Home profile: %s", err)
return False
async def async_set_profile_fan_speed_away(
self, fan_speed: int = DEFAULT_FAN_SPEED_AWAY
) -> bool:
"""Set the fan speed in percent for the Home profile."""
_LOGGER.debug("Setting Away fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_AWAY: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Away profile: %s", err)
return False
async def async_set_profile_fan_speed_boost(
self, fan_speed: int = DEFAULT_FAN_SPEED_BOOST
) -> bool:
"""Set the fan speed in percent for the Boost profile."""
_LOGGER.debug("Setting Boost fan speed to: %d%%", fan_speed)
try:
await self._client.set_values(
{METRIC_KEY_PROFILE_FAN_SPEED_BOOST: fan_speed}
)
return True
except (OSError, ValloxApiException) as err:
_LOGGER.error("Error setting fan speed for Boost profile: %s", err)
return False
async def async_handle(self, service):
"""Dispatch a service call."""
method = SERVICE_TO_METHOD.get(service.service)
params = service.data.copy()
if not hasattr(self, method["method"]):
_LOGGER.error("Service not implemented: %s", method["method"])
return
result = await getattr(self, method["method"])(**params)
# Force state_proxy to refresh device state, so that updates are
# propagated to platforms.
if result:
await self._state_proxy.async_update(None)
| apache-2.0 |
tnemisteam/cdf-steps | school/views/school_upgradation_views.py | 1 | 4461 | from django.views.generic import ListView, DetailView, CreateView, \
DeleteView, UpdateView, \
ArchiveIndexView, DateDetailView, \
DayArchiveView, MonthArchiveView, \
TodayArchiveView, WeekArchiveView, \
YearArchiveView
from school.models import School_upgradation
class School_upgradationView(object):
model = School_upgradation
def get_template_names(self):
"""Nest templates within school_upgradation directory."""
tpl = super(School_upgradationView, self).get_template_names()[0]
app = self.model._meta.app_label
mdl = 'school_upgradation'
#self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))
self.template_name = tpl[:7]+'school_upgradation/'+tpl[7:]
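        # Illustrative example (assumed default input): a template name such as
        # 'school/school_upgradation_list.html' becomes
        # 'school/school_upgradation/school_upgradation_list.html'; the slice at
        # index 7 relies on the template path starting with the 7-character
        # 'school/' app prefix.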
return [self.template_name]
class School_upgradationDateView(School_upgradationView):
date_field = 'timestamp'
month_format = '%m'
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('school_school_upgradation_list')
class School_upgradationBaseListView(School_upgradationView):
paginate_by = 10
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('school_school_upgradation_list')
class School_upgradationArchiveIndexView(
School_upgradationDateView, School_upgradationBaseListView, ArchiveIndexView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('school_school_upgradation_list')
class School_upgradationCreateView(School_upgradationView, CreateView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('school_school_upgradation_list')
class School_upgradationDateDetailView(School_upgradationDateView, DateDetailView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('school_school_upgradation_list')
class School_upgradationDayArchiveView(
School_upgradationDateView, School_upgradationBaseListView, DayArchiveView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('school_school_upgradation_list')
class School_upgradationDeleteView(School_upgradationView, DeleteView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('school_school_upgradation_list')
class School_upgradationDetailView(School_upgradationView, DetailView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('school_school_upgradation_list')
class School_upgradationListView(School_upgradationBaseListView, ListView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('school_school_upgradation_list')
class School_upgradationMonthArchiveView(
School_upgradationDateView, School_upgradationBaseListView, MonthArchiveView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('school_school_upgradation_list')
class School_upgradationTodayArchiveView(
School_upgradationDateView, School_upgradationBaseListView, TodayArchiveView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('school_school_upgradation_list')
class School_upgradationUpdateView(School_upgradationView, UpdateView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('school_school_upgradation_list')
class School_upgradationWeekArchiveView(
School_upgradationDateView, School_upgradationBaseListView, WeekArchiveView):
pass
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('school_school_upgradation_list')
class School_upgradationYearArchiveView(
School_upgradationDateView, School_upgradationBaseListView, YearArchiveView):
make_object_list = True
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('school_school_upgradation_list')
| mit |
s-hertel/ansible | test/support/integration/plugins/filter/json_query.py | 197 | 1857 | # (c) 2015, Filipe Niero Felisbino <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError, AnsibleFilterError
try:
import jmespath
HAS_LIB = True
except ImportError:
HAS_LIB = False
def json_query(data, expr):
'''Query data using jmespath query language ( http://jmespath.org ). Example:
    - debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}"
'''
if not HAS_LIB:
raise AnsibleError('You need to install "jmespath" prior to running '
'json_query filter')
try:
return jmespath.search(expr, data)
except jmespath.exceptions.JMESPathError as e:
raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e)
except Exception as e:
# For older jmespath, we can get ValueError and TypeError without much info.
raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e)
class FilterModule(object):
''' Query filter '''
def filters(self):
return {
'json_query': json_query
}
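# A minimal usage sketch (illustrative only; the data and expression are made
# up). Outside of a playbook the filter function can be exercised directly,
# provided jmespath is installed:
#
#   data = {'instances': [{'id': 'i-1', 'state': 'running'},
#                         {'id': 'i-2', 'state': 'stopped'}]}
#   json_query(data, "instances[?state=='running'].id")   # -> ['i-1']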
| gpl-3.0 |
borjam/exabgp | src/exabgp/bgp/message/open/__init__.py | 2 | 3257 | # encoding: utf-8
"""
__init__.py
Created by Thomas Mangin on 2009-11-05.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from struct import unpack
from exabgp.bgp.message.message import Message
from exabgp.bgp.message.notification import Notify
from exabgp.bgp.message.open.version import Version
from exabgp.bgp.message.open.asn import ASN
from exabgp.bgp.message.open.holdtime import HoldTime
from exabgp.bgp.message.open.routerid import RouterID
from exabgp.bgp.message.open.capability import Capabilities
# =================================================================== Open
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+
# | Version |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | My Autonomous System |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Hold Time |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | BGP Identifier |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Opt Parm Len |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# | Optional Parameters (variable) |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# Optional Parameters:
# 0 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-...
# | Parm. Type | Parm. Length | Parameter Value (variable)
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-...
@Message.register
class Open(Message):
ID = Message.CODE.OPEN
TYPE = bytes([Message.CODE.OPEN])
def __init__(self, version, asn, hold_time, router_id, capabilities):
self.version = version
self.asn = asn
self.hold_time = hold_time
self.router_id = router_id
self.capabilities = capabilities
def message(self, negotiated=None):
return self._message(
self.version.pack()
+ self.asn.trans().pack()
+ self.hold_time.pack()
+ self.router_id.pack()
+ self.capabilities.pack()
)
def __str__(self):
return "OPEN version=%d asn=%d hold_time=%s router_id=%s capabilities=[%s]" % (
self.version,
self.asn.trans(),
self.hold_time,
self.router_id,
self.capabilities,
)
@classmethod
def unpack_message(cls, data, direction=None, negotiated=None):
version = data[0]
if version != 4:
            # Only version 4 is supported nowadays.
            raise Notify(2, 1, bytes(data[:1]))
asn = unpack('!H', data[1:3])[0]
hold_time = unpack('!H', data[3:5])[0]
numeric = unpack('!L', data[5:9])[0]
router_id = "%d.%d.%d.%d" % (numeric >> 24, (numeric >> 16) & 0xFF, (numeric >> 8) & 0xFF, numeric & 0xFF)
return cls(Version(version), ASN(asn), HoldTime(hold_time), RouterID(router_id), Capabilities.unpack(data[9:]))
| bsd-3-clause |
SmartXiaoMing/clean_openwrt_sdk | staging_dir/host/lib/scons-2.1.0/SCons/Builder.py | 21 | 33982 | """SCons.Builder
Builder object subsystem.
A Builder object is a callable that encapsulates information about how
to execute actions to create a target Node (file) from source Nodes
(files), and how to create those dependencies for tracking.
The main entry point here is the Builder() factory method. This provides
a procedural interface that creates the right underlying Builder object
based on the keyword arguments supplied and the types of the arguments.
The goal is for this external interface to be simple enough that the
vast majority of users can create new Builders as necessary to support
building new types of files in their configurations, without having to
dive any deeper into this subsystem.
The base class here is BuilderBase. This is a concrete base class which
does, in fact, represent the Builder objects that we (or users) create.
There is also a proxy that looks like a Builder:
CompositeBuilder
This proxies for a Builder with an action that is actually a
dictionary that knows how to map file suffixes to a specific
action. This is so that we can invoke different actions
(compilers, compile options) for different flavors of source
files.
Builders and their proxies have the following public interface methods
used by other modules:
__call__()
THE public interface. Calling a Builder object (with the
use of internal helper methods) sets up the target and source
dependencies, appropriate mapping to a specific action, and the
environment manipulation necessary for overridden construction
variable. This also takes care of warning about possible mistakes
in keyword arguments.
add_emitter()
Adds an emitter for a specific file suffix, used by some Tool
modules to specify that (for example) a yacc invocation on a .y
can create a .h *and* a .c file.
add_action()
Adds an action for a specific file suffix, heavily used by
Tool modules to add their specific action(s) for turning
a source file into an object file to the global static
and shared object file Builders.
There are the following methods for internal use within this module:
_execute()
The internal method that handles the heavily lifting when a
Builder is called. This is used so that the __call__() methods
can set up warning about possible mistakes in keyword-argument
overrides, and *then* execute all of the steps necessary so that
the warnings only occur once.
get_name()
Returns the Builder's name within a specific Environment,
primarily used to try to return helpful information in error
messages.
adjust_suffix()
get_prefix()
get_suffix()
get_src_suffix()
set_src_suffix()
Miscellaneous stuff for handling the prefix and suffix
manipulation we use in turning source file names into target
file names.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Builder.py 5357 2011/09/09 21:31:03 bdeegan"
import collections
import SCons.Action
from SCons.Debug import logInstanceCreation
from SCons.Errors import InternalError, UserError
import SCons.Executor
import SCons.Memoize
import SCons.Node
import SCons.Node.FS
import SCons.Util
import SCons.Warnings
class _Null(object):
pass
_null = _Null
def match_splitext(path, suffixes = []):
if suffixes:
matchsuf = [S for S in suffixes if path[-len(S):] == S]
if matchsuf:
suf = max([(len(_f),_f) for _f in matchsuf])[1]
return [path[:-len(suf)], path[-len(suf):]]
return SCons.Util.splitext(path)
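# Illustrative behaviour (example values, not from the original sources):
#   match_splitext('archive.tar.gz', ['.tar.gz', '.gz'])  # -> ['archive', '.tar.gz']
# The longest matching suffix wins; with no suffix list the call falls back to
# SCons.Util.splitext(), which splits at the last dot instead.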
class DictCmdGenerator(SCons.Util.Selector):
"""This is a callable class that can be used as a
command generator function. It holds on to a dictionary
mapping file suffixes to Actions. It uses that dictionary
to return the proper action based on the file suffix of
the source file."""
def __init__(self, dict=None, source_ext_match=1):
SCons.Util.Selector.__init__(self, dict)
self.source_ext_match = source_ext_match
def src_suffixes(self):
return list(self.keys())
def add_action(self, suffix, action):
"""Add a suffix-action pair to the mapping.
"""
self[suffix] = action
def __call__(self, target, source, env, for_signature):
if not source:
return []
if self.source_ext_match:
suffixes = self.src_suffixes()
ext = None
for src in map(str, source):
my_ext = match_splitext(src, suffixes)[1]
if ext and my_ext != ext:
raise UserError("While building `%s' from `%s': Cannot build multiple sources with different extensions: %s, %s"
% (repr(list(map(str, target))), src, ext, my_ext))
ext = my_ext
else:
ext = match_splitext(str(source[0]), self.src_suffixes())[1]
if not ext:
#return ext
raise UserError("While building `%s': "
"Cannot deduce file extension from source files: %s"
% (repr(list(map(str, target))), repr(list(map(str, source)))))
try:
ret = SCons.Util.Selector.__call__(self, env, source, ext)
except KeyError, e:
raise UserError("Ambiguous suffixes after environment substitution: %s == %s == %s" % (e.args[0], e.args[1], e.args[2]))
if ret is None:
raise UserError("While building `%s' from `%s': Don't know how to build from a source file with suffix `%s'. Expected a suffix in this list: %s." % \
(repr(list(map(str, target))), repr(list(map(str, source))), ext, repr(list(self.keys()))))
return ret
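# Illustrative sketch (hypothetical actions): a DictCmdGenerator built as
#   DictCmdGenerator({'.c': 'cc -c $SOURCE', '.f': 'f77 -c $SOURCE'})
# selects the C action for 'foo.c' sources and the Fortran action for 'foo.f'
# sources, and raises UserError when one target mixes source extensions.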
class CallableSelector(SCons.Util.Selector):
"""A callable dictionary that will, in turn, call the value it
finds if it can."""
def __call__(self, env, source):
value = SCons.Util.Selector.__call__(self, env, source)
if callable(value):
value = value(env, source)
return value
class DictEmitter(SCons.Util.Selector):
"""A callable dictionary that maps file suffixes to emitters.
When called, it finds the right emitter in its dictionary for the
suffix of the first source file, and calls that emitter to get the
right lists of targets and sources to return. If there's no emitter
for the suffix in its dictionary, the original target and source are
returned.
"""
def __call__(self, target, source, env):
emitter = SCons.Util.Selector.__call__(self, env, source)
if emitter:
target, source = emitter(target, source, env)
return (target, source)
class ListEmitter(collections.UserList):
"""A callable list of emitters that calls each in sequence,
returning the result.
"""
def __call__(self, target, source, env):
for e in self.data:
target, source = e(target, source, env)
return (target, source)
# These are a common errors when calling a Builder;
# they are similar to the 'target' and 'source' keyword args to builders,
# so we issue warnings when we see them. The warnings can, of course,
# be disabled.
misleading_keywords = {
'targets' : 'target',
'sources' : 'source',
}
class OverrideWarner(collections.UserDict):
"""A class for warning about keyword arguments that we use as
overrides in a Builder call.
This class exists to handle the fact that a single Builder call
can actually invoke multiple builders. This class only emits the
warnings once, no matter how many Builders are invoked.
"""
def __init__(self, dict):
collections.UserDict.__init__(self, dict)
if __debug__: logInstanceCreation(self, 'Builder.OverrideWarner')
self.already_warned = None
def warn(self):
if self.already_warned:
return
for k in self.keys():
if k in misleading_keywords:
alt = misleading_keywords[k]
msg = "Did you mean to use `%s' instead of `%s'?" % (alt, k)
SCons.Warnings.warn(SCons.Warnings.MisleadingKeywordsWarning, msg)
self.already_warned = 1
def Builder(**kw):
"""A factory for builder objects."""
composite = None
if 'generator' in kw:
if 'action' in kw:
raise UserError("You must not specify both an action and a generator.")
kw['action'] = SCons.Action.CommandGeneratorAction(kw['generator'], {})
del kw['generator']
elif 'action' in kw:
source_ext_match = kw.get('source_ext_match', 1)
if 'source_ext_match' in kw:
del kw['source_ext_match']
if SCons.Util.is_Dict(kw['action']):
composite = DictCmdGenerator(kw['action'], source_ext_match)
kw['action'] = SCons.Action.CommandGeneratorAction(composite, {})
kw['src_suffix'] = composite.src_suffixes()
else:
kw['action'] = SCons.Action.Action(kw['action'])
if 'emitter' in kw:
emitter = kw['emitter']
if SCons.Util.is_String(emitter):
# This allows users to pass in an Environment
# variable reference (like "$FOO") as an emitter.
# We will look in that Environment variable for
# a callable to use as the actual emitter.
var = SCons.Util.get_environment_var(emitter)
if not var:
raise UserError("Supplied emitter '%s' does not appear to refer to an Environment variable" % emitter)
kw['emitter'] = EmitterProxy(var)
elif SCons.Util.is_Dict(emitter):
kw['emitter'] = DictEmitter(emitter)
elif SCons.Util.is_List(emitter):
kw['emitter'] = ListEmitter(emitter)
result = BuilderBase(**kw)
if not composite is None:
result = CompositeBuilder(result, composite)
return result
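# A minimal usage sketch (hypothetical tool, not from this module): the factory
# maps keyword arguments onto a BuilderBase (or a CompositeBuilder for dict
# actions), and the result is normally attached to an Environment:
#
#   copy_bld = Builder(action='cp $SOURCE $TARGET', suffix='.out', src_suffix='.in')
#   env.Append(BUILDERS={'Copy': copy_bld})
#   env.Copy('file.out', 'file.in')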
def _node_errors(builder, env, tlist, slist):
"""Validate that the lists of target and source nodes are
legal for this builder and environment. Raise errors or
issue warnings as appropriate.
"""
# First, figure out if there are any errors in the way the targets
# were specified.
for t in tlist:
if t.side_effect:
raise UserError("Multiple ways to build the same target were specified for: %s" % t)
if t.has_explicit_builder():
if not t.env is None and not t.env is env:
action = t.builder.action
t_contents = action.get_contents(tlist, slist, t.env)
contents = action.get_contents(tlist, slist, env)
if t_contents == contents:
msg = "Two different environments were specified for target %s,\n\tbut they appear to have the same action: %s" % (t, action.genstring(tlist, slist, t.env))
SCons.Warnings.warn(SCons.Warnings.DuplicateEnvironmentWarning, msg)
else:
msg = "Two environments with different actions were specified for the same target: %s" % t
raise UserError(msg)
if builder.multi:
if t.builder != builder:
msg = "Two different builders (%s and %s) were specified for the same target: %s" % (t.builder.get_name(env), builder.get_name(env), t)
raise UserError(msg)
# TODO(batch): list constructed each time!
if t.get_executor().get_all_targets() != tlist:
msg = "Two different target lists have a target in common: %s (from %s and from %s)" % (t, list(map(str, t.get_executor().get_all_targets())), list(map(str, tlist)))
raise UserError(msg)
elif t.sources != slist:
msg = "Multiple ways to build the same target were specified for: %s (from %s and from %s)" % (t, list(map(str, t.sources)), list(map(str, slist)))
raise UserError(msg)
if builder.single_source:
if len(slist) > 1:
raise UserError("More than one source given for single-source builder: targets=%s sources=%s" % (list(map(str,tlist)), list(map(str,slist))))
class EmitterProxy(object):
"""This is a callable class that can act as a
Builder emitter. It holds on to a string that
is a key into an Environment dictionary, and will
look there at actual build time to see if it holds
a callable. If so, we will call that as the actual
emitter."""
def __init__(self, var):
self.var = SCons.Util.to_String(var)
def __call__(self, target, source, env):
emitter = self.var
# Recursively substitute the variable.
# We can't use env.subst() because it deals only
# in strings. Maybe we should change that?
while SCons.Util.is_String(emitter) and emitter in env:
emitter = env[emitter]
if callable(emitter):
target, source = emitter(target, source, env)
elif SCons.Util.is_List(emitter):
for e in emitter:
target, source = e(target, source, env)
return (target, source)
def __cmp__(self, other):
return cmp(self.var, other.var)
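# Illustrative sketch (hypothetical names): with a callable stored in the
# Environment, a Builder can reference it indirectly through EmitterProxy:
#
#   def add_header(target, source, env):
#       return (target + ['version.h'], source)
#   env['MY_EMITTER'] = add_header
#   bld = Builder(action='...', emitter='$MY_EMITTER')  # resolved at build time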
class BuilderBase(object):
"""Base class for Builders, objects that create output
nodes (files) from input nodes (files).
"""
if SCons.Memoize.use_memoizer:
__metaclass__ = SCons.Memoize.Memoized_Metaclass
memoizer_counters = []
def __init__(self, action = None,
prefix = '',
suffix = '',
src_suffix = '',
target_factory = None,
source_factory = None,
target_scanner = None,
source_scanner = None,
emitter = None,
multi = 0,
env = None,
single_source = 0,
name = None,
chdir = _null,
is_explicit = 1,
src_builder = None,
ensure_suffix = False,
**overrides):
if __debug__: logInstanceCreation(self, 'Builder.BuilderBase')
self._memo = {}
self.action = action
self.multi = multi
if SCons.Util.is_Dict(prefix):
prefix = CallableSelector(prefix)
self.prefix = prefix
if SCons.Util.is_Dict(suffix):
suffix = CallableSelector(suffix)
self.env = env
self.single_source = single_source
if 'overrides' in overrides:
SCons.Warnings.warn(SCons.Warnings.DeprecatedBuilderKeywordsWarning,
"The \"overrides\" keyword to Builder() creation has been deprecated;\n" +\
"\tspecify the items as keyword arguments to the Builder() call instead.")
overrides.update(overrides['overrides'])
del overrides['overrides']
if 'scanner' in overrides:
SCons.Warnings.warn(SCons.Warnings.DeprecatedBuilderKeywordsWarning,
"The \"scanner\" keyword to Builder() creation has been deprecated;\n"
"\tuse: source_scanner or target_scanner as appropriate.")
del overrides['scanner']
self.overrides = overrides
self.set_suffix(suffix)
self.set_src_suffix(src_suffix)
self.ensure_suffix = ensure_suffix
self.target_factory = target_factory
self.source_factory = source_factory
self.target_scanner = target_scanner
self.source_scanner = source_scanner
self.emitter = emitter
# Optional Builder name should only be used for Builders
# that don't get attached to construction environments.
if name:
self.name = name
self.executor_kw = {}
if not chdir is _null:
self.executor_kw['chdir'] = chdir
self.is_explicit = is_explicit
if src_builder is None:
src_builder = []
elif not SCons.Util.is_List(src_builder):
src_builder = [ src_builder ]
self.src_builder = src_builder
def __nonzero__(self):
raise InternalError("Do not test for the Node.builder attribute directly; use Node.has_builder() instead")
def get_name(self, env):
"""Attempts to get the name of the Builder.
Look at the BUILDERS variable of env, expecting it to be a
dictionary containing this Builder, and return the key of the
dictionary. If there's no key, then return a directly-configured
name (if there is one) or the name of the class (by default)."""
try:
index = list(env['BUILDERS'].values()).index(self)
return list(env['BUILDERS'].keys())[index]
except (AttributeError, KeyError, TypeError, ValueError):
try:
return self.name
except AttributeError:
return str(self.__class__)
def __cmp__(self, other):
return cmp(self.__dict__, other.__dict__)
def splitext(self, path, env=None):
if not env:
env = self.env
if env:
suffixes = self.src_suffixes(env)
else:
suffixes = []
return match_splitext(path, suffixes)
def _adjustixes(self, files, pre, suf, ensure_suffix=False):
if not files:
return []
result = []
if not SCons.Util.is_List(files):
files = [files]
for f in files:
if SCons.Util.is_String(f):
f = SCons.Util.adjustixes(f, pre, suf, ensure_suffix)
result.append(f)
return result
def _create_nodes(self, env, target = None, source = None):
"""Create and return lists of target and source nodes.
"""
src_suf = self.get_src_suffix(env)
target_factory = env.get_factory(self.target_factory)
source_factory = env.get_factory(self.source_factory)
source = self._adjustixes(source, None, src_suf)
slist = env.arg2nodes(source, source_factory)
pre = self.get_prefix(env, slist)
suf = self.get_suffix(env, slist)
if target is None:
try:
t_from_s = slist[0].target_from_source
except AttributeError:
raise UserError("Do not know how to create a target from source `%s'" % slist[0])
except IndexError:
tlist = []
else:
splitext = lambda S: self.splitext(S,env)
tlist = [ t_from_s(pre, suf, splitext) ]
else:
target = self._adjustixes(target, pre, suf, self.ensure_suffix)
tlist = env.arg2nodes(target, target_factory, target=target, source=source)
if self.emitter:
# The emitter is going to do str(node), but because we're
# being called *from* a builder invocation, the new targets
# don't yet have a builder set on them and will look like
# source files. Fool the emitter's str() calls by setting
# up a temporary builder on the new targets.
new_targets = []
for t in tlist:
if not t.is_derived():
t.builder_set(self)
new_targets.append(t)
orig_tlist = tlist[:]
orig_slist = slist[:]
target, source = self.emitter(target=tlist, source=slist, env=env)
# Now delete the temporary builders that we attached to any
# new targets, so that _node_errors() doesn't do weird stuff
# to them because it thinks they already have builders.
for t in new_targets:
if t.builder is self:
# Only delete the temporary builder if the emitter
# didn't change it on us.
t.builder_set(None)
# Have to call arg2nodes yet again, since it is legal for
# emitters to spit out strings as well as Node instances.
tlist = env.arg2nodes(target, target_factory,
target=orig_tlist, source=orig_slist)
slist = env.arg2nodes(source, source_factory,
target=orig_tlist, source=orig_slist)
return tlist, slist
def _execute(self, env, target, source, overwarn={}, executor_kw={}):
# We now assume that target and source are lists or None.
if self.src_builder:
source = self.src_builder_sources(env, source, overwarn)
if self.single_source and len(source) > 1 and target is None:
result = []
if target is None: target = [None]*len(source)
for tgt, src in zip(target, source):
if not tgt is None: tgt = [tgt]
if not src is None: src = [src]
result.extend(self._execute(env, tgt, src, overwarn))
return SCons.Node.NodeList(result)
overwarn.warn()
tlist, slist = self._create_nodes(env, target, source)
# Check for errors with the specified target/source lists.
_node_errors(self, env, tlist, slist)
# The targets are fine, so find or make the appropriate Executor to
# build this particular list of targets from this particular list of
# sources.
executor = None
key = None
if self.multi:
try:
executor = tlist[0].get_executor(create = 0)
except (AttributeError, IndexError):
pass
else:
executor.add_sources(slist)
if executor is None:
if not self.action:
fmt = "Builder %s must have an action to build %s."
raise UserError(fmt % (self.get_name(env or self.env),
list(map(str,tlist))))
key = self.action.batch_key(env or self.env, tlist, slist)
if key:
try:
executor = SCons.Executor.GetBatchExecutor(key)
except KeyError:
pass
else:
executor.add_batch(tlist, slist)
if executor is None:
executor = SCons.Executor.Executor(self.action, env, [],
tlist, slist, executor_kw)
if key:
SCons.Executor.AddBatchExecutor(key, executor)
# Now set up the relevant information in the target Nodes themselves.
for t in tlist:
t.cwd = env.fs.getcwd()
t.builder_set(self)
t.env_set(env)
t.add_source(slist)
t.set_executor(executor)
t.set_explicit(self.is_explicit)
return SCons.Node.NodeList(tlist)
def __call__(self, env, target=None, source=None, chdir=_null, **kw):
# We now assume that target and source are lists or None.
# The caller (typically Environment.BuilderWrapper) is
# responsible for converting any scalar values to lists.
if chdir is _null:
ekw = self.executor_kw
else:
ekw = self.executor_kw.copy()
ekw['chdir'] = chdir
if kw:
if 'srcdir' in kw:
def prependDirIfRelative(f, srcdir=kw['srcdir']):
import os.path
if SCons.Util.is_String(f) and not os.path.isabs(f):
f = os.path.join(srcdir, f)
return f
if not SCons.Util.is_List(source):
source = [source]
source = list(map(prependDirIfRelative, source))
del kw['srcdir']
if self.overrides:
env_kw = self.overrides.copy()
env_kw.update(kw)
else:
env_kw = kw
else:
env_kw = self.overrides
env = env.Override(env_kw)
return self._execute(env, target, source, OverrideWarner(kw), ekw)
def adjust_suffix(self, suff):
if suff and not suff[0] in [ '.', '_', '$' ]:
return '.' + suff
return suff
def get_prefix(self, env, sources=[]):
prefix = self.prefix
if callable(prefix):
prefix = prefix(env, sources)
return env.subst(prefix)
def set_suffix(self, suffix):
if not callable(suffix):
suffix = self.adjust_suffix(suffix)
self.suffix = suffix
def get_suffix(self, env, sources=[]):
suffix = self.suffix
if callable(suffix):
suffix = suffix(env, sources)
return env.subst(suffix)
def set_src_suffix(self, src_suffix):
if not src_suffix:
src_suffix = []
elif not SCons.Util.is_List(src_suffix):
src_suffix = [ src_suffix ]
self.src_suffix = [callable(suf) and suf or self.adjust_suffix(suf) for suf in src_suffix]
def get_src_suffix(self, env):
"""Get the first src_suffix in the list of src_suffixes."""
ret = self.src_suffixes(env)
if not ret:
return ''
return ret[0]
def add_emitter(self, suffix, emitter):
"""Add a suffix-emitter mapping to this Builder.
This assumes that emitter has been initialized with an
appropriate dictionary type, and will throw a TypeError if
not, so the caller is responsible for knowing that this is an
appropriate method to call for the Builder in question.
"""
self.emitter[suffix] = emitter
def add_src_builder(self, builder):
"""
Add a new Builder to the list of src_builders.
This requires wiping out cached values so that the computed
lists of source suffixes get re-calculated.
"""
self._memo = {}
self.src_builder.append(builder)
def _get_sdict(self, env):
"""
Returns a dictionary mapping all of the source suffixes of all
src_builders of this Builder to the underlying Builder that
should be called first.
This dictionary is used for each target specified, so we save a
lot of extra computation by memoizing it for each construction
environment.
Note that this is re-computed each time, not cached, because there
might be changes to one of our source Builders (or one of their
source Builders, and so on, and so on...) that we can't "see."
The underlying methods we call cache their computed values,
though, so we hope repeatedly aggregating them into a dictionary
like this won't be too big a hit. We may need to look for a
better way to do this if performance data show this has turned
into a significant bottleneck.
"""
sdict = {}
for bld in self.get_src_builders(env):
for suf in bld.src_suffixes(env):
sdict[suf] = bld
return sdict
def src_builder_sources(self, env, source, overwarn={}):
sdict = self._get_sdict(env)
src_suffixes = self.src_suffixes(env)
lengths = list(set(map(len, src_suffixes)))
def match_src_suffix(name, src_suffixes=src_suffixes, lengths=lengths):
node_suffixes = [name[-l:] for l in lengths]
for suf in src_suffixes:
if suf in node_suffixes:
return suf
return None
result = []
for s in SCons.Util.flatten(source):
if SCons.Util.is_String(s):
match_suffix = match_src_suffix(env.subst(s))
if not match_suffix and not '.' in s:
src_suf = self.get_src_suffix(env)
s = self._adjustixes(s, None, src_suf)[0]
else:
match_suffix = match_src_suffix(s.name)
if match_suffix:
try:
bld = sdict[match_suffix]
except KeyError:
result.append(s)
else:
tlist = bld._execute(env, None, [s], overwarn)
# If the subsidiary Builder returned more than one
# target, then filter out any sources that this
# Builder isn't capable of building.
if len(tlist) > 1:
tlist = [t for t in tlist if match_src_suffix(t.name)]
result.extend(tlist)
else:
result.append(s)
source_factory = env.get_factory(self.source_factory)
return env.arg2nodes(result, source_factory)
def _get_src_builders_key(self, env):
return id(env)
memoizer_counters.append(SCons.Memoize.CountDict('get_src_builders', _get_src_builders_key))
def get_src_builders(self, env):
"""
Returns the list of source Builders for this Builder.
This exists mainly to look up Builders referenced as
strings in the 'BUILDER' variable of the construction
environment and cache the result.
"""
memo_key = id(env)
try:
memo_dict = self._memo['get_src_builders']
except KeyError:
memo_dict = {}
self._memo['get_src_builders'] = memo_dict
else:
try:
return memo_dict[memo_key]
except KeyError:
pass
builders = []
for bld in self.src_builder:
if SCons.Util.is_String(bld):
try:
bld = env['BUILDERS'][bld]
except KeyError:
continue
builders.append(bld)
memo_dict[memo_key] = builders
return builders
def _subst_src_suffixes_key(self, env):
return id(env)
memoizer_counters.append(SCons.Memoize.CountDict('subst_src_suffixes', _subst_src_suffixes_key))
def subst_src_suffixes(self, env):
"""
The suffix list may contain construction variable expansions,
so we have to evaluate the individual strings. To avoid doing
this over and over, we memoize the results for each construction
environment.
"""
memo_key = id(env)
try:
memo_dict = self._memo['subst_src_suffixes']
except KeyError:
memo_dict = {}
self._memo['subst_src_suffixes'] = memo_dict
else:
try:
return memo_dict[memo_key]
except KeyError:
pass
suffixes = [env.subst(x) for x in self.src_suffix]
memo_dict[memo_key] = suffixes
return suffixes
def src_suffixes(self, env):
"""
Returns the list of source suffixes for all src_builders of this
Builder.
This is essentially a recursive descent of the src_builder "tree."
(This value isn't cached because there may be changes in a
src_builder many levels deep that we can't see.)
"""
sdict = {}
suffixes = self.subst_src_suffixes(env)
for s in suffixes:
sdict[s] = 1
for builder in self.get_src_builders(env):
for s in builder.src_suffixes(env):
if s not in sdict:
sdict[s] = 1
suffixes.append(s)
return suffixes
class CompositeBuilder(SCons.Util.Proxy):
"""A Builder Proxy whose main purpose is to always have
a DictCmdGenerator as its action, and to provide access
to the DictCmdGenerator's add_action() method.
"""
def __init__(self, builder, cmdgen):
if __debug__: logInstanceCreation(self, 'Builder.CompositeBuilder')
SCons.Util.Proxy.__init__(self, builder)
# cmdgen should always be an instance of DictCmdGenerator.
self.cmdgen = cmdgen
self.builder = builder
__call__ = SCons.Util.Delegate('__call__')
def add_action(self, suffix, action):
self.cmdgen.add_action(suffix, action)
self.set_src_suffix(self.cmdgen.src_suffixes())
def is_a_Builder(obj):
""""Returns True iff the specified obj is one of our Builder classes.
The test is complicated a bit by the fact that CompositeBuilder
is a proxy, not a subclass of BuilderBase.
"""
return (isinstance(obj, BuilderBase)
or isinstance(obj, CompositeBuilder)
or callable(obj))
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
rwl/PyDyn | pydyn/solvers/ModifiedEuler2.py | 1 | 4185 | # Copyright (C) 2009 Stijn Cole
# Copyright (C) 2010-2011 Richard Lincoln
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import copy, r_
from pydyn.models.exciters.Exciter import Exciter
from pydyn.models.governors.Governor import Governor
from pydyn.models.generators.Generator import Generator
from pydyn.SolveNetwork import SolveNetwork
from pydyn.MachineCurrents import MachineCurrents
def ModifiedEuler2(t, Xgen0, Pgen, Vgen0, Xexc0, Pexc, Vexc0, Xgov0, Pgov, Vgov0,
invYbus, gbus, genmodel, excmodel, govmodel, stepsize):
""" Modified Euler ODE solver with check on interface errors
@see: U{http://www.esat.kuleuven.be/electa/teaching/matdyn/}
"""
## Set up
eulerfailed = False
tol = 1e-8
maxit = 20
## First Prediction Step
# EXCITERS
dFexc0 = Exciter(Xexc0, Pexc, Vexc0, excmodel)
Xexc_new = Xexc0 + stepsize * dFexc0
# GOVERNORS
dFgov0 = Governor(Xgov0, Pgov, Vgov0, govmodel)
Xgov_new = Xgov0 + stepsize * dFgov0
# GENERATORS
dFgen0 = Generator(Xgen0, Xexc_new, Xgov_new, Pgen, Vgen0, genmodel)
Xgen_new = Xgen0 + stepsize * dFgen0
Vexc_new = copy(Vexc0)
Vgov_new = copy(Vgov0)
Vgen_new = copy(Vgen0)
for i in range(maxit):
Xexc_old = copy(Xexc_new)
Xgov_old = copy(Xgov_new)
Xgen_old = copy(Xgen_new)
Vexc_old = copy(Vexc_new)
Vgov_old = copy(Vgov_new)
Vgen_old = copy(Vgen_new)
# Calculate system voltages
U_new = SolveNetwork(Xgen_new, Pgen, invYbus, gbus, genmodel)
# Calculate machine currents and power
Id_new, Iq_new, Pe_new = MachineCurrents(Xgen_new, Pgen, U_new[gbus], genmodel)
# Update variables that have changed
Vgen_new = r_[Id_new, Iq_new, Pe_new]
Vexc_new = abs(U_new[gbus])
Vgov_new = Xgen_new[:, 1]
# Correct the prediction, and find new values of x
# EXCITERS
dFexc1 = Exciter(Xexc_old, Pexc, Vexc_new, excmodel)
Xexc_new = Xexc0 + stepsize/2 * (dFexc0 + dFexc1)
# GOVERNORS
dFgov1 = Governor(Xgov_old, Pgov, Vgov_new, govmodel)
Xgov_new = Xgov0 + stepsize/2 * (dFgov0 + dFgov1)
# GENERATORS
dFgen1 = Generator(Xgen_old, Xexc_new, Xgov_new, Pgen, Vgen_new, genmodel)
Xgen_new = Xgen0 + stepsize/2 * (dFgen0 + dFgen1)
# Calculate error
Xexc_d = abs((Xexc_new - Xexc_old).T)
Xgov_d = abs((Xgov_new - Xgov_old).T)
Xgen_d = abs((Xgen_new - Xgen_old).T)
Vexc_d = abs((Vexc_new - Vexc_old).T)
Vgov_d = abs((Vgov_new - Vgov_old).T)
Vgen_d = abs((Vgen_new - Vgen_old).T)
errest = max( [max(max(Vexc_d)), max(max(Vgov_d)), max(max(Vgen_d)),
max(max(Xexc_d)), max(max(Xgov_d)), max(max(Xgen_d)) ])
if errest < tol:
break # solution found
else:
            # range(maxit) runs i from 0 to maxit - 1, so the last iteration is maxit - 1
            if i == maxit - 1:
U0 = copy(U_new)
Vexc0 = copy(Vexc_new); Vgov0 = copy(Vgov_new); Vgen0 = copy(Vgen_new)
Xgen0 = copy(Xgen_new); Xexc0 = copy(Xexc_new); Xgov0 = copy(Xgov_new)
Pgen0 = copy(Pgen); Pexc0 = copy(Pexc); Pgov0 = copy(Pgov)
eulerfailed = True
return Xgen0, Pgen0, Vgen0, Xexc0, Pexc0, Vexc0, Xgov0, Pgov0, Vgov0, U0, t, eulerfailed, stepsize
## Update
U0 = U_new
Vexc0 = Vexc_new
Vgov0 = Vgov_new
Vgen0 = Vgen_new
Xgen0 = Xgen_new
Xexc0 = Xexc_new
Xgov0 = Xgov_new
Pgen0 = Pgen
Pexc0 = Pexc
Pgov0 = Pgov
return Xgen0, Pgen0, Vgen0, Xexc0, Pexc0, Vexc0, Xgov0, Pgov0, Vgov0, U0, t, eulerfailed, stepsize
| apache-2.0 |
nikesh-mahalka/nova | nova/objects/__init__.py | 4 | 3006 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(comstud): You may scratch your head as you see code that imports
# this module and then accesses attributes for objects such as Instance,
# etc, yet you do not see these attributes in here. Never fear, there is
# a little bit of magic. When objects are registered, an attribute is set
# on this module automatically, pointing to the newest/latest version of
# the object.
def register_all():
# NOTE(danms): You must make sure your object gets imported in this
# function in order for it to be registered by services that may
# need to receive it via RPC.
__import__('nova.objects.agent')
__import__('nova.objects.aggregate')
__import__('nova.objects.bandwidth_usage')
__import__('nova.objects.block_device')
__import__('nova.objects.cell_mapping')
__import__('nova.objects.compute_node')
__import__('nova.objects.dns_domain')
__import__('nova.objects.ec2')
__import__('nova.objects.external_event')
__import__('nova.objects.fixed_ip')
__import__('nova.objects.flavor')
__import__('nova.objects.floating_ip')
__import__('nova.objects.host_mapping')
__import__('nova.objects.hv_spec')
__import__('nova.objects.image_meta')
__import__('nova.objects.instance')
__import__('nova.objects.instance_action')
__import__('nova.objects.instance_fault')
__import__('nova.objects.instance_group')
__import__('nova.objects.instance_info_cache')
__import__('nova.objects.instance_mapping')
__import__('nova.objects.instance_numa_topology')
__import__('nova.objects.instance_pci_requests')
__import__('nova.objects.keypair')
__import__('nova.objects.migration')
__import__('nova.objects.monitor_metric')
__import__('nova.objects.network')
__import__('nova.objects.network_request')
__import__('nova.objects.numa')
__import__('nova.objects.pci_device')
__import__('nova.objects.pci_device_pool')
__import__('nova.objects.request_spec')
__import__('nova.objects.tag')
__import__('nova.objects.quotas')
__import__('nova.objects.security_group')
__import__('nova.objects.security_group_rule')
__import__('nova.objects.service')
__import__('nova.objects.task_log')
__import__('nova.objects.vcpu_model')
__import__('nova.objects.virt_cpu_topology')
__import__('nova.objects.virtual_interface')
__import__('nova.objects.volume_usage')
| apache-2.0 |
jinie/sublime-wakatime | packages/wakatime/packages/pygments_py3/pygments/lexers/nix.py | 72 | 4031 | # -*- coding: utf-8 -*-
"""
pygments.lexers.nix
~~~~~~~~~~~~~~~~~~~
Lexers for the NixOS Nix language.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal
__all__ = ['NixLexer']
class NixLexer(RegexLexer):
"""
For the `Nix language <http://nixos.org/nix/>`_.
.. versionadded:: 2.0
"""
name = 'Nix'
aliases = ['nixos', 'nix']
filenames = ['*.nix']
mimetypes = ['text/x-nix']
flags = re.MULTILINE | re.UNICODE
keywords = ['rec', 'with', 'let', 'in', 'inherit', 'assert', 'if',
'else', 'then', '...']
builtins = ['import', 'abort', 'baseNameOf', 'dirOf', 'isNull', 'builtins',
'map', 'removeAttrs', 'throw', 'toString', 'derivation']
operators = ['++', '+', '?', '.', '!', '//', '==',
'!=', '&&', '||', '->', '=']
punctuations = ["(", ")", "[", "]", ";", "{", "}", ":", ",", "@"]
tokens = {
'root': [
# comments starting with #
(r'#.*$', Comment.Single),
# multiline comments
(r'/\*', Comment.Multiline, 'comment'),
# whitespace
(r'\s+', Text),
# keywords
('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in keywords), Keyword),
# highlight the builtins
('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in builtins),
Name.Builtin),
(r'\b(true|false|null)\b', Name.Constant),
# operators
('(%s)' % '|'.join(re.escape(entry) for entry in operators),
Operator),
# word operators
(r'\b(or|and)\b', Operator.Word),
# punctuations
('(%s)' % '|'.join(re.escape(entry) for entry in punctuations), Punctuation),
# integers
(r'[0-9]+', Number.Integer),
# strings
(r'"', String.Double, 'doublequote'),
(r"''", String.Single, 'singlequote'),
# paths
(r'[\w.+-]*(\/[\w.+-]+)+', Literal),
(r'\<[\w.+-]+(\/[\w.+-]+)*\>', Literal),
# urls
(r'[a-zA-Z][a-zA-Z0-9\+\-\.]*\:[\w%/?:@&=+$,\\.!~*\'-]+', Literal),
# names of variables
(r'[\w-]+\s*=', String.Symbol),
(r'[a-zA-Z_][\w\'-]*', Text),
],
'comment': [
(r'[^/*]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
'singlequote': [
(r"'''", String.Escape),
(r"''\$\{", String.Escape),
(r"''\n", String.Escape),
(r"''\r", String.Escape),
(r"''\t", String.Escape),
(r"''", String.Single, '#pop'),
(r'\$\{', String.Interpol, 'antiquote'),
(r"[^']", String.Single),
],
'doublequote': [
(r'\\', String.Escape),
(r'\\"', String.Escape),
(r'\\$\{', String.Escape),
(r'"', String.Double, '#pop'),
(r'\$\{', String.Interpol, 'antiquote'),
(r'[^"]', String.Double),
],
'antiquote': [
(r"\}", String.Interpol, '#pop'),
# TODO: we should probably escape also here ''${ \${
(r"\$\{", String.Interpol, '#push'),
include('root'),
],
}
def analyse_text(text):
rv = 0.0
# TODO: let/in
if re.search(r'import.+?<[^>]+>', text):
rv += 0.4
if re.search(r'mkDerivation\s+(\(|\{|rec)', text):
rv += 0.4
if re.search(r'=\s+mkIf\s+', text):
rv += 0.4
if re.search(r'\{[a-zA-Z,\s]+\}:', text):
rv += 0.1
return rv
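# Minimal usage sketch (standard Pygments API, shown only as an example):
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   print(highlight('let x = 1; in { inherit x; }', NixLexer(), TerminalFormatter()))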
| bsd-3-clause |
zijistark/ck2utils | esc/province_setup.py | 1 | 2392 | #!/usr/bin/env python3
# USAGE:
# log_province_setup.py
# mkdir -p $REPO_ROOT/SWMH-BETA/SWMH/localisation/customizable_localisation
# cp $REPO_ROOT/{EMF/EMF,SWMH-BETA/SWMH}/localisation/customizable_localisation/emf_debug_custom_loc.txt
# cp $REPO_ROOT/{EMF/EMF,SWMH-BETA/SWMH}/localisation/1_emf_debug.csv
# rm $REPO_ROOT/SWMH-BETA/SWMH/common/province_setup/*
# hipinstall.sh --swmh
# ~/common/Crusader\ Kings\ II/CK2game.exe -debug -debugscripts
# # console "run province_setup.txt", quit
# cp ~/win/Documents/Paradox\ Interactive/Crusader\ Kings\ II/logs/game.log $REPO_ROOT/province_setup_data.txt
# province_setup.py # run this file
# git -C $REPO_ROOT/SWMH-BETA clean -df
import re
import ck2parser
from print_time import print_time
NEW_DATA_FROM_FILE = ck2parser.rootpath / 'province_setup_data.txt'
# NEW_DATA_FROM_FILE = None # format only
@print_time
def main():
parser = ck2parser.FullParser(ck2parser.rootpath / 'SWMH-BETA/SWMH')
if NEW_DATA_FROM_FILE:
output_tree = parser.parse('')
with NEW_DATA_FROM_FILE.open(encoding='cp1252') as f:
for line in f:
match = re.search(r'<(.*?)> (.*)', line)
if match is None:
continue
prov_type, pairs = match.groups()
data = dict(x.split('=') for x in pairs.split(', '))
to_parse = '{} = {{\n'.format(data['id'])
if data.get('title'):
to_parse += 'title = {}\n'.format(data['title'])
to_parse += 'max_settlements = {}\n'.format(
data['max_settlements'] if prov_type == 'LAND' else 7)
to_parse += 'terrain = {}\n'.format(data['terrain'])
to_parse += '}\n'
try:
parsed = parser.parse(to_parse)
except:
print(repr(to_parse))
raise
output_tree.contents.extend(parsed)
parsed = parser.parse('# -*- ck2.province_setup -*-')
output_tree.pre_comments[:0] = parsed.post_comments
else:
output_tree = parser.parse_file('common/province_setup/'
'00_province_setup.txt')
outpath = parser.moddirs[0] / 'common/province_setup/00_province_setup.txt'
parser.write(output_tree, outpath)
if __name__ == '__main__':
main()
| gpl-2.0 |
niboshi/chainer | chainerx/testing/array.py | 8 | 6059 | import numpy.testing
import chainerx
# NumPy-like assertion functions that accept both NumPy and ChainerX arrays
def _as_numpy(x):
if isinstance(x, chainerx.ndarray):
return chainerx.to_numpy(x)
assert isinstance(x, numpy.ndarray) or numpy.isscalar(x)
return x
def _check_dtype_and_strides(x, y, dtype_check, strides_check):
if (strides_check is not None
and dtype_check is not None
and strides_check
and not dtype_check):
raise ValueError(
'Combination of dtype_check=False and strides_check=True is not '
'allowed')
if dtype_check is None:
dtype_check = True
if strides_check is None:
strides_check = dtype_check
if (isinstance(x, (numpy.ndarray, chainerx.ndarray))
and isinstance(y, (numpy.ndarray, chainerx.ndarray))):
if strides_check:
assert x.strides == y.strides, (
'Strides mismatch: x: {}, y: {}'.format(x.strides, y.strides))
if dtype_check:
assert x.dtype.name == y.dtype.name, (
'Dtype mismatch: x: {}, y: {}'.format(x.dtype, y.dtype))
def _preprocess_input(a):
# Convert to something NumPy can handle and return
return _as_numpy(a)
def assert_allclose(
x, y, rtol=1e-7, atol=0, equal_nan=True, err_msg='', verbose=True):
"""Raises an AssertionError if two array_like objects are not equal up to a
tolerance.
Args:
x(numpy.ndarray or chainerx.ndarray): The actual object to check.
y(numpy.ndarray or chainerx.ndarray): The desired, expected object.
rtol(float): Relative tolerance.
atol(float): Absolute tolerance.
equal_nan(bool): Allow NaN values if True. Otherwise, fail the
assertion if any NaN is found.
err_msg(str): The error message to be printed in case of failure.
verbose(bool): If ``True``, the conflicting values
are appended to the error message.
.. seealso:: :func:`numpy.testing.assert_allclose`
"""
x = _preprocess_input(x)
y = _preprocess_input(y)
numpy.testing.assert_allclose(
x, y, rtol=rtol, atol=atol, equal_nan=equal_nan, err_msg=err_msg,
verbose=verbose)
def assert_array_equal(x, y, err_msg='', verbose=True):
"""Raises an AssertionError if two array_like objects are not equal.
Args:
x(numpy.ndarray or chainerx.ndarray): The actual object to check.
y(numpy.ndarray or chainerx.ndarray): The desired, expected object.
err_msg(str): The error message to be printed in case of failure.
verbose(bool): If ``True``, the conflicting values
are appended to the error message.
.. seealso:: :func:`numpy.testing.assert_array_equal`
"""
x = _preprocess_input(x)
y = _preprocess_input(y)
numpy.testing.assert_array_equal(x, y, err_msg=err_msg, verbose=verbose)
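# Illustrative usage (assumes a working chainerx installation; values are made up):
#
#   a = chainerx.arange(3, dtype='float32')
#   b = numpy.arange(3, dtype=numpy.float32)
#   assert_array_equal(a, b)                 # mixed ChainerX/NumPy inputs are fine
#   assert_allclose(a, b + 1e-9, rtol=1e-6)  # tolerant comparison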
def assert_allclose_ex(x, y, rtol=1e-7, atol=0, equal_nan=True, err_msg='',
verbose=True, **kwargs):
"""assert_allclose_ex(
x, y, rtol=1e-7, atol=0, equal_nan=True, err_msg='', verbose=True,
*, dtype_check=True, strides_check=True)
Raises an AssertionError if two array_like objects are not equal up to a
tolerance.
Args:
x(numpy.ndarray or chainerx.ndarray): The actual object to check.
y(numpy.ndarray or chainerx.ndarray): The desired, expected object.
rtol(float): Relative tolerance.
atol(float): Absolute tolerance.
equal_nan(bool): Allow NaN values if True. Otherwise, fail the
assertion if any NaN is found.
err_msg(str): The error message to be printed in case of failure.
verbose(bool): If ``True``, the conflicting values
are appended to the error message.
dtype_check(bool): If ``True``, consistency of dtype is also checked.
Disabling ``dtype_check`` also implies ``strides_check=False``.
strides_check(bool): If ``True``, consistency of strides is also
checked.
float16_rtol(float): Relative tolerance for float16 dtype.
float16_atol(float): Absolute tolerance for float16 dtype.
float32_rtol(float): Relative tolerance for float32 dtype.
float32_atol(float): Absolute tolerance for float32 dtype.
float64_rtol(float): Relative tolerance for float64 dtype.
float64_atol(float): Absolute tolerance for float64 dtype.
.. seealso:: :func:`numpy.testing.assert_allclose`
"""
dtype_check = kwargs.pop('dtype_check', None)
strides_check = kwargs.pop('strides_check', None)
atol = kwargs.pop(x.dtype.name + '_atol', atol)
rtol = kwargs.pop(x.dtype.name + '_rtol', rtol)
assert_allclose(x, y, rtol, atol, equal_nan, err_msg, verbose)
_check_dtype_and_strides(x, y, dtype_check, strides_check)
def assert_array_equal_ex(x, y, *args, **kwargs):
"""assert_array_equal_ex(
x, y, err_msg='', verbose=True, *, dtype_check=True,
strides_check=True)
Raises an AssertionError if two array_like objects are not equal.
Args:
x(numpy.ndarray or chainerx.ndarray): The actual object to check.
y(numpy.ndarray or chainerx.ndarray): The desired, expected object.
err_msg(str): The error message to be printed in case of failure.
verbose(bool): If ``True``, the conflicting values
are appended to the error message.
dtype_check(bool): If ``True``, consistency of dtype is also checked.
Disabling ``dtype_check`` also implies ``strides_check=False``.
strides_check(bool): If ``True``, consistency of strides is also
checked.
.. seealso::
:func:`numpy.testing.assert_array_equal`
"""
dtype_check = kwargs.pop('dtype_check', None)
strides_check = kwargs.pop('strides_check', None)
assert_array_equal(x, y, *args, **kwargs)
_check_dtype_and_strides(x, y, dtype_check, strides_check)
| mit |
VShangxiao/zhihu-python | test.py | 24 | 9440 | # -*- coding: utf-8 -*-
'''
$$
$$$ &&&&$$$$##$$$$$$$$$$$$$$$$$$#$$$
$$$ $$$$$$$$$$$$$$$ ##$$$$$$$$$$$$$$$$$$o; ;
$$$$$$$$$$$$$$$ $$$$$$$$$$$$$$$ *$$o #
$$$ $$$ $$$ $$$ $$$ *$$o $$$$
$$* $$$ $$$ $$$ $$$$ *$$o $$$$
$$$ $$$ $$$ $$$$ *$$o $$$$
$$o $$$ $$$ $$$ *$$o $$$o
;$$$$$$$$$$$$$$$$ $$$ $$$ *$$o
$$$$$$$$$$$$$$$$$* $$$ $$$ ;$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
$$$ $$$ $$$ *$$o
$$$ $$$ $$$ *$$o
$$$$$$$ $$$ $$$ *$$o
$$$; $$$$ $$$ $$$ *$$o
$$$$ $$$ $$$$$ $$$$$$$$$ *$$o
$$$$! $$ $$$$* $$$;
$$$$$ ; $$$$$$$$$$$
$$$$$$
'''
from zhihu import Question
from zhihu import Answer
from zhihu import User
from zhihu import Collection
def question_test(url):
question = Question(url)
    # Get the title of the question
    title = question.get_title()
    # Get the detailed description of the question
    detail = question.get_detail()
    # Get the number of answers
    answers_num = question.get_answers_num()
    # Get the number of people following the question
    followers_num = question.get_followers_num()
    # Get the topics the question belongs to
    topics = question.get_topics()
    # Get the number of times the question has been viewed
    visit_times = question.get_visit_times()
    # Get the top-ranked answer
    top_answer = question.get_top_answer()
    # Get the top ten answers
    top_answers = question.get_top_i_answers(10)
    # Get all answers
    answers = question.get_all_answers()
    print title # Output: 现实可以有多美好?
print detail
    # Output (the question's description, echoed in the original Chinese):
# 本问题相对于“现实可以多残酷?传送门:现实可以有多残酷?
# 题主: 昨天看了“现实可以有多残酷“。感觉不太好,所以我
# 开了这个问题以相对应,希望能够“中和一下“。和那个问题题主不想
# 把它变成“比惨大会“一样,我也不想把这个变成“鸡汤故事会“,或者
# 是“晒幸福“比赛。所以大家从“现实,实际”的角度出发,讲述自己的
# 美好故事,让大家看看社会的冷和暖,能更加辨证地看待世界,是此
# 题和彼题共同的“心愿“吧。
    print answers_num # Output: 2441
    print followers_num # Output: 26910
    for topic in topics:
        print topic, # Output: 情感克制 现实 社会 个人经历
    print visit_times # Output: the number of times the question has currently been viewed
    print top_answer # Output: <zhihu.Answer instance at 0x7f8b6582d0e0> (an Answer object)
    print top_answers # Output: <generator object get_top_i_answers at 0x7fed676eb320> (a generator over the top ten Answers)
    print answers # Output: <generator object get_all_answer at 0x7f8b66ba30a0> (a generator over all Answers)
def answer_test(answer_url):
answer = Answer(answer_url)
    # Get the question this answer belongs to
    question = answer.get_question()
    # Get the author of the answer
    author = answer.get_author()
    # Get the number of upvotes the answer received
    upvote = answer.get_upvote()
    # Get the view count of the question this answer belongs to
    visit_times = answer.get_visit_times()
    # Get information on all users who upvoted the answer
    voters = answer.get_voters()
    # Export the answer as a txt file
    answer.to_txt()
    # Export the answer as a markdown file
    answer.to_md()
    print question
    # <zhihu.Question instance at 0x7f0b25d13f80>
    # A Question object
    print question.get_title() # Output: 现实可以有多美好?
    print author
    # <zhihu.User instance at 0x7f0b25425b90>
    # A User object
    print voters # <generator object get_voters at 0x7f32fbe55730> (a generator over all users who upvoted the answer)
    print author.get_user_id() # Output: 田浩
    print upvote # Output: 9320
    print visit_times # Output: the view count of the question this answer belongs to
def user_test(user_url):
user = User(user_url)
    # Get the user's ID
    user_id = user.get_user_id()
    # Get the number of followers the user has
    followers_num = user.get_followers_num()
    # Get the number of people the user follows
    followees_num = user.get_followees_num()
    # Get the number of questions the user has asked
    asks_num = user.get_asks_num()
    # Get the number of answers the user has written
    answers_num = user.get_answers_num()
    # Get the number of collections the user has
    collections_num = user.get_collections_num()
    # Get the number of upvotes the user has received
    agree_num = user.get_agree_num()
    # Get the number of thanks the user has received
    thanks_num = user.get_thanks_num()
    # Get the people the user follows
    followees = user.get_followees()
    # Get the people following the user
    followers = user.get_followers()
    # Get the questions the user has asked
    asks = user.get_asks()
    # Get the answers the user has written
    answers = user.get_answers()
    # Get the user's collections
    collections = user.get_collections()
print user_id # 黄继新
print followers_num # 614840
print followees_num # 8408
print asks_num # 1323
print answers_num # 786
print collections_num # 44
print agree_num # 46387
print thanks_num # 11477
print followees
# <generator object get_followee at 0x7ffcac3af050>
    # A generator object over everyone the user follows
i = 0
for followee in followees:
print followee.get_user_id()
i = i + 1
if i == 41:
break
print followers
# <generator object get_follower at 0x7ffcac3af0f0>
    # A generator object over everyone following the user
i = 0
for follower in followers:
print follower.get_user_id()
i = i + 1
if i == 41:
break
print asks
# <generator object get_ask at 0x7ffcab9db780>
    # A generator object over all questions the user has asked
print answers
# <generator object get_answer at 0x7ffcab9db7d0>
    # A generator object over all of the user's answers
print collections
# <generator object get_collection at 0x7ffcab9db820>
    # A generator object over the user's collections
def collection_test(collection_url):
collection = Collection(collection_url)
    # Get the creator of the collection
    creator = collection.get_creator()
    # Get the name of the collection
    name = collection.get_name()
    # Get the top ten answers in the collection
    top_answers = collection.get_top_i_answers(10)
    # Get all answers in the collection
    answers = collection.get_all_answers()
print creator
# <zhihu.User instance at 0x7fe1296f29e0>
    # A User object
print creator.get_user_id() # 稷黍
print name # 给你一个不同的视角
print top_answers
# <generator object get_top_i_answers at 0x7f378465dc80>
    # A generator object over the top ten answers
print answers
# <generator object get_all_answer at 0x7fe12a29b280>
    # A generator object over all answers
def test():
url = "http://www.zhihu.com/question/24269892"
question = Question(url)
    # Get the top-ranked answer
    answer = question.get_top_answer()
    # Get the author of the top-ranked answer
    user = answer.get_author()
    # Get all answers written by that author
    user_answers = user.get_answers()
    # Print the titles of all questions the author has answered
for answer in user_answers:
print answer.get_question().get_title()
    # Get all of the user's collections
user_collections = user.get_collections()
for collection in user_collections:
        # Print the name of each collection
        print collection.get_name()
        # Get the top ten answers in the collection
        top_answers = collection.get_top_i_answers(10)
        # Convert the answer contents to txt and markdown
for answer in top_answers:
answer.to_txt()
answer.to_md()
def main():
url = "http://www.zhihu.com/question/24269892"
question_test(url)
answer_url = "http://www.zhihu.com/question/24269892/answer/29960616"
answer_test(answer_url)
user_url = "http://www.zhihu.com/people/jixin"
user_test(user_url)
collection_url = "http://www.zhihu.com/collection/36750683"
collection_test(collection_url)
test()
if __name__ == '__main__':
main()
| mit |
yuanzhao/gpdb | src/test/tinc/tincrepo/resource_management/memory_accounting/too_many_exec_accounts/test_exec_accounts.py | 15 | 2189 | import tinctest
from mpp.models import SQLTestCase
from gppylib.commands.base import Command
from mpp.lib.PSQL import PSQL
def _set_VLIM(vlimMB):
# Set up GUCs for VLIM (gp_vmem_protect_limit), SLIM (gp_vmem_limit_per_query) and RQT activation percent (runaway_detector_activation_percent)
tinctest.logger.info('Setting GUCs for VLIM gp_vmem_protect_limit=%dMB'%(vlimMB))
Command('Run gpconfig to set GUC gp_vmem_protect_limit',
'source $GPHOME/greenplum_path.sh;gpconfig -c gp_vmem_protect_limit -v %d' % vlimMB).run(validateAfter=True)
# Restart DB
Command('Restart database for GUCs to take effect',
'source $GPHOME/greenplum_path.sh && gpstop -ar').run(validateAfter=True)
def _reset_VLIM ():
# Reset GUCs for VLIM (gp_vmem_protect_limit), SLIM (gp_vmem_limit_per_query) and RQT activation percent (runaway_detector_activation_percent)
tinctest.logger.info('Resetting GUCs for VLIM gp_vmem_protect_limit')
Command('Run gpconfig to reset GUC gp_vmem_protect_limit',
'source $GPHOME/greenplum_path.sh;gpconfig -c gp_vmem_protect_limit -v 8192').run(validateAfter=True)
# Restart DB
Command('Restart database for GUCs to take effect',
'source $GPHOME/greenplum_path.sh && gpstop -ar').run(validateAfter=True)
class TooManyExecAccountsTestCase(SQLTestCase):
"""
@tags memory_accounting
"""
'''
SQL Test for Too many executor accounts in memory accounting
'''
def _infer_metadata(self):
super(TooManyExecAccountsTestCase, self)._infer_metadata()
try:
self.vlimMB = int(self._metadata.get('vlimMB', '8192')) # Default is 8192
except Exception:
tinctest.logger.info("Error getting the testcase related metadata")
raise
def setUp(self):
_set_VLIM(self.vlimMB)
return super(TooManyExecAccountsTestCase, self).setUp()
@classmethod
def setUpClass(cls):
super(TooManyExecAccountsTestCase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
_reset_VLIM()
sql_dir = 'sql/'
ans_dir = 'expected'
out_dir = 'output/'
| apache-2.0 |
hzruandd/AutobahnPython | autobahn/wamp/test/test_component.py | 10 | 7873 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import os
if os.environ.get('USE_TWISTED', False):
from autobahn.twisted.util import sleep
from autobahn.twisted import wamp
from twisted.trial import unittest
from twisted.internet import defer
from twisted.application import service
class CaseComponent(wamp.ApplicationSession):
"""
Application code goes here. This is an example component that calls
a remote procedure on a WAMP peer, subscribes to a topic to receive
events, and then stops the world after some events.
"""
def __init__(self, config):
wamp.ApplicationSession.__init__(self, config)
self.test = config.extra['test']
self.stop = False
self._logline = 1
self.finished = False
def log(self, *args):
if len(args) > 1:
sargs = ", ".join(str(s) for s in args)
elif len(args) == 1:
sargs = args[0]
else:
sargs = "-"
msg = u'= : {0:>3} : {1:<20} : {2}'.format(self._logline, self.__class__.__name__, sargs)
self._logline += 1
print(msg)
def finish(self):
if not self.finished:
self.test.deferred.callback(None)
self.finished = True
else:
print("already finished")
class Case1_Backend(CaseComponent):
@defer.inlineCallbacks
def onJoin(self, details):
self.log("joined")
def add2(x, y):
self.log("add2 invoked: {0}, {1}".format(x, y))
return x + y
yield self.register(add2, 'com.mathservice.add2')
self.log("add2 registered")
self.finish()
class Case1_Frontend(CaseComponent):
@defer.inlineCallbacks
def onJoin(self, details):
self.log("joined")
try:
res = yield self.call('com.mathservice.add2', 2, 3)
except Exception as e:
self.log("call error: {0}".format(e))
else:
self.log("call result: {0}".format(res))
self.finish()
class Case2_Backend(CaseComponent):
@defer.inlineCallbacks
def onJoin(self, details):
self.log("joined")
def ping():
self.log("ping() is invoked")
return
def add2(a, b):
self.log("add2() is invoked", a, b)
return a + b
def stars(nick="somebody", stars=0):
self.log("stars() is invoked", nick, stars)
return u"{0} starred {1}x".format(nick, stars)
def orders(product, limit=5):
self.log("orders() is invoked", product, limit)
return [u"Product {0}".format(i) for i in range(50)][:limit]
def arglen(*args, **kwargs):
self.log("arglen() is invoked", args, kwargs)
return [len(args), len(kwargs)]
yield self.register(ping, u'com.arguments.ping')
yield self.register(add2, u'com.arguments.add2')
yield self.register(stars, u'com.arguments.stars')
yield self.register(orders, u'com.arguments.orders')
yield self.register(arglen, u'com.arguments.arglen')
self.log("procedures registered")
class Case2_Frontend(CaseComponent):
@defer.inlineCallbacks
def onJoin(self, details):
self.log("joined")
yield sleep(1)
yield self.call(u'com.arguments.ping')
self.log("Pinged!")
res = yield self.call(u'com.arguments.add2', 2, 3)
self.log("Add2: {0}".format(res))
starred = yield self.call(u'com.arguments.stars')
self.log("Starred 1: {0}".format(starred))
starred = yield self.call(u'com.arguments.stars', nick=u'Homer')
self.log("Starred 2: {0}".format(starred))
starred = yield self.call(u'com.arguments.stars', stars=5)
self.log("Starred 3: {0}".format(starred))
starred = yield self.call(u'com.arguments.stars', nick=u'Homer', stars=5)
self.log("Starred 4: {0}".format(starred))
orders = yield self.call(u'com.arguments.orders', u'coffee')
self.log("Orders 1: {0}".format(orders))
orders = yield self.call(u'com.arguments.orders', u'coffee', limit=10)
self.log("Orders 2: {0}".format(orders))
arglengths = yield self.call(u'com.arguments.arglen')
self.log("Arglen 1: {0}".format(arglengths))
arglengths = yield self.call(u'com.arguments.arglen', 1, 2, 3)
self.log("Arglen 1: {0}".format(arglengths))
arglengths = yield self.call(u'com.arguments.arglen', a=1, b=2, c=3)
self.log("Arglen 2: {0}".format(arglengths))
arglengths = yield self.call(u'com.arguments.arglen', 1, 2, 3, a=1, b=2, c=3)
self.log("Arglen 3: {0}".format(arglengths))
self.log("finishing")
self.finish()
class TestRpc(unittest.TestCase):
if os.environ.get("WAMP_ROUTER_URL") is None:
skip = ("Please provide WAMP_ROUTER_URL environment with url to "
"WAMP router to run WAMP integration tests")
def setUp(self):
self.debug = False
self.url = os.environ.get("WAMP_ROUTER_URL")
self.realm = u"realm1"
@defer.inlineCallbacks
def runOneTest(self, components):
self.deferred = defer.Deferred()
app = service.MultiService()
for component in components:
c = wamp.Service(
url=self.url,
extra=dict(test=self),
realm=self.realm,
make=component,
debug=bool(os.environ.get('debug_websocket', False)),
debug_wamp=bool(os.environ.get('debug_lowlevel', False)),
debug_app=bool(os.environ.get('debug_app', False))
)
c.setServiceParent(app)
app.startService()
yield self.deferred
app.stopService()
@defer.inlineCallbacks
def test_case1(self):
yield self.runOneTest([Case1_Backend, Case1_Frontend])
@defer.inlineCallbacks
def test_case2(self):
yield self.runOneTest([Case2_Backend, Case2_Frontend])
| mit |
h2oloopan/easymerge | EasyMerge/tests/reddit/r2/r2/models/gold.py | 1 | 19762 | # The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of
# the Original Code is reddit Inc.
#
# All portions of the code written by reddit are Copyright (c) 2006-2013 reddit
# Inc. All Rights Reserved.
###############################################################################
from r2.lib.db.tdb_sql import make_metadata, index_str, create_table
import json
import pytz
import uuid
from pycassa import NotFoundException
from pycassa.system_manager import INT_TYPE, UTF8_TYPE
from pycassa.util import convert_uuid_to_time
from pylons import g, c
from pylons.i18n import _, ungettext
from datetime import datetime
import sqlalchemy as sa
from sqlalchemy.exc import IntegrityError, OperationalError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.sql.expression import select
from sqlalchemy.sql.functions import sum as sa_sum
from r2.lib.utils import GoldPrice, randstr
import re
from random import choice
from time import time
from r2.lib.db import tdb_cassandra
from r2.lib.db.tdb_cassandra import NotFound, view_of
from r2.models import Account
from r2.models.subreddit import Frontpage
from r2.models.wiki import WikiPage
from r2.lib.memoize import memoize
import stripe
gold_bonus_cutoff = datetime(2010,7,27,0,0,0,0,g.tz)
gold_static_goal_cutoff = datetime(2013, 11, 7, tzinfo=g.display_tz)
ENGINE_NAME = 'authorize'
ENGINE = g.dbm.get_engine(ENGINE_NAME)
METADATA = make_metadata(ENGINE)
TIMEZONE = pytz.timezone("America/Los_Angeles")
Session = scoped_session(sessionmaker(bind=ENGINE))
Base = declarative_base(bind=ENGINE)
gold_table = sa.Table('reddit_gold', METADATA,
sa.Column('trans_id', sa.String, nullable = False,
primary_key = True),
# status can be: invalid, unclaimed, claimed
sa.Column('status', sa.String, nullable = False),
sa.Column('date', sa.DateTime(timezone=True),
nullable = False,
default = sa.func.now()),
sa.Column('payer_email', sa.String, nullable = False),
sa.Column('paying_id', sa.String, nullable = False),
sa.Column('pennies', sa.Integer, nullable = False),
sa.Column('secret', sa.String, nullable = True),
sa.Column('account_id', sa.String, nullable = True),
sa.Column('days', sa.Integer, nullable = True),
sa.Column('subscr_id', sa.String, nullable = True))
indices = [index_str(gold_table, 'status', 'status'),
index_str(gold_table, 'date', 'date'),
index_str(gold_table, 'account_id', 'account_id'),
index_str(gold_table, 'secret', 'secret'),
index_str(gold_table, 'payer_email', 'payer_email'),
index_str(gold_table, 'subscr_id', 'subscr_id')]
create_table(gold_table, indices)
class GoldRevenueGoalByDate(object):
__metaclass__ = tdb_cassandra.ThingMeta
_use_db = True
_cf_name = "GoldRevenueGoalByDate"
_read_consistency_level = tdb_cassandra.CL.ONE
_write_consistency_level = tdb_cassandra.CL.ALL
_extra_schema_creation_args = {
"column_name_class": UTF8_TYPE,
"default_validation_class": INT_TYPE,
}
_compare_with = UTF8_TYPE
_type_prefix = None
ROWKEY = '1'
@staticmethod
def _colkey(date):
return date.strftime("%Y-%m-%d")
@classmethod
def set(cls, date, goal):
cls._cf.insert(cls.ROWKEY, {cls._colkey(date): int(goal)})
@classmethod
def get(cls, date):
"""Gets the goal for a date, or the nearest previous goal."""
try:
colkey = cls._colkey(date)
col = cls._cf.get(
cls.ROWKEY,
column_reversed=True,
column_start=colkey,
column_count=1,
)
return col.values()[0]
except NotFoundException:
return None
class GildedCommentsByAccount(tdb_cassandra.DenormalizedRelation):
_use_db = True
_last_modified_name = 'Gilding'
_views = []
@classmethod
def value_for(cls, thing1, thing2):
return ''
@classmethod
def gild(cls, user, thing):
cls.create(user, [thing])
class GildedLinksByAccount(tdb_cassandra.DenormalizedRelation):
_use_db = True
_last_modified_name = 'Gilding'
_views = []
@classmethod
def value_for(cls, thing1, thing2):
return ''
@classmethod
def gild(cls, user, thing):
cls.create(user, [thing])
@view_of(GildedCommentsByAccount)
@view_of(GildedLinksByAccount)
class GildingsByThing(tdb_cassandra.View):
_use_db = True
_extra_schema_creation_args = {
"key_validation_class": tdb_cassandra.UTF8_TYPE,
"column_name_class": tdb_cassandra.UTF8_TYPE,
}
@classmethod
def get_gilder_ids(cls, thing):
columns = cls.get_time_sorted_columns(thing._fullname)
return [int(account_id, 36) for account_id in columns.iterkeys()]
@classmethod
def create(cls, user, things):
for thing in things:
cls._set_values(thing._fullname, {user._id36: ""})
@classmethod
def delete(cls, user, things):
# gildings cannot be undone
raise NotImplementedError()
@view_of(GildedCommentsByAccount)
@view_of(GildedLinksByAccount)
class GildingsByDay(tdb_cassandra.View):
_use_db = True
_compare_with = tdb_cassandra.TIME_UUID_TYPE
_extra_schema_creation_args = {
"key_validation_class": tdb_cassandra.ASCII_TYPE,
"column_name_class": tdb_cassandra.TIME_UUID_TYPE,
"default_validation_class": tdb_cassandra.UTF8_TYPE,
}
@staticmethod
def _rowkey(date):
return date.strftime("%Y-%m-%d")
@classmethod
def get_gildings(cls, date):
key = cls._rowkey(date)
columns = cls.get_time_sorted_columns(key)
gildings = []
for name, json_blob in columns.iteritems():
timestamp = convert_uuid_to_time(name)
date = datetime.utcfromtimestamp(timestamp).replace(tzinfo=g.tz)
gilding = json.loads(json_blob)
gilding["date"] = date
gilding["user"] = int(gilding["user"], 36)
gildings.append(gilding)
return gildings
@classmethod
def create(cls, user, things):
key = cls._rowkey(datetime.now(g.tz))
columns = {}
for thing in things:
columns[uuid.uuid1()] = json.dumps({
"user": user._id36,
"thing": thing._fullname,
})
cls._set_values(key, columns)
@classmethod
def delete(cls, user, things):
# gildings cannot be undone
raise NotImplementedError()
def create_unclaimed_gold (trans_id, payer_email, paying_id,
pennies, days, secret, date,
subscr_id = None):
try:
gold_table.insert().execute(trans_id=str(trans_id),
subscr_id=subscr_id,
status="unclaimed",
payer_email=payer_email,
paying_id=paying_id,
pennies=pennies,
days=days,
secret=str(secret),
date=date
)
except IntegrityError:
rp = gold_table.update(
sa.and_(gold_table.c.status == 'uncharged',
gold_table.c.trans_id == str(trans_id)),
values = {
gold_table.c.status: "unclaimed",
gold_table.c.payer_email: payer_email,
gold_table.c.paying_id: paying_id,
gold_table.c.pennies: pennies,
gold_table.c.days: days,
gold_table.c.secret:secret,
gold_table.c.subscr_id : subscr_id
},
).execute()
def create_claimed_gold (trans_id, payer_email, paying_id,
pennies, days, secret, account_id, date,
subscr_id = None, status="claimed"):
gold_table.insert().execute(trans_id=trans_id,
subscr_id=subscr_id,
status=status,
payer_email=payer_email,
paying_id=paying_id,
pennies=pennies,
days=days,
secret=secret,
account_id=account_id,
date=date)
def create_gift_gold (giver_id, recipient_id, days, date, signed):
trans_id = "X%d%s-%s" % (int(time()), randstr(2), 'S' if signed else 'A')
gold_table.insert().execute(trans_id=trans_id,
status="gift",
paying_id=giver_id,
payer_email='',
pennies=0,
days=days,
account_id=recipient_id,
date=date)
def create_gold_code(trans_id, payer_email, paying_id, pennies, days, date):
if not trans_id:
trans_id = "GC%d%s" % (int(time()), randstr(2))
valid_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# keep picking new codes until we find an unused one
while True:
code = randstr(10, alphabet=valid_chars)
s = sa.select([gold_table],
sa.and_(gold_table.c.secret == code.lower(),
gold_table.c.status == 'unclaimed'))
res = s.execute().fetchall()
if not res:
gold_table.insert().execute(
trans_id=trans_id,
status='unclaimed',
payer_email=payer_email,
paying_id=paying_id,
pennies=pennies,
days=days,
secret=code.lower(),
date=date)
return code
def account_by_payingid(paying_id):
s = sa.select([sa.distinct(gold_table.c.account_id)],
gold_table.c.paying_id == paying_id)
res = s.execute().fetchall()
if len(res) != 1:
return None
return int(res[0][0])
# returns None if the ID was never valid
# returns "already claimed" if it's already been claimed
# Otherwise, it's valid and the function claims it, returning a tuple with:
# * the number of days
# * the subscr_id, if any
def claim_gold(secret, account_id):
if not secret:
return None
# The donation email has the code at the end of the sentence,
# so they might get sloppy and catch the period or some whitespace.
secret = secret.strip(". ")
secret = secret.replace("-", "").lower()
rp = gold_table.update(sa.and_(gold_table.c.status == 'unclaimed',
gold_table.c.secret == secret),
values = {
gold_table.c.status: 'claimed',
gold_table.c.account_id: account_id,
},
).execute()
if rp.rowcount == 0:
just_claimed = False
elif rp.rowcount == 1:
just_claimed = True
else:
raise ValueError("rowcount == %d?" % rp.rowcount)
s = sa.select([gold_table.c.days, gold_table.c.subscr_id],
gold_table.c.secret == secret,
limit = 1)
rows = s.execute().fetchall()
if not rows:
return None
elif just_claimed:
return (rows[0].days, rows[0].subscr_id)
else:
return "already claimed"
def check_by_email(email):
s = sa.select([gold_table.c.status,
gold_table.c.secret,
gold_table.c.days,
gold_table.c.account_id],
gold_table.c.payer_email == email)
return s.execute().fetchall()
def retrieve_gold_transaction(transaction_id):
s = sa.select([gold_table], gold_table.c.trans_id == transaction_id)
res = s.execute().fetchall()
if res:
return res[0] # single row per transaction_id
def update_gold_transaction(transaction_id, status):
rp = gold_table.update(gold_table.c.trans_id == str(transaction_id),
values={gold_table.c.status: status}).execute()
def transactions_by_user(user):
s = sa.select([gold_table], gold_table.c.account_id == str(user._id))
res = s.execute().fetchall()
return res
def gold_payments_by_user(user):
transactions = transactions_by_user(user)
# filter out received gifts
transactions = [trans for trans in transactions
if not trans.trans_id.startswith(('X', 'M'))]
return transactions
def gold_received_by_user(user):
transactions = transactions_by_user(user)
transactions = [trans for trans in transactions
if trans.trans_id.startswith('X')]
return transactions
def days_to_pennies(days):
if days < 366:
months = days / 31
return months * g.gold_month_price.pennies
else:
years = days / 366
return years * g.gold_year_price.pennies
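# Worked example (hypothetical prices -- the real values come from g.*): with
# gold_month_price = 399 pennies and gold_year_price = 2999 pennies,
#   days_to_pennies(93)  -> (93 integer-divided by 31) * 399  = 3 * 399  = 1197
#   days_to_pennies(732) -> (732 integer-divided by 366) * 2999 = 2 * 2999 = 5998
# Note the integer division: days_to_pennies(30) is 0 because 30 / 31 == 0.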
def append_random_bottlecap_phrase(message):
"""Appends a random "bottlecap" phrase from the wiki page.
The wiki page should be an unordered list with each item a separate
bottlecap.
"""
bottlecap = None
try:
wp = WikiPage.get(Frontpage, g.wiki_page_gold_bottlecaps)
split_list = re.split('^[*-] ', wp.content, flags=re.MULTILINE)
choices = [item.strip() for item in split_list if item.strip()]
if len(choices):
bottlecap = choice(choices)
except NotFound:
pass
if bottlecap:
message += '\n\n> ' + bottlecap
return message
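# Illustrative sketch (hypothetical wiki content) of the format this expects --
# an unordered list with one bottlecap phrase per item:
#   * An example bottlecap phrase.
#   - Another example phrase.
# Splitting on '^[*-] ' with re.MULTILINE yields the individual phrases (plus a
# leading empty string), which are stripped and passed to random.choice above.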
def gold_revenue_multi(dates):
NON_REVENUE_STATUSES = ("declined", "chargeback", "fudge")
date_expr = sa.func.date_trunc('day',
sa.func.timezone(TIMEZONE.zone, gold_table.c.date))
query = (select([date_expr, sa_sum(gold_table.c.pennies)])
.where(~ gold_table.c.status.in_(NON_REVENUE_STATUSES))
.where(date_expr.in_(dates))
.group_by(date_expr)
)
return {truncated_time.date(): pennies
for truncated_time, pennies in ENGINE.execute(query)}
@memoize("gold-revenue-volatile", time=600)
def gold_revenue_volatile(date):
return gold_revenue_multi([date]).get(date, 0)
@memoize("gold-revenue-steady")
def gold_revenue_steady(date):
return gold_revenue_multi([date]).get(date, 0)
@memoize("gold-goal")
def gold_goal_on(date):
"""Returns the gold revenue goal (in pennies) for a given date."""
return GoldRevenueGoalByDate.get(date)
def account_from_stripe_customer_id(stripe_customer_id):
q = Account._query(Account.c.gold_subscr_id == stripe_customer_id,
Account.c._spam == (True, False), data=True)
return next(iter(q), None)
@memoize("subscription-details", time=60)
def _get_subscription_details(stripe_customer_id):
stripe.api_key = g.STRIPE_SECRET_KEY
customer = stripe.Customer.retrieve(stripe_customer_id)
if getattr(customer, 'deleted', False):
return {}
subscription = customer.subscription
card = customer.active_card
end = datetime.fromtimestamp(subscription.current_period_end).date()
last4 = card.last4
pennies = subscription.plan.amount
return {
'next_charge_date': end,
'credit_card_last4': last4,
'pennies': pennies,
}
def get_subscription_details(user):
if not getattr(user, 'gold_subscr_id', None):
return
return _get_subscription_details(user.gold_subscr_id)
def get_discounted_price(gold_price):
discount = float(getattr(g, 'BTC_DISCOUNT', '0'))
price = (gold_price.pennies * (1 - discount)) / 100.
return GoldPrice("%.2f" % price)
def make_gold_message(thing, user_gilded):
from r2.models import Comment
if thing.gildings == 0 or thing._spam or thing._deleted:
return None
author = Account._byID(thing.author_id, data=True)
if not author._deleted:
author_name = author.name
else:
author_name = _("[deleted]")
if c.user_is_loggedin and thing.author_id == c.user._id:
if isinstance(thing, Comment):
gilded_message = ungettext(
"a redditor gifted you a month of reddit gold for this "
"comment.",
"redditors have gifted you %(months)d months of reddit gold "
"for this comment.",
thing.gildings
)
else:
gilded_message = ungettext(
"a redditor gifted you a month of reddit gold for this "
"submission.",
"redditors have gifted you %(months)d months of reddit gold "
"for this submission.",
thing.gildings
)
elif user_gilded:
if isinstance(thing, Comment):
gilded_message = ungettext(
"you have gifted reddit gold to %(recipient)s for this "
"comment.",
"you and other redditors have gifted %(months)d months of "
"reddit gold to %(recipient)s for this comment.",
thing.gildings
)
else:
gilded_message = ungettext(
"you have gifted reddit gold to %(recipient)s for this "
"submission.",
"you and other redditors have gifted %(months)d months of "
"reddit gold to %(recipient)s for this submission.",
thing.gildings
)
else:
if isinstance(thing, Comment):
gilded_message = ungettext(
"a redditor has gifted reddit gold to %(recipient)s for this "
"comment.",
"redditors have gifted %(months)d months of reddit gold to "
"%(recipient)s for this comment.",
thing.gildings
)
else:
gilded_message = ungettext(
"a redditor has gifted reddit gold to %(recipient)s for this "
"submission.",
"redditors have gifted %(months)d months of reddit gold to "
"%(recipient)s for this submission.",
thing.gildings
)
return gilded_message % dict(
recipient=author_name,
months=thing.gildings,
)
| mit |
LLNL/spack | var/spack/repos/builtin/packages/optional-lite/package.py | 5 | 1888 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
from shutil import copytree
class OptionalLite(CMakePackage):
"""
A single-file header-only version of a C++17-like optional, a nullable
object for C++98, C++11 and later.
"""
homepage = "https://github.com/martinmoene/optional-lite"
url = "https://github.com/martinmoene/optional-lite/archive/v3.0.0.tar.gz"
version('3.2.0', sha256='069c92f6404878588be761d609b917a111b0231633a91f7f908288fc77eb24c8')
version('3.1.1', sha256='b61fe644b9f77d7cc1c555b3e40e973b135bf2c0350e5fa67bc6f379d9fc3158')
version('3.1.0', sha256='66ca0d923e77c3f2a792ef3871e9ddbacf5fac2bfd6b8743df9c9c5814644718')
version('3.0.0', sha256='36ae58512c478610647978811f0f4dbe105880372bd7ed39417314d50a27254e')
version('2.3.0', sha256='8fe46216147234b172c6a5b182726834afc44dfdca1e976a264d6f96eb183916')
version('2.2.0', sha256='9ce1bb021de42f804f8d17ed30b79fc98296122bec8db60492104978cd282fa2')
version('2.0.0', sha256='e8d803cbc7be241df41a9ab267b525b7941df09747cd5a7deb55f863bd8a4e8d')
version('1.0.3', sha256='7a2fb0fe20d61d091f6730237add9bab58bc0df1288cb96f3e8a61b859539067')
def cmake_args(self):
return [
"-DOPTIONAL_LITE_OPT_BUILD_TESTS=%s"
% ("ON" if self.run_tests else "OFF"),
"-DOPTIONAL_LITE_OPT_BUILD_EXAMPLES=OFF"
]
# Pre-3.2.0 install was simply a copytree on the includes
@when("@:3.1")
def cmake(self, spec, prefix):
pass
@when("@:3.1")
def build(self, spec, prefix):
pass
@when("@:3.1")
def install(self, spec, prefix):
copytree('include', prefix.include)
@when("@:3.1")
def check(self):
pass
| lgpl-2.1 |
DataDog/monitor | docs/_sphinx/pygments/styles/simple.py | 2 | 1788 | # -*- coding: utf-8 -*-
"""
    pygments.styles.simple
~~~~~~~~~~~~~~~~~~~~~~~~
Simple style for Scala highlighting.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class SimpleStyle(Style):
"""
Simple style for Scala highlighting.
"""
background_color = "#f0f0f0"
default_style = ""
font_face = "Menlo"
styles = {
Whitespace: "#f0f0f0",
Comment: "#777766",
Comment.Preproc: "",
Comment.Special: "",
Keyword: "bold #000080",
Keyword.Pseudo: "",
Keyword.Type: "",
Operator: "#000000",
Operator.Word: "",
Name.Builtin: "#000000",
Name.Function: "#000000",
Name.Class: "#000000",
Name.Namespace: "#000000",
Name.Exception: "#000000",
Name.Variable: "#000000",
Name.Constant: "bold #000000",
Name.Label: "#000000",
Name.Entity: "#000000",
Name.Attribute: "#000000",
Name.Tag: "#000000",
Name.Decorator: "#000000",
String: "#008000",
String.Doc: "",
String.Interpol: "",
String.Escape: "",
String.Regex: "",
String.Symbol: "",
String.Other: "",
Number: "#008000",
Error: "border:#FF0000"
}
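# A minimal usage sketch (hypothetical example; assumes the pygments package
# with its Scala lexer and HTML formatter is importable):
def _example_simple_style():
    from pygments import highlight
    from pygments.lexers import ScalaLexer
    from pygments.formatters import HtmlFormatter
    code = 'object Hello { def greet() = println("hi") }'
    # Render Scala source to HTML using SimpleStyle for the token colours.
    return highlight(code, ScalaLexer(), HtmlFormatter(style=SimpleStyle))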
| apache-2.0 |
drmrd/ansible | test/units/modules/network/nxos/test_nxos_bgp_neighbor.py | 39 | 2505 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_bgp_neighbor
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosBgpNeighborModule(TestNxosModule):
module = nxos_bgp_neighbor
def setUp(self):
super(TestNxosBgpNeighborModule, self).setUp()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_bgp_neighbor.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_bgp_neighbor.get_config')
self.get_config = self.mock_get_config.start()
def tearDown(self):
super(TestNxosBgpNeighborModule, self).tearDown()
self.mock_load_config.stop()
self.mock_get_config.stop()
def load_fixtures(self, commands=None, device=''):
self.get_config.return_value = load_fixture('nxos_bgp', 'config.cfg')
self.load_config.return_value = []
def test_nxos_bgp_neighbor(self):
set_module_args(dict(asn=65535, neighbor='3.3.3.3', description='some words'))
self.execute_module(changed=True, commands=['router bgp 65535', 'neighbor 3.3.3.3', 'description some words'])
def test_nxos_bgp_neighbor_remove_private_as(self):
set_module_args(dict(asn=65535, neighbor='3.3.3.4', remove_private_as='all'))
self.execute_module(changed=False, commands=[])
def test_nxos_bgp_neighbor_remove_private_as_changed(self):
set_module_args(dict(asn=65535, neighbor='3.3.3.4', remove_private_as='replace-as'))
self.execute_module(changed=True, commands=['router bgp 65535', 'neighbor 3.3.3.4', 'remove-private-as replace-as'])
| gpl-3.0 |
smaccm/python-capdl-tool | capdl/__init__.py | 1 | 1096 | #
# Copyright 2014, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#
from Cap import Cap
from ELF import ELF
from Object import Frame, PageTable, PageDirectory, ASIDPool, CNode, Endpoint, \
AsyncEndpoint, TCB, Untyped, IOPorts, IODevice, IOPageTable, \
IRQ
from Spec import Spec
from Allocator import seL4_UntypedObject, seL4_TCBObject, seL4_EndpointObject, \
seL4_AsyncEndpointObject, seL4_CapTableObject, seL4_ARM_SmallPageObject, \
seL4_ARM_PageTableObject, seL4_ARM_PageDirectoryObject, seL4_IA32_4K, \
seL4_IA32_PageTableObject, seL4_IA32_PageDirectoryObject, \
seL4_IA32_IOPageTableObject, seL4_CanRead, seL4_CanWrite, seL4_CanGrant, \
seL4_AllRights, ObjectAllocator, CSpaceAllocator, seL4_FrameObject, \
seL4_PageDirectoryObject
from PageCollection import PageCollection, create_address_space
from util import page_table_vaddr, page_table_index, page_index, page_vaddr
| bsd-2-clause |
cosenal/osf.io | website/addons/forward/tests/test_views.py | 44 | 1162 | from nose.tools import *
from website.addons.forward.tests.utils import ForwardAddonTestCase
class TestForwardLogs(ForwardAddonTestCase):
def setUp(self):
super(TestForwardLogs, self).setUp()
self.app.authenticate(*self.user.auth)
def test_change_url_log_added(self):
log_count = len(self.project.logs)
self.app.put_json(
self.project.api_url_for('forward_config_put'),
dict(
url='http://how.to.bas/ic',
redirectBool=True,
redirectSecs=15,
),
)
self.project.reload()
assert_equal(
len(self.project.logs),
log_count + 1
)
def test_change_timeout_log_not_added(self):
log_count = len(self.project.logs)
self.app.put_json(
self.project.api_url_for('forward_config_put'),
dict(
url=self.node_settings.url,
redirectBool=True,
redirectSecs=15,
),
)
self.project.reload()
assert_equal(
len(self.project.logs),
log_count
)
| apache-2.0 |
snailhu/myself-Repository | SmartDataApp/pusher/push_api.py | 1 | 1326 | #coding:utf-8
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.db import transaction
import simplejson
from SmartDataApp.controller.admin import return_error_response
from SmartDataApp.views import convert_session_id_to_user
from SmartDataApp.models import ProfileDetail
@transaction.atomic
@csrf_exempt
def api_get_channel_user_id(request):
convert_session_id_to_user(request)
if request.method != u'POST':
return return_error_response()
elif 'application/json' in request.META['CONTENT_TYPE'].split(';'):
data = simplejson.loads(request.body)
channel_id = data.get("channel_id", None)
user_id = data.get("user_id", None)
device_type = data.get("device_type", None)
if channel_id and user_id:
profile = ProfileDetail.objects.get(profile=request.user)
profile.device_chanel_id = channel_id
profile.device_user_id = user_id
profile.device_type = device_type
profile.save()
return HttpResponse(simplejson.dumps({'success': True, 'info': '绑定成功'}), content_type='application/json')
else:
return HttpResponse(simplejson.dumps({'success': False, 'info': '没有传入相关信息'}), content_type='application/json')
| apache-2.0 |
BigBrother1984/android_external_chromium_org | tools/perf/benchmarks/robohornet_pro.py | 24 | 1290 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Microsoft's RoboHornet Pro benchmark."""
import os
from telemetry import test
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class RobohornetProMeasurement(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
tab.ExecuteJavaScript('ToggleRoboHornet()')
done = 'document.getElementById("results").innerHTML.indexOf("Total") != -1'
def _IsDone():
return tab.EvaluateJavaScript(done)
util.WaitFor(_IsDone, 120)
result = int(tab.EvaluateJavaScript('stopTime - startTime'))
results.Add('Total', 'ms', result)
class RobohornetPro(test.Test):
test = RobohornetProMeasurement
def CreatePageSet(self, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../data/robohornetpro.json',
# Measurement require use of real Date.now() for measurement.
'make_javascript_deterministic': False,
'pages': [
{ 'url':
'http://ie.microsoft.com/testdrive/performance/robohornetpro/' }
]
}, os.path.abspath(__file__))
| bsd-3-clause |
SNAPPETITE/backend | flask/lib/python2.7/site-packages/whoosh/lang/snowball/finnish.py | 96 | 10074 | from .bases import _StandardStemmer
from whoosh.compat import u
class FinnishStemmer(_StandardStemmer):
"""
The Finnish Snowball stemmer.
:cvar __vowels: The Finnish vowels.
:type __vowels: unicode
:cvar __restricted_vowels: A subset of the Finnish vowels.
:type __restricted_vowels: unicode
:cvar __long_vowels: The Finnish vowels in their long forms.
:type __long_vowels: tuple
:cvar __consonants: The Finnish consonants.
:type __consonants: unicode
:cvar __double_consonants: The Finnish double consonants.
:type __double_consonants: tuple
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
:type __step4_suffixes: tuple
:note: A detailed description of the Finnish
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/finnish/stemmer.html
"""
__vowels = u("aeiouy\xE4\xF6")
__restricted_vowels = u("aeiou\xE4\xF6")
__long_vowels = ("aa", "ee", "ii", "oo", "uu", u("\xE4\xE4"),
u("\xF6\xF6"))
__consonants = "bcdfghjklmnpqrstvwxz"
__double_consonants = ("bb", "cc", "dd", "ff", "gg", "hh", "jj",
"kk", "ll", "mm", "nn", "pp", "qq", "rr",
"ss", "tt", "vv", "ww", "xx", "zz")
__step1_suffixes = ('kaan', u('k\xE4\xE4n'), 'sti', 'kin', 'han',
u('h\xE4n'), 'ko', u('k\xF6'), 'pa', u('p\xE4'))
__step2_suffixes = ('nsa', u('ns\xE4'), 'mme', 'nne', 'si', 'ni',
'an', u('\xE4n'), 'en')
__step3_suffixes = ('siin', 'tten', 'seen', 'han', 'hen', 'hin',
'hon', u('h\xE4n'), u('h\xF6n'), 'den', 'tta',
u('tt\xE4'), 'ssa', u('ss\xE4'), 'sta',
u('st\xE4'), 'lla', u('ll\xE4'), 'lta',
u('lt\xE4'), 'lle', 'ksi', 'ine', 'ta',
u('t\xE4'), 'na', u('n\xE4'), 'a', u('\xE4'),
'n')
__step4_suffixes = ('impi', 'impa', u('imp\xE4'), 'immi', 'imma',
u('imm\xE4'), 'mpi', 'mpa', u('mp\xE4'), 'mmi',
'mma', u('mm\xE4'), 'eja', u('ej\xE4'))
def stem(self, word):
"""
Stem a Finnish word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
step3_success = False
r1, r2 = self._r1r2_standard(word, self.__vowels)
# STEP 1: Particles etc.
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if suffix == "sti":
if suffix in r2:
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
else:
if word[-len(suffix) - 1] in u("ntaeiouy\xE4\xF6"):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 2: Possessives
for suffix in self.__step2_suffixes:
if r1.endswith(suffix):
if suffix == "si":
if word[-3] != "k":
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix == "ni":
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
if word.endswith("kse"):
word = "".join((word[:-3], "ksi"))
if r1.endswith("kse"):
r1 = "".join((r1[:-3], "ksi"))
if r2.endswith("kse"):
r2 = "".join((r2[:-3], "ksi"))
elif suffix == "an":
if (word[-4:-2] in ("ta", "na") or
word[-5:-2] in ("ssa", "sta", "lla", "lta")):
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix == u("\xE4n"):
if (word[-4:-2] in (u("t\xE4"), u("n\xE4")) or
word[-5:-2] in (u("ss\xE4"), u("st\xE4"),
u("ll\xE4"), u("lt\xE4"))):
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix == "en":
if word[-5:-2] in ("lle", "ine"):
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
else:
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
break
# STEP 3: Cases
for suffix in self.__step3_suffixes:
if r1.endswith(suffix):
if suffix in ("han", "hen", "hin", "hon", u("h\xE4n"),
u("h\xF6n")):
if ((suffix == "han" and word[-4] == "a") or
(suffix == "hen" and word[-4] == "e") or
(suffix == "hin" and word[-4] == "i") or
(suffix == "hon" and word[-4] == "o") or
(suffix == u("h\xE4n") and word[-4] == u("\xE4")) or
(suffix == u("h\xF6n") and word[-4] == u("\xF6"))):
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
step3_success = True
elif suffix in ("siin", "den", "tten"):
if (word[-len(suffix) - 1] == "i" and
word[-len(suffix) - 2] in self.__restricted_vowels):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
step3_success = True
else:
continue
elif suffix == "seen":
if word[-6:-4] in self.__long_vowels:
word = word[:-4]
r1 = r1[:-4]
r2 = r2[:-4]
step3_success = True
else:
continue
elif suffix in ("a", u("\xE4")):
if word[-2] in self.__vowels and word[-3] in self.__consonants:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
step3_success = True
elif suffix in ("tta", u("tt\xE4")):
if word[-4] == "e":
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
step3_success = True
elif suffix == "n":
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
step3_success = True
if word[-2:] == "ie" or word[-2:] in self.__long_vowels:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
step3_success = True
break
# STEP 4: Other endings
for suffix in self.__step4_suffixes:
if r2.endswith(suffix):
if suffix in ("mpi", "mpa", u("mp\xE4"), "mmi", "mma",
u("mm\xE4")):
if word[-5:-3] != "po":
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 5: Plurals
if step3_success and len(r1) >= 1 and r1[-1] in "ij":
word = word[:-1]
r1 = r1[:-1]
elif (not step3_success and len(r1) >= 2 and
r1[-1] == "t" and r1[-2] in self.__vowels):
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
if r2.endswith("imma"):
word = word[:-4]
r1 = r1[:-4]
elif r2.endswith("mma") and r2[-5:-3] != "po":
word = word[:-3]
r1 = r1[:-3]
# STEP 6: Tidying up
if r1[-2:] in self.__long_vowels:
word = word[:-1]
r1 = r1[:-1]
if (len(r1) >= 2 and r1[-2] in self.__consonants and
r1[-1] in u("a\xE4ei")):
word = word[:-1]
r1 = r1[:-1]
if r1.endswith(("oj", "uj")):
word = word[:-1]
r1 = r1[:-1]
if r1.endswith("jo"):
word = word[:-1]
r1 = r1[:-1]
# If the word ends with a double consonant
# followed by zero or more vowels, the last consonant is removed.
for i in range(1, len(word)):
if word[-i] in self.__vowels:
continue
else:
if i == 1:
if word[-i - 1:] in self.__double_consonants:
word = word[:-1]
else:
if word[-i - 1:-i + 1] in self.__double_consonants:
word = "".join((word[:-i], word[-i + 1:]))
break
return word
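# A minimal usage sketch (hypothetical example): stem a few Finnish surface
# forms; the exact stems are whatever the algorithm above produces.
def _example_finnish_stemmer():
    stemmer = FinnishStemmer()
    return [stemmer.stem(w) for w in
            (u("kirjoittaminen"), u("kirjoittamisesta"), u("talossa"))]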
| mit |
xindus40223115/w17_test | static/Brython3.1.3-20150514-095342/Lib/textwrap.py | 745 | 16488 | """Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <[email protected]>
import re
__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent', 'indent']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 0 .. 'tabsize' spaces, depending on its position
in its line. If false, each tab is treated as a single character.
tabsize (default: 8)
Expand tabs in input text to 0 .. 'tabsize' spaces, unless
'expand_tabs' is false.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
break_on_hyphens (default: true)
Allow breaking hyphenated words. If true, wrapping will occur
preferably on whitespaces and right after hyphens part of
compound words.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
"""
unicode_whitespace_trans = {}
uspace = ord(' ')
for x in _whitespace:
unicode_whitespace_trans[ord(x)] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
# This less funky little regex just split on recognized spaces. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
wordsep_simple_re = re.compile(r'(\s+)')
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[a-z]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
r'\Z') # end of chunk
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True,
drop_whitespace=True,
break_on_hyphens=True,
tabsize=8):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
self.drop_whitespace = drop_whitespace
self.break_on_hyphens = break_on_hyphens
self.tabsize = tabsize
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs(self.tabsize)
if self.replace_whitespace:
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', option!'
otherwise.
"""
if self.break_on_hyphens is True:
chunks = self.wordsep_re.split(text)
else:
chunks = self.wordsep_simple_re.split(text)
chunks = [c for c in chunks if c]
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
patsearch = self.sentence_end_re.search
while i < len(chunks)-1:
if chunks[i+1] == " " and patsearch(chunks[i]):
chunks[i+1] = " "
i += 2
else:
i += 1
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
# Arrange in reverse order so items can be efficiently popped
        # from a stack of chunks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
# If the last chunk on this line is all whitespace, drop it.
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
# Convert current line back to a string and store it in list
# of all lines (return value).
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
text = self._munge_whitespace(text)
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
"""Wrap a single paragraph of text, returning a list of wrapped lines.
Reformat the single paragraph in 'text' so it fits in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See TextWrapper class for available keyword args to customize
wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
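def _example_wrap():
    # A short, hypothetical usage sketch of the convenience helper above.
    text = "The quick brown fox jumps over the lazy dog"
    assert wrap(text, width=15) == ['The quick brown',
                                    'fox jumps over',
                                    'the lazy dog']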
def fill(text, width=70, **kwargs):
"""Fill a single paragraph of text, returning a new string.
Reformat the single paragraph in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped paragraph. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See TextWrapper class for
available keyword args to customize wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.fill(text)
# -- Loosely related functionality -------------------------------------
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
"""Remove any common leading whitespace from every line in `text`.
This can be used to make triple-quoted strings line up with the left
edge of the display, while still presenting them in the source code
in indented form.
Note that tabs and spaces are both treated as whitespace, but they
are not equal: the lines " hello" and "\thello" are
considered to have no common leading whitespace. (This behaviour is
new in Python 2.5; older versions of this module incorrectly
expanded tabs before searching for common leading whitespace.)
"""
# Look for the longest leading string of spaces and tabs common to
# all lines.
margin = None
text = _whitespace_only_re.sub('', text)
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
# Current line and previous winner have no common whitespace:
# there is no margin.
else:
margin = ""
break
# sanity check (testing/debugging only)
if 0 and margin:
for line in text.split("\n"):
assert not line or line.startswith(margin), \
"line = %r, margin = %r" % (line, margin)
if margin:
text = re.sub(r'(?m)^' + margin, '', text)
return text
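def _example_dedent():
    # Hypothetical usage sketch: the four common leading spaces are removed,
    # while the extra two on the second line are kept.
    assert dedent("    hello\n      world\n") == "hello\n  world\n"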
def indent(text, prefix, predicate=None):
"""Adds 'prefix' to the beginning of selected lines in 'text'.
If 'predicate' is provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters.
"""
if predicate is None:
def predicate(line):
return line.strip()
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if predicate(line) else line)
return ''.join(prefixed_lines())
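# A small doctest-style sketch of indent() as implemented above (values chosen
# for illustration, not part of the original module):
#
#   >>> indent("hello\nworld\n", "    ")
#   '    hello\n    world\n'
#   >>> indent("a\n\nb\n", "+ ", predicate=lambda line: True)
#   '+ a\n+ \n+ b\n'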
if __name__ == "__main__":
#print dedent("\tfoo\n\tbar")
#print dedent(" \thello there\n \t how are you?")
print(dedent("Hello there.\n This is indented."))
| gpl-3.0 |
micwypych/github-cmake-project-checker | project_checker/tests/checkhomeworkfromconfig.py | 1 | 1133 | from unittest import TestCase
from unittest.mock import MagicMock
from project_checker.checker.pull_all_links import check_homework_by_configuration
from project_checker.checker.project.config import Config
class StubConfig(Config):
def __init__(self, lines):
self.load = lambda *args: None
project_stub = MagicMock(owners=['200100'], to_result_raniking_lines=lambda *args: 'ok;ok;0;0;ok')
self.student_projects = lambda *args: [project_stub]
self.groups = {'200100': '1c'}
self.deadlines = {'1c': ['2000-01-01 12:34']}
self.homework = MagicMock(list=lambda *args: ['lab1_ex1', 'lab1_ex2'])
file_stub = MagicMock(write=lambda txt: lines.append(txt))
self.parent_directory = MagicMock(open=lambda *args: file_stub)
class CheckAllHomeworkTest(TestCase):
def test_single_project_with_one_owner_five_ranking_tasks(self):
lines = []
# prj.Config.__init__ = lambda s, *directory: StubConfig.__init__(s, lines)
check_homework_by_configuration(StubConfig(lines), pull_new_version=True)
self.assertIn('200100\tok;ok;0;0;ok', lines)
| mit |
flar2/m8-GPE-4.4.3 | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='[email protected]',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
CapOM/ChromiumGStreamerBackend | testing/scripts/checklicenses.py | 76 | 1036 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import common
def main_run(args):
with common.temporary_file() as tempfile_path:
rc = common.run_command([
os.path.join(common.SRC_DIR, 'tools', 'checklicenses',
'checklicenses.py'),
'--json', tempfile_path
])
with open(tempfile_path) as f:
checklicenses_results = json.load(f)
result_set = set()
for result in checklicenses_results:
result_set.add((result['filename'], result['license']))
json.dump({
'valid': True,
'failures': ['%s: %s' % (r[0], r[1]) for r in result_set],
}, args.output)
return rc
def main_compile_targets(args):
json.dump([], args.output)
if __name__ == '__main__':
funcs = {
'run': main_run,
'compile_targets': main_compile_targets,
}
sys.exit(common.run_script(sys.argv[1:], funcs))
| bsd-3-clause |
sou81821/chainer | examples/modelzoo/download_model.py | 28 | 1047 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import six
parser = argparse.ArgumentParser(
description='Download a Caffe reference model')
parser.add_argument('model_type', choices=('alexnet', 'caffenet', 'googlenet'),
help='Model type (alexnet, caffenet, googlenet)')
args = parser.parse_args()
if args.model_type == 'alexnet':
url = 'http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel'
name = 'bvlc_alexnet.caffemodel'
elif args.model_type == 'caffenet':
url = 'http://dl.caffe.berkeleyvision.org/' \
'bvlc_reference_caffenet.caffemodel'
name = 'bvlc_reference_caffenet.caffemodel'
elif args.model_type == 'googlenet':
url = 'http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel'
name = 'bvlc_googlenet.caffemodel'
else:
raise RuntimeError('Invalid model type. Choose from '
'alexnet, caffenet, and googlenet.')
print('Downloading model file...')
six.moves.urllib.request.urlretrieve(url, name)
print('Done')
| mit |
loulich/Couchpotato | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/adultswim.py | 22 | 6720 | # coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
xpath_text,
float_or_none,
)
class AdultSwimIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?adultswim\.com/videos/(?P<is_playlist>playlists/)?(?P<show_path>[^/]+)/(?P<episode_path>[^/?#]+)/?'
_TESTS = [{
'url': 'http://adultswim.com/videos/rick-and-morty/pilot',
'playlist': [
{
'md5': '247572debc75c7652f253c8daa51a14d',
'info_dict': {
'id': 'rQxZvXQ4ROaSOqq-or2Mow-0',
'ext': 'flv',
'title': 'Rick and Morty - Pilot Part 1',
'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
},
},
{
'md5': '77b0e037a4b20ec6b98671c4c379f48d',
'info_dict': {
'id': 'rQxZvXQ4ROaSOqq-or2Mow-3',
'ext': 'flv',
'title': 'Rick and Morty - Pilot Part 4',
'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
},
},
],
'info_dict': {
'title': 'Rick and Morty - Pilot',
'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
}
}, {
'url': 'http://www.adultswim.com/videos/playlists/american-parenting/putting-francine-out-of-business/',
'playlist': [
{
'md5': '2eb5c06d0f9a1539da3718d897f13ec5',
'info_dict': {
'id': '-t8CamQlQ2aYZ49ItZCFog-0',
'ext': 'flv',
'title': 'American Dad - Putting Francine Out of Business',
'description': 'Stan hatches a plan to get Francine out of the real estate business.Watch more American Dad on [adult swim].'
},
}
],
'info_dict': {
'title': 'American Dad - Putting Francine Out of Business',
'description': 'Stan hatches a plan to get Francine out of the real estate business.Watch more American Dad on [adult swim].'
},
}]
@staticmethod
def find_video_info(collection, slug):
for video in collection.get('videos'):
if video.get('slug') == slug:
return video
@staticmethod
def find_collection_by_linkURL(collections, linkURL):
for collection in collections:
if collection.get('linkURL') == linkURL:
return collection
@staticmethod
def find_collection_containing_video(collections, slug):
for collection in collections:
for video in collection.get('videos'):
if video.get('slug') == slug:
return collection, video
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
show_path = mobj.group('show_path')
episode_path = mobj.group('episode_path')
is_playlist = True if mobj.group('is_playlist') else False
webpage = self._download_webpage(url, episode_path)
# Extract the value of `bootstrappedData` from the Javascript in the page.
bootstrappedDataJS = self._search_regex(r'var bootstrappedData = ({.*});', webpage, episode_path)
try:
bootstrappedData = json.loads(bootstrappedDataJS)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % episode_path
raise ExtractorError(errmsg, cause=ve)
# Downloading videos from a /videos/playlist/ URL needs to be handled differently.
# NOTE: We are only downloading one video (the current one) not the playlist
if is_playlist:
collections = bootstrappedData['playlists']['collections']
collection = self.find_collection_by_linkURL(collections, show_path)
video_info = self.find_video_info(collection, episode_path)
show_title = video_info['showTitle']
segment_ids = [video_info['videoPlaybackID']]
else:
collections = bootstrappedData['show']['collections']
collection, video_info = self.find_collection_containing_video(collections, episode_path)
show = bootstrappedData['show']
show_title = show['title']
segment_ids = [clip['videoPlaybackID'] for clip in video_info['clips']]
episode_id = video_info['id']
episode_title = video_info['title']
episode_description = video_info['description']
episode_duration = video_info.get('duration')
entries = []
for part_num, segment_id in enumerate(segment_ids):
segment_url = 'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=mobile' % segment_id
segment_title = '%s - %s' % (show_title, episode_title)
if len(segment_ids) > 1:
segment_title += ' Part %d' % (part_num + 1)
idoc = self._download_xml(
segment_url, segment_title,
'Downloading segment information', 'Unable to download segment information')
segment_duration = float_or_none(
xpath_text(idoc, './/trt', 'segment duration').strip())
formats = []
file_els = idoc.findall('.//files/file')
for file_el in file_els:
bitrate = file_el.attrib.get('bitrate')
ftype = file_el.attrib.get('type')
formats.append({
'format_id': '%s_%s' % (bitrate, ftype),
'url': file_el.text.strip(),
# The bitrate may not be a number (for example: 'iphone')
'tbr': int(bitrate) if bitrate.isdigit() else None,
'quality': 1 if ftype == 'hd' else -1
})
self._sort_formats(formats)
entries.append({
'id': segment_id,
'title': segment_title,
'formats': formats,
'duration': segment_duration,
'description': episode_description
})
return {
'_type': 'playlist',
'id': episode_id,
'display_id': episode_path,
'entries': entries,
'title': '%s - %s' % (show_title, episode_title),
'description': episode_description,
'duration': episode_duration
}
| gpl-3.0 |
liorvh/infernal-twin | airgraph-ng/graphviz/libDumpParse.py | 78 | 6884 | #!/usr/bin/python
#airodump parsing lib
#returns in an array of client and Ap information
#part of the airdrop-ng project
from sys import exit as Exit
class airDumpParse:
def parser(self,file):
"""
One Function to call to parse a file and return the information
"""
self.capr = None
self.NAP = None
self.NA = None
self.apDict = None
self.clientDict = None
fileOpenResults = self.airDumpOpen(file)
self.airDumpParse(fileOpenResults)
self.clientApChannelRelationship()
return {'NA':self.NA,'capr':self.capr,'apDict':self.apDict,
'clientDict':self.clientDict,'NAP':self.NAP}
def airDumpOpen(self,file):
"""
Takes one argument (the input file) and opens it for reading
Returns a list full of data
"""
try:
openedFile = open(file, "r")
except IOError:
print "Error Airodump File",file,"does not exist"
Exit(1)
data = openedFile.xreadlines()
cleanedData = []
for line in data:
cleanedData.append(line.rstrip())
openedFile.close()
return cleanedData
def airDumpParse(self,cleanedDump):
"""
Function takes parsed dump file list and does some more cleaning.
Returns a list of 2 dictionaries (Clients and APs)
"""
		try: #some very basic error handling to make sure they are loading up the correct file
try:
apStart = cleanedDump.index('BSSID, First time seen, Last time seen, Channel, Speed, Privacy, Power, # beacons, # data, LAN IP, ESSID')
except Exception:
apStart = cleanedDump.index('BSSID, First time seen, Last time seen, channel, Speed, Privacy, Cipher, Authentication, Power, # beacons, # IV, LAN IP, ID-length, ESSID, Key')
del cleanedDump[apStart] #remove the first line of text with the headings
try:
stationStart = cleanedDump.index('Station MAC, First time seen, Last time seen, Power, # packets, BSSID, Probed ESSIDs')
except Exception:
stationStart = cleanedDump.index('Station MAC, First time seen, Last time seen, Power, # packets, BSSID, ESSID')
except Exception:
print "You Seem to have provided an improper input file please make sure you are loading an airodump txt file and not a pcap"
Exit(1)
del cleanedDump[stationStart] #Remove the heading line
clientList = cleanedDump[stationStart:] #Splits all client data into its own list
del cleanedDump[stationStart:] #The remaining list is all of the AP information
self.apDict = self.apTag(cleanedDump)
self.clientDict = self.clientTag(clientList)
return
def apTag(self,devices):
"""
Create a ap dictionary with tags of the data type on an incoming list
"""
dict = {}
for entry in devices:
ap = {}
string_list = entry.split(',')
			#this builds a dict from our list so we don't have to do position calls later
len(string_list)
if len(string_list) == 15:
ap = {"bssid":string_list[0].replace(' ',''),
"fts":string_list[1],
"lts":string_list[2],
"channel":string_list[3].replace(' ',''),
"speed":string_list[4],
"privacy":string_list[5].replace(' ',''),
"cipher":string_list[6],
"auth":string_list[7],
"power":string_list[8],
"beacons":string_list[9],
"iv":string_list[10],
"ip":string_list[11],
"id":string_list[12],
"essid":string_list[13][1:],
"key":string_list[14]}
elif len(string_list) == 11:
ap = {"bssid":string_list[0].replace(' ',''),
"fts":string_list[1],
"lts":string_list[2],
"channel":string_list[3].replace(' ',''),
"speed":string_list[4],
"privacy":string_list[5].replace(' ',''),
"power":string_list[6],
"beacons":string_list[7],
"data":string_list[8],
"ip":string_list[9],
"essid":string_list[10][1:]}
if len(ap) != 0:
dict[string_list[0]] = ap
return dict
def clientTag(self,devices):
"""
Create a client dictionary with tags of the data type on an incoming list
"""
dict = {}
for entry in devices:
client = {}
string_list = entry.split(',')
if len(string_list) >= 7:
client = {"station":string_list[0].replace(' ',''),
"fts":string_list[1],
"lts":string_list[2],
"power":string_list[3],
"packets":string_list[4],
"bssid":string_list[5].replace(' ',''),
"probe":string_list[6:][0:]}
if len(client) != 0:
dict[string_list[0]] = client
return dict
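	# Illustrative sketch only: a single entry of the dictionary returned by
	# clientTag() looks roughly like the following (field values are made up,
	# not taken from a real airodump capture):
	#
	#   {'00:11:22:33:44:55': {'station': '00:11:22:33:44:55',
	#                          'fts': ' 2014-01-01 12:00:00',
	#                          'lts': ' 2014-01-01 12:05:00',
	#                          'power': ' -60',
	#                          'packets': ' 120',
	#                          'bssid': 'AA:BB:CC:DD:EE:FF',
	#                          'probe': [' ExampleSSID']}}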
def clientApChannelRelationship(self):
"""
parse the dic for the relationships of client to ap
in the process also populate list of
"""
clients = self.clientDict
AP = self.apDict
		NA = [] #create a var to keep the not associated clients' MACs
		NAP = [] #create a var to keep track of associated clients' MACs to APs we can't see
		apCount = {} #count number of APs; a dict is faster than a list, stored as BSSID:number of essids
apClient = {} #dict that stores bssid and clients as a nested list
for key in (clients):
mac = clients[key] #mac is the MAC address of the client
if mac["bssid"] != ' (notassociated) ': #one line of of our dictionary of clients
if AP.has_key(mac["bssid"]): # if it is check to see its an AP we can see and have info on
if apClient.has_key(mac["bssid"]):
apClient[mac["bssid"]].extend([key]) #if key exists append new client
else:
apClient[mac["bssid"]] = [key] #create new key and append the client
else: NAP.append(key) # stores the clients that are talking to an access point we cant see
			else: NA.append(key) #stores the lines of the not associated APs in a list
self.NAP = NAP
self.NA = NA
self.capr = apClient
return
| gpl-3.0 |
wpjesus/codematch | ietf/meeting/tests_views.py | 1 | 16225 | import os
import shutil
import datetime
import urlparse
from django.core.urlresolvers import reverse as urlreverse
from django.conf import settings
from pyquery import PyQuery
from ietf.doc.models import Document
from ietf.meeting.models import Session, TimeSlot
from ietf.meeting.test_data import make_meeting_test_data
from ietf.utils.test_utils import TestCase, login_testing_unauthorized, unicontent
class MeetingTests(TestCase):
def setUp(self):
self.materials_dir = os.path.abspath(settings.TEST_MATERIALS_DIR)
if not os.path.exists(self.materials_dir):
os.mkdir(self.materials_dir)
settings.AGENDA_PATH = self.materials_dir
def tearDown(self):
shutil.rmtree(self.materials_dir)
def write_materials_file(self, meeting, doc, content):
path = os.path.join(self.materials_dir, "%s/%s/%s" % (meeting.number, doc.type_id, doc.external_url))
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(path, "w") as f:
f.write(content)
def write_materials_files(self, meeting, session):
draft = Document.objects.filter(type="draft", group=session.group).first()
self.write_materials_file(meeting, session.materials.get(type="agenda"),
"1. WG status (15 minutes)\n\n2. Status of %s\n\n" % draft.name)
self.write_materials_file(meeting, session.materials.get(type="minutes"),
"1. More work items underway\n\n2. The draft will be finished before next meeting\n\n")
self.write_materials_file(meeting, session.materials.filter(type="slides").exclude(states__type__slug='slides',states__slug='deleted').first(),
"This is a slideshow")
def test_agenda(self):
meeting = make_meeting_test_data()
session = Session.objects.filter(meeting=meeting, group__acronym="mars").first()
slot = TimeSlot.objects.get(sessionassignments__session=session)
self.write_materials_files(meeting, session)
time_interval = "%s-%s" % (slot.time.strftime("%H:%M").lstrip("0"), (slot.time + slot.duration).strftime("%H:%M").lstrip("0"))
# plain
r = self.client.get(urlreverse("ietf.meeting.views.agenda", kwargs=dict(num=meeting.number)))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
agenda_content = q("#content").html()
self.assertTrue(session.group.acronym in agenda_content)
self.assertTrue(session.group.name in agenda_content)
self.assertTrue(session.group.parent.acronym.upper() in agenda_content)
self.assertTrue(slot.location.name in agenda_content)
self.assertTrue(time_interval in agenda_content)
# Make sure there's a frame for the agenda and it points to the right place
self.assertTrue(any([session.materials.get(type='agenda').href() in x.attrib["data-src"] for x in q('tr div.modal-body div.frame')]))
# Make sure undeleted slides are present and deleted slides are not
self.assertTrue(any([session.materials.filter(type='slides').exclude(states__type__slug='slides',states__slug='deleted').first().title in x.text for x in q('tr div.modal-body ul a')]))
self.assertFalse(any([session.materials.filter(type='slides',states__type__slug='slides',states__slug='deleted').first().title in x.text for x in q('tr div.modal-body ul a')]))
# text
# the rest of the results don't have as nicely formatted times
time_interval = time_interval.replace(":", "")
r = self.client.get(urlreverse("ietf.meeting.views.agenda", kwargs=dict(num=meeting.number, ext=".txt")))
self.assertEqual(r.status_code, 200)
agenda_content = r.content
self.assertTrue(session.group.acronym in agenda_content)
self.assertTrue(session.group.name in agenda_content)
self.assertTrue(session.group.parent.acronym.upper() in agenda_content)
self.assertTrue(slot.location.name in agenda_content)
self.assertTrue(time_interval in agenda_content)
# CSV
r = self.client.get(urlreverse("ietf.meeting.views.agenda", kwargs=dict(num=meeting.number, ext=".csv")))
self.assertEqual(r.status_code, 200)
agenda_content = r.content
self.assertTrue(session.group.acronym in agenda_content)
self.assertTrue(session.group.name in agenda_content)
self.assertTrue(session.group.parent.acronym.upper() in agenda_content)
self.assertTrue(slot.location.name in agenda_content)
self.assertTrue(session.materials.get(type='agenda').external_url in unicontent(r))
self.assertTrue(session.materials.filter(type='slides').exclude(states__type__slug='slides',states__slug='deleted').first().external_url in unicontent(r))
self.assertFalse(session.materials.filter(type='slides',states__type__slug='slides',states__slug='deleted').first().external_url in unicontent(r))
# iCal
r = self.client.get(urlreverse("ietf.meeting.views.ical_agenda", kwargs=dict(num=meeting.number))
+ "?" + session.group.parent.acronym.upper())
self.assertEqual(r.status_code, 200)
agenda_content = r.content
self.assertTrue(session.group.acronym in agenda_content)
self.assertTrue(session.group.name in agenda_content)
self.assertTrue(slot.location.name in agenda_content)
self.assertTrue("BEGIN:VTIMEZONE" in agenda_content)
self.assertTrue("END:VTIMEZONE" in agenda_content)
self.assertTrue(session.agenda().get_absolute_url() in unicontent(r))
self.assertTrue(session.materials.filter(type='slides').exclude(states__type__slug='slides',states__slug='deleted').first().get_absolute_url() in unicontent(r))
# TODO - the ics view uses .all on a queryset in a view so it's showing the deleted slides.
#self.assertFalse(session.materials.filter(type='slides',states__type__slug='slides',states__slug='deleted').first().get_absolute_url() in unicontent(r))
# week view
r = self.client.get(urlreverse("ietf.meeting.views.week_view", kwargs=dict(num=meeting.number)))
self.assertEqual(r.status_code, 200)
agenda_content = r.content
self.assertTrue(session.group.acronym in agenda_content)
self.assertTrue(slot.location.name in agenda_content)
def test_agenda_by_room(self):
meeting = make_meeting_test_data()
url = urlreverse("ietf.meeting.views.agenda_by_room",kwargs=dict(num=meeting.number))
login_testing_unauthorized(self,"secretary",url)
r = self.client.get(url)
self.assertTrue(all([x in unicontent(r) for x in ['mars','IESG Breakfast','Test Room','Breakfast Room']]))
def test_agenda_by_type(self):
meeting = make_meeting_test_data()
url = urlreverse("ietf.meeting.views.agenda_by_type",kwargs=dict(num=meeting.number))
login_testing_unauthorized(self,"secretary",url)
r = self.client.get(url)
self.assertTrue(all([x in unicontent(r) for x in ['mars','IESG Breakfast','Test Room','Breakfast Room']]))
url = urlreverse("ietf.meeting.views.agenda_by_type",kwargs=dict(num=meeting.number,type='session'))
r = self.client.get(url)
self.assertTrue(all([x in unicontent(r) for x in ['mars','Test Room']]))
self.assertFalse(any([x in unicontent(r) for x in ['IESG Breakfast','Breakfast Room']]))
url = urlreverse("ietf.meeting.views.agenda_by_type",kwargs=dict(num=meeting.number,type='lead'))
r = self.client.get(url)
self.assertFalse(any([x in unicontent(r) for x in ['mars','Test Room']]))
self.assertTrue(all([x in unicontent(r) for x in ['IESG Breakfast','Breakfast Room']]))
def test_agenda_room_view(self):
meeting = make_meeting_test_data()
url = urlreverse("ietf.meeting.views.room_view",kwargs=dict(num=meeting.number))
login_testing_unauthorized(self,"secretary",url)
r = self.client.get(url)
self.assertTrue(all([x in unicontent(r) for x in ['mars','IESG Breakfast','Test Room','Breakfast Room']]))
def test_session_details(self):
meeting = make_meeting_test_data()
url = urlreverse("ietf.meeting.views.session_details", kwargs=dict(num=meeting.number, acronym="mars"))
r = self.client.get(url)
self.assertTrue(all([x in unicontent(r) for x in ('slides','agenda','minutes')]))
self.assertFalse('deleted' in unicontent(r))
def test_materials(self):
meeting = make_meeting_test_data()
session = Session.objects.filter(meeting=meeting, group__acronym="mars").first()
self.write_materials_files(meeting, session)
# session agenda
r = self.client.get(urlreverse("ietf.meeting.views.session_agenda",
kwargs=dict(num=meeting.number, session=session.group.acronym)))
self.assertEqual(r.status_code, 200)
self.assertTrue("1. WG status" in unicontent(r))
# early materials page
r = self.client.get(urlreverse("ietf.meeting.views.current_materials"))
self.assertEqual(r.status_code, 302)
self.assertTrue(meeting.number in r["Location"])
r = self.client.get(urlreverse("ietf.meeting.views.materials", kwargs=dict(meeting_num=meeting.number)))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
row = q('#content td div:contains("%s")' % str(session.group.acronym)).closest("tr")
self.assertTrue(row.find('a:contains("Agenda")'))
self.assertTrue(row.find('a:contains("Minutes")'))
self.assertTrue(row.find('a:contains("Slideshow")'))
self.assertFalse(row.find("a:contains(\"Bad Slideshow\")"))
# FIXME: missing tests of .pdf/.tar generation (some code can
# probably be lifted from similar tests in iesg/tests.py)
def test_feed(self):
meeting = make_meeting_test_data()
session = Session.objects.filter(meeting=meeting, group__acronym="mars").first()
r = self.client.get("/feed/wg-proceedings/")
self.assertEqual(r.status_code, 200)
self.assertTrue("agenda" in unicontent(r))
self.assertTrue(session.group.acronym in unicontent(r))
class EditTests(TestCase):
def setUp(self):
# make sure we have the colors of the area
from ietf.group.colors import fg_group_colors, bg_group_colors
area_upper = "FARFUT"
fg_group_colors[area_upper] = "#333"
bg_group_colors[area_upper] = "#aaa"
def test_edit_agenda(self):
meeting = make_meeting_test_data()
self.client.login(username="secretary", password="secretary+password")
r = self.client.get(urlreverse("ietf.meeting.views.edit_agenda", kwargs=dict(num=meeting.number)))
self.assertEqual(r.status_code, 200)
self.assertTrue("load_assignments" in unicontent(r))
def test_save_agenda_as_and_read_permissions(self):
meeting = make_meeting_test_data()
# try to get non-existing agenda
url = urlreverse("ietf.meeting.views.edit_agenda", kwargs=dict(num=meeting.number,
owner=meeting.agenda.owner_email(),
name="foo"))
r = self.client.get(url)
self.assertEqual(r.status_code, 404)
# save as new name (requires valid existing agenda)
url = urlreverse("ietf.meeting.views.edit_agenda", kwargs=dict(num=meeting.number,
owner=meeting.agenda.owner_email(),
name=meeting.agenda.name))
self.client.login(username="ad", password="ad+password")
r = self.client.post(url, {
'savename': "foo",
'saveas': "saveas",
})
self.assertEqual(r.status_code, 302)
# Verify that we actually got redirected to a new place.
self.assertNotEqual(urlparse.urlparse(r.url).path, url)
# get
schedule = meeting.get_schedule_by_name("foo")
url = urlreverse("ietf.meeting.views.edit_agenda", kwargs=dict(num=meeting.number,
owner=schedule.owner_email(),
name="foo"))
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
schedule.visible = True
schedule.public = False
schedule.save()
# get as anonymous doesn't work
self.client.logout()
r = self.client.get(url)
self.assertEqual(r.status_code, 403)
# public, now anonymous works
schedule.public = True
schedule.save()
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
# Secretariat can always see it
schedule.visible = False
schedule.public = False
schedule.save()
self.client.login(username="secretary", password="secretary+password")
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
def test_save_agenda_broken_names(self):
meeting = make_meeting_test_data()
# save as new name (requires valid existing agenda)
url = urlreverse("ietf.meeting.views.edit_agenda", kwargs=dict(num=meeting.number,
owner=meeting.agenda.owner_email(),
name=meeting.agenda.name))
self.client.login(username="ad", password="ad+password")
r = self.client.post(url, {
'savename': "/no/this/should/not/work/it/is/too/long",
'saveas': "saveas",
})
self.assertEqual(r.status_code, 302)
self.assertEqual(urlparse.urlparse(r.url).path, url)
# TODO: Verify that an error message was in fact returned.
r = self.client.post(url, {
'savename': "/invalid/chars/",
'saveas': "saveas",
})
# TODO: Verify that an error message was in fact returned.
self.assertEqual(r.status_code, 302)
self.assertEqual(urlparse.urlparse(r.url).path, url)
# Non-ASCII alphanumeric characters
r = self.client.post(url, {
'savename': u"f\u00E9ling",
'saveas': "saveas",
})
# TODO: Verify that an error message was in fact returned.
self.assertEqual(r.status_code, 302)
self.assertEqual(urlparse.urlparse(r.url).path, url)
def test_edit_timeslots(self):
meeting = make_meeting_test_data()
self.client.login(username="secretary", password="secretary+password")
r = self.client.get(urlreverse("ietf.meeting.views.edit_timeslots", kwargs=dict(num=meeting.number)))
self.assertEqual(r.status_code, 200)
self.assertTrue(meeting.room_set.all().first().name in unicontent(r))
def test_slot_to_the_right(self):
meeting = make_meeting_test_data()
session = Session.objects.filter(meeting=meeting, group__acronym="mars").first()
mars_scheduled = session.timeslotassignments.get()
mars_slot = TimeSlot.objects.get(sessionassignments__session=session)
mars_ends = mars_slot.time + mars_slot.duration
session = Session.objects.filter(meeting=meeting, group__acronym="ames").first()
ames_slot_qs = TimeSlot.objects.filter(sessionassignments__session=session)
ames_slot_qs.update(time=mars_ends + datetime.timedelta(seconds=11 * 60))
self.assertTrue(not mars_slot.slot_to_the_right)
self.assertTrue(not mars_scheduled.slot_to_the_right)
ames_slot_qs.update(time=mars_ends + datetime.timedelta(seconds=10 * 60))
self.assertTrue(mars_slot.slot_to_the_right)
self.assertTrue(mars_scheduled.slot_to_the_right)
| bsd-3-clause |
mjirayu/sit_academy | lms/djangoapps/courseware/features/lti.py | 56 | 14199 | # pylint: disable=missing-docstring
import datetime
import os
import pytz
from django.conf import settings
from mock import patch
from pytz import UTC
from splinter.exceptions import ElementDoesNotExist
from selenium.common.exceptions import NoAlertPresentException
from nose.tools import assert_true, assert_equal, assert_in, assert_is_none
from lettuce import world, step
from courseware.tests.factories import InstructorFactory, BetaTesterFactory
from courseware.access import has_access
from student.tests.factories import UserFactory
from common import visit_scenario_item
TEST_COURSE_NAME = "test_course_a"
@step('I view the LTI and error is shown$')
def lti_is_not_rendered(_step):
# error is shown
assert world.is_css_present('.error_message', wait_time=0)
# iframe is not presented
assert not world.is_css_present('iframe', wait_time=0)
# link is not presented
assert not world.is_css_present('.link_lti_new_window', wait_time=0)
def check_lti_iframe_content(text):
# inside iframe test content is presented
location = world.scenario_dict['LTI'].location.html_id()
iframe_name = 'ltiFrame-' + location
with world.browser.get_iframe(iframe_name) as iframe:
# iframe does not contain functions from terrain/ui_helpers.py
assert iframe.is_element_present_by_css('.result', wait_time=0)
assert (text == world.retry_on_exception(
lambda: iframe.find_by_css('.result')[0].text,
max_attempts=5
))
@step('I view the LTI and it is rendered in (.*)$')
def lti_is_rendered(_step, rendered_in):
if rendered_in.strip() == 'iframe':
world.wait_for_present('iframe')
assert world.is_css_present('iframe', wait_time=2)
assert not world.is_css_present('.link_lti_new_window', wait_time=0)
assert not world.is_css_present('.error_message', wait_time=0)
# iframe is visible
assert world.css_visible('iframe')
check_lti_iframe_content("This is LTI tool. Success.")
elif rendered_in.strip() == 'new page':
assert not world.is_css_present('iframe', wait_time=2)
assert world.is_css_present('.link_lti_new_window', wait_time=0)
assert not world.is_css_present('.error_message', wait_time=0)
click_and_check_lti_popup()
else: # incorrect rendered_in parameter
assert False
@step('I view the permission alert$')
def view_lti_permission_alert(_step):
assert not world.is_css_present('iframe', wait_time=2)
assert world.is_css_present('.link_lti_new_window', wait_time=0)
assert not world.is_css_present('.error_message', wait_time=0)
world.css_find('.link_lti_new_window').first.click()
alert = world.browser.get_alert()
assert alert is not None
assert len(world.browser.windows) == 1
def check_no_alert():
"""
Make sure the alert has gone away.
Note that the splinter documentation indicates that
get_alert should return None if no alert is present,
however that is not the case. Instead a
NoAlertPresentException is raised.
"""
try:
assert_is_none(world.browser.get_alert())
except NoAlertPresentException:
pass
@step('I accept the permission alert and view the LTI$')
def accept_lti_permission_alert(_step):
parent_window = world.browser.current_window # Save the parent window
# To start with you should only have one window/tab
assert len(world.browser.windows) == 1
alert = world.browser.get_alert()
alert.accept()
check_no_alert()
# Give it a few seconds for the LTI window to appear
world.wait_for(
lambda _: len(world.browser.windows) == 2,
timeout=5,
timeout_msg="Timed out waiting for the LTI window to appear."
)
# Verify the LTI window
check_lti_popup(parent_window)
@step('I reject the permission alert and do not view the LTI$')
def reject_lti_permission_alert(_step):
alert = world.browser.get_alert()
alert.dismiss()
check_no_alert()
assert len(world.browser.windows) == 1
@step('I view the LTI but incorrect_signature warning is rendered$')
def incorrect_lti_is_rendered(_step):
assert world.is_css_present('iframe', wait_time=2)
assert not world.is_css_present('.link_lti_new_window', wait_time=0)
assert not world.is_css_present('.error_message', wait_time=0)
# inside iframe test content is presented
check_lti_iframe_content("Wrong LTI signature")
@step('the course has correct LTI credentials with registered (.*)$')
def set_correct_lti_passport(_step, user='Instructor'):
coursenum = TEST_COURSE_NAME
metadata = {
'lti_passports': ["correct_lti_id:test_client_key:test_client_secret"]
}
i_am_registered_for_the_course(coursenum, metadata, user)
@step('the course has incorrect LTI credentials$')
def set_incorrect_lti_passport(_step):
coursenum = TEST_COURSE_NAME
metadata = {
'lti_passports': ["test_lti_id:test_client_key:incorrect_lti_secret_key"]
}
i_am_registered_for_the_course(coursenum, metadata)
@step('the course has an LTI component with (.*) fields(?:\:)?$') # , new_page is(.*), graded is(.*)
def add_correct_lti_to_course(_step, fields):
category = 'lti'
metadata = {
'lti_id': 'correct_lti_id',
'launch_url': 'http://127.0.0.1:{}/correct_lti_endpoint'.format(settings.LTI_PORT),
}
if fields.strip() == 'incorrect_lti_id': # incorrect fields
metadata.update({
'lti_id': 'incorrect_lti_id'
})
elif fields.strip() == 'correct': # correct fields
pass
elif fields.strip() == 'no_launch_url':
metadata.update({
'launch_url': u''
})
else: # incorrect parameter
assert False
if _step.hashes:
metadata.update(_step.hashes[0])
world.scenario_dict['LTI'] = world.ItemFactory.create(
parent_location=world.scenario_dict['SECTION'].location,
category=category,
display_name='LTI',
metadata=metadata,
)
setattr(world.scenario_dict['LTI'], 'TEST_BASE_PATH', '{host}:{port}'.format(
host=world.browser.host,
port=world.browser.port,
))
visit_scenario_item('LTI')
def create_course_for_lti(course, metadata):
# First clear the modulestore so we don't try to recreate
# the same course twice
# This also ensures that the necessary templates are loaded
world.clear_courses()
weight = 0.1
grading_policy = {
"GRADER": [
{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"weight": weight
},
]
}
# Create the course
# We always use the same org and display name,
# but vary the course identifier (e.g. 600x or 191x)
world.scenario_dict['COURSE'] = world.CourseFactory.create(
org='edx',
number=course,
display_name='Test Course',
metadata=metadata,
grading_policy=grading_policy,
)
# Add a section to the course to contain problems
world.scenario_dict['CHAPTER'] = world.ItemFactory.create(
parent_location=world.scenario_dict['COURSE'].location,
category='chapter',
display_name='Test Chapter',
)
world.scenario_dict['SECTION'] = world.ItemFactory.create(
parent_location=world.scenario_dict['CHAPTER'].location,
category='sequential',
display_name='Test Section',
metadata={'graded': True, 'format': 'Homework'})
@patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
def i_am_registered_for_the_course(coursenum, metadata, user='Instructor'):
# Create user
if user == 'BetaTester':
# Create the course
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=5)
metadata.update({'days_early_for_beta': 5, 'start': tomorrow})
create_course_for_lti(coursenum, metadata)
course_descriptor = world.scenario_dict['COURSE']
# create beta tester
user = BetaTesterFactory(course_key=course_descriptor.id)
normal_student = UserFactory()
instructor = InstructorFactory(course_key=course_descriptor.id)
assert not has_access(normal_student, 'load', course_descriptor)
assert has_access(user, 'load', course_descriptor)
assert has_access(instructor, 'load', course_descriptor)
else:
metadata.update({'start': datetime.datetime(1970, 1, 1, tzinfo=UTC)})
create_course_for_lti(coursenum, metadata)
course_descriptor = world.scenario_dict['COURSE']
user = InstructorFactory(course_key=course_descriptor.id)
# Enroll the user in the course and log them in
if has_access(user, 'load', course_descriptor):
world.enroll_user(user, course_descriptor.id)
world.log_in(username=user.username, password='test')
def check_lti_popup(parent_window):
# You should now have 2 browser windows open, the original courseware and the LTI
windows = world.browser.windows
assert_equal(len(windows), 2)
# For verification, iterate through the window titles and make sure that
# both are there.
tabs = []
expected_tabs = [u'LTI | Test Section | {0} Courseware | edX'.format(TEST_COURSE_NAME), u'TEST TITLE']
for window in windows:
world.browser.switch_to_window(window)
tabs.append(world.browser.title)
assert_equal(tabs, expected_tabs)
# Now verify the contents of the LTI window (which is the 2nd window/tab)
# Note: The LTI opens in a new browser window, but Selenium sticks with the
# current window until you explicitly switch to the context of the new one.
world.browser.switch_to_window(windows[1])
url = world.browser.url
basename = os.path.basename(url)
pathname = os.path.splitext(basename)[0]
assert_equal(pathname, u'correct_lti_endpoint')
result = world.css_find('.result').first.text
assert_equal(result, u'This is LTI tool. Success.')
world.browser.driver.close() # Close the pop-up window
world.browser.switch_to_window(parent_window) # Switch to the main window again
def click_and_check_lti_popup():
parent_window = world.browser.current_window # Save the parent window
world.css_find('.link_lti_new_window').first.click()
check_lti_popup(parent_window)
@step('visit the LTI component')
def visit_lti_component(_step):
visit_scenario_item('LTI')
@step('I see LTI component (.*) with text "([^"]*)"$')
def see_elem_text(_step, elem, text):
selector_map = {
'progress': '.problem-progress',
'feedback': '.problem-feedback',
'module title': '.problem-header',
'button': '.link_lti_new_window',
'description': '.lti-description'
}
assert_in(elem, selector_map)
assert_true(world.css_has_text(selector_map[elem], text))
@step('I see text "([^"]*)"$')
def check_progress(_step, text):
assert world.browser.is_text_present(text)
@step('I see graph with total progress "([^"]*)"$')
def see_graph(_step, progress):
selector = 'grade-detail-graph'
xpath = '//div[@id="{parent}"]//div[text()="{progress}"]'.format(
parent=selector,
progress=progress,
)
node = world.browser.find_by_xpath(xpath)
assert node
@step('I see in the gradebook table that "([^"]*)" is "([^"]*)"$')
def see_value_in_the_gradebook(_step, label, text):
table_selector = '.grade-table'
index = 0
table_headers = world.css_find('{0} thead th'.format(table_selector))
for i, element in enumerate(table_headers):
if element.text.strip() == label:
index = i
break
assert_true(world.css_has_text('{0} tbody td'.format(table_selector), text, index=index))
@step('I submit answer to LTI (.*) question$')
def click_grade(_step, version):
version_map = {
'1': {'selector': 'submit-button', 'expected_text': 'LTI consumer (edX) responded with XML content'},
'2': {'selector': 'submit-lti2-button', 'expected_text': 'LTI consumer (edX) responded with HTTP 200'},
}
assert_in(version, version_map)
location = world.scenario_dict['LTI'].location.html_id()
iframe_name = 'ltiFrame-' + location
with world.browser.get_iframe(iframe_name) as iframe:
iframe.find_by_name(version_map[version]['selector']).first.click()
assert iframe.is_text_present(version_map[version]['expected_text'])
@step('LTI provider deletes my grade and feedback$')
def click_delete_button(_step):
with world.browser.get_iframe(get_lti_frame_name()) as iframe:
iframe.find_by_name('submit-lti2-delete-button').first.click()
def get_lti_frame_name():
location = world.scenario_dict['LTI'].location.html_id()
return 'ltiFrame-' + location
@step('I see in iframe that LTI role is (.*)$')
def check_role(_step, role):
world.wait_for_present('iframe')
location = world.scenario_dict['LTI'].location.html_id()
iframe_name = 'ltiFrame-' + location
with world.browser.get_iframe(iframe_name) as iframe:
expected_role = 'Role: ' + role
role = world.retry_on_exception(
lambda: iframe.find_by_tag('h5').first.value,
max_attempts=5,
ignored_exceptions=ElementDoesNotExist
)
assert_equal(expected_role, role)
@step('I switch to (.*)$')
def switch_view(_step, view):
staff_status = world.css_find('#action-preview-select').first.value
if staff_status != view:
world.browser.select("select", view)
world.wait_for_ajax_complete()
assert_equal(world.css_find('#action-preview-select').first.value, view)
@step("in the LTI component I do not see (.*)$")
def check_lti_component_no_elem(_step, text):
selector_map = {
'a launch button': '.link_lti_new_window',
'an provider iframe': '.ltiLaunchFrame',
'feedback': '.problem-feedback',
'progress': '.problem-progress',
}
assert_in(text, selector_map)
assert_true(world.is_css_not_present(selector_map[text]))
| agpl-3.0 |
rcook/ptool-templates | python-game/setup.py | 1 | 1446 | {{py_copyright}}
import os
import re
from setuptools import find_packages, setup
def _read_properties():
init_path = os.path.abspath(os.path.join("{{module_name}}", "__init__.py"))
regex = re.compile("^\\s*__(?P<key>.*)__\\s*=\\s*\"(?P<value>.*)\"\\s*$")
with open(init_path, "rt") as f:
props = {}
for line in f.readlines():
m = regex.match(line)
if m is not None:
props[m.group("key")] = m.group("value")
return props
props = _read_properties()
version = props["version"]
description = props["description"]
setup(
name="{{project_name}}",
version=version,
description=description,
setup_requires=["setuptools-markdown"],
long_description_markdown_filename="README.md",
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
],
url="{{project_name | git_url(git_server) }}",
author="{{author}}",
author_email="{{author_email}}",
license="MIT",
packages=find_packages(),
install_requires=[
"pygame"
],
entry_points={
"console_scripts": [
"{{project_name}} = {{module_name}}.__main__:_main"
]
},
include_package_data=True,
package_data={ "{{module_name}}.images": ["*.gif", "*.jpg", "*.png"] },
test_suite="{{module_name}}.tests.suite",
zip_safe=False)
| mit |
dmeulen/home-assistant | tests/components/test_input_slider.py | 17 | 2031 | """The tests for the Input slider component."""
# pylint: disable=protected-access
import unittest
from tests.common import get_test_home_assistant
from homeassistant.bootstrap import setup_component
from homeassistant.components.input_slider import (DOMAIN, select_value)
class TestInputSlider(unittest.TestCase):
"""Test the input slider component."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_config(self):
"""Test config."""
invalid_configs = [
None,
{},
{'name with space': None},
{'test_1': {
'min': 50,
'max': 50,
}},
]
for cfg in invalid_configs:
self.assertFalse(
setup_component(self.hass, DOMAIN, {DOMAIN: cfg}))
def test_select_value(self):
"""Test select_value method."""
self.assertTrue(setup_component(self.hass, DOMAIN, {DOMAIN: {
'test_1': {
'initial': 50,
'min': 0,
'max': 100,
},
}}))
entity_id = 'input_slider.test_1'
state = self.hass.states.get(entity_id)
self.assertEqual(50, float(state.state))
select_value(self.hass, entity_id, '30.4')
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(30.4, float(state.state))
select_value(self.hass, entity_id, '70')
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(70, float(state.state))
select_value(self.hass, entity_id, '110')
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(70, float(state.state))
| mit |
Chiroptera/QCThesis | MyML/cluster/Horn.py | 3 | 8889 | import numpy as np
from sklearn import preprocessing
from datetime import datetime
def _corr(C):
R=np.empty_like(C)
#compute correlation from covariance
for i,ci in enumerate(C):
for j,cij in enumerate(ci):
R[i,j] = cij / np.sqrt(C[i,i] * C[j,j])
return R
def pcaFun(x, whiten=False,e=0, type='cov', method='svd',
center=True,normalize=False):
# x : n x m numpy.array of n points and m dimensions
# whiten : boolean parameter - whiten data or not
# e : normalization parameter for whitening data
n,d = x.shape
oX=x
# normalize
if normalize:
        # use the imported preprocessing module (plain sklearn.normalize does not exist)
        x = preprocessing.normalize(x, axis=0)
# center data
if center:
avg=np.mean(x,axis=0)
x=x-avg
if method=='eig':
# compute covariance matrix
if type=='cov':
C=x.T.dot(x)
C /= n
elif type=='corr':
#C=np.corrcoef(x,rowvar=0, bias=1)
C=x.T.dot(x)
C /= n
C=_corr(C)
else:
raise Exception('Incompatible argument value \
\'type='+str(type)+'\'')
# compute eig
eigVals,eigVect=np.linalg.eig(C)
#sort eigenthings
eigValOrder=eigVals.argsort()[::-1] #descending eigen indeces
sortedEigVect=np.zeros(eigVect.shape)
sortedEigVal=np.zeros(eigVals.shape)
for i,j in enumerate(eigValOrder):
sortedEigVect[:,i]=eigVect[:,j]
sortedEigVal[i]=eigVals[j]
comps = sortedEigVect
eigs = sortedEigVal
elif method=='svd':
U,S,V = np.linalg.svd(x)
comps=V.T
eigs= (S**2) / n
else:
raise Exception('Incompatible argument value \
\'method='+str(method)+'\'')
# project data
projX=x.dot(comps)
if whiten is True:
whiten_vect = np.sqrt((eigs + e))
projX = projX / whiten_vect
return projX, comps, eigs
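# A minimal usage sketch of pcaFun (synthetic data, not part of the original
# module):
#
#   X = N.random.rand(100, 3)                 # 100 points in 3 dimensions
#   projX, comps, eigs = pcaFun(X, whiten=True, method='svd')
#   # projX : projected (whitened) data, comps : principal directions,
#   # eigs  : variances along the principal directions, in descending order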
# function graddesc(xyData,q,[steps])
# purpose: performing quantum clustering in and moving the
# data points down the potential gradient
# input: xyData - the data vectors
# q=a parameter for the parsen window variance (q=1/(2*sigma^2))
# sigma=parameter for the parsen window variance (choose q or sigma)
# steps=number of gradient descent steps (default=50)
# eta=gradient descent step size
# output: D=location of data o=point after GD
def graddesc(xyData,**kwargs):
"""
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
Argument treatment
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
"""
argKeys = kwargs.keys()
if 'steps' in argKeys:
steps = kwargs['steps']
else:
steps = 50
if 'q' in argKeys:
q = kwargs['q']
elif 'sigma' in argKeys:
sigma = kwargs['sigma']
q = 1 / (2 * pow(sigma,2))
else:
sigma=0.1
q = 1 / (2 * pow(sigma,2))
if 'r' in argKeys:
D = kwargs['r']
else:
D = xyData
if 'eta' in argKeys:
eta = kwargs['eta']
else:
eta = 0.1
if 'all_square' in argKeys and kwargs['all_square'] is not False:
if xyData.shape[1]>2:
raise Exception('all_square should not be used in data > 2 dims')
points=kwargs['all_square']
totalPoints=pow(kwargs['all_square'],2)
a=np.linspace(-1,1,points)
D=[(x,y) for x in a for y in a]
D=np.array(D)
else:
D=xyData
if 'return_eta' in argKeys:
return_eta=kwargs['return_eta']
else:
return_eta=False
if 'timelapse' in argKeys:
timelapse=kwargs['timelapse']
if timelapse:
tD=list()
timelapse_count=0
if 'timelapse_list' in argKeys:
timelapse_list=kwargs['timelapse_list']
elif 'timelapse_percent' in argKeys:
timelapse_percent=kwargs['timelapse_percent']
list_inc=int(steps/(steps*timelapse_percent))
if list_inc == 0:
list_inc = 1
timelapse_list=range(steps)[::list_inc]
else:
timelapse_percent=0.25
list_inc=int(steps/(steps*timelapse_percent))
if list_inc == 0:
list_inc = 1
timelapse_list=range(steps)[::list_inc]
timelapse_list=range(steps)[::int(steps*timelapse_percent)]
else:
timelapse=False
if 'timeit' in argKeys:
timeit=kwargs['timeit']
#timings=np.zeros(steps+1) #+1 for the total time
timings=datetime.now()
else:
timeit=False
# add more states to timelapse list
if timelapse:
if timelapse_count in timelapse_list:
tD.append(D)
timelapse_count += 1
"""
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
Algorithm starts here
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
"""
# first run
V,P,E,dV = qc(xyData,q=q,r=D)
for j in range(4):
for i in range(steps/4):
# normalize potential gradient
dV = preprocessing.normalize(dV)
# gradient descent
D = D - eta*dV
# add more states to timelapse list
if timelapse:
if timelapse_count in timelapse_list:
tD.append(D)
timelapse_count += 1
"""
if timeit:
start_time=datetime.now()"""
# perform Quantum Clustering
V,P,E,dV = qc(xyData,q=q,r=D)
"""
if timeit:
timeings[i*4]=(datetime.now() - start).total_seconds()"""
eta*=0.5
"""
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
Algorithm ends here
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
"""
if timeit:
timings=(datetime.now() - timings).total_seconds()
if timelapse:
tD.append(D)
D=tD
returnList=[D,V,E]
if return_eta:
returnList.append(eta)
if timeit:
returnList.append(timings)
#returnList.append(timelapse_list)
return returnList
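# A minimal end-to-end sketch of the intended workflow (illustrative values;
# qc() and fineCluster() are defined further down in this module):
#
#   D, V, E = graddesc(xyData, sigma=0.1, steps=50)
#   clust = fineCluster(D, 0.05, potential=V)
#   # D     : data point locations after gradient descent
#   # V, E  : final potential values and the ground state energy
#   # clust : cluster index assigned to each data point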
# function qc
# purpose: performing quantum clustering in n dimensions
# input:
# ri - a vector of points in n dimensions
# q - the factor q which determines the clustering width
# r - the vector of points to calculate the potential for. equals ri if not specified
# output:
# V - the potential
# P - the wave function
# E - the energy
# dV - the gradient of V
# example: V,P,E,dV = qc(N.array([[1.,1.],[1.,3.],[3.,3.]]), q=5)  # r defaults to ri when omitted
# see also: qc2d
def qc(ri,**kwargs):
argKeys=kwargs.keys()
if 'q' in argKeys:
q=kwargs['q']
elif 'sigma' in argKeys:
sigma=kwargs['sigma']
q = 1 / (2 * pow(sigma,2))
else:
sigma=0.1
q = 1 / (2 * pow(sigma,2))
if 'r' in argKeys:
r=kwargs['r']
else:
r=ri
pointsNum,dims = ri.shape
calculatedNum = r.shape[0]
# prepare the potential
V=np.zeros(calculatedNum)
dP2=np.zeros(calculatedNum)
# prepare P
P=np.zeros(calculatedNum)
singledV1=np.zeros((calculatedNum,dims))
singledV2=np.zeros((calculatedNum,dims))
dV1=np.zeros((calculatedNum,dims))
dV2=np.zeros((calculatedNum,dims))
dV=np.zeros((calculatedNum,dims))
# prevent division by zero
# calculate V
# run over all the points and calculate for each the P and dP2
for point in range(calculatedNum):
# compute ||x-xi||^2
# axis=1 will sum rows instead of columns
D2 = np.sum(pow(r[point]-ri,2),axis=1)
# compute gaussian
singlePoint = np.exp(-q*D2)
# compute Laplacian of gaussian = ||x-xi||^2 * exp(...)
singleLaplace = D2 * singlePoint
#compute gradient components
aux = r[point] - ri
for d in range(dims):
singledV1[:,d] = aux[:,d] * singleLaplace
singledV2[:,d] = aux[:,d] * singlePoint
P[point] = np.sum(singlePoint)
dP2[point] = np.sum(singleLaplace)
dV1[point] = np.sum(singledV1,axis=0)
dV2[point] = np.sum(singledV2,axis=0)
# if there are points with 0 probability,
# assigned them the lowest probability of any point
P=np.where(P==0,np.min(np.extract((P!=0),P)),P)
# compute ground state energy
V = -dims/2 + q*dP2 / P
E = -min(V)
# compute potential on points
V += E
# compute gradient of V
for d in range(dims):
dV[:,d] = -q * dV1[:,d] + (V-E+(dims+2)/2) * dV2[:,d]
return V,P,E,dV
# clust=fineCluster(xyData,minD) clusters xyData points when closer than minD
# output: clust=vector of the cluster index that is assigned to each data point
# (its cluster serial #)
def fineCluster(xyData,minD,potential=None,timeit=False):
if potential is not None:
usePotential=True
else:
usePotential=False
n = xyData.shape[0]
clust = np.zeros(n)
if timeit:
timings=datetime.now()
if usePotential:
# index of points sorted by potential
sortedUnclust=potential.argsort()
        # index of unclustered point with lowest potential
i=sortedUnclust[0]
else:
i=0
# fist cluster index is 1
clustInd=1
while np.min(clust)==0:
x=xyData[i]
# euclidean distance from ith point to others
D = np.sum(pow(xyData-x,2),axis=1)
D = pow(D,0.5)
clust = np.where(D<minD,clustInd,clust)
# index of non clustered points
# unclust=[x for x in clust if x == 0]
clusted= clust.nonzero()[0]
if usePotential:
# sorted index of non clustered points
sortedUnclust=[x for x in sortedUnclust if x not in clusted]
if len(sortedUnclust) == 0:
break
#index of unclustered point with lowest potential
i=sortedUnclust[0]
else:
#index of first unclustered datapoint
i=np.argmin(clust)
clustInd += 1
if timeit:
timings=(datetime.now()-timings).total_seconds()
returnList=[clust]
if timeit:
return clust,timings
return clust | mit |
sheldonkhall/MITA-model | axisymm_mwa.py | 1 | 36082 | ## Python Module: axisymm_mwa
## Author: Sheldon Hall
## Email: [email protected]
##
## This module contains functions for predicting the outcomes of
## minimally invasive cancer treatments (MICTs). Namely: RFA,
## MWA, CA, IRE. The main focus of this code is to perform
## sensitivity analyses on representative (simplified) problems.
##
## The bioheat model chosen is the effective heat capacity form
## of Pennes' equation, which utilises only the transient
## solver. The computations of SAR for RFA and MWA are quasi-
## static and performed as required by the nonlinear solvers for
## the bioheat equation.
##
## Several classes are included to define data structures for
## various model, solver and code parameters.
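##
## For reference, the classical Pennes bioheat equation (which the effective
## heat capacity formulation used here modifies to account for vapourisation)
## is usually written as
##
##   rho*c*dT/dt = div(k*grad(T)) + omega*rho_b*c_b*(T_b - T) + Q_met + Q_ext
##
## where omega is the blood perfusion rate, rho_b, c_b and T_b are blood
## properties, Q_met the metabolic heat and Q_ext the applied (SAR) source.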
# Copyright (C) 2014 Sheldon Hall ([email protected])
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# - includes
#
from scipy import constants as S
import numpy as N
from dolfin import *
import sys
import os
#
# - set fenics params
#
parameters["num_threads"] = 6 # invoke multi-thread parallel support
parameters["allow_extrapolation"] = True # for mesh mappings
parameters["form_compiler"]["optimize"] = True # optimise compiler pg 167 fenics book
#solver = NewtonSolver("mumps")
#solver = LUSolver("petsc")
#solver = KrylovSolver("cg","ilu")
#
# - Some constants
#
pi = S.pi
e = S.e
c0 = S.c
mu_0 = S.mu_0
eps_0 = S.epsilon_0
#
# - dummies to catch errors
#
Pin = 0. # input power for WPBC
r_1 = 1. # inner radius for coaxial feed
r_2 = 2. # outer radius for coaxial feed
H_phi_0_re = Constant(0.0) # input real amplitude for WPBC
H_phi_0_im = Constant(0.0) # input imaginary amplitude for WPBC
qmet = Constant(0.) # metabolic heat term
#
# - global parameters
#
order = 2 # order of elements
#
# - EM parameters
#
class EM_parameters:
restrict_mesh = 1
# RFA parameters
cond = 0.333
cond_rate = 0.
V0 = 0. # ground pad voltage
Vprobe = 100. # probe voltage
cond_model = 'constant' # dependence of electrical conductivity
cond_vap = 0.5 # conductivity after vapourisation
imp_max = 120 # impedance threshold for control system
imp_t_off = 15 # time to switch off probe after imp_max exceeded
mms_source = Constant(0.) # source term for use in mms
# MWA parameters
freq = 2.45e9 # Probe frequency
om = 2 * pi * freq # Probe angular frequency
Pin = 0. # input power for WPBC
r_1 = 1. # inner radius for coaxial feed
r_2 = 2. # outer radius for coaxial feed
H_phi_0_re = Constant(0.0) # input real amplitude for WPBC
H_phi_0_im = Constant(0.0) # input imaginary amplitude for WPBC
eps_r_by_subdomain = [] # baseline epsilon r values by subdomain
sigma_by_subdomain = [] # baseline sigma values by subdomain
mu_r_by_subdomain = [] # baseline mu r values by subdomain
Z_dielectric = 0. # WPBC
C_dielectric = 0. # WPBC
# temperature dependent dielectric
es1=48.391
es2=6.286
es3=0.0764
es4=1.
ss1=2.173
ss2=5.951
ss3=0.0697
ss4=0.
# boundaries
zero_V = []
insulating = []
symmetry = []
active_tip = []
# def __init__(self,Pin):
# self.Pin=Pin
#
# - Thermal parameters
#
class thermal_parameters:
restrict_th_mesh = 1 # region to restrict thermal solution to
qmet = Constant(0.) # metabolic heat term
rho_c_t = 1060.*3411. # rho*c in tissue (phase)
rho_c_v = 4.4e5 # rho*c in vapourised tissue (phase)
Lh = 0 # latent heat of vapourisation
Cliq = 0. # water content tissue (%)
Tu = 374. # upper transition temp
Tl = 372. # lower transition temp
Q_sink = 0 # line heat sink
k = Constant(0.56) # thermal conductivity
dk = 0. # rate of change of thermal conductivity
omega = Constant(0.004) # blood perfusion
rho = Constant(1020.) # density blood
c = Constant(3640.) # specific heat blood
T0 = Constant(310.) # baseline temperature
k_model = 'constant' # choose model for thermal conductivity
    em_method = 'constant' # choose which type of EM model to use
T_initial = Constant(310.) # initial flat temperature profile
perf_model = 'constant'
stop_on_me = True # error given when T change over dt too large
Q = Constant(0.) # allow custom heat source to be given
cda_update = True # compute cell death
p_stop = 0.8 # value of viability at which to stop perfusion
cool_probe_temp = 310. # coolant temp on probe boundary
cool_probe = 100. # boundary to apply condition
h_transfer = 0. # heat transfer coefficient into coolant
nu = 1. # number of iterations of heat solver before updating heat source
# boundaries
bulk_tissue = []
cool_probe = []
class cell_death_parameters:
kb = 7.77e-3
kf_ = 3.33e-3
Tk = 40.5
A_init = 0.99
#
# - Functions
#
# - create output directory
def ensure_dir(f):
if not os.path.exists(f):
os.makedirs(f)
# compute_SAR_nl
#
# takes the temperature field, mesh and material properties as arguments and returns the SAR
#
# arguments:
#   problemname - output directory for results
#   mesh - dolfin mesh
#   interior - MeshFunction marking the subdomains
#   boundaries - MeshFunction marking the boundary regions
#   emp - EM_parameters instance
#   T - current temperature field
#   thp - thermal_parameters instance
#
# returns:
#   U (complex H_phi solution), Q (SAR), E_r, E_z (complex electric field components)
#def compute_SAR_nl(mesh, interior, boundaries, problemname, eps_r_by_subdomain,
# mu_r_by_subdomain, sigma_by_subdomain, H_phi_0_re, H_phi_0_im, om,
# es1, es2, es3, es4, ss1, ss2, ss3, ss4, T):
def compute_SAR_nl(problemname, mesh, interior, boundaries, emp, T, thp):
# check directory exists for results
ensure_dir(problemname)
# set solver params in advance
#solver = KrylovSolver("cg", "hypre_euclid")
# set measure
dss = ds[boundaries]
#
# - Define the function spaces
#
V0 = VectorFunctionSpace(mesh, "CG", order, dim=2) # complex scalar field
V0E = VectorFunctionSpace(mesh, "CG", order, dim=4) # complex vector field
V0_Re = FunctionSpace(mesh, "CG", order) # scalar field
V0_dc = FunctionSpace(mesh, "DG", 0) # material properties (discontinuous on boundary)
#
# - Define piecewise constant material properties
#
# vectorised numpy quicker than a python loop
eps_r = Function(V0_dc)
eps_r.vector()[:] = N.choose(N.asarray(interior.array(), dtype=N.int32), emp.eps_r_by_subdomain)
mu_r = Function(V0_dc)
mu_r.vector()[:] = N.choose(N.asarray(interior.array(), dtype=N.int32), emp.mu_r_by_subdomain)
sigma = Function(V0_dc)
sigma.vector()[:] = N.choose(N.asarray(interior.array(), dtype=N.int32), emp.sigma_by_subdomain)
    # substitute values in tissue
# take restrict_th_mesh and set T dependent properties in that region
T_p = Function(V0_dc)
# T_p = interpolate(T,V0_dc)
T_p = project_axisym(T,V0_dc)
T_array = T_p.vector().array()
eps_r_array = eps_r.vector().array()
eps_r_array[interior.array()==thp.restrict_th_mesh] = emp.es1*(1-1/(1+N.exp(emp.es2-emp.es3*(T_array[interior.array()==thp.restrict_th_mesh]+37.))))+emp.es4
eps_r.vector()[:] = eps_r_array
sigma_array = sigma.vector().array()
sigma_array[interior.array()==thp.restrict_th_mesh] = emp.ss1*(1-1/(1+N.exp(emp.ss2-emp.ss3*(T_array[interior.array()==thp.restrict_th_mesh]+37.))))+emp.ss4
sigma.vector()[:] = sigma_array
#eps_r = es1*(1-1/(1+exp(es2-es3*(T-273.))))+es4
#sigma = ss1*(1-1/(1+exp(ss2-ss3*(T-273.))))+ss4
File("%s/eps_r.pvd" % problemname) << eps_r
File("%s/sigma.pvd" % problemname) << sigma
#
# - construct weak form and boundary conditions
#
# Finite element test and trial
H_phi = TrialFunction(V0)
T = TestFunction(V0)
# Get the r and z components
polar = V0.cell().x
r = polar[0]
z = polar[1]
# Get the surface normal
n = V0.cell().n
### should be moved to specific example
# imposed electric field for BC
E0 = Constant(1.0)
f = Constant(0.0)
# define k_0
k_0 = emp.om * sqrt(eps_0 * mu_0)
# Reciprocal of complex relative permittivity
mod_Eps_r = eps_r * eps_r + sigma * sigma / (emp.om * emp.om * eps_0 * eps_0)
reEps_r_Re = eps_r / mod_Eps_r
reEps_r_Im = sigma / (emp.om * eps_0) / mod_Eps_r
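    # (i.e. the complex relative permittivity is eps_r - j*sigma/(om*eps_0); its
    # reciprocal 1/(a - j*b) = (a + j*b)/(a^2 + b^2), which is what the two lines
    # above compute with a^2 + b^2 = mod_Eps_r)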
# Complex relative permittivity and square root all materials
# equivalent to mod_Eps_r
mer = N.array(emp.eps_r_by_subdomain)**2 + (N.array(emp.sigma_by_subdomain)/(emp.om*eps_0))**2
# equivalent to reEps_r_Re + j*reEps_r_Im above
rer = N.array(emp.eps_r_by_subdomain)/mer + N.array(emp.sigma_by_subdomain)/(emp.om*eps_0)/mer*1j
# square root of complex number
srer = N.sqrt(rer)
# extract real and imaginary part and assign to dolfin variable
srer_re = Function(V0_dc)
srer_re.vector()[:] = N.choose(N.asarray(interior.array(),dtype=N.int32),N.real(srer))
srer_im = Function(V0_dc)
srer_im.vector()[:] = N.choose(N.asarray(interior.array(),dtype=N.int32),N.imag(srer))
#
# - construct weak form + BCs
#
## boundaries defined as:
## 1 - symmetry condition at x = 0
## 2 - first order absorbing boundary condition
## 3 - imposed z-component of electric field
## 4 - waveguide port boundary condition
# Main operators (applied)
curl_H_phi_r = - H_phi.dx(1)
curl_H_phi_z = (1 / r) * (r * H_phi).dx(0)
curl_T_r = - T.dx(1)
curl_T_z = (1 / r) * (r * T).dx(0)
# Define the bilinear forms
# Mass form
m = mu_r * (k_0 ** 2) * inner(T, H_phi)
s = reEps_r_Re * (curl_T_r[0] * curl_H_phi_r[0] + curl_T_r[1] * curl_H_phi_r[1]) \
+ reEps_r_Im * (curl_T_r[1] * curl_H_phi_r[0] - curl_T_r[0] * curl_H_phi_r[1]) \
+ reEps_r_Re * (curl_T_z[0] * curl_H_phi_z[0] + curl_T_z[1] * curl_H_phi_z[1]) \
+ reEps_r_Im * (curl_T_z[1] * curl_H_phi_z[0] - curl_T_z[0] * curl_H_phi_z[1])
a = r * s * dx - \
r * m * dx + \
-r * k_0 * (T[1] * (reEps_r_Im*H_phi[1]-reEps_r_Re*H_phi[0]) +\
T[0] * (reEps_r_Im*H_phi[0] + reEps_r_Re*H_phi[1])) * dss(2) \
- r * k_0 * T[0] * (srer_im * (H_phi[0]) + srer_re * (H_phi[1])) * dss(4) \
- r * k_0 * T[1] * (srer_im * (H_phi[1]) - srer_re * (H_phi[0])) * dss(4)
### gives better solution to coax problem wpbc without source
# a = r * s * dx - \
# r * m * dx + \
# - r * k_0 * T[0] * (srer_im * (H_phi[0]) + srer_re * (H_phi[1])) * dss(2) \
# - r * k_0 * T[1] * (srer_im * (H_phi[1]) - srer_re * (H_phi[0])) * dss(2) \
# - r * k_0 * T[0] * (srer_im * (H_phi[0]) + srer_re * (H_phi[1])) * dss(4) \
# - r * k_0 * T[1] * (srer_im * (H_phi[1]) - srer_re * (H_phi[0])) * dss(4)
L = r * (T[0]+T[1]) * f * dx + r * emp.om * eps_0 * E0 * T[1] * dss(3) +\
r * k_0 * T[0] * (srer_im * (- 2*emp.H_phi_0_re) + srer_re * (- 2*emp.H_phi_0_im)) * dss(4) +\
r * k_0 * T[1] * (srer_im * (- 2*emp.H_phi_0_im) - srer_re * (- 2*emp.H_phi_0_re)) * dss(4)
bc1 = DirichletBC(V0, Constant((0.0, 0.0)), boundaries, 1)
bcs=[bc1]
#
# - solve for H_phi in axisymmetric case
#
U = Function(V0)
#solve(a == L, U, bcs,
# solver_parameters={"linear_solver": "mumps",
# "preconditioner": "hypre_euclid"})
solve(a == L, U, bcs,
solver_parameters={"linear_solver": "mumps",
"preconditioner": "hypre_euclid"})
#
# - Post-processing
#
# compute E_r component
uE = TrialFunction(V0)
TE = TestFunction(V0)
aE = r * inner(TE, uE) * dx
LE = r * (1 / (emp.om * eps_0)) * \
((- reEps_r_Im * U[0].dx(1)- reEps_r_Re * U[1].dx(1)) * TE[0]) * dx + \
r * (1 / (emp.om * eps_0)) * \
((- reEps_r_Im * U[1].dx(1) + reEps_r_Re * U[0].dx(1)) * TE[1]) * dx
E_r = Function(V0)
#solve(aE == LE, E_r, solver_parameters={"linear_solver": "mumps"})
solve(aE == LE, E_r,
solver_parameters={"linear_solver": "mumps",
"preconditioner": "hypre_euclid"})
# compute E_z component
aE = r * inner(TE, uE) * dx
LE = r * (1 / (emp.om * eps_0)) * \
(reEps_r_Im * ((1 / r) * (r * U[0]).dx(0)) + \
reEps_r_Re * ((1 / r) * (r * U[1]).dx(0))) * TE[0] * dx + \
r * (1 / (emp.om * eps_0)) * (reEps_r_Im * ((1 / r) * (r * U[1]).dx(0)) - \
reEps_r_Re * ((1 / r) * (r * U[0]).dx(0))) * TE[1] * dx
E_z = Function(V0)
#solve(aE == LE, E_z, solver_parameters={"linear_solver": "mumps"})
solve(aE == LE, E_z,
solver_parameters={"linear_solver": "mumps",
"preconditioner": "hypre_euclid"})
# compute SAR
#Q = project_axisym(0.5 * sigma * (E_r[0] ** 2 + E_r[1] ** 2 + E_z[0] ** 2 + E_z[1] ** 2),V0_Re)
Q = project_axisym(0.5 * sigma * (E_r[0] ** 2 + E_r[1] ** 2 + E_z[0] ** 2 + E_z[1] ** 2),V0_dc)
# compute power according to RFA
power = assemble(Q*r*dx)*2*N.pi
print "power: ", power
# File("%s/U.pvd" % problemname) << U
# File("%s/E_r.pvd" % problemname) << E_r
# File("%s/E_z.pvd" % problemname) << E_z
# File("%s/Q.pvd" % problemname) << Q
# normalize solution to 1
## U_nodal_values = E_z.vector() # extract nodal values
## U_array = U_nodal_values.array() # copy to numpy array
## U_max = U_array.max() # numpy find max value
## U_array /= U_max
## E_z.vector()[:] = U_array
## E_z.vector().set_local(U_array) # alternative
## By now the solution should have been computed and stored. Anything following
## this is problem specific and can just be specified in a separate script to
## keep things tidy
return U, Q, E_r, E_z
# compute_enthalpy_nl
#
# solves the time-dependent enthalpy form of the bioheat equation using backward
# differences. This form of the equation allows the computation of phase changes
# in the tissue as a result of heating and freezing. The main extension over the
# simplest time-dependent bioheat equation is the ability to take functions
# of temperature and time as parameters, which also covers nonlinear
# phenomena.
#
# arguments:
#   mesh, interior, boundaries - dolfin mesh with subdomain and boundary markers
#   problemname - output directory for results
#   dt - initial time step
#   tmax - end time of the simulation
#   dt_min, dt_max - minimum and maximum allowed time step
#   t_out - array of times at which output is required
#   thp - thermal_parameters instance
#   emp - EM_parameters instance
#
# returns:
#   the final temperature field (rescaled back to Kelvin)
def compute_enthalpy_nl(mesh, interior, boundaries, problemname, dt, tmax, dt_min, dt_max, t_out, thp, emp):
#checks
ensure_dir(problemname) # check directory exists for results
eps = N.finfo(float).eps # useful quantity
if t_out.size == 1 and t_out[0] > dt_max-eps: # catch output error
print 'single time point out'
elif N.any(N.diff(t_out)<dt_max-eps):
error("largest time step spans more than one output reduce dt_max or coarsen t_out")
print "--+--+-- start time-dependent bioheat solve --+--+--"
# NOTE:
#
# EQUATIONS HAVE BEEN SCALED IN TERMS OF THETA = T - T0
thp.Tu = thp.Tu - 310
thp.Tl = thp.Tl - 310
# set solver params in advance
solver = KrylovSolver("cg", "hypre_euclid")
#solver.parameters["absolute_tolerance"] = 1E-7
#solver.parameters["relative_tolerance"] = 1E-4
#solver.parameters["maximum_iterations"] = 1000
#set_log_level(DEBUG)
# output files
file_temp=File("%s/enthalpy.pvd" % problemname)
file_SAR=File("%s/SAR.pvd" % problemname)
file_cd=File("%s/cell-death.pvd" % problemname)
file_perf=File("%s/perfusion.pvd" % problemname)
file_vfield=File("%s/voltage.pvd" % problemname)
# define a restriction
    # to generalise this, define a new meshfunction that is 1 everywhere inside the restriction
interior_new = MeshFunction("uint",interior)
help = N.asarray(interior_new.array())
for ent in N.nditer(help, op_flags=['readwrite']):
ent[...] = N.where(N.any(ent == thp.restrict_th_mesh),1,0)
interior_new.array()[:] = help
restriction = Restriction(interior_new,1) # restrict thermal calc to tissue only
W = FunctionSpace(restriction, 'CG', order)
W_dg = FunctionSpace(restriction, 'DG', order) # DG SAR
W_dg_0 = FunctionSpace(restriction, 'DG', 0) # material properties (perfusion etc)
# set measure
dss = ds[boundaries]
# Get the r and z components
polar = W.cell().x
r = polar[0]
z = polar[1]
# define quantities that need updating
dte = Expression('dt',dt=0.)
cur_time = Expression('t',t=0.)
# initial uniform temperature
# T_prev = interpolate(thp.T_initial,W)
T_prev = project_axisym(thp.T_initial-310,W)
# initial values (if needed)
resistance = 0.
print "--+--+-- initial SAR --+--+--"
# initial SAR
if thp.em_method=='iterate' or thp.em_method=='constant':
U, Q, E_r, E_z = compute_SAR_nl(problemname, mesh, interior, boundaries, emp, T_prev, thp)
elif thp.em_method=='ai':
Q = interpolate(Constant(1.),W)
elif thp.em_method=='none':
Q = interpolate(Constant(0.),W)
elif thp.em_method=='vyas':
Q = Expression('2*60*70/pi/pow(0.0045,2)*exp(-2*pow(x[0],2)/pow(0.0045,2)-60*x[1])')
elif thp.em_method=='RFA-const' or thp.em_method=='iterateRFA':
Q, resistance, power, Vfield = RFA_SAR(problemname, mesh, interior, boundaries, emp, T_prev, thp)
elif thp.em_method=='custom':
Q = thp.Q
elif thp.em_method=='mms-nonlinear':
M = 23.
P = 1.
L = 1.
H = 0.512
R = 0.02*0.512
F = 1.
Q = (M*(M*pow(P,2)*r*R*pow(cos(P*r),2) - 2*pow(L,2)*M*r*R*pow(cos(P*r),2)*cos(2*L*z) - M*pow(P,2)*r*R*pow(cos(P*r),2)*cos(2*L*z) + 2*exp(F*cur_time)*H*pow(L,2)*r*cos(P*r)*sin(L*z) + 2*exp(F*cur_time)*H*pow(P,2)*r*cos(P*r)*sin(L*z) - 2*exp(F*cur_time)*F*r*thp.rho_c_t*cos(P*r)*sin(L*z) + 2*exp(F*cur_time)*H*P*sin(P*r)*sin(L*z) - 2*M*pow(P,2)*r*R*pow(sin(P*r),2)*pow(sin(L*z),2) + M*P*R*sin(2*P*r)*pow(sin(L*z),2)))/(2.*exp(2*F*cur_time)*r)
# elif thp.em_method=='mms-nonlinear-full':
# M = 310.
# P = 1.
# L = 1.
# H = 0.512
# R = 0.02*0.512
# F = 1.
# W1 = .15
# Y = .02*.15
# G = 1.
# A1 = 1.
# B = 1.
# emp.mms_source = Constant(0.)
# Q, resistance, power, Vfield = RFA_SAR(problemname, mesh, interior, boundaries, emp, T_prev, thp)
# interpolate heat source onto restriction
# qext = interpolate(Q,W)
qext = project_axisym(Q,W)
# plot(qext)
# interactive()
if thp.em_method=='vyas':
qext = conditional(And(gt(z,0),lt(cur_time,0.3)),Q,0.)
elif thp.em_method=='mms-nonlinear':
qext = Q
# elif thp.em_method=='mms-nonlinear-full':
# qext = Q
# apply boundary conditions according to mesh function
bcs = []
for index in thp.bulk_tissue:
bcs.append(DirichletBC(W, 0., boundaries, index))
# for index in thp.cool_probe:
# bcs.append(DirichletBC(W, thp.cool_probe_temp-310., boundaries, index))
# for a neumann condition to create a heat sink at r=0 set gmsh to 6
# define variational problem
T = TrialFunction(W)
v = TestFunction(W)
f = qext
q = thp.qmet
# define effective heat capacity using ufl conditional
rc1 = conditional(lt(T_prev,thp.Tl), thp.rho_c_t, 0.)
rc2 = conditional(And(ge(T_prev,thp.Tl),le(T_prev,thp.Tu)), (thp.rho_c_t+thp.rho_c_v)/2+thp.rho*thp.Lh*thp.Cliq*(1./(thp.Tu-thp.Tl)), 0.)
rc3 = conditional(gt(T_prev,thp.Tu), thp.rho_c_v, 0.)
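    # (the latent heat is smeared over the transition interval [Tl, Tu]: between the
    # two temperatures the effective rho*c is the average of the tissue and vapourised
    # values plus rho*Lh*Cliq/(Tu - Tl), the usual effective heat capacity treatment
    # of phase change)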
# define thermal conductivity
k=thp.k
if thp.k_model=='linear':
k=thp.k + thp.dk*(T_prev)
elif thp.k_model=='linear_limited':
k=conditional(le(T_prev,63.),thp.k + thp.dk*(T_prev),thp.k + thp.dk*(63.))
elif thp.k_model=='ai':
        k=conditional(lt(T_prev,43), 0.465, 0.) + conditional(And(ge(T_prev,43),lt(T_prev,73)), 0.867, 0.) + conditional(ge(T_prev,73),1.460,0.)
# define perfusion term using D > 0.8 as shutoff
D_prev=interpolate(Constant(0.),W)
omega=thp.omega
# project D onto piecewise constant mesh to stop negative values
D_prev_const = project_axisym(D_prev,W_dg_0)
if thp.perf_model=='stop':
omega=conditional(gt(D_prev_const,thp.p_stop), thp.omega, 0.)
print "check perfusion threshhold"
# old unscaled and unstable weak form
# a = k*inner(nabla_grad(T), nabla_grad(v))*r*dx + v*omega*thp.rho*thp.c*T*r*dx + v*rc1/dte*T*r*dx + v*rc2/dte*T*r*dx + v*rc3/dte*T*r*dx
# L = f*v*r*dx+q*v*r*dx+v*omega*thp.rho*thp.c*thp.T0*r*dx + v*rc1/dte*T_prev*r*dx + v*rc2/dte*T_prev*r*dx + v*rc3/dte*T_prev*r*dx + v*thp.Q_sink/(2*pi)*dss(6)
# scaled but no heat transfer coefficient
# a = k*inner(nabla_grad(T), nabla_grad(v))*r*dx + v*omega*thp.rho*thp.c*T*r*dx + v*rc1/dte*T*r*dx + v*rc2/dte*T*r*dx + v*rc3/dte*T*r*dx
# L = f*v*r*dx + q*v*r*dx + v*rc1/dte*T_prev*r*dx + v*rc2/dte*T_prev*r*dx + v*rc3/dte*T_prev*r*dx + v*thp.Q_sink/(2*pi)*dss(6)
# heat transfer
a = k*inner(nabla_grad(T), nabla_grad(v))*r*dx + v*omega*thp.rho*thp.c*T*r*dx + v*rc1/dte*T*r*dx + v*rc2/dte*T*r*dx + v*rc3/dte*T*r*dx + v*T*thp.h_transfer*r*dss(4)
L = f*v*r*dx + q*v*r*dx + v*rc1/dte*T_prev*r*dx + v*rc2/dte*T_prev*r*dx + v*rc3/dte*T_prev*r*dx + v*thp.Q_sink/(2*pi)*dss(6)
T = Function(W)
T_out = Function(W)
# set initial temperature for SAR
# T = interpolate(thp.T_initial,W)
T = project_axisym(thp.T_initial-310.,W)
# assemble in advance of time iteration
A = None
b = None
# SAR update criteria
Q = Function(W_dg)
iu = thp.nu-1
store_resistance = N.array(t_out) # save the resistance at output times
store_power = N.array(t_out) # save power
power = 0.
# initialise cell death
n = len(T.vector().array())
cda = N.zeros(2*n) # cell death array
cda[::2] = cell_death_parameters.A_init
D = interpolate(Constant(0.),W) # dead field
# control system
imp_on = True
imp_off_t_start = dt
t = dt
step_inc = True
while t <= tmax+eps:
# update SAR
# this is SLOW so introduce index iu
# iu increments each transient iteration, when it reaches nu it updates SAR
iu += 1
if (iu == thp.nu) and (thp.em_method=="iterate"):
print "--+--+-- updating SAR --+--+--"
U, Q, E_r, E_z = compute_SAR_nl(problemname, mesh, interior, boundaries, emp, T,thp)
iu = 0
if (iu == thp.nu) and (thp.em_method=="iterateRFA"):
if imp_on: # control system switch
print "--+--+-- updating SAR --+--+--"
Q, resistance, power, Vfield = RFA_SAR(problemname, mesh, interior, boundaries, emp, T, thp)
iu = 0
else:
print "--+--+-- SAR offfffffff --+--+--"
Q = interpolate(Constant(0.),W_dg)
resistance = 0.
power = 0.
iu = 0.
# if (iu == nu) and (thp.em_method=="mms-nonlinear-full"):
# print "--+--+-- updating SAR --+--+--"
# Q, resistance, power, Vfield = RFA_SAR(problemname, mesh, interior, boundaries, emp, T, thp)
# iu = 0
# check for power < 0
if (Q.vector().array().min()<0):
error('source term Q < 0')
# assemble each iteration to account for previous time step
dte.dt = dt
cur_time.t = t
if thp.em_method=='iterate' or thp.em_method=='iterateRFA' or thp.em_method=="mms-nonlinear-full":
f.assign(Q)
D_prev_const.assign(project_axisym(D_prev,W_dg))
b = assemble(L, tensor=b)
A = assemble(a, tensor=A)
for bc in bcs:
bc.apply(A, b)
# solve(A, T.vector(), b,
# solver_parameters={"linear_solver": "mumps","preconditioner": "hypre_euclid"})
solver.solve(A, T.vector(), b)
# adapt T to ensure about 5 time steps per degree change
nodal_T = T.vector().array()
nodal_T_prev = T_prev.vector().array()
T_error = N.abs(nodal_T-nodal_T_prev).max()
print "max T err: ", T_error, " t: ", t+dt, " dt: ", dt, " pow: ", power, " imp: ", resistance
#plot(T)
#interactive()
if T_error < abs(thp.Tu-thp.Tl)*0.03 and dt < dt_max and step_inc:
# t += dt*.1
# dt = dt*1.1
t += dt*.1
dt = dt*1.1
# ensure dt_max not exceeded
if dt > dt_max:
# remove time step
# dt = dt/1.1
# t -= .1*dt
dt = dt/1.1
t -= .1*dt
# replace with dt_max
t -= dt
t += dt_max
dt = dt_max
print "**************************** INCREASE STEP *********************************"
elif T_error > abs(thp.Tu-thp.Tl)*0.3 and dt > dt_min:
# t = t - dt*0.5
# dt = dt*0.5
t = t - dt*0.1
dt = dt*0.9
step_inc = False # stop increase immediately after decrease
print "**************************** DECREASE STEP *********************************"
else:
# check that temp does not change too much
if T_error > abs(thp.Tu-thp.Tl)*0.3:
#print "step size too large"
if thp.stop_on_me:
plot(T-T_prev)
interactive()
error("step size too large")
else:
warning("step size too large")
print "********* TIME STEP MAX ERROR ", T_error, " ****************"
# Test if output required and compute directly
#eps = dt*0.001
# if any(N.logical_and(t_out >= t-dt,t_out < t+eps)):
if any(N.logical_and(t_out > t,t_out < t+dt+eps)):
print "***************************** OUTPUT FILE **********************************"
# dt_0=N.abs(t_out[N.logical_and(t_out>=t-dt,t_out<t+eps)]-t+dt)
dt_0=N.abs(t_out[N.logical_and(t_out > t,t_out < t+dt+eps)]-t)
print dt_0
T_out.vector()[:] = T_prev.vector().array()+(T.vector().array()-T_prev.vector().array())/dt*dt_0
# scale back to Kelvin and change name
T_out = project_axisym(T_out+Constant(310.),W)
T_out.rename('Temperature',T_out.label())
file_temp << (T_out, t+dt_0[0])
# rest just change name and output
Q_out = project_axisym(f,W)
Q_out.rename('SAR',Q_out.label())
file_SAR << Q_out
# V_out = project_axisym(Vfield,W)
# V_out.rename('Voltage',V_out.label())
# file_vfield << V_out
D.vector()[:] = 1.-cda[1::2] # viable (also only last value not linear interp as T)
file_cd << D
# perf_out = project_axisym(omega,W_dg_0)
# perf_out.rename('Perfusion',perf_out.label())
# file_perf << perf_out
# store_resistance[N.logical_and(t_out>=t-dt,t_out<t+eps)] = resistance
# store_power[N.logical_and(t_out>=t-dt,t_out<t+eps)] = power
store_resistance[N.logical_and(t_out > t,t_out < t+dt+eps)] = resistance
store_power[N.logical_and(t_out > t,t_out < t+dt+eps)] = power
# Update cell death
print "***************************** CELL DEATH **********************************"
#print cda
#print n
#print t
#print dt
#print nodal_T
#print nodal_T.min()
#print nodal_T.max()
if thp.cda_update:
cda = cell_death_timestep(cda,n,t,dt,nodal_T,cell_death_parameters)
D.vector()[:] = 1.-cda[1::2]
D_prev.assign(D) # update cell death for perfusion
#print cda
# Control System
#
        # If impedance is greater than emp.imp_max, the SAR is switched off
if (resistance > emp.imp_max) and imp_on:
imp_off_t_start = t
imp_on = False
#print 'imp_off_t_start', imp_off_t_start
#print 'emp.imp_t_off', emp.imp_t_off
if t - imp_off_t_start > emp.imp_t_off:
imp_on = True
#print imp_on
# ACCEPT TIME STEP
t += dt
T_prev.assign(T)
step_inc = True # allow increase after accepted step
print "***************************** ACCEPT TIME **********************************"
N.savetxt("%s/impedance.out" % problemname, (store_resistance, store_power), delimiter=',') # output impedance
return project_axisym(T+Constant(310.),W)
# cell_death_func
#
# evaluate gradients of the ode system at a time t
#
# args:
# y = vector of current state [alive, dead] with len(alive)==len(dead)
# t = current time (dummy)
# n = len(alive)
# kb = model parameter
# kf_ = model parameter
# Tk = model parameter
# T_prev_nodal = vector of corresponding temperature values, len(T_prev_nodal)==len(alive)
#
# returns:
# dydt = vector of gradients len(dydt)==len(alive)
def cell_death_func(y,t,n,kb,kf_,Tk,T_prev_nodal): #odeint
#print y, t, n, kb,kf_,Tk,T_prev_nodal
#dydt = N.zeros(2*n) # cell death array
dydt = N.array(y)
dydt[:n] = -kf_*N.exp((T_prev_nodal-273.)/Tk)*(1.-y[:n])*y[:n]+kb*(1.-y[:n]-y[n:])
dydt[n:] = kf_*N.exp((T_prev_nodal-273.)/Tk)*(1.-y[:n])*(1.-y[:n]-y[n:])
return dydt
# def cell_death_func_class(t,y,args): # ode class
# # args = n, cdp.kb, cdp.kf_, cdp.Tk, T_prev_nodal
# dydt = N.array(y)
# dydt[:args[0]] = -args[2]*N.exp((args[4]-273.)/args[3])*(1.-y[:args[0]])*y[:args[0]]+args[1]*(1.-y[:args[0]]-y[args[0]:])
# dydt[args[0]:] = args[2]*N.exp((args[4]-273.)/args[3])*(1.-y[:args[0]])*(1.-y[:args[0]]-y[args[0]:])
# return dydt
def cell_death_func_class(t,y,args): # ode class
# scaled for T' = T-310
# args = n, cdp.kb, cdp.kf_, cdp.Tk, T_prev_nodal
dydt = N.array(y)
dydt[::2] = -args[2]*N.exp((args[4]+37.)/args[3])*(1.-y[::2])*y[::2]+args[1]*(1.-y[::2]-y[1::2])
dydt[1::2] = args[2]*N.exp((args[4]+37.)/args[3])*(1.-y[::2])*(1.-y[::2]-y[1::2])
return dydt
# cell_death_jac_class
#
# jacobian of the cell death model in vain hope of performance gain
def cell_death_jac_class(t,y,args):
packed_jac = N.zeros((3,len(y)))
# diagonal
packed_jac[1][::2] = args[2]*N.exp((args[4]-273.)/args[3])*(2*y[::2]-1)-args[2]
packed_jac[1][1::2] = -args[2]*N.exp((args[4]-273.)/args[3])*(1-y[::2])
# lead (to left of diagonal)
packed_jac[0][1::2] = -args[2]
# trail (to right of diagonal)
packed_jac[2][::2] = args[2]*N.exp((args[4]-273.)/args[3])*(2*y[::2]+y[1::2]-2)
#print 'jac called'
return packed_jac
# cell_death_timestep
#
# evaluate cell death model for current time step
#
# args:
# y0 - initial condition [alive, dead] (model state at beginning of time step)
# n - len(alive)
# dt - width of time step
# T_prev_nodal - temperature to assume during time step
# cdp - cell death parameters
#
# cell_death_timestep(N.array([0.99,0.]),1,0.,900.,338,cell_death_parameters)
def cell_death_timestep(y0,n,t,dt,T_prev_nodal,cdp):
import scipy.integrate as spint
# ode int
# time = N.array([t,t+dt])
# yt = spint.odeint(cell_death_func,y0,time,(n,cdp.kb,cdp.kf_,cdp.Tk,T_prev_nodal),atol=1e-4,rtol=1e-4,mxords=4)
# cda = yt[1]
# ode class
step_int = spint.ode(cell_death_func_class)
step_int.set_integrator("vode",method="bdf",nsteps=1e4)
# step_int = spint.ode(cell_death_func_class,cell_death_jac_class)
# step_int.set_integrator("vode",method="bdf",nsteps=1e4,lband=1,with_jacobian=True)
# step_int.set_jac_params([n, cdp.kb, cdp.kf_, cdp.Tk, T_prev_nodal])
step_int.set_f_params([n, cdp.kb, cdp.kf_, cdp.Tk, T_prev_nodal])
step_int.set_initial_value(y0,0.)
step_int.integrate(dt)
print step_int.successful()
if not step_int.successful():
error("cell death step solve failed")
cda = step_int.y
return cda
# RFA_SAR
#
# compute the SAR according to the Laplace equation in axisymmetric
# cylindrical coordinates
#
# args:
#   problemname - output directory for results
#   mesh, interior, boundaries - dolfin mesh with subdomain and boundary markers
#   emp - EM_parameters instance
#   T - current temperature field
#   thp - thermal_parameters instance
#
# returns:
#   SAR, resistance, power, V (voltage field)
#
def RFA_SAR(problemname, mesh, interior, boundaries, emp, T, thp):
# public function pre-amble
ensure_dir(problemname) # check directory exists for results
print "--+--+-- compute RFA SAR --+--+--"
# set solver params in advance
solver = KrylovSolver("cg", "hypre_euclid")
#solver.parameters["absolute_tolerance"] = 1E-7
#solver.parameters["relative_tolerance"] = 1E-4
#solver.parameters["maximum_iterations"] = 1000
#set_log_level(DEBUG)
# define a restriction for V calculation
restriction = Restriction(interior,emp.restrict_mesh) # restrict em calc
W = FunctionSpace(restriction, 'CG', order)
W_dg0 = FunctionSpace(restriction, 'DG', 0) # attempt to stabilise sigma
W_dg = FunctionSpace(restriction, 'DG', order) # capture SAR shape and discontinuity
# interpolate T onto piecewise constant to stabilise conductivity
T_p = project_axisym(T,W_dg0)
# set measure
dss = ds[boundaries]
# Get the r and z components
polar = W.cell().x
r = polar[0]
z = polar[1]
# allocate boundary conditions to line regions defined in boundary mesh function
# symmetry and insulating are natural BCs
bcs = []
for index in emp.zero_V:
bcs.append(DirichletBC(W, emp.V0, boundaries, index))
for index in emp.active_tip:
bcs.append(DirichletBC(W, emp.Vprobe, boundaries, index))
# define variational problem
V = TrialFunction(W)
U = TestFunction(W)
# define electrical conductivity
sigma=emp.cond
if emp.cond_model=='linear':
sigma=emp.cond + emp.cond_rate*(T_p)
elif emp.cond_model=='nonlinear':
sigma=(conditional(le(T_p,thp.Tu), emp.cond + emp.cond_rate*(T_p), 0.) +
conditional(And(gt(T_p,thp.Tu),le(T_p,thp.Tu+5.)), (emp.cond_vap - (emp.cond + emp.cond_rate*(thp.Tu)))/5.*(T_p-thp.Tu) + (emp.cond + emp.cond_rate*(thp.Tu)), 0.) +
conditional(gt(T_p,thp.Tu+5.), emp.cond_vap, 0.))
# File("%s/sigma.pvd" % problemname) << project_axisym(sigma,W_dg0)
a = sigma*inner(nabla_grad(V), nabla_grad(U))*r*dx
L = emp.mms_source*U*r*dx
V = Function(W)
solve(a == L, V, bcs)
U = TestFunction(W_dg)
SAR = TrialFunction(W_dg)
a = SAR*U*r*dx
L = U*sigma*inner(nabla_grad(V), nabla_grad(V))*r*dx
SAR = Function(W_dg)
solve(a == L, SAR)
# SAR = project_axisym(sigma*inner(nabla_grad(V), nabla_grad(V))*r,W_dg)
# plot(V)
# plot(SAR)
# interactive()
# compute impedance for control system as purely electrical
# need to use assemble
# integrate current density over probe active tip
# line element should be ds without adaption
power = assemble(SAR*r*dx)*2*N.pi
resistance = (emp.Vprobe)**2/power
# print "power: ", power
# print "resistance according to Kroger & elmer: ", (emp.Vprobe)**2/power
return SAR, resistance, power, V
# define a projection function for axisymmetric problems to avoid mistakes
def project_axisym(func,space):
polar = space.cell().x
r = polar[0]
z = polar[1]
w = TrialFunction(space)
v = TestFunction(space)
a = inner(w,v)*r*dx
L = inner(func, v)*r*dx
pfunc = Function(space)
solve(a == L, pfunc)
return pfunc
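#
# - example usage (illustrative only)
#
# A minimal sketch of how the pieces above might be wired together for an RFA
# run. The mesh/marker file names and the marker indices below are assumptions
# for illustration and must match the markers used when the mesh was generated.
#
# mesh = Mesh("liver.xml")                                   # hypothetical mesh file
# interior = MeshFunction("uint", mesh, "liver_physical_region.xml")
# boundaries = MeshFunction("uint", mesh, "liver_facet_region.xml")
#
# emp = EM_parameters()
# emp.cond_model = 'nonlinear'
# emp.Vprobe = 80.                                           # probe voltage (assumed)
# emp.zero_V = [1]                                           # ground pad marker (assumed)
# emp.active_tip = [2]                                       # active tip marker (assumed)
#
# thp = thermal_parameters()
# thp.em_method = 'iterateRFA'                               # recompute the RFA SAR as T evolves
# thp.bulk_tissue = [3]                                      # far-field boundary marker (assumed)
#
# T_final = compute_enthalpy_nl(mesh, interior, boundaries, "rfa-example",
#                               dt=0.1, tmax=60., dt_min=1e-3, dt_max=1.,
#                               t_out=N.array([10., 30., 60.]), thp=thp, emp=emp)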
| gpl-3.0 |
lunarca/fngrpt | libs/LoggingHelpers.py | 2 | 1793 | # -*- coding: utf-8 -*-
'''
@author: moloch
Copyright 2013
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
from libs.Singleton import Singleton
from collections import deque
@Singleton
class ObservableLoggingHandler(logging.StreamHandler):
'''
An observable logging class, just shuffles logging messages
from the main logger to the observers. A small history is
stored in volatile memory.
'''
max_history_size = 100
_observers = []
_history = deque()
def add_observer(self, observer):
''' Add new observer and send them any history '''
if observer not in self._observers:
self._observers.append(observer)
observer.update(list(self._history))
def remove_observer(self, observer):
''' Remove ref to an observer '''
if observer in self._observers:
self._observers.remove(observer)
def emit(self, record):
'''
Overloaded method, gets called when logging messages are sent
'''
msg = self.format(record)
for observer in self._observers:
observer.update([msg])
if self.max_history_size < len(self._history):
self._history.popleft()
self._history.append(msg)
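# Example usage (illustrative sketch, not part of the original module): an
# "observer" here is any object with an update(list_of_messages) method. How the
# singleton instance is obtained depends on libs.Singleton; the accessor name
# below is an assumption.
#
# class PrintObserver(object):
#     def update(self, messages):
#         for message in messages:
#             print(message)
#
# handler = ObservableLoggingHandler.instance()  # accessor name assumed
# logging.getLogger().addHandler(handler)
# handler.add_observer(PrintObserver())
# logging.warning("this message is forwarded to every observer")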
| apache-2.0 |
tplavcic/percona-xtrabackup | storage/innobase/xtrabackup/test/python/testtools/testresult/real.py | 42 | 21672 | # Copyright (c) 2008 testtools developers. See LICENSE for details.
"""Test results and related things."""
__metaclass__ = type
__all__ = [
'ExtendedToOriginalDecorator',
'MultiTestResult',
'TestResult',
'ThreadsafeForwardingResult',
]
import datetime
import sys
import unittest
from testtools.compat import all, _format_exc_info, str_is_unicode, _u
# From http://docs.python.org/library/datetime.html
_ZERO = datetime.timedelta(0)
# A UTC class.
class UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return _ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return _ZERO
utc = UTC()
class TestResult(unittest.TestResult):
"""Subclass of unittest.TestResult extending the protocol for flexability.
This test result supports an experimental protocol for providing additional
data to in test outcomes. All the outcome methods take an optional dict
'details'. If supplied any other detail parameters like 'err' or 'reason'
should not be provided. The details dict is a mapping from names to
MIME content objects (see testtools.content). This permits attaching
tracebacks, log files, or even large objects like databases that were
part of the test fixture. Until this API is accepted into upstream
Python it is considered experimental: it may be replaced at any point
by a newer version more in line with upstream Python. Compatibility would
be aimed for in this case, but may not be possible.
:ivar skip_reasons: A dict of skip-reasons -> list of tests. See addSkip.
"""
def __init__(self):
# startTestRun resets all attributes, and older clients don't know to
# call startTestRun, so it is called once here.
# Because subclasses may reasonably not expect this, we call the
# specific version we want to run.
TestResult.startTestRun(self)
def addExpectedFailure(self, test, err=None, details=None):
"""Called when a test has failed in an expected manner.
Like with addSuccess and addError, testStopped should still be called.
:param test: The test that has been skipped.
:param err: The exc_info of the error that was raised.
:return: None
"""
# This is the python 2.7 implementation
self.expectedFailures.append(
(test, self._err_details_to_string(test, err, details)))
def addError(self, test, err=None, details=None):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
:param details: Alternative way to supply details about the outcome.
see the class docstring for more information.
"""
self.errors.append((test,
self._err_details_to_string(test, err, details)))
def addFailure(self, test, err=None, details=None):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
:param details: Alternative way to supply details about the outcome.
see the class docstring for more information.
"""
self.failures.append((test,
self._err_details_to_string(test, err, details)))
def addSkip(self, test, reason=None, details=None):
"""Called when a test has been skipped rather than running.
Like with addSuccess and addError, testStopped should still be called.
This must be called by the TestCase. 'addError' and 'addFailure' will
not call addSkip, since they have no assumptions about the kind of
errors that a test can raise.
:param test: The test that has been skipped.
:param reason: The reason for the test being skipped. For instance,
u"pyGL is not available".
:param details: Alternative way to supply details about the outcome.
see the class docstring for more information.
:return: None
"""
if reason is None:
reason = details.get('reason')
if reason is None:
reason = 'No reason given'
else:
reason = ''.join(reason.iter_text())
skip_list = self.skip_reasons.setdefault(reason, [])
skip_list.append(test)
def addSuccess(self, test, details=None):
"""Called when a test succeeded."""
def addUnexpectedSuccess(self, test, details=None):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"""Has this result been successful so far?
If there have been any errors, failures or unexpected successes,
return False. Otherwise, return True.
Note: This differs from standard unittest in that we consider
unexpected successes to be equivalent to failures, rather than
successes.
"""
return not (self.errors or self.failures or self.unexpectedSuccesses)
if str_is_unicode:
# Python 3 and IronPython strings are unicode, use parent class method
_exc_info_to_unicode = unittest.TestResult._exc_info_to_string
else:
# For Python 2, need to decode components of traceback according to
# their source, so can't use traceback.format_exception
# Here follows a little deep magic to copy the existing method and
# replace the formatter with one that returns unicode instead
from types import FunctionType as __F, ModuleType as __M
__f = unittest.TestResult._exc_info_to_string.im_func
__g = dict(__f.func_globals)
__m = __M("__fake_traceback")
__m.format_exception = _format_exc_info
__g["traceback"] = __m
_exc_info_to_unicode = __F(__f.func_code, __g, "_exc_info_to_unicode")
del __F, __M, __f, __g, __m
def _err_details_to_string(self, test, err=None, details=None):
"""Convert an error in exc_info form or a contents dict to a string."""
if err is not None:
return self._exc_info_to_unicode(err, test)
return _details_to_str(details)
def _now(self):
"""Return the current 'test time'.
If the time() method has not been called, this is equivalent to
datetime.now(), otherwise its the last supplied datestamp given to the
time() method.
"""
if self.__now is None:
return datetime.datetime.now(utc)
else:
return self.__now
def startTestRun(self):
"""Called before a test run starts.
New in Python 2.7. The testtools version resets the result to a
pristine condition ready for use in another test run. Note that this
is different from Python 2.7's startTestRun, which does nothing.
"""
super(TestResult, self).__init__()
self.skip_reasons = {}
self.__now = None
# -- Start: As per python 2.7 --
self.expectedFailures = []
self.unexpectedSuccesses = []
# -- End: As per python 2.7 --
def stopTestRun(self):
"""Called after a test run completes
New in python 2.7
"""
def time(self, a_datetime):
"""Provide a timestamp to represent the current time.
This is useful when test activity is time delayed, or happening
concurrently and getting the system time between API calls will not
accurately represent the duration of tests (or the whole run).
Calling time() sets the datetime used by the TestResult object.
Time is permitted to go backwards when using this call.
:param a_datetime: A datetime.datetime object with TZ information or
None to reset the TestResult to gathering time from the system.
"""
self.__now = a_datetime
def done(self):
"""Called when the test runner is done.
deprecated in favour of stopTestRun.
"""
class MultiTestResult(TestResult):
"""A test result that dispatches to many test results."""
def __init__(self, *results):
TestResult.__init__(self)
self._results = list(map(ExtendedToOriginalDecorator, results))
def _dispatch(self, message, *args, **kwargs):
return tuple(
getattr(result, message)(*args, **kwargs)
for result in self._results)
def startTest(self, test):
return self._dispatch('startTest', test)
def stopTest(self, test):
return self._dispatch('stopTest', test)
def addError(self, test, error=None, details=None):
return self._dispatch('addError', test, error, details=details)
def addExpectedFailure(self, test, err=None, details=None):
return self._dispatch(
'addExpectedFailure', test, err, details=details)
def addFailure(self, test, err=None, details=None):
return self._dispatch('addFailure', test, err, details=details)
def addSkip(self, test, reason=None, details=None):
return self._dispatch('addSkip', test, reason, details=details)
def addSuccess(self, test, details=None):
return self._dispatch('addSuccess', test, details=details)
def addUnexpectedSuccess(self, test, details=None):
return self._dispatch('addUnexpectedSuccess', test, details=details)
def startTestRun(self):
return self._dispatch('startTestRun')
def stopTestRun(self):
return self._dispatch('stopTestRun')
def time(self, a_datetime):
return self._dispatch('time', a_datetime)
def done(self):
return self._dispatch('done')
def wasSuccessful(self):
"""Was this result successful?
Only returns True if every constituent result was successful.
"""
return all(self._dispatch('wasSuccessful'))
class TextTestResult(TestResult):
"""A TestResult which outputs activity to a text stream."""
def __init__(self, stream):
"""Construct a TextTestResult writing to stream."""
super(TextTestResult, self).__init__()
self.stream = stream
self.sep1 = '=' * 70 + '\n'
self.sep2 = '-' * 70 + '\n'
def _delta_to_float(self, a_timedelta):
return (a_timedelta.days * 86400.0 + a_timedelta.seconds +
a_timedelta.microseconds / 1000000.0)
def _show_list(self, label, error_list):
for test, output in error_list:
self.stream.write(self.sep1)
self.stream.write("%s: %s\n" % (label, test.id()))
self.stream.write(self.sep2)
self.stream.write(output)
def startTestRun(self):
super(TextTestResult, self).startTestRun()
self.__start = self._now()
self.stream.write("Tests running...\n")
def stopTestRun(self):
if self.testsRun != 1:
plural = 's'
else:
plural = ''
stop = self._now()
self._show_list('ERROR', self.errors)
self._show_list('FAIL', self.failures)
for test in self.unexpectedSuccesses:
self.stream.write(
"%sUNEXPECTED SUCCESS: %s\n%s" % (
self.sep1, test.id(), self.sep2))
self.stream.write("Ran %d test%s in %.3fs\n\n" %
(self.testsRun, plural,
self._delta_to_float(stop - self.__start)))
if self.wasSuccessful():
self.stream.write("OK\n")
else:
self.stream.write("FAILED (")
details = []
details.append("failures=%d" % (
sum(map(len, (
self.failures, self.errors, self.unexpectedSuccesses)))))
self.stream.write(", ".join(details))
self.stream.write(")\n")
super(TextTestResult, self).stopTestRun()
class ThreadsafeForwardingResult(TestResult):
"""A TestResult which ensures the target does not receive mixed up calls.
This is used when receiving test results from multiple sources, and batches
up all the activity for a single test into a thread-safe batch where all
other ThreadsafeForwardingResult objects sharing the same semaphore will be
locked out.
Typical use of ThreadsafeForwardingResult involves creating one
ThreadsafeForwardingResult per thread in a ConcurrentTestSuite. These
forward to the TestResult that the ConcurrentTestSuite run method was
called with.
target.done() is called once for each ThreadsafeForwardingResult that
forwards to the same target. If the target's done() takes special action,
care should be taken to accommodate this.
"""
def __init__(self, target, semaphore):
"""Create a ThreadsafeForwardingResult forwarding to target.
:param target: A TestResult.
:param semaphore: A threading.Semaphore with limit 1.
"""
TestResult.__init__(self)
self.result = ExtendedToOriginalDecorator(target)
self.semaphore = semaphore
def _add_result_with_semaphore(self, method, test, *args, **kwargs):
self.semaphore.acquire()
try:
self.result.time(self._test_start)
self.result.startTest(test)
self.result.time(self._now())
try:
method(test, *args, **kwargs)
finally:
self.result.stopTest(test)
finally:
self.semaphore.release()
def addError(self, test, err=None, details=None):
self._add_result_with_semaphore(self.result.addError,
test, err, details=details)
def addExpectedFailure(self, test, err=None, details=None):
self._add_result_with_semaphore(self.result.addExpectedFailure,
test, err, details=details)
def addFailure(self, test, err=None, details=None):
self._add_result_with_semaphore(self.result.addFailure,
test, err, details=details)
def addSkip(self, test, reason=None, details=None):
self._add_result_with_semaphore(self.result.addSkip,
test, reason, details=details)
def addSuccess(self, test, details=None):
self._add_result_with_semaphore(self.result.addSuccess,
test, details=details)
def addUnexpectedSuccess(self, test, details=None):
self._add_result_with_semaphore(self.result.addUnexpectedSuccess,
test, details=details)
def startTestRun(self):
self.semaphore.acquire()
try:
self.result.startTestRun()
finally:
self.semaphore.release()
def stopTestRun(self):
self.semaphore.acquire()
try:
self.result.stopTestRun()
finally:
self.semaphore.release()
def done(self):
self.semaphore.acquire()
try:
self.result.done()
finally:
self.semaphore.release()
def startTest(self, test):
self._test_start = self._now()
super(ThreadsafeForwardingResult, self).startTest(test)
def wasSuccessful(self):
return self.result.wasSuccessful()
class ExtendedToOriginalDecorator(object):
"""Permit new TestResult API code to degrade gracefully with old results.
This decorates an existing TestResult and converts missing outcomes
such as addSkip to older outcomes such as addSuccess. It also supports
the extended details protocol. In all cases the most recent protocol
is attempted first, and fallbacks only occur when the decorated result
does not support the newer style of calling.
"""
def __init__(self, decorated):
self.decorated = decorated
def __getattr__(self, name):
return getattr(self.decorated, name)
def addError(self, test, err=None, details=None):
self._check_args(err, details)
if details is not None:
try:
return self.decorated.addError(test, details=details)
except TypeError:
# have to convert
err = self._details_to_exc_info(details)
return self.decorated.addError(test, err)
def addExpectedFailure(self, test, err=None, details=None):
self._check_args(err, details)
addExpectedFailure = getattr(
self.decorated, 'addExpectedFailure', None)
if addExpectedFailure is None:
return self.addSuccess(test)
if details is not None:
try:
return addExpectedFailure(test, details=details)
except TypeError:
# have to convert
err = self._details_to_exc_info(details)
return addExpectedFailure(test, err)
def addFailure(self, test, err=None, details=None):
self._check_args(err, details)
if details is not None:
try:
return self.decorated.addFailure(test, details=details)
except TypeError:
# have to convert
err = self._details_to_exc_info(details)
return self.decorated.addFailure(test, err)
def addSkip(self, test, reason=None, details=None):
self._check_args(reason, details)
addSkip = getattr(self.decorated, 'addSkip', None)
if addSkip is None:
return self.decorated.addSuccess(test)
if details is not None:
try:
return addSkip(test, details=details)
except TypeError:
# extract the reason if it's available
try:
reason = ''.join(details['reason'].iter_text())
except KeyError:
reason = _details_to_str(details)
return addSkip(test, reason)
def addUnexpectedSuccess(self, test, details=None):
outcome = getattr(self.decorated, 'addUnexpectedSuccess', None)
if outcome is None:
try:
test.fail("")
except test.failureException:
return self.addFailure(test, sys.exc_info())
if details is not None:
try:
return outcome(test, details=details)
except TypeError:
pass
return outcome(test)
def addSuccess(self, test, details=None):
if details is not None:
try:
return self.decorated.addSuccess(test, details=details)
except TypeError:
pass
return self.decorated.addSuccess(test)
def _check_args(self, err, details):
param_count = 0
if err is not None:
param_count += 1
if details is not None:
param_count += 1
if param_count != 1:
raise ValueError("Must pass only one of err '%s' and details '%s"
% (err, details))
def _details_to_exc_info(self, details):
"""Convert a details dict to an exc_info tuple."""
return (_StringException,
_StringException(_details_to_str(details)), None)
def done(self):
try:
return self.decorated.done()
except AttributeError:
return
def progress(self, offset, whence):
method = getattr(self.decorated, 'progress', None)
if method is None:
return
return method(offset, whence)
@property
def shouldStop(self):
return self.decorated.shouldStop
def startTest(self, test):
return self.decorated.startTest(test)
def startTestRun(self):
try:
return self.decorated.startTestRun()
except AttributeError:
return
def stop(self):
return self.decorated.stop()
def stopTest(self, test):
return self.decorated.stopTest(test)
def stopTestRun(self):
try:
return self.decorated.stopTestRun()
except AttributeError:
return
def tags(self, new_tags, gone_tags):
method = getattr(self.decorated, 'tags', None)
if method is None:
return
return method(new_tags, gone_tags)
def time(self, a_datetime):
method = getattr(self.decorated, 'time', None)
if method is None:
return
return method(a_datetime)
def wasSuccessful(self):
return self.decorated.wasSuccessful()
class _StringException(Exception):
"""An exception made from an arbitrary string."""
if not str_is_unicode:
def __init__(self, string):
if type(string) is not unicode:
raise TypeError("_StringException expects unicode, got %r" %
(string,))
Exception.__init__(self, string)
def __str__(self):
return self.args[0].encode("utf-8")
def __unicode__(self):
return self.args[0]
# For 3.0 and above the default __str__ is fine, so we don't define one.
def __hash__(self):
return id(self)
def __eq__(self, other):
try:
return self.args == other.args
except AttributeError:
return False
def _details_to_str(details):
"""Convert a details dict to a string."""
chars = []
# sorted is for testing, may want to remove that and use a dict
# subclass with defined order for items instead.
for key, content in sorted(details.items()):
if content.content_type.type != 'text':
chars.append('Binary content: %s\n' % key)
continue
chars.append('Text attachment: %s\n' % key)
chars.append('------------\n')
chars.extend(content.iter_text())
if not chars[-1].endswith('\n'):
chars.append('\n')
chars.append('------------\n')
return _u('').join(chars)
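# Example (illustrative sketch, not part of the upstream module): the extended
# protocol accepts a 'details' dict of name -> testtools content objects in
# place of an exc_info tuple. text_content is assumed to be available from
# testtools.content.
#
# from testtools.content import text_content
#
# result = TestResult()
# result.startTestRun()
# result.startTest(test)  # 'test' is some unittest.TestCase instance
# result.addSkip(test, details={'reason': text_content('pyGL is not available')})
# result.stopTest(test)
# result.stopTestRun()
# result.skip_reasons     # {'pyGL is not available': [test]}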
| gpl-2.0 |
hyperdriveguy/pyUpdate | pyUpdate.py | 1 | 1574 | #!/usr/bin/python3
# Import modules
from os import system as do
from time import sleep
# Set Variables
upCache = " "
upSys = " "
remAuto = " "
# define how to get config
def getConfig(filename):
import imp
f = open(filename)
global data
data = imp.load_source('data', '', f)
f.close()
# path to "config" file
getConfig('/home/carson/.pyUpdate/updaterConfig.txt')
if data.autoCache == 0:
    while upCache not in ("y", "n"):
upCache = input("Update cache? (Recommended) y/n")
if upCache == "y":
do("clear")
do("sudo apt-get update")
if upCache == "n":
print("Not updating cache")
sleep(3)
if data.autoCache == 1:
do("clear")
do("sudo apt-get update --force-yes")
if data.autoUpdate == 0:
    while upSys not in ("y", "n"):
upSys = input("Update system? (Recommended) y/n")
if upSys == "y":
do("clear")
do("sudo apt-get upgrade --force-yes")
if upSys == "n":
print("Not updating system")
sleep(3)
if data.autoUpdate == 1:
do("clear")
do("sudo apt-get upgrade --force-yes")
if data.autoRemove == 0:
    while remAuto not in ("y", "n"):
remAuto = input("Remove automatically installed packages? (Recommended) y/n")
if remAuto == "y":
do("clear")
do("sudo apt-get autoremove")
if remAuto == "n":
print("Not removing automatically installed packages")
sleep(3)
if data.autoRemove == 1:
do("clear")
do("sudo apt-get autoremove --force-yes")
| mit |
chrisvans/roastdoge | profiling/tests.py | 1 | 13749 | # Django
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.core.urlresolvers import reverse
from django.http import JsonResponse
from django.template.loader import render_to_string
from django.test import TestCase, Client, RequestFactory
# Ours
from coffee.factories import CoffeeFactory
import factories
import models
import ajax
import forms
import views
# Third Party
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.support import ui
from rest_framework.test import APIRequestFactory
# Python
import time
class TestRoastProfileDetailFunctional(StaticLiveServerTestCase):
"""
Test that the front-end user interface for the roast profile detail view / charts
works as expected.
"""
@classmethod
def setUpClass(cls):
cls.selenium = WebDriver()
super(TestRoastProfileDetailFunctional, cls).setUpClass()
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
super(TestRoastProfileDetailFunctional, cls).tearDownClass()
def setUp(self):
self.coffee = CoffeeFactory.create()
self.coffee._generate_profile()
self.roastprofile = self.coffee.roastprofile_set.all()[0]
def tearDown(self):
self.coffee.delete()
def test_comment_form_create_and_post(self):
"""
Test that a user can create a comment from the roastprofile detail page.
"""
self.selenium.get(
'%s%s' % (
self.live_server_url,
reverse('roastprofile-detail', args=(self.roastprofile.id,))
)
)
# Wait for page to fully load // TODO: Find a better way, I should never have to use time.sleep in
# a test. If it takes longer than a second, it could intermittently fail.
time.sleep(1)
# Click on the (time=21) point on the chart, which should create a form
self.selenium.find_element_by_class_name("nv-path-21").click()
time.sleep(1) # Wait for the form to be rendered
# Find the form, if this fails, the form wasn't created.
comment_form = self.selenium.find_element_by_id('id_comment')
comment_form.send_keys('My Comment')
# Select the submit button, submit the form.
self.selenium.find_element_by_id('submit-pointcomment').click()
time.sleep(1) # Wait for the form to submit and be rendered
# Ensure that the submitted comment exists with correct text, and that a new blank form was rendered.
self.selenium.find_element_by_class_name('comment')
self.selenium.find_element_by_id('id_comment')
comment = models.PointComment.objects.filter(comment='My Comment')
self.assertEqual(comment.exists(), True)
# Test that the comment svg is properly created and placed on the chart.
self.selenium.find_element_by_css_selector(".svg-comment-icon")
def test_comment_form_delete(self):
"""
Test that a user can delete a comment from the roast profile detail page.
"""
self.selenium.get(
'%s%s' % (
self.live_server_url,
reverse('roastprofile-detail', args=(self.roastprofile.id,))
)
)
# Grab the first point ( by time ) on a roast profile
# and create a comment for it
firstpoint = self.roastprofile.temppoint_set.all().order_by('time')[0]
newcomment = factories.PointCommentFactory.create(point=firstpoint, comment="Le Commentzorz")
time.sleep(1)
# Click on the first point, assert that the comment we just created renders
self.selenium.find_element_by_class_name("nv-path-0").click()
self.selenium.find_elements_by_class_name("id_comment")
time.sleep(1)
# Assert that we can delete the comment from the page
self.selenium.find_element_by_css_selector(
".comment .comment-delete"
).click()
time.sleep(1)
comment_query = models.PointComment.objects.filter(id=newcomment.id)
self.assertEqual(comment_query.exists(), False)
def test_select_roastprofile_dropdown(self):
"""
Test that a user can click on the dropdown list of roastprofiles, and that data is
properly rendered in the chart.
"""
self.coffee._generate_profile()
self.selenium.get(
'%s%s' % (
self.live_server_url,
reverse('roastprofile-detail', args=(self.roastprofile.id,))
)
)
generated_rp = models.RoastProfile.objects.exclude(id=self.roastprofile.id).get()
self.selenium.find_element_by_css_selector(
"select#id_roastprofile_select > option[value='%s']" % generated_rp.id
).click()
time.sleep(1)
waiting = True
timeout = 10
count = 0
while waiting:
try:
for index, point in enumerate(generated_rp.temppoint_set.all()):
self.selenium.find_element_by_css_selector(
"g.nv-group.nv-series-1 .nv-point-%s" % index
).click()
waiting = False
except NoSuchElementException as e:
if count > timeout:
raise e
time.sleep(1)
count += 1
                # keep waiting; retry until the points are found or the timeout is hit
def test_record_newprofile(self):
"""
Test that a user can record a new profile, and that the proper data is grabbed
and rendered on the chart via polling.
"""
self.selenium.get(
'%s%s' % (
self.live_server_url,
reverse('roastprofile-detail', args=(self.roastprofile.id,))
)
)
self.selenium.find_element_by_id("listen-newprofile").click()
waiting = True
timeout = 10
count = 0
while waiting:
try:
new_rp = models.RoastProfile.objects.exclude(id=self.roastprofile.id).get()
waiting = False
except models.RoastProfile.DoesNotExist as e:
if count > timeout:
raise e
time.sleep(1)
count += 1
                # keep waiting; retry until the new profile exists or the timeout is hit
for i in range(30):
models.TempPoint.objects.create(
roast_profile=new_rp,
time=str(i+1),
temperature='%s.1' % (str(i*5))
)
# assert that all the points were rendered
# The script will take at least 5 seconds before it will grab the data and render it.
time.sleep(5)
waiting = True
timeout = 10
count = 0
while waiting:
try:
for index, point in enumerate(new_rp.temppoint_set.all()):
self.selenium.find_element_by_css_selector(
"g.nv-group.nv-series-1 .nv-point-%s" % index
).click()
waiting = False
except NoSuchElementException as e:
if count > timeout:
raise e
time.sleep(1)
count += 1
                # keep waiting; retry until the points are rendered or the timeout is hit
self.selenium.find_element_by_id("listen-newprofile").click()
class TestAjaxViews(TestCase):
"""
Test that every API route that will be called via JS AJAX will work as expected.
"""
def setUp(self):
self.coffee = CoffeeFactory.create()
self.coffee._generate_profile()
self.roastprofile = self.coffee.roastprofile_set.all()[0]
self.request_factory = RequestFactory()
self.api_request_factory = APIRequestFactory()
def tearDown(self):
self.coffee.delete()
def test_temppoint_comment_create_form(self):
"""
Test that this view properly responds with a rendered form as JSON.
"""
some_temppoint = self.roastprofile.temppoint_set.all()[0]
request = self.api_request_factory.get(
reverse('rest-pointcomment-get-form'),
{'id': some_temppoint.id }
)
view = views.PointCommentViewSet.as_view(actions={'get':'get_form'})
response = view(request, pk=some_temppoint.id)
data = render_to_string(
'_includes/forms/point_comment_form.jade',
{
'point': some_temppoint,
'form': forms.PointCommentForm(data={'point':some_temppoint.id}),
'comments': some_temppoint.pointcomment_set.all().order_by('created')
}
)
expected_response = JsonResponse({'data':data})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, expected_response.content)
def test_temppoint_comment_create(self):
"""
Test that this view properly creates a comment on the temppoint.
"""
some_temppoint = self.roastprofile.temppoint_set.all()[0]
comment = 'I made a comment dood'
request = self.api_request_factory.post(
reverse('rest-pointcomment-list',),
{'point':some_temppoint.id, 'comment': comment},
)
view = views.PointCommentViewSet.as_view(actions={'post':'create'})
response = view(request)
self.assertEqual(response.status_code, 201)
comment_queryset = models.PointComment.objects.filter(point__id=some_temppoint.id, comment=comment)
self.assertEqual(comment_queryset.exists(), True)
self.assertEqual(comment_queryset.count(), 1)
def test_comment_delete(self):
"""
Test that this view deletes a PointComment based on it's ID, and returns appropriate data
for the JavaScript to use.
"""
some_temppoint = self.roastprofile.temppoint_set.all()[0]
pointcomment = factories.PointCommentFactory.create(point=some_temppoint, comment="Hay")
request = self.api_request_factory.delete(
reverse(
'rest-pointcomment-delete-and-respond',
kwargs={'pk':pointcomment.id},
)
)
self.assertEqual(models.PointComment.objects.filter(id=pointcomment.id).exists(), True)
view = views.PointCommentViewSet.as_view(actions={'delete':'delete_and_respond'})
response = view(request, pk=pointcomment.id)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, '{"deletedCommentID": %s, "hasComments": false}' % pointcomment.id)
self.assertEqual(models.PointComment.objects.filter(id=pointcomment.id).exists(), False)
def test_roastprofile_create(self):
"""
Test that this view creates a new RoastProfile with a Coffee as it's parent based
on that Coffee's ID.
"""
request = self.api_request_factory.post(
reverse('rest-roastprofile-list'),
{'coffee': self.coffee.id, 'name': 'Test Profile'}
)
self.assertEqual(self.coffee.roastprofile_set.all().count(), 1)
view = views.RoastProfileViewSet.as_view(actions={'post':'create'})
response = view(request)
self.assertEqual(response.status_code, 201)
self.assertEqual(self.coffee.roastprofile_set.all().count(), 2)
def test_roastprofile_delete(self):
"""
        Test that this view deletes a RoastProfile based on its ID.
"""
request = self.api_request_factory.delete(
reverse(
'rest-roastprofile-detail',
kwargs={'pk': self.roastprofile.id}
)
)
self.assertEqual(self.coffee.roastprofile_set.all().count(), 1)
view = views.RoastProfileViewSet.as_view(actions={'delete':'destroy'})
response = view(request, pk=self.roastprofile.id)
self.assertEqual(response.status_code, 204)
self.assertEqual(self.coffee.roastprofile_set.all().exists(), False)
def test_roastprofile_graph_data(self):
"""
Test that this view properly returns a JSON response with 'graphData' that is
a JSON encoded form of a RoastProfile's get_temp_graph_data method.
"""
request = self.api_request_factory.get(
reverse(
'rest-roastprofile-detail',
kwargs={'pk': self.roastprofile.id}
)
)
view = views.RoastProfileViewSet.as_view(actions={'get':'retrieve'})
response = view(request, pk=self.roastprofile.id)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.data['temp_graph_data'],
self.roastprofile._get_temp_graph_data()
)
def test_roastprofile_graph_data_slice(self):
"""
Test that this view properly returns a slice of a set of related TempPoints from a RoastProfile.
"""
slice_start = '5'
request = self.api_request_factory.get(
reverse('rest-roastprofile-get-graph-data-slice'),
{
'id': self.roastprofile.id,
'sliceStart': slice_start
}
)
view = views.RoastProfileViewSet.as_view(actions={'get':'get_graph_data_slice'})
response = view(request)
expected_data = {
'graphDataValues': self.roastprofile.get_temp_graph_data_slice(slice_start),
'lastSlice': self.roastprofile.temppoint_set.all().order_by('-time')[0].time
}
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, JsonResponse(expected_data).content) | mit |
kinnngg/knightofsorrow.tk | crowbar-master/lib/core/threadpool.py | 5 | 1322 |
try:
import sys
from Queue import Queue
from threading import Thread
from lib.core.exceptions import CrowbarExceptions
except Exception, err:
from lib.core.exceptions import CrowbarExceptions
raise CrowbarExceptions(str(err))
class Worker(Thread):
def __init__(self, tasks):
Thread.__init__(self)
self.tasks = tasks
self.daemon = True
self.start()
def run(self):
        while True:
            func, args, kargs = self.tasks.get(True, None)
            if not func:
                # a None task is the shutdown sentinel queued by wait_completion()
                break
            try:
                func(*args, **kargs)
            except:
                # swallow task errors so one failing task never kills the worker
                pass
            self.tasks.task_done()
class ThreadPool:
def __init__(self, num_threads):
self.threads = []
self.num_threads = num_threads
self.tasks = Queue(self.num_threads)
for _ in range(self.num_threads):
worker = Worker(self.tasks)
self.threads.append(worker)
def add_task(self, func, *args, **kargs):
self.tasks.put((func, args, kargs))
def wait_completion(self):
self.tasks.join()
for _ in range(self.num_threads):
self.add_task(None, None, None)
for t in self.threads:
t.join()
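# Illustrative usage sketch (not part of the original crowbar module): how the
# pool above is typically driven.  `check_host` and its arguments are
# hypothetical placeholders for whatever callable should be fanned out across
# the workers.
#
#   pool = ThreadPool(num_threads=4)
#   for host in ("10.0.0.1", "10.0.0.2", "10.0.0.3"):
#       pool.add_task(check_host, host, port=22)
#   pool.wait_completion()  # joins the queue, then queues one None-task
#                           # sentinel per worker so every thread exits cleanly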
| mit |
DutchDanny/kernel_kk443_sense_m8ace | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='[email protected]',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
Stanford-Online/edx-platform | common/djangoapps/student/tests/test_credit.py | 13 | 9724 | """
Tests for credit courses on the student dashboard.
"""
import datetime
import unittest
import ddt
import pytz
from django.conf import settings
from django.urls import reverse
from django.test.utils import override_settings
from mock import patch
from openedx.core.djangoapps.credit import api as credit_api
from openedx.core.djangoapps.credit.models import CreditCourse, CreditEligibility, CreditProvider
from student.models import CourseEnrollmentAttribute
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
TEST_CREDIT_PROVIDER_SECRET_KEY = "931433d583c84ca7ba41784bad3232e6"
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@override_settings(CREDIT_PROVIDER_SECRET_KEYS={
"hogwarts": TEST_CREDIT_PROVIDER_SECRET_KEY,
})
@patch.dict(settings.FEATURES, {"ENABLE_CREDIT_ELIGIBILITY": True})
@ddt.ddt
class CreditCourseDashboardTest(ModuleStoreTestCase):
"""
Tests for credit courses on the student dashboard.
"""
USERNAME = "ron"
PASSWORD = "mobiliarbus"
PROVIDER_ID = "hogwarts"
PROVIDER_NAME = "Hogwarts School of Witchcraft and Wizardry"
PROVIDER_STATUS_URL = "http://credit.example.com/status"
def setUp(self):
"""Create a course and an enrollment. """
super(CreditCourseDashboardTest, self).setUp()
# Create a user and log in
self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.assertTrue(result, msg="Could not log in")
# Create a course and configure it as a credit course
self.course = CourseFactory()
CreditCourse.objects.create(course_key=self.course.id, enabled=True) # pylint: disable=no-member
# Configure a credit provider
CreditProvider.objects.create(
provider_id=self.PROVIDER_ID,
display_name=self.PROVIDER_NAME,
provider_status_url=self.PROVIDER_STATUS_URL,
enable_integration=True,
)
# Configure a single credit requirement (minimum passing grade)
credit_api.set_credit_requirements(
self.course.id, # pylint: disable=no-member
[
{
"namespace": "grade",
"name": "grade",
"display_name": "Final Grade",
"criteria": {
"min_grade": 0.8
}
}
]
)
# Enroll the user in the course as "verified"
self.enrollment = CourseEnrollmentFactory(
user=self.user,
course_id=self.course.id, # pylint: disable=no-member
mode="verified"
)
def test_not_eligible_for_credit(self):
# The user is not yet eligible for credit, so no additional information should be displayed on the dashboard.
response = self._load_dashboard()
self.assertNotContains(response, "credit-eligibility-msg")
self.assertNotContains(response, "purchase-credit-btn")
def test_eligible_for_credit(self):
# Simulate that the user has completed the only requirement in the course
# so the user is eligible for credit.
self._make_eligible()
# The user should have the option to purchase credit
response = self._load_dashboard()
self.assertContains(response, "credit-eligibility-msg")
self.assertContains(response, "purchase-credit-btn")
# Move the eligibility deadline so it's within 30 days
eligibility = CreditEligibility.objects.get(username=self.USERNAME)
eligibility.deadline = datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=29)
eligibility.save()
# The user should still have the option to purchase credit,
# but there should also be a message urging the user to purchase soon.
response = self._load_dashboard()
self.assertContains(response, "credit-eligibility-msg")
self.assertContains(response, "purchase-credit-btn")
self.assertContains(response, "You have completed this course and are eligible")
def test_purchased_credit(self):
# Simulate that the user has purchased credit, but has not
# yet initiated a request to the credit provider
self._make_eligible()
self._purchase_credit()
response = self._load_dashboard()
self.assertContains(response, "credit-request-not-started-msg")
def test_purchased_credit_and_request_pending(self):
# Simulate that the user has purchased credit and initiated a request,
# but we haven't yet heard back from the credit provider.
self._make_eligible()
self._purchase_credit()
self._initiate_request()
# Expect that the user's status is "pending"
response = self._load_dashboard()
self.assertContains(response, "credit-request-pending-msg")
def test_purchased_credit_and_request_approved(self):
# Simulate that the user has purchased credit and initiated a request,
# and had that request approved by the credit provider
self._make_eligible()
self._purchase_credit()
request_uuid = self._initiate_request()
self._set_request_status(request_uuid, "approved")
# Expect that the user's status is "approved"
response = self._load_dashboard()
self.assertContains(response, "credit-request-approved-msg")
def test_purchased_credit_and_request_rejected(self):
# Simulate that the user has purchased credit and initiated a request,
# and had that request rejected by the credit provider
self._make_eligible()
self._purchase_credit()
request_uuid = self._initiate_request()
self._set_request_status(request_uuid, "rejected")
        # Expect that the user's status is "rejected"
response = self._load_dashboard()
self.assertContains(response, "credit-request-rejected-msg")
def test_credit_status_error(self):
# Simulate an error condition: the user has a credit enrollment
# but no enrollment attribute indicating which provider the user
# purchased credit from.
self._make_eligible()
self._purchase_credit()
CourseEnrollmentAttribute.objects.all().delete()
# Expect an error message
response = self._load_dashboard()
self.assertContains(response, "credit-error-msg")
def _load_dashboard(self):
"""Load the student dashboard and return the HttpResponse. """
return self.client.get(reverse("dashboard"))
def _make_eligible(self):
"""Make the user eligible for credit in the course. """
credit_api.set_credit_requirement_status(
self.user,
self.course.id, # pylint: disable=no-member
"grade", "grade",
status="satisfied",
reason={
"final_grade": 0.95
}
)
def _purchase_credit(self):
"""Purchase credit from a provider in the course. """
self.enrollment.mode = "credit"
self.enrollment.save() # pylint: disable=no-member
CourseEnrollmentAttribute.objects.create(
enrollment=self.enrollment,
namespace="credit",
name="provider_id",
value=self.PROVIDER_ID,
)
def _initiate_request(self):
"""Initiate a request for credit from a provider. """
request = credit_api.create_credit_request(
self.course.id, # pylint: disable=no-member
self.PROVIDER_ID,
self.USERNAME
)
return request["parameters"]["request_uuid"]
def _set_request_status(self, uuid, status):
"""Set the status of a request for credit, simulating the notification from the provider. """
credit_api.update_credit_request_status(uuid, self.PROVIDER_ID, status)
@ddt.data(
(
[u'Arizona State University'],
'You are now eligible for credit from Arizona State University'),
(
[u'Arizona State University', u'Hogwarts School of Witchcraft'],
'You are now eligible for credit from Arizona State University and Hogwarts School of Witchcraft'
),
(
[u'Arizona State University', u'Hogwarts School of Witchcraft and Wizardry', u'Charter Oak'],
'You are now eligible for credit from Arizona State University, Hogwarts School'
' of Witchcraft and Wizardry, and Charter Oak'
),
([], 'You have completed this course and are eligible'),
(None, 'You have completed this course and are eligible')
)
@ddt.unpack
def test_eligible_for_credit_with_providers_names(self, providers_list, credit_string):
"""Verify the message on dashboard with different number of providers."""
# Simulate that the user has completed the only requirement in the course
# so the user is eligible for credit.
self._make_eligible()
# The user should have the option to purchase credit
with patch('student.views.dashboard.get_credit_provider_display_names') as mock_method:
mock_method.return_value = providers_list
response = self._load_dashboard()
self.assertContains(response, "credit-eligibility-msg")
self.assertContains(response, "purchase-credit-btn")
self.assertContains(response, credit_string)
| agpl-3.0 |
flccrakers/dj-tango | bin/correct-tangos.py | 1 | 1733 | #!/usr/bin/python3
# -*- coding:Utf-8 -*
import os
import time
from djtango.data import djDataConnection
from djtango.tangosong import TangoSong
inputFile = './tango-a-corriger.csv'
def loadcsv(file):
ret=[]
    inF = open(file, "r")  # open the CSV file passed in via the file argument
for line in inF :
res = line.strip().split(";")
ret.append(res)
#print(res)
return ret
def printRest(tangoList):
left = 0
for index in range(1, len(tangoList)):
table = tangoList[index]
if table[3] == "":
left+=1
    print(str(left)+" to inspect, "+str(len(tangoList)-left)+" checked, out of a total of "+str(len(tangoList)))
def saveAndExit(file):
outF = open(file, "w")
s = ';'
for i in range (0,len(tangoList)):
outF.write(s.join(tangoList[i])+"\n")
    outF.close()
exit(0)
tangoList = loadcsv(inputFile)
printRest(tangoList)
#first = True
i = 1
curIndex = []
for index in range(1, len(tangoList)):
table = tangoList[index]
if table[3] == "":
print(table)
curIndex.append(index)
command = "firefox \"http://www.el-recodo.com/music?T="+table[1].replace(" ", '+')+"&lang=fr\" &"
#print (command)
os.system(command)
time.sleep(0.25)
i+=1
#else:
#print("déjà corrigé "+str(i))
if i>5:
mdpe = input("une fois corrigé, appuyez sur entrer pour quitter et sauvegarder taper exit : ")
print("mdpe is :" +mdpe)
count = 0
for x in curIndex:
if mdpe == 'exit':
saveAndExit(inputFile)
elif mdpe[count] == 'c':
print (tangoList[x])
tangoList[x][3] = 'corrigé'
print (tangoList[x])
elif mdpe[count] == 'n':
print (tangoList[x])
tangoList[x][3] = "N’existe pas chez El Recodo"
print (tangoList[x])
print("\n###########\n")
count+=1
curIndex = []
i = 1
| gpl-3.0 |
VDBWRAIR/bio_pieces | bio_bits/fasta.py | 2 | 4383 | from __future__ import print_function
import sys
import argparse
import string
import functools
from Bio.SeqIO import parse, FastaIO
from Bio.Data import IUPACData
from . import util
from . import ctleptop
# Maximum amount of permutations for ambiguous sequences that will
# be generated
MAX_PERMS = 100
def parse_args():
parser = argparse.ArgumentParser(
description='''Simplistic fasta manipulator'''
)
parser.add_argument(
'fasta',
help='Fasta file path or - to read from standard input'
)
parser.add_argument(
'--wrap', default=False, action='store_const', const=80,
help='Default action. Converts all multi line sequences to single lines'
)
parser.add_argument(
'--split', action='store_true', default=False,
help='If set then split the input fasta on each identifier '
' and create a new file for each identifier'
)
parser.add_argument(
'--disambiguate', action='store_true', default=False,
help='Generate all permutations sequence based on ambiguous nucleotide'
' sequences'
)
return parser.parse_args()
def writefasta(records, outfile=sys.stdout, wrap=None):
'''
:param SeqRecord records: Sequence of SeqRecord
:param handle|string outfile: Output file or handle to write to
:param bool wrap: Wrap sequences at 80 chars or not
'''
fasta_out = FastaIO.FastaWriter(outfile, wrap=wrap)
fasta_out.write_file(records)
def splitfasta(records, wrap=False):
for seq in records:
outfile = str(seq.id)
for p in string.punctuation:
outfile = outfile.replace(p, '_')
outfile = outfile + '.fasta'
with open(outfile, 'w') as fh:
writefasta([seq], fh, wrap)
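# Illustrative sketch (not part of the original module): the helpers above are
# normally fed records from Bio.SeqIO.parse; the file names here are
# hypothetical.
#
#   records = parse('input.fasta', 'fasta')
#   writefasta(records, sys.stdout, wrap=80)       # wrap sequence lines at 80 chars
#   splitfasta(parse('input.fasta', 'fasta'))      # one <id>.fasta file per record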
def disambiguate(records):
all_records = []
for record in records:
make_rec = functools.partial(
util.make_seqrec,
id=record.id, name=record.name, description=record.description
)
# Get the input sequence
sequence = str(record.seq)
# Generate all permutations of that sequence
#perms = list(ctleptop.nearbyPermutations(sequence))
perms = permutate_ambiguous_sequence(record.id, sequence)
for perm in perms:
if perm:
yield make_rec(perm)
def permutate_ambiguous_sequence(seq_id, seq_str):
    '''
    Expand every IUPAC ambiguity code in seq_str into its concrete bases and
    return a list of all resulting unambiguous sequences.  If the expansion
    would exceed MAX_PERMS permutations the sequence is skipped and an empty
    list is returned.
    '''
# Abiguous mapping table from BioPython
amb_values = IUPACData.ambiguous_dna_values
a_bases = []
total_perms = 1
for nt in seq_str:
amb_bases = amb_values.get(nt, nt)
if len(amb_bases) > 1:
a_bases.append(nt)
total_perms *= len(amb_bases)
# Start ambiguous sequences with our input sequence
amb_seqs = [seq_str]
# i holds current position
for i in range(len(seq_str)):
nt = seq_str[i]
amb_bases = amb_values.get(nt, nt)
# Skip all non-ambiguous bases
        if len(amb_bases) == 1:
continue
#print("i: {0}".format(i))
cur_seqs = []
# Go through each sequence again and and generate ambiguous bases
for seq in amb_seqs:
#print("nt: {0}".format(nt))
# build up permutations for the current ambiguous base
for base in amb_bases:
#print("base: {0}".format(base))
#print("seq[:i] + base + seq[i+1:]".format(seq[:i], base, seq[i+1:]))
cur_seqs.append(seq[:i] + base + seq[i+1:])
#print("cur_seqs: {0}".format(cur_seqs))
amb_seqs = cur_seqs
if len(amb_seqs) > MAX_PERMS:
sys.stderr.write("Sequence {0} has {1} ambiguous bases that would produce {2} permutations and was skipped\n".format(seq_id, len(a_bases), total_perms))
return []
#print("amb_seqs: {0}".format(amb_seqs))
return amb_seqs
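# Worked example (illustrative, not part of the original module): with
# IUPACData.ambiguous_dna_values mapping 'Y' to "CT", the single ambiguous base
# in "AYG" expands into two concrete sequences:
#
#   >>> permutate_ambiguous_sequence("seq1", "AYG")
#   ['ACG', 'ATG']
#
# A sequence whose ambiguity codes would multiply out to more than MAX_PERMS
# permutations is skipped and an empty list is returned instead.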
def main():
args = parse_args()
in_fasta = args.fasta
# Normalize input stream
if in_fasta == '-':
_input = sys.stdin
else:
_input = open(in_fasta)
input_records = parse(_input, 'fasta')
if args.split:
splitfasta(input_records, args.wrap)
elif args.disambiguate:
recs = disambiguate(input_records)
writefasta(recs, sys.stdout, args.wrap)
else:
writefasta(input_records, sys.stdout, wrap=args.wrap)
| gpl-2.0 |
pjg101/SickRage | lib/past/builtins/misc.py | 62 | 2500 | from __future__ import unicode_literals
import sys
import inspect
from collections import Mapping
from future.utils import PY3, exec_
if PY3:
import builtins
def apply(f, *args, **kw):
return f(*args, **kw)
from past.builtins import str as oldstr
def chr(i):
"""
        Return a byte-string of one character with ordinal i; 0 <= i <= 255
"""
return oldstr(bytes((i,)))
def cmp(x, y):
"""
cmp(x, y) -> integer
Return negative if x<y, zero if x==y, positive if x>y.
"""
return (x > y) - (x < y)
from sys import intern
def oct(number):
"""oct(number) -> string
Return the octal representation of an integer
"""
return '0' + builtins.oct(number)[2:]
raw_input = input
from imp import reload
unicode = str
unichr = chr
xrange = range
else:
import __builtin__
apply = __builtin__.apply
chr = __builtin__.chr
cmp = __builtin__.cmp
execfile = __builtin__.execfile
intern = __builtin__.intern
oct = __builtin__.oct
raw_input = __builtin__.raw_input
reload = __builtin__.reload
unicode = __builtin__.unicode
unichr = __builtin__.unichr
xrange = __builtin__.xrange
if PY3:
def execfile(filename, myglobals=None, mylocals=None):
"""
Read and execute a Python script from a file in the given namespaces.
The globals and locals are dictionaries, defaulting to the current
globals and locals. If only globals is given, locals defaults to it.
"""
if myglobals is None:
# There seems to be no alternative to frame hacking here.
caller_frame = inspect.stack()[1]
myglobals = caller_frame[0].f_globals
mylocals = caller_frame[0].f_locals
elif mylocals is None:
# Only if myglobals is given do we set mylocals to it.
mylocals = myglobals
if not isinstance(myglobals, Mapping):
raise TypeError('globals must be a mapping')
if not isinstance(mylocals, Mapping):
raise TypeError('locals must be a mapping')
with open(filename, "rbU") as fin:
source = fin.read()
code = compile(source, filename, "exec")
exec_(code, myglobals, mylocals)
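# Illustrative usage sketch (not part of the original module): running a script
# in an explicit namespace on Python 3.  The file name is hypothetical.
#
#   ns = {}
#   execfile('settings_override.py', ns)   # globals and locals both become ns
#   print(ns.get('DEBUG'))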
if PY3:
__all__ = ['apply', 'chr', 'cmp', 'execfile', 'intern', 'raw_input',
'reload', 'unichr', 'unicode', 'xrange']
else:
__all__ = []
| gpl-3.0 |
erkrishna9/odoo | addons/base_gengo/__init__.py | 377 | 1122 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Openerp sa (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_company
import ir_translation
import wizard
import controller
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
benkonrath/transip-api | tests/service_tests/test_domain.py | 1 | 11788 | import unittest
from transip.client import MODE_RO, MODE_RW
from transip.service import DomainService
from transip.service.objects import DnsEntry
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import patch, Mock
class TestDomainService(unittest.TestCase):
@patch('transip.client.SudsClient')
def setUp(self, mock_client):
super(TestDomainService, self).setUp()
self.service = DomainService(login='sundayafternoon')
self.service.build_cookie = Mock(return_value={"cookie": "value"})
self.service.update_cookie = Mock()
self.i = mock_client.return_value
def set_return_value(self, method, value):
getattr(self.i.service, method).return_value = value
def _generic_test(self, soap_method, method, result, parameters=(), mode=MODE_RO):
self.set_return_value(soap_method, result)
# CALL
soap_result = getattr(self.service, method)(*parameters)
# VERIFY
self.service.build_cookie.assert_called_with(mode=mode, method=soap_method, parameters=parameters)
self.service.update_cookie.assert_called_with({"cookie": "value"})
getattr(self.i.service, soap_method).assert_called_with(*parameters)
self.assertEqual(soap_result, result)
def test_constructor(self):
# CALL
ds = DomainService(login='sundayafternoon')
# VERIFY
self.assertEqual(ds.url, 'https://api.transip.nl/wsdl/?service=DomainService')
@patch('transip.client.SudsClient')
def test_get_domains(self, mock_client):
# SETUP
ds = DomainService('sundayafternoon')
ds.build_cookie = Mock(return_value={"cookie": "value"})
ds.update_cookie = Mock()
i = mock_client.return_value
i.service.getDomainNames.return_value = ['domain1', 'domain2']
# CALL
result = ds.get_domain_names()
# VERIFY
ds.build_cookie.assert_called_with(mode=MODE_RO, method='getDomainNames', parameters=())
ds.update_cookie.assert_called_with({"cookie": "value"})
i.service.getDomainNames.assert_called_with()
self.assertEqual(result, ['domain1', 'domain2'])
@patch('transip.client.SudsClient')
def test_get_info(self, mock_client):
# SETUP
ds = DomainService(login='sundayafternoon')
ds.build_cookie = Mock(return_value={"cookie": "value"})
ds.update_cookie = Mock()
i = mock_client.return_value
getinfo_result = Mock()
getinfo_result.dnsEntries = [DnsEntry('testentry', 86400, DnsEntry.TYPE_A, '127.0.0.1')]
i.service.getInfo.return_value = getinfo_result
# CALL
result = ds.get_info('example.com')
# VERIFY
ds.build_cookie.assert_called_with(mode=MODE_RO, method='getInfo', parameters=['example.com'])
ds.update_cookie.assert_called_with({"cookie": "value"})
i.service.getInfo.assert_called_with('example.com')
self.assertEqual(result, getinfo_result)
@patch('transip.client.SudsClient')
def test_set_dns_entries(self, mock_client):
# SETUP
ds = DomainService('sundayafternoon')
ds.build_cookie = Mock(return_value={"cookie": "value"})
ds.update_cookie = Mock()
i = mock_client.return_value
i.service.setDnsEntries.return_value = None
dns_entry = DnsEntry('testentry', 86400, DnsEntry.TYPE_A, '127.0.0.1')
# CALL
result = ds.set_dns_entries('domain1', [dns_entry, ])
# VERIFY
ds.build_cookie.assert_called_with(mode=MODE_RW, method='setDnsEntries', parameters=('domain1', [dns_entry, ]))
ds.update_cookie.assert_called_with({"cookie": "value"})
i.service.setDnsEntries.assert_called_with('domain1', [dns_entry, ])
@patch('transip.client.SudsClient')
def test_add_dns_entries(self, mock_client):
ds = DomainService('sundayafternoon')
ds.build_cookie = Mock(return_value={'cookie': 'value'})
ds.update_cookie = Mock()
getinfo_result = Mock()
dns_entry1 = DnsEntry(
'testentry1',
86400,
DnsEntry.TYPE_A,
'127.0.0.1',
)
dns_entry2 = DnsEntry(
'testentry2',
86400,
DnsEntry.TYPE_A,
'127.0.0.1',
)
getinfo_result.dnsEntries = [
dns_entry1,
dns_entry2,
]
mock_client.return_value.service.getInfo.return_value = getinfo_result
dns_entry3 = DnsEntry(
'testentry3',
86400,
DnsEntry.TYPE_A,
'127.0.0.1',
)
ds.add_dns_entries('domain1', [dns_entry3])
mock_client.return_value.service.setDnsEntries.assert_called_with(
'domain1',
[dns_entry1, dns_entry2, dns_entry3],
)
@patch('transip.client.SudsClient')
def test_remove_dns_entries(self, mock_client):
ds = DomainService('sundayafternoon')
ds.build_cookie = Mock(return_value={'cookie': 'value'})
ds.update_cookie = Mock()
getinfo_result = Mock()
dns_entry1 = DnsEntry(
'testentry1',
86400,
DnsEntry.TYPE_A,
'127.0.0.1',
)
dns_entry2 = DnsEntry(
'testentry2',
86400,
DnsEntry.TYPE_A,
'127.0.0.1',
)
getinfo_result.dnsEntries = [
dns_entry1,
dns_entry2,
]
mock_client.return_value.service.getInfo.return_value = getinfo_result
ds.remove_dns_entries('domain1', [dns_entry1])
mock_client.return_value.service.setDnsEntries.assert_called_with(
'domain1',
[dns_entry2],
)
def test_batch_check_availability(self):
self._generic_test(
soap_method='batchCheckAvailability',
method='batch_check_availability',
result='mock',
parameters=(['example.com', 'example.nl'],),
mode=MODE_RO
)
with self.assertRaises(ValueError):
self._generic_test(
soap_method='batchCheckAvailability',
method='batch_check_availability',
result='mock',
parameters=(['example.com', 'example.nl'] * 11,),
mode=MODE_RO
)
def test_check_availability(self):
self._generic_test(
soap_method='checkAvailability',
method='check_availability',
result='mock',
parameters=('example.com',),
mode=MODE_RO
)
def test_get_whois(self):
self._generic_test(
soap_method='getWhois',
method='get_whois',
result='mock',
parameters=('example.com',),
mode=MODE_RO
)
def test_get_domain_names(self):
self._generic_test(
soap_method='getDomainNames',
method='get_domain_names',
result=['mock', 'mock2'],
mode=MODE_RO
)
def test_batch_get_info(self):
self._generic_test(
soap_method='batchGetInfo',
method='batch_get_info',
result=['mock', 'mock2'],
parameters=('example.com',),
mode=MODE_RO
)
def test_get_auth_code(self):
self._generic_test(
soap_method='getAuthCode',
method='get_auth_code',
result='string',
parameters=('example.com',),
mode=MODE_RO
)
def test_get_is_locked(self):
self._generic_test(
soap_method='getIsLocked',
method='get_is_locked',
result=True,
parameters=('example.com',),
mode=MODE_RO
)
def test_register(self):
self._generic_test(
soap_method='register',
method='register',
result='string',
parameters=('example.com',),
mode=MODE_RW
)
def test_cancel(self):
self._generic_test(
soap_method='cancel',
method='cancel',
result='string',
parameters=('example.com', 'domain'),
mode=MODE_RW
)
def test_transfer_with_owner_change(self):
self._generic_test(
soap_method='transferWithOwnerChange',
method='transfer_with_owner_change',
result='string',
parameters=('example.com', 'authcode'),
mode=MODE_RW
)
def test_transfer_without_owner_change(self):
self._generic_test(
soap_method='transferWithoutOwnerChange',
method='transfer_without_owner_change',
result='string',
parameters=('example.com', 'authcode'),
mode=MODE_RW
)
def test_set_nameservers(self):
self._generic_test(
soap_method='setNameservers',
method='set_nameservers',
result='string',
parameters=('example.com', 'nameservers'),
mode=MODE_RW
)
def test_set_lock(self):
self._generic_test(
soap_method='setLock',
method='set_lock',
result='string',
parameters=('example.com',),
mode=MODE_RW
)
def test_unset_lock(self):
self._generic_test(
soap_method='unsetLock',
method='unset_lock',
result='string',
parameters=('example.com',),
mode=MODE_RW
)
def test_set_owner(self):
self._generic_test(
soap_method='setOwner',
method='set_owner',
result='string',
parameters=('example.com', 'registrant_whois_contact'),
mode=MODE_RW
)
def test_set_contacts(self):
self._generic_test(
soap_method='setContacts',
method='set_contacts',
result='string',
parameters=('example.com', 'contacts'),
mode=MODE_RW
)
def test_get_all_tld_infos(self):
self._generic_test(
soap_method='getAllTldInfos',
method='get_all_tld_infos',
result='string',
mode=MODE_RO
)
def test_get_tld_info(self):
self._generic_test(
soap_method='getTldInfo',
method='get_tld_info',
result='string',
parameters=('.com',),
mode=MODE_RO
)
def test_get_current_domain_action(self):
self._generic_test(
soap_method='getCurrentDomainAction',
method='get_current_domain_action',
result='string',
parameters=('example.com',),
mode=MODE_RO
)
def test_retry_current_domain_action_with_new_data(self):
self._generic_test(
soap_method='retryCurrentDomainActionWithNewData',
method='retry_current_domain_action_with_new_data',
result='string',
parameters=('example.com',),
mode=MODE_RO
)
def test_retry_transfer_with_different_auth_code(self):
self._generic_test(
soap_method='retryTransferWithDifferentAuthCode',
method='retry_transfer_with_different_auth_code',
result='string',
parameters=('example.com', 'new_auth_code'),
mode=MODE_RO
)
def test_cancel_domain_action(self):
self._generic_test(
soap_method='cancelDomainAction',
method='cancel_domain_action',
result='string',
parameters=('example.com',),
mode=MODE_RO
)
| mit |
ezc/protobuf | python/setup.py | 32 | 7877 | #! /usr/bin/python
#
# See README for usage instructions.
import sys
import os
import subprocess
# We must use setuptools, not distutils, because we need to use the
# namespace_packages option for the "google" package.
try:
from setuptools import setup, Extension
except ImportError:
try:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, Extension
except ImportError:
sys.stderr.write(
"Could not import setuptools; make sure you have setuptools or "
"ez_setup installed.\n")
raise
from distutils.command.clean import clean as _clean
from distutils.command.build_py import build_py as _build_py
from distutils.spawn import find_executable
maintainer_email = "[email protected]"
# Find the Protocol Compiler.
if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
protoc = os.environ['PROTOC']
elif os.path.exists("../src/protoc"):
protoc = "../src/protoc"
elif os.path.exists("../src/protoc.exe"):
protoc = "../src/protoc.exe"
elif os.path.exists("../vsprojects/Debug/protoc.exe"):
protoc = "../vsprojects/Debug/protoc.exe"
elif os.path.exists("../vsprojects/Release/protoc.exe"):
protoc = "../vsprojects/Release/protoc.exe"
else:
protoc = find_executable("protoc")
def generate_proto(source):
"""Invokes the Protocol Compiler to generate a _pb2.py from the given
.proto file. Does nothing if the output already exists and is newer than
the input."""
output = source.replace(".proto", "_pb2.py").replace("../src/", "")
if (not os.path.exists(output) or
(os.path.exists(source) and
os.path.getmtime(source) > os.path.getmtime(output))):
print "Generating %s..." % output
if not os.path.exists(source):
sys.stderr.write("Can't find required file: %s\n" % source)
sys.exit(-1)
if protoc == None:
sys.stderr.write(
"protoc is not installed nor found in ../src. Please compile it "
"or install the binary package.\n")
sys.exit(-1)
protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ]
if subprocess.call(protoc_command) != 0:
sys.exit(-1)
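# Illustrative note (not part of the original file): for a source of
# "../src/google/protobuf/unittest.proto" the helper above ends up running
# roughly
#
#   protoc -I../src -I. --python_out=. ../src/google/protobuf/unittest.proto
#
# which writes google/protobuf/unittest_pb2.py relative to this setup.py.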
def GenerateUnittestProtos():
generate_proto("../src/google/protobuf/unittest.proto")
generate_proto("../src/google/protobuf/unittest_custom_options.proto")
generate_proto("../src/google/protobuf/unittest_import.proto")
generate_proto("../src/google/protobuf/unittest_import_public.proto")
generate_proto("../src/google/protobuf/unittest_mset.proto")
generate_proto("../src/google/protobuf/unittest_no_generic_services.proto")
generate_proto("google/protobuf/internal/test_bad_identifiers.proto")
generate_proto("google/protobuf/internal/more_extensions.proto")
generate_proto("google/protobuf/internal/more_extensions_dynamic.proto")
generate_proto("google/protobuf/internal/more_messages.proto")
generate_proto("google/protobuf/internal/factory_test1.proto")
generate_proto("google/protobuf/internal/factory_test2.proto")
def MakeTestSuite():
# This is apparently needed on some systems to make sure that the tests
# work even if a previous version is already installed.
if 'google' in sys.modules:
del sys.modules['google']
GenerateUnittestProtos()
import unittest
import google.protobuf.internal.generator_test as generator_test
import google.protobuf.internal.descriptor_test as descriptor_test
import google.protobuf.internal.reflection_test as reflection_test
import google.protobuf.internal.service_reflection_test \
as service_reflection_test
import google.protobuf.internal.text_format_test as text_format_test
import google.protobuf.internal.wire_format_test as wire_format_test
import google.protobuf.internal.unknown_fields_test as unknown_fields_test
import google.protobuf.internal.descriptor_database_test \
as descriptor_database_test
import google.protobuf.internal.descriptor_pool_test as descriptor_pool_test
import google.protobuf.internal.message_factory_test as message_factory_test
import google.protobuf.internal.message_cpp_test as message_cpp_test
import google.protobuf.internal.reflection_cpp_generated_test \
as reflection_cpp_generated_test
loader = unittest.defaultTestLoader
suite = unittest.TestSuite()
for test in [ generator_test,
descriptor_test,
reflection_test,
service_reflection_test,
text_format_test,
wire_format_test ]:
suite.addTest(loader.loadTestsFromModule(test))
return suite
class clean(_clean):
def run(self):
# Delete generated files in the code tree.
for (dirpath, dirnames, filenames) in os.walk("."):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
if filepath.endswith("_pb2.py") or filepath.endswith(".pyc") or \
filepath.endswith(".so") or filepath.endswith(".o") or \
filepath.endswith('google/protobuf/compiler/__init__.py'):
os.remove(filepath)
# _clean is an old-style class, so super() doesn't work.
_clean.run(self)
class build_py(_build_py):
def run(self):
# Generate necessary .proto file if it doesn't exist.
generate_proto("../src/google/protobuf/descriptor.proto")
generate_proto("../src/google/protobuf/compiler/plugin.proto")
GenerateUnittestProtos()
# Make sure google.protobuf.compiler is a valid package.
open('google/protobuf/compiler/__init__.py', 'a').close()
# _build_py is an old-style class, so super() doesn't work.
_build_py.run(self)
if __name__ == '__main__':
ext_module_list = []
# C++ implementation extension
if os.getenv("PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION", "python") == "cpp":
print "Using EXPERIMENTAL C++ Implmenetation."
ext_module_list.append(Extension(
"google.protobuf.internal._net_proto2___python",
[ "google/protobuf/pyext/python_descriptor.cc",
"google/protobuf/pyext/python_protobuf.cc",
"google/protobuf/pyext/python-proto2.cc" ],
include_dirs = [ "." ],
libraries = [ "protobuf" ]))
setup(name = 'protobuf',
version = '2.5.0',
packages = [ 'google' ],
namespace_packages = [ 'google' ],
test_suite = 'setup.MakeTestSuite',
# Must list modules explicitly so that we don't install tests.
py_modules = [
'google.protobuf.internal.api_implementation',
'google.protobuf.internal.containers',
'google.protobuf.internal.cpp_message',
'google.protobuf.internal.decoder',
'google.protobuf.internal.encoder',
'google.protobuf.internal.enum_type_wrapper',
'google.protobuf.internal.message_listener',
'google.protobuf.internal.python_message',
'google.protobuf.internal.type_checkers',
'google.protobuf.internal.wire_format',
'google.protobuf.descriptor',
'google.protobuf.descriptor_pb2',
'google.protobuf.compiler.plugin_pb2',
'google.protobuf.message',
'google.protobuf.descriptor_database',
'google.protobuf.descriptor_pool',
'google.protobuf.message_factory',
'google.protobuf.reflection',
'google.protobuf.service',
'google.protobuf.service_reflection',
'google.protobuf.text_format' ],
cmdclass = { 'clean': clean, 'build_py': build_py },
install_requires = ['setuptools'],
ext_modules = ext_module_list,
url = 'http://code.google.com/p/protobuf/',
maintainer = maintainer_email,
maintainer_email = '[email protected]',
license = 'New BSD License',
description = 'Protocol Buffers',
long_description =
"Protocol Buffers are Google's data interchange format.",
)
| bsd-3-clause |
dariemp/odoo | addons/website_sale/controllers/main.py | 32 | 42618 | # -*- coding: utf-8 -*-
import werkzeug
from openerp import SUPERUSER_ID
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
from openerp.addons.website.models.website import slug
from openerp.addons.web.controllers.main import login_redirect
PPG = 20 # Products Per Page
PPR = 4 # Products Per Row
class table_compute(object):
def __init__(self):
self.table = {}
def _check_place(self, posx, posy, sizex, sizey):
res = True
for y in range(sizey):
for x in range(sizex):
if posx+x>=PPR:
res = False
break
row = self.table.setdefault(posy+y, {})
if row.setdefault(posx+x) is not None:
res = False
break
for x in range(PPR):
self.table[posy+y].setdefault(x, None)
return res
def process(self, products):
# Compute products positions on the grid
minpos = 0
index = 0
maxy = 0
for p in products:
x = min(max(p.website_size_x, 1), PPR)
y = min(max(p.website_size_y, 1), PPR)
if index>=PPG:
x = y = 1
pos = minpos
while not self._check_place(pos%PPR, pos/PPR, x, y):
pos += 1
# if 21st products (index 20) and the last line is full (PPR products in it), break
# (pos + 1.0) / PPR is the line where the product would be inserted
# maxy is the number of existing lines
# + 1.0 is because pos begins at 0, thus pos 20 is actually the 21st block
# and to force python to not round the division operation
if index >= PPG and ((pos + 1.0) / PPR) > maxy:
break
if x==1 and y==1: # simple heuristic for CPU optimization
minpos = pos/PPR
for y2 in range(y):
for x2 in range(x):
self.table[(pos/PPR)+y2][(pos%PPR)+x2] = False
self.table[pos/PPR][pos%PPR] = {
'product': p, 'x':x, 'y': y,
'class': " ".join(map(lambda x: x.html_class or '', p.website_style_ids))
}
if index<=PPG:
maxy=max(maxy,y+(pos/PPR))
index += 1
# Format table according to HTML needs
rows = self.table.items()
rows.sort()
rows = map(lambda x: x[1], rows)
for col in range(len(rows)):
cols = rows[col].items()
cols.sort()
x += len(cols)
rows[col] = [c for c in map(lambda x: x[1], cols) if c != False]
return rows
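# Illustrative note (not part of the original controller): with PPR = 4, a
# product whose website_size_x and website_size_y are both 2 claims a 2x2 block
# of cells; the extra cells it covers are marked False and dropped from the
# final rows, while genuinely empty cells remain as None placeholders.  Rough
# shape of the returned structure (values hypothetical):
#
#   rows = table_compute().process(products)
#   # [[{'product': p1, 'x': 2, 'y': 2, 'class': ''}, None, ...], ...]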
# TODO keep with input type hidden
class QueryURL(object):
def __init__(self, path='', **args):
self.path = path
self.args = args
def __call__(self, path=None, **kw):
if not path:
path = self.path
for k,v in self.args.items():
kw.setdefault(k,v)
l = []
for k,v in kw.items():
if v:
if isinstance(v, list) or isinstance(v, set):
l.append(werkzeug.url_encode([(k,i) for i in v]))
else:
l.append(werkzeug.url_encode([(k,v)]))
if l:
path += '?' + '&'.join(l)
return path
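# Illustrative usage sketch (not part of the original controller): QueryURL
# keeps the current category/search/attrib parameters when rebuilding links, so
# templates can change the path without losing active filters.  Values are
# hypothetical and the query-string order may vary.
#
#   keep = QueryURL('/shop', category=3, search='espresso')
#   keep()                 # -> '/shop?category=3&search=espresso'
#   keep('/shop/page/2')   # -> '/shop/page/2?category=3&search=espresso'
#   keep(search=None)      # falsy values are dropped -> '/shop?category=3'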
def get_pricelist():
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
sale_order = context.get('sale_order')
if sale_order:
pricelist = sale_order.pricelist_id
else:
partner = pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id
pricelist = partner.property_product_pricelist
return pricelist
class website_sale(http.Controller):
def get_pricelist(self):
return get_pricelist()
def get_attribute_value_ids(self, product):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
currency_obj = pool['res.currency']
attribute_value_ids = []
visible_attrs = set(l.attribute_id.id
for l in product.attribute_line_ids
if len(l.value_ids) > 1)
if request.website.pricelist_id.id != context['pricelist']:
website_currency_id = request.website.currency_id.id
currency_id = self.get_pricelist().currency_id.id
for p in product.product_variant_ids:
price = currency_obj.compute(cr, uid, website_currency_id, currency_id, p.lst_price)
attribute_value_ids.append([p.id, [v.id for v in p.attribute_value_ids if v.attribute_id.id in visible_attrs], p.price, price])
else:
attribute_value_ids = [[p.id, [v.id for v in p.attribute_value_ids if v.attribute_id.id in visible_attrs], p.price, p.lst_price]
for p in product.product_variant_ids]
return attribute_value_ids
def _get_search_domain(self, search, category, attrib_values):
domain = request.website.sale_product_domain()
if search:
for srch in search.split(" "):
domain += [
'|', '|', '|', ('name', 'ilike', srch), ('description', 'ilike', srch),
('description_sale', 'ilike', srch), ('product_variant_ids.default_code', 'ilike', srch)]
if category:
domain += [('public_categ_ids', 'child_of', int(category))]
if attrib_values:
attrib = None
ids = []
for value in attrib_values:
if not attrib:
attrib = value[0]
ids.append(value[1])
elif value[0] == attrib:
ids.append(value[1])
else:
domain += [('attribute_line_ids.value_ids', 'in', ids)]
attrib = value[0]
ids = [value[1]]
if attrib:
domain += [('attribute_line_ids.value_ids', 'in', ids)]
return domain
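    # Illustrative note (not part of the original controller): for a
    # hypothetical request searching "espresso" inside category 3 with one
    # attribute filter [(2, 7)], the method above builds a domain along the
    # lines of:
    #
    #   request.website.sale_product_domain()
    #   + ['|', '|', '|', ('name', 'ilike', 'espresso'),
    #      ('description', 'ilike', 'espresso'),
    #      ('description_sale', 'ilike', 'espresso'),
    #      ('product_variant_ids.default_code', 'ilike', 'espresso')]
    #   + [('public_categ_ids', 'child_of', 3)]
    #   + [('attribute_line_ids.value_ids', 'in', [7])]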
@http.route([
'/shop',
'/shop/page/<int:page>',
'/shop/category/<model("product.public.category"):category>',
'/shop/category/<model("product.public.category"):category>/page/<int:page>'
], type='http', auth="public", website=True)
def shop(self, page=0, category=None, search='', **post):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
attrib_list = request.httprequest.args.getlist('attrib')
attrib_values = [map(int, v.split("-")) for v in attrib_list if v]
attrib_set = set([v[1] for v in attrib_values])
domain = self._get_search_domain(search, category, attrib_values)
keep = QueryURL('/shop', category=category and int(category), search=search, attrib=attrib_list)
if not context.get('pricelist'):
pricelist = self.get_pricelist()
context['pricelist'] = int(pricelist)
else:
pricelist = pool.get('product.pricelist').browse(cr, uid, context['pricelist'], context)
product_obj = pool.get('product.template')
url = "/shop"
product_count = product_obj.search_count(cr, uid, domain, context=context)
if search:
post["search"] = search
if category:
category = pool['product.public.category'].browse(cr, uid, int(category), context=context)
url = "/shop/category/%s" % slug(category)
if attrib_list:
post['attrib'] = attrib_list
pager = request.website.pager(url=url, total=product_count, page=page, step=PPG, scope=7, url_args=post)
product_ids = product_obj.search(cr, uid, domain, limit=PPG, offset=pager['offset'], order='website_published desc, website_sequence desc', context=context)
products = product_obj.browse(cr, uid, product_ids, context=context)
style_obj = pool['product.style']
style_ids = style_obj.search(cr, uid, [], context=context)
styles = style_obj.browse(cr, uid, style_ids, context=context)
category_obj = pool['product.public.category']
category_ids = category_obj.search(cr, uid, [('parent_id', '=', False)], context=context)
categs = category_obj.browse(cr, uid, category_ids, context=context)
attributes_obj = request.registry['product.attribute']
attributes_ids = attributes_obj.search(cr, uid, [], context=context)
attributes = attributes_obj.browse(cr, uid, attributes_ids, context=context)
from_currency = pool.get('product.price.type')._get_field_currency(cr, uid, 'list_price', context)
to_currency = pricelist.currency_id
compute_currency = lambda price: pool['res.currency']._compute(cr, uid, from_currency, to_currency, price, context=context)
values = {
'search': search,
'category': category,
'attrib_values': attrib_values,
'attrib_set': attrib_set,
'pager': pager,
'pricelist': pricelist,
'products': products,
'bins': table_compute().process(products),
'rows': PPR,
'styles': styles,
'categories': categs,
'attributes': attributes,
'compute_currency': compute_currency,
'keep': keep,
'style_in_product': lambda style, product: style.id in [s.id for s in product.website_style_ids],
'attrib_encode': lambda attribs: werkzeug.url_encode([('attrib',i) for i in attribs]),
}
return request.website.render("website_sale.products", values)
@http.route(['/shop/product/<model("product.template"):product>'], type='http', auth="public", website=True)
def product(self, product, category='', search='', **kwargs):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
category_obj = pool['product.public.category']
template_obj = pool['product.template']
context.update(active_id=product.id)
if category:
category = category_obj.browse(cr, uid, int(category), context=context)
category = category if category.exists() else False
attrib_list = request.httprequest.args.getlist('attrib')
attrib_values = [map(int,v.split("-")) for v in attrib_list if v]
attrib_set = set([v[1] for v in attrib_values])
keep = QueryURL('/shop', category=category and category.id, search=search, attrib=attrib_list)
category_ids = category_obj.search(cr, uid, [], context=context)
category_list = category_obj.name_get(cr, uid, category_ids, context=context)
category_list = sorted(category_list, key=lambda category: category[1])
pricelist = self.get_pricelist()
from_currency = pool.get('product.price.type')._get_field_currency(cr, uid, 'list_price', context)
to_currency = pricelist.currency_id
compute_currency = lambda price: pool['res.currency']._compute(cr, uid, from_currency, to_currency, price, context=context)
if not context.get('pricelist'):
context['pricelist'] = int(self.get_pricelist())
product = template_obj.browse(cr, uid, int(product), context=context)
values = {
'search': search,
'category': category,
'pricelist': pricelist,
'attrib_values': attrib_values,
'compute_currency': compute_currency,
'attrib_set': attrib_set,
'keep': keep,
'category_list': category_list,
'main_object': product,
'product': product,
'get_attribute_value_ids': self.get_attribute_value_ids
}
return request.website.render("website_sale.product", values)
@http.route(['/shop/product/comment/<int:product_template_id>'], type='http', auth="public", website=True)
def product_comment(self, product_template_id, **post):
if not request.session.uid:
return login_redirect()
cr, uid, context = request.cr, request.uid, request.context
if post.get('comment'):
request.registry['product.template'].message_post(
cr, uid, product_template_id,
body=post.get('comment'),
type='comment',
subtype='mt_comment',
context=dict(context, mail_create_nosubscribe=True))
return werkzeug.utils.redirect('/shop/product/%s#comments' % product_template_id)
@http.route(['/shop/pricelist'], type='http', auth="public", website=True)
def pricelist(self, promo, **post):
cr, uid, context = request.cr, request.uid, request.context
request.website.sale_get_order(code=promo, context=context)
return request.redirect("/shop/cart")
@http.route(['/shop/cart'], type='http', auth="public", website=True)
def cart(self, **post):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
order = request.website.sale_get_order()
if order:
from_currency = pool.get('product.price.type')._get_field_currency(cr, uid, 'list_price', context)
to_currency = order.pricelist_id.currency_id
compute_currency = lambda price: pool['res.currency']._compute(cr, uid, from_currency, to_currency, price, context=context)
else:
compute_currency = lambda price: price
values = {
'order': order,
'compute_currency': compute_currency,
'suggested_products': [],
}
if order:
_order = order
if not context.get('pricelist'):
_order = order.with_context(pricelist=order.pricelist_id.id)
values['suggested_products'] = _order._cart_accessories()
return request.website.render("website_sale.cart", values)
@http.route(['/shop/cart/update'], type='http', auth="public", methods=['POST'], website=True)
def cart_update(self, product_id, add_qty=1, set_qty=0, **kw):
cr, uid, context = request.cr, request.uid, request.context
request.website.sale_get_order(force_create=1)._cart_update(product_id=int(product_id), add_qty=float(add_qty), set_qty=float(set_qty))
return request.redirect("/shop/cart")
@http.route(['/shop/cart/update_json'], type='json', auth="public", methods=['POST'], website=True)
def cart_update_json(self, product_id, line_id, add_qty=None, set_qty=None, display=True):
order = request.website.sale_get_order(force_create=1)
if order.state != 'draft':
request.website.sale_reset()
return {}
value = order._cart_update(product_id=product_id, line_id=line_id, add_qty=add_qty, set_qty=set_qty)
if not order.cart_quantity:
request.website.sale_reset()
return {}
if not display:
return None
value['cart_quantity'] = order.cart_quantity
value['website_sale.total'] = request.website._render("website_sale.total", {
'website_sale_order': request.website.sale_get_order()
})
return value
#------------------------------------------------------
# Checkout
#------------------------------------------------------
def checkout_redirection(self, order):
cr, uid, context, registry = request.cr, request.uid, request.context, request.registry
# must have a draft sale order with lines at this point, otherwise reset
if not order or order.state != 'draft':
request.session['sale_order_id'] = None
request.session['sale_transaction_id'] = None
return request.redirect('/shop')
# if transaction pending / done: redirect to confirmation
tx = context.get('website_sale_transaction')
if tx and tx.state != 'draft':
return request.redirect('/shop/payment/confirmation/%s' % order.id)
def checkout_values(self, data=None):
cr, uid, context, registry = request.cr, request.uid, request.context, request.registry
orm_partner = registry.get('res.partner')
orm_user = registry.get('res.users')
orm_country = registry.get('res.country')
state_orm = registry.get('res.country.state')
country_ids = orm_country.search(cr, SUPERUSER_ID, [], context=context)
countries = orm_country.browse(cr, SUPERUSER_ID, country_ids, context)
states_ids = state_orm.search(cr, SUPERUSER_ID, [], context=context)
states = state_orm.browse(cr, SUPERUSER_ID, states_ids, context)
partner = orm_user.browse(cr, SUPERUSER_ID, request.uid, context).partner_id
order = None
shipping_id = None
shipping_ids = []
checkout = {}
if not data:
if request.uid != request.website.user_id.id:
checkout.update( self.checkout_parse("billing", partner) )
shipping_ids = orm_partner.search(cr, SUPERUSER_ID, [("parent_id", "=", partner.id), ('type', "=", 'delivery')], context=context)
else:
order = request.website.sale_get_order(force_create=1, context=context)
if order.partner_id:
domain = [("partner_id", "=", order.partner_id.id)]
user_ids = request.registry['res.users'].search(cr, SUPERUSER_ID, domain, context=dict(context or {}, active_test=False))
if not user_ids or request.website.user_id.id not in user_ids:
checkout.update( self.checkout_parse("billing", order.partner_id) )
else:
checkout = self.checkout_parse('billing', data)
try:
shipping_id = int(data["shipping_id"])
except ValueError:
pass
if shipping_id == -1:
checkout.update(self.checkout_parse('shipping', data))
if shipping_id is None:
if not order:
order = request.website.sale_get_order(context=context)
if order and order.partner_shipping_id:
shipping_id = order.partner_shipping_id.id
shipping_ids = list(set(shipping_ids) - set([partner.id]))
if shipping_id == partner.id:
shipping_id = 0
elif shipping_id > 0 and shipping_id not in shipping_ids:
shipping_ids.append(shipping_id)
elif shipping_id is None and shipping_ids:
shipping_id = shipping_ids[0]
ctx = dict(context, show_address=1)
shippings = []
if shipping_ids:
shippings = shipping_ids and orm_partner.browse(cr, SUPERUSER_ID, list(shipping_ids), ctx) or []
if shipping_id > 0:
shipping = orm_partner.browse(cr, SUPERUSER_ID, shipping_id, ctx)
checkout.update( self.checkout_parse("shipping", shipping) )
checkout['shipping_id'] = shipping_id
# Default search by user country
if not checkout.get('country_id'):
country_code = request.session['geoip'].get('country_code')
if country_code:
country_ids = request.registry.get('res.country').search(cr, uid, [('code', '=', country_code)], context=context)
if country_ids:
checkout['country_id'] = country_ids[0]
values = {
'countries': countries,
'states': states,
'checkout': checkout,
'shipping_id': partner.id != shipping_id and shipping_id or 0,
'shippings': shippings,
'error': {},
'has_check_vat': hasattr(registry['res.partner'], 'check_vat')
}
return values
mandatory_billing_fields = ["name", "phone", "email", "street2", "city", "country_id"]
optional_billing_fields = ["street", "state_id", "vat", "vat_subjected", "zip"]
mandatory_shipping_fields = ["name", "phone", "street", "city", "country_id"]
optional_shipping_fields = ["state_id", "zip"]
def checkout_parse(self, address_type, data, remove_prefix=False):
""" data is a dict OR a partner browse record
"""
# set mandatory and optional fields
assert address_type in ('billing', 'shipping')
if address_type == 'billing':
all_fields = self.mandatory_billing_fields + self.optional_billing_fields
prefix = ''
else:
all_fields = self.mandatory_shipping_fields + self.optional_shipping_fields
prefix = 'shipping_'
# set data
if isinstance(data, dict):
query = dict((prefix + field_name, data[prefix + field_name])
for field_name in all_fields if prefix + field_name in data)
else:
query = dict((prefix + field_name, getattr(data, field_name))
for field_name in all_fields if getattr(data, field_name))
if address_type == 'billing' and data.parent_id:
query[prefix + 'street'] = data.parent_id.name
if query.get(prefix + 'state_id'):
query[prefix + 'state_id'] = int(query[prefix + 'state_id'])
if query.get(prefix + 'country_id'):
query[prefix + 'country_id'] = int(query[prefix + 'country_id'])
if query.get(prefix + 'vat'):
query[prefix + 'vat_subjected'] = True
if not remove_prefix:
return query
return dict((field_name, data[prefix + field_name]) for field_name in all_fields if prefix + field_name in data)
def checkout_form_validate(self, data):
cr, uid, context, registry = request.cr, request.uid, request.context, request.registry
# Validation
error = dict()
for field_name in self.mandatory_billing_fields:
if not data.get(field_name):
error[field_name] = 'missing'
if data.get("vat") and hasattr(registry["res.partner"], "check_vat"):
if request.website.company_id.vat_check_vies:
# force full VIES online check
check_func = registry["res.partner"].vies_vat_check
else:
# quick and partial off-line checksum validation
check_func = registry["res.partner"].simple_vat_check
vat_country, vat_number = registry["res.partner"]._split_vat(data.get("vat"))
if not check_func(cr, uid, vat_country, vat_number, context=None): # simple_vat_check
error["vat"] = 'error'
if data.get("shipping_id") == -1:
for field_name in self.mandatory_shipping_fields:
field_name = 'shipping_' + field_name
if not data.get(field_name):
error[field_name] = 'missing'
return error
def checkout_form_save(self, checkout):
cr, uid, context, registry = request.cr, request.uid, request.context, request.registry
order = request.website.sale_get_order(force_create=1, context=context)
orm_partner = registry.get('res.partner')
orm_user = registry.get('res.users')
order_obj = request.registry.get('sale.order')
partner_lang = request.lang if request.lang in [lang.code for lang in request.website.language_ids] else None
billing_info = {'customer': True}
if partner_lang:
billing_info['lang'] = partner_lang
billing_info.update(self.checkout_parse('billing', checkout, True))
# set partner_id
partner_id = None
if request.uid != request.website.user_id.id:
partner_id = orm_user.browse(cr, SUPERUSER_ID, uid, context=context).partner_id.id
elif order.partner_id:
user_ids = request.registry['res.users'].search(cr, SUPERUSER_ID,
[("partner_id", "=", order.partner_id.id)], context=dict(context or {}, active_test=False))
if not user_ids or request.website.user_id.id not in user_ids:
partner_id = order.partner_id.id
# save partner informations
if partner_id and request.website.partner_id.id != partner_id:
orm_partner.write(cr, SUPERUSER_ID, [partner_id], billing_info, context=context)
else:
# create partner
partner_id = orm_partner.create(cr, SUPERUSER_ID, billing_info, context=context)
# create a new shipping partner
if checkout.get('shipping_id') == -1:
shipping_info = {}
if partner_lang:
shipping_info['lang'] = partner_lang
shipping_info.update(self.checkout_parse('shipping', checkout, True))
shipping_info['type'] = 'delivery'
shipping_info['parent_id'] = partner_id
checkout['shipping_id'] = orm_partner.create(cr, SUPERUSER_ID, shipping_info, context)
order_info = {
'partner_id': partner_id,
'message_follower_ids': [(4, partner_id), (3, request.website.partner_id.id)],
'partner_invoice_id': partner_id,
}
order_info.update(order_obj.onchange_partner_id(cr, SUPERUSER_ID, [], partner_id, context=context)['value'])
address_change = order_obj.onchange_delivery_id(cr, SUPERUSER_ID, [], order.company_id.id, partner_id,
checkout.get('shipping_id'), None, context=context)['value']
order_info.update(address_change)
if address_change.get('fiscal_position'):
fiscal_update = order_obj.onchange_fiscal_position(cr, SUPERUSER_ID, [], address_change['fiscal_position'],
[(4, l.id) for l in order.order_line], context=None)['value']
order_info.update(fiscal_update)
order_info.pop('user_id')
order_info.update(partner_shipping_id=checkout.get('shipping_id') or partner_id)
order_obj.write(cr, SUPERUSER_ID, [order.id], order_info, context=context)
@http.route(['/shop/checkout'], type='http', auth="public", website=True)
def checkout(self, **post):
cr, uid, context = request.cr, request.uid, request.context
order = request.website.sale_get_order(force_create=1, context=context)
redirection = self.checkout_redirection(order)
if redirection:
return redirection
values = self.checkout_values()
return request.website.render("website_sale.checkout", values)
@http.route(['/shop/confirm_order'], type='http', auth="public", website=True)
def confirm_order(self, **post):
cr, uid, context, registry = request.cr, request.uid, request.context, request.registry
order = request.website.sale_get_order(context=context)
if not order:
return request.redirect("/shop")
redirection = self.checkout_redirection(order)
if redirection:
return redirection
values = self.checkout_values(post)
values["error"] = self.checkout_form_validate(values["checkout"])
if values["error"]:
return request.website.render("website_sale.checkout", values)
self.checkout_form_save(values["checkout"])
request.session['sale_last_order_id'] = order.id
request.website.sale_get_order(update_pricelist=True, context=context)
return request.redirect("/shop/payment")
#------------------------------------------------------
# Payment
#------------------------------------------------------
@http.route(['/shop/payment'], type='http', auth="public", website=True)
def payment(self, **post):
""" Payment step. This page proposes several payment means based on available
payment.acquirer. State at this point :
- a draft sale order with lines; otherwise, clean context / session and
back to the shop
- no transaction in context / session, or only a draft one, if the customer
did go to a payment.acquirer website but closed the tab without
paying / canceling
"""
cr, uid, context = request.cr, request.uid, request.context
payment_obj = request.registry.get('payment.acquirer')
sale_order_obj = request.registry.get('sale.order')
order = request.website.sale_get_order(context=context)
redirection = self.checkout_redirection(order)
if redirection:
return redirection
shipping_partner_id = False
if order:
if order.partner_shipping_id.id:
shipping_partner_id = order.partner_shipping_id.id
else:
shipping_partner_id = order.partner_invoice_id.id
values = {
'order': request.registry['sale.order'].browse(cr, SUPERUSER_ID, order.id, context=context)
}
values['errors'] = sale_order_obj._get_errors(cr, uid, order, context=context)
values.update(sale_order_obj._get_website_data(cr, uid, order, context))
# fetch all registered payment means
# if tx:
# acquirer_ids = [tx.acquirer_id.id]
# else:
if not values['errors']:
acquirer_ids = payment_obj.search(cr, SUPERUSER_ID, [('website_published', '=', True), ('company_id', '=', order.company_id.id)], context=context)
values['acquirers'] = list(payment_obj.browse(cr, uid, acquirer_ids, context=context))
render_ctx = dict(context, submit_class='btn btn-primary', submit_txt=_('Pay Now'))
for acquirer in values['acquirers']:
acquirer.button = payment_obj.render(
cr, SUPERUSER_ID, acquirer.id,
order.name,
order.amount_total,
order.pricelist_id.currency_id.id,
partner_id=shipping_partner_id,
tx_values={
'return_url': '/shop/payment/validate',
},
context=render_ctx)
return request.website.render("website_sale.payment", values)
@http.route(['/shop/payment/transaction/<int:acquirer_id>'], type='json', auth="public", website=True)
def payment_transaction(self, acquirer_id):
""" Json method that creates a payment.transaction, used to create a
transaction when the user clicks on 'pay now' button. After having
created the transaction, the event continues and the user is redirected
to the acquirer website.
:param int acquirer_id: id of a payment.acquirer record. If not set the
user is redirected to the checkout page
"""
cr, uid, context = request.cr, request.uid, request.context
transaction_obj = request.registry.get('payment.transaction')
order = request.website.sale_get_order(context=context)
if not order or not order.order_line or acquirer_id is None:
return request.redirect("/shop/checkout")
assert order.partner_id.id != request.website.partner_id.id
# find an already existing transaction
tx = request.website.sale_get_transaction()
if tx:
            if tx.state == 'draft':  # button clicked but no more info -> rewrite on tx or create a new one?
tx.write({
'acquirer_id': acquirer_id,
'amount': order.amount_total,
})
tx_id = tx.id
else:
tx_id = transaction_obj.create(cr, SUPERUSER_ID, {
'acquirer_id': acquirer_id,
'type': 'form',
'amount': order.amount_total,
'currency_id': order.pricelist_id.currency_id.id,
'partner_id': order.partner_id.id,
'partner_country_id': order.partner_id.country_id.id,
'reference': order.name,
'sale_order_id': order.id,
}, context=context)
request.session['sale_transaction_id'] = tx_id
# update quotation
request.registry['sale.order'].write(
cr, SUPERUSER_ID, [order.id], {
'payment_acquirer_id': acquirer_id,
'payment_tx_id': request.session['sale_transaction_id']
}, context=context)
return tx_id
@http.route('/shop/payment/get_status/<int:sale_order_id>', type='json', auth="public", website=True)
def payment_get_status(self, sale_order_id, **post):
cr, uid, context = request.cr, request.uid, request.context
order = request.registry['sale.order'].browse(cr, SUPERUSER_ID, sale_order_id, context=context)
assert order.id == request.session.get('sale_last_order_id')
if not order:
return {
'state': 'error',
'message': '<p>%s</p>' % _('There seems to be an error with your request.'),
}
tx_ids = request.registry['payment.transaction'].search(
cr, SUPERUSER_ID, [
'|', ('sale_order_id', '=', order.id), ('reference', '=', order.name)
], context=context)
if not tx_ids:
if order.amount_total:
return {
'state': 'error',
'message': '<p>%s</p>' % _('There seems to be an error with your request.'),
}
else:
state = 'done'
message = ""
validation = None
else:
tx = request.registry['payment.transaction'].browse(cr, SUPERUSER_ID, tx_ids[0], context=context)
state = tx.state
if state == 'done':
message = '<p>%s</p>' % _('Your payment has been received.')
elif state == 'cancel':
message = '<p>%s</p>' % _('The payment seems to have been canceled.')
elif state == 'pending' and tx.acquirer_id.validation == 'manual':
message = '<p>%s</p>' % _('Your transaction is waiting confirmation.')
if tx.acquirer_id.post_msg:
message += tx.acquirer_id.post_msg
elif state == 'error':
message = '<p>%s</p>' % _('An error occurred during the transaction.')
validation = tx.acquirer_id.validation
return {
'state': state,
'message': message,
'validation': validation
}
@http.route('/shop/payment/validate', type='http', auth="public", website=True)
def payment_validate(self, transaction_id=None, sale_order_id=None, **post):
""" Method that should be called by the server when receiving an update
for a transaction. State at this point :
        - UPDATE ME
"""
cr, uid, context = request.cr, request.uid, request.context
email_act = None
sale_order_obj = request.registry['sale.order']
if transaction_id is None:
tx = request.website.sale_get_transaction()
else:
tx = request.registry['payment.transaction'].browse(cr, uid, transaction_id, context=context)
if sale_order_id is None:
order = request.website.sale_get_order(context=context)
else:
order = request.registry['sale.order'].browse(cr, SUPERUSER_ID, sale_order_id, context=context)
assert order.id == request.session.get('sale_last_order_id')
if not order or (order.amount_total and not tx):
return request.redirect('/shop')
if (not order.amount_total and not tx) or tx.state in ['pending', 'done']:
if (not order.amount_total and not tx):
# Orders are confirmed by payment transactions, but there is none for free orders,
# (e.g. free events), so confirm immediately
order.with_context(dict(context, send_email=True)).action_button_confirm()
elif tx and tx.state == 'cancel':
# cancel the quotation
sale_order_obj.action_cancel(cr, SUPERUSER_ID, [order.id], context=request.context)
# clean context and session, then redirect to the confirmation page
request.website.sale_reset(context=context)
if tx and tx.state == 'draft':
return request.redirect('/shop')
return request.redirect('/shop/confirmation')
@http.route(['/shop/confirmation'], type='http', auth="public", website=True)
def payment_confirmation(self, **post):
""" End of checkout process controller. Confirmation is basically seing
the status of a sale.order. State at this point :
- should not have any context / session info: clean them
- take a sale.order id, because we request a sale.order and are not
          session dependent anymore
"""
cr, uid, context = request.cr, request.uid, request.context
sale_order_id = request.session.get('sale_last_order_id')
if sale_order_id:
order = request.registry['sale.order'].browse(cr, SUPERUSER_ID, sale_order_id, context=context)
else:
return request.redirect('/shop')
return request.website.render("website_sale.confirmation", {'order': order})
#------------------------------------------------------
# Edit
#------------------------------------------------------
@http.route(['/shop/add_product'], type='http', auth="user", methods=['POST'], website=True)
def add_product(self, name=None, category=0, **post):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
if not name:
name = _("New Product")
product_obj = request.registry.get('product.product')
product_id = product_obj.create(cr, uid, { 'name': name, 'public_categ_ids': category }, context=context)
product = product_obj.browse(cr, uid, product_id, context=context)
return request.redirect("/shop/product/%s?enable_editor=1" % slug(product.product_tmpl_id))
@http.route(['/shop/change_styles'], type='json', auth="public")
def change_styles(self, id, style_id):
product_obj = request.registry.get('product.template')
product = product_obj.browse(request.cr, request.uid, id, context=request.context)
remove = []
active = False
for style in product.website_style_ids:
if style.id == style_id:
remove.append(style.id)
active = True
break
style = request.registry.get('product.style').browse(request.cr, request.uid, style_id, context=request.context)
if remove:
product.write({'website_style_ids': [(3, rid) for rid in remove]})
if not active:
product.write({'website_style_ids': [(4, style.id)]})
return not active
@http.route(['/shop/change_sequence'], type='json', auth="public")
def change_sequence(self, id, sequence):
product_obj = request.registry.get('product.template')
if sequence == "top":
product_obj.set_sequence_top(request.cr, request.uid, [id], context=request.context)
elif sequence == "bottom":
product_obj.set_sequence_bottom(request.cr, request.uid, [id], context=request.context)
elif sequence == "up":
product_obj.set_sequence_up(request.cr, request.uid, [id], context=request.context)
elif sequence == "down":
product_obj.set_sequence_down(request.cr, request.uid, [id], context=request.context)
@http.route(['/shop/change_size'], type='json', auth="public")
def change_size(self, id, x, y):
product_obj = request.registry.get('product.template')
product = product_obj.browse(request.cr, request.uid, id, context=request.context)
return product.write({'website_size_x': x, 'website_size_y': y})
def order_lines_2_google_api(self, order_lines):
""" Transforms a list of order lines into a dict for google analytics """
ret = []
for line in order_lines:
product = line.product_id
ret.append({
'id': line.order_id and line.order_id.id,
'sku': product.ean13 or product.id,
'name': product.name or '-',
'category': product.categ_id and product.categ_id.name or '-',
'price': line.price_unit,
'quantity': line.product_uom_qty,
})
return ret
def order_2_return_dict(self, order):
""" Returns the tracking_cart dict of the order for Google analytics basically defined to be inherited """
return {
'transaction': {
'id': order.id,
'affiliation': order.company_id.name,
'revenue': order.amount_total,
'tax': order.amount_tax,
'currency': order.currency_id.name
},
'lines': self.order_lines_2_google_api(order.order_line)
}
@http.route(['/shop/tracking_last_order'], type='json', auth="public")
def tracking_cart(self, **post):
""" return data about order in JSON needed for google analytics"""
cr, context = request.cr, request.context
ret = {}
sale_order_id = request.session.get('sale_last_order_id')
if sale_order_id:
order = request.registry['sale.order'].browse(cr, SUPERUSER_ID, sale_order_id, context=context)
ret = self.order_2_return_dict(order)
return ret
@http.route(['/shop/get_unit_price'], type='json', auth="public", methods=['POST'], website=True)
def get_unit_price(self, product_ids, add_qty, use_order_pricelist=False, **kw):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
products = pool['product.product'].browse(cr, uid, product_ids, context=context)
partner = pool['res.users'].browse(cr, uid, uid, context=context).partner_id
if use_order_pricelist:
pricelist_id = request.session.get('sale_order_code_pricelist_id') or partner.property_product_pricelist.id
else:
pricelist_id = partner.property_product_pricelist.id
prices = pool['product.pricelist'].price_rule_get_multi(cr, uid, [], [(product, add_qty, partner) for product in products], context=context)
return {product_id: prices[product_id][pricelist_id][0] for product_id in product_ids}
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
AdeshAtole/coala | coalib/tests/parsing/GlobbingTest.py | 9 | 11091 | """
Tests Globbing and related functions
Test Files are local and permanent and organized as follows:
GlobTestDir
├── SubDir1
│ ├── File11.py
│ └── File12.py
├── SubDir2
│ ├── File(with)parentheses.txt
│ └── File[with]brackets.txt
├── File1.x
├── File2.y
└── File3.z
"""
import os
import unittest
from coalib.parsing.Globbing import (
_iter_alternatives, _iter_choices, _position_is_bracketed, fnmatch, glob)
class TestFiles:
"""
Testfiles to check glob patterns on
"""
glob_test_root = os.path.split(__file__)[0]
glob_test_dir = os.path.join(glob_test_root, 'GlobTestDir')
dir1 = os.path.join(glob_test_dir, 'SubDir1')
file11 = os.path.join(dir1, 'File11.py')
file12 = os.path.join(dir1, 'File12.py')
dir2 = os.path.join(glob_test_dir, 'SubDir2')
file_paren = os.path.join(dir2, 'File(with)parentheses.txt')
file_brack = os.path.join(dir2, 'File[with]brackets.txt')
file1 = os.path.join(glob_test_dir, 'File1.x')
file2 = os.path.join(glob_test_dir, 'File2.y')
file3 = os.path.join(glob_test_dir, 'File3.z')
class GlobbingHelperFunctionsTest(unittest.TestCase):
def test_positions(self):
# pattern: [bracketed values]
pattern_positions_dict = {
"[]": [],
"[a]": [1],
"[][]": [1, 2],
"[]]]": [1],
"[[[]": [1, 2],
"[[[][]]]": [1, 2, 5],
"][": [],
"][][": [],
"[!]": [],
"[!c]": [1, 2],
"[!": []
}
for pattern, bracketed_positions in pattern_positions_dict.items():
for pos in range(len(pattern)):
if pos in bracketed_positions:
self.assertTrue(_position_is_bracketed(pattern, pos))
else:
self.assertFalse(_position_is_bracketed(pattern, pos))
def test_choices(self):
# pattern: [choices]
pattern_choices_dict = {
"": [""],
"a": ["a"],
"a|b": ["a", "b"],
"a|b|c": ["a", "b", "c"],
"a|b[|]c": ["a", "b[|]c"],
"a|[b|c]": ["a", "[b|c]"],
"a[|b|c]": ["a[|b|c]"],
"[a|b|c]": ["[a|b|c]"],
"[a]|[b]|[c]": ["[a]", "[b]", "[c]"],
"[[a]|[b]|[c]": ["[[a]", "[b]", "[c]"]
}
for pattern, choices in pattern_choices_dict.items():
self.assertEqual(list(_iter_choices(pattern)), choices)
def test_alternatives(self):
# pattern: [alternatives]
pattern_alternatives_dict = {
"": [""],
"(ab)": ["ab"],
"a|b": ["a|b"],
"()": [""],
"(|)": [""],
"(a|b)": ["a", "b"],
"(a|b|c)": ["a", "b", "c"],
"a(b|c)": ["ab", "ac"],
"(a|b)(c|d)": ["ac", "ad", "bc", "bd"],
"(a|b(c|d)": ["(a|bc", "(a|bd"],
"(a[|]b)": ["a[|]b"],
"[(]a|b)": ["[(]a|b)"],
}
for pattern, alternatives in pattern_alternatives_dict.items():
self.assertEqual(sorted(list(_iter_alternatives(pattern))),
sorted(alternatives))
class FnmatchTest(unittest.TestCase):
def _test_fnmatch(self, pattern, matches, non_matches):
for match in matches:
self.assertTrue(fnmatch(match, pattern))
for non_match in non_matches:
self.assertFalse(fnmatch(non_match, pattern))
def test_circumflex_in_set(self):
pattern = "[^abc]"
matches = ["^", "a", "b", "c"]
non_matches = ["d", "e", "f", "g"]
self._test_fnmatch(pattern, matches, non_matches)
def test_negative_set(self):
pattern = "[!ab]"
matches = ["c", "d"]
non_matches = ["a", "b"]
self._test_fnmatch(pattern, matches, non_matches)
def test_escaped_bracket(self):
pattern = "[]ab]"
matches = ["]", "a", "b"]
non_matches = ["[]ab]", "ab]"]
self._test_fnmatch(pattern, matches, non_matches)
def test_empty_set(self):
pattern = "a[]b"
matches = ["a[]b"]
non_matches = ["a", "b", "[", "]", "ab"]
self._test_fnmatch(pattern, matches, non_matches)
def test_home_dir(self):
pattern = os.path.join("~", "a", "b")
matches = [os.path.expanduser(os.path.join("~", "a", "b"))]
non_matches = [os.path.join("~", "a", "b")]
self._test_fnmatch(pattern, matches, non_matches)
def test_alternatives(self):
pattern = "(a|b)"
matches = ["a", "b"]
non_matches = ["(a|b)", "a|b"]
self._test_fnmatch(pattern, matches, non_matches)
def test_set_precedence(self):
pattern = "(a|[b)]"
matches = ["(a|b", "(a|)"]
non_matches = ["a]", "[b]"]
self._test_fnmatch(pattern, matches, non_matches)
def test_questionmark(self):
pattern = "a?b"
matches = ["axb", "ayb"]
non_matches = ["ab", "aXXb"]
self._test_fnmatch(pattern, matches, non_matches)
def test_asterisk(self):
pattern = "a*b"
matches = ["axb", "ayb"]
non_matches = ["aXbX", os.path.join("a", "b")]
self._test_fnmatch(pattern, matches, non_matches)
def test_double_asterisk(self):
pattern = "a**b"
matches = ["axb", "ayb", os.path.join("a", "b")]
non_matches = ["aXbX"]
self._test_fnmatch(pattern, matches, non_matches)
def test_multiple_patterns(self):
pattern = ["a**b", "a**c"]
matches = ["axb", "axc"]
non_matches = ["aXbX", "aXcX"]
self._test_fnmatch(pattern, matches, non_matches)
pattern = []
matches = ["anything", "anything_else"]
non_matches = []
self._test_fnmatch(pattern, matches, non_matches)
class GlobTest(unittest.TestCase):
def setUp(self):
self.maxDiff = None
def _test_glob(self, pattern, file_list):
results = sorted([os.path.normcase(g) for g in glob(pattern)])
file_list = sorted([os.path.normcase(f) for f in file_list])
self.assertEqual([i for i in results if "__pycache__" not in i],
file_list)
def test_collect_files(self):
pattern = os.path.join(TestFiles.glob_test_dir, 'Sub*', 'File1?.py')
file_list = [TestFiles.file11, TestFiles.file12]
self._test_glob(pattern, file_list)
def test_collect_dirs(self):
pattern = os.path.join(TestFiles.glob_test_dir, 'Sub*' + os.sep)
file_list = [TestFiles.dir1+os.sep, TestFiles.dir2+os.sep]
self._test_glob(pattern, file_list)
def test_collect_specific_dir(self):
pattern = os.path.join(TestFiles.dir1 + os.sep)
file_list = [TestFiles.dir1+os.sep]
self._test_glob(pattern, file_list)
def test_collect_flat(self):
pattern = os.path.join(TestFiles.glob_test_dir, '*')
file_list = [TestFiles.dir1,
TestFiles.dir2,
TestFiles.file1,
TestFiles.file2,
TestFiles.file3]
self._test_glob(pattern, file_list)
def test_collect_all(self):
pattern = os.path.join(TestFiles.glob_test_dir, '**', '*')
file_list = [TestFiles.dir1,
TestFiles.dir2,
TestFiles.file1,
TestFiles.file2,
TestFiles.file3,
TestFiles.file11,
TestFiles.file12,
TestFiles.file_paren,
TestFiles.file_brack]
self._test_glob(pattern, file_list)
def test_collect_basename(self):
pattern = TestFiles.glob_test_dir
file_list = [TestFiles.glob_test_dir]
self._test_glob(pattern, file_list)
def test_collect_none(self):
pattern = ''
file_list = []
self._test_glob(pattern, file_list)
def test_collect_specific(self):
pattern = os.path.join(TestFiles.file12)
file_list = [TestFiles.file12]
self._test_glob(pattern, file_list)
def test_collect_parentheses(self):
pattern = os.path.join(TestFiles.glob_test_dir,
'SubDir[12]',
'File[(]with)parentheses.txt')
file_list = [TestFiles.file_paren]
self._test_glob(pattern, file_list)
def test_collect_brackets(self):
pattern = os.path.join(TestFiles.glob_test_dir,
'SubDir[12]',
'File[[]with[]]brackets.txt')
file_list = [TestFiles.file_brack]
self._test_glob(pattern, file_list)
def test_collect_or(self):
pattern = os.path.join(TestFiles.glob_test_dir, "File?.(x|y|z)")
file_list = [TestFiles.file1, TestFiles.file2, TestFiles.file3]
self._test_glob(pattern, file_list)
def test_wildcard_dir(self):
pattern = os.path.join(TestFiles.glob_test_dir, "SubDir?", "File11.py")
file_list = [TestFiles.file11]
self._test_glob(pattern, file_list)
def test_collect_recursive(self):
pattern = os.path.join(TestFiles.glob_test_dir, "**", "*")
file_list = [TestFiles.file1,
TestFiles.file2,
TestFiles.file3,
TestFiles.file11,
TestFiles.file12,
TestFiles.file_paren,
TestFiles.file_brack,
TestFiles.dir1,
TestFiles.dir2]
self._test_glob(pattern, file_list)
def test_collect_invalid(self):
pattern = "NOPE"
file_list = []
self._test_glob(pattern, file_list)
def test_no_dirname_recursive(self):
old_curdir = os.curdir
os.curdir = TestFiles.glob_test_dir
pattern = '**'
file_list = [TestFiles.file1,
TestFiles.file2,
TestFiles.file3,
TestFiles.file11,
TestFiles.file12,
TestFiles.file_paren,
TestFiles.file_brack,
TestFiles.dir1,
TestFiles.dir2]
results = sorted([os.path.normcase(os.path.join(os.curdir, g))
for g in glob(pattern)])
file_list = sorted([os.path.normcase(f) for f in file_list])
self.assertEqual([i for i in results if "__pycache__" not in i],
file_list)
os.curdir = old_curdir
def test_no_dirname(self):
old_curdir = os.curdir
os.curdir = TestFiles.glob_test_dir
pattern = '*Dir?'
file_list = [TestFiles.dir1,
TestFiles.dir2]
results = sorted([os.path.normcase(os.path.join(os.curdir, g))
for g in glob(pattern)])
file_list = sorted([os.path.normcase(f) for f in file_list])
self.assertEqual(results, file_list)
os.curdir = old_curdir
| agpl-3.0 |
keithroe/vtkoptix | ThirdParty/Twisted/twisted/internet/_win32stdio.py | 32 | 3161 | # -*- test-case-name: twisted.test.test_stdio -*-
"""
Windows-specific implementation of the L{twisted.internet.stdio} interface.
"""
import win32api
import os, msvcrt
from zope.interface import implements
from twisted.internet.interfaces import IHalfCloseableProtocol, ITransport, IAddress
from twisted.internet.interfaces import IConsumer, IPushProducer
from twisted.internet import _pollingfile, main
from twisted.python.failure import Failure
class Win32PipeAddress(object):
implements(IAddress)
class StandardIO(_pollingfile._PollingTimer):
implements(ITransport,
IConsumer,
IPushProducer)
disconnecting = False
disconnected = False
def __init__(self, proto, reactor=None):
"""
Start talking to standard IO with the given protocol.
        Also, put stdin/stdout/stderr into binary mode.
"""
if reactor is None:
from twisted.internet import reactor
        # put the three standard fds into binary mode (range(0, 1, 2) only covered fd 0)
        for stdfd in (0, 1, 2):
msvcrt.setmode(stdfd, os.O_BINARY)
_pollingfile._PollingTimer.__init__(self, reactor)
self.proto = proto
hstdin = win32api.GetStdHandle(win32api.STD_INPUT_HANDLE)
hstdout = win32api.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
self.stdin = _pollingfile._PollableReadPipe(
hstdin, self.dataReceived, self.readConnectionLost)
self.stdout = _pollingfile._PollableWritePipe(
hstdout, self.writeConnectionLost)
self._addPollableResource(self.stdin)
self._addPollableResource(self.stdout)
self.proto.makeConnection(self)
def dataReceived(self, data):
self.proto.dataReceived(data)
def readConnectionLost(self):
if IHalfCloseableProtocol.providedBy(self.proto):
self.proto.readConnectionLost()
self.checkConnLost()
def writeConnectionLost(self):
if IHalfCloseableProtocol.providedBy(self.proto):
self.proto.writeConnectionLost()
self.checkConnLost()
connsLost = 0
def checkConnLost(self):
self.connsLost += 1
if self.connsLost >= 2:
self.disconnecting = True
self.disconnected = True
self.proto.connectionLost(Failure(main.CONNECTION_DONE))
# ITransport
def write(self, data):
self.stdout.write(data)
def writeSequence(self, seq):
self.stdout.write(''.join(seq))
def loseConnection(self):
self.disconnecting = True
self.stdin.close()
self.stdout.close()
def getPeer(self):
return Win32PipeAddress()
def getHost(self):
return Win32PipeAddress()
# IConsumer
def registerProducer(self, producer, streaming):
return self.stdout.registerProducer(producer, streaming)
def unregisterProducer(self):
return self.stdout.unregisterProducer()
# def write() above
# IProducer
def stopProducing(self):
self.stdin.stopProducing()
# IPushProducer
def pauseProducing(self):
self.stdin.pauseProducing()
def resumeProducing(self):
self.stdin.resumeProducing()
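# Hedged usage sketch, not part of the original module: wiring a minimal echo
# protocol to StandardIO. The _Echo class below is illustrative only; Protocol
# and the reactor come from the standard Twisted APIs.
if __name__ == '__main__':
    from twisted.internet import protocol, reactor

    class _Echo(protocol.Protocol):
        def dataReceived(self, data):
            # echo whatever arrives on stdin back to stdout
            self.transport.write(data)

        def connectionLost(self, reason):
            reactor.stop()

    StandardIO(_Echo())
    reactor.run()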
| bsd-3-clause |
Sabayon/entropy | matter/matter/spec.py | 1 | 18129 | # -*- coding: utf-8 -*-
"""
@author: Fabio Erculiani <[email protected]>
@contact: [email protected]
@copyright: Fabio Erculiani
@license: GPL-2
B{Matter TinderBox Toolkit}.
"""
import os
import shlex
from _entropy.matter.utils import convert_to_unicode, get_stringtype
class GenericSpecFunctions(object):
def ne_string(self, x):
return x, 'raw_unicode_escape'
def ne_list(self, x):
return x
def not_none(self, x):
return x is not None
def valid_integer(self, x):
try:
int(x)
except (TypeError, ValueError,):
return False
return True
def always_valid(self, *_args):
return True
def valid_path(self, x):
return os.path.lexists(x)
def valid_file(self, x):
return os.path.isfile(x)
def valid_dir(self, x):
return os.path.isdir(x)
def ve_string_open_file_read(self, x):
try:
open(x, "rb").close()
return x
except (IOError, OSError):
return None
def ve_string_stripper(self, x):
return convert_to_unicode(x).strip()
def ve_string_splitter(self, x):
return convert_to_unicode(x).strip().split()
def ve_integer_converter(self, x):
return int(x)
def ve_string_shlex_splitter(self, x):
return list(shlex.split(x))
def valid_ascii(self, x):
try:
x = str(x)
return x
except (UnicodeDecodeError, UnicodeEncodeError,):
return ""
def valid_yes_no(self, x):
return x in ("yes", "no")
def valid_yes_no_inherit(self, x):
return x in ("yes", "no", "inherit")
def valid_path_string(self, x):
try:
os.path.split(x)
except OSError:
return False
return True
def valid_path_string_first_list_item(self, x):
if not x:
return False
myx = x[0]
try:
os.path.split(myx)
except OSError:
return False
return True
def valid_comma_sep_list_list(self, input_str):
parts = []
for part in convert_to_unicode(input_str).split(","):
part = part.strip()
# do not filter out empty elements
parts.append(part.split())
return parts
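    # Illustrative example (not part of the original code): the "packages"
    # statement uses valid_comma_sep_list_list, so e.g.
    #   "app-foo/bar baz, x11-libs/qux"
    # is parsed into [["app-foo/bar", "baz"], ["x11-libs/qux"]] -- groups are
    # separated by commas, and each group is split on whitespace.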
def valid_path_list(self, x):
return [y.strip() for y in \
convert_to_unicode(x).split(",") if \
self.valid_path_string(y) and y.strip()]
class MatterSpecParser(object):
"""
Base class for implementing external .spec parsers
that can be used to extend the list of supported statements.
"""
def vital_parameters(self):
"""
Return a list of vital parameters.
"""
raise NotImplementedError()
def data(self):
"""
        Return a dictionary with parameter names as keys; each value is a
        dict containing the keys 've', 'cb' and 'mod', whose values are
        callables that respectively perform value extraction (ve), value
        verification (cb) and value modification (mod).
@return: data path dictionary (see ChrootSpec code for more info)
@rtype: dict
"""
raise NotImplementedError()
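# Hedged sketch, not part of the original module: a minimal external parser
# adding a hypothetical "myextra" yes/no statement. It would be enabled with
# MatterSpec.register_parser(_ExampleExtraParser()); the statement name and
# description below are illustrative only.
class _ExampleExtraParser(MatterSpecParser):

    def vital_parameters(self):
        # this parser adds no vital statements
        return []

    def data(self):
        funcs = GenericSpecFunctions()
        return {
            "myextra": {
                "cb": funcs.valid_yes_no,
                "ve": funcs.ve_string_stripper,
                "default": "no",
                "desc": "Hypothetical yes/no statement added by this sketch",
            },
        }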
class MatterSpec(MatterSpecParser):
_external_parsers = []
def __init__(self):
"""
MatterSpec constructor.
"""
self._funcs = GenericSpecFunctions()
@classmethod
def register_parser(cls, parser):
"""
Register an external parser object.
@param parser: a MatterSpecParser subclass
@type parser: MatterSpecParser
"""
MatterSpec._external_parsers.append(parser)
def vital_parameters(self):
"""
Return a list of vital .spec file parameters
@return: list of vital .spec file parameters
@rtype: list
"""
vital = set(["packages", "repository"])
for parser in MatterSpec._external_parsers:
vital |= set(parser.vital_parameters())
return sorted(vital)
def data(self):
"""
        Return a dictionary with parameter names as keys; each value is a
        dict containing the keys 've', 'cb' and 'mod', whose values are
        callables that respectively perform value extraction (ve), value
        verification (cb) and value modification (mod).
@return: data path dictionary (see ChrootSpec code for more info)
@rtype: dict
"""
data = {
"build-args": {
"cb": self._funcs.ne_string,
"ve": self._funcs.ve_string_shlex_splitter,
"default": ["--verbose", "--nospinner"],
"desc": "Portage build arguments (default is --verbose\n "
"--nospinner)",
},
"build-only": {
"cb": self._funcs.valid_yes_no,
"ve": self._funcs.ve_string_stripper,
"default": "no",
"desc": "Only build the packages without merging them\n "
"into the system.",
},
"dependencies": {
"cb": self._funcs.valid_yes_no,
"ve": self._funcs.ve_string_stripper,
"default": "no",
"desc": "Allow dependencies to be pulled in? (yes/no)",
},
"downgrade": {
"cb": self._funcs.valid_yes_no,
"ve": self._funcs.ve_string_stripper,
"default": "no",
"desc": "Allow package downgrades? (yes/no)",
},
"keep-going": {
"cb": self._funcs.valid_yes_no,
"ve": self._funcs.ve_string_stripper,
"default": "no",
"desc": "Make possible to continue if one \n\t"
"or more packages fail to build? (yes/no)",
},
"new-useflags": {
"cb": self._funcs.valid_yes_no,
"ve": self._funcs.ve_string_stripper,
"default": "no",
"desc": "Allow new USE flags? (yes/no)",
},
"removed-useflags": {
"cb": self._funcs.valid_yes_no,
"ve": self._funcs.ve_string_stripper,
"default": "no",
"desc": "Allow removed USE flags? (yes/no)",
},
"rebuild": {
"cb": self._funcs.valid_yes_no,
"ve": self._funcs.ve_string_stripper,
"default": "no",
"desc": "Allow package rebuilds? (yes/no)",
},
"spm-repository-change": {
"cb": self._funcs.valid_yes_no,
"ve": self._funcs.ve_string_stripper,
"default": "no",
"desc": "Allow Source Package Manager (Portage) \n\t"
"repository change? (yes/no)",
},
"spm-repository-change-if-upstreamed": {
"cb": self._funcs.valid_yes_no,
"ve": self._funcs.ve_string_stripper,
"default": "no",
"desc": "In case of Source Package Manager \n\trepository "
"changes, allow execution if the original repository "
"\n\tdoes not contain the package anymore? (yes/no)",
},
"not-installed": {
"cb": self._funcs.valid_yes_no,
"ve": self._funcs.ve_string_stripper,
"default": "no",
"desc": "Allow compiling packages even if they "
"are not \n\tactually installed on the System? (yes/no)",
},
"soft-blocker": {
"cb": self._funcs.valid_yes_no,
"ve": self._funcs.ve_string_stripper,
"default": "yes",
"desc": "Allow soft-blockers in the merge queue?\n "
"Packages will be unmerged if yes. (yes/no)",
},
"unmerge": {
"cb": self._funcs.valid_yes_no,
"ve": self._funcs.ve_string_stripper,
"default": "yes",
"desc": "Allow package unmerges due to Portage\n "
"soft-blockers resolution. (yes/no)",
},
"pkgpre": {
"cb": self._funcs.not_none,
"ve": self._funcs.ve_string_open_file_read,
"default": None,
"desc": "Package pre execution script hook path, "
"executed \n\tfor each package (also see example files)",
},
"pkgpost": {
"cb": self._funcs.not_none,
"ve": self._funcs.ve_string_open_file_read,
"default": None,
"desc": "Package build post execution script hook path, "
"executed \n\tfor each package (also see example files)",
},
"buildfail": {
"cb": self._funcs.not_none,
"ve": self._funcs.ve_string_open_file_read,
"default": None,
"desc": "Package build failure execution script hook "
"path, \n\texecuted for each failing package (also see "
"example files)",
},
"packages": {
"cb": self._funcs.always_valid,
"ve": self._funcs.valid_comma_sep_list_list,
"mod": lambda l_l: [x for x in l_l if x],
"desc": "List of packages to scrape, separated by "
"comma. \n\tIf you want to let Portage consider a group "
"of packages, \n\tjust separate them with spaces/tabs but "
"no commas",
},
"repository": {
"cb": self._funcs.ne_string,
"ve": self._funcs.ve_string_stripper,
"desc": "Binary Package Manager repository in where "
"newly built \n\tpackages will be put and pushed to",
},
"stable": {
"cb": self._funcs.valid_yes_no_inherit,
"ve": self._funcs.ve_string_stripper,
"default": "inherit",
"desc": "Only accept Portage stable packages (no "
"unstable keywords)",
},
}
for parser in MatterSpec._external_parsers:
parser_data = parser.data()
common_keys = set(data.keys()) & set(parser_data.keys())
if common_keys:
raise ValueError("Parser %s duplicates support for %s" % (
parser, " ".join(sorted(common_keys)),))
data.update(parser_data)
return data
class SpecPreprocessor:
PREFIX = "%"
class PreprocessorError(Exception):
""" Error while preprocessing file """
def __init__(self, spec_file_obj):
self.__expanders = {}
self.__builtin_expanders = {}
self._spec_file_obj = spec_file_obj
self._add_builtin_expanders()
def add_expander(self, statement, expander_callback):
"""
Add Preprocessor expander.
@param statement: statement to expand
@type statement: string
        @param expander_callback: one-argument callback that is used to expand
given line (line is raw format). Line is already pre-parsed and
contains a valid preprocessor statement that callback can handle.
Preprocessor callback should raise SpecPreprocessor.PreprocessorError
if line is malformed.
@type expander_callback: callable
@raise KeyError: if expander is already available
@return: a raw string (containing \n and whatever)
@rtype: string
"""
return self._add_expander(statement, expander_callback, builtin = False)
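    # Hedged usage sketch, not part of the original class: registering a
    # hypothetical "%warn" statement whose expander rewrites the rest of the
    # line into a comment so the parser ignores it:
    #
    #   def _warn_expander(line):
    #       rest = line.split(" ", 1)[1].strip()
    #       return "# WARNING: %s\n" % rest
    #
    #   preprocessor.add_expander("warn", _warn_expander)
    #
    # A second registration of the same statement raises KeyError.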
def _add_expander(self, statement, expander_callback, builtin = False):
obj = self.__expanders
if builtin:
obj = self.__builtin_expanders
if statement in obj:
raise KeyError("expander %s already provided" % (statement,))
obj[SpecPreprocessor.PREFIX + statement] = \
expander_callback
def _add_builtin_expanders(self):
# import statement
self._add_expander("import", self._import_expander, builtin = True)
def _import_expander(self, line):
rest_line = line.split(" ", 1)[1].strip()
if not rest_line:
return line
spec_f = self._spec_file_obj
spec_f.seek(0)
lines = ""
try:
for line in spec_f.readlines():
# call recursively
split_line = line.split(" ", 1)
if split_line:
expander = self.__builtin_expanders.get(split_line[0])
if expander is not None:
try:
line = expander(line)
except RuntimeError as err:
raise SpecPreprocessor.PreprocessorError(
"invalid preprocessor line: %s" % (err,))
lines += line
finally:
spec_f.seek(0)
return lines
def parse(self):
content = []
spec_f = self._spec_file_obj
spec_f.seek(0)
try:
for line in spec_f.readlines():
split_line = line.split(" ", 1)
if split_line:
expander = self.__builtin_expanders.get(split_line[0])
if expander is not None:
line = expander(line)
content.append(line)
finally:
spec_f.seek(0)
final_content = []
for line in content:
split_line = line.split(" ", 1)
if split_line:
expander = self.__expanders.get(split_line[0])
if expander is not None:
line = expander(line)
final_content.append(line)
final_content = ("".join(final_content)).split("\n")
return final_content
class SpecParser:
def __init__(self, file_object):
self.file_object = file_object
self._preprocessor = SpecPreprocessor(self.file_object)
self.__parser = MatterSpec()
self.vital_parameters = self.__parser.vital_parameters()
self._data = self.__parser.data()
def _parse_line_statement(self, line_stmt):
try:
key, value = line_stmt.split(":", 1)
except ValueError:
return None, None
key, value = key.strip(), value.strip()
return key, value
def parse(self):
def _is_list_list(lst):
for x in lst:
if isinstance(x, list):
return True
return False
mydict = {}
data = self._generic_parser()
# compact lines properly
old_key = None
for line in data:
key = None
value = None
v_key, v_value = self._parse_line_statement(line)
check_dict = self._data.get(v_key)
if check_dict is not None:
key, value = v_key, v_value
old_key = key
elif isinstance(old_key, get_stringtype()):
key = old_key
value = line.strip()
if not value:
continue
# gather again... key is changed
check_dict = self._data.get(key)
if not isinstance(check_dict, dict):
continue
value = check_dict["ve"](value)
if not check_dict["cb"](value):
continue
if key in mydict:
if isinstance(value, get_stringtype()):
mydict[key] += " %s" % (value,)
elif isinstance(value, list) and _is_list_list(value):
# support multi-line "," separators
# append the first element of value to the last
# element of mydict[key] if it's there.
first_el = value.pop(0)
if mydict[key] and first_el:
mydict[key][-1] += first_el
mydict[key] += value
elif isinstance(value, list):
mydict[key] += value
else:
continue
else:
mydict[key] = value
self._validate_parse(mydict)
self._extend_parse(mydict)
self._mod_parse(mydict)
data = mydict.copy()
# add file name if possible
data["__name__"] = self.file_object.name
return data
def _extend_parse(self, mydata):
"""
Extend parsed data with default values for statements with
default option available.
"""
for statement, opts in self._data.items():
if "default" in opts and (statement not in mydata):
mydata[statement] = opts["default"]
def _mod_parse(self, mydata):
"""
For parser data exposing a mod, execute the mod against
the data itself.
"""
for statement, opts in self._data.items():
if statement in mydata and "mod" in opts:
mydata[statement] = opts["mod"](mydata[statement])
def _validate_parse(self, mydata):
for param in self.vital_parameters:
if param not in mydata:
raise ValueError(
"'%s' missing or invalid"
" '%s' parameter, it's vital. Your specification"
" file is incomplete!" % (self.file_object.name, param,)
)
def _generic_parser(self):
data = []
content = self._preprocessor.parse()
# filter comments and white lines
content = [x.strip().rsplit("#", 1)[0].strip() for x in content if \
not x.startswith("#") and x.strip()]
for line in content:
if line in data:
continue
data.append(line)
return data
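def _example_parse_spec(path):
    """Hedged usage sketch, not part of the original module: parse a .spec
    file and return its statement dictionary. The path argument is
    illustrative; the file must provide the vital statements (e.g.
    'packages' and 'repository') or parse() raises ValueError."""
    with open(path, "r") as spec_f:
        return SpecParser(spec_f).parse()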
| gpl-2.0 |
askulkarni2/ansible | lib/ansible/inventory/vars_plugins/noop.py | 317 | 1632 | # (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2014, Serge van Ginderachter <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class VarsModule(object):
"""
Loads variables for groups and/or hosts
"""
def __init__(self, inventory):
""" constructor """
self.inventory = inventory
self.inventory_basedir = inventory.basedir()
def run(self, host, vault_password=None):
""" For backwards compatibility, when only vars per host were retrieved
This method should return both host specific vars as well as vars
calculated from groups it is a member of """
return {}
def get_host_vars(self, host, vault_password=None):
""" Get host specific variables. """
return {}
def get_group_vars(self, group, vault_password=None):
""" Get group specific variables. """
return {}
| gpl-3.0 |
pronto/dotfiles | .vim/pylibs/logilab/astng/raw_building.py | 2 | 13555 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
# copyright 2003-2010 Sylvain Thenault, all rights reserved.
# contact mailto:[email protected]
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""this module contains a set of functions to create astng trees from scratch
(build_* functions) or from living object (object_build_* functions)
"""
__docformat__ = "restructuredtext en"
import sys
from os.path import abspath
from inspect import (getargspec, isdatadescriptor, isfunction, ismethod,
ismethoddescriptor, isclass, isbuiltin)
from logilab.astng import BUILTINS_MODULE
from logilab.astng.node_classes import CONST_CLS
from logilab.astng.nodes import (Module, Class, Const, const_factory, From,
Function, EmptyNode, Name, Arguments, Dict, List, Set, Tuple)
from logilab.astng.bases import Generator
from logilab.astng.manager import ASTNGManager
MANAGER = ASTNGManager()
_CONSTANTS = tuple(CONST_CLS) # the keys of CONST_CLS eg python builtin types
def _attach_local_node(parent, node, name):
node.name = name # needed by add_local_node
parent.add_local_node(node)
_marker = object()
def attach_dummy_node(node, name, object=_marker):
"""create a dummy node and register it in the locals of the given
node with the specified name
"""
enode = EmptyNode()
enode.object = object
_attach_local_node(node, enode, name)
EmptyNode.has_underlying_object = lambda self: self.object is not _marker
def attach_const_node(node, name, value):
"""create a Const node and register it in the locals of the given
node with the specified name
"""
if not name in node.special_attributes:
_attach_local_node(node, const_factory(value), name)
def attach_import_node(node, modname, membername):
"""create a From node and register it in the locals of the given
node with the specified name
"""
from_node = From(modname, [(membername, None)])
_attach_local_node(node, from_node, membername)
def build_module(name, doc=None):
"""create and initialize a astng Module node"""
node = Module(name, doc, pure_python=False)
node.package = False
node.parent = None
return node
def build_class(name, basenames=(), doc=None):
"""create and initialize a astng Class node"""
node = Class(name, doc)
for base in basenames:
basenode = Name()
basenode.name = base
node.bases.append(basenode)
basenode.parent = node
return node
def build_function(name, args=None, defaults=None, flag=0, doc=None):
"""create and initialize a astng Function node"""
args, defaults = args or [], defaults or []
# first argument is now a list of decorators
func = Function(name, doc)
func.args = argsnode = Arguments()
argsnode.args = []
for arg in args:
argsnode.args.append(Name())
argsnode.args[-1].name = arg
argsnode.args[-1].parent = argsnode
argsnode.defaults = []
for default in defaults:
argsnode.defaults.append(const_factory(default))
argsnode.defaults[-1].parent = argsnode
argsnode.kwarg = None
argsnode.vararg = None
argsnode.parent = func
if args:
register_arguments(func)
return func
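def _example_build_function_usage():
    """Hedged usage sketch, not part of the original module: build an astng
    Function node roughly equivalent to ``def connect(host, port=80)`` and
    attach it to a freshly built Module node. Names are illustrative only."""
    module = build_module('example_module')
    func = build_function('connect', args=['host', 'port'], defaults=[80])
    module.add_local_node(func, 'connect')
    return module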
def build_from_import(fromname, names):
"""create and initialize an astng From import statement"""
return From(fromname, [(name, None) for name in names])
def register_arguments(func, args=None):
"""add given arguments to local
args is a list that may contains nested lists
(i.e. def func(a, (b, c, d)): ...)
"""
if args is None:
args = func.args.args
if func.args.vararg:
func.set_local(func.args.vararg, func.args)
if func.args.kwarg:
func.set_local(func.args.kwarg, func.args)
for arg in args:
if isinstance(arg, Name):
func.set_local(arg.name, arg)
else:
register_arguments(func, arg.elts)
def object_build_class(node, member, localname):
"""create astng for a living class object"""
basenames = [base.__name__ for base in member.__bases__]
return _base_class_object_build(node, member, basenames,
localname=localname)
def object_build_function(node, member, localname):
"""create astng for a living function object"""
args, varargs, varkw, defaults = getargspec(member)
if varargs is not None:
args.append(varargs)
if varkw is not None:
args.append(varkw)
func = build_function(getattr(member, '__name__', None) or localname, args,
defaults, member.func_code.co_flags, member.__doc__)
node.add_local_node(func, localname)
def object_build_datadescriptor(node, member, name):
"""create astng for a living data descriptor object"""
return _base_class_object_build(node, member, [], name)
def object_build_methoddescriptor(node, member, localname):
"""create astng for a living method descriptor object"""
# FIXME get arguments ?
func = build_function(getattr(member, '__name__', None) or localname,
doc=member.__doc__)
    # set the node's arguments to None to signal that we have no information,
    # not an empty argument list
func.args.args = None
node.add_local_node(func, localname)
def _base_class_object_build(node, member, basenames, name=None, localname=None):
"""create astng for a living class object, with a given set of base names
(e.g. ancestors)
"""
klass = build_class(name or getattr(member, '__name__', None) or localname,
basenames, member.__doc__)
klass._newstyle = isinstance(member, type)
node.add_local_node(klass, localname)
try:
# limit the instantiation trick since it's too dangerous
# (such as infinite test execution...)
# this at least resolves common case such as Exception.args,
# OSError.errno
if issubclass(member, Exception):
instdict = member().__dict__
else:
raise TypeError
except:
pass
else:
for name, obj in instdict.items():
valnode = EmptyNode()
valnode.object = obj
valnode.parent = klass
valnode.lineno = 1
klass.instance_attrs[name] = [valnode]
return klass
class InspectBuilder(object):
"""class for building nodes from living object
this is actually a really minimal representation, including only Module,
Function and Class nodes and some others as guessed.
"""
# astng from living objects ###############################################
def __init__(self):
self._done = {}
self._module = None
def inspect_build(self, module, modname=None, path=None):
"""build astng from a living module (i.e. using inspect)
this is used when there is no python source code available (either
because it's a built-in module or because the .py is not available)
"""
self._module = module
if modname is None:
modname = module.__name__
node = build_module(modname, module.__doc__)
node.file = node.path = path and abspath(path) or path
MANAGER.astng_cache[modname] = node
node.package = hasattr(module, '__path__')
self._done = {}
self.object_build(node, module)
return node
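    # Hedged usage sketch, not part of the original class: building a partial
    # astng tree for a module with no Python source (e.g. a builtin):
    #
    #   import zlib
    #   zlib_astng = InspectBuilder().inspect_build(zlib)
    #   assert zlib_astng.name == 'zlib'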
def object_build(self, node, obj):
"""recursive method which create a partial ast from real objects
(only function, class, and method are handled)
"""
if obj in self._done:
return self._done[obj]
self._done[obj] = node
for name in dir(obj):
try:
member = getattr(obj, name)
except AttributeError:
# damned ExtensionClass.Base, I know you're there !
attach_dummy_node(node, name)
continue
if ismethod(member):
member = member.im_func
if isfunction(member):
# verify this is not an imported function
filename = getattr(member.func_code, 'co_filename', None)
if filename is None:
assert isinstance(member, object)
object_build_methoddescriptor(node, member, name)
elif filename != getattr(self._module, '__file__', None):
attach_dummy_node(node, name, member)
else:
object_build_function(node, member, name)
elif isbuiltin(member):
if self.imported_member(node, member, name):
#if obj is object:
# print 'skippp', obj, name, member
continue
object_build_methoddescriptor(node, member, name)
elif isclass(member):
if self.imported_member(node, member, name):
continue
if member in self._done:
class_node = self._done[member]
if not class_node in node.locals.get(name, ()):
node.add_local_node(class_node, name)
else:
class_node = object_build_class(node, member, name)
# recursion
self.object_build(class_node, member)
if name == '__class__' and class_node.parent is None:
class_node.parent = self._done[self._module]
elif ismethoddescriptor(member):
assert isinstance(member, object)
object_build_methoddescriptor(node, member, name)
elif isdatadescriptor(member):
assert isinstance(member, object)
object_build_datadescriptor(node, member, name)
elif isinstance(member, _CONSTANTS):
attach_const_node(node, name, member)
else:
# create an empty node so that the name is actually defined
attach_dummy_node(node, name, member)
def imported_member(self, node, member, name):
"""verify this is not an imported class or handle it"""
# /!\ some classes like ExtensionClass doesn't have a __module__
# attribute ! Also, this may trigger an exception on badly built module
# (see http://www.logilab.org/ticket/57299 for instance)
try:
modname = getattr(member, '__module__', None)
except:
# XXX use logging
print 'unexpected error while building astng from living object'
import traceback
traceback.print_exc()
modname = None
if modname is None:
if name in ('__new__', '__subclasshook__'):
# Python 2.5.1 (r251:54863, Sep 1 2010, 22:03:14)
# >>> print object.__new__.__module__
# None
modname = BUILTINS_MODULE
else:
attach_dummy_node(node, name, member)
return True
if {'gtk': 'gtk._gtk'}.get(modname, modname) != self._module.__name__:
# check if it sounds valid and then add an import node, else use a
# dummy node
try:
getattr(sys.modules[modname], name)
except (KeyError, AttributeError):
attach_dummy_node(node, name, member)
else:
attach_import_node(node, modname, name)
return True
return False
### astng boot strapping ###################################################
_CONST_PROXY = {}
def astng_boot_strapping():
"""astng boot strapping the builtins module"""
# this boot strapping is necessary since we need the Const nodes to
# inspect_build builtins, and then we can proxy Const
builder = InspectBuilder()
from logilab.common.compat import builtins
astng_builtin = builder.inspect_build(builtins)
for cls, node_cls in CONST_CLS.items():
if cls is type(None):
proxy = build_class('NoneType')
proxy.parent = astng_builtin
else:
proxy = astng_builtin.getattr(cls.__name__)[0] # XXX
if cls in (dict, list, set, tuple):
node_cls._proxied = proxy
else:
_CONST_PROXY[cls] = proxy
astng_boot_strapping()
# TODO : find a nicer way to handle this situation;
# However __proxied introduced an
# infinite recursion (see https://bugs.launchpad.net/pylint/+bug/456870)
def _set_proxied(const):
return _CONST_PROXY[const.value.__class__]
Const._proxied = property(_set_proxied)
# FIXME : is it alright that Generator._proxied is not a astng node?
Generator._proxied = MANAGER.infer_astng_from_something(type(a for a in ()))
| bsd-2-clause |
bastiak/dnssyncd | dnssyncdlib/zonesync.py | 1 | 21561 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import copy
import traceback
from multiprocessing import Process
from dnssyncdlib.data import DNSDataPackage
from dnssyncdlib.dnsdiff import DNSDiff, derelativize_data
from dnssyncdlib.messages import (StatusMsg, TerminateMsg, DoSyncMsg, NotifyRecordMsg,
DoReInitMsg, SuspendMsg, ZoneDataMsg, GetZoneDataMsg)
from dnssyncdlib.registration import Registrator
from dnssyncdlib.constants import Const
from dnssyncdlib.util import log_plugin, setup_zone_logger
import dns.set
class SyncState():
init = 1
synced = 2
active = 3
active_write = 4
active_read = 5
unsynced = 6
unsynced_readfailed = 7
unsynced_writefailed = 8
part_synced = 9
suspend = 10
@classmethod
def to_str(cls,state):
state_str = {
cls.init : u'plugins initialization',
cls.synced : u'synchronized',
cls.active : u'synchronization active',
cls.active_write : u'synchronization - sending updates',
cls.active_read : u'synchronization - getting dns data',
cls.unsynced : u'unsynchronized',
cls.unsynced_readfailed : u'unsynchronized - failed to get dns data',
cls.unsynced_writefailed : u'unsynchronized - failed to send update',
cls.part_synced : u'partially synchronized',
cls.suspend : u'synchronization suspended',
}
return state_str.get(state, 'Internal error, Unknown state')
@classmethod
def to_shor_str(cls, state):
state_str = {
cls.init : u'init',
cls.synced : u'synced',
cls.active : u'active',
cls.active_write : u'active_write',
cls.active_read : u'active_read',
cls.unsynced : u'unsynced',
cls.unsynced_readfailed : u'unsynced_readfailed',
cls.unsynced_writefailed : u'unsynced_writefailed',
cls.part_synced : u'part_synced',
cls.suspend : u'suspended',
}
return state_str.get(state, 'unknown')
class ZoneSync(Process):
optional_options = { 'max_err_rate': 10, # restart count before process is terminated
'max_retry': 10, # retry count read/write data until state is unsynced
'ignore_ttl': False, # ignore TTL of records
'retry_interval': 1000,}
def __init__(self, commid, zonename, coreq, zoneq, **options):
"""
@param coreq queue in direction to core
@param zoneq queue in direction from core to this object
"""
super(ZoneSync, self).__init__()
self.zonename = zonename
self.coreq = coreq
self.zoneq = zoneq
self.commid = commid
self.state = SyncState.init
self.logger = setup_zone_logger(str(zonename))
self.unsynced_A = dns.set.Set()
self.unsynced_B = dns.set.Set()
self.A = None
self.B = None
self.options = options
self.count_write = 0
self.count_unsuccessful_write = 0
self.count_unsuccessful_write_since_last_succesful = 0
self.count_read = 0
self.count_unsuccesfull_read = 0
self.count_unsuccesfull_read_since_last_successful = 0
self.dwndld_all = False # download all data
self.dataA, self.dataB = None, None
self.updateA, self.updateB = None, None
# variables specify if data is ready to be written
self.write_A_pending = True
self.write_B_pending = True
self.diff = DNSDiff(self.zonename)
self.wait_for_msg = False # waiting
self.records_cache = None
self.ignored_record_types = [dns.rdatatype.RRSIG, dns.rdatatype.NSEC,
dns.rdatatype.NSEC3]
for opt, default in self.optional_options.iteritems():
if opt in options:
setattr(self, opt, options[opt])
else:
setattr(self, opt, default)
def _filter_records(self, data):
"""
        Filter out records that will not be synchronized
:param data: DNSDataPackage
"""
assert isinstance(data, DNSDataPackage)
for record_set in [data.removed_records, data.added_records,
data.all_records]:
items_to_be_removed = []
for rrset in record_set:
if rrset.rdtype in self.ignored_record_types:
items_to_be_removed.append(rrset)
for item in items_to_be_removed:
record_set.discard(item)
def _cache_create(self, soa, records_set):
"""
Create and initialize cache with data
"""
assert isinstance(soa, dns.set.Set)
assert isinstance(records_set, dns.set.Set)
self.records_cache = {'soa': soa, 'records': records_set}
self.logger.debug("Data cache for zone %s created", self.zonename)
def _cache_delete(self):
"""
Delete cache
"""
self.records_cache = None
def _cache_empty(self):
"""
Return True if cache is empty
"""
if self.records_cache is None:
return True
return False
def _cache_update(self, soa, removed_records, added_records):
"""
Update cache with data
"""
assert isinstance(removed_records, dns.set.Set)
assert isinstance(added_records, dns.set.Set)
if soa:
self.records_cache['soa'] = soa
if removed_records:
# remove data first
self.records_cache['records'].difference_update(removed_records)
if added_records:
#add new data
self.records_cache['records'].union_update(added_records)
def _cache_diff(self, all_records, soa):
assert isinstance(all_records, dns.set.Set)
data = DNSDataPackage(self.zonename)
data.soa = soa # just pass SOA value to new instance
data.all_records = all_records
data.added_records = all_records.difference(self.records_cache['records'])
data.removed_records = self.records_cache['records'].difference(all_records)
data.incremental = True
return data
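    # Illustrative note, not part of the original code: _cache_diff() turns a
    # full zone snapshot into an incremental DNSDataPackage against the cache.
    # E.g. if the cache holds an rrset "www A 1.2.3.4" and the snapshot holds
    # "www A 1.2.3.5", the new rrset ends up in added_records and the old one
    # in removed_records.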
def _update_unsynced_records(self, unsynced_set, update):
# remove added records from unsynced set
for r in update.add_records:
unsynced_set.discard(r)
# remove removed records from unsynsed set
for r in update.remove_records:
unsynced_set.discard(r)
def _apply_unsynced_records_to_data(self):
"""
        Records that are unsynced due to collisions must be added back to the
        data for the diff check to work properly.
        :return: None
"""
for u_rec in self.unsynced_A:
# data which cannot be added to A side, put it back to B
if self.dataB.incremental:
if u_rec in self.dataB.removed_records:
# conflicted record was removed
continue
self.dataB.added_records.add(u_rec)
else:
# if we get all data, we do not need to add these unsynced records
break
for u_rec in self.unsynced_B:
# data which cannot be added to B side, put it back to A
if self.dataA.incremental:
if u_rec in self.dataA.removed_records:
# conflicted record was removed
continue
self.dataA.added_records.add(u_rec)
else:
break
# Do NOT remove unsynced records here; use _update_unsynced_records
# if the sync was successful, otherwise we could lose this information
def __init(self):
"""
INIT state process
Initialize plugins
"""
auth_source = self.options.get('authoritative')
first = True
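# the first plugin configuration becomes side A, the other side B; the authoritative source gets priority in the diff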
for conf_name, conf in self.options['plugins_conf'].iteritems():
plugin, core, notify = Registrator.get_instance().get_plugin(
conf['plugin'])
if first:
first = False
self.A = plugin(self.zonename, conf_name, **conf)
if auth_source == conf_name:
self.diff.set_prio_A()
else:
self.B = plugin(self.zonename, conf_name, **conf)
if auth_source == conf_name:
self.diff.set_prio_B()
self.dwndld_all = True
if self.ignore_ttl:
self.diff.set_ignore_ttl()
self.records_cache = None
# next state
self.state = SyncState.active_read
def __synced(self):
"""
SYNCED state
:return:
"""
self.dwndld_all = False # Use incremental updates instead
self.wait_for_msg = True
def __active(self):
"""
ACTIVE state
:return:
"""
try:
if not self._cache_empty():
cache = self.records_cache
else:
cache = None
self.updateA, self.updateB = self.diff.base_diff(
self.dataA, self.dataB, cache)
except Exception as e:
self.logger.error(u'zone %s: Unable to diff data (%s)',
self.zonename, e)
self.logger.debug(u"Traceback: %s", traceback.format_exc())
self.state = SyncState.unsynced
return
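# each update is a pair: [0] the update to apply, [1] records that could not be resolved and stay unsynced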
self.unsynced_A.update(self.updateA[1])
self.unsynced_B.update(self.updateB[1])
# TODO: set to False if there is no data to write
self.write_A_pending = True
self.write_B_pending = True
# init cache
if self._cache_empty():
self._cache_create(self.updateA[0].soa,
self.updateA[0].same_records_added)
else:
# update with the same changes
self._cache_update(self.updateA[0].soa,
self.updateA[0].same_records_removed,
self.updateA[0].same_records_added)
# success next state
if self.updateA[0].update_required or self.updateB[0].update_required:
self.state = SyncState.active_write
elif not self.unsynced_A and not self.unsynced_B:
self.state = SyncState.synced
else:
self.state = SyncState.part_synced
def __active_read(self):
"""
ACTIVE_READ state
:return:
"""
self.count_read += 1
a_ready = False
b_ready = False
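# read side A; on failure a_ready stays False and the state moves to unsynced_readfailed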
try:
self.dataA = self.A.get_zone_data(all_records=self.dwndld_all)
log_plugin(self.logger.debug, self.A.name,
"Got (A) data: %s" % self.dataA)
a_ready = True
derelativize_data(self.dataA, self.zonename)
self._filter_records(self.dataA)
if not self._cache_empty() and not self.dataA.incremental:
self.dataA = self._cache_diff(self.dataA.all_records,
self.dataA.soa)
log_plugin(self.logger.debug, self.A.name,
"Got (A) data (after cache diff): %s" % self.dataA)
except Exception as e:
self.logger.exception(u"zone %s: Unable to read data (%s)",
self.zonename, e)
try:
self.dataB = self.B.get_zone_data(all_records=self.dwndld_all)
log_plugin(self.logger.debug, self.B.name,
"Got (B) data: %s" % self.dataB)
b_ready = True
derelativize_data(self.dataB, self.zonename)
self._filter_records(self.dataB)
if not self._cache_empty() and not self.dataB.incremental:
self.dataB = self._cache_diff(self.dataB.all_records,
self.dataB.soa)
log_plugin(self.logger.debug, self.B.name,
"Got (B) data (after cache diff): %s" % self.dataB)
except Exception as e:
self.logger.exception(u"zone %s: Unable to read data (%s)",
self.zonename, e)
# next state
if a_ready and b_ready:
self.count_unsuccesfull_read_since_last_successful = 0
self.state = SyncState.active
# try to resolve conflicts again, add unresolved records into data package
self._apply_unsynced_records_to_data()
return
self.state = SyncState.unsynced_readfailed
def __active_write(self):
"""
ACTIVE_WRITE state
:return:
"""
self.count_write += 1
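# push the pending update to each side; the pending flag is cleared only when the write succeeds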
if self.write_A_pending:
log_plugin(self.logger.debug, self.A.name,
'A update:\n%s\nerrors:\n%s' % (self.updateA[0],
self.updateA[1]))
try:
self.A.update(self.updateA[0])
self.write_A_pending = False
self._update_unsynced_records(self.unsynced_A, self.updateA[0])
self.B.update_finished()
except Exception:
self.logger.exception(u'zone %s: Unable to write A data',
self.zonename)
if self.write_B_pending:
log_plugin(self.logger.debug, self.B.name,
'B update:\n%s\nerrors:\n%s' % (self.updateB[0],
self.updateB[1]))
try:
self.B.update(self.updateB[0])
self.write_B_pending = False
self._update_unsynced_records(self.unsynced_B, self.updateB[0])
self.A.update_finished()
except Exception:
self.logger.exception(u'zone %s: Unable to write B data',
self.zonename)
if self.write_B_pending or self.write_A_pending:
# some data has not been uploaded yet
self.state = SyncState.unsynced_writefailed
return
if not self._cache_empty():
# update cache with both updates
self._cache_update(self.updateA[0].soa,
self.updateA[0].remove_records,
self.updateA[0].add_records)
self._cache_update(self.updateB[0].soa,
self.updateB[0].remove_records,
self.updateB[0].add_records)
self.logger.debug("cache: %r", self.records_cache)
self.count_unsuccessful_write_since_last_succesful = 0
if not self.unsynced_A and not self.unsynced_B:
self.state = SyncState.synced
else:
self.state = SyncState.part_synced
def __unsynced(self):
"""
UNSYNCED state
:return:
"""
self.wait_for_msg = True
def __unsynced_readfailed(self):
"""
UNSYNCED_READFAILED state
:return:
"""
self.count_unsuccesfull_read += 1
self.count_unsuccesfull_read_since_last_successful += 1
if (self.count_unsuccesfull_read_since_last_successful >
Const.MAX_UNSUCCESSFUL_READS):
self.logger.error(u'zone %s: reached maximum number of unsuccessful reads',
self.zonename)
self.state = SyncState.suspend
else:
self.state = SyncState.active_read
def __unsynced_writefailed(self):
"""
UNSYNCED_WRITEFAILED state
:return:
"""
self.count_unsuccessful_write += 1
self.count_unsuccessful_write_since_last_succesful += 1
if (self.count_unsuccessful_write_since_last_succesful >
Const.MAX_UNSUCCESSFUL_WRITES):
self.state = SyncState.suspend
# TODO add update_failed call for A and B
return
self.state = SyncState.active_write
def __part_synced(self):
"""
PART_SYNCED state
:return:
"""
self.logger.debug("Errors A (%s): %s", self.A.name,
', '.join(rrset.to_text() for rrset in self.unsynced_A))
self.logger.debug("Errors B (%s): %s", self.B.name,
', '.join(rrset.to_text() for rrset in self.unsynced_B))
self.wait_for_msg = True
def __suspend(self):
"""
SUSPEND state
:return:
"""
self.logger.critical('zone ' + str(self.zonename) +
' synchronization suspended')
self.wait_for_msg = True
def __process_notify_msg(self, msg):
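"""
Handle a notify message from a plugin and choose the next state
"""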
if self.A.name == msg.plugin_name:
to_active_read, data = self.A.process_notify_message(
msg, self.records_cache)
self.dataA = data
elif self.B.name == msg.plugin_name:
to_active_read, data = self.B.process_notify_message(
msg, self.records_cache)
self.dataB = data
else:
self.logger.error("NotifyRecordMsg Received: Unknown plugin "
"name %s", msg.plugin_name)
return
if to_active_read:
self.state = SyncState.active_read
elif data is not None:
self.state = SyncState.active
else:
self.logger.debug("Zone %s notify msg processed: synchronization "
"not required", self.zonename)
return
self.logger.debug("Zone %s notify msg processed: moving to %s "
"state ", self.zonename,
SyncState.to_str(self.state))
def run(self):
while True:
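# main loop: report state to core, optionally wait for a control message, then run the current state handler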
self.logger.debug(str(self.zonename) + u' zone state is: ' +
SyncState.to_str(self.state))
#send state to core
self.coreq.put_nowait(StatusMsg(self.zonename, self.state,
self.commid))
if self.wait_for_msg:
# waiting for message from core
msg = self.zoneq.get()
self.wait_for_msg = False # msg already received
if (isinstance(msg, DoSyncMsg) and
self.state != SyncState.suspend):
self.state = SyncState.active_read
self.logger.debug('zone %s moved to active read state '
'by core', self.zonename)
elif isinstance(msg, DoReInitMsg):
self.state = SyncState.init
self.logger.info("zone %s was re-inicialized by user",
self.zonename)
elif isinstance(msg, SuspendMsg):
self.state = SyncState.suspend
self.logger.info("zone %s was suspended by user",
self.zonename)
elif (isinstance(msg, NotifyRecordMsg) and
self.state != SyncState.suspend):
self.logger.debug(u"zone %s received Notify message",
self.zonename)
self.__process_notify_msg(msg)
elif isinstance(msg, TerminateMsg):
#terminate process
self.logger.info(
u"zone %s synchronization TERMINATED by core",
self.zonename)
return # Queues should be closed during garbage collecting
elif isinstance(msg, GetZoneDataMsg):
self.coreq.put_nowait(
ZoneDataMsg(
self.zonename,
dict(status=self.state,
records=self.records_cache,
unsync_a=self.unsynced_A,
plug_a=self.A.name,
unsync_b=self.unsynced_B,
plug_b=self.B.name),
self.commid)
)
else:
# keep waiting; no expected msg was received
self.wait_for_msg = True
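# dispatch to the handler for the current state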
if self.state == SyncState.init:
self.__init()
elif self.state == SyncState.synced:
self.__synced()
elif self.state == SyncState.active:
self.__active()
elif self.state == SyncState.active_read:
self.__active_read()
elif self.state == SyncState.active_write:
self.__active_write()
elif self.state == SyncState.unsynced:
self.__unsynced()
elif self.state == SyncState.unsynced_readfailed:
self.__unsynced_readfailed()
elif self.state == SyncState.unsynced_writefailed:
self.__unsynced_writefailed()
elif self.state == SyncState.part_synced:
self.__part_synced()
elif self.state == SyncState.suspend:
self.__suspend()
else:
self.logger.error('Zone ' + str(self.zonename) +
' unknown state, restarting process...')
self.state = SyncState.init
| gpl-3.0 |
CapOM/ChromiumGStreamerBackend | tools/telemetry/telemetry/web_perf/metrics/timeline_based_metric_unittest.py | 77 | 1129 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import telemetry.web_perf.metrics.timeline_based_metric as tbm_module
class TimelineBasedMetricTest(unittest.TestCase):
# pylint: disable=W0212
def testTimeRangesHasOverlap(self):
# Test cases with overlap on one side
self.assertTrue(tbm_module._TimeRangesHasOverlap([(10, 20), (5, 15)]))
self.assertTrue(tbm_module._TimeRangesHasOverlap([(5, 15), (10, 20)]))
self.assertTrue(tbm_module._TimeRangesHasOverlap(
[(5, 15), (25, 30), (10, 20)]))
# Test cases where one range falls in the middle of another
self.assertTrue(tbm_module._TimeRangesHasOverlap([(10, 20), (15, 18)]))
self.assertTrue(tbm_module._TimeRangesHasOverlap([(15, 18), (10, 20)]))
self.assertTrue(tbm_module._TimeRangesHasOverlap(
[(15, 18), (40, 50), (10, 20)]))
self.assertFalse(tbm_module._TimeRangesHasOverlap([(15, 18), (20, 25)]))
self.assertFalse(tbm_module._TimeRangesHasOverlap(
[(1, 2), (2, 3), (0, 1)]))
| bsd-3-clause |