repo_name (string, 5–100 chars) | path (string, 4–375 chars) | copies (class label, 991 values) | size (string, 4–7 chars) | content (string, 666–1M chars) | license (class label, 15 values)
---|---|---|---|---|---|
AndroidOpenDevelopment/android_external_chromium_org | mojo/public/tools/bindings/generators/mojom_cpp_generator.py | 8 | 9880 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates C++ source files from a mojom.Module."""
import mojom.generate.generator as generator
import mojom.generate.module as mojom
import mojom.generate.pack as pack
from mojom.generate.template_expander import UseJinja
_kind_to_cpp_type = {
mojom.BOOL: "bool",
mojom.INT8: "int8_t",
mojom.UINT8: "uint8_t",
mojom.INT16: "int16_t",
mojom.UINT16: "uint16_t",
mojom.INT32: "int32_t",
mojom.UINT32: "uint32_t",
mojom.FLOAT: "float",
mojom.HANDLE: "mojo::Handle",
mojom.DCPIPE: "mojo::DataPipeConsumerHandle",
mojom.DPPIPE: "mojo::DataPipeProducerHandle",
mojom.MSGPIPE: "mojo::MessagePipeHandle",
mojom.SHAREDBUFFER: "mojo::SharedBufferHandle",
mojom.INT64: "int64_t",
mojom.UINT64: "uint64_t",
mojom.DOUBLE: "double",
}
def DefaultValue(field):
if field.default:
if isinstance(field.kind, mojom.Struct):
assert field.default == "default"
return "%s::New()" % GetNameForKind(field.kind)
return ExpressionToText(field.default)
return ""
def NamespaceToArray(namespace):
return namespace.split('.') if namespace else []
def GetNameForKind(kind, internal = False):
parts = []
if kind.imported_from:
parts.extend(NamespaceToArray(kind.imported_from["namespace"]))
if internal:
parts.append("internal")
if kind.parent_kind:
parts.append(kind.parent_kind.name)
parts.append(kind.name)
return "::".join(parts)
def GetCppType(kind):
if isinstance(kind, mojom.Struct):
return "%s_Data*" % GetNameForKind(kind, internal=True)
if isinstance(kind, mojom.Array):
return "mojo::internal::Array_Data<%s>*" % GetCppType(kind.kind)
if isinstance(kind, mojom.Interface) or \
isinstance(kind, mojom.InterfaceRequest):
return "mojo::MessagePipeHandle"
if isinstance(kind, mojom.Enum):
return "int32_t"
if kind.spec == 's':
return "mojo::internal::String_Data*"
return _kind_to_cpp_type[kind]
def GetCppPodType(kind):
if kind.spec == 's':
return "char*"
return _kind_to_cpp_type[kind]
def GetCppArrayArgWrapperType(kind):
if isinstance(kind, mojom.Enum):
return GetNameForKind(kind)
if isinstance(kind, mojom.Struct):
return "%sPtr" % GetNameForKind(kind)
if isinstance(kind, mojom.Array):
return "mojo::Array<%s> " % GetCppArrayArgWrapperType(kind.kind)
if isinstance(kind, mojom.Interface):
raise Exception("Arrays of interfaces not yet supported!")
if isinstance(kind, mojom.InterfaceRequest):
raise Exception("Arrays of interface requests not yet supported!")
if kind.spec == 's':
return "mojo::String"
if kind.spec == 'h':
return "mojo::ScopedHandle"
if kind.spec == 'h:d:c':
return "mojo::ScopedDataPipeConsumerHandle"
if kind.spec == 'h:d:p':
return "mojo::ScopedDataPipeProducerHandle"
if kind.spec == 'h:m':
return "mojo::ScopedMessagePipeHandle"
if kind.spec == 'h:s':
return "mojo::ScopedSharedBufferHandle"
return _kind_to_cpp_type[kind]
def GetCppResultWrapperType(kind):
if isinstance(kind, mojom.Enum):
return GetNameForKind(kind)
if isinstance(kind, mojom.Struct):
return "%sPtr" % GetNameForKind(kind)
if isinstance(kind, mojom.Array):
return "mojo::Array<%s>" % GetCppArrayArgWrapperType(kind.kind)
if isinstance(kind, mojom.Interface):
return "%sPtr" % GetNameForKind(kind)
if isinstance(kind, mojom.InterfaceRequest):
return "mojo::InterfaceRequest<%s>" % GetNameForKind(kind.kind)
if kind.spec == 's':
return "mojo::String"
if kind.spec == 'h':
return "mojo::ScopedHandle"
if kind.spec == 'h:d:c':
return "mojo::ScopedDataPipeConsumerHandle"
if kind.spec == 'h:d:p':
return "mojo::ScopedDataPipeProducerHandle"
if kind.spec == 'h:m':
return "mojo::ScopedMessagePipeHandle"
if kind.spec == 'h:s':
return "mojo::ScopedSharedBufferHandle"
return _kind_to_cpp_type[kind]
def GetCppWrapperType(kind):
if isinstance(kind, mojom.Enum):
return GetNameForKind(kind)
if isinstance(kind, mojom.Struct):
return "%sPtr" % GetNameForKind(kind)
if isinstance(kind, mojom.Array):
return "mojo::Array<%s>" % GetCppArrayArgWrapperType(kind.kind)
if isinstance(kind, mojom.Interface):
return "mojo::ScopedMessagePipeHandle"
if isinstance(kind, mojom.InterfaceRequest):
raise Exception("InterfaceRequest fields not supported!")
if kind.spec == 's':
return "mojo::String"
if kind.spec == 'h':
return "mojo::ScopedHandle"
if kind.spec == 'h:d:c':
return "mojo::ScopedDataPipeConsumerHandle"
if kind.spec == 'h:d:p':
return "mojo::ScopedDataPipeProducerHandle"
if kind.spec == 'h:m':
return "mojo::ScopedMessagePipeHandle"
if kind.spec == 'h:s':
return "mojo::ScopedSharedBufferHandle"
return _kind_to_cpp_type[kind]
def GetCppConstWrapperType(kind):
if isinstance(kind, mojom.Struct):
return "%sPtr" % GetNameForKind(kind)
if isinstance(kind, mojom.Array):
return "mojo::Array<%s>" % GetCppArrayArgWrapperType(kind.kind)
if isinstance(kind, mojom.Interface):
return "%sPtr" % GetNameForKind(kind)
if isinstance(kind, mojom.InterfaceRequest):
return "mojo::InterfaceRequest<%s>" % GetNameForKind(kind.kind)
if isinstance(kind, mojom.Enum):
return GetNameForKind(kind)
if kind.spec == 's':
return "const mojo::String&"
if kind.spec == 'h':
return "mojo::ScopedHandle"
if kind.spec == 'h:d:c':
return "mojo::ScopedDataPipeConsumerHandle"
if kind.spec == 'h:d:p':
return "mojo::ScopedDataPipeProducerHandle"
if kind.spec == 'h:m':
return "mojo::ScopedMessagePipeHandle"
if kind.spec == 'h:s':
return "mojo::ScopedSharedBufferHandle"
  if kind not in _kind_to_cpp_type:
print "missing:", kind.spec
return _kind_to_cpp_type[kind]
def GetCppFieldType(kind):
if isinstance(kind, mojom.Struct):
return ("mojo::internal::StructPointer<%s_Data>" %
GetNameForKind(kind, internal=True))
if isinstance(kind, mojom.Array):
return "mojo::internal::ArrayPointer<%s>" % GetCppType(kind.kind)
if isinstance(kind, mojom.Interface) or \
isinstance(kind, mojom.InterfaceRequest):
return "mojo::MessagePipeHandle"
if isinstance(kind, mojom.Enum):
return GetNameForKind(kind)
if kind.spec == 's':
return "mojo::internal::StringPointer"
return _kind_to_cpp_type[kind]
def IsStructWithHandles(struct):
for pf in struct.packed.packed_fields:
if generator.IsHandleKind(pf.field.kind):
return True
return False
def TranslateConstants(token):
if isinstance(token, (mojom.NamedValue, mojom.EnumValue)):
# Both variable and enum constants are constructed like:
# Namespace::Struct::CONSTANT_NAME
name = []
if token.imported_from:
name.extend(NamespaceToArray(token.namespace))
if token.parent_kind:
name.append(token.parent_kind.name)
name.append(token.name)
return "::".join(name)
return token
def ExpressionToText(value):
return TranslateConstants(value)
def HasCallbacks(interface):
for method in interface.methods:
    if method.response_parameters is not None:
return True
return False
def ShouldInlineStruct(struct):
# TODO(darin): Base this on the size of the wrapper class.
if len(struct.fields) > 4:
return False
for field in struct.fields:
if generator.IsHandleKind(field.kind) or generator.IsObjectKind(field.kind):
return False
return True
_HEADER_SIZE = 8
class Generator(generator.Generator):
cpp_filters = {
"cpp_const_wrapper_type": GetCppConstWrapperType,
"cpp_field_type": GetCppFieldType,
"cpp_pod_type": GetCppPodType,
"cpp_result_type": GetCppResultWrapperType,
"cpp_type": GetCppType,
"cpp_wrapper_type": GetCppWrapperType,
"default_value": DefaultValue,
"expression_to_text": ExpressionToText,
"get_name_for_kind": GetNameForKind,
"get_pad": pack.GetPad,
"has_callbacks": HasCallbacks,
"should_inline": ShouldInlineStruct,
"is_enum_kind": generator.IsEnumKind,
"is_move_only_kind": generator.IsMoveOnlyKind,
"is_handle_kind": generator.IsHandleKind,
"is_interface_kind": generator.IsInterfaceKind,
"is_interface_request_kind": generator.IsInterfaceRequestKind,
"is_object_kind": generator.IsObjectKind,
"is_string_kind": generator.IsStringKind,
"is_struct_with_handles": IsStructWithHandles,
"struct_size": lambda ps: ps.GetTotalSize() + _HEADER_SIZE,
"struct_from_method": generator.GetStructFromMethod,
"response_struct_from_method": generator.GetResponseStructFromMethod,
"stylize_method": generator.StudlyCapsToCamel,
}
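  # Hedged sketch of how these filters are typically consumed from a
  # Jinja2 template (the template text below is illustrative, not taken
  # from cpp_templates/):
  #
  #   {{ field.kind | cpp_wrapper_type }} {{ field.name }};
  #   static const uint32_t kSize = {{ struct.packed | struct_size }};
  #
  # Each "| name" invocation calls the corresponding Python function above.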
def GetJinjaExports(self):
return {
"module": self.module,
"namespace": self.module.namespace,
"namespaces_as_array": NamespaceToArray(self.module.namespace),
"imports": self.module.imports,
"kinds": self.module.kinds,
"enums": self.module.enums,
"structs": self.GetStructs(),
"interfaces": self.module.interfaces,
}
@UseJinja("cpp_templates/module.h.tmpl", filters=cpp_filters)
def GenerateModuleHeader(self):
return self.GetJinjaExports()
@UseJinja("cpp_templates/module-internal.h.tmpl", filters=cpp_filters)
def GenerateModuleInternalHeader(self):
return self.GetJinjaExports()
@UseJinja("cpp_templates/module.cc.tmpl", filters=cpp_filters)
def GenerateModuleSource(self):
return self.GetJinjaExports()
def GenerateFiles(self, args):
self.Write(self.GenerateModuleHeader(), "%s.h" % self.module.name)
self.Write(self.GenerateModuleInternalHeader(),
"%s-internal.h" % self.module.name)
self.Write(self.GenerateModuleSource(), "%s.cc" % self.module.name)
| bsd-3-clause |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Sphinx-1.2.3-py2.7.egg/sphinx/config.py | 11 | 10843 | # -*- coding: utf-8 -*-
"""
sphinx.config
~~~~~~~~~~~~~
Build configuration file handling.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
from os import path
from sphinx.errors import ConfigError
from sphinx.locale import l_
from sphinx.util.osutil import make_filename
from sphinx.util.pycompat import bytes, b, execfile_
nonascii_re = re.compile(b(r'[\x80-\xff]'))
CONFIG_SYNTAX_ERROR = "There is a syntax error in your configuration file: %s"
if sys.version_info >= (3, 0):
CONFIG_SYNTAX_ERROR += "\nDid you change the syntax from 2.x to 3.x?"
class Config(object):
"""
Configuration file abstraction.
"""
# the values are: (default, what needs to be rebuilt if changed)
# If you add a value here, don't forget to include it in the
# quickstart.py file template as well as in the docs!
config_values = dict(
# general options
project = ('Python', 'env'),
copyright = ('', 'html'),
version = ('', 'env'),
release = ('', 'env'),
today = ('', 'env'),
today_fmt = (None, 'env'), # the real default is locale-dependent
language = (None, 'env'),
locale_dirs = ([], 'env'),
master_doc = ('contents', 'env'),
source_suffix = ('.rst', 'env'),
source_encoding = ('utf-8-sig', 'env'),
exclude_patterns = ([], 'env'),
# the next three are all deprecated now
unused_docs = ([], 'env'),
exclude_trees = ([], 'env'),
exclude_dirnames = ([], 'env'),
default_role = (None, 'env'),
add_function_parentheses = (True, 'env'),
add_module_names = (True, 'env'),
trim_footnote_reference_space = (False, 'env'),
show_authors = (False, 'env'),
pygments_style = (None, 'html'),
highlight_language = ('python', 'env'),
templates_path = ([], 'html'),
template_bridge = (None, 'html'),
keep_warnings = (False, 'env'),
modindex_common_prefix = ([], 'html'),
rst_epilog = (None, 'env'),
rst_prolog = (None, 'env'),
trim_doctest_flags = (True, 'env'),
primary_domain = ('py', 'env'),
needs_sphinx = (None, None),
nitpicky = (False, 'env'),
nitpick_ignore = ([], 'html'),
# HTML options
html_theme = ('default', 'html'),
html_theme_path = ([], 'html'),
html_theme_options = ({}, 'html'),
html_title = (lambda self: l_('%s %s documentation') %
(self.project, self.release),
'html'),
html_short_title = (lambda self: self.html_title, 'html'),
html_style = (None, 'html'),
html_logo = (None, 'html'),
html_favicon = (None, 'html'),
html_static_path = ([], 'html'),
html_extra_path = ([], 'html'),
# the real default is locale-dependent
html_last_updated_fmt = (None, 'html'),
html_use_smartypants = (True, 'html'),
html_translator_class = (None, 'html'),
html_sidebars = ({}, 'html'),
html_additional_pages = ({}, 'html'),
html_use_modindex = (True, 'html'), # deprecated
html_domain_indices = (True, 'html'),
html_add_permalinks = (u'\u00B6', 'html'),
html_use_index = (True, 'html'),
html_split_index = (False, 'html'),
html_copy_source = (True, 'html'),
html_show_sourcelink = (True, 'html'),
html_use_opensearch = ('', 'html'),
html_file_suffix = (None, 'html'),
html_link_suffix = (None, 'html'),
html_show_copyright = (True, 'html'),
html_show_sphinx = (True, 'html'),
html_context = ({}, 'html'),
html_output_encoding = ('utf-8', 'html'),
html_compact_lists = (True, 'html'),
html_secnumber_suffix = ('. ', 'html'),
html_search_language = (None, 'html'),
html_search_options = ({}, 'html'),
html_search_scorer = ('', None),
# HTML help only options
htmlhelp_basename = (lambda self: make_filename(self.project), None),
# Qt help only options
qthelp_basename = (lambda self: make_filename(self.project), None),
# Devhelp only options
devhelp_basename = (lambda self: make_filename(self.project), None),
# Epub options
epub_basename = (lambda self: make_filename(self.project), None),
epub_theme = ('epub', 'html'),
epub_theme_options = ({}, 'html'),
epub_title = (lambda self: self.html_title, 'html'),
epub_author = ('unknown', 'html'),
epub_language = (lambda self: self.language or 'en', 'html'),
epub_publisher = ('unknown', 'html'),
epub_copyright = (lambda self: self.copyright, 'html'),
epub_identifier = ('unknown', 'html'),
epub_scheme = ('unknown', 'html'),
epub_uid = ('unknown', 'env'),
epub_cover = ((), 'env'),
epub_guide = ((), 'env'),
epub_pre_files = ([], 'env'),
epub_post_files = ([], 'env'),
epub_exclude_files = ([], 'env'),
epub_tocdepth = (3, 'env'),
epub_tocdup = (True, 'env'),
epub_tocscope = ('default', 'env'),
epub_fix_images = (False, 'env'),
epub_max_image_width = (0, 'env'),
epub_show_urls = ('inline', 'html'),
epub_use_index = (lambda self: self.html_use_index, 'html'),
# LaTeX options
latex_documents = (lambda self: [(self.master_doc,
make_filename(self.project) + '.tex',
self.project,
'', 'manual')],
None),
latex_logo = (None, None),
latex_appendices = ([], None),
latex_use_parts = (False, None),
latex_use_modindex = (True, None), # deprecated
latex_domain_indices = (True, None),
latex_show_urls = ('no', None),
latex_show_pagerefs = (False, None),
# paper_size and font_size are still separate values
# so that you can give them easily on the command line
latex_paper_size = ('letter', None),
latex_font_size = ('10pt', None),
latex_elements = ({}, None),
latex_additional_files = ([], None),
latex_docclass = ({}, None),
# now deprecated - use latex_elements
latex_preamble = ('', None),
# text options
text_sectionchars = ('*=-~"+`', 'env'),
text_newlines = ('unix', 'env'),
# manpage options
man_pages = (lambda self: [(self.master_doc,
make_filename(self.project).lower(),
'%s %s' % (self.project, self.release),
[], 1)],
None),
man_show_urls = (False, None),
# Texinfo options
texinfo_documents = (lambda self: [(self.master_doc,
make_filename(self.project).lower(),
self.project, '',
make_filename(self.project),
'The %s reference manual.' %
make_filename(self.project),
'Python')],
None),
texinfo_appendices = ([], None),
texinfo_elements = ({}, None),
texinfo_domain_indices = (True, None),
texinfo_show_urls = ('footnote', None),
texinfo_no_detailmenu = (False, None),
# linkcheck options
linkcheck_ignore = ([], None),
linkcheck_timeout = (None, None),
linkcheck_workers = (5, None),
linkcheck_anchors = (True, None),
# gettext options
gettext_compact = (True, 'gettext'),
# XML options
xml_pretty = (True, 'env'),
)
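    # Hedged example of how extensions add entries with the same
    # (default, rebuild) shape at runtime (standard Sphinx API, not part
    # of this file):
    #
    #   def setup(app):
    #       app.add_config_value('my_option', False, 'env')
    #
    # The value then lands in self.values next to the defaults above.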
def __init__(self, dirname, filename, overrides, tags):
self.overrides = overrides
self.values = Config.config_values.copy()
config = {}
if "extensions" in overrides:
config["extensions"] = overrides["extensions"]
if dirname is not None:
config_file = path.join(dirname, filename)
config['__file__'] = config_file
config['tags'] = tags
olddir = os.getcwd()
try:
# we promise to have the config dir as current dir while the
# config file is executed
os.chdir(dirname)
try:
execfile_(filename, config)
except SyntaxError, err:
raise ConfigError(CONFIG_SYNTAX_ERROR % err)
finally:
os.chdir(olddir)
self._raw_config = config
# these two must be preinitialized because extensions can add their
# own config values
self.setup = config.get('setup', None)
self.extensions = config.get('extensions', [])
def check_unicode(self, warn):
# check all string values for non-ASCII characters in bytestrings,
# since that can result in UnicodeErrors all over the place
for name, value in self._raw_config.iteritems():
if isinstance(value, bytes) and nonascii_re.search(value):
warn('the config value %r is set to a string with non-ASCII '
'characters; this can lead to Unicode errors occurring. '
'Please use Unicode strings, e.g. %r.' % (name, u'Content')
)
def init_values(self):
config = self._raw_config
for valname, value in self.overrides.iteritems():
if '.' in valname:
realvalname, key = valname.split('.', 1)
config.setdefault(realvalname, {})[key] = value
else:
config[valname] = value
for name in config:
if name in self.values:
self.__dict__[name] = config[name]
del self._raw_config
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError(name)
if name not in self.values:
raise AttributeError('No such config value: %s' % name)
default = self.values[name][0]
if hasattr(default, '__call__'):
return default(self)
return default
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def __delitem__(self, name):
delattr(self, name)
def __contains__(self, name):
return name in self.values
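# Illustrative usage sketch; the docs directory path is hypothetical, and
# conf.py is executed from it, so only run this against a real project.
def _example_load_config():
    cfg = Config('/path/to/docs', 'conf.py', overrides={}, tags=None)
    cfg.init_values()
    # unset values fall back to the defaults declared in config_values
    return cfg.project, cfg.master_doc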
| apache-2.0 |
daviddao/luminosity | sklearn-server/flask/lib/python2.7/site-packages/werkzeug/serving.py | 116 | 25317 | # -*- coding: utf-8 -*-
"""
werkzeug.serving
~~~~~~~~~~~~~~~~
There are many ways to serve a WSGI application. While you're developing
it you usually don't want a full blown webserver like Apache but a simple
standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in
the standard library. If you're using older versions of Python you can
download the package from the cheeseshop.
However there are some caveats. Sourcecode won't reload itself when
changed, and each time you kill the server using ``^C`` you get a
`KeyboardInterrupt` error. While the latter is easy to solve, the former
can be a pain in the ass in some situations.
The easiest way is creating a small ``start-myproject.py`` that runs the
application::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from myproject import make_app
from werkzeug.serving import run_simple
app = make_app(...)
run_simple('localhost', 8080, app, use_reloader=True)
You can also pass it an `extra_files` keyword argument with a list of
additional files (like configuration files) you want to observe.
For bigger applications you should consider using `werkzeug.script`
instead of a simple start file.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import os
import socket
import sys
import ssl
import signal
def _get_openssl_crypto_module():
try:
from OpenSSL import crypto
except ImportError:
raise TypeError('Using ad-hoc certificates requires the pyOpenSSL '
'library.')
else:
return crypto
try:
from SocketServer import ThreadingMixIn, ForkingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from socketserver import ThreadingMixIn, ForkingMixIn
from http.server import HTTPServer, BaseHTTPRequestHandler
import werkzeug
from werkzeug._internal import _log
from werkzeug._compat import reraise, wsgi_encoding_dance
from werkzeug.urls import url_parse, url_unquote
from werkzeug.exceptions import InternalServerError
class WSGIRequestHandler(BaseHTTPRequestHandler, object):
"""A request handler that implements WSGI dispatching."""
@property
def server_version(self):
return 'Werkzeug/' + werkzeug.__version__
def make_environ(self):
request_url = url_parse(self.path)
def shutdown_server():
self.server.shutdown_signal = True
url_scheme = self.server.ssl_context is None and 'http' or 'https'
path_info = url_unquote(request_url.path)
environ = {
'wsgi.version': (1, 0),
'wsgi.url_scheme': url_scheme,
'wsgi.input': self.rfile,
'wsgi.errors': sys.stderr,
'wsgi.multithread': self.server.multithread,
'wsgi.multiprocess': self.server.multiprocess,
'wsgi.run_once': False,
'werkzeug.server.shutdown': shutdown_server,
'SERVER_SOFTWARE': self.server_version,
'REQUEST_METHOD': self.command,
'SCRIPT_NAME': '',
'PATH_INFO': wsgi_encoding_dance(path_info),
'QUERY_STRING': wsgi_encoding_dance(request_url.query),
'CONTENT_TYPE': self.headers.get('Content-Type', ''),
'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
'REMOTE_ADDR': self.client_address[0],
'REMOTE_PORT': self.client_address[1],
'SERVER_NAME': self.server.server_address[0],
'SERVER_PORT': str(self.server.server_address[1]),
'SERVER_PROTOCOL': self.request_version
}
for key, value in self.headers.items():
key = 'HTTP_' + key.upper().replace('-', '_')
if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
environ[key] = value
if request_url.netloc:
environ['HTTP_HOST'] = request_url.netloc
return environ
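    # Hedged sketch: for a request line "GET /a%20b?x=1 HTTP/1.1" against
    # a plain-HTTP server, the dict built above would contain, among
    # other keys: REQUEST_METHOD 'GET', PATH_INFO '/a b',
    # QUERY_STRING 'x=1' and wsgi.url_scheme 'http'.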
def run_wsgi(self):
if self.headers.get('Expect', '').lower().strip() == '100-continue':
self.wfile.write(b'HTTP/1.1 100 Continue\r\n\r\n')
environ = self.make_environ()
headers_set = []
headers_sent = []
def write(data):
assert headers_set, 'write() before start_response'
if not headers_sent:
status, response_headers = headers_sent[:] = headers_set
try:
code, msg = status.split(None, 1)
except ValueError:
code, msg = status, ""
self.send_response(int(code), msg)
header_keys = set()
for key, value in response_headers:
self.send_header(key, value)
key = key.lower()
header_keys.add(key)
if 'content-length' not in header_keys:
self.close_connection = True
self.send_header('Connection', 'close')
if 'server' not in header_keys:
self.send_header('Server', self.version_string())
if 'date' not in header_keys:
self.send_header('Date', self.date_time_string())
self.end_headers()
assert type(data) is bytes, 'applications must write bytes'
self.wfile.write(data)
self.wfile.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
reraise(*exc_info)
finally:
exc_info = None
elif headers_set:
raise AssertionError('Headers already set')
headers_set[:] = [status, response_headers]
return write
def execute(app):
application_iter = app(environ, start_response)
try:
for data in application_iter:
write(data)
if not headers_sent:
write(b'')
finally:
if hasattr(application_iter, 'close'):
application_iter.close()
application_iter = None
try:
execute(self.server.app)
except (socket.error, socket.timeout) as e:
self.connection_dropped(e, environ)
except Exception:
if self.server.passthrough_errors:
raise
from werkzeug.debug.tbtools import get_current_traceback
traceback = get_current_traceback(ignore_system_exceptions=True)
try:
# if we haven't yet sent the headers but they are set
# we roll back to be able to set them again.
if not headers_sent:
del headers_set[:]
execute(InternalServerError())
except Exception:
pass
self.server.log('error', 'Error on request:\n%s',
traceback.plaintext)
def handle(self):
"""Handles a request ignoring dropped connections."""
rv = None
try:
rv = BaseHTTPRequestHandler.handle(self)
except (socket.error, socket.timeout) as e:
self.connection_dropped(e)
except Exception:
if self.server.ssl_context is None or not is_ssl_error():
raise
if self.server.shutdown_signal:
self.initiate_shutdown()
return rv
def initiate_shutdown(self):
"""A horrible, horrible way to kill the server for Python 2.6 and
later. It's the best we can do.
"""
# Windows does not provide SIGKILL, go with SIGTERM then.
sig = getattr(signal, 'SIGKILL', signal.SIGTERM)
# reloader active
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
os.kill(os.getpid(), sig)
# python 2.7
self.server._BaseServer__shutdown_request = True
# python 2.6
self.server._BaseServer__serving = False
def connection_dropped(self, error, environ=None):
"""Called if the connection was closed by the client. By default
nothing happens.
"""
def handle_one_request(self):
"""Handle a single HTTP request."""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
elif self.parse_request():
return self.run_wsgi()
def send_response(self, code, message=None):
"""Send the response header and log the response code."""
self.log_request(code)
if message is None:
message = code in self.responses and self.responses[code][0] or ''
if self.request_version != 'HTTP/0.9':
hdr = "%s %d %s\r\n" % (self.protocol_version, code, message)
self.wfile.write(hdr.encode('ascii'))
def version_string(self):
return BaseHTTPRequestHandler.version_string(self).strip()
def address_string(self):
return self.client_address[0]
def log_request(self, code='-', size='-'):
self.log('info', '"%s" %s %s', self.requestline, code, size)
def log_error(self, *args):
self.log('error', *args)
def log_message(self, format, *args):
self.log('info', format, *args)
def log(self, type, message, *args):
_log(type, '%s - - [%s] %s\n' % (self.address_string(),
self.log_date_time_string(),
message % args))
#: backwards compatible name if someone is subclassing it
BaseRequestHandler = WSGIRequestHandler
def generate_adhoc_ssl_pair(cn=None):
from random import random
crypto = _get_openssl_crypto_module()
# pretty damn sure that this is not actually accepted by anyone
if cn is None:
cn = '*'
cert = crypto.X509()
cert.set_serial_number(int(random() * sys.maxsize))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
subject = cert.get_subject()
subject.CN = cn
subject.O = 'Dummy Certificate'
issuer = cert.get_issuer()
issuer.CN = 'Untrusted Authority'
issuer.O = 'Self-Signed'
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 1024)
cert.set_pubkey(pkey)
cert.sign(pkey, 'md5')
return cert, pkey
def make_ssl_devcert(base_path, host=None, cn=None):
"""Creates an SSL key for development. This should be used instead of
the ``'adhoc'`` key which generates a new cert on each server start.
It accepts a path for where it should store the key and cert and
either a host or CN. If a host is given it will use the CN
``*.host/CN=host``.
For more information see :func:`run_simple`.
.. versionadded:: 0.9
:param base_path: the path to the certificate and key. The extension
``.crt`` is added for the certificate, ``.key`` is
added for the key.
:param host: the name of the host. This can be used as an alternative
for the `cn`.
:param cn: the `CN` to use.
"""
from OpenSSL import crypto
if host is not None:
cn = '*.%s/CN=%s' % (host, host)
cert, pkey = generate_adhoc_ssl_pair(cn=cn)
cert_file = base_path + '.crt'
pkey_file = base_path + '.key'
with open(cert_file, 'wb') as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
with open(pkey_file, 'wb') as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
return cert_file, pkey_file
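# Hedged usage sketch (the base path is illustrative): generate a reusable
# development certificate once, then hand the returned tuple to
# run_simple's ssl_context parameter.
def _example_devcert():
    cert_file, pkey_file = make_ssl_devcert('/tmp/devcert', host='localhost')
    # -> ('/tmp/devcert.crt', '/tmp/devcert.key')
    return cert_file, pkey_file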
def generate_adhoc_ssl_context():
"""Generates an adhoc SSL context for the development server."""
crypto = _get_openssl_crypto_module()
import tempfile
import atexit
cert, pkey = generate_adhoc_ssl_pair()
cert_handle, cert_file = tempfile.mkstemp()
pkey_handle, pkey_file = tempfile.mkstemp()
atexit.register(os.remove, pkey_file)
atexit.register(os.remove, cert_file)
os.write(cert_handle, crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
os.write(pkey_handle, crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
os.close(cert_handle)
os.close(pkey_handle)
ctx = load_ssl_context(cert_file, pkey_file)
return ctx
def load_ssl_context(cert_file, pkey_file=None, protocol=None):
"""Loads SSL context from cert/private key files and optional protocol.
Many parameters are directly taken from the API of
:py:class:`ssl.SSLContext`.
:param cert_file: Path of the certificate to use.
:param pkey_file: Path of the private key to use. If not given, the key
will be obtained from the certificate file.
:param protocol: One of the ``PROTOCOL_*`` constants in the stdlib ``ssl``
module. Defaults to ``PROTOCOL_SSLv23``.
"""
if protocol is None:
protocol = ssl.PROTOCOL_SSLv23
ctx = _SSLContext(protocol)
ctx.load_cert_chain(cert_file, pkey_file)
return ctx
class _SSLContext(object):
'''A dummy class with a small subset of Python3's ``ssl.SSLContext``, only
intended to be used with and by Werkzeug.'''
def __init__(self, protocol):
self._protocol = protocol
self._certfile = None
self._keyfile = None
self._password = None
def load_cert_chain(self, certfile, keyfile=None, password=None):
self._certfile = certfile
self._keyfile = keyfile or certfile
self._password = password
def wrap_socket(self, sock, **kwargs):
return ssl.wrap_socket(sock, keyfile=self._keyfile,
certfile=self._certfile,
ssl_version=self._protocol, **kwargs)
def is_ssl_error(error=None):
"""Checks if the given error (or the current one) is an SSL error."""
exc_types = (ssl.SSLError,)
try:
from OpenSSL.SSL import Error
exc_types += (Error,)
except ImportError:
pass
if error is None:
error = sys.exc_info()[1]
return isinstance(error, exc_types)
def select_ip_version(host, port):
"""Returns AF_INET4 or AF_INET6 depending on where to connect to."""
# disabled due to problems with current ipv6 implementations
# and various operating systems. Probably this code also is
# not supposed to work, but I can't come up with any other
# ways to implement this.
# try:
# info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
# socket.SOCK_STREAM, 0,
# socket.AI_PASSIVE)
# if info:
# return info[0][0]
# except socket.gaierror:
# pass
if ':' in host and hasattr(socket, 'AF_INET6'):
return socket.AF_INET6
return socket.AF_INET
class BaseWSGIServer(HTTPServer, object):
"""Simple single-threaded, single-process WSGI server."""
multithread = False
multiprocess = False
request_queue_size = 128
def __init__(self, host, port, app, handler=None,
passthrough_errors=False, ssl_context=None):
if handler is None:
handler = WSGIRequestHandler
self.address_family = select_ip_version(host, port)
HTTPServer.__init__(self, (host, int(port)), handler)
self.app = app
self.passthrough_errors = passthrough_errors
self.shutdown_signal = False
if ssl_context is not None:
if isinstance(ssl_context, tuple):
ssl_context = load_ssl_context(*ssl_context)
if ssl_context == 'adhoc':
ssl_context = generate_adhoc_ssl_context()
self.socket = ssl_context.wrap_socket(self.socket,
server_side=True)
self.ssl_context = ssl_context
else:
self.ssl_context = None
def log(self, type, message, *args):
_log(type, message, *args)
def serve_forever(self):
self.shutdown_signal = False
try:
HTTPServer.serve_forever(self)
except KeyboardInterrupt:
pass
finally:
self.server_close()
def handle_error(self, request, client_address):
if self.passthrough_errors:
raise
else:
return HTTPServer.handle_error(self, request, client_address)
def get_request(self):
con, info = self.socket.accept()
return con, info
class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
"""A WSGI server that does threading."""
multithread = True
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
"""A WSGI server that does forking."""
multiprocess = True
def __init__(self, host, port, app, processes=40, handler=None,
passthrough_errors=False, ssl_context=None):
BaseWSGIServer.__init__(self, host, port, app, handler,
passthrough_errors, ssl_context)
self.max_children = processes
def make_server(host, port, app=None, threaded=False, processes=1,
request_handler=None, passthrough_errors=False,
ssl_context=None):
"""Create a new server instance that is either threaded, or forks
or just processes one request after another.
"""
if threaded and processes > 1:
raise ValueError("cannot have a multithreaded and "
"multi process server.")
elif threaded:
return ThreadedWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
elif processes > 1:
return ForkingWSGIServer(host, port, app, processes, request_handler,
passthrough_errors, ssl_context)
else:
return BaseWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
def is_running_from_reloader():
"""Checks if the application is running from within the Werkzeug
reloader subprocess.
.. versionadded:: 0.10
"""
return os.environ.get('WERKZEUG_RUN_MAIN') == 'true'
def run_simple(hostname, port, application, use_reloader=False,
use_debugger=False, use_evalex=True,
extra_files=None, reloader_interval=1,
reloader_type='auto', threaded=False, processes=1,
request_handler=None, static_files=None,
passthrough_errors=False, ssl_context=None):
"""Start a WSGI application. Optional features include a reloader,
multithreading and fork support.
This function has a command-line interface too::
python -m werkzeug.serving --help
.. versionadded:: 0.5
`static_files` was added to simplify serving of static files as well
as `passthrough_errors`.
.. versionadded:: 0.6
support for SSL was added.
.. versionadded:: 0.8
       Added support for automatically loading an SSL context from a
       certificate file and private key.
.. versionadded:: 0.9
Added command-line interface.
.. versionadded:: 0.10
Improved the reloader and added support for changing the backend
through the `reloader_type` parameter. See :ref:`reloader`
for more information.
:param hostname: The host for the application. eg: ``'localhost'``
:param port: The port for the server. eg: ``8080``
:param application: the WSGI application to execute
:param use_reloader: should the server automatically restart the python
process if modules were changed?
:param use_debugger: should the werkzeug debugging system be used?
:param use_evalex: should the exception evaluation feature be enabled?
:param extra_files: a list of files the reloader should watch
additionally to the modules. For example configuration
files.
:param reloader_interval: the interval for the reloader in seconds.
:param reloader_type: the type of reloader to use. The default is
auto detection. Valid values are ``'stat'`` and
``'watchdog'``. See :ref:`reloader` for more
information.
:param threaded: should the process handle each request in a separate
thread?
:param processes: if greater than 1 then handle each request in a new process
up to this maximum number of concurrent processes.
:param request_handler: optional parameter that can be used to replace
the default one. You can use this to replace it
with a different
:class:`~BaseHTTPServer.BaseHTTPRequestHandler`
subclass.
:param static_files: a dict of paths for static files. This works exactly
like :class:`SharedDataMiddleware`, it's actually
just wrapping the application in that middleware before
serving.
:param passthrough_errors: set this to `True` to disable the error catching.
This means that the server will die on errors but
it can be useful to hook debuggers in (pdb etc.)
:param ssl_context: an SSL context for the connection. Either an
:class:`ssl.SSLContext`, a tuple in the form
``(cert_file, pkey_file)``, the string ``'adhoc'`` if
the server should automatically create one, or ``None``
to disable SSL (which is the default).
"""
if use_debugger:
from werkzeug.debug import DebuggedApplication
application = DebuggedApplication(application, use_evalex)
if static_files:
from werkzeug.wsgi import SharedDataMiddleware
application = SharedDataMiddleware(application, static_files)
def inner():
make_server(hostname, port, application, threaded,
processes, request_handler,
passthrough_errors, ssl_context).serve_forever()
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
display_hostname = hostname != '*' and hostname or 'localhost'
if ':' in display_hostname:
display_hostname = '[%s]' % display_hostname
quit_msg = '(Press CTRL+C to quit)'
_log('info', ' * Running on %s://%s:%d/ %s', ssl_context is None
and 'http' or 'https', display_hostname, port, quit_msg)
if use_reloader:
# Create and destroy a socket so that any exceptions are raised before
# we spawn a separate Python interpreter and lose this ability.
address_family = select_ip_version(hostname, port)
test_socket = socket.socket(address_family, socket.SOCK_STREAM)
test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
test_socket.bind((hostname, port))
test_socket.close()
from ._reloader import run_with_reloader
run_with_reloader(inner, extra_files, reloader_interval,
reloader_type)
else:
inner()
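# Minimal usage sketch mirroring the docstring above; the application is a
# trivial WSGI callable written purely for illustration.
def _example_serve():
    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'Hello from run_simple']
    run_simple('localhost', 8080, hello_app, use_reloader=True)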
def run_with_reloader(*args, **kwargs):
# People keep using undocumented APIs. Do not use this function
# please, we do not guarantee that it continues working.
from ._reloader import run_with_reloader
return run_with_reloader(*args, **kwargs)
def main():
'''A simple command-line interface for :py:func:`run_simple`.'''
# in contrast to argparse, this works at least under Python < 2.7
import optparse
from werkzeug.utils import import_string
parser = optparse.OptionParser(
usage='Usage: %prog [options] app_module:app_object')
parser.add_option('-b', '--bind', dest='address',
help='The hostname:port the app should listen on.')
parser.add_option('-d', '--debug', dest='use_debugger',
action='store_true', default=False,
help='Use Werkzeug\'s debugger.')
parser.add_option('-r', '--reload', dest='use_reloader',
action='store_true', default=False,
help='Reload Python process if modules change.')
options, args = parser.parse_args()
hostname, port = None, None
if options.address:
address = options.address.split(':')
hostname = address[0]
if len(address) > 1:
port = address[1]
if len(args) != 1:
        sys.stdout.write('No application supplied, or too many. See --help\n')
sys.exit(1)
app = import_string(args[0])
run_simple(
hostname=(hostname or '127.0.0.1'), port=int(port or 5000),
application=app, use_reloader=options.use_reloader,
use_debugger=options.use_debugger
)
if __name__ == '__main__':
main()
| bsd-3-clause |
lambdamusic/testproject | konproj/libs/django_extensions/management/commands/runscript.py | 5 | 5776 | from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from optparse import make_option
import sys
import os
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
def vararg_callback(option, opt_str, opt_value, parser):
parser.rargs.insert(0, opt_value)
value = []
for arg in parser.rargs:
# stop on --foo like options
if arg[:2] == "--" and len(arg) > 2:
break
# stop on -a like options
if arg[:1] == "-":
break
value.append(arg)
del parser.rargs[:len(value)]
setattr(parser.values, option.dest, value)
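# Hedged illustration: with the --script-args option declared below, a
# command line such as
#   manage.py runscript myscript --script-args foo bar
# leaves options.script_args == ['foo', 'bar'], and both values are passed
# on to every named script's run() function.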
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--fixtures', action='store_true', dest='infixtures', default=False,
help='Only look in app.fixtures subdir'),
        make_option('--noscripts', action='store_true', dest='noscripts', default=False,
                    help='Do not look in app.scripts subdir'),
make_option('-s', '--silent', action='store_true', dest='silent', default=False,
help='Run silently, do not show errors and tracebacks'),
make_option('--no-traceback', action='store_true', dest='no_traceback', default=False,
help='Do not show tracebacks'),
make_option('--script-args', action='callback', callback=vararg_callback, type='string',
help='Space-separated argument list to be passed to the scripts. Note that the '
'same arguments will be passed to all named scripts.'),
)
    help = 'Runs a script in the Django context.'
args = "script [script ...]"
def handle(self, *scripts, **options):
from django.db.models import get_apps
NOTICE = self.style.SQL_TABLE
NOTICE2 = self.style.SQL_FIELD
ERROR = self.style.ERROR
ERROR2 = self.style.NOTICE
subdirs = []
if not options.get('noscripts'):
subdirs.append('scripts')
if options.get('infixtures'):
subdirs.append('fixtures')
verbosity = int(options.get('verbosity', 1))
show_traceback = options.get('traceback', True)
if show_traceback is None:
# XXX: traceback is set to None from Django ?
show_traceback = True
no_traceback = options.get('no_traceback', False)
if no_traceback:
show_traceback = False
silent = options.get('silent', False)
if silent:
verbosity = 0
if len(subdirs) < 1:
print NOTICE("No subdirs to run left.")
return
if len(scripts) < 1:
print ERROR("Script name required.")
return
def run_script(mod, *script_args):
try:
mod.run(*script_args)
except Exception, e:
if silent:
return
if verbosity > 0:
print ERROR("Exception while running run() in '%s'" % mod.__name__)
if show_traceback:
raise
def my_import(mod):
if verbosity > 1:
print NOTICE("Check for %s" % mod)
try:
t = __import__(mod, [], [], [" "])
#if verbosity > 1:
# print NOTICE("Found script %s ..." % mod)
if hasattr(t, "run"):
if verbosity > 1:
print NOTICE2("Found script '%s' ..." % mod)
#if verbosity > 1:
# print NOTICE("found run() in %s. executing..." % mod)
return t
else:
if verbosity > 1:
print ERROR2("Find script '%s' but no run() function found." % mod)
except ImportError:
return False
def find_modules_for_script(script):
""" find script module which contains 'run' attribute """
modules = []
# first look in apps
for app in get_apps():
app_name = app.__name__.split(".")[:-1] # + ['fixtures']
for subdir in subdirs:
mod = my_import(".".join(app_name + [subdir, script]))
if mod:
modules.append(mod)
# try app.DIR.script import
sa = script.split(".")
for subdir in subdirs:
nn = ".".join(sa[:-1] + [subdir, sa[-1]])
mod = my_import(nn)
if mod:
modules.append(mod)
# try direct import
if script.find(".") != -1:
mod = my_import(script)
if mod:
modules.append(mod)
return modules
if options.get('script_args'):
script_args = options['script_args']
else:
script_args = []
for script in scripts:
modules = find_modules_for_script(script)
if not modules:
if verbosity > 0 and not silent:
print ERROR("No module for script '%s' found" % script)
for mod in modules:
if verbosity > 1:
print NOTICE2("Running script '%s' ..." % mod.__name__)
run_script(mod, *script_args)
# Backwards compatibility for Django r9110
if not [opt for opt in Command.option_list if opt.dest == 'verbosity']:
Command.option_list += (
make_option('--verbosity', '-v', action="store", dest="verbosity",
default='1', type='choice', choices=['0', '1', '2'],
help="Verbosity level; 0=minimal output, 1=normal output, 2=all output"),
)
| gpl-2.0 |
oiertwo/vampyr | pdftoexcel.py | 1 | 8194 | __author__ = 'oier'
import os
import numpy as np
from data.parameters import true_params
from data.parameters import false_params
import distance as dist
def pdftotext(path):
os.system("pdftotext {data}".format(data=path))
return(path.replace(".pdf",".txt"))
import pandas as pd
def parse(path):
txt = pd.read_table(path, sep='\n', na_values=False, header=None)
for i in txt.index:
try :
if pd.isnull(float(txt.ix[i])) == False:
name = getname(i,txt)
print(name)
print(float(txt.ix[i]))
except :
pass
def getname(index, df):
name = ""
for i in range(0,index):
size = len(df.ix[i].to_string().split())
idxname = " ".join(df.ix[i].to_string().split()[1:size])
if (len( idxname )> 5) and idxname != None and idxname != "NaN":
name = idxname
#print(name)
return (name)
from collections import deque
def getnamedict(path):
dict = {}
numdict = {}
names = deque()
txt = pd.read_table(path, sep='\n', na_values=False, header=None)
name = ""
for i in txt.index:
try :
size = len(txt.ix[i].to_string().split())
nextname = " ".join(txt.ix[i].to_string().split()[1:size])
if (len( nextname )> 5) and \
nextname != None and \
nextname != "NaN" and \
isclean(nextname) and \
validateparam(nextname):
names.append(nextname)
dict[i] = nextname
#print(name)
#print(nextname)
if pd.isnull(float(txt.ix[i])) == False:
number = float(txt.ix[i])
numdict[names.pop()] = number
#print(number)
#print(i)
except :
pass
print(dict.keys())
print(dict.values())
print(numdict.keys())
print(numdict.values())
#organize(dict,numdict)
# print(dict[i])
def organize(names, numbers):
'''
:param names: must be dictionary
:param numbers: must be dictionary
:return: dictionary, dict[name] = number
'''
numbs = dict(numbers)
nams = dict(names)
conn1 = {}
conn2 = {}
array1 = np.array(nams.keys())
for i in numbs.keys():
actual = 100.0
inconn2 = False
key = min(nams.keys(), key=lambda k: abs(k - i))
print(" {} - {} ".format(key,i))
print(" {} - {} ".format(nams[key],numbs[i]))
'''
for j in numbs.keys():
actual = i - j
if ( actual > conn1[i] or conn1[i] == None):
if( conn2[j] == None):
conn1[i] = j
conn2[j] = actual
else:
best = j
inconn2 = True
else if (conn2[j] != None ):
'''
return()
def isclean(word):
    # valid only if the word contains none of these characters
    w = str(word)
    for c in "_[]*":
        if w.find(c) != -1:
            return(False)
    return(True)
def validateparam(word):
t_dist = []
f_dist = []
for i in true_params:
t_dist.append(dist.levenshtein(word,i))
for i in false_params:
f_dist.append(dist.levenshtein(word, i))
print("Word: {}, T: {} , F: {}".format(word, np.min(t_dist), np.min(f_dist[0])))
if( min(t_dist) == 0):
print("TRUE")
return (True)
    if (min(f_dist) == 0):
        print("FALSE")
        return(False)
if ( np.mean(t_dist )< np.mean(f_dist) ):
print("TRUE")
return(True)
print("FALSE")
return(False)
def getmyarray(path, apath):
dict = {}
appearances = {}
names = deque()
with open(path) as f:
txt = f.readlines()
#txt = pd.read_table(path, sep='\n', na_values=False, header=None)
array_txt = pd.read_table(apath, sep='\n', header=None)
name = ""
for i in txt:
actual = i.replace("\n", '')
if(len(actual.strip()) == 0):
continue
try :
number = float(actual)
if (number > 10000000):
continue
try:
appearances[actual] += 1
except:
appearances[actual] = 1
            name = localgetmyarray(path, apath, actual, appearances[actual])
dict[name] = i
print("name: {} numb: {}".format(name, i))
except :
pass
print(dict.keys())
print(dict.values())
def localgetmyarray(path, apath, word, count):
with open(path) as f:
txt = f.readlines()
#txt = pd.read_table(path, sep='\n', na_values=False, header=None)
f = open(apath)
array_txt_str = f.read()
name = ""
idx = [k.start() for k in re.finditer(word, array_txt_str)][count -1]
opt = len(array_txt_str)
apps ={}
for i in txt:
try :
nextname = i.replace("\n", '')
try :
float(nextname)
except :
if (len( nextname )> 5) and nextname != None and \
nextname != "NaN" and isclean(nextname):
try:
apps[nextname ] += 1
except:
apps[nextname] = 1
id = [k for k in re.finditer(nextname, array_txt_str)][apps[nextname]-1].start()
myopt = idx - id
if (myopt > 0) and (myopt < opt):
opt = myopt
name = nextname
except :
pass
print("optimum: {} number: {} found: {}".format(opt, word, name))
f.close()
return name
#DOWN FROM HERE JAVA+PYTHON PDF TO TEXT:
import re
import extractText as txt
def remove_unwanted(str):
s = re.sub(r'\[.*?\]', '',str)
s = s.replace("\*", "")
s = s.replace("\n", "")
return (s)
def line_control(str):
#may return True if str is not valid
#returns false if str is valid
if(len(str) < 15):
return True
if(len(str) == 1):
return True
if(len(str.split(" ")) > 10):
return True
return False
def line_parser(str):
item = ''
valor = ''
dict = {}
sline = str.split(" ")
helper = {}
pos = 0
for schar in sline:
try:
#dict["val"]
if(len(dict.keys()) == 3 and len(sline) > 6):
helper[pos] = dict
dict = {}
pos += 1
dict["val"] #to force failure/raise ofd exception
except:
try:
valor = ''
table = [char for char in schar if '/' in char]
if schar.find('%') != -1:
valor = schar
if len(table) > 0:
valor = schar
if(valor != ''):
dict["val"] = valor
continue
except:
pass
try:
#dict["num"]
if(len(dict.keys()) == 3 and len(sline) > 6):
helper[pos] = dict
dict = {}
pos += 1
dict["num"]
except:
try:
num = float(schar)
if(num > 10000):
return({})
dict["num"] = num
continue
except:
pass
try:
dict["item"] += " " + schar
except:
dict["item"] = schar
helper[pos] = dict
return(helper)
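# Hedged example of the grouping above (the input is made-up lab-report
# text): line_parser("Glucosa 95 70/110") returns
#   {0: {'item': 'Glucosa', 'num': 95.0, 'val': '70/110'}}
# i.e. one {item, num, val} triple per detected parameter.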
def getfromjava(path, dest=''):
if (dest == ''):
d = path.replace(".pdf", ".txt")
txt.extractText(path, d, '')
with open(d) as f:
text = f.readlines()
for line in text:
sline = remove_unwanted(line)
if(line_control(sline) == True):
continue
dict = line_parser(sline)
for i in dict.keys():
if(len(dict[i].keys()) == 3):
print("ITEM: {} NUM: {} VAL: {}".format(dict[i]["item"], dict[i]["num"], dict[i]["val"]))
| mit |
dajohi/bitcoin | qa/rpc-tests/netutil.py | 8 | 4561 | #!/usr/bin/env python
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Linux network utilities
import sys
import socket
import fcntl
import struct
import array
import os
import binascii
# Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
STATE_ESTABLISHED = '01'
STATE_SYN_SENT = '02'
STATE_SYN_RECV = '03'
STATE_FIN_WAIT1 = '04'
STATE_FIN_WAIT2 = '05'
STATE_TIME_WAIT = '06'
STATE_CLOSE = '07'
STATE_CLOSE_WAIT = '08'
STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
STATE_CLOSING = '0B'
def get_socket_inodes(pid):
'''
Get list of socket inodes for process pid.
'''
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = binascii.unhexlify(host)
host_out = ''
for x in range(0, len(host)/4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
'''
    Return a list with the status of TCP connections on Linux systems.
    To get the pid of every network process running on the system, you
    must run this script as superuser.
'''
with open('/proc/net/'+typ,'r') as f:
content = f.readlines()
content.pop(0)
result = []
for line in content:
line_array = _remove_empty(line.split(' ')) # Split lines and remove empty spaces.
tcp_id = line_array[0]
l_addr = _convert_ip_port(line_array[1])
r_addr = _convert_ip_port(line_array[2])
state = line_array[3]
inode = int(line_array[9]) # Need the inode to match with process pid.
nline = [tcp_id, l_addr, r_addr, state, inode]
result.append(nline)
return result
def get_bind_addrs(pid):
'''
Get bind addresses as (host,port) tuples for process pid.
'''
inodes = get_socket_inodes(pid)
bind_addrs = []
for conn in netstat('tcp') + netstat('tcp6'):
if conn[3] == STATE_LISTEN and conn[4] in inodes:
bind_addrs.append(conn[1])
return bind_addrs
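# Hedged usage sketch (reading /proc for another process typically
# requires root):
def _example_listening_ports(pid):
    # ports the process is currently listening on, in ascending order
    return sorted(port for host, port in get_bind_addrs(pid))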
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
'''
Return all interfaces that are up
'''
is_64bits = sys.maxsize > 2**32
struct_size = 40 if is_64bits else 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
max_possible = 8 # initial value
while True:
bytes = max_possible * struct_size
names = array.array('B', '\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912, # SIOCGIFCONF
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
namestr = names.tostring()
return [(namestr[i:i+16].split('\0', 1)[0],
socket.inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
'''
Convert string IPv4 or IPv6 address to binary address as returned by
get_bind_addrs.
Very naive implementation that certainly doesn't work for all IPv6 variants.
'''
if '.' in addr: # IPv4
addr = [int(x) for x in addr.split('.')]
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
addr = sub[0] + ([0] * nullbytes) + sub[1]
else:
raise ValueError('Could not parse address %s' % addr)
return binascii.hexlify(bytearray(addr))
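# Hedged examples of addr_to_hex output, derived from the packing logic
# above (IPv4 packs to 4 bytes, IPv6 to 16):
#   addr_to_hex('127.0.0.1') == '7f000001'
#   addr_to_hex('::1') == '00000000000000000000000000000001'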
| mit |
russdill/juniper-vpn-py | tncc.py | 1 | 22051 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import logging
import StringIO
import mechanize
import cookielib
import struct
import socket
import ssl
import base64
import collections
import zlib
import HTMLParser
import netifaces
import urlgrabber
import urllib2
import platform
import json
import datetime
import pyasn1_modules.pem
import pyasn1_modules.rfc2459
import pyasn1.codec.der.decoder
import xml.etree.ElementTree
ssl._create_default_https_context = ssl._create_unverified_context
debug = False
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG if debug else logging.INFO)
MSG_POLICY = 0x58316
MSG_FUNK_PLATFORM = 0x58301
MSG_FUNK = 0xa4c01
# 0013 - Message
def decode_0013(buf, indent):
logging.debug('%scmd 0013 (Message) %d bytes', indent, len(buf))
ret = collections.defaultdict(list)
while (len(buf) >= 12):
length, cmd, out = decode_packet(buf, indent + " ")
buf = buf[length:]
ret[cmd].append(out)
return ret
# 0012 - u32
def decode_0012(buf, indent):
logging.debug('%scmd 0012 (u32) %d bytes', indent, len(buf))
return struct.unpack(">I", buf)
# 0016 - zlib compressed message
def decode_0016(buf, indent):
logging.debug('%scmd 0016 (compressed message) %d bytes', indent, len(buf))
_, compressed = struct.unpack(">I" + str(len(buf) - 4) + "s", buf)
buf = zlib.decompress(compressed)
ret = collections.defaultdict(list)
while (len(buf) >= 12):
length, cmd, out = decode_packet(buf, indent + " ")
buf = buf[length:]
ret[cmd].append(out)
return ret
# 0ce4 - encapsulation
def decode_0ce4(buf, indent):
logging.debug('%scmd 0ce4 (encapsulation) %d bytes', indent, len(buf))
ret = collections.defaultdict(list)
while (len(buf) >= 12):
length, cmd, out = decode_packet(buf, indent + " ")
buf = buf[length:]
ret[cmd].append(out)
return ret
# 0ce5 - string without hex prefixer
def decode_0ce5(buf, indent):
s = struct.unpack(str(len(buf)) + "s", buf)[0]
logging.debug('%scmd 0ce5 (string) %d bytes', indent, len(buf))
s = s.rstrip('\0')
logging.debug('%s', s)
return s
# 0ce7 - string with hex prefixer
def decode_0ce7(buf, indent):
id, s = struct.unpack(">I" + str(len(buf) - 4) + "s", buf)
logging.debug('%scmd 0ce7 (id %08x string) %d bytes', indent, id, len(buf))
if s.startswith('COMPRESSED:'):
typ, length, data = s.split(':', 2)
s = zlib.decompress(data)
s = s.rstrip('\0')
logging.debug('%s', s)
return (id, s)
# 0cf0 - encapsulation
def decode_0cf0(buf, indent):
logging.debug('%scmd 0cf0 (encapsulation) %d bytes', indent, len(buf))
ret = dict()
cmd, _, out = decode_packet(buf, indent + " ")
ret[cmd] = out
return ret
# 0cf1 - string without hex prefixer
def decode_0cf1(buf, indent):
s = struct.unpack(str(len(buf)) + "s", buf)[0]
logging.debug('%scmd 0cf1 (string) %d bytes', indent, len(buf))
s = s.rstrip('\0')
logging.debug('%s', s)
return s
# 0cf3 - u32
def decode_0cf3(buf, indent):
ret = struct.unpack(">I", buf)
logging.debug('%scmd 0cf3 (u32) %d bytes - %d', indent, len(buf), ret[0])
return ret
def decode_packet(buf, indent=""):
cmd, _1, _2, length, _3 = struct.unpack(">IBBHI", buf[:12])
if length < 12:
raise Exception("Invalid packet, cmd %04x, _1 %02x, _2 %02x, length %d" % (cmd, _1, _2, length))
data = buf[12:length]
if length % 4:
length += 4 - (length % 4)
if cmd == 0x0013:
data = decode_0013(data, indent)
elif cmd == 0x0012:
data = decode_0012(data, indent)
elif cmd == 0x0016:
data = decode_0016(data, indent)
elif cmd == 0x0ce4:
data = decode_0ce4(data, indent)
elif cmd == 0x0ce5:
data = decode_0ce5(data, indent)
elif cmd == 0x0ce7:
data = decode_0ce7(data, indent)
elif cmd == 0x0cf0:
data = decode_0cf0(data, indent)
elif cmd == 0x0cf1:
data = decode_0cf1(data, indent)
elif cmd == 0x0cf3:
data = decode_0cf3(data, indent)
else:
logging.debug('%scmd %04x(%02x:%02x) is unknown, length %d', indent, cmd, _1, _2, length)
data = None
return length, cmd, data
def encode_packet(cmd, align, buf):
align = 4
orig_len = len(buf)
if align > 1 and (len(buf) + 12) % align:
buf += struct.pack(str(align - len(buf) % align) + "x")
return struct.pack(">IBBHI", cmd, 0xc0, 0x00, orig_len + 12, 0x0000583) + buf
# 0013 - Message
def encode_0013(buf):
return encode_packet(0x0013, 4, buf)
# 0012 - u32
def encode_0012(i):
return encode_packet(0x0012, 1, struct.pack("<I", i))
# 0ce4 - encapsulation
def encode_0ce4(buf):
return encode_packet(0x0ce4, 4, buf)
# 0ce5 - string without hex prefixer
def encode_0ce5(s):
return encode_packet(0x0ce5, 1, struct.pack(str(len(s)) + "s", s))
# 0ce7 - string with hex prefixer
def encode_0ce7(s, prefix):
s += '\0'
return encode_packet(0x0ce7, 1, struct.pack(">I" + str(len(s)) + "sx",
prefix, s))
# 0cf0 - encapsulation
def encode_0cf0(buf):
return encode_packet(0x0cf0, 4, buf)
# 0cf1 - string without hex prefixer
def encode_0cf1(s):
s += '\0'
return encode_packet(0x0cf1, 1, struct.pack(str(len(s)) + "s", s))
# 0cf3 - u32
def encode_0cf3(i):
return encode_packet(0x0cf3, 1, struct.pack("<I", i))
class x509cert(object):
@staticmethod
def decode_names(data):
ret = dict()
for i in range(0, len(data)):
for attr in data[i]:
type = str(attr.getComponentByPosition(0).getComponentByName('type'))
value = str(attr.getComponentByPosition(0).getComponentByName('value'))
value = str(pyasn1.codec.der.decoder.decode(value)[0])
try:
ret[type].append(value)
except:
ret[type] = [value]
return ret
@staticmethod
def decode_time(tm):
tm_str = tm.getComponent()._value
tz = 0
if tm_str[-1] == 'Z':
tz = 0
tm_str = tm_str[:-1]
elif '-' in tm_str:
tm_str, tz = tm_str.split('-')
tz = datetime.datetime.strptime(tz, '%H%M')
tz = -(tz.hour * 60 + tz.minute)
elif '+' in tm_str:
tm_str, tz = tm_str.split('+')
tz = datetime.datetime.strptime(tz, '%H%M')
tz = tz.hour * 60 + tz.minute
else:
logging.warn('No timezone in certificate')
if tm.getName() == 'generalTime':
formats = ['%Y%m%d%H%M%S.%f', '%Y%m%d%H%M%S', '%Y%m%d%H%M', '%Y%m%d%H']
elif tm.getName() == 'utcTime':
formats = ['%y%m%d%H%M%S', '%y%m%d%H%M']
else:
raise Exception('Unknown time format')
for fmt in formats:
try:
ret = datetime.datetime.strptime(tm_str, fmt)
ret += datetime.timedelta(minutes=tz)
return ret
except:
pass
raise Exception('Could not parse certificate time')
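# For instance, a pyasn1 Time whose utcTime component is '210101120000Z'
# decodes to datetime.datetime(2021, 1, 1, 12, 0) with a zero UTC offset.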
def __init__(self, cert_file):
with open(cert_file, 'r') as f:
self.data = f.read()
f = StringIO.StringIO(self.data)
substrate = pyasn1_modules.pem.readPemFromFile(f)
cert = pyasn1.codec.der.decoder.decode(substrate, pyasn1_modules.rfc2459.Certificate())[0]
tbs = cert.getComponentByName('tbsCertificate')
self.issuer = self.decode_names(tbs.getComponentByName('issuer'))
validity = tbs.getComponentByName('validity')
self.not_before = self.decode_time(validity.getComponentByName("notBefore"))
self.not_after = self.decode_time(validity.getComponentByName("notAfter"))
self.subject = self.decode_names(tbs.getComponentByName('subject'))
class tncc(object):
def __init__(self, vpn_host, device_id=None, funk=None, platform=None, hostname=None, mac_addrs=[], certs=[]):
self.vpn_host = vpn_host
self.path = '/dana-na/'
self.funk = funk
self.platform = platform
self.hostname = hostname
self.mac_addrs = mac_addrs
self.avail_certs = certs
self.deviceid = device_id
self.br = mechanize.Browser()
self.cj = cookielib.LWPCookieJar()
self.br.set_cookiejar(self.cj)
# Browser options
self.br.set_handle_equiv(True)
self.br.set_handle_redirect(True)
self.br.set_handle_referer(True)
self.br.set_handle_robots(False)
# Follow refresh 0 but don't hang on refresh > 0
self.br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),
max_time=1)
# Want debugging messages?
if debug:
self.br.set_debug_http(True)
self.br.set_debug_redirects(True)
self.br.set_debug_responses(True)
self.user_agent = 'Neoteris HC Http'
self.br.addheaders = [('User-agent', self.user_agent)]
def find_cookie(self, name):
for cookie in self.cj:
if cookie.name == name:
return cookie
return None
def set_cookie(self, name, value):
cookie = cookielib.Cookie(version=0, name=name, value=value,
port=None, port_specified=False, domain=self.vpn_host,
domain_specified=True, domain_initial_dot=False, path=self.path,
path_specified=True, secure=True, expires=None, discard=True,
comment=None, comment_url=None, rest=None, rfc2109=False)
self.cj.set_cookie(cookie)
def parse_response(self):
# Read in key/token fields in HTTP response
response = dict()
last_key = ''
for line in self.r.readlines():
line = line.strip()
# Note that msg is too long and gets wrapped; handle it specially
if last_key == 'msg' and len(line):
response['msg'] += line
else:
key = ''
try:
key, val = line.split('=', 1)
response[key] = val
except:
pass
last_key = key
return response
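# e.g. a body of "foo=1\nmsg=abc\n def\n" parses to
# {'foo': '1', 'msg': 'abcdef'}, since wrapped 'msg' lines are concatenated.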
def parse_policy_response(self, msg_data):
# The decompressed data is HTMLish, decode it. The value="" of each
# tag is the data we want.
objs = []
class ParamHTMLParser(HTMLParser.HTMLParser):
def handle_starttag(self, tag, attrs):
if tag.lower() == 'param':
for key, value in attrs:
if key.lower() == 'value':
# It's made up of a bunch of key=value pairs separated
# by semicolons
d = dict()
for field in value.split(';'):
field = field.strip()
try:
key, value = field.split('=', 1)
d[key] = value
except:
pass
objs.append(d)
p = ParamHTMLParser()
p.feed(msg_data)
p.close()
return objs
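# e.g. feeding '<param value="policy=Foo; status=1;">' to this parser yields
# [{'policy': 'Foo', 'status': '1'}] (whitespace around each field is stripped).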
def parse_funk_response(self, msg_data):
e = xml.etree.ElementTree.fromstring(msg_data)
req_certs = dict()
for cert in e.find('AttributeRequest').findall('CertData'):
dns = dict()
cert_id = cert.attrib['Id']
for attr in cert.findall('Attribute'):
name = attr.attrib['Name']
value = attr.attrib['Value']
attr_type = attr.attrib['Type']
if attr_type == 'DN':
dns[name] = dict(n.strip().split('=') for n in value.split(','))
else:
# Unknown attribute type
pass
req_certs[cert_id] = dns
return req_certs
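# e.g. an Attribute with Type='DN' and Value='CN=Example CA, O=Example' is
# stored under its Name as {'CN': 'Example CA', 'O': 'Example'}.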
def gen_funk_platform(self):
# We don't know if the xml parser on the other end is fully compliant,
# just format a string like it expects.
msg = "<FunkMessage VendorID='2636' ProductID='1' Version='1' Platform='%s' ClientType='Agentless'> " % self.platform
msg += "<ClientAttributes SequenceID='-1'> "
def add_attr(key, val):
return "<Attribute Name='%s' Value='%s' />" % (key, val)
msg += add_attr('Platform', self.platform)
if self.hostname:
msg += add_attr(self.hostname, 'NETBIOSName') # Reversed
for mac in self.mac_addrs:
msg += add_attr(mac, 'MACAddress') # Reversed
msg += "</ClientAttributes> </FunkMessage>"
return encode_0ce7(msg, MSG_FUNK_PLATFORM)
def gen_funk_present(self):
msg = "<FunkMessage VendorID='2636' ProductID='1' Version='1' Platform='%s' ClientType='Agentless'> " % self.platform
msg += "<Present SequenceID='0'></Present> </FunkMessage>"
return encode_0ce7(msg, MSG_FUNK)
def gen_funk_response(self, certs):
msg = "<FunkMessage VendorID='2636' ProductID='1' Version='1' Platform='%s' ClientType='Agentless'> " % self.platform
msg += "<ClientAttributes SequenceID='0'> "
msg += "<Attribute Name='Platform' Value='%s' />" % self.platform
for name, value in certs.iteritems():
msg += "<Attribute Name='%s' Value='%s' />" % (name, value.data.strip())
msg += "<Attribute Name='%s' Value='%s' />" % (name, value.data.strip())
msg += "</ClientAttributes> </FunkMessage>"
return encode_0ce7(msg, MSG_FUNK)
def gen_policy_request(self):
policy_blocks = collections.OrderedDict({
'policy_request': {
'message_version': '3'
},
'esap': {
'esap_version': 'NOT_AVAILABLE',
'fileinfo': 'NOT_AVAILABLE',
'has_file_versions': 'YES',
'needs_exact_sdk': 'YES',
'opswat_sdk_version': '3'
},
'system_info': {
'os_version': '2.6.2',
'sp_version': '0',
'hc_mode': 'userMode'
}
})
msg = ''
for policy_key, policy_val in policy_blocks.iteritems():
v = ''.join([ '%s=%s;' % (k, v) for k, v in policy_val.iteritems()])
msg += '<parameter name="%s" value="%s">' % (policy_key, v)
return encode_0ce7(msg, 0xa4c18)
def gen_policy_response(self, policy_objs):
# Make a set of policies
policies = set()
for entry in policy_objs:
if 'policy' in entry:
policies.add(entry['policy'])
# Try to determine on policy name whether the response should be OK
# or NOTOK. Default to OK if we don't know, this may need updating.
msg = ''
for policy in policies:
msg += '\npolicy:%s\nstatus:' % policy
if 'Unsupported' in policy or 'Deny' in policy:
msg += 'NOTOK\nerror:Unknown error'
elif 'Required' in policy:
msg += 'OK\n'
else:
# Default action
msg += 'OK\n'
return encode_0ce7(msg, MSG_POLICY)
def get_cookie(self, dspreauth=None, dssignin=None):
if dspreauth is None or dssignin is None:
self.r = self.br.open('https://' + self.vpn_host)
else:
try:
self.cj.set_cookie(dspreauth)
except:
self.set_cookie('DSPREAUTH', dspreauth)
try:
self.cj.set_cookie(dssignin)
except:
self.set_cookie('DSSIGNIN', dssignin)
inner = self.gen_policy_request()
inner += encode_0ce7('policy request\x00v4', MSG_POLICY)
if self.funk:
inner += self.gen_funk_platform()
inner += self.gen_funk_present()
msg_raw = encode_0013(encode_0ce4(inner) + encode_0ce5('Accept-Language: en') + encode_0cf3(1))
logging.debug('Sending packet -')
decode_packet(msg_raw)
post_attrs = {
'connID': '0',
'timestamp': '0',
'msg': base64.b64encode(msg_raw),
'firsttime': '1'
}
if self.deviceid:
post_attrs['deviceid'] = self.deviceid
post_data = ''.join([ '%s=%s;' % (k, v) for k, v in post_attrs.iteritems()])
self.r = self.br.open('https://' + self.vpn_host + self.path + 'hc/tnchcupdate.cgi', post_data)
# Parse the data returned into a key/value dict
response = self.parse_response()
# msg has the stuff we want, it's base64 encoded
logging.debug('Receiving packet -')
msg_raw = base64.b64decode(response['msg'])
_1, _2, msg_decoded = decode_packet(msg_raw)
# Within msg, there is a field of data
sub_strings = msg_decoded[0x0ce4][0][0x0ce7]
# Pull the data out of the 'value' key in the htmlish stuff returned
policy_objs = []
req_certs = dict()
for str_id, sub_str in sub_strings:
if str_id == MSG_POLICY:
policy_objs += self.parse_policy_response(sub_str)
elif str_id == MSG_FUNK:
req_certs = self.parse_funk_response(sub_str)
if debug:
for obj in policy_objs:
if 'policy' in obj:
logging.debug('policy %s', obj['policy'])
for key, val in obj.iteritems():
if key != 'policy':
logging.debug('\t%s %s', key, val)
# Try to locate the required certificates
certs = dict()
for cert_id, req_dns in req_certs.iteritems():
for cert in self.avail_certs:
fail = False
for dn_name, dn_vals in req_dns.iteritems():
for name, val in dn_vals.iteritems():
try:
if dn_name == 'IssuerDN':
assert val in cert.issuer[name]
else:
logging.warn('Unknown DN type %s', str(dn_name))
raise Exception()
except:
fail = True
break
if fail:
break
if not fail:
certs[cert_id] = cert
break
if cert_id not in certs:
logging.warn('Could not find certificate for %s', str(req_dns))
inner = ''
if certs:
inner += self.gen_funk_response(certs)
inner += self.gen_policy_response(policy_objs)
msg_raw = encode_0013(encode_0ce4(inner) + encode_0ce5('Accept-Language: en'))
logging.debug('Sending packet -')
decode_packet(msg_raw)
post_attrs = {
'connID': '1',
'msg': base64.b64encode(msg_raw),
'firsttime': '1'
}
post_data = ''.join([ '%s=%s;' % (k, v) for k, v in post_attrs.iteritems()])
self.r = self.br.open('https://' + self.vpn_host + self.path + 'hc/tnchcupdate.cgi', post_data)
# We have a new DSPREAUTH cookie
return self.find_cookie('DSPREAUTH')
class tncc_server(object):
def __init__(self, s, t):
self.sock = s
self.tncc = t
def process_cmd(self):
buf = self.sock.recv(1024).decode('ascii')
if not len(buf):
sys.exit(0)
cmd, buf = buf.split('\n', 1)
cmd = cmd.strip()
args = dict()
for n in buf.split('\n'):
n = n.strip()
if len(n):
key, val = n.strip().split('=', 1)
args[key] = val
if cmd == 'start':
cookie = self.tncc.get_cookie(args['Cookie'], args['DSSIGNIN'])
resp = '200\n3\n%s\n\n' % cookie.value
self.sock.send(resp.encode('ascii'))
elif cmd == 'setcookie':
# FIXME: Support for periodic updates
dsid_value = args['Cookie']
if __name__ == "__main__":
vpn_host = sys.argv[1]
funk = 'TNCC_FUNK' in os.environ and os.environ['TNCC_FUNK'] != '0'
platform = os.environ.get('TNCC_PLATFORM', platform.system() + ' ' + platform.release())
if 'TNCC_HWADDR' in os.environ:
mac_addrs = [n.strip() for n in os.environ['TNCC_HWADDR'].split(',')]
else:
mac_addrs = []
for iface in netifaces.interfaces():
try:
mac = netifaces.ifaddresses(iface)[netifaces.AF_LINK][0]['addr']
assert mac != '00:00:00:00:00:00'
mac_addrs.append(mac)
except:
pass
hostname = os.environ.get('TNCC_HOSTNAME', socket.gethostname())
certs = []
if 'TNCC_CERTS' in os.environ:
now = datetime.datetime.now()
for f in os.environ['TNCC_CERTS'].split(','):
cert = x509cert(f.strip())
if now < cert.not_before:
logging.warn('WARNING: %s is not yet valid', f)
if now > cert.not_after:
logging.warn('WARNING: %s is expired', f)
certs.append(cert)
# \HKEY_CURRENT_USER\Software\Juniper Networks\Device Id
device_id = os.environ.get('TNCC_DEVICE_ID')
t = tncc(vpn_host, device_id, funk, platform, hostname, mac_addrs, certs)
if len(sys.argv) == 4:
dspreauth_value = sys.argv[2]
dssignin_value = sys.argv[3]
logging.debug('TNCC %s %s', dspreauth_value, dssignin_value)
print t.get_cookie(dspreauth_value, dssignin_value).value
else:
sock = socket.fromfd(0, socket.AF_UNIX, socket.SOCK_SEQPACKET)
server = tncc_server(sock, t)
while True:
server.process_cmd()
| lgpl-2.1 |
cjayb/mne-python | mne/datasets/tests/test_datasets.py | 6 | 6158 | import os
from os import path as op
import shutil
import zipfile
import sys
import pytest
from mne import datasets, read_labels_from_annot, write_labels_to_annot
from mne.datasets import testing
from mne.datasets._fsaverage.base import _set_montage_coreg_path
from mne.datasets.utils import _manifest_check_download
from mne.utils import (run_tests_if_main, requires_good_network, modified_env,
get_subjects_dir, ArgvSetter, _pl, use_log_level,
catch_logging, hashfunc)
subjects_dir = op.join(testing.data_path(download=False), 'subjects')
def test_datasets_basic(tmpdir):
"""Test simple dataset functions."""
# XXX 'hf_sef' and 'misc' do not conform to these standards
for dname in ('sample', 'somato', 'spm_face', 'testing', 'opm',
'bst_raw', 'bst_auditory', 'bst_resting', 'multimodal',
'bst_phantom_ctf', 'bst_phantom_elekta', 'kiloword',
'mtrf', 'phantom_4dbti',
'visual_92_categories', 'fieldtrip_cmc'):
if dname.startswith('bst'):
dataset = getattr(datasets.brainstorm, dname)
check_name = 'brainstorm.%s' % (dname,)
else:
dataset = getattr(datasets, dname)
check_name = dname
if dataset.data_path(download=False) != '':
assert isinstance(dataset.get_version(), str)
assert datasets.utils.has_dataset(check_name)
else:
assert dataset.get_version() is None
assert not datasets.utils.has_dataset(check_name)
print('%s: %s' % (dname, datasets.utils.has_dataset(check_name)))
tempdir = str(tmpdir)
# don't let it read from the config file to get the directory,
# force it to look for the default
with modified_env(**{'_MNE_FAKE_HOME_DIR': tempdir, 'SUBJECTS_DIR': None}):
assert (datasets.utils._get_path(None, 'foo', 'bar') ==
op.join(tempdir, 'mne_data'))
assert get_subjects_dir(None) is None
_set_montage_coreg_path()
sd = get_subjects_dir()
assert sd.endswith('MNE-fsaverage-data')
def _fake_fetch_file(url, destination, print_destination=False):
with open(destination, 'w') as fid:
fid.write(url)
@requires_good_network
def test_downloads(tmpdir):
"""Test dataset URL handling."""
# Try actually downloading a dataset
path = datasets._fake.data_path(path=str(tmpdir), update_path=False)
assert op.isfile(op.join(path, 'bar'))
assert datasets._fake.get_version() is None
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_good_network
def test_fetch_parcellations(tmpdir):
"""Test fetching parcellations."""
this_subjects_dir = str(tmpdir)
os.mkdir(op.join(this_subjects_dir, 'fsaverage'))
os.mkdir(op.join(this_subjects_dir, 'fsaverage', 'label'))
os.mkdir(op.join(this_subjects_dir, 'fsaverage', 'surf'))
for hemi in ('lh', 'rh'):
shutil.copyfile(
op.join(subjects_dir, 'fsaverage', 'surf', '%s.white' % hemi),
op.join(this_subjects_dir, 'fsaverage', 'surf', '%s.white' % hemi))
# speed up by pretending we have one of them
with open(op.join(this_subjects_dir, 'fsaverage', 'label',
'lh.aparc_sub.annot'), 'wb'):
pass
datasets.fetch_aparc_sub_parcellation(subjects_dir=this_subjects_dir)
with ArgvSetter(('--accept-hcpmmp-license',)):
datasets.fetch_hcp_mmp_parcellation(subjects_dir=this_subjects_dir)
for hemi in ('lh', 'rh'):
assert op.isfile(op.join(this_subjects_dir, 'fsaverage', 'label',
'%s.aparc_sub.annot' % hemi))
# test our annot round-trips here
kwargs = dict(subject='fsaverage', hemi='both', sort=False,
subjects_dir=this_subjects_dir)
labels = read_labels_from_annot(parc='HCPMMP1', **kwargs)
write_labels_to_annot(
labels, parc='HCPMMP1_round',
table_name='./left.fsaverage164.label.gii', **kwargs)
orig = op.join(this_subjects_dir, 'fsaverage', 'label', 'lh.HCPMMP1.annot')
first = hashfunc(orig)
new = orig[:-6] + '_round.annot'
second = hashfunc(new)
assert first == second
_zip_fnames = ['foo/foo.txt', 'foo/bar.txt', 'foo/baz.txt']
def _fake_zip_fetch(url, fname, hash_):
with zipfile.ZipFile(fname, 'w') as zipf:
with zipf.open('foo/', 'w'):
pass
for fname in _zip_fnames:
with zipf.open(fname, 'w'):
pass
@pytest.mark.skipif(sys.version_info < (3, 6),
reason="writing zip files requires python3.6 or higher")
@pytest.mark.parametrize('n_have', range(len(_zip_fnames)))
def test_manifest_check_download(tmpdir, n_have, monkeypatch):
"""Test our manifest downloader."""
monkeypatch.setattr(datasets.utils, '_fetch_file', _fake_zip_fetch)
destination = op.join(str(tmpdir), 'empty')
manifest_path = op.join(str(tmpdir), 'manifest.txt')
with open(manifest_path, 'w') as fid:
for fname in _zip_fnames:
fid.write('%s\n' % fname)
assert n_have in range(len(_zip_fnames) + 1)
assert not op.isdir(destination)
if n_have > 0:
os.makedirs(op.join(destination, 'foo'))
assert op.isdir(op.join(destination, 'foo'))
for fname in _zip_fnames:
assert not op.isfile(op.join(destination, fname))
for fname in _zip_fnames[:n_have]:
with open(op.join(destination, fname), 'w'):
pass
with catch_logging() as log:
with use_log_level(True):
url = hash_ = '' # we mock the _fetch_file so these are not used
_manifest_check_download(manifest_path, destination, url, hash_)
log = log.getvalue()
n_missing = 3 - n_have
assert ('%d file%s missing from' % (n_missing, _pl(n_missing))) in log
for want in ('Extracting missing', 'Successfully '):
if n_missing > 0:
assert want in log
else:
assert want not in log
assert op.isdir(destination)
for fname in _zip_fnames:
assert op.isfile(op.join(destination, fname))
run_tests_if_main()
| bsd-3-clause |
wufangjie/leetcode | 640. Solve the Equation.py | 1 | 1269 | class Solution(object):
def solveEquation(self, equation):
"""
:type equation: str
:rtype: str
"""
left, right = equation.split('=')
a1, b1 = self._parse(left)
a2, b2 = self._parse(right)
if a1 == a2:
if b1 == b2:
return "Infinite solutions"
else:
return "No solution"
else:
return 'x={}'.format((b2 - b1) // (a1 - a2))
@staticmethod
def _parse(experession):
ret = [0, 0]
pre, pre_idx, add_idx, sign = 0, 0, 1, '+'
for i, c in enumerate(experession + '+'):
if c in ('+', '-'):
ret[add_idx] += pre if sign == '+' else -pre
pre, pre_idx, add_idx, sign = 0, i + 1, 1, c
elif c == 'x':
add_idx = 0
if pre_idx == i:
pre = 1
else:
pre = pre * 10 + int(c)
return ret
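# Quick sanity checks of the parser ([coefficient of x, constant term]):
#   Solution()._parse('2x+3-x')  # -> [1, 3]
#   Solution()._parse('x')       # -> [1, 0]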
assert Solution().solveEquation("x+5-3+x=6+x-2") == "x=2"
assert Solution().solveEquation("x=x") == "Infinite solutions"
assert Solution().solveEquation("2x=x") == "x=0"
assert Solution().solveEquation("x=x+2") == "No solution"
assert Solution().solveEquation("2x+3x-6x=x+2") == "x=-1"
| gpl-3.0 |
Pointedstick/ReplicatorG | skein_engines/skeinforge-35/fabmetheus_utilities/fabmetheus_tools/interpret_plugins/gts.py | 6 | 4607 | """
This page is in the table of contents.
The gts.py script is an import translator plugin to get a carving from an gts file.
An import plugin is a script in the interpret_plugins folder which has the function getCarving. It is meant to be run from the interpret tool. To ensure that the plugin works on platforms which do not handle file capitalization properly, give the plugin a lower case name.
The getCarving function takes the file name of an gts file and returns the carving.
The GNU Triangulated Surface (.gts) format is described at:
http://gts.sourceforge.net/reference/gts-surfaces.html#GTS-SURFACE-WRITE
Quoted from http://gts.sourceforge.net/reference/gts-surfaces.html#GTS-SURFACE-WRITE
"All the lines beginning with GTS_COMMENTS (#!) are ignored. The first line contains three unsigned integers separated by spaces. The first integer is the number of vertexes, nv, the second is the number of edges, ne and the third is the number of faces, nf.
Follows nv lines containing the x, y and z coordinates of the vertexes. Follows ne lines containing the two indices (starting from one) of the vertexes of each edge. Follows nf lines containing the three ordered indices (also starting from one) of the edges of each face.
The format described above is the least common denominator to all GTS files. Consistent with an object-oriented approach, the GTS file format is extensible. Each of the lines of the file can be extended with user-specific attributes accessible through the read() and write() virtual methods of each of the objects written (surface, vertexes, edges or faces). When read with different object classes, these extra attributes are just ignored."
This example gets a carving for the gts file Screw Holder Bottom.gts. This example is run in a terminal in the folder which contains Screw Holder Bottom.gts and gts.py.
> python
Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31)
[GCC 4.2.1 (SUSE Linux)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import gts
>>> gts.getCarving()
[11.6000003815, 10.6837882996, 7.80209827423
..
many more lines of the carving
..
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.geometry_tools import face
from fabmetheus_utilities.geometry.solids import trianglemesh
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import gcodec
__author__ = 'Enrique Perez ([email protected])'
__credits__ = 'Nophead <http://hydraraptor.blogspot.com/>\nArt of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GPL 3.0'
def getFromGNUTriangulatedSurfaceText( gnuTriangulatedSurfaceText, triangleMesh ):
"Initialize from a GNU Triangulated Surface Text."
if gnuTriangulatedSurfaceText == '':
return None
lines = archive.getTextLines( gnuTriangulatedSurfaceText )
linesWithoutComments = []
for line in lines:
if len(line) > 0:
firstCharacter = line[0]
if firstCharacter != '#' and firstCharacter != '!':
linesWithoutComments.append(line)
splitLine = linesWithoutComments[0].split()
numberOfVertexes = int( splitLine[0] )
numberOfEdges = int(splitLine[1])
numberOfFaces = int( splitLine[2] )
faceTriples = []
for vertexIndex in xrange( numberOfVertexes ):
line = linesWithoutComments[ vertexIndex + 1 ]
splitLine = line.split()
vertex = Vector3( float( splitLine[0] ), float(splitLine[1]), float( splitLine[2] ) )
triangleMesh.vertexes.append(vertex)
edgeStart = numberOfVertexes + 1
for edgeIndex in xrange( numberOfEdges ):
line = linesWithoutComments[ edgeIndex + edgeStart ]
splitLine = line.split()
vertexIndexes = []
for word in splitLine[ : 2 ]:
vertexIndexes.append( int(word) - 1 )
edge = face.Edge().getFromVertexIndexes( edgeIndex, vertexIndexes )
triangleMesh.edges.append( edge )
faceStart = edgeStart + numberOfEdges
for faceIndex in xrange( numberOfFaces ):
line = linesWithoutComments[ faceIndex + faceStart ]
splitLine = line.split()
edgeIndexes = []
for word in splitLine[ : 3 ]:
edgeIndexes.append( int(word) - 1 )
triangleMesh.faces.append( face.Face().getFromEdgeIndexes( edgeIndexes, triangleMesh.edges, faceIndex ) )
return triangleMesh
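# A minimal sketch (assuming the fabmetheus modules are importable): a single
# triangle described as 3 vertexes, 3 edges and 1 face parses cleanly, e.g.
#   gtsText = '3 3 1\n0 0 0\n1 0 0\n0 1 0\n1 2\n2 3\n3 1\n1 2 3\n'
#   mesh = getFromGNUTriangulatedSurfaceText(gtsText, trianglemesh.TriangleMesh())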
def getCarving(fileName):
"Get the carving for the gts file."
return getFromGNUTriangulatedSurfaceText( archive.getFileText(fileName), trianglemesh.TriangleMesh() )
| gpl-2.0 |
zooba/PTVS | Python/Tests/TestData/VirtualEnv/env/Lib/encodings/cp852.py | 593 | 35258 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP852.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp852',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
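# Usage sketch (Python 2 semantics), once the encodings package has registered
# this codec:
#   u'\u0141'.encode('cp852') == '\x9d' and '\x9d'.decode('cp852') == u'\u0141'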
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
0x0086: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x008b: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
0x0092: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
0x0096: 0x013e, # LATIN SMALL LETTER L WITH CARON
0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
0x009c: 0x0165, # LATIN SMALL LETTER T WITH CARON
0x009d: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00a5: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00a6: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00a7: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00a8: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00a9: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00ac: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ad: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
0x00b8: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00be: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE
0x00c7: 0x0103, # LATIN SMALL LETTER A WITH BREVE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00d1: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00d2: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x010f, # LATIN SMALL LETTER D WITH CARON
0x00d5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x011b, # LATIN SMALL LETTER E WITH CARON
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA
0x00de: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00e4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00e5: 0x0148, # LATIN SMALL LETTER N WITH CARON
0x00e6: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00e7: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00e8: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
0x00eb: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ee: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x02dd, # DOUBLE ACUTE ACCENT
0x00f2: 0x02db, # OGONEK
0x00f3: 0x02c7, # CARON
0x00f4: 0x02d8, # BREVE
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x02d9, # DOT ABOVE
0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x00fc: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
0x00fd: 0x0159, # LATIN SMALL LETTER R WITH CARON
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u016f' # 0x0085 -> LATIN SMALL LETTER U WITH RING ABOVE
u'\u0107' # 0x0086 -> LATIN SMALL LETTER C WITH ACUTE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u0150' # 0x008a -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
u'\u0151' # 0x008b -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u0106' # 0x008f -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0139' # 0x0091 -> LATIN CAPITAL LETTER L WITH ACUTE
u'\u013a' # 0x0092 -> LATIN SMALL LETTER L WITH ACUTE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\u013d' # 0x0095 -> LATIN CAPITAL LETTER L WITH CARON
u'\u013e' # 0x0096 -> LATIN SMALL LETTER L WITH CARON
u'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0164' # 0x009b -> LATIN CAPITAL LETTER T WITH CARON
u'\u0165' # 0x009c -> LATIN SMALL LETTER T WITH CARON
u'\u0141' # 0x009d -> LATIN CAPITAL LETTER L WITH STROKE
u'\xd7' # 0x009e -> MULTIPLICATION SIGN
u'\u010d' # 0x009f -> LATIN SMALL LETTER C WITH CARON
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\u0104' # 0x00a4 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u0105' # 0x00a5 -> LATIN SMALL LETTER A WITH OGONEK
u'\u017d' # 0x00a6 -> LATIN CAPITAL LETTER Z WITH CARON
u'\u017e' # 0x00a7 -> LATIN SMALL LETTER Z WITH CARON
u'\u0118' # 0x00a8 -> LATIN CAPITAL LETTER E WITH OGONEK
u'\u0119' # 0x00a9 -> LATIN SMALL LETTER E WITH OGONEK
u'\xac' # 0x00aa -> NOT SIGN
u'\u017a' # 0x00ab -> LATIN SMALL LETTER Z WITH ACUTE
u'\u010c' # 0x00ac -> LATIN CAPITAL LETTER C WITH CARON
u'\u015f' # 0x00ad -> LATIN SMALL LETTER S WITH CEDILLA
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u011a' # 0x00b7 -> LATIN CAPITAL LETTER E WITH CARON
u'\u015e' # 0x00b8 -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u017b' # 0x00bd -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\u017c' # 0x00be -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u0102' # 0x00c6 -> LATIN CAPITAL LETTER A WITH BREVE
u'\u0103' # 0x00c7 -> LATIN SMALL LETTER A WITH BREVE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\u0111' # 0x00d0 -> LATIN SMALL LETTER D WITH STROKE
u'\u0110' # 0x00d1 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u010e' # 0x00d2 -> LATIN CAPITAL LETTER D WITH CARON
u'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u010f' # 0x00d4 -> LATIN SMALL LETTER D WITH CARON
u'\u0147' # 0x00d5 -> LATIN CAPITAL LETTER N WITH CARON
u'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\u011b' # 0x00d8 -> LATIN SMALL LETTER E WITH CARON
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u0162' # 0x00dd -> LATIN CAPITAL LETTER T WITH CEDILLA
u'\u016e' # 0x00de -> LATIN CAPITAL LETTER U WITH RING ABOVE
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\u0144' # 0x00e4 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0148' # 0x00e5 -> LATIN SMALL LETTER N WITH CARON
u'\u0160' # 0x00e6 -> LATIN CAPITAL LETTER S WITH CARON
u'\u0161' # 0x00e7 -> LATIN SMALL LETTER S WITH CARON
u'\u0154' # 0x00e8 -> LATIN CAPITAL LETTER R WITH ACUTE
u'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\u0155' # 0x00ea -> LATIN SMALL LETTER R WITH ACUTE
u'\u0170' # 0x00eb -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
u'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
u'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\u0163' # 0x00ee -> LATIN SMALL LETTER T WITH CEDILLA
u'\xb4' # 0x00ef -> ACUTE ACCENT
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\u02dd' # 0x00f1 -> DOUBLE ACUTE ACCENT
u'\u02db' # 0x00f2 -> OGONEK
u'\u02c7' # 0x00f3 -> CARON
u'\u02d8' # 0x00f4 -> BREVE
u'\xa7' # 0x00f5 -> SECTION SIGN
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\xb8' # 0x00f7 -> CEDILLA
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\xa8' # 0x00f9 -> DIAERESIS
u'\u02d9' # 0x00fa -> DOT ABOVE
u'\u0171' # 0x00fb -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
u'\u0158' # 0x00fc -> LATIN CAPITAL LETTER R WITH CARON
u'\u0159' # 0x00fd -> LATIN SMALL LETTER R WITH CARON
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b8: 0x00f7, # CEDILLA
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
0x0102: 0x00c6, # LATIN CAPITAL LETTER A WITH BREVE
0x0103: 0x00c7, # LATIN SMALL LETTER A WITH BREVE
0x0104: 0x00a4, # LATIN CAPITAL LETTER A WITH OGONEK
0x0105: 0x00a5, # LATIN SMALL LETTER A WITH OGONEK
0x0106: 0x008f, # LATIN CAPITAL LETTER C WITH ACUTE
0x0107: 0x0086, # LATIN SMALL LETTER C WITH ACUTE
0x010c: 0x00ac, # LATIN CAPITAL LETTER C WITH CARON
0x010d: 0x009f, # LATIN SMALL LETTER C WITH CARON
0x010e: 0x00d2, # LATIN CAPITAL LETTER D WITH CARON
0x010f: 0x00d4, # LATIN SMALL LETTER D WITH CARON
0x0110: 0x00d1, # LATIN CAPITAL LETTER D WITH STROKE
0x0111: 0x00d0, # LATIN SMALL LETTER D WITH STROKE
0x0118: 0x00a8, # LATIN CAPITAL LETTER E WITH OGONEK
0x0119: 0x00a9, # LATIN SMALL LETTER E WITH OGONEK
0x011a: 0x00b7, # LATIN CAPITAL LETTER E WITH CARON
0x011b: 0x00d8, # LATIN SMALL LETTER E WITH CARON
0x0139: 0x0091, # LATIN CAPITAL LETTER L WITH ACUTE
0x013a: 0x0092, # LATIN SMALL LETTER L WITH ACUTE
0x013d: 0x0095, # LATIN CAPITAL LETTER L WITH CARON
0x013e: 0x0096, # LATIN SMALL LETTER L WITH CARON
0x0141: 0x009d, # LATIN CAPITAL LETTER L WITH STROKE
0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
0x0144: 0x00e4, # LATIN SMALL LETTER N WITH ACUTE
0x0147: 0x00d5, # LATIN CAPITAL LETTER N WITH CARON
0x0148: 0x00e5, # LATIN SMALL LETTER N WITH CARON
0x0150: 0x008a, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x0151: 0x008b, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x0154: 0x00e8, # LATIN CAPITAL LETTER R WITH ACUTE
0x0155: 0x00ea, # LATIN SMALL LETTER R WITH ACUTE
0x0158: 0x00fc, # LATIN CAPITAL LETTER R WITH CARON
0x0159: 0x00fd, # LATIN SMALL LETTER R WITH CARON
0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
0x015e: 0x00b8, # LATIN CAPITAL LETTER S WITH CEDILLA
0x015f: 0x00ad, # LATIN SMALL LETTER S WITH CEDILLA
0x0160: 0x00e6, # LATIN CAPITAL LETTER S WITH CARON
0x0161: 0x00e7, # LATIN SMALL LETTER S WITH CARON
0x0162: 0x00dd, # LATIN CAPITAL LETTER T WITH CEDILLA
0x0163: 0x00ee, # LATIN SMALL LETTER T WITH CEDILLA
0x0164: 0x009b, # LATIN CAPITAL LETTER T WITH CARON
0x0165: 0x009c, # LATIN SMALL LETTER T WITH CARON
0x016e: 0x00de, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x016f: 0x0085, # LATIN SMALL LETTER U WITH RING ABOVE
0x0170: 0x00eb, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x0171: 0x00fb, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
0x017a: 0x00ab, # LATIN SMALL LETTER Z WITH ACUTE
0x017b: 0x00bd, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x017c: 0x00be, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x017d: 0x00a6, # LATIN CAPITAL LETTER Z WITH CARON
0x017e: 0x00a7, # LATIN SMALL LETTER Z WITH CARON
0x02c7: 0x00f3, # CARON
0x02d8: 0x00f4, # BREVE
0x02d9: 0x00fa, # DOT ABOVE
0x02db: 0x00f2, # OGONEK
0x02dd: 0x00f1, # DOUBLE ACUTE ACCENT
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| apache-2.0 |
arangodb/arangodb | 3rdParty/rocksdb/6.8/tools/advisor/advisor/db_options_parser.py | 14 | 16518 | # Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
# This source code is licensed under both the GPLv2 (found in the
# COPYING file in the root directory) and Apache 2.0 License
# (found in the LICENSE.Apache file in the root directory).
import copy
from advisor.db_log_parser import DataSource, NO_COL_FAMILY
from advisor.ini_parser import IniParser
import os
class OptionsSpecParser(IniParser):
@staticmethod
def is_new_option(line):
return '=' in line
@staticmethod
def get_section_type(line):
'''
Example section header: [TableOptions/BlockBasedTable "default"]
Here the section_type returned would be
'TableOptions.BlockBasedTable'
'''
section_path = line.strip()[1:-1].split()[0]
section_type = '.'.join(section_path.split('/'))
return section_type
@staticmethod
def get_section_name(line):
# example: get_section_name('[CFOptions "default"]')
token_list = line.strip()[1:-1].split('"')
# token_list = ['CFOptions', 'default', '']
if len(token_list) < 3:
return None
return token_list[1] # return 'default'
@staticmethod
def get_section_str(section_type, section_name):
# Example:
# Case 1: get_section_str('DBOptions', NO_COL_FAMILY)
# Case 2: get_section_str('TableOptions.BlockBasedTable', 'default')
section_type = '/'.join(section_type.strip().split('.'))
# Case 1: section_type = 'DBOptions'
# Case 2: section_type = 'TableOptions/BlockBasedTable'
section_str = '[' + section_type
if section_name == NO_COL_FAMILY:
# Case 1: '[DBOptions]'
return (section_str + ']')
else:
# Case 2: '[TableOptions/BlockBasedTable "default"]'
return section_str + ' "' + section_name + '"]'
@staticmethod
def get_option_str(key, values):
option_str = key + '='
# get_option_str('db_log_dir', None), returns 'db_log_dir='
if values:
# example:
# get_option_str('max_bytes_for_level_multiplier_additional',
# [1,1,1,1,1,1,1]), returned string:
# 'max_bytes_for_level_multiplier_additional=1:1:1:1:1:1:1'
if isinstance(values, list):
for value in values:
option_str += (str(value) + ':')
option_str = option_str[:-1]
else:
# example: get_option_str('write_buffer_size', 1048576)
# returned string: 'write_buffer_size=1048576'
option_str += str(values)
return option_str
class DatabaseOptions(DataSource):
@staticmethod
def is_misc_option(option_name):
# these are miscellaneous options that are not yet supported by the
# Rocksdb options file, hence they are not prefixed with any section
# name
return '.' not in option_name
@staticmethod
def get_options_diff(opt_old, opt_new):
# type: Dict[option, Dict[col_fam, value]] X 2 ->
# Dict[option, Dict[col_fam, Tuple(old_value, new_value)]]
# note: diff should contain a tuple of values only if they are
# different from each other
options_union = set(opt_old.keys()).union(set(opt_new.keys()))
diff = {}
for opt in options_union:
diff[opt] = {}
# if option in options_union, then it must be in one of the configs
if opt not in opt_old:
for col_fam in opt_new[opt]:
diff[opt][col_fam] = (None, opt_new[opt][col_fam])
elif opt not in opt_new:
for col_fam in opt_old[opt]:
diff[opt][col_fam] = (opt_old[opt][col_fam], None)
else:
for col_fam in opt_old[opt]:
if col_fam in opt_new[opt]:
if opt_old[opt][col_fam] != opt_new[opt][col_fam]:
diff[opt][col_fam] = (
opt_old[opt][col_fam],
opt_new[opt][col_fam]
)
else:
diff[opt][col_fam] = (opt_old[opt][col_fam], None)
for col_fam in opt_new[opt]:
if col_fam in opt_old[opt]:
if opt_old[opt][col_fam] != opt_new[opt][col_fam]:
diff[opt][col_fam] = (
opt_old[opt][col_fam],
opt_new[opt][col_fam]
)
else:
diff[opt][col_fam] = (None, opt_new[opt][col_fam])
if not diff[opt]:
diff.pop(opt)
return diff
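# Illustrative sketch (hypothetical option values):
#   old = {'CFOptions.write_buffer_size': {'default': '1024'}}
#   new = {'CFOptions.write_buffer_size': {'default': '2048'}}
#   DatabaseOptions.get_options_diff(old, new)
#   # -> {'CFOptions.write_buffer_size': {'default': ('1024', '2048')}}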
def __init__(self, rocksdb_options, misc_options=None):
super().__init__(DataSource.Type.DB_OPTIONS)
# The options are stored in the following data structure:
# Dict[section_type, Dict[section_name, Dict[option_name, value]]]
self.options_dict = None
self.column_families = None
# Load the options from the given file to a dictionary.
self.load_from_source(rocksdb_options)
# Setup the miscellaneous options expected to be List[str], where each
# element in the List has the format "<option_name>=<option_value>"
# These options are the ones that are not yet supported by the Rocksdb
# OPTIONS file, so they are provided separately
self.setup_misc_options(misc_options)
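# e.g. (hypothetical): misc_options = ['bloom_bits=4', 'rate_limit=1024']
# would be stored as {'bloom_bits': '4', 'rate_limit': '1024'}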
def setup_misc_options(self, misc_options):
self.misc_options = {}
if misc_options:
for option_pair_str in misc_options:
option_name = option_pair_str.split('=')[0].strip()
option_value = option_pair_str.split('=')[1].strip()
self.misc_options[option_name] = option_value
def load_from_source(self, options_path):
self.options_dict = {}
with open(options_path, 'r') as db_options:
for line in db_options:
line = OptionsSpecParser.remove_trailing_comment(line)
if not line:
continue
if OptionsSpecParser.is_section_header(line):
curr_sec_type = (
OptionsSpecParser.get_section_type(line)
)
curr_sec_name = OptionsSpecParser.get_section_name(line)
if curr_sec_type not in self.options_dict:
self.options_dict[curr_sec_type] = {}
if not curr_sec_name:
curr_sec_name = NO_COL_FAMILY
self.options_dict[curr_sec_type][curr_sec_name] = {}
# example: if the line read from the Rocksdb OPTIONS file
# is [CFOptions "default"], then the section type is
# CFOptions and 'default' is the name of a column family
# of this database, so it's added to the list of
# column families stored in this object
if curr_sec_type == 'CFOptions':
if not self.column_families:
self.column_families = []
self.column_families.append(curr_sec_name)
elif OptionsSpecParser.is_new_option(line):
key, value = OptionsSpecParser.get_key_value_pair(line)
self.options_dict[curr_sec_type][curr_sec_name][key] = (
value
)
else:
error = 'Not able to parse line in Options file.'
OptionsSpecParser.exit_with_parse_error(line, error)
def get_misc_options(self):
# these are options that are not yet supported by the Rocksdb OPTIONS
# file, hence they are provided and stored separately
return self.misc_options
def get_column_families(self):
return self.column_families
def get_all_options(self):
# This method returns all the options that are stored in this object as
# a: Dict[<sec_type>.<option_name>: Dict[col_fam, option_value]]
all_options = []
# Example: in the section header '[CFOptions "default"]' read from the
# OPTIONS file, sec_type='CFOptions'
for sec_type in self.options_dict:
for col_fam in self.options_dict[sec_type]:
for opt_name in self.options_dict[sec_type][col_fam]:
option = sec_type + '.' + opt_name
all_options.append(option)
all_options.extend(list(self.misc_options.keys()))
return self.get_options(all_options)
def get_options(self, reqd_options):
# type: List[str] -> Dict[str, Dict[str, Any]]
# List[option] -> Dict[option, Dict[col_fam, value]]
reqd_options_dict = {}
for option in reqd_options:
if DatabaseOptions.is_misc_option(option):
# the option is not prefixed by '<section_type>.' because it is
# not yet supported by the Rocksdb OPTIONS file; so it has to
# be fetched from the misc_options dictionary
if option not in self.misc_options:
continue
if option not in reqd_options_dict:
reqd_options_dict[option] = {}
reqd_options_dict[option][NO_COL_FAMILY] = (
self.misc_options[option]
)
else:
# Example: option = 'TableOptions.BlockBasedTable.block_align'
# then, sec_type = 'TableOptions.BlockBasedTable'
sec_type = '.'.join(option.split('.')[:-1])
# opt_name = 'block_align'
opt_name = option.split('.')[-1]
if sec_type not in self.options_dict:
continue
for col_fam in self.options_dict[sec_type]:
if opt_name in self.options_dict[sec_type][col_fam]:
if option not in reqd_options_dict:
reqd_options_dict[option] = {}
reqd_options_dict[option][col_fam] = (
self.options_dict[sec_type][col_fam][opt_name]
)
return reqd_options_dict
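# e.g. (hypothetical): get_options(['CFOptions.write_buffer_size'])
# -> {'CFOptions.write_buffer_size': {'default': '1048576'}}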
def update_options(self, options):
# An example 'options' object looks like:
# {'DBOptions.max_background_jobs': {NO_COL_FAMILY: 2},
# 'CFOptions.write_buffer_size': {'default': 1048576, 'cf_A': 128000},
# 'bloom_bits': {NO_COL_FAMILY: 4}}
for option in options:
if DatabaseOptions.is_misc_option(option):
# this is a misc_option i.e. an option that is not yet
# supported by the Rocksdb OPTIONS file, so it is not prefixed
# by '<section_type>.' and must be stored in the separate
# misc_options dictionary
if NO_COL_FAMILY not in options[option]:
print(
'WARNING(DatabaseOptions.update_options): not ' +
'updating option ' + option + ' because it is in ' +
'misc_option format but its scope is not ' +
NO_COL_FAMILY + '. Check format of option.'
)
continue
self.misc_options[option] = options[option][NO_COL_FAMILY]
else:
sec_name = '.'.join(option.split('.')[:-1])
opt_name = option.split('.')[-1]
if sec_name not in self.options_dict:
self.options_dict[sec_name] = {}
for col_fam in options[option]:
# if the option is not already present in the dictionary,
# it will be inserted, else it will be updated to the new
# value
if col_fam not in self.options_dict[sec_name]:
self.options_dict[sec_name][col_fam] = {}
self.options_dict[sec_name][col_fam][opt_name] = (
copy.deepcopy(options[option][col_fam])
)
def generate_options_config(self, nonce):
# this method generates a Rocksdb OPTIONS file in the INI format from
# the options stored in self.options_dict
this_path = os.path.abspath(os.path.dirname(__file__))
file_name = '../temp/OPTIONS_' + str(nonce) + '.tmp'
file_path = os.path.join(this_path, file_name)
with open(file_path, 'w') as fp:
for section in self.options_dict:
for col_fam in self.options_dict[section]:
fp.write(
OptionsSpecParser.get_section_str(section, col_fam) +
'\n'
)
for option in self.options_dict[section][col_fam]:
values = self.options_dict[section][col_fam][option]
fp.write(
OptionsSpecParser.get_option_str(option, values) +
'\n'
)
fp.write('\n')
return file_path
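# The emitted file is plain INI, e.g. (illustrative):
# [DBOptions]
# max_background_jobs=2
#
# [CFOptions "default"]
# write_buffer_size=1048576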
def check_and_trigger_conditions(self, conditions):
for cond in conditions:
reqd_options_dict = self.get_options(cond.options)
# This contains the indices of options that are specific to some
# column family and are not database-wide options.
incomplete_option_ix = []
options = []
missing_reqd_option = False
for ix, option in enumerate(cond.options):
if option not in reqd_options_dict:
print(
'WARNING(DatabaseOptions.check_and_trigger): ' +
'skipping condition ' + cond.name + ' because it '
'requires option ' + option + ' but this option is' +
' not available'
)
missing_reqd_option = True
break # required option is absent
if NO_COL_FAMILY in reqd_options_dict[option]:
options.append(reqd_options_dict[option][NO_COL_FAMILY])
else:
options.append(None)
incomplete_option_ix.append(ix)
if missing_reqd_option:
continue
# if all the options are database-wide options
if not incomplete_option_ix:
try:
if eval(cond.eval_expr):
cond.set_trigger({NO_COL_FAMILY: options})
except Exception as e:
print(
'WARNING(DatabaseOptions) check_and_trigger:' + str(e)
)
continue
# for all the options that are not database-wide, we look for their
# values specific to column families
col_fam_options_dict = {}
for col_fam in self.column_families:
present = True
for ix in incomplete_option_ix:
option = cond.options[ix]
if col_fam not in reqd_options_dict[option]:
present = False
break
options[ix] = reqd_options_dict[option][col_fam]
if present:
try:
if eval(cond.eval_expr):
col_fam_options_dict[col_fam] = (
copy.deepcopy(options)
)
except Exception as e:
print(
'WARNING(DatabaseOptions) check_and_trigger: ' +
str(e)
)
# Trigger for an OptionCondition object is of the form:
# Dict[col_fam_name: List[option_value]]
# where col_fam_name is the name of a column family for which
# 'eval_expr' evaluated to True and List[option_value] is the list
# of values of the options specified in the condition's 'options'
# field
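# e.g. (hypothetical): {'default': ['1048576'], 'cf_A': ['128000']}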
if col_fam_options_dict:
cond.set_trigger(col_fam_options_dict)
| apache-2.0 |
vlaznev/gui_plotter | plot_window/color_picker.py | 1 | 1419 | __author__ = 'user'
from PyQt4 import Qt, QtGui
class QColorComboBox(QtGui.QComboBox):
ColorNames = ["darkGreen", "green", "gray", "red", "white", "blue", "cyan", "darkMagenta", "yellow",
"darkRed", "black", "magenta"]
ColorRole = Qt.Qt.UserRole + 1
def __init__(self, parent=None):
super(QColorComboBox, self).__init__(parent)
self.fillColors()
def fillColors(self):
size = self.style().pixelMetric(QtGui.QStyle.PM_SmallIconSize)
pixmap = QtGui.QPixmap(size, size)
idx = 0
for colorName in self.ColorNames:
color = QtGui.QColor(colorName)
self.addItem(colorName, color)
pixmap.fill(color)
self.setItemData(idx, pixmap, Qt.Qt.DecorationRole)
self.setItemData(idx, color, self.ColorRole)
idx += 1
def currentColor(self):
idx = self.currentIndex()
if idx >= 0:
return self.itemData(idx, self.ColorRole)
else:
return None
def color(self, index):
return self.itemData(index, self.ColorRole)
def setCurrentColor(self, color):
colorObject = QtGui.QColor(color)
for idx in xrange(self.count()):
if colorObject == self.color(idx):
self.setCurrentIndex(idx)
return
raise ValueError("Color not found: " + str(color))
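# Minimal usage sketch (illustrative; assumes a running QApplication):
# combo = QColorComboBox()
# combo.setCurrentColor('red')
# chosen = combo.currentColor() # -> QColor for 'red'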
| mit |
Fritz449/SRLF | models/feed_forward.py | 1 | 5832 | import tensorflow as tf
import os
import sys
sys.path.append(os.path.realpath(".."))
from helpers.layers import denselayer
from models.base_model import BaseModel
import numpy as np
class FeedForward(BaseModel):
def __init__(self, sess, args):
BaseModel.__init__(self, sess)
self.n_hiddens = args['n_hiddens']
self.n_features = args['n_features']
self.critic = args.get('critic')
self.nonlinearity = args.get('nonlin', tf.nn.tanh)
self.state_input = tf.placeholder(tf.float32, shape=(None, self.n_features))
self.value_weights = []
self.value_weights_phs = []
self.create_network()
def create_network(self):
input = self.state_input
mean = tf.get_variable("means", shape=(1, int(input.get_shape()[1])), initializer=tf.constant_initializer(0),
trainable=False)
std = tf.get_variable("stds", shape=(1, int(input.get_shape()[1])), initializer=tf.constant_initializer(1),
trainable=False)
mean_ph = tf.placeholder(tf.float32, shape=mean.get_shape())
std_ph = tf.placeholder(tf.float32, shape=std.get_shape())
self.norm_set_op = [mean.assign(mean_ph), std.assign(std_ph)]
self.norm_phs = [mean_ph, std_ph]
hidden = (input - mean) / (std + 1e-5)
hidden = tf.clip_by_value(hidden, -20, 20)
self.hidden = hidden
for index, n_hidden in enumerate(self.n_hiddens):
hidden, weights = denselayer("hidden_{}".format(index), hidden, n_hidden, self.nonlinearity)
self.weights += weights
self.weights_phs += [tf.placeholder(tf.float32, shape=w.get_shape()) for w in weights]
self.hidden = hidden
hidden = self.hidden
self.value, weights = denselayer("value", hidden, 1)
self.value = tf.reshape(self.value, [-1])
self.value_weights += weights
self.value_weights_phs += [tf.placeholder(tf.float32, shape=w.get_shape()) for w in weights]
class FFDiscrete(FeedForward):
def __init__(self, sess, args):
FeedForward.__init__(self, sess, args)
self.n_actions = args['n_actions']
self.create_output()
for weight, ph in zip(self.weights, self.weights_phs):
self.set_op.append(weight.assign(ph))
def create_output(self):
self.action_probs = []
self.action_logprobs = []
for index, n in enumerate(self.n_actions):
log_probs, weights = denselayer("log_probs_{}".format(index), self.hidden, n, tf.nn.log_softmax)
self.action_logprobs.append(log_probs)
self.weights += weights
self.weights_phs += [tf.placeholder(tf.float32, shape=w.get_shape()) for w in weights]
self.action_probs.append(tf.exp(self.action_logprobs[index]))
self.sess.run(tf.global_variables_initializer())
def act(self, obs, exploration=True, return_dists=False):
log_probs = self.sess.run(self.action_logprobs, feed_dict={self.state_input: obs})
actions = np.zeros(shape=(len(log_probs),), dtype=np.int32)
for i in range(len(log_probs)):
if not exploration:
actions[i] = np.argmax(log_probs[i][0])
continue
actions[i] = np.random.choice(np.arange(self.n_actions[i], dtype=np.int32), p=np.exp(log_probs[i][0]))
if return_dists:
return actions, log_probs
return actions
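# Usage sketch (hypothetical sizes; assumes an active tf.Session `sess`):
# policy = FFDiscrete(sess, {'n_hiddens': [64, 64], 'n_features': 8,
# 'n_actions': [4]})
# action = policy.act(obs.reshape(1, -1))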
class FFContinuous(FeedForward):
def __init__(self, sess, args):
FeedForward.__init__(self, sess, args)
self.n_actions = args['n_actions']
self.std = args.get('std', "Const")
self.init_log_std = args.get('init_log_std', 0)
self.create_output()
for weight, ph in zip(self.weights, self.weights_phs):
self.set_op.append(weight.assign(ph))
def create_output(self):
self.action_means, weights = denselayer("means", self.hidden, len(self.n_actions))
self.weights += weights
self.weights_phs += [tf.placeholder(tf.float32, shape=w.get_shape()) for w in weights]
if self.std == "Const":
self.action_log_stds = tf.get_variable("std", shape=(1, len(self.n_actions),),
initializer=tf.constant_initializer(self.init_log_std),
trainable=False)
self.action_stds = tf.exp(self.action_log_stds)
elif self.std == "Param":
self.action_log_stds = tf.get_variable("std", shape=(1, len(self.n_actions)),
initializer=tf.constant_initializer(self.init_log_std))
self.action_stds = tf.exp(self.action_log_stds)
self.weights.append(self.action_log_stds)
self.weights_phs.append(tf.placeholder(tf.float32, shape=(1, len(self.n_actions))))
elif self.std == "Train":
self.action_stds, weights = denselayer("stds", self.hidden, len(self.n_actions), tf.exp)
self.weights += weights
self.weights_phs += [tf.placeholder(tf.float32, shape=w.get_shape()) for w in weights]
else:
raise Exception("unknown std mode: " + str(self.std))
self.sess.run(tf.global_variables_initializer())
def act(self, obs, exploration=True, return_dists=False):
means, stds = self.sess.run([self.action_means, self.action_stds], feed_dict={self.state_input: obs})
means = means[0]
stds = stds[0]
if not exploration:
return means
actions = np.zeros(shape=means.shape)
for i in range(actions.shape[0]):
actions[i] = np.random.normal(means[i], stds[i])
if return_dists:
return actions, [means, stds]
return actions
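# Note: 'std' selects the exploration-noise model (per the branches above):
# 'Const' = fixed log-std, 'Param' = trainable global log-std,
# 'Train' = state-dependent std predicted by the network.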
| apache-2.0 |
Stefan-Schmidt/linux-wpan-next | tools/power/pm-graph/analyze_boot.py | 84 | 31499 | #!/usr/bin/python
#
# Tool for analyzing boot timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# Authors:
# Todd Brandt <[email protected]>
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's boot time. It creates an html representation of
# the kernel boot timeline up to the start of the init process.
#
# ----------------- LIBRARIES --------------------
import sys
import time
import os
import string
import re
import platform
import shutil
from datetime import datetime, timedelta
from subprocess import call, Popen, PIPE
import analyze_suspend as aslib
# ----------------- CLASSES --------------------
# Class: SystemValues
# Description:
# A global, single-instance container used to
# store system values and test parameters
class SystemValues(aslib.SystemValues):
title = 'BootGraph'
version = '2.1'
hostname = 'localhost'
testtime = ''
kernel = ''
dmesgfile = ''
ftracefile = ''
htmlfile = 'bootgraph.html'
outfile = ''
testdir = ''
testdirprefix = 'boot'
embedded = False
testlog = False
dmesglog = False
ftracelog = False
useftrace = False
usecallgraph = False
usedevsrc = True
suspendmode = 'boot'
max_graph_depth = 2
graph_filter = 'do_one_initcall'
reboot = False
manual = False
iscronjob = False
timeformat = '%.6f'
bootloader = 'grub'
blexec = []
def __init__(self):
if('LOG_FILE' in os.environ and 'TEST_RESULTS_IDENTIFIER' in os.environ):
self.embedded = True
self.dmesglog = True
self.outfile = os.environ['LOG_FILE']
self.htmlfile = os.environ['LOG_FILE']
self.hostname = platform.node()
self.testtime = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
if os.path.exists('/proc/version'):
fp = open('/proc/version', 'r')
val = fp.read().strip()
fp.close()
self.kernel = self.kernelVersion(val)
else:
self.kernel = 'unknown'
self.testdir = datetime.now().strftime('boot-%y%m%d-%H%M%S')
def kernelVersion(self, msg):
return msg.split()[2]
def checkFtraceKernelVersion(self):
val = tuple(map(int, self.kernel.split('-')[0].split('.')))
if val >= (4, 10, 0):
return True
return False
def kernelParams(self):
cmdline = 'initcall_debug log_buf_len=32M'
if self.useftrace:
if self.cpucount > 0:
bs = min(self.memtotal / 2, 2*1024*1024) / self.cpucount
else:
bs = 131072
cmdline += ' trace_buf_size=%dK trace_clock=global '\
'trace_options=nooverwrite,funcgraph-abstime,funcgraph-cpu,'\
'funcgraph-duration,funcgraph-proc,funcgraph-tail,'\
'nofuncgraph-overhead,context-info,graph-time '\
'ftrace=function_graph '\
'ftrace_graph_max_depth=%d '\
'ftrace_graph_filter=%s' % \
(bs, self.max_graph_depth, self.graph_filter)
return cmdline
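# A resulting kernel command line looks like (illustrative, values vary):
# initcall_debug log_buf_len=32M trace_buf_size=131072K ...
# ftrace_graph_max_depth=2 ftrace_graph_filter=do_one_initcall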
def setGraphFilter(self, val):
master = self.getBootFtraceFilterFunctions()
fs = ''
for i in val.split(','):
func = i.strip()
if func == '':
doError('badly formatted filter function string')
if '[' in func or ']' in func:
doError('loadable module functions not allowed - "%s"' % func)
if ' ' in func:
doError('spaces found in filter functions - "%s"' % func)
if func not in master:
doError('function "%s" not available for ftrace' % func)
if not fs:
fs = func
else:
fs += ','+func
if not fs:
doError('badly formatted filter function string')
self.graph_filter = fs
def getBootFtraceFilterFunctions(self):
self.rootCheck(True)
fp = open(self.tpath+'available_filter_functions')
fulllist = fp.read().split('\n')
fp.close()
list = []
for i in fulllist:
if not i or ' ' in i or '[' in i or ']' in i:
continue
list.append(i)
return list
def myCronJob(self, line):
if '@reboot' not in line:
return False
if 'bootgraph' in line or 'analyze_boot.py' in line or '-cronjob' in line:
return True
return False
def cronjobCmdString(self):
cmdline = '%s -cronjob' % os.path.abspath(sys.argv[0])
args = iter(sys.argv[1:])
for arg in args:
if arg in ['-h', '-v', '-cronjob', '-reboot']:
continue
elif arg in ['-o', '-dmesg', '-ftrace', '-func']:
args.next()
continue
cmdline += ' '+arg
if self.graph_filter != 'do_one_initcall':
cmdline += ' -func "%s"' % self.graph_filter
cmdline += ' -o "%s"' % os.path.abspath(self.testdir)
return cmdline
def manualRebootRequired(self):
cmdline = self.kernelParams()
print 'To generate a new timeline manually, follow these steps:\n'
print '1. Add the CMDLINE string to your kernel command line.'
print '2. Reboot the system.'
print '3. After reboot, re-run this tool with the same arguments but no command (w/o -reboot or -manual).\n'
print 'CMDLINE="%s"' % cmdline
sys.exit()
def getExec(self, cmd):
dirlist = ['/sbin', '/bin', '/usr/sbin', '/usr/bin',
'/usr/local/sbin', '/usr/local/bin']
for path in dirlist:
cmdfull = os.path.join(path, cmd)
if os.path.exists(cmdfull):
return cmdfull
return ''
def blGrub(self):
blcmd = ''
for cmd in ['update-grub', 'grub-mkconfig', 'grub2-mkconfig']:
if blcmd:
break
blcmd = self.getExec(cmd)
if not blcmd:
doError('[GRUB] missing update command')
if not os.path.exists('/etc/default/grub'):
doError('[GRUB] missing /etc/default/grub')
if 'grub2' in blcmd:
cfg = '/boot/grub2/grub.cfg'
else:
cfg = '/boot/grub/grub.cfg'
if not os.path.exists(cfg):
doError('[GRUB] missing %s' % cfg)
if 'update-grub' in blcmd:
self.blexec = [blcmd]
else:
self.blexec = [blcmd, '-o', cfg]
def getBootLoader(self):
if self.bootloader == 'grub':
self.blGrub()
else:
doError('unknown boot loader: %s' % self.bootloader)
sysvals = SystemValues()
# Class: Data
# Description:
# The primary container for test data.
class Data(aslib.Data):
dmesg = {} # root data structure
start = 0.0 # test start
end = 0.0 # test end
dmesgtext = [] # dmesg text file in memory
testnumber = 0
idstr = ''
html_device_id = 0
valid = False
tUserMode = 0.0
boottime = ''
phases = ['kernel', 'user']
do_one_initcall = False
def __init__(self, num):
self.testnumber = num
self.idstr = 'a'
self.dmesgtext = []
self.dmesg = {
'kernel': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0,
'order': 0, 'color': 'linear-gradient(to bottom, #fff, #bcf)'},
'user': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0,
'order': 1, 'color': '#fff'}
}
def deviceTopology(self):
return ''
def newAction(self, phase, name, pid, start, end, ret, ulen):
# new device callback for a specific phase
self.html_device_id += 1
devid = '%s%d' % (self.idstr, self.html_device_id)
list = self.dmesg[phase]['list']
length = -1.0
if(start >= 0 and end >= 0):
length = end - start
i = 2
origname = name
while(name in list):
name = '%s[%d]' % (origname, i)
i += 1
list[name] = {'name': name, 'start': start, 'end': end,
'pid': pid, 'length': length, 'row': 0, 'id': devid,
'ret': ret, 'ulen': ulen }
return name
def deviceMatch(self, pid, cg):
if cg.end - cg.start == 0:
return True
for p in data.phases:
list = self.dmesg[p]['list']
for devname in list:
dev = list[devname]
if pid != dev['pid']:
continue
if cg.name == 'do_one_initcall':
if(cg.start <= dev['start'] and cg.end >= dev['end'] and dev['length'] > 0):
dev['ftrace'] = cg
self.do_one_initcall = True
return True
else:
if(cg.start > dev['start'] and cg.end < dev['end']):
if 'ftraces' not in dev:
dev['ftraces'] = []
dev['ftraces'].append(cg)
return True
return False
# ----------------- FUNCTIONS --------------------
# Function: parseKernelLog
# Description:
# parse a kernel log for boot data
def parseKernelLog():
phase = 'kernel'
data = Data(0)
data.dmesg['kernel']['start'] = data.start = ktime = 0.0
sysvals.stamp = {
'time': datetime.now().strftime('%B %d %Y, %I:%M:%S %p'),
'host': sysvals.hostname,
'mode': 'boot', 'kernel': ''}
tp = aslib.TestProps()
devtemp = dict()
if(sysvals.dmesgfile):
lf = open(sysvals.dmesgfile, 'r')
else:
lf = Popen('dmesg', stdout=PIPE).stdout
for line in lf:
line = line.replace('\r\n', '')
# grab the stamp and sysinfo
if re.match(tp.stampfmt, line):
tp.stamp = line
continue
elif re.match(tp.sysinfofmt, line):
tp.sysinfo = line
continue
idx = line.find('[')
if idx > 1:
line = line[idx:]
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
ktime = float(m.group('ktime'))
if(ktime > 120):
break
msg = m.group('msg')
data.dmesgtext.append(line)
if(ktime == 0.0 and re.match('^Linux version .*', msg)):
if(not sysvals.stamp['kernel']):
sysvals.stamp['kernel'] = sysvals.kernelVersion(msg)
continue
m = re.match('.* setting system clock to (?P<t>.*) UTC.*', msg)
if(m):
bt = datetime.strptime(m.group('t'), '%Y-%m-%d %H:%M:%S')
bt = bt - timedelta(seconds=int(ktime))
data.boottime = bt.strftime('%Y-%m-%d_%H:%M:%S')
sysvals.stamp['time'] = bt.strftime('%B %d %Y, %I:%M:%S %p')
continue
m = re.match('^calling *(?P<f>.*)\+.* @ (?P<p>[0-9]*)', msg)
if(m):
func = m.group('f')
pid = int(m.group('p'))
devtemp[func] = (ktime, pid)
continue
m = re.match('^initcall *(?P<f>.*)\+.* returned (?P<r>.*) after (?P<t>.*) usecs', msg)
if(m):
data.valid = True
data.end = ktime
f, r, t = m.group('f', 'r', 't')
if(f in devtemp):
start, pid = devtemp[f]
data.newAction(phase, f, pid, start, ktime, int(r), int(t))
del devtemp[f]
continue
if(re.match('^Freeing unused kernel memory.*', msg)):
data.tUserMode = ktime
data.dmesg['kernel']['end'] = ktime
data.dmesg['user']['start'] = ktime
phase = 'user'
if tp.stamp:
sysvals.stamp = 0
tp.parseStamp(data, sysvals)
data.dmesg['user']['end'] = data.end
lf.close()
return data
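# Typical initcall_debug lines consumed by parseKernelLog (illustrative):
# [ 0.034059] calling migration_init+0x0/0x70 @ 1
# [ 0.034065] initcall migration_init+0x0/0x70 returned 0 after 3 usecs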
# Function: parseTraceLog
# Description:
# Check if trace is available and copy to a temp file
def parseTraceLog(data):
# parse the trace log
ftemp = dict()
tp = aslib.TestProps()
tp.setTracerType('function_graph')
tf = open(sysvals.ftracefile, 'r')
for line in tf:
if line[0] == '#':
continue
m = re.match(tp.ftrace_line_fmt, line.strip())
if(not m):
continue
m_time, m_proc, m_pid, m_msg, m_dur = \
m.group('time', 'proc', 'pid', 'msg', 'dur')
if float(m_time) > data.end:
break
if(m_time and m_pid and m_msg):
t = aslib.FTraceLine(m_time, m_msg, m_dur)
pid = int(m_pid)
else:
continue
if t.fevent or t.fkprobe:
continue
key = (m_proc, pid)
if(key not in ftemp):
ftemp[key] = []
ftemp[key].append(aslib.FTraceCallGraph(pid))
cg = ftemp[key][-1]
if(cg.addLine(t)):
ftemp[key].append(aslib.FTraceCallGraph(pid))
tf.close()
# add the callgraph data to the device hierarchy
for key in ftemp:
proc, pid = key
for cg in ftemp[key]:
if len(cg.list) < 1 or cg.invalid:
continue
if(not cg.postProcess()):
print('Sanity check failed for %s-%d' % (proc, pid))
continue
# match cg data to devices
if not data.deviceMatch(pid, cg):
print ' BAD: %s %s-%d [%f - %f]' % (cg.name, proc, pid, cg.start, cg.end)
# Function: retrieveLogs
# Description:
# Create copies of dmesg and/or ftrace for later processing
def retrieveLogs():
# check ftrace is configured first
if sysvals.useftrace:
tracer = sysvals.fgetVal('current_tracer').strip()
if tracer != 'function_graph':
doError('ftrace not configured for a boot callgraph')
# create the folder and get dmesg
sysvals.systemInfo(aslib.dmidecode(sysvals.mempath))
sysvals.initTestOutput('boot')
sysvals.writeDatafileHeader(sysvals.dmesgfile)
call('dmesg >> '+sysvals.dmesgfile, shell=True)
if not sysvals.useftrace:
return
# get ftrace
sysvals.writeDatafileHeader(sysvals.ftracefile)
call('cat '+sysvals.tpath+'trace >> '+sysvals.ftracefile, shell=True)
# Function: colorForName
# Description:
# Generate a repeatable color from a list for a given name
def colorForName(name):
list = [
('c1', '#ec9999'),
('c2', '#ffc1a6'),
('c3', '#fff0a6'),
('c4', '#adf199'),
('c5', '#9fadea'),
('c6', '#a699c1'),
('c7', '#ad99b4'),
('c8', '#eaffea'),
('c9', '#dcecfb'),
('c10', '#ffffea')
]
i = 0
total = 0
count = len(list)
while i < len(name):
total += ord(name[i])
i += 1
return list[total % count]
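# e.g. colorForName('sda') always hashes to the same (class, color) pair,
# so a given device name keeps its color across runs (illustrative)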
def cgOverview(cg, minlen):
stats = dict()
large = []
for l in cg.list:
if l.fcall and l.depth == 1:
if l.length >= minlen:
large.append(l)
if l.name not in stats:
stats[l.name] = [0, 0.0]
stats[l.name][0] += (l.length * 1000.0)
stats[l.name][1] += 1
return (large, stats)
# Function: createBootGraph
# Description:
# Create the output html file from the resident test data
# Arguments:
# testruns: array of Data objects from parseKernelLog or parseTraceLog
# Output:
# True if the html file was created, false if it failed
def createBootGraph(data):
# html function templates
html_srccall = '<div id={6} title="{5}" class="srccall" style="left:{1}%;top:{2}px;height:{3}px;width:{4}%;line-height:{3}px;">{0}</div>\n'
html_timetotal = '<table class="time1">\n<tr>'\
'<td class="blue">Init process starts @ <b>{0} ms</b></td>'\
'<td class="blue">Last initcall ends @ <b>{1} ms</b></td>'\
'</tr>\n</table>\n'
# device timeline
devtl = aslib.Timeline(100, 20)
# write the test title and general info header
devtl.createHeader(sysvals)
# Generate the header for this timeline
t0 = data.start
tMax = data.end
tTotal = tMax - t0
if(tTotal == 0):
print('ERROR: No timeline data')
return False
user_mode = '%.0f'%(data.tUserMode*1000)
last_init = '%.0f'%(tTotal*1000)
devtl.html += html_timetotal.format(user_mode, last_init)
# determine the maximum number of rows we need to draw
devlist = []
for p in data.phases:
list = data.dmesg[p]['list']
for devname in list:
d = aslib.DevItem(0, p, list[devname])
devlist.append(d)
devtl.getPhaseRows(devlist, 0, 'start')
devtl.calcTotalRows()
# draw the timeline background
devtl.createZoomBox()
devtl.html += devtl.html_tblock.format('boot', '0', '100', devtl.scaleH)
for p in data.phases:
phase = data.dmesg[p]
length = phase['end']-phase['start']
left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
width = '%.3f' % ((length*100.0)/tTotal)
devtl.html += devtl.html_phase.format(left, width, \
'%.3f'%devtl.scaleH, '%.3f'%devtl.bodyH, \
phase['color'], '')
# draw the device timeline
num = 0
devstats = dict()
for phase in data.phases:
list = data.dmesg[phase]['list']
for devname in sorted(list):
cls, color = colorForName(devname)
dev = list[devname]
info = '@|%.3f|%.3f|%.3f|%d' % (dev['start']*1000.0, dev['end']*1000.0,
dev['ulen']/1000.0, dev['ret'])
devstats[dev['id']] = {'info':info}
dev['color'] = color
height = devtl.phaseRowHeight(0, phase, dev['row'])
top = '%.6f' % ((dev['row']*height) + devtl.scaleH)
left = '%.6f' % (((dev['start']-t0)*100)/tTotal)
width = '%.6f' % (((dev['end']-dev['start'])*100)/tTotal)
length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000)
devtl.html += devtl.html_device.format(dev['id'],
devname+length+phase+'_mode', left, top, '%.3f'%height,
width, devname, ' '+cls, '')
rowtop = devtl.phaseRowTop(0, phase, dev['row'])
height = '%.6f' % (devtl.rowH / 2)
top = '%.6f' % (rowtop + devtl.scaleH + (devtl.rowH / 2))
if data.do_one_initcall:
if('ftrace' not in dev):
continue
cg = dev['ftrace']
large, stats = cgOverview(cg, 0.001)
devstats[dev['id']]['fstat'] = stats
for l in large:
left = '%f' % (((l.time-t0)*100)/tTotal)
width = '%f' % (l.length*100/tTotal)
title = '%s (%0.3fms)' % (l.name, l.length * 1000.0)
devtl.html += html_srccall.format(l.name, left,
top, height, width, title, 'x%d'%num)
num += 1
continue
if('ftraces' not in dev):
continue
for cg in dev['ftraces']:
left = '%f' % (((cg.start-t0)*100)/tTotal)
width = '%f' % ((cg.end-cg.start)*100/tTotal)
cglen = (cg.end - cg.start) * 1000.0
title = '%s (%0.3fms)' % (cg.name, cglen)
cg.id = 'x%d' % num
devtl.html += html_srccall.format(cg.name, left,
top, height, width, title, dev['id']+cg.id)
num += 1
# draw the time scale, try to make the number of labels readable
devtl.createTimeScale(t0, tMax, tTotal, 'boot')
devtl.html += '</div>\n'
# timeline is finished
devtl.html += '</div>\n</div>\n'
# draw a legend which describes the phases by color
devtl.html += '<div class="legend">\n'
pdelta = 20.0
pmargin = 36.0
for phase in data.phases:
order = '%.2f' % ((data.dmesg[phase]['order'] * pdelta) + pmargin)
devtl.html += devtl.html_legend.format(order, \
data.dmesg[phase]['color'], phase+'_mode', phase[0])
devtl.html += '</div>\n'
if(sysvals.outfile == sysvals.htmlfile):
hf = open(sysvals.htmlfile, 'a')
else:
hf = open(sysvals.htmlfile, 'w')
# add the css if this is not an embedded run
extra = '\
.c1 {background:rgba(209,0,0,0.4);}\n\
.c2 {background:rgba(255,102,34,0.4);}\n\
.c3 {background:rgba(255,218,33,0.4);}\n\
.c4 {background:rgba(51,221,0,0.4);}\n\
.c5 {background:rgba(17,51,204,0.4);}\n\
.c6 {background:rgba(34,0,102,0.4);}\n\
.c7 {background:rgba(51,0,68,0.4);}\n\
.c8 {background:rgba(204,255,204,0.4);}\n\
.c9 {background:rgba(169,208,245,0.4);}\n\
.c10 {background:rgba(255,255,204,0.4);}\n\
.vt {transform:rotate(-60deg);transform-origin:0 0;}\n\
table.fstat {table-layout:fixed;padding:150px 15px 0 0;font-size:10px;column-width:30px;}\n\
.fstat th {width:55px;}\n\
.fstat td {text-align:left;width:35px;}\n\
.srccall {position:absolute;font-size:10px;z-index:7;overflow:hidden;color:black;text-align:center;white-space:nowrap;border-radius:5px;border:1px solid black;background:linear-gradient(to bottom right,#CCC,#969696);}\n\
.srccall:hover {color:white;font-weight:bold;border:1px solid white;}\n'
if(not sysvals.embedded):
aslib.addCSS(hf, sysvals, 1, False, extra)
# write the device timeline
hf.write(devtl.html)
# add boot specific html
statinfo = 'var devstats = {\n'
for n in sorted(devstats):
statinfo += '\t"%s": [\n\t\t"%s",\n' % (n, devstats[n]['info'])
if 'fstat' in devstats[n]:
funcs = devstats[n]['fstat']
for f in sorted(funcs, key=funcs.get, reverse=True):
if funcs[f][0] < 0.01 and len(funcs) > 10:
break
statinfo += '\t\t"%f|%s|%d",\n' % (funcs[f][0], f, funcs[f][1])
statinfo += '\t],\n'
statinfo += '};\n'
html = \
'<div id="devicedetailtitle"></div>\n'\
'<div id="devicedetail" style="display:none;">\n'\
'<div id="devicedetail0">\n'
for p in data.phases:
phase = data.dmesg[p]
html += devtl.html_phaselet.format(p+'_mode', '0', '100', phase['color'])
html += '</div>\n</div>\n'\
'<script type="text/javascript">\n'+statinfo+\
'</script>\n'
hf.write(html)
# add the callgraph html
if(sysvals.usecallgraph):
aslib.addCallgraphs(sysvals, hf, data)
# add the dmesg log as a hidden div
if sysvals.dmesglog:
hf.write('<div id="dmesglog" style="display:none;">\n')
for line in data.dmesgtext:
line = line.replace('<', '&lt;').replace('>', '&gt;')
hf.write(line)
hf.write('</div>\n')
if(not sysvals.embedded):
# write the footer and close
aslib.addScriptCode(hf, [data])
hf.write('</body>\n</html>\n')
else:
# embedded out will be loaded in a page, skip the js
hf.write('<div id=bounds style=display:none>%f,%f</div>' % \
(data.start*1000, data.end*1000))
hf.close()
return True
# Function: updateCron
# Description:
# (restore=False) Set the tool to run automatically on reboot
# (restore=True) Restore the original crontab
def updateCron(restore=False):
if not restore:
sysvals.rootUser(True)
crondir = '/var/spool/cron/crontabs/'
if not os.path.exists(crondir):
crondir = '/var/spool/cron/'
if not os.path.exists(crondir):
doError('%s not found' % crondir)
cronfile = crondir+'root'
backfile = crondir+'root-analyze_boot-backup'
cmd = sysvals.getExec('crontab')
if not cmd:
doError('crontab not found')
# on restore: move the backup cron back into place
if restore:
if os.path.exists(backfile):
shutil.move(backfile, cronfile)
call([cmd, cronfile])
return
# backup current cron and install new one with reboot
if os.path.exists(cronfile):
shutil.move(cronfile, backfile)
else:
fp = open(backfile, 'w')
fp.close()
res = -1
try:
fp = open(backfile, 'r')
op = open(cronfile, 'w')
for line in fp:
if not sysvals.myCronJob(line):
op.write(line)
continue
fp.close()
op.write('@reboot python %s\n' % sysvals.cronjobCmdString())
op.close()
res = call([cmd, cronfile])
except Exception, e:
print 'Exception: %s' % str(e)
shutil.move(backfile, cronfile)
res = -1
if res != 0:
doError('crontab failed')
# Function: updateGrub
# Description:
# update grub.cfg for all kernels with our parameters
def updateGrub(restore=False):
# call update-grub on restore
if restore:
try:
call(sysvals.blexec, stderr=PIPE, stdout=PIPE,
env={'PATH': '.:/sbin:/usr/sbin:/usr/bin:/sbin:/bin'})
except Exception, e:
print 'Exception: %s\n' % str(e)
return
# extract the option and create a grub config without it
sysvals.rootUser(True)
tgtopt = 'GRUB_CMDLINE_LINUX_DEFAULT'
cmdline = ''
grubfile = '/etc/default/grub'
tempfile = '/etc/default/grub.analyze_boot'
shutil.move(grubfile, tempfile)
res = -1
try:
fp = open(tempfile, 'r')
op = open(grubfile, 'w')
cont = False
for line in fp:
line = line.strip()
if len(line) == 0 or line[0] == '#':
continue
opt = line.split('=')[0].strip()
if opt == tgtopt:
cmdline = line.split('=', 1)[1].strip('\\')
if line[-1] == '\\':
cont = True
elif cont:
cmdline += line.strip('\\')
if line[-1] != '\\':
cont = False
else:
op.write('%s\n' % line)
fp.close()
# if the target option value is in quotes, strip them
sp = '"'
val = cmdline.strip()
if val and (val[0] == '\'' or val[0] == '"'):
sp = val[0]
val = val.strip(sp)
cmdline = val
# append our cmd line options
if len(cmdline) > 0:
cmdline += ' '
cmdline += sysvals.kernelParams()
# write out the updated target option
op.write('\n%s=%s%s%s\n' % (tgtopt, sp, cmdline, sp))
op.close()
res = call(sysvals.blexec)
os.remove(grubfile)
except Exception, e:
print 'Exception: %s' % str(e)
res = -1
# cleanup
shutil.move(tempfile, grubfile)
if res != 0:
doError('update grub failed')
# Function: updateKernelParams
# Description:
# update boot conf for all kernels with our parameters
def updateKernelParams(restore=False):
# find the boot loader
sysvals.getBootLoader()
if sysvals.bootloader == 'grub':
updateGrub(restore)
# Function: doError
# Description:
# generic error function for catastrophic failures
# Arguments:
# msg: the error message to print
# help: True if printHelp should be called after, False otherwise
def doError(msg, help=False):
if help == True:
printHelp()
print 'ERROR: %s\n' % msg
sys.exit()
# Function: printHelp
# Description:
# print out the help text
def printHelp():
print('')
print('%s v%s' % (sysvals.title, sysvals.version))
print('Usage: bootgraph <options> <command>')
print('')
print('Description:')
print(' This tool reads in a dmesg log of linux kernel boot and')
print(' creates an html representation of the boot timeline up to')
print(' the start of the init process.')
print('')
print(' If no specific command is given the tool reads the current dmesg')
print(' and/or ftrace log and creates a timeline')
print('')
print(' Generates output files in subdirectory: boot-yymmdd-HHMMSS')
print(' HTML output: <hostname>_boot.html')
print(' raw dmesg output: <hostname>_boot_dmesg.txt')
print(' raw ftrace output: <hostname>_boot_ftrace.txt')
print('')
print('Options:')
print(' -h Print this help text')
print(' -v Print the current tool version')
print(' -addlogs Add the dmesg log to the html output')
print(' -o name Overrides the output subdirectory name when running a new test')
print(' default: boot-{date}-{time}')
print(' [advanced]')
print(' -f Use ftrace to add function detail (default: disabled)')
print(' -callgraph Add callgraph detail, can be very large (default: disabled)')
print(' -maxdepth N limit the callgraph data to N call levels (default: 2)')
print(' -mincg ms Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)')
print(' -timeprec N Number of significant digits in timestamps (0:S, 3:ms, [6:us])')
print(' -expandcg pre-expand the callgraph data in the html output (default: disabled)')
print(' -func list Limit ftrace to comma-delimited list of functions (default: do_one_initcall)')
print(' -cgfilter S Filter the callgraph output in the timeline')
print(' -bl name Use the following boot loader for kernel params (default: grub)')
print(' -reboot Reboot the machine automatically and generate a new timeline')
print(' -manual Show the steps to generate a new timeline manually (used with -reboot)')
print('')
print('Other commands:')
print(' -flistall Print all functions capable of being captured in ftrace')
print(' -sysinfo Print out system info extracted from BIOS')
print(' [redo]')
print(' -dmesg file Create HTML output using dmesg input (used with -ftrace)')
print(' -ftrace file Create HTML output using ftrace input (used with -dmesg)')
print('')
return True
# ----------------- MAIN --------------------
# exec start (skipped if script is loaded as library)
if __name__ == '__main__':
# loop through the command line arguments
cmd = ''
testrun = True
simplecmds = ['-sysinfo', '-kpupdate', '-flistall', '-checkbl']
args = iter(sys.argv[1:])
for arg in args:
if(arg == '-h'):
printHelp()
sys.exit()
elif(arg == '-v'):
print("Version %s" % sysvals.version)
sys.exit()
elif(arg in simplecmds):
cmd = arg[1:]
elif(arg == '-f'):
sysvals.useftrace = True
elif(arg == '-callgraph'):
sysvals.useftrace = True
sysvals.usecallgraph = True
elif(arg == '-mincg'):
sysvals.mincglen = aslib.getArgFloat('-mincg', args, 0.0, 10000.0)
elif(arg == '-cgfilter'):
try:
val = args.next()
except:
doError('No callgraph functions supplied', True)
sysvals.setDeviceFilter(val)
elif(arg == '-bl'):
try:
val = args.next()
except:
doError('No boot loader name supplied', True)
if val.lower() not in ['grub']:
doError('Unknown boot loader: %s' % val, True)
sysvals.bootloader = val.lower()
elif(arg == '-timeprec'):
sysvals.setPrecision(aslib.getArgInt('-timeprec', args, 0, 6))
elif(arg == '-maxdepth'):
sysvals.max_graph_depth = aslib.getArgInt('-maxdepth', args, 0, 1000)
elif(arg == '-func'):
try:
val = args.next()
except:
doError('No filter functions supplied', True)
sysvals.useftrace = True
sysvals.usecallgraph = True
sysvals.rootCheck(True)
sysvals.setGraphFilter(val)
elif(arg == '-ftrace'):
try:
val = args.next()
except:
doError('No ftrace file supplied', True)
if(os.path.exists(val) == False):
doError('%s does not exist' % val)
testrun = False
sysvals.ftracefile = val
elif(arg == '-addlogs'):
sysvals.dmesglog = True
elif(arg == '-expandcg'):
sysvals.cgexp = True
elif(arg == '-dmesg'):
try:
val = args.next()
except:
doError('No dmesg file supplied', True)
if(os.path.exists(val) == False):
doError('%s does not exist' % val)
if(sysvals.htmlfile == val or sysvals.outfile == val):
doError('Output filename collision')
testrun = False
sysvals.dmesgfile = val
elif(arg == '-o'):
try:
val = args.next()
except:
doError('No subdirectory name supplied', True)
sysvals.testdir = sysvals.setOutputFolder(val)
elif(arg == '-reboot'):
sysvals.reboot = True
elif(arg == '-manual'):
sysvals.reboot = True
sysvals.manual = True
# remaining options are only for cron job use
elif(arg == '-cronjob'):
sysvals.iscronjob = True
else:
doError('Invalid argument: '+arg, True)
# compatibility errors and access checks
if(sysvals.iscronjob and (sysvals.reboot or \
sysvals.dmesgfile or sysvals.ftracefile or cmd)):
doError('-cronjob is meant for batch purposes only')
if(sysvals.reboot and (sysvals.dmesgfile or sysvals.ftracefile)):
doError('-reboot and -dmesg/-ftrace are incompatible')
if cmd or sysvals.reboot or sysvals.iscronjob or testrun:
sysvals.rootCheck(True)
if (testrun and sysvals.useftrace) or cmd == 'flistall':
if not sysvals.verifyFtrace():
doError('Ftrace is not properly enabled')
# run utility commands
sysvals.cpuInfo()
if cmd != '':
if cmd == 'kpupdate':
updateKernelParams()
elif cmd == 'flistall':
for f in sysvals.getBootFtraceFilterFunctions():
print f
elif cmd == 'checkbl':
sysvals.getBootLoader()
print 'Boot Loader: %s\n%s' % (sysvals.bootloader, sysvals.blexec)
elif(cmd == 'sysinfo'):
sysvals.printSystemInfo()
sys.exit()
# reboot: update grub, setup a cronjob, and reboot
if sysvals.reboot:
if (sysvals.useftrace or sysvals.usecallgraph) and \
not sysvals.checkFtraceKernelVersion():
doError('Ftrace functionality requires kernel v4.10 or newer')
if not sysvals.manual:
updateKernelParams()
updateCron()
call('reboot')
else:
sysvals.manualRebootRequired()
sys.exit()
# cronjob: remove the cronjob, grub changes, and disable ftrace
if sysvals.iscronjob:
updateCron(True)
updateKernelParams(True)
try:
sysvals.fsetVal('0', 'tracing_on')
except:
pass
# testrun: generate copies of the logs
if testrun:
retrieveLogs()
else:
sysvals.setOutputFile()
# process the log data
if sysvals.dmesgfile:
data = parseKernelLog()
if(not data.valid):
doError('No initcall data found in %s' % sysvals.dmesgfile)
if sysvals.useftrace and sysvals.ftracefile:
parseTraceLog(data)
else:
doError('dmesg file required')
print(' Host: %s' % sysvals.hostname)
print(' Test time: %s' % sysvals.testtime)
print(' Boot time: %s' % data.boottime)
print('Kernel Version: %s' % sysvals.kernel)
print(' Kernel start: %.3f' % (data.start * 1000))
print('Usermode start: %.3f' % (data.tUserMode * 1000))
print('Last Init Call: %.3f' % (data.end * 1000))
# handle embedded output logs
if(sysvals.outfile and sysvals.embedded):
fp = open(sysvals.outfile, 'w')
fp.write('pass %s initstart %.3f end %.3f boot %s\n' %
(data.valid, data.tUserMode*1000, data.end*1000, data.boottime))
fp.close()
createBootGraph(data)
# if running as root, change output dir owner to sudo_user
if testrun and os.path.isdir(sysvals.testdir) and \
os.getuid() == 0 and 'SUDO_USER' in os.environ:
cmd = 'chown -R {0}:{0} {1} > /dev/null 2>&1'
call(cmd.format(os.environ['SUDO_USER'], sysvals.testdir), shell=True)
| gpl-2.0 |
sqlobject/sqlobject | sqlobject/sqlite/sqliteconnection.py | 2 | 16587 | import base64
import os
try:
from _thread import get_ident
except ImportError:
from thread import get_ident
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
from sqlobject import col
from sqlobject import dberrors
from sqlobject.dbconnection import DBAPI, Boolean
sqlite2_Binary = None
class ErrorMessage(str):
def __new__(cls, e):
obj = str.__new__(cls, e.args[0])
obj.code = None
obj.module = e.__module__
obj.exception = e.__class__.__name__
return obj
class SQLiteConnection(DBAPI):
supportTransactions = True
dbName = 'sqlite'
schemes = [dbName]
def __init__(self, filename, autoCommit=1, **kw):
drivers = kw.pop('driver', None) or \
'supersqlite,pysqlite2,sqlite3,sqlite'
for driver in drivers.split(','):
driver = driver.strip()
if not driver:
continue
try:
if driver == 'supersqlite':
from supersqlite import sqlite3 as sqlite
self.using_sqlite2 = True
elif driver in ('sqlite2', 'pysqlite2'):
from pysqlite2 import dbapi2 as sqlite
self.using_sqlite2 = True
elif driver == 'sqlite3':
import sqlite3 as sqlite
self.using_sqlite2 = True
elif driver in ('sqlite', 'sqlite1'):
import sqlite
self.using_sqlite2 = False
else:
raise ValueError(
'Unknown SQLite driver "%s", '
'expected supersqlite, pysqlite2, sqlite3 '
'or sqlite' % driver)
except ImportError:
pass
else:
break
else:
raise ImportError(
'Cannot find an SQLite driver, tried %s' % drivers)
if self.using_sqlite2:
sqlite.encode = base64.b64encode
sqlite.decode = base64.b64decode
self.module = sqlite
self.filename = filename # full path to sqlite-db-file
self._memory = filename == ':memory:'
if self._memory and not self.using_sqlite2:
raise ValueError("You must use sqlite2 to use in-memory databases")
# connection options
opts = {}
if self.using_sqlite2:
if autoCommit:
opts["isolation_level"] = None
global sqlite2_Binary
if sqlite2_Binary is None:
sqlite2_Binary = sqlite.Binary
sqlite.Binary = lambda s: sqlite2_Binary(sqlite.encode(s))
if 'factory' in kw:
factory = kw.pop('factory')
if isinstance(factory, str):
factory = globals()[factory]
opts['factory'] = factory(sqlite)
else:
opts['autocommit'] = Boolean(autoCommit)
if 'encoding' in kw:
opts['encoding'] = kw.pop('encoding')
if 'mode' in kw:
opts['mode'] = int(kw.pop('mode'), 0)
if 'timeout' in kw:
if self.using_sqlite2:
opts['timeout'] = float(kw.pop('timeout'))
else:
opts['timeout'] = int(float(kw.pop('timeout')) * 1000)
if 'check_same_thread' in kw:
opts["check_same_thread"] = Boolean(kw.pop('check_same_thread'))
# use only one connection for sqlite - it supports multiple
# cursors per connection
self._connOptions = opts
self.use_table_info = Boolean(kw.pop("use_table_info", True))
DBAPI.__init__(self, **kw)
self._threadPool = {}
self._threadOrigination = {}
if self._memory:
self.makeMemoryConnection()
@classmethod
def _connectionFromParams(cls, user, password, host, port, path, args):
assert host is None and port is None, (
"SQLite can only be used locally (with a URI like "
"sqlite:/file or sqlite:///file, not sqlite://%s%s)" %
(host, port and ':%r' % port or ''))
assert user is None and password is None, (
"You may not provide usernames or passwords for SQLite "
"databases")
if path == "/:memory:":
path = ":memory:"
return cls(filename=path, **args)
def oldUri(self):
path = self.filename
if path == ":memory:":
path = "/:memory:"
else:
path = "//" + path
return 'sqlite:%s' % path
def uri(self):
path = self.filename
if path == ":memory:":
path = "/:memory:"
else:
if path.startswith('/'):
path = "//" + path
else:
path = "///" + path
path = quote(path)
return 'sqlite:%s' % path
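# e.g. (illustrative): filename '/var/db/app.db' -> 'sqlite:///var/db/app.db',
# and ':memory:' -> 'sqlite:/:memory:'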
def getConnection(self):
# SQLite can't share connections between threads, and so can't
# pool connections. Since we are isolating threads here, we
# don't have to worry about locking as much.
if self._memory:
conn = self.makeConnection()
self._connectionNumbers[id(conn)] = self._connectionCount
self._connectionCount += 1
return conn
threadid = get_ident()
if (self._pool is not None and threadid in self._threadPool):
conn = self._threadPool[threadid]
del self._threadPool[threadid]
if conn in self._pool:
self._pool.remove(conn)
else:
conn = self.makeConnection()
if self._pool is not None:
self._threadOrigination[id(conn)] = threadid
self._connectionNumbers[id(conn)] = self._connectionCount
self._connectionCount += 1
if self.debug:
s = 'ACQUIRE'
if self._pool is not None:
s += ' pool=[%s]' % ', '.join(
[str(self._connectionNumbers[id(v)]) for v in self._pool])
self.printDebug(conn, s, 'Pool')
return conn
def releaseConnection(self, conn, explicit=False):
if self._memory:
return
threadid = self._threadOrigination.get(id(conn))
DBAPI.releaseConnection(self, conn, explicit=explicit)
if (self._pool is not None and threadid
and threadid not in self._threadPool):
self._threadPool[threadid] = conn
else:
if self._pool and conn in self._pool:
self._pool.remove(conn)
conn.close()
def _setAutoCommit(self, conn, auto):
if self.using_sqlite2:
if auto:
conn.isolation_level = None
else:
conn.isolation_level = ""
else:
conn.autocommit = auto
def _setIsolationLevel(self, conn, level):
if not self.using_sqlite2:
return
conn.isolation_level = level
def makeMemoryConnection(self):
self._memoryConn = self.module.connect(
self.filename, **self._connOptions)
# Convert text data from SQLite to str, not unicode -
# SQLObject converts it to unicode itself.
self._memoryConn.text_factory = str
def makeConnection(self):
if self._memory:
return self._memoryConn
conn = self.module.connect(self.filename, **self._connOptions)
conn.text_factory = str # Convert text data to str, not unicode
return conn
def close(self):
DBAPI.close(self)
self._threadPool = {}
if self._memory:
self._memoryConn.close()
self.makeMemoryConnection()
def _executeRetry(self, conn, cursor, query):
if self.debug:
self.printDebug(conn, query, 'QueryR')
try:
return cursor.execute(query)
except self.module.OperationalError as e:
raise dberrors.OperationalError(ErrorMessage(e))
except self.module.IntegrityError as e:
msg = ErrorMessage(e)
if msg.startswith('column') and msg.endswith('not unique') \
or msg.startswith('UNIQUE constraint failed:'):
raise dberrors.DuplicateEntryError(msg)
else:
raise dberrors.IntegrityError(msg)
except self.module.InternalError as e:
raise dberrors.InternalError(ErrorMessage(e))
except self.module.ProgrammingError as e:
raise dberrors.ProgrammingError(ErrorMessage(e))
except self.module.DataError as e:
raise dberrors.DataError(ErrorMessage(e))
except self.module.NotSupportedError as e:
raise dberrors.NotSupportedError(ErrorMessage(e))
except self.module.DatabaseError as e:
raise dberrors.DatabaseError(ErrorMessage(e))
except self.module.InterfaceError as e:
raise dberrors.InterfaceError(ErrorMessage(e))
except self.module.Warning as e:
raise Warning(ErrorMessage(e))
except self.module.Error as e:
raise dberrors.Error(ErrorMessage(e))
def _queryInsertID(self, conn, soInstance, id, names, values):
table = soInstance.sqlmeta.table
idName = soInstance.sqlmeta.idName
c = conn.cursor()
if id is not None:
names = [idName] + names
values = [id] + values
q = self._insertSQL(table, names, values)
if self.debug:
self.printDebug(conn, q, 'QueryIns')
self._executeRetry(conn, c, q)
# lastrowid is a DB-API extension from "PEP 0249":
if id is None:
id = int(c.lastrowid)
c.close()
if self.debugOutput:
self.printDebug(conn, id, 'QueryIns', 'result')
return id
def _insertSQL(self, table, names, values):
if not names:
assert not values
# INSERT INTO table () VALUES () isn't allowed in
# SQLite (though it is in other databases)
return ("INSERT INTO %s VALUES (NULL)" % table)
else:
return DBAPI._insertSQL(self, table, names, values)
@classmethod
def _queryAddLimitOffset(cls, query, start, end):
if not start:
return "%s LIMIT %i" % (query, end)
if not end:
return "%s LIMIT 0 OFFSET %i" % (query, start)
return "%s LIMIT %i OFFSET %i" % (query, end - start, start)
def createColumn(self, soClass, col):
return col.sqliteCreateSQL()
def createReferenceConstraint(self, soClass, col):
return None
def createIDColumn(self, soClass):
return self._createIDColumn(soClass.sqlmeta)
def _createIDColumn(self, sqlmeta):
if sqlmeta.idType == str:
return '%s TEXT PRIMARY KEY' % sqlmeta.idName
return '%s INTEGER PRIMARY KEY AUTOINCREMENT' % sqlmeta.idName
def joinSQLType(self, join):
return 'INT NOT NULL'
def tableExists(self, tableName):
result = self.queryOne(
"SELECT tbl_name FROM sqlite_master "
"WHERE type='table' AND tbl_name = '%s'" % tableName)
# turn it into a boolean:
return not not result
def createIndexSQL(self, soClass, index):
return index.sqliteCreateIndexSQL(soClass)
def addColumn(self, tableName, column):
self.query('ALTER TABLE %s ADD COLUMN %s' %
(tableName,
column.sqliteCreateSQL()))
self.query('VACUUM')
def delColumn(self, sqlmeta, column):
self.recreateTableWithoutColumn(sqlmeta, column)
def recreateTableWithoutColumn(self, sqlmeta, column):
new_name = sqlmeta.table + '_ORIGINAL'
self.query('ALTER TABLE %s RENAME TO %s' % (sqlmeta.table, new_name))
cols = [self._createIDColumn(sqlmeta)] + \
[self.createColumn(None, col)
for col in sqlmeta.columnList if col.name != column.name]
cols = ",\n".join([" %s" % c for c in cols])
self.query('CREATE TABLE %s (\n%s\n)' % (sqlmeta.table, cols))
all_columns = ', '.join(
[sqlmeta.idName] + [col.dbName for col in sqlmeta.columnList])
self.query('INSERT INTO %s (%s) SELECT %s FROM %s' % (
sqlmeta.table, all_columns, all_columns, new_name))
self.query('DROP TABLE %s' % new_name)
def columnsFromSchema(self, tableName, soClass):
if self.use_table_info:
return self._columnsFromSchemaTableInfo(tableName, soClass)
else:
return self._columnsFromSchemaParse(tableName, soClass)
def _columnsFromSchemaTableInfo(self, tableName, soClass):
colData = self.queryAll("PRAGMA table_info(%s)" % tableName)
results = []
for index, field, t, nullAllowed, default, key in colData:
if field == soClass.sqlmeta.idName:
continue
colClass, kw = self.guessClass(t)
if default == 'NULL':
nullAllowed = True
default = None
kw['name'] = soClass.sqlmeta.style.dbColumnToPythonAttr(field)
kw['dbName'] = field
kw['notNone'] = not nullAllowed
kw['default'] = default
# @@ skip key...
# @@ skip extra...
results.append(colClass(**kw))
return results
def _columnsFromSchemaParse(self, tableName, soClass):
colData = self.queryOne(
"SELECT sql FROM sqlite_master WHERE type='table' AND name='%s'" %
tableName)
if not colData:
raise ValueError(
'The table %s was not found in the database. Load failed.' %
tableName)
colData = colData[0].split('(', 1)[1].strip()[:-2]
while True:
start = colData.find('(')
if start == -1:
break
end = colData.find(')', start)
if end == -1:
break
colData = colData[:start] + colData[end + 1:]
results = []
for colDesc in colData.split(','):
parts = colDesc.strip().split(' ', 2)
field = parts[0].strip()
# skip comments
if field.startswith('--'):
continue
# get rid of enclosing quotes
if field[0] == field[-1] == '"':
field = field[1:-1]
if field == getattr(soClass.sqlmeta, 'idName', 'id'):
continue
colClass, kw = self.guessClass(parts[1].strip())
if len(parts) == 2:
index_info = ''
else:
index_info = parts[2].strip().upper()
kw['name'] = soClass.sqlmeta.style.dbColumnToPythonAttr(field)
kw['dbName'] = field
import re
nullble = re.search(r'(\b\S*)\sNULL', index_info)
default = re.search(
r"DEFAULT\s((?:\d[\dA-FX.]*)|(?:'[^']*')|(?:#[^#]*#))",
index_info)
kw['notNone'] = nullble and nullble.group(1) == 'NOT'
kw['default'] = default and default.group(1)
# @@ skip key...
# @@ skip extra...
results.append(colClass(**kw))
return results
def guessClass(self, t):
t = t.upper()
if t.find('INT') >= 0:
return col.IntCol, {}
elif t.find('TEXT') >= 0 or t.find('CHAR') >= 0 or t.find('CLOB') >= 0:
return col.StringCol, {'length': 2 ** 32 - 1}
elif t.find('BLOB') >= 0:
return col.BLOBCol, {"length": 2 ** 32 - 1}
elif t.find('REAL') >= 0 or t.find('FLOAT') >= 0:
return col.FloatCol, {}
elif t.find('DECIMAL') >= 0:
return col.DecimalCol, {'size': None, 'precision': None}
elif t.find('BOOL') >= 0:
return col.BoolCol, {}
else:
return col.Col, {}
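# e.g. (illustrative): 'BIGINT' -> IntCol, 'VARCHAR(255)' -> StringCol,
# 'NUMERIC' -> Col (no match falls through to the generic column)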
def listTables(self):
return [v[0] for v in self.queryAll(
"SELECT name FROM sqlite_master WHERE type='table' ORDER BY name")]
def listDatabases(self):
# The pragma returns a list of (index, name, filename)
return [v[1] for v in self.queryAll("PRAGMA database_list")]
def createEmptyDatabase(self):
if self._memory:
return
open(self.filename, 'w').close()
def dropDatabase(self):
if self._memory:
return
os.unlink(self.filename)
| lgpl-2.1 |
OrmondHugh/2048Bot | src/src/utility.py | 1 | 3044 | import readBoard as rb
import State
import copy
def getPossibilities(state):
possibilities = []
#Loop through each tile to find the spaces that can be filled by a spawning tile
for i in range(4):
for j in range(4):
#If we find an empty space, add a state where a two is spawned, and one where a four is spawned
if state.board[i][j] == ' ':
for value in range(2,5,2):
child = copy.deepcopy(state)
if value == 4:
child.fourSpawned == True
child.board[i][j] = value
possibilities.append(child)
#Assign the probability of each state occuring to it's object
for node in possibilities:
if node.fourSpawned:
node.probability = 0.1 / len(possibilities)
else:
node.probability = 0.9 / len(possibilities)
return possibilities
def getUtility(board, parentBoard):
utility = 0
#Count the empty spaces
emptySpacesCount = 0
for i in range(4):
for j in range(4):
if board[i][j] == ' ':
emptySpacesCount = emptySpacesCount + 1
#A full board is very likely to either be a losing state, or be close to one
if emptySpacesCount == 0:
return 0
#60 of the total utility is allocated to how clear the board is.
#A full 60 is awarded if there are at least 7 clear squares; if there are fewer, utility
#is added in proportion to how much of the board is clear
if emptySpacesCount >= 7:
utility = utility + 60.0
else:
utility = utility + 60.0*(emptySpacesCount/7)
#Find the biggest tile. If it is in the top right, add 40 to utility
biggest = 0
for i in range(4):
for j in range(4):
if board[i][j] != ' ' and board[i][j] > biggest:
biggest = board[i][j]
if board[0][3] == biggest:
utility = utility + 40.0
#If we also have a full top line of different values, add more utility
if board[0][2] != ' ' and board[0][2] != board[0][3]:
utility = utility + 10.0
if board[0][1] != ' ' and board[0][1] != board[0][2]:
utility = utility + 10.0
if board[0][0] != ' ' and board[0][0] != board[0][1]:
utility = utility + 10.0
#Give utility for making the main tiles at the top bigger
if board[0][3] == parentBoard[0][3] * 2:
utility = utility + 35.0
if board[0][2] == parentBoard[0][2] * 2:
utility = utility + 30.0
if board[0][1] != ' ' and board[0][1] == parentBoard[0][1] * 2:
utility = utility + 25.0
if board[0][0] != ' ' and board[0][0] == parentBoard[0][0] * 2:
utility = utility + 15.0
return utility
def getExpectedValue(state, possibilities):
EV = 0
for child in possibilities:
EV = EV + (child.probability * getUtility(child.board, state.board))
return EV
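#This is a one-ply expectimax chance node:
#EV(move) = sum over spawn outcomes of P(outcome) * utility(outcome board).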
if __name__ == '__main__':
currentBoard = State.State(rb.readBoard(), '', False, 0, 0)
children = currentBoard.getChildren()
for child in children:
child.EV = getExpectedValue(child, getPossibilities(child))
print("Direction: ", child.path)
print("Utility: ", child.EV) | mit |
mcfletch/AutobahnPython | autobahn/autobahn/websocket/interfaces.py | 10 | 11716 | ###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ['IWebSocketChannel',
'IWebSocketChannelFrameApi',
'IWebSocketChannelStreamingApi']
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class IWebSocketChannel(object):
"""
A WebSocket channel is a bidirectional, full-duplex, ordered, reliable message channel
over a WebSocket connection as specified in RFC6455.
This interface defines a message-based API to WebSocket plus auxiliary hooks
and methods.
"""
@abc.abstractmethod
def onConnect(self, requestOrResponse):
"""
Callback fired during WebSocket opening handshake when a client connects (with
request from client) or when a server connection is established (with response from
server).
:param requestOrResponse: Connection request or response.
:type requestOrResponse: Instance of :class:`autobahn.websocket.protocol.ConnectionRequest`
or :class:`autobahn.websocket.protocol.ConnectionResponse`.
"""
@abc.abstractmethod
def onOpen(self):
"""
Callback fired when the initial WebSocket opening handshake was completed.
You can now send and receive WebSocket messages.
"""
@abc.abstractmethod
def sendMessage(self, payload, isBinary = False, fragmentSize = None, sync = False, doNotCompress = False):
"""
Send a WebSocket message.
You can send text or binary messages, and optionally specify a payload fragment size.
When the latter is given, the payload will be split up into WebSocket frames each with
payload length `<= fragmentSize`.
:param payload: The message payload.
:type payload: bytes
:param isBinary: `True` iff payload is binary, else the payload must be UTF-8 encoded text.
:type isBinary: bool
:param fragmentSize: Fragment message into WebSocket fragments of this size.
:type fragmentSize: int
:param sync: Iff `True`, try to force data onto the wire immediately. Note: do NOT use
this normally unless you know what you are doing. Performance likely will
suffer significantly. This feature is mainly here for use by Autobahn|Testsuite.
:type sync: bool
:param doNotCompress: Iff `True`, never compress this message. This only applies to
Hybi-Mode and only when WebSocket compression has been negotiated on
the WebSocket connection. Use when you know the payload
uncompressible (e.g. encrypted or already compressed).
:type doNotCompress: bool
"""
@abc.abstractmethod
def onMessage(self, payload, isBinary):
"""
Callback fired when a complete WebSocket message was received.
:param payload: Message payload (UTF-8 encoded text or binary). Can also be empty when
the WebSocket message contained no payload.
:type payload: bytes
:param isBinary: `True` iff payload is binary, else the payload is UTF-8 encoded text.
:type isBinary: bool
"""
@abc.abstractmethod
def sendClose(self, code = None, reason = None):
"""
Starts a WebSocket closing handshake tearing down the WebSocket connection.
:param code: An optional close status code (`1000` for normal close or `3000-4999` for
application specific close).
:type code: int
:param reason: An optional close reason (a string; when present, a status
code MUST also be present).
:type reason: str
"""
@abc.abstractmethod
def onClose(self, wasClean, code, reason):
"""
Callback fired when the WebSocket connection has been closed (WebSocket closing
handshake has been finished or the connection was closed uncleanly).
:param wasClean: True, iff the WebSocket connection was closed cleanly.
:type wasClean: bool
:param code: None or close status code (as sent by the WebSocket peer).
:type code: int
:param reason: None or close reason (as sent by the WebSocket peer).
:type reason: str
"""
@abc.abstractmethod
def sendPreparedMessage(self, preparedMsg):
"""
Send a message that was previously prepared with :func:`autobahn.websocket.protocol.WebSocketFactory.prepareMessage`.
:param preparedMsg: A previously prepared message.
:type preparedMsg: Instance of :class:`autobahn.websocket.protocol.PreparedMessage`.
"""
@abc.abstractmethod
def sendPing(self, payload = None):
"""
Send a WebSocket ping to the peer.
A peer is expected to pong back the payload as soon as "practical". When more than
one ping is outstanding at a peer, the peer may elect to respond only to the last ping.
:param payload: An (optional) arbitrary payload of length `<126` octets.
:type payload: bytes
"""
@abc.abstractmethod
def onPing(self, payload):
"""
Callback fired when a WebSocket ping was received. A default implementation responds
by sending a WebSocket pong.
:param payload: Payload of ping (when there was any). Can be arbitrary, up to `125` octets.
:type payload: bytes
"""
@abc.abstractmethod
def sendPong(self, payload = None):
"""
Send a WebSocket pong to the peer.
A WebSocket pong may be sent unsolicited. This serves as a unidirectional heartbeat.
A response to an unsolicited pong is "not expected".
:param payload: An (optional) arbitrary payload of length < 126 octets.
:type payload: bytes
"""
@abc.abstractmethod
def onPong(self, payload):
"""
Callback fired when a WebSocket pong was received. A default implementation does nothing.
:param payload: Payload of pong (when there was any). Can be arbitrary, up to 125 octets.
:type payload: bytes
"""
class IWebSocketChannelFrameApi(IWebSocketChannel):
"""
Frame-based API to a WebSocket channel.
"""
@abc.abstractmethod
def onMessageBegin(self, isBinary):
"""
Callback fired when receiving of a new WebSocket message has begun.
:param isBinary: `True` iff payload is binary, else the payload is UTF-8 encoded text.
:type isBinary: bool
"""
@abc.abstractmethod
def onMessageFrame(self, payload):
"""
Callback fired when a complete WebSocket message frame for a previously begun
WebSocket message has been received.
:param payload: Message frame payload (a list of chunks received).
:type payload: list of bytes
"""
@abc.abstractmethod
def onMessageEnd(self):
"""
Callback fired when a WebSocket message has been completely received (the last
WebSocket frame for that message has been received).
"""
@abc.abstractmethod
def beginMessage(self, isBinary = False, doNotCompress = False):
"""
Begin sending a new WebSocket message.
:param isBinary: `True` iff payload is binary, else the payload must be UTF-8 encoded text.
:type isBinary: bool
:param doNotCompress: Iff `True`, never compress this message. This only applies to
Hybi-Mode and only when WebSocket compression has been negotiated on
the WebSocket connection. Use when you know the payload
uncompressible (e.g. encrypted or already compressed).
:type doNotCompress: bool
"""
@abc.abstractmethod
def sendMessageFrame(self, payload, sync = False):
"""
When a message has been previously begun, send a complete message frame in one go.
:param payload: The message frame payload. When sending a text message, the payload must
be UTF-8 encoded already.
:type payload: bytes
:param sync: Iff `True`, try to force data onto the wire immediately. Note: do NOT use
this normally unless you know what you are doing. Performance likely will
suffer significantly. This feature is mainly here for use by Autobahn|Testsuite.
:type sync: bool
"""
@abc.abstractmethod
def endMessage(self):
"""
End a previously begun message. No more frames may be sent (for that message).
You have to begin a new message before sending again.
"""
class IWebSocketChannelStreamingApi(IWebSocketChannelFrameApi):
"""
Streaming API to a WebSocket channel.
"""
@abc.abstractmethod
def onMessageFrameBegin(self, length):
"""
Callback fired when receiving a new message frame has begun.
A default implementation will prepare to buffer message frame data.
:param length: Payload length of message frame which is subsequently received.
:type length: int
"""
@abc.abstractmethod
def onMessageFrameData(self, payload):
"""
Callback fired when receiving data within a previously begun message frame.
A default implementation will buffer data for frame.
:param payload: Partial payload for message frame.
:type payload: bytes
"""
@abc.abstractmethod
def onMessageFrameEnd(self):
"""
Callback fired when a previously begun message frame has been completely received.
A default implementation will flatten the buffered frame data and
fire `onMessageFrame`.
"""
@abc.abstractmethod
def beginMessageFrame(self, length):
"""
Begin sending a new message frame.
:param length: Length of the frame which is to be started. Must be `>= 0` and `<= 2^63`.
:type length: int
"""
@abc.abstractmethod
def sendMessageFrameData(self, payload, sync = False):
"""
Send out data when within a message frame (message was begun, frame was begun).
Note that the frame is automatically ended when enough data has been sent.
In other words, there is no `endMessageFrame`, since you have begun the frame
specifying the frame length, which implicitly defined the frame end. This is different
from messages, which you begin *and* end explicitly, since a message can contain
an unlimited number of frames.
:param payload: Frame payload to send.
:type payload: bytes
:param sync: Iff `True`, try to force data onto the wire immediately. Note: do NOT use
this normally unless you know what you are doing. Performance likely will
suffer significantly. This feature is mainly here for use by Autobahn|Testsuite.
:type sync: bool
:returns: int -- When the currently sent message frame is still incomplete,
returns octets remaining to be sent. When the frame is complete,
returns `0`, when `< 0`, the amount of unconsumed data in payload
argument.
"""
| apache-2.0 |
jgelens/AutobahnTestSuite | autobahntestsuite/autobahntestsuite/case/case7_1_3.py | 14 | 1670 | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
class Case7_1_3(Case):
DESCRIPTION = """Send a ping after close message"""
EXPECTATION = """Clean close with normal code, no pong."""
def init(self):
self.suppressClose = True
def onConnectionLost(self, failedByMe):
Case.onConnectionLost(self, failedByMe)
if self.behaviorClose == Case.WRONG_CODE:
self.behavior = Case.FAILED
self.passed = False
self.result = self.resultClose
def onOpen(self):
payload = "Hello World!"
self.expected[Case.OK] = []
self.expectedClose = {"closedByMe":True,"closeCode":[self.p.CLOSE_STATUS_CODE_NORMAL],"requireClean":True}
#self.p.sendFrame(opcode = 1, payload = payload)
self.p.sendClose(self.p.CLOSE_STATUS_CODE_NORMAL)
self.p.sendFrame(opcode = 9)
self.p.killAfter(1)
| apache-2.0 |
KyleMao/simple-ner-ie | .ropeproject/config.py | 387 | 3461 | # The default ``config.py``
def set_prefs(prefs):
"""This function is called before opening the project"""
# Specify which files and folders to ignore in the project.
# Changes to ignored resources are not added to the history and
# VCSs. Also they are not returned in `Project.get_files()`.
# Note that ``?`` and ``*`` match all characters but slashes.
# '*.pyc': matches 'test.pyc' and 'pkg/test.pyc'
# 'mod*.pyc': matches 'test/mod1.pyc' but not 'mod/1.pyc'
# '.svn': matches 'pkg/.svn' and all of its children
# 'build/*.o': matches 'build/lib.o' but not 'build/sub/lib.o'
# 'build//*.o': matches 'build/lib.o' and 'build/sub/lib.o'
prefs['ignored_resources'] = ['*.pyc', '*~', '.ropeproject',
'.hg', '.svn', '_svn', '.git']
# Specifies which files should be considered python files. It is
# useful when you have scripts inside your project. Only files
# ending with ``.py`` are considered to be python files by
# default.
#prefs['python_files'] = ['*.py']
# Custom source folders: By default rope searches the project
# for finding source folders (folders that should be searched
# for finding modules). You can add paths to that list. Note
# that rope guesses project source folders correctly most of the
# time; use this if you have any problems.
# The folders should be relative to project root and use '/' for
# separating folders regardless of the platform rope is running on.
# 'src/my_source_folder' for instance.
#prefs.add('source_folders', 'src')
# You can extend python path for looking up modules
#prefs.add('python_path', '~/python/')
# Should rope save object information or not.
prefs['save_objectdb'] = True
prefs['compress_objectdb'] = False
# If `True`, rope analyzes each module when it is being saved.
prefs['automatic_soa'] = True
# The depth of calls to follow in static object analysis
prefs['soa_followed_calls'] = 0
# If `False` when running modules or unit tests "dynamic object
# analysis" is turned off. This makes them much faster.
prefs['perform_doa'] = True
# Rope can check the validity of its object DB when running.
prefs['validate_objectdb'] = True
# How many undos to hold?
prefs['max_history_items'] = 32
# Shows whether to save history across sessions.
prefs['save_history'] = True
prefs['compress_history'] = False
# Set the number spaces used for indenting. According to
# :PEP:`8`, it is best to use 4 spaces. Since most of rope's
# unit-tests use 4 spaces it is more reliable, too.
prefs['indent_size'] = 4
# Builtin and c-extension modules that are allowed to be imported
# and inspected by rope.
prefs['extension_modules'] = []
# Add all standard c-extensions to extension_modules list.
prefs['import_dynload_stdmods'] = True
# If `True` modules with syntax errors are considered to be empty.
# The default value is `False`; When `False` syntax errors raise
# `rope.base.exceptions.ModuleSyntaxError` exception.
prefs['ignore_syntax_errors'] = False
# If `True`, rope ignores unresolvable imports. Otherwise, they
# appear in the importing namespace.
prefs['ignore_bad_imports'] = False
def project_opened(project):
"""This function is called after opening the project"""
# Do whatever you like here!
| mit |
myerpengine/odoo | addons/product_extended/__init__.py | 374 | 1068 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_extended
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mediathread/mdtprint | app/bower_components/phantom/src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/text_format.py | 261 | 21737 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains routines for printing protocol messages in text format."""
__author__ = '[email protected] (Kenton Varda)'
import cStringIO
import re
from collections import deque
from google.protobuf.internal import type_checkers
from google.protobuf import descriptor
__all__ = [ 'MessageToString', 'PrintMessage', 'PrintField',
'PrintFieldValue', 'Merge' ]
# Infinity and NaN are not explicitly supported by Python pre-2.6, and
# float('inf') does not work on Windows (pre-2.6).
_INFINITY = 1e10000 # overflows, thus will actually be infinity.
_NAN = _INFINITY * 0
class ParseError(Exception):
"""Thrown in case of ASCII parsing error."""
def MessageToString(message, as_utf8=False, as_one_line=False):
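"""Converts a protocol message to its ASCII text format representation.
Args:
message: The protocol buffer message to print.
as_utf8: If True, non-ASCII text is written as UTF-8 rather than escaped.
as_one_line: If True, the message is printed on a single line.
Returns:
The text format string.
"""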
out = cStringIO.StringIO()
PrintMessage(message, out, as_utf8=as_utf8, as_one_line=as_one_line)
result = out.getvalue()
out.close()
if as_one_line:
return result.rstrip()
return result
def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False):
for field, value in message.ListFields():
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
for element in value:
PrintField(field, element, out, indent, as_utf8, as_one_line)
else:
PrintField(field, value, out, indent, as_utf8, as_one_line)
def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False):
"""Print a single field name/value pair. For repeated fields, the value
should be a single element."""
out.write(' ' * indent)
if field.is_extension:
out.write('[')
if (field.containing_type.GetOptions().message_set_wire_format and
field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
field.message_type == field.extension_scope and
field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):
out.write(field.message_type.full_name)
else:
out.write(field.full_name)
out.write(']')
elif field.type == descriptor.FieldDescriptor.TYPE_GROUP:
# For groups, use the capitalized name.
out.write(field.message_type.name)
else:
out.write(field.name)
if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
# The colon is optional in this case, but our cross-language golden files
# don't include it.
out.write(': ')
PrintFieldValue(field, value, out, indent, as_utf8, as_one_line)
if as_one_line:
out.write(' ')
else:
out.write('\n')
def PrintFieldValue(field, value, out, indent=0,
as_utf8=False, as_one_line=False):
"""Print a single field value (not including name). For repeated fields,
the value should be a single element."""
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
if as_one_line:
out.write(' { ')
PrintMessage(value, out, indent, as_utf8, as_one_line)
out.write('}')
else:
out.write(' {\n')
PrintMessage(value, out, indent + 2, as_utf8, as_one_line)
out.write(' ' * indent + '}')
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
out.write(field.enum_type.values_by_number[value].name)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
out.write('\"')
if type(value) is unicode:
out.write(_CEscape(value.encode('utf-8'), as_utf8))
else:
out.write(_CEscape(value, as_utf8))
out.write('\"')
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
if value:
out.write("true")
else:
out.write("false")
else:
out.write(str(value))
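# Round-trip usage sketch (assuming a generated message class `MyMessage`
# with a string field `name`):
#   msg = MyMessage()
#   Merge('name: "example"', msg)
#   MessageToString(msg)   # -> 'name: "example"\n'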
def Merge(text, message):
"""Merges an ASCII representation of a protocol message into a message.
Args:
text: Message ASCII representation.
message: A protocol buffer message to merge into.
Raises:
ParseError: On ASCII parsing problems.
"""
tokenizer = _Tokenizer(text)
while not tokenizer.AtEnd():
_MergeField(tokenizer, message)
def _MergeField(tokenizer, message):
"""Merges a single protocol message field into a message.
Args:
tokenizer: A tokenizer to parse the field name and values.
message: A protocol message to record the data.
Raises:
ParseError: In case of ASCII parsing problems.
"""
message_descriptor = message.DESCRIPTOR
if tokenizer.TryConsume('['):
name = [tokenizer.ConsumeIdentifier()]
while tokenizer.TryConsume('.'):
name.append(tokenizer.ConsumeIdentifier())
name = '.'.join(name)
if not message_descriptor.is_extendable:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" does not have extensions.' %
message_descriptor.full_name)
field = message.Extensions._FindExtensionByName(name)
if not field:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" not registered.' % name)
elif message_descriptor != field.containing_type:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" does not extend message type "%s".' % (
name, message_descriptor.full_name))
tokenizer.Consume(']')
else:
name = tokenizer.ConsumeIdentifier()
field = message_descriptor.fields_by_name.get(name, None)
# Group names are expected to be capitalized as they appear in the
# .proto file, which actually matches their type names, not their field
# names.
if not field:
field = message_descriptor.fields_by_name.get(name.lower(), None)
if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP:
field = None
if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and
field.message_type.name != name):
field = None
if not field:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" has no field named "%s".' % (
message_descriptor.full_name, name))
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
tokenizer.TryConsume(':')
if tokenizer.TryConsume('<'):
end_token = '>'
else:
tokenizer.Consume('{')
end_token = '}'
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
sub_message = message.Extensions[field].add()
else:
sub_message = getattr(message, field.name).add()
else:
if field.is_extension:
sub_message = message.Extensions[field]
else:
sub_message = getattr(message, field.name)
sub_message.SetInParent()
while not tokenizer.TryConsume(end_token):
if tokenizer.AtEnd():
raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token))
_MergeField(tokenizer, sub_message)
else:
_MergeScalarField(tokenizer, message, field)
def _MergeScalarField(tokenizer, message, field):
"""Merges a single protocol message scalar field into a message.
Args:
tokenizer: A tokenizer to parse the field value.
message: A protocol message to record the data.
field: The descriptor of the field to be merged.
Raises:
ParseError: In case of ASCII parsing problems.
RuntimeError: On runtime errors.
"""
tokenizer.Consume(':')
value = None
if field.type in (descriptor.FieldDescriptor.TYPE_INT32,
descriptor.FieldDescriptor.TYPE_SINT32,
descriptor.FieldDescriptor.TYPE_SFIXED32):
value = tokenizer.ConsumeInt32()
elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,
descriptor.FieldDescriptor.TYPE_SINT64,
descriptor.FieldDescriptor.TYPE_SFIXED64):
value = tokenizer.ConsumeInt64()
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,
descriptor.FieldDescriptor.TYPE_FIXED32):
value = tokenizer.ConsumeUint32()
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,
descriptor.FieldDescriptor.TYPE_FIXED64):
value = tokenizer.ConsumeUint64()
elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,
descriptor.FieldDescriptor.TYPE_DOUBLE):
value = tokenizer.ConsumeFloat()
elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:
value = tokenizer.ConsumeBool()
elif field.type == descriptor.FieldDescriptor.TYPE_STRING:
value = tokenizer.ConsumeString()
elif field.type == descriptor.FieldDescriptor.TYPE_BYTES:
value = tokenizer.ConsumeByteString()
elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:
# Enum can be specified by a number (the enum value), or by
# a string literal (the enum name).
enum_descriptor = field.enum_type
if tokenizer.LookingAtInteger():
number = tokenizer.ConsumeInt32()
enum_value = enum_descriptor.values_by_number.get(number, None)
if enum_value is None:
raise tokenizer.ParseErrorPreviousToken(
'Enum type "%s" has no value with number %d.' % (
enum_descriptor.full_name, number))
else:
identifier = tokenizer.ConsumeIdentifier()
enum_value = enum_descriptor.values_by_name.get(identifier, None)
if enum_value is None:
raise tokenizer.ParseErrorPreviousToken(
'Enum type "%s" has no value named %s.' % (
enum_descriptor.full_name, identifier))
value = enum_value.number
else:
raise RuntimeError('Unknown field type %d' % field.type)
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
message.Extensions[field].append(value)
else:
getattr(message, field.name).append(value)
else:
if field.is_extension:
message.Extensions[field] = value
else:
setattr(message, field.name, value)
class _Tokenizer(object):
"""Protocol buffer ASCII representation tokenizer.
This class handles the lower level string parsing by splitting it into
meaningful tokens.
It was directly ported from the Java protocol buffer API.
"""
_WHITESPACE = re.compile('(\\s|(#.*$))+', re.MULTILINE)
_TOKEN = re.compile(
'[a-zA-Z_][0-9a-zA-Z_+-]*|' # an identifier
'[0-9+-][0-9a-zA-Z_.+-]*|' # a number
'\"([^\"\n\\\\]|\\\\.)*(\"|\\\\?$)|' # a double-quoted string
'\'([^\'\n\\\\]|\\\\.)*(\'|\\\\?$)') # a single-quoted string
_IDENTIFIER = re.compile(r'\w+')
_INTEGER_CHECKERS = [type_checkers.Uint32ValueChecker(),
type_checkers.Int32ValueChecker(),
type_checkers.Uint64ValueChecker(),
type_checkers.Int64ValueChecker()]
_FLOAT_INFINITY = re.compile('-?inf(inity)?f?', re.IGNORECASE)
_FLOAT_NAN = re.compile("nanf?", re.IGNORECASE)
def __init__(self, text_message):
self._text_message = text_message
self._position = 0
self._line = -1
self._column = 0
self._token_start = None
self.token = ''
self._lines = deque(text_message.split('\n'))
self._current_line = ''
self._previous_line = 0
self._previous_column = 0
self._SkipWhitespace()
self.NextToken()
def AtEnd(self):
"""Checks the end of the text was reached.
Returns:
True iff the end was reached.
"""
return self.token == ''
def _PopLine(self):
while len(self._current_line) <= self._column:
if not self._lines:
self._current_line = ''
return
self._line += 1
self._column = 0
self._current_line = self._lines.popleft()
def _SkipWhitespace(self):
while True:
self._PopLine()
match = self._WHITESPACE.match(self._current_line, self._column)
if not match:
break
length = len(match.group(0))
self._column += length
def TryConsume(self, token):
"""Tries to consume a given piece of text.
Args:
token: Text to consume.
Returns:
True iff the text was consumed.
"""
if self.token == token:
self.NextToken()
return True
return False
def Consume(self, token):
"""Consumes a piece of text.
Args:
token: Text to consume.
Raises:
ParseError: If the text couldn't be consumed.
"""
if not self.TryConsume(token):
raise self._ParseError('Expected "%s".' % token)
def LookingAtInteger(self):
"""Checks if the current token is an integer.
Returns:
True iff the current token is an integer.
"""
if not self.token:
return False
c = self.token[0]
return (c >= '0' and c <= '9') or c == '-' or c == '+'
def ConsumeIdentifier(self):
"""Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed.
"""
result = self.token
if not self._IDENTIFIER.match(result):
raise self._ParseError('Expected identifier.')
self.NextToken()
return result
def ConsumeInt32(self):
"""Consumes a signed 32bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If a signed 32bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=True, is_long=False)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeUint32(self):
"""Consumes an unsigned 32bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If an unsigned 32bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=False, is_long=False)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeInt64(self):
"""Consumes a signed 64bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If a signed 64bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=True, is_long=True)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeUint64(self):
"""Consumes an unsigned 64bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If an unsigned 64bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=False, is_long=True)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeFloat(self):
"""Consumes an floating point number.
Returns:
The number parsed.
Raises:
ParseError: If a floating point number couldn't be consumed.
"""
text = self.token
if self._FLOAT_INFINITY.match(text):
self.NextToken()
if text.startswith('-'):
return -_INFINITY
return _INFINITY
if self._FLOAT_NAN.match(text):
self.NextToken()
return _NAN
try:
result = float(text)
except ValueError, e:
raise self._FloatParseError(e)
self.NextToken()
return result
def ConsumeBool(self):
"""Consumes a boolean value.
Returns:
The bool parsed.
Raises:
ParseError: If a boolean value couldn't be consumed.
"""
if self.token in ('true', 't', '1'):
self.NextToken()
return True
elif self.token in ('false', 'f', '0'):
self.NextToken()
return False
else:
raise self._ParseError('Expected "true" or "false".')
def ConsumeString(self):
"""Consumes a string value.
Returns:
The string parsed.
Raises:
ParseError: If a string value couldn't be consumed.
"""
bytes = self.ConsumeByteString()
try:
return unicode(bytes, 'utf-8')
except UnicodeDecodeError, e:
raise self._StringParseError(e)
def ConsumeByteString(self):
"""Consumes a byte array value.
Returns:
The array parsed (as a string).
Raises:
ParseError: If a byte array value couldn't be consumed.
"""
chunks = [self._ConsumeSingleByteString()]
while len(self.token) > 0 and self.token[0] in ('\'', '"'):
chunks.append(self._ConsumeSingleByteString())
return "".join(chunks)
def _ConsumeSingleByteString(self):
"""Consume one token of a string literal.
String literals (whether bytes or text) can come in multiple adjacent
tokens which are automatically concatenated, like in C or Python. This
method only consumes one token.
"""
text = self.token
if len(text) < 1 or text[0] not in ('\'', '"'):
raise self._ParseError('Expected string.')
if len(text) < 2 or text[-1] != text[0]:
raise self._ParseError('String missing ending quote.')
try:
result = _CUnescape(text[1:-1])
except ValueError, e:
raise self._ParseError(str(e))
self.NextToken()
return result
def _ParseInteger(self, text, is_signed=False, is_long=False):
"""Parses an integer.
Args:
text: The text to parse.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer value.
Raises:
ValueError: Thrown iff the text is not a valid integer.
"""
pos = 0
if text.startswith('-'):
pos += 1
base = 10
if text.startswith('0x', pos) or text.startswith('0X', pos):
base = 16
elif text.startswith('0', pos):
base = 8
# Do the actual parsing. Exception handling is propagated to caller.
result = int(text, base)
# Check if the integer is sane. Exceptions handled by callers.
checker = self._INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
checker.CheckValue(result)
return result
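# Examples: _ParseInteger('0x1F') -> 31 (hex), _ParseInteger('017') -> 15
# (octal), _ParseInteger('42') -> 42 (decimal).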
def ParseErrorPreviousToken(self, message):
"""Creates and *returns* a ParseError for the previously read token.
Args:
message: A message to set for the exception.
Returns:
A ParseError instance.
"""
return ParseError('%d:%d : %s' % (
self._previous_line + 1, self._previous_column + 1, message))
def _ParseError(self, message):
"""Creates and *returns* a ParseError for the current token."""
return ParseError('%d:%d : %s' % (
self._line + 1, self._column - len(self.token) + 1, message))
def _IntegerParseError(self, e):
return self._ParseError('Couldn\'t parse integer: ' + str(e))
def _FloatParseError(self, e):
return self._ParseError('Couldn\'t parse number: ' + str(e))
def _StringParseError(self, e):
return self._ParseError('Couldn\'t parse string: ' + str(e))
def NextToken(self):
"""Reads the next meaningful token."""
self._previous_line = self._line
self._previous_column = self._column
self._column += len(self.token)
self._SkipWhitespace()
if not self._lines and len(self._current_line) <= self._column:
self.token = ''
return
match = self._TOKEN.match(self._current_line, self._column)
if match:
token = match.group(0)
self.token = token
else:
self.token = self._current_line[self._column]
# text.encode('string_escape') does not seem to satisfy our needs as it
# encodes unprintable characters using two-digit hex escapes whereas our
# C++ unescaping function allows hex escapes to be any length. So,
# "\0011".encode('string_escape') ends up being "\\x011", which will be
# decoded in C++ as a single-character string with char code 0x11.
def _CEscape(text, as_utf8):
def escape(c):
o = ord(c)
if o == 10: return r"\n" # optional escape
if o == 13: return r"\r" # optional escape
if o == 9: return r"\t" # optional escape
if o == 39: return r"\'" # optional escape
if o == 34: return r'\"' # necessary escape
if o == 92: return r"\\" # necessary escape
# necessary escapes
if not as_utf8 and (o >= 127 or o < 32): return "\\%03o" % o
return c
return "".join([escape(c) for c in text])
_CUNESCAPE_HEX = re.compile('\\\\x([0-9a-fA-F]{2}|[0-9a-fA-F])')
def _CUnescape(text):
def ReplaceHex(m):
return chr(int(m.group(0)[2:], 16))
# This is required because the 'string_escape' encoding doesn't
# allow single-digit hex escapes (like '\xf').
result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
return result.decode('string_escape')
| mit |
zofuthan/airmozilla | airmozilla/surveys/views.py | 9 | 3959 | from collections import defaultdict
from django.shortcuts import get_object_or_404, render, redirect
from django.db import transaction
from django import forms
from .models import Survey, Question, Answer
@transaction.atomic
def load(request, id):
survey = get_object_or_404(Survey, id=id, active=True)
context = {'survey': survey}
questions = Question.objects.filter(survey=survey)
answers = Answer.objects.filter(question__in=questions)
if request.method == 'POST' and request.POST.get('resetmine'):
answers.filter(user=request.user).delete()
return redirect('surveys:load', survey.id)
show_answers = True # default
if request.user.is_authenticated():
# don't show answers if this is POST
your_answers = answers.filter(user=request.user)
if request.method == 'POST' or not your_answers:
show_answers = False
else:
your_answers = answers.none()
if show_answers:
# make a map of question -> [answers]
_answers = defaultdict(list)
for answer in answers:
_answers[answer.question].append(answer)
questions_dicts = []
for question in questions:
if not question.question:
continue
item = {
'label': question.question['question'], # ugly
'choices': []
}
choices = defaultdict(int)
total_answers = 0
for answer in _answers[question]:
choices[answer.answer['answer']] += 1
total_answers += 1
try:
your_answer = your_answers.get(question=question)
except Answer.DoesNotExist:
your_answer = None
for choice in question.question['choices']:
try:
percent = 100.0 * choices[choice] / total_answers
except ZeroDivisionError:
percent = 0.0
choice_item = {
'number': choices[choice],
'percent': percent,
'answer': choice,
'your_answer': (
your_answer and choice == your_answer.answer['answer']
)
}
item['choices'].append(choice_item)
questions_dicts.append(item)
context['questions'] = questions_dicts
context['answers'] = answers
return render(request, 'surveys/answers.html', context)
if request.method == 'POST':
form = forms.Form(request.POST)
else:
form = forms.Form()
for question in questions:
if not question.question:
# it's empty
continue
q = question.question
if q.get('question') and q.get('choices'):
field = forms.ChoiceField()
field.label = q.get('question')
field.widget = forms.widgets.RadioSelect()
field.choices = [
(x, x) for x in q.get('choices')
]
field.required = False
form.fields[str(question.id)] = field
if request.method == 'POST':
if form.is_valid():
# delete any previous answers
Answer.objects.filter(
question=question,
user=request.user
).delete()
for question_id, answer in form.cleaned_data.items():
if not answer:
continue
question = questions.get(id=question_id)
Answer.objects.create(
question=question,
user=request.user,
answer={
'answer': answer
}
)
return redirect('surveys:load', survey.id)
context['form'] = form
return render(request, 'surveys/questions.html', context)
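# Rendering sketch (hypothetical): a question with choices ['yes', 'no'] and
# recorded answers [yes, yes, no] produces choice dicts
# {'answer': 'yes', 'number': 2, 'percent': 66.7} and
# {'answer': 'no', 'number': 1, 'percent': 33.3} (approximately) in the
# context passed to surveys/answers.html.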
| bsd-3-clause |
waseem18/bedrock | vendor-local/packages/chardet/chardet/langthaimodel.py | 235 | 11298 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbols (punctuation) that do not belong to a word
# 252: 0 - 9
# The following result for Thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences: 7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = ( \
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = { \
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': constants.False,
'charsetName': "TIS-620"
}
| mpl-2.0 |
idea4bsd/idea4bsd | python/helpers/pydev/tests_pydevd_python/performance_check.py | 12 | 6676 | import debugger_unittest
import sys
import re
import os
CHECK_BASELINE, CHECK_REGULAR, CHECK_CYTHON = 'baseline', 'regular', 'cython'
class PerformanceWriterThread(debugger_unittest.AbstractWriterThread):
CHECK = None
# overrides debugger_unittest.AbstractWriterThread.get_environ
def get_environ(self):
env = os.environ.copy()
if self.CHECK == CHECK_BASELINE:
env['PYTHONPATH'] = r'X:\PyDev.Debugger.baseline'
elif self.CHECK == CHECK_CYTHON:
env['PYDEVD_USE_CYTHON'] = 'YES'
elif self.CHECK == CHECK_REGULAR:
env['PYDEVD_USE_CYTHON'] = 'NO'
else:
raise AssertionError("Don't know what to check.")
return env
# overrides debugger_unittest.AbstractWriterThread.get_pydevd_file
def get_pydevd_file(self):
if self.CHECK == CHECK_BASELINE:
return os.path.abspath(os.path.join(r'X:\PyDev.Debugger.baseline', 'pydevd.py'))
dirname = os.path.dirname(__file__)
dirname = os.path.dirname(dirname)
return os.path.abspath(os.path.join(dirname, 'pydevd.py'))
class WriterThreadPerformance1(PerformanceWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_performance_1.py')
BENCHMARK_NAME = 'method_calls_with_breakpoint'
def run(self):
self.start_socket()
self.write_add_breakpoint(17, 'method')
self.write_make_initial_run()
self.finished_ok = True
class WriterThreadPerformance2(PerformanceWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_performance_1.py')
BENCHMARK_NAME = 'method_calls_without_breakpoint'
def run(self):
self.start_socket()
self.write_make_initial_run()
self.finished_ok = True
class WriterThreadPerformance3(PerformanceWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_performance_1.py')
BENCHMARK_NAME = 'method_calls_with_step_over'
def run(self):
self.start_socket()
self.write_add_breakpoint(26, None)
self.write_make_initial_run()
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
self.write_step_over(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('108', True)
self.write_run_thread(thread_id)
self.finished_ok = True
class WriterThreadPerformance4(PerformanceWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_performance_1.py')
BENCHMARK_NAME = 'method_calls_with_exception_breakpoint'
def run(self):
self.start_socket()
self.write_add_exception_breakpoint('ValueError')
self.write_make_initial_run()
self.finished_ok = True
class CheckDebuggerPerformance(debugger_unittest.DebuggerRunner):
def get_command_line(self):
return [sys.executable]
def _get_time_from_result(self, result):
stdout = ''.join(result['stdout'])
match = re.search(r'TotalTime>>((\d|\.)+)<<', stdout)
time_taken = match.group(1)
return float(time_taken)
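# Example (hypothetical): a benchmark run whose stdout contains
# 'TotalTime>>1.234<<' makes _get_time_from_result return 1.234.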
def obtain_results(self, writer_thread_class):
time_when_debugged = self._get_time_from_result(self.check_case(writer_thread_class))
args = self.get_command_line()
args.append(writer_thread_class.TEST_FILE)
regular_time = self._get_time_from_result(self.run_process(args, writer_thread=None))
simple_trace_time = self._get_time_from_result(self.run_process(args+['--regular-trace'], writer_thread=None))
print(writer_thread_class.BENCHMARK_NAME, time_when_debugged, regular_time, simple_trace_time)
if 'SPEEDTIN_AUTHORIZATION_KEY' in os.environ:
SPEEDTIN_AUTHORIZATION_KEY = os.environ['SPEEDTIN_AUTHORIZATION_KEY']
# sys.path.append(r'X:\speedtin\pyspeedtin')
import pyspeedtin # If the authorization key is there, pyspeedtin must be available
import pydevd
pydevd_cython_project_id, pydevd_pure_python_project_id = 6, 7
if writer_thread_class.CHECK == CHECK_BASELINE:
project_ids = (pydevd_cython_project_id, pydevd_pure_python_project_id)
elif writer_thread_class.CHECK == CHECK_REGULAR:
project_ids = (pydevd_pure_python_project_id,)
elif writer_thread_class.CHECK == CHECK_CYTHON:
project_ids = (pydevd_cython_project_id,)
else:
raise AssertionError('Wrong check: %s' % (writer_thread_class.CHECK))
for project_id in project_ids:
api = pyspeedtin.PySpeedTinApi(authorization_key=SPEEDTIN_AUTHORIZATION_KEY, project_id=project_id)
benchmark_name = writer_thread_class.BENCHMARK_NAME
if writer_thread_class.CHECK == CHECK_BASELINE:
version = '0.0.1_baseline'
return # No longer commit the baseline (it's immutable right now).
else:
version = pydevd.__version__
commit_id, branch, commit_date = api.git_commit_id_branch_and_date_from_path(pydevd.__file__)
api.add_benchmark(benchmark_name)
api.add_measurement(
benchmark_name,
value=time_when_debugged,
version=version,
released=False,
branch=branch,
commit_id=commit_id,
commit_date=commit_date,
)
api.commit()
def check_performance1(self):
self.obtain_results(WriterThreadPerformance1)
def check_performance2(self):
self.obtain_results(WriterThreadPerformance2)
def check_performance3(self):
self.obtain_results(WriterThreadPerformance3)
def check_performance4(self):
self.obtain_results(WriterThreadPerformance4)
if __name__ == '__main__':
debugger_unittest.SHOW_WRITES_AND_READS = False
debugger_unittest.SHOW_OTHER_DEBUG_INFO = False
debugger_unittest.SHOW_STDOUT = False
for check in (
# CHECK_BASELINE, -- Checks against the version checked out at X:\PyDev.Debugger.baseline.
CHECK_REGULAR,
CHECK_CYTHON
):
PerformanceWriterThread.CHECK = check
print('Checking: %s' % (check,))
check_debugger_performance = CheckDebuggerPerformance()
check_debugger_performance.check_performance1()
check_debugger_performance.check_performance2()
check_debugger_performance.check_performance3()
check_debugger_performance.check_performance4()
| apache-2.0 |
HyperBaton/ansible | lib/ansible/modules/network/f5/bigip_sys_daemon_log_tmm.py | 38 | 13357 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_sys_daemon_log_tmm
short_description: Manage BIG-IP tmm daemon log settings
description:
- Manage BIG-IP tmm log settings.
version_added: 2.8
options:
arp_log_level:
description:
- Specifies the lowest level of ARP messages from the tmm daemon
to include in the system log.
type: str
choices:
- debug
- error
- informational
- notice
- warning
http_compression_log_level:
description:
- Specifies the lowest level of HTTP compression messages from the tmm daemon
to include in the system log.
type: str
choices:
- debug
- error
- informational
- notice
- warning
http_log_level:
description:
- Specifies the lowest level of HTTP messages from the tmm daemon
to include in the system log.
type: str
choices:
- debug
- error
- informational
- notice
- warning
ip_log_level:
description:
- Specifies the lowest level of IP address messages from the tmm daemon
to include in the system log.
type: str
choices:
- debug
- informational
- notice
- warning
irule_log_level:
description:
- Specifies the lowest level of iRule messages from the tmm daemon
to include in the system log.
type: str
choices:
- debug
- error
- informational
- notice
- warning
layer4_log_level:
description:
- Specifies the lowest level of Layer 4 messages from the tmm daemon
to include in the system log.
type: str
choices:
- debug
- informational
- notice
net_log_level:
description:
- Specifies the lowest level of network messages from the tmm daemon
to include in the system log.
type: str
choices:
- critical
- debug
- error
- informational
- notice
- warning
os_log_level:
description:
- Specifies the lowest level of operating system messages from the tmm daemon
to include in the system log.
type: str
choices:
- alert
- critical
- debug
- emergency
- error
- informational
- notice
- warning
pva_log_level:
description:
- Specifies the lowest level of PVA messages from the tmm daemon
to include in the system log.
type: str
choices:
- debug
- informational
- notice
ssl_log_level:
description:
- Specifies the lowest level of SSL messages from the tmm daemon
to include in the system log.
type: str
choices:
- alert
- critical
- debug
- emergency
- error
- informational
- notice
- warning
state:
description:
- The state of the log level on the system. When C(present), guarantees
that an existing log level is set to C(value).
type: str
choices:
- present
default: present
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Set SSL log level to debug
bigip_sys_daemon_log_tmm:
provider:
password: secret
server: lb.mydomain.com
user: admin
ssl_log_level: debug
delegate_to: localhost
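# A second, illustrative sketch (not taken from the original docs): several
# of the documented log-level options can be set in one task.
- name: Set HTTP and iRule log levels together
bigip_sys_daemon_log_tmm:
provider:
password: secret
server: lb.mydomain.com
user: admin
http_log_level: error
irule_log_level: notice
delegate_to: localhost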
'''
RETURN = r'''
arp_log_level:
description: Lowest level of ARP messages from the tmm daemon to log.
returned: changed
type: str
sample: error
http_compression_log_level:
description: Lowest level of HTTP compression messages from the tmm daemon to log.
returned: changed
type: str
sample: debug
http_log_level:
description: Lowest level of HTTP messages from the tmm daemon to log.
returned: changed
type: str
sample: notice
ip_log_level:
description: Lowest level of IP address messages from the tmm daemon to log.
returned: changed
type: str
sample: warning
irule_log_level:
description: Lowest level of iRule messages from the tmm daemon to log.
returned: changed
type: str
sample: error
layer4_log_level:
description: Lowest level of Layer 4 messages from the tmm daemon to log.
returned: changed
type: str
sample: notice
net_log_level:
description: Lowest level of network messages from the tmm daemon to log.
returned: changed
type: str
sample: critical
os_log_level:
description: Lowest level of operating system messages from the tmm daemon to log.
returned: changed
type: str
sample: critical
pva_log_level:
description: Lowest level of PVA messages from the tmm daemon to log.
returned: changed
type: str
sample: debug
ssl_log_level:
description: Lowest level of SSL messages from the tmm daemon to log.
returned: changed
type: str
sample: critical
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
class Parameters(AnsibleF5Parameters):
api_map = {
'arpLogLevel': 'arp_log_level',
'httpCompressionLogLevel': 'http_compression_log_level',
'httpLogLevel': 'http_log_level',
'ipLogLevel': 'ip_log_level',
'iruleLogLevel': 'irule_log_level',
'layer4LogLevel': 'layer4_log_level',
'netLogLevel': 'net_log_level',
'osLogLevel': 'os_log_level',
'pvaLogLevel': 'pva_log_level',
'sslLogLevel': 'ssl_log_level',
}
api_attributes = [
'arpLogLevel',
'httpCompressionLogLevel',
'httpLogLevel',
'ipLogLevel',
'iruleLogLevel',
'layer4LogLevel',
'netLogLevel',
'osLogLevel',
'pvaLogLevel',
'sslLogLevel',
]
returnables = [
'arp_log_level',
'http_compression_log_level',
'http_log_level',
'ip_log_level',
'irule_log_level',
'layer4_log_level',
'net_log_level',
'os_log_level',
'pva_log_level',
'ssl_log_level',
]
updatables = [
'arp_log_level',
'http_compression_log_level',
'http_log_level',
'ip_log_level',
'irule_log_level',
'layer4_log_level',
'net_log_level',
'os_log_level',
'pva_log_level',
'ssl_log_level',
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exec_module(self):
result = dict()
changed = self.present()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
return self.update()
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/sys/daemon-log-settings/tmm".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/daemon-log-settings/tmm".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.choices_min = ['debug', 'informational', 'notice']
self.choices_common = self.choices_min + ['warning', 'error']
self.choices_all = self.choices_common + ['alert', 'critical', 'emergency']
argument_spec = dict(
arp_log_level=dict(
choices=self.choices_common
),
http_compression_log_level=dict(
choices=self.choices_common
),
http_log_level=dict(
choices=self.choices_common
),
ip_log_level=dict(
choices=self.choices_min + ['warning']
),
irule_log_level=dict(
choices=self.choices_common
),
layer4_log_level=dict(
choices=self.choices_min
),
net_log_level=dict(
choices=self.choices_common + ['critical']
),
os_log_level=dict(
choices=self.choices_all
),
pva_log_level=dict(
choices=self.choices_min
),
ssl_log_level=dict(
choices=self.choices_all
),
state=dict(default='present', choices=['present'])
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
Hasimir/brython | www/src/Lib/test/test_with.py | 83 | 26527 | #!/usr/bin/env python3
"""Unit tests for the with statement specified in PEP 343."""
__author__ = "Mike Bland"
__email__ = "mbland at acm dot org"
import sys
import unittest
from collections import deque
from contextlib import _GeneratorContextManager, contextmanager
from test.support import run_unittest
class MockContextManager(_GeneratorContextManager):
def __init__(self, func, *args, **kwds):
super().__init__(func, *args, **kwds)
self.enter_called = False
self.exit_called = False
self.exit_args = None
def __enter__(self):
self.enter_called = True
return _GeneratorContextManager.__enter__(self)
def __exit__(self, type, value, traceback):
self.exit_called = True
self.exit_args = (type, value, traceback)
return _GeneratorContextManager.__exit__(self, type,
value, traceback)
def mock_contextmanager(func):
def helper(*args, **kwds):
return MockContextManager(func, *args, **kwds)
return helper
class MockResource(object):
def __init__(self):
self.yielded = False
self.stopped = False
@mock_contextmanager
def mock_contextmanager_generator():
mock = MockResource()
try:
mock.yielded = True
yield mock
finally:
mock.stopped = True
class Nested(object):
def __init__(self, *managers):
self.managers = managers
self.entered = None
def __enter__(self):
if self.entered is not None:
raise RuntimeError("Context is not reentrant")
self.entered = deque()
vars = []
try:
for mgr in self.managers:
vars.append(mgr.__enter__())
self.entered.appendleft(mgr)
except:
if not self.__exit__(*sys.exc_info()):
raise
return vars
def __exit__(self, *exc_info):
# Behave like nested with statements
# first in, last out
# New exceptions override old ones
ex = exc_info
for mgr in self.entered:
try:
if mgr.__exit__(*ex):
ex = (None, None, None)
except:
ex = sys.exc_info()
self.entered = None
if ex is not exc_info:
raise ex[0](ex[1]).with_traceback(ex[2])
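# Typical use, as exercised by the tests below: enter several context
# managers in one statement and unpack the values they yield, e.g.
#   with Nested(cm_a, cm_b) as (a, b):
#       ...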
class MockNested(Nested):
def __init__(self, *managers):
Nested.__init__(self, *managers)
self.enter_called = False
self.exit_called = False
self.exit_args = None
def __enter__(self):
self.enter_called = True
return Nested.__enter__(self)
def __exit__(self, *exc_info):
self.exit_called = True
self.exit_args = exc_info
return Nested.__exit__(self, *exc_info)
class FailureTestCase(unittest.TestCase):
def testNameError(self):
def fooNotDeclared():
with foo: pass
self.assertRaises(NameError, fooNotDeclared)
def testEnterAttributeError(self):
class LacksEnter(object):
def __exit__(self, type, value, traceback):
pass
def fooLacksEnter():
foo = LacksEnter()
with foo: pass
self.assertRaises(AttributeError, fooLacksEnter)
def testExitAttributeError(self):
class LacksExit(object):
def __enter__(self):
pass
def fooLacksExit():
foo = LacksExit()
with foo: pass
self.assertRaises(AttributeError, fooLacksExit)
def assertRaisesSyntaxError(self, codestr):
def shouldRaiseSyntaxError(s):
compile(s, '', 'single')
self.assertRaises(SyntaxError, shouldRaiseSyntaxError, codestr)
def testAssignmentToNoneError(self):
self.assertRaisesSyntaxError('with mock as None:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None):\n'
' pass')
def testAssignmentToEmptyTupleError(self):
self.assertRaisesSyntaxError(
'with mock as ():\n'
' pass')
def testAssignmentToTupleOnlyContainingNoneError(self):
self.assertRaisesSyntaxError('with mock as None,:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None,):\n'
' pass')
def testAssignmentToTupleContainingNoneError(self):
self.assertRaisesSyntaxError(
'with mock as (foo, None, bar):\n'
' pass')
def testEnterThrows(self):
class EnterThrows(object):
def __enter__(self):
raise RuntimeError("Enter threw")
def __exit__(self, *args):
pass
def shouldThrow():
ct = EnterThrows()
self.foo = None
with ct as self.foo:
pass
self.assertRaises(RuntimeError, shouldThrow)
self.assertEqual(self.foo, None)
def testExitThrows(self):
class ExitThrows(object):
def __enter__(self):
return
def __exit__(self, *args):
raise RuntimeError(42)
def shouldThrow():
with ExitThrows():
pass
self.assertRaises(RuntimeError, shouldThrow)
class ContextmanagerAssertionMixin(object):
def setUp(self):
self.TEST_EXCEPTION = RuntimeError("test exception")
def assertInWithManagerInvariants(self, mock_manager):
self.assertTrue(mock_manager.enter_called)
self.assertFalse(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, None)
def assertAfterWithManagerInvariants(self, mock_manager, exit_args):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, exit_args)
def assertAfterWithManagerInvariantsNoError(self, mock_manager):
self.assertAfterWithManagerInvariants(mock_manager,
(None, None, None))
def assertInWithGeneratorInvariants(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertFalse(mock_generator.stopped)
def assertAfterWithGeneratorInvariantsNoError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
def raiseTestException(self):
raise self.TEST_EXCEPTION
def assertAfterWithManagerInvariantsWithError(self, mock_manager,
exc_type=None):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
if exc_type is None:
self.assertEqual(mock_manager.exit_args[1], self.TEST_EXCEPTION)
exc_type = type(self.TEST_EXCEPTION)
self.assertEqual(mock_manager.exit_args[0], exc_type)
# Test the __exit__ arguments. Issue #7853
self.assertIsInstance(mock_manager.exit_args[1], exc_type)
self.assertIsNot(mock_manager.exit_args[2], None)
def assertAfterWithGeneratorInvariantsWithError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
class NonexceptionalTestCase(unittest.TestCase, ContextmanagerAssertionMixin):
def testInlineGeneratorSyntax(self):
with mock_contextmanager_generator():
pass
def testUnboundGenerator(self):
mock = mock_contextmanager_generator()
with mock:
pass
self.assertAfterWithManagerInvariantsNoError(mock)
def testInlineGeneratorBoundSyntax(self):
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
# FIXME: In the future, we'll try to keep the bound names from leaking
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToExistingVariable(self):
foo = None
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToDottedVariable(self):
with mock_contextmanager_generator() as self.foo:
self.assertInWithGeneratorInvariants(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.foo)
def testBoundGenerator(self):
mock = mock_contextmanager_generator()
with mock as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertInWithManagerInvariants(mock)
self.assertAfterWithGeneratorInvariantsNoError(foo)
self.assertAfterWithManagerInvariantsNoError(mock)
def testNestedSingleStatements(self):
mock_a = mock_contextmanager_generator()
with mock_a as foo:
mock_b = mock_contextmanager_generator()
with mock_b as bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(foo)
self.assertInWithGeneratorInvariants(bar)
self.assertAfterWithManagerInvariantsNoError(mock_b)
self.assertAfterWithGeneratorInvariantsNoError(bar)
self.assertInWithManagerInvariants(mock_a)
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithManagerInvariantsNoError(mock_a)
self.assertAfterWithGeneratorInvariantsNoError(foo)
class NestedNonexceptionalTestCase(unittest.TestCase,
ContextmanagerAssertionMixin):
def testSingleArgInlineGeneratorSyntax(self):
with Nested(mock_contextmanager_generator()):
pass
def testSingleArgBoundToNonTuple(self):
m = mock_contextmanager_generator()
# This will bind all the arguments to nested() into a single list
# assigned to foo.
with Nested(m) as foo:
self.assertInWithManagerInvariants(m)
self.assertAfterWithManagerInvariantsNoError(m)
def testSingleArgBoundToSingleElementParenthesizedList(self):
m = mock_contextmanager_generator()
# This will bind all the arguments to nested() into a single list
# assigned to foo.
with Nested(m) as (foo):
self.assertInWithManagerInvariants(m)
self.assertAfterWithManagerInvariantsNoError(m)
def testSingleArgBoundToMultipleElementTupleError(self):
def shouldThrowValueError():
with Nested(mock_contextmanager_generator()) as (foo, bar):
pass
self.assertRaises(ValueError, shouldThrowValueError)
def testSingleArgUnbound(self):
mock_contextmanager = mock_contextmanager_generator()
mock_nested = MockNested(mock_contextmanager)
with mock_nested:
self.assertInWithManagerInvariants(mock_contextmanager)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(mock_contextmanager)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testMultipleArgUnbound(self):
m = mock_contextmanager_generator()
n = mock_contextmanager_generator()
o = mock_contextmanager_generator()
mock_nested = MockNested(m, n, o)
with mock_nested:
self.assertInWithManagerInvariants(m)
self.assertInWithManagerInvariants(n)
self.assertInWithManagerInvariants(o)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(m)
self.assertAfterWithManagerInvariantsNoError(n)
self.assertAfterWithManagerInvariantsNoError(o)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testMultipleArgBound(self):
mock_nested = MockNested(mock_contextmanager_generator(),
mock_contextmanager_generator(), mock_contextmanager_generator())
with mock_nested as (m, n, o):
self.assertInWithGeneratorInvariants(m)
self.assertInWithGeneratorInvariants(n)
self.assertInWithGeneratorInvariants(o)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithGeneratorInvariantsNoError(m)
self.assertAfterWithGeneratorInvariantsNoError(n)
self.assertAfterWithGeneratorInvariantsNoError(o)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
class ExceptionalTestCase(ContextmanagerAssertionMixin, unittest.TestCase):
def testSingleResource(self):
cm = mock_contextmanager_generator()
def shouldThrow():
with cm as self.resource:
self.assertInWithManagerInvariants(cm)
self.assertInWithGeneratorInvariants(self.resource)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm)
self.assertAfterWithGeneratorInvariantsWithError(self.resource)
def testExceptionNormalized(self):
cm = mock_contextmanager_generator()
def shouldThrow():
with cm as self.resource:
# Note this relies on the fact that 1 // 0 produces an exception
# that is not normalized immediately.
1 // 0
self.assertRaises(ZeroDivisionError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm, ZeroDivisionError)
def testNestedSingleStatements(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
def shouldThrow():
with mock_a as self.foo:
with mock_b as self.bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(self.foo)
self.assertInWithGeneratorInvariants(self.bar)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithManagerInvariantsWithError(mock_b)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
self.assertAfterWithGeneratorInvariantsWithError(self.bar)
def testMultipleResourcesInSingleStatement(self):
cm_a = mock_contextmanager_generator()
cm_b = mock_contextmanager_generator()
mock_nested = MockNested(cm_a, cm_b)
def shouldThrow():
with mock_nested as (self.resource_a, self.resource_b):
self.assertInWithManagerInvariants(cm_a)
self.assertInWithManagerInvariants(cm_b)
self.assertInWithManagerInvariants(mock_nested)
self.assertInWithGeneratorInvariants(self.resource_a)
self.assertInWithGeneratorInvariants(self.resource_b)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm_a)
self.assertAfterWithManagerInvariantsWithError(cm_b)
self.assertAfterWithManagerInvariantsWithError(mock_nested)
self.assertAfterWithGeneratorInvariantsWithError(self.resource_a)
self.assertAfterWithGeneratorInvariantsWithError(self.resource_b)
def testNestedExceptionBeforeInnerStatement(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
self.bar = None
def shouldThrow():
with mock_a as self.foo:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithGeneratorInvariants(self.foo)
self.raiseTestException()
with mock_b as self.bar:
pass
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
# The inner statement stuff should never have been touched
self.assertEqual(self.bar, None)
self.assertFalse(mock_b.enter_called)
self.assertFalse(mock_b.exit_called)
self.assertEqual(mock_b.exit_args, None)
def testNestedExceptionAfterInnerStatement(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
def shouldThrow():
with mock_a as self.foo:
with mock_b as self.bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(self.foo)
self.assertInWithGeneratorInvariants(self.bar)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithManagerInvariantsNoError(mock_b)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.bar)
def testRaisedStopIteration1(self):
# From bug 1462485
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise StopIteration("from with")
self.assertRaises(StopIteration, shouldThrow)
def testRaisedStopIteration2(self):
# From bug 1462485
class cm(object):
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def shouldThrow():
with cm():
raise StopIteration("from with")
self.assertRaises(StopIteration, shouldThrow)
def testRaisedStopIteration3(self):
# Another variant where the exception hasn't been instantiated
# From bug 1705170
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise next(iter([]))
self.assertRaises(StopIteration, shouldThrow)
def testRaisedGeneratorExit1(self):
# From bug 1462485
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise GeneratorExit("from with")
self.assertRaises(GeneratorExit, shouldThrow)
def testRaisedGeneratorExit2(self):
# From bug 1462485
class cm (object):
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def shouldThrow():
with cm():
raise GeneratorExit("from with")
self.assertRaises(GeneratorExit, shouldThrow)
def testErrorsInBool(self):
# issue4589: __exit__ return code may raise an exception
# when looking at its truth value.
class cm(object):
def __init__(self, bool_conversion):
class Bool:
def __bool__(self):
return bool_conversion()
self.exit_result = Bool()
def __enter__(self):
return 3
def __exit__(self, a, b, c):
return self.exit_result
def trueAsBool():
with cm(lambda: True):
self.fail("Should NOT see this")
trueAsBool()
def falseAsBool():
with cm(lambda: False):
self.fail("Should raise")
self.assertRaises(AssertionError, falseAsBool)
def failAsBool():
with cm(lambda: 1//0):
self.fail("Should NOT see this")
self.assertRaises(ZeroDivisionError, failAsBool)
class NonLocalFlowControlTestCase(unittest.TestCase):
def testWithBreak(self):
counter = 0
while True:
counter += 1
with mock_contextmanager_generator():
counter += 10
break
counter += 100 # Not reached
self.assertEqual(counter, 11)
def testWithContinue(self):
counter = 0
while True:
counter += 1
if counter > 2:
break
with mock_contextmanager_generator():
counter += 10
continue
counter += 100 # Not reached
self.assertEqual(counter, 12)
def testWithReturn(self):
def foo():
counter = 0
while True:
counter += 1
with mock_contextmanager_generator():
counter += 10
return counter
counter += 100 # Not reached
self.assertEqual(foo(), 11)
def testWithYield(self):
def gen():
with mock_contextmanager_generator():
yield 12
yield 13
x = list(gen())
self.assertEqual(x, [12, 13])
def testWithRaise(self):
counter = 0
try:
counter += 1
with mock_contextmanager_generator():
counter += 10
raise RuntimeError
counter += 100 # Not reached
except RuntimeError:
self.assertEqual(counter, 11)
else:
self.fail("Didn't raise RuntimeError")
class AssignmentTargetTestCase(unittest.TestCase):
def testSingleComplexTarget(self):
targets = {1: [0, 1, 2]}
with mock_contextmanager_generator() as targets[1][0]:
self.assertEqual(list(targets.keys()), [1])
self.assertEqual(targets[1][0].__class__, MockResource)
with mock_contextmanager_generator() as list(targets.values())[0][1]:
self.assertEqual(list(targets.keys()), [1])
self.assertEqual(targets[1][1].__class__, MockResource)
with mock_contextmanager_generator() as targets[2]:
keys = list(targets.keys())
keys.sort()
self.assertEqual(keys, [1, 2])
class C: pass
blah = C()
with mock_contextmanager_generator() as blah.foo:
self.assertEqual(hasattr(blah, "foo"), True)
def testMultipleComplexTargets(self):
class C:
def __enter__(self): return 1, 2, 3
def __exit__(self, t, v, tb): pass
targets = {1: [0, 1, 2]}
with C() as (targets[1][0], targets[1][1], targets[1][2]):
self.assertEqual(targets, {1: [1, 2, 3]})
with C() as (list(targets.values())[0][2], list(targets.values())[0][1], list(targets.values())[0][0]):
self.assertEqual(targets, {1: [3, 2, 1]})
with C() as (targets[1], targets[2], targets[3]):
self.assertEqual(targets, {1: 1, 2: 2, 3: 3})
class B: pass
blah = B()
with C() as (blah.one, blah.two, blah.three):
self.assertEqual(blah.one, 1)
self.assertEqual(blah.two, 2)
self.assertEqual(blah.three, 3)
class ExitSwallowsExceptionTestCase(unittest.TestCase):
def testExitTrueSwallowsException(self):
class AfricanSwallow:
def __enter__(self): pass
def __exit__(self, t, v, tb): return True
try:
with AfricanSwallow():
1/0
except ZeroDivisionError:
self.fail("ZeroDivisionError should have been swallowed")
def testExitFalseDoesntSwallowException(self):
class EuropeanSwallow:
def __enter__(self): pass
def __exit__(self, t, v, tb): return False
try:
with EuropeanSwallow():
1/0
except ZeroDivisionError:
pass
else:
self.fail("ZeroDivisionError should have been raised")
class NestedWith(unittest.TestCase):
class Dummy(object):
def __init__(self, value=None, gobble=False):
if value is None:
value = self
self.value = value
self.gobble = gobble
self.enter_called = False
self.exit_called = False
def __enter__(self):
self.enter_called = True
return self.value
def __exit__(self, *exc_info):
self.exit_called = True
self.exc_info = exc_info
if self.gobble:
return True
class InitRaises(object):
def __init__(self): raise RuntimeError()
class EnterRaises(object):
def __enter__(self): raise RuntimeError()
def __exit__(self, *exc_info): pass
class ExitRaises(object):
def __enter__(self): pass
def __exit__(self, *exc_info): raise RuntimeError()
def testNoExceptions(self):
with self.Dummy() as a, self.Dummy() as b:
self.assertTrue(a.enter_called)
self.assertTrue(b.enter_called)
self.assertTrue(a.exit_called)
self.assertTrue(b.exit_called)
def testExceptionInExprList(self):
try:
with self.Dummy() as a, self.InitRaises():
pass
except:
pass
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
def testExceptionInEnter(self):
try:
with self.Dummy() as a, self.EnterRaises():
self.fail('body of bad with executed')
except RuntimeError:
pass
else:
self.fail('RuntimeError not reraised')
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
def testExceptionInExit(self):
body_executed = False
with self.Dummy(gobble=True) as a, self.ExitRaises():
body_executed = True
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
self.assertTrue(body_executed)
self.assertNotEqual(a.exc_info[0], None)
def testEnterReturnsTuple(self):
with self.Dummy(value=(1,2)) as (a1, a2), \
self.Dummy(value=(10, 20)) as (b1, b2):
self.assertEqual(1, a1)
self.assertEqual(2, a2)
self.assertEqual(10, b1)
self.assertEqual(20, b2)
def test_main():
run_unittest(FailureTestCase, NonexceptionalTestCase,
NestedNonexceptionalTestCase, ExceptionalTestCase,
NonLocalFlowControlTestCase,
AssignmentTargetTestCase,
ExitSwallowsExceptionTestCase,
NestedWith)
if __name__ == '__main__':
test_main()
| bsd-3-clause |
APCVSRepo/sdl_core | src/3rd_party-static/jsoncpp/scons-tools/globtool.py | 256 | 1667 | import fnmatch
import os
def generate( env ):
def Glob( env, includes = None, excludes = None, dir = '.' ):
"""Adds Glob( includes = Split( '*' ), excludes = None, dir = '.')
helper function to environment.
Globs the file-system files.
includes: list of file name patterns included in the return list when matched.
excludes: list of file name patterns excluded from the return list.
Example:
sources = env.Glob( ("*.cpp", '*.h'), "~*.cpp", "#src" )
"""
def filterFilename(path):
abs_path = os.path.join( dir, path )
if not os.path.isfile(abs_path):
return 0
fn = os.path.basename(path)
match = 0
for include in includes:
if fnmatch.fnmatchcase( fn, include ):
match = 1
break
if match == 1 and not excludes is None:
for exclude in excludes:
if fnmatch.fnmatchcase( fn, exclude ):
match = 0
break
return match
if includes is None:
includes = ('*',)
elif type(includes) in ( type(''), type(u'') ):
includes = (includes,)
if type(excludes) in ( type(''), type(u'') ):
excludes = (excludes,)
dir = env.Dir(dir).abspath
paths = os.listdir( dir )
def makeAbsFileNode( path ):
return env.File( os.path.join( dir, path ) )
nodes = filter( filterFilename, paths )
return map( makeAbsFileNode, nodes )
from SCons.Script import Environment
Environment.Glob = Glob
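# Illustrative SConstruct usage (a sketch; the toolpath is an assumption):
#   env = Environment(tools=['globtool'], toolpath=['scons-tools'])
#   sources = env.Glob(includes=('*.cpp', '*.h'), excludes='~*.cpp', dir='#src')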
def exists(env):
"""
Tool always exists.
"""
return True
| bsd-3-clause |
yangleo/cloud-github | openstack_dashboard/dashboards/admin/networks/agents/tests.py | 9 | 7633 | # Copyright 2012 NEC Corporation
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
NETWORKS_DETAIL_URL = 'horizon:admin:networks:detail'
class NetworkAgentTests(test.BaseAdminViewTests):
@test.create_stubs({api.neutron: ('agent_list',
'network_get',
'list_dhcp_agent_hosting_networks',)})
def test_agent_add_get(self):
network = self.networks.first()
api.neutron.agent_list(IsA(http.HttpRequest), agent_type='DHCP agent')\
.AndReturn(self.agents.list())
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
api.neutron.list_dhcp_agent_hosting_networks(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.agents.list())
self.mox.ReplayAll()
url = reverse('horizon:admin:networks:adddhcpagent',
args=[network.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'admin/networks/agents/add.html')
@test.create_stubs({api.neutron: ('agent_list',
'network_get',
'list_dhcp_agent_hosting_networks',
'add_network_to_dhcp_agent',)})
def test_agent_add_post(self):
network = self.networks.first()
agent_id = self.agents.first().id
api.neutron.list_dhcp_agent_hosting_networks(IsA(http.HttpRequest),
network.id)\
.AndReturn([self.agents.list()[1]])
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
api.neutron.agent_list(IsA(http.HttpRequest), agent_type='DHCP agent')\
.AndReturn(self.agents.list())
api.neutron.add_network_to_dhcp_agent(IsA(http.HttpRequest),
agent_id, network.id)\
.AndReturn(True)
self.mox.ReplayAll()
form_data = {'network_id': network.id,
'network_name': network.name,
'agent': agent_id}
url = reverse('horizon:admin:networks:adddhcpagent',
args=[network.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse(NETWORKS_DETAIL_URL, args=[network.id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('agent_list',
'network_get',
'list_dhcp_agent_hosting_networks',
'add_network_to_dhcp_agent',)})
def test_agent_add_post_exception(self):
network = self.networks.first()
agent_id = self.agents.first().id
api.neutron.list_dhcp_agent_hosting_networks(IsA(http.HttpRequest),
network.id)\
.AndReturn([self.agents.list()[1]])
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
api.neutron.agent_list(IsA(http.HttpRequest), agent_type='DHCP agent')\
.AndReturn(self.agents.list())
api.neutron.add_network_to_dhcp_agent(IsA(http.HttpRequest),
agent_id, network.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'network_id': network.id,
'network_name': network.name,
'agent': agent_id}
url = reverse('horizon:admin:networks:adddhcpagent',
args=[network.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse(NETWORKS_DETAIL_URL, args=[network.id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_list',
'port_list',
'list_dhcp_agent_hosting_networks',
'is_extension_supported',
'remove_network_from_dhcp_agent',)})
def test_agent_delete(self):
network_id = self.networks.first().id
agent_id = self.agents.first().id
api.neutron.list_dhcp_agent_hosting_networks(IsA(http.HttpRequest),
network_id).\
AndReturn(self.agents.list())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
api.neutron.remove_network_from_dhcp_agent(IsA(http.HttpRequest),
agent_id, network_id)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(False)
self.mox.ReplayAll()
form_data = {'action': 'agents__delete__%s' % agent_id}
url = reverse(NETWORKS_DETAIL_URL, args=[network_id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
@test.create_stubs({api.neutron: ('subnet_list',
'port_list',
'list_dhcp_agent_hosting_networks',
'is_extension_supported',
'remove_network_from_dhcp_agent',)})
def test_agent_delete_exception(self):
network_id = self.networks.first().id
agent_id = self.agents.first().id
api.neutron.list_dhcp_agent_hosting_networks(IsA(http.HttpRequest),
network_id).\
AndReturn(self.agents.list())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
api.neutron.remove_network_from_dhcp_agent(IsA(http.HttpRequest),
agent_id, network_id)\
.AndRaise(self.exceptions.neutron)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(False)
self.mox.ReplayAll()
form_data = {'action': 'agents__delete__%s' % agent_id}
url = reverse(NETWORKS_DETAIL_URL, args=[network_id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
| apache-2.0 |
xtao/code | tools/update_issue.py | 3 | 1349 | # -*- coding: utf-8 -*-
from vilya.libs.store import store
def main():
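# Backfill issues.target_id: for each issue of type 'project' (and, below,
# type 'team'), copy the owning project_id/team_id from the legacy join
# table into the issues row itself.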
rs = store.execute("select id, type "
"from issues "
"where type='project'")
for r in rs:
id, _ = r
rs1 = store.execute("select id, project_id, issue_id "
"from project_issues "
"where issue_id=%s",
id)
if rs1 and rs1[0]:
_, target_id, _ = rs1[0]
store.execute("update issues "
"set target_id=%s "
"where id=%s",
(target_id, id))
store.commit()
rs = store.execute("select id, type "
"from issues "
"where type='team'")
for r in rs:
id, _ = r
rs1 = store.execute("select id, team_id, issue_id "
"from team_issues "
"where issue_id=%s",
id)
if rs1 and rs1[0]:
_, target_id, _ = rs1[0]
store.execute("update issues "
"set target_id=%s "
"where id=%s",
(target_id, id))
store.commit()
if __name__ == "__main__":
main()
| bsd-3-clause |
yglazko/socorro | webapp-django/bin/linting.py | 9 | 1558 | #!/usr/bin/env python
"""
Use like this:
find somedir | xargs flake8 | python linting.py
or:
flake8 somedir | python linting.py
or:
git ls-files somedir | python linting.py
"""
import os
import sys
# Enter any part of a warning that we deem OK.
# It can be a pep8 warning error code or any other part of a string.
#
# NOTE! Be as specific as you possibly can!
# Only blanket whole files if you desperately have to
#
EXCEPTIONS = (
# has a exceptional use of `...import *`
'settings/base.py:4:',
# has a well known `...import *` trick that we like
'settings/__init__.py',
# ignore south migrations
'/migrations/',
# all downloaded libs to be ignored
'/js/lib/',
# See https://bugzilla.mozilla.org/show_bug.cgi?id=997270
'/js/jquery/',
'/js/flot',
'/js/timeago/',
'jquery.tablesorter.min.js',
'async-local-storage-with-Promise.min.js',
'underscore-min.js',
'moment.min.js',
'jquery.metadata.js',
)
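# For example, a flake8 line such as
#   settings/base.py:4:1: F403 'from ... import *' used
# is skipped because 'settings/base.py:4:' appears in EXCEPTIONS above.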
EXTENSIONS_ONLY = (
'.py',
# commented out until we clean up our .js files
# See https://bugzilla.mozilla.org/show_bug.cgi?id=997272
# '.js'
)
def main():
errors = 0
for line in sys.stdin:
if not line.strip():
continue
_, ext = os.path.splitext(line.split(':')[0])
if ext not in EXTENSIONS_ONLY:
continue
if [f for f in EXCEPTIONS if f in line]:
continue
errors += 1
sys.stderr.write(line)
return errors
if __name__ == '__main__':
sys.exit(main())
| mpl-2.0 |
DrPantera/gsiege | resistencia/tests/test_round.py | 3 | 5377 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# This file is part of Resistencia Cadiz 1812.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Copyright (C) 2010, Pablo Recio Quijano
#----------------------------------------------------------------------
"""
Contains the class that specializes the round class of the competition, to
be used on the tests.
"""
import csv
import os.path
import os
from guadaboard import guada_board
#from resistencia import xdg
from resistencia.contest import round as contest_round
class TestRound(contest_round.Round):
"""
This class includes the same values as a contest round.
In the __init__ method, teams is a tuple whose first value is a list of
matches, and whose second value is the translator that maps keys to
the real teams.
"""
def __init__ (self, teams, num_turns = 150,
log_file=None, player = 0, logFolder = None):
#player must be 0 or 1
contest_round.Round.__init__(self, teams[0], teams[1],
num_turns)
self.player_team = player
self.log_file = log_file
self.logFolder = logFolder
self.round_stats = {}
self.round_stats['wins'] = 0
self.round_stats['looses'] = 0
self.round_stats['draws'] = 0
self.round_stats['turns_winning'] = 0
self.round_stats['turns_losing'] = 0
self.round_stats['num_pieces'] = 0
self.round_stats['val_pieces'] = 0
self.round_stats['max_death'] = 0
print "Creating test round"
def get_round_stats(self):
"""
Returns a list with the stats of the tests rounds
"""
if self.completed:
return self.round_stats
else:
raise contest_round.RoundError('Not all games played')
def _merge_stats(self, match_stats):
"""
Merge the stats of a match with the general stats.
"""
for k in match_stats:
self.round_stats[k] = self.round_stats[k] + match_stats[k]
def play_match(self, fast=None, cant_draw=None):
"""
Run a simulation of the next game on the round
"""
teams_keys = {}
teams_keys['a'] = self.round[self.next_game][0][0]
teams_keys['b'] = self.round[self.next_game][0][1]
team_a = (self.translator[teams_keys['a']],)
team_b = (self.translator[teams_keys['b']],)
logFileName = [""]
result, stats = guada_board.run(team_a, team_b, fast=True,
get_stats=True,
number_turns=self.num_turns,
logNameReference = logFileName)
# print "LogName from outside:", logFileName[0]
# print "Basename", os.path.basename(logFileName[0])
# print "LogFolder", self.logFolder
os.rename(logFileName[0], os.path.join(self.logFolder, os.path.basename(logFileName[0])))
stats_writer = csv.writer(open(self.log_file, 'a'), delimiter=',')#,
#quotechar='|', quoting=csv.QUOTE_MINIMAL)
key_result = ''
player_stats = stats[self.player_team]
number_of_turns = 0
key = ''
team = ''
if self.player_team == 0:
key = teams_keys['b']
team = 'A'
if result == 1:
number_of_turns = player_stats['turns_winning']
key_result = 'A'
elif result == -1:
number_of_turns = player_stats['turns_losing']
key_result = 'B'
else:
key_result = 'D'
else:
key = teams_keys['a']
team = 'B'
if result == -1:
number_of_turns = player_stats['turns_winning']
key_result = 'A'
elif result == 1:
number_of_turns = player_stats['turns_losing']
key_result = 'B'
else:
key_result = 'D'
write_results = [key, team, key_result, number_of_turns,
player_stats['num_pieces'],
player_stats['val_pieces'],
player_stats['max_death']]
#print write_results
stats_writer.writerow(write_results)
self.round[self.next_game] = (self.round[self.next_game][0],
True, result)
self.next_game = self.next_game + 1
self.completed = (self.next_game == self.number_games)
self._merge_stats(stats[self.player_team])
return (self.round[self.next_game-1][0],
self.round[self.next_game-1][2])
| gpl-3.0 |
blackzw/openwrt_sdk_dev1 | staging_dir/host/lib/python2.7/compileall.py | 144 | 7763 | """Module/script to byte-compile all .py files to .pyc (or .pyo) files.
When called as a script with arguments, this compiles the directories
given as arguments recursively; the -l option prevents it from
recursing into directories.
Without arguments, it compiles all modules on sys.path, without
recursing into subdirectories. (Even though it should do so for
packages -- for now, you'll have to deal with packages separately.)
See module py_compile for details of the actual byte-compilation.
"""
import os
import sys
import py_compile
import struct
import imp
__all__ = ["compile_dir","compile_file","compile_path"]
def compile_dir(dir, maxlevels=10, ddir=None,
force=0, rx=None, quiet=0):
"""Byte-compile all modules in the given directory tree.
Arguments (only dir is required):
dir: the directory to byte-compile
maxlevels: maximum recursion level (default 10)
ddir: the directory that will be prepended to the path to the
file as it is compiled into each byte-code file.
force: if 1, force compilation, even if timestamps are up-to-date
rx: if given, a regular expression; files whose full path matches are skipped
quiet: if 1, be quiet during compilation
"""
if not quiet:
print 'Listing', dir, '...'
try:
names = os.listdir(dir)
except os.error:
print "Can't list", dir
names = []
names.sort()
success = 1
for name in names:
fullname = os.path.join(dir, name)
if ddir is not None:
dfile = os.path.join(ddir, name)
else:
dfile = None
if not os.path.isdir(fullname):
if not compile_file(fullname, ddir, force, rx, quiet):
success = 0
elif maxlevels > 0 and \
name != os.curdir and name != os.pardir and \
os.path.isdir(fullname) and \
not os.path.islink(fullname):
if not compile_dir(fullname, maxlevels - 1, dfile, force, rx,
quiet):
success = 0
return success
def compile_file(fullname, ddir=None, force=0, rx=None, quiet=0):
"""Byte-compile one file.
Arguments (only fullname is required):
fullname: the file to byte-compile
ddir: if given, the directory name compiled in to the
byte-code file.
force: if 1, force compilation, even if timestamps are up-to-date
rx: if given, a regular expression; files whose full path matches are skipped
quiet: if 1, be quiet during compilation
"""
success = 1
name = os.path.basename(fullname)
if ddir is not None:
dfile = os.path.join(ddir, name)
else:
dfile = None
if rx is not None:
mo = rx.search(fullname)
if mo:
return success
if os.path.isfile(fullname):
head, tail = name[:-3], name[-3:]
if tail == '.py':
if not force:
try:
mtime = int(os.stat(fullname).st_mtime)
expect = struct.pack('<4sl', imp.get_magic(), mtime)
cfile = fullname + (__debug__ and 'c' or 'o')
with open(cfile, 'rb') as chandle:
actual = chandle.read(8)
if expect == actual:
return success
except IOError:
pass
if not quiet:
print 'Compiling', fullname, '...'
try:
ok = py_compile.compile(fullname, None, dfile, True)
except py_compile.PyCompileError,err:
if quiet:
print 'Compiling', fullname, '...'
print err.msg
success = 0
except IOError, e:
print "Sorry", e
success = 0
else:
if ok == 0:
success = 0
return success
def compile_path(skip_curdir=1, maxlevels=0, force=0, quiet=0):
"""Byte-compile all module on sys.path.
Arguments (all optional):
skip_curdir: if true, skip current directory (default true)
maxlevels: max recursion level (default 0)
force: as for compile_dir() (default 0)
quiet: as for compile_dir() (default 0)
"""
success = 1
for dir in sys.path:
if (not dir or dir == os.curdir) and skip_curdir:
print 'Skipping current directory'
else:
success = success and compile_dir(dir, maxlevels, None,
force, quiet=quiet)
return success
def expand_args(args, flist):
"""read names in flist and append to args"""
expanded = args[:]
if flist:
try:
if flist == '-':
fd = sys.stdin
else:
fd = open(flist)
while 1:
line = fd.readline()
if not line:
break
expanded.append(line[:-1])
except IOError:
print "Error reading file list %s" % flist
raise
return expanded
def main():
"""Script main program."""
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'lfqd:x:i:')
except getopt.error, msg:
print msg
print "usage: python compileall.py [-l] [-f] [-q] [-d destdir] " \
"[-x regexp] [-i list] [directory|file ...]"
print
print "arguments: zero or more file and directory names to compile; " \
"if no arguments given, "
print " defaults to the equivalent of -l sys.path"
print
print "options:"
print "-l: don't recurse into subdirectories"
print "-f: force rebuild even if timestamps are up-to-date"
print "-q: output only error messages"
print "-d destdir: directory to prepend to file paths for use in " \
"compile-time tracebacks and in"
print " runtime tracebacks in cases where the source " \
"file is unavailable"
print "-x regexp: skip files matching the regular expression regexp; " \
"the regexp is searched for"
print " in the full path of each file considered for " \
"compilation"
print "-i file: add all the files and directories listed in file to " \
"the list considered for"
print ' compilation; if "-", names are read from stdin'
sys.exit(2)
maxlevels = 10
ddir = None
force = 0
quiet = 0
rx = None
flist = None
for o, a in opts:
if o == '-l': maxlevels = 0
if o == '-d': ddir = a
if o == '-f': force = 1
if o == '-q': quiet = 1
if o == '-x':
import re
rx = re.compile(a)
if o == '-i': flist = a
if ddir:
if len(args) != 1 and not os.path.isdir(args[0]):
print "-d destdir require exactly one directory argument"
sys.exit(2)
success = 1
try:
if args or flist:
try:
if flist:
args = expand_args(args, flist)
except IOError:
success = 0
if success:
for arg in args:
if os.path.isdir(arg):
if not compile_dir(arg, maxlevels, ddir,
force, rx, quiet):
success = 0
else:
if not compile_file(arg, ddir, force, rx, quiet):
success = 0
else:
success = compile_path()
except KeyboardInterrupt:
print "\n[interrupted]"
success = 0
return success
if __name__ == '__main__':
exit_status = int(not main())
sys.exit(exit_status)
| gpl-2.0 |
chromium/chromium | third_party/blink/tools/blinkpy/w3c/test_importer.py | 5 | 28640 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Fetches a copy of the latest state of a W3C test repository and commits.
If this script is given the argument --auto-update, it will also:
1. Upload a CL.
2. Trigger try jobs and wait for them to complete.
3. Make any changes that are required for new failing tests.
4. Attempt to land the CL.
"""
import argparse
import datetime
import json
import logging
import re
from blinkpy.common.net.git_cl import GitCL
from blinkpy.common.net.network_transaction import NetworkTimeout
from blinkpy.common.path_finder import PathFinder
from blinkpy.common.system.executive import ScriptError
from blinkpy.common.system.log_utils import configure_logging
from blinkpy.w3c.android_wpt_expectations_updater import AndroidWPTExpectationsUpdater
from blinkpy.w3c.chromium_exportable_commits import exportable_commits_over_last_n_commits
from blinkpy.w3c.common import read_credentials, is_testharness_baseline, is_file_exportable, WPT_GH_URL
from blinkpy.w3c.directory_owners_extractor import DirectoryOwnersExtractor
from blinkpy.w3c.import_notifier import ImportNotifier
from blinkpy.w3c.local_wpt import LocalWPT
from blinkpy.w3c.test_copier import TestCopier
from blinkpy.w3c.wpt_expectations_updater import WPTExpectationsUpdater
from blinkpy.w3c.wpt_github import WPTGitHub
from blinkpy.w3c.wpt_manifest import WPTManifest, BASE_MANIFEST_NAME
from blinkpy.web_tests.port.base import Port
from blinkpy.web_tests.models.test_expectations import TestExpectations
# Settings for how often to check try job results and how long to wait.
POLL_DELAY_SECONDS = 2 * 60
TIMEOUT_SECONDS = 210 * 60
# Sheriff calendar URL, used for getting the ecosystem infra sheriff to cc.
ROTATIONS_URL = 'https://chrome-ops-rotation-proxy.appspot.com/current/grotation:chrome-ecosystem-infra'
SHERIFF_EMAIL_FALLBACK = '[email protected]'
RUBBER_STAMPER_BOT = '[email protected]'
_log = logging.getLogger(__file__)
class TestImporter(object):
def __init__(self, host, wpt_github=None, wpt_manifests=None):
self.host = host
self.wpt_github = wpt_github
self.executive = host.executive
self.fs = host.filesystem
self.finder = PathFinder(self.fs)
self.chromium_git = self.host.git(self.finder.chromium_base())
self.dest_path = self.finder.path_from_web_tests('external', 'wpt')
# A common.net.git_cl.GitCL instance.
self.git_cl = None
# Another Git instance with local WPT as CWD, which can only be
# instantiated after the working directory is created.
self.wpt_git = None
# The WPT revision we are importing and the one imported last time.
self.wpt_revision = None
self.last_wpt_revision = None
# A set of rebaselined tests and a dictionary of new test expectations
# mapping failing tests to platforms to
# wpt_expectations_updater.SimpleTestResult.
self.rebaselined_tests = set()
self.new_test_expectations = {}
# New override expectations for Android targets. A dictionary mapping
# products to dictionary that maps test names to test expectation lines
self.new_override_expectations = {}
self.verbose = False
args = ['--clean-up-affected-tests-only',
'--clean-up-test-expectations']
self._expectations_updater = WPTExpectationsUpdater(
self.host, args, wpt_manifests)
args = [
'--android-product',
'android_weblayer'
]
self._android_expectations_updater = AndroidWPTExpectationsUpdater(
self.host, args, wpt_manifests)
def main(self, argv=None):
# TODO(robertma): Test this method! Split it to make it easier to test
# if necessary.
options = self.parse_args(argv)
self.verbose = options.verbose
log_level = logging.DEBUG if self.verbose else logging.INFO
configure_logging(logging_level=log_level, include_time=True)
# Having the full output when executive.run_command fails is useful when
# investigating a failed import, as all we have are logs.
self.executive.error_output_limit = None
if options.auto_update and options.auto_upload:
_log.error(
'--auto-upload and --auto-update cannot be used together.')
return 1
if not self.checkout_is_okay():
return 1
credentials = read_credentials(self.host, options.credentials_json)
gh_user = credentials.get('GH_USER')
gh_token = credentials.get('GH_TOKEN')
if not gh_user or not gh_token:
_log.warning('You have not set your GitHub credentials. This '
'script may fail with a network error when making '
'an API request to GitHub.')
_log.warning('See https://chromium.googlesource.com/chromium/src'
'/+/main/docs/testing/web_platform_tests.md'
'#GitHub-credentials for instructions on how to set '
'your credentials up.')
self.wpt_github = self.wpt_github or WPTGitHub(self.host, gh_user,
gh_token)
self.git_cl = GitCL(
self.host, auth_refresh_token_json=options.auth_refresh_token_json)
_log.debug('Noting the current Chromium revision.')
chromium_revision = self.chromium_git.latest_git_commit()
# Instantiate Git after local_wpt.fetch() to make sure the path exists.
local_wpt = LocalWPT(self.host, gh_token=gh_token)
local_wpt.fetch()
self.wpt_git = self.host.git(local_wpt.path)
if options.revision is not None:
_log.info('Checking out %s', options.revision)
self.wpt_git.run(['checkout', options.revision])
_log.debug('Noting the revision we are importing.')
self.wpt_revision = self.wpt_git.latest_git_commit()
self.last_wpt_revision = self._get_last_imported_wpt_revision()
import_commit = 'wpt@%s' % self.wpt_revision
_log.info('Importing %s to Chromium %s', import_commit,
chromium_revision)
if options.ignore_exportable_commits:
commit_message = self._commit_message(chromium_revision,
import_commit)
else:
commits = self.apply_exportable_commits_locally(local_wpt)
if commits is None:
_log.error('Could not apply some exportable commits cleanly.')
_log.error('Aborting import to prevent clobbering commits.')
return 1
commit_message = self._commit_message(
chromium_revision,
import_commit,
locally_applied_commits=commits)
self._clear_out_dest_path()
_log.info('Copying the tests from the temp repo to the destination.')
test_copier = TestCopier(self.host, local_wpt.path)
test_copier.do_import()
# TODO(robertma): Implement `add --all` in Git (it is different from `commit --all`).
self.chromium_git.run(['add', '--all', self.dest_path])
# Remove expectations for tests that were deleted and rename tests in
# expectations for renamed tests. This requires the old WPT manifest, so
# must happen before we regenerate it.
self._expectations_updater.cleanup_test_expectations_files()
self._generate_manifest()
# TODO(crbug.com/800570 robertma): Re-enable it once we fix the bug.
# self._delete_orphaned_baselines()
if not self.chromium_git.has_working_directory_changes():
_log.info('Done: no changes to import.')
return 0
if self._only_wpt_manifest_changed():
_log.info('Only manifest was updated; skipping the import.')
return 0
with self._expectations_updater.prepare_smoke_tests(self.chromium_git):
self._commit_changes(commit_message)
_log.info('Changes imported and committed.')
if not options.auto_upload and not options.auto_update:
return 0
self._upload_cl()
_log.info('Issue: %s', self.git_cl.run(['issue']).strip())
if not self.update_expectations_for_cl():
return 1
if not options.auto_update:
return 0
if not self.run_commit_queue_for_cl():
return 1
if not self.send_notifications(local_wpt, options.auto_file_bugs,
options.monorail_auth_json):
return 1
return 0
def update_expectations_for_cl(self):
"""Performs the expectation-updating part of an auto-import job.
This includes triggering try jobs and waiting; then, if applicable,
writing new baselines and TestExpectation lines, committing, and
uploading a new patchset.
This assumes that there is a CL associated with the current branch.
Returns True if everything is OK to continue, or False on failure.
"""
_log.info('Triggering try jobs for updating expectations.')
self.git_cl.trigger_try_jobs(self.blink_try_bots())
cl_status = self.git_cl.wait_for_try_jobs(
poll_delay_seconds=POLL_DELAY_SECONDS,
timeout_seconds=TIMEOUT_SECONDS)
if not cl_status:
_log.error('No initial try job results, aborting.')
self.git_cl.run(['set-close'])
return False
if cl_status.status == 'closed':
_log.error('The CL was closed, aborting.')
return False
_log.info('All jobs finished.')
try_results = cl_status.try_job_results
if try_results and self.git_cl.some_failed(try_results):
self.fetch_new_expectations_and_baselines()
self.fetch_wpt_override_expectations()
if self.chromium_git.has_working_directory_changes():
self._generate_manifest()
message = 'Update test expectations and baselines.'
self._commit_changes(message)
self._upload_patchset(message)
return True
def run_commit_queue_for_cl(self):
"""Triggers CQ and either commits or aborts; returns True on success."""
_log.info('Triggering CQ try jobs.')
self.git_cl.run(['try'])
cl_status = self.git_cl.wait_for_try_jobs(
poll_delay_seconds=POLL_DELAY_SECONDS,
timeout_seconds=TIMEOUT_SECONDS,
cq_only=True)
if not cl_status:
self.git_cl.run(['set-close'])
_log.error('Timed out waiting for CQ; aborting.')
return False
if cl_status.status == 'closed':
_log.error('The CL was closed; aborting.')
return False
_log.info('All jobs finished.')
cq_try_results = cl_status.try_job_results
if not cq_try_results:
_log.error('No CQ try job results found.')
self.git_cl.run(['set-close'])
return False
if not self.git_cl.all_success(cq_try_results):
_log.error('CQ appears to have failed; aborting.')
self.git_cl.run(['set-close'])
return False
_log.info(
'CQ appears to have passed; sending to the rubber-stamper bot for '
'CR+1 and commit.')
_log.info(
'If the rubber-stamper bot rejects the CL, you either need to '
'modify the benign file patterns, or manually CR+1 and land the '
'import yourself if it touches code files. See https://chromium.'
'googlesource.com/infra/infra/+/refs/heads/main/go/src/infra/'
'appengine/rubber-stamper/README.md')
# `--send-mail` is required to take the CL out of WIP mode.
self.git_cl.run([
'upload', '-f', '--send-mail', '--enable-auto-submit',
'--reviewers', RUBBER_STAMPER_BOT
])
if self.git_cl.wait_for_closed_status():
_log.info('Update completed.')
return True
_log.error('Cannot submit CL; aborting.')
try:
self.git_cl.run(['set-close'])
except ScriptError as e:
if e.output and 'Conflict: change is merged' in e.output:
_log.error('CL is already merged; treating as success.')
return True
else:
raise e
return False
def blink_try_bots(self):
"""Returns the collection of builders used for updating expectations."""
return self.host.builders.filter_builders(is_try=True)
def parse_args(self, argv):
parser = argparse.ArgumentParser()
parser.description = __doc__
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='log extra details that may be helpful when debugging')
parser.add_argument(
'--ignore-exportable-commits',
action='store_true',
help='do not check for exportable commits that would be clobbered')
parser.add_argument('-r', '--revision', help='target wpt revision')
parser.add_argument(
'--auto-upload',
action='store_true',
help='upload a CL, update expectations, but do NOT trigger CQ')
parser.add_argument(
'--auto-update',
action='store_true',
help='upload a CL, update expectations, and trigger CQ')
parser.add_argument(
'--auto-file-bugs',
action='store_true',
help='file new failures automatically to crbug.com')
parser.add_argument(
'--auth-refresh-token-json',
help='authentication refresh token JSON file used for try jobs, '
'generally not necessary on developer machines')
parser.add_argument(
'--credentials-json',
help='A JSON file with GitHub credentials, '
'generally not necessary on developer machines')
parser.add_argument(
'--monorail-auth-json',
help='A JSON file containing the private key of a service account '
'to access Monorail (crbug.com), only needed when '
'--auto-file-bugs is used')
return parser.parse_args(argv)
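# Hedged usage sketch (not in the original source): a typical manual run of
# the importer with the flags defined above might look like
#
#   wpt-import --verbose --auto-upload --credentials-json creds.json
#
# The 'wpt-import' entry-point name is an assumption taken from the commit
# message template further down; only the flags come from parse_args() itself.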
def checkout_is_okay(self):
if self.chromium_git.has_working_directory_changes():
_log.warning('Checkout is dirty; aborting.')
return False
# TODO(robertma): Add a method in Git to query a range of commits.
local_commits = self.chromium_git.run(
['log', '--oneline', 'origin/main..HEAD'])
if local_commits:
_log.warning('Checkout has local commits before import.')
return True
def apply_exportable_commits_locally(self, local_wpt):
"""Applies exportable Chromium changes to the local WPT repo.
The purpose of this is to avoid clobbering changes that were made in
Chromium but not yet merged upstream. By applying these changes to the
local copy of web-platform-tests before copying files over, we make
it so that the resulting change in Chromium doesn't undo the
previous Chromium change.
Args:
local_wpt: A LocalWPT instance for our local copy of WPT.
Returns:
A list of commits applied (could be empty), or None if any
of the patches could not be applied cleanly.
"""
commits = self.exportable_but_not_exported_commits(local_wpt)
for commit in commits:
_log.info('Applying exportable commit locally:')
_log.info(commit.url())
_log.info('Subject: %s', commit.subject().strip())
# Log a note about the corresponding PR.
# This might not be necessary, and could potentially be removed.
pull_request = self.wpt_github.pr_for_chromium_commit(commit)
if pull_request:
_log.info('PR: %spull/%d', WPT_GH_URL, pull_request.number)
else:
_log.warning('No pull request found.')
error = local_wpt.apply_patch(commit.format_patch())
if error:
_log.error('Commit cannot be applied cleanly:')
_log.error(error)
return None
self.wpt_git.commit_locally_with_message(
'Applying patch %s' % commit.sha)
return commits
def exportable_but_not_exported_commits(self, local_wpt):
"""Returns a list of commits that would be clobbered by importer.
The list contains all exportable but not exported commits, not filtered
by whether they can apply cleanly.
"""
# The errors returned by exportable_commits_over_last_n_commits are
# irrelevant and ignored here, because it tests patches *individually*
# while the importer tries to reapply these patches *cumulatively*.
commits, _ = exportable_commits_over_last_n_commits(
self.host,
local_wpt,
self.wpt_github,
require_clean=False,
verify_merged_pr=True)
return commits
def _generate_manifest(self):
"""Generates MANIFEST.json for imported tests.
Runs the (newly-updated) manifest command if it's found, and then
stages the generated MANIFEST.json in the git index, ready to commit.
"""
_log.info('Generating MANIFEST.json')
WPTManifest.generate_manifest(self.host.port_factory.get(),
self.dest_path)
manifest_path = self.fs.join(self.dest_path, 'MANIFEST.json')
assert self.fs.exists(manifest_path)
manifest_base_path = self.fs.normpath(
self.fs.join(self.dest_path, '..', BASE_MANIFEST_NAME))
self.copyfile(manifest_path, manifest_base_path)
self.chromium_git.add_list([manifest_base_path])
def _clear_out_dest_path(self):
"""Removes all files that are synced with upstream from Chromium WPT.
Instead of relying on TestCopier to overwrite these files, cleaning up
first ensures that if upstream deletes some files, we delete them as well.
"""
_log.info('Cleaning out tests from %s.', self.dest_path)
should_remove = lambda fs, dirname, basename: (
is_file_exportable(fs.relpath(fs.join(dirname, basename), self.finder.chromium_base())))
files_to_delete = self.fs.files_under(
self.dest_path, file_filter=should_remove)
for subpath in files_to_delete:
self.remove(self.finder.path_from_web_tests('external', subpath))
def _commit_changes(self, commit_message):
_log.info('Committing changes.')
self.chromium_git.commit_locally_with_message(commit_message)
def _only_wpt_manifest_changed(self):
changed_files = self.chromium_git.changed_files()
wpt_base_manifest = self.fs.relpath(
self.fs.join(self.dest_path, '..', BASE_MANIFEST_NAME),
self.finder.chromium_base())
return changed_files == [wpt_base_manifest]
def _commit_message(self,
chromium_commit_sha,
import_commit_sha,
locally_applied_commits=None):
message = 'Import {}\n\nUsing wpt-import in Chromium {}.\n'.format(
import_commit_sha, chromium_commit_sha)
if locally_applied_commits:
message += 'With Chromium commits locally applied on WPT:\n'
message += '\n'.join(
str(commit) for commit in locally_applied_commits)
message += '\nNo-Export: true'
return message
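# Illustrative sketch of the generated message (derived directly from the
# code above, not from a real import); <wpt-sha> and <cr-sha> stand in for
# real hashes:
#
#   Import wpt@<wpt-sha>
#
#   Using wpt-import in Chromium <cr-sha>.
#   With Chromium commits locally applied on WPT:
#   <one line per locally applied commit>
#   No-Export: true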
def _delete_orphaned_baselines(self):
_log.info('Deleting any orphaned baselines.')
is_baseline_filter = lambda fs, dirname, basename: is_testharness_baseline(basename)
baselines = self.fs.files_under(
self.dest_path, file_filter=is_baseline_filter)
# Note about possible refactoring:
# - the manifest path could be factored out to a common location, and
# - the logic for reading the manifest could be factored out from here
# and the Port class.
manifest_path = self.finder.path_from_web_tests(
'external', 'wpt', 'MANIFEST.json')
manifest = WPTManifest(self.fs.read_text_file(manifest_path))
wpt_urls = manifest.all_urls()
# Currently baselines for tests with query strings are merged,
# so that the tests foo.html?r=1 and foo.html?r=2 both have the same
# baseline, foo-expected.txt.
# TODO(qyearsley): Remove this when this behavior is fixed.
wpt_urls = [url.split('?')[0] for url in wpt_urls]
wpt_dir = self.finder.path_from_web_tests('external', 'wpt')
for full_path in baselines:
rel_path = self.fs.relpath(full_path, wpt_dir)
if not self._has_corresponding_test(rel_path, wpt_urls):
self.fs.remove(full_path)
def _has_corresponding_test(self, rel_path, wpt_urls):
# TODO(qyearsley): Ensure that this works with platform baselines and
# virtual baselines, and add unit tests.
base = '/' + rel_path.replace('-expected.txt', '')
return any(
(base + ext) in wpt_urls for ext in Port.supported_file_extensions)
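# Example (hedged, for illustration only): for rel_path
# 'css/foo-expected.txt', base becomes '/css/foo' and the check above asks
# whether '/css/foo' + ext appears in wpt_urls for each supported test file
# extension (e.g. '.html').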
def copyfile(self, source, destination):
_log.debug('cp %s %s', source, destination)
self.fs.copyfile(source, destination)
def remove(self, dest):
_log.debug('rm %s', dest)
self.fs.remove(dest)
def _upload_patchset(self, message):
self.git_cl.run(['upload', '--bypass-hooks', '-f', '-t', message])
def _upload_cl(self):
_log.info('Uploading change list.')
directory_owners = self.get_directory_owners()
description = self._cl_description(directory_owners)
sheriff_email = self.sheriff_email()
temp_file, temp_path = self.fs.open_text_tempfile()
temp_file.write(description)
temp_file.close()
self.git_cl.run([
'upload',
'--bypass-hooks',
'-f',
'--message-file',
temp_path,
'--cc',
sheriff_email,
])
self.fs.remove(temp_path)
def get_directory_owners(self):
"""Returns a mapping of email addresses to owners of changed tests."""
_log.info('Gathering directory owners emails to CC.')
changed_files = self.chromium_git.changed_files()
extractor = DirectoryOwnersExtractor(self.host)
return extractor.list_owners(changed_files)
def _cl_description(self, directory_owners):
"""Returns a CL description string.
Args:
directory_owners: A dict mapping tuples of owner names to lists of directories.
"""
# TODO(robertma): Add a method in Git for getting the commit body.
description = self.chromium_git.run(['log', '-1', '--format=%B'])
description += (
'Note to sheriffs: This CL imports external tests and adds\n'
'expectations for those tests; if this CL is large and causes\n'
'a few new failures, please fix the failures by adding new\n'
'lines to TestExpectations rather than reverting. See:\n'
'https://chromium.googlesource.com'
'/chromium/src/+/main/docs/testing/web_platform_tests.md\n\n')
if directory_owners:
description += self._format_directory_owners(
directory_owners) + '\n\n'
# Prevent FindIt from auto-reverting import CLs.
description += 'NOAUTOREVERT=true\n'
# Move any No-Export tag to the end of the description.
description = description.replace('No-Export: true', '')
description = description.replace('\n\n\n\n', '\n\n')
description += 'No-Export: true\n'
# Add the wptrunner MVP tryjobs as blocking trybots, to catch any test
# changes or infrastructure changes from upstream.
#
# If this starts blocking the importer unnecessarily, revert
# https://chromium-review.googlesource.com/c/chromium/src/+/2451504
description += (
'Cq-Include-Trybots: luci.chromium.try:linux-wpt-identity-fyi-rel,'
'linux-wpt-input-fyi-rel')
return description
@staticmethod
def _format_directory_owners(directory_owners):
message_lines = ['Directory owners for changes in this CL:']
for owner_tuple, directories in sorted(directory_owners.items()):
message_lines.append(', '.join(owner_tuple) + ':')
message_lines.extend(' ' + d for d in directories)
return '\n'.join(message_lines)
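# Illustrative output (owners and directories are assumed; the layout comes
# from the code above):
#
#   Directory owners for changes in this CL:
#   alice@chromium.org, bob@chromium.org:
#     external/wpt/css
#   carol@chromium.org:
#     external/wpt/dom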
def sheriff_email(self):
"""Returns the sheriff email address to cc.
This tries to fetch the current ecosystem infra sheriff, but falls back
in case of error.
"""
email = ''
try:
email = self._fetch_ecosystem_infra_sheriff_email()
except (IOError, KeyError, ValueError) as error:
_log.error('Exception while fetching current sheriff: %s', error)
return email or SHERIFF_EMAIL_FALLBACK
def _fetch_ecosystem_infra_sheriff_email(self):
try:
content = self.host.web.get_binary(ROTATIONS_URL)
except NetworkTimeout:
_log.error('Cannot fetch %s', ROTATIONS_URL)
return ''
data = json.loads(content)
if not data.get('emails'):
_log.error(
'No email found for current sheriff. Retrieved content: %s',
content)
return ''
return data['emails'][0]
def fetch_new_expectations_and_baselines(self):
"""Modifies expectation lines and baselines based on try job results.
Assuming that there are some try job results available, this
adds new expectation lines to TestExpectations and downloads new
baselines based on the try job results.
This is the same as invoking the `wpt-update-expectations` script.
"""
_log.info('Adding test expectations lines to TestExpectations.')
self.rebaselined_tests, self.new_test_expectations = (
self._expectations_updater.update_expectations())
def fetch_wpt_override_expectations(self):
"""Modifies WPT Override expectations based on try job results.
Assuming that there are some try job results available, this
adds new expectation lines to WPT Override Expectation files,
e.g. WebLayerWPTOverrideExpectations
This is the same as invoking the `wpt-update-expectations` script.
"""
_log.info('Adding test expectations lines to Override Expectations.')
_, self.new_override_expectations = (
self._android_expectations_updater.update_expectations())
def _get_last_imported_wpt_revision(self):
"""Finds the last imported WPT revision."""
# TODO(robertma): Only match commit subjects.
output = self.chromium_git.most_recent_log_matching(
'^Import wpt@', self.finder.chromium_base())
# No line-start anchor (^) below because of the formatting of output.
result = re.search(r'Import wpt@(\w+)', output)
if result:
return result.group(1)
else:
_log.error('Cannot find last WPT import.')
return None
def send_notifications(self, local_wpt, auto_file_bugs,
monorail_auth_json):
issue = self.git_cl.run(['status', '--field=id']).strip()
patchset = self.git_cl.run(['status', '--field=patch']).strip()
# Construct the notifier here so that any errors won't affect the import.
notifier = ImportNotifier(self.host, self.chromium_git, local_wpt)
notifier.main(
self.last_wpt_revision,
self.wpt_revision,
self.rebaselined_tests,
self.new_test_expectations,
self.new_override_expectations,
issue,
patchset,
dry_run=not auto_file_bugs,
service_account_key_json=monorail_auth_json)
return True
| bsd-3-clause |
aliclark/libquic | src/third_party/protobuf/objectivec/DevTools/pddm_tests.py | 66 | 16785 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2015 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for pddm.py."""
import io
import unittest
import pddm
class TestParsingMacros(unittest.TestCase):
def testParseEmpty(self):
f = io.StringIO(u'')
result = pddm.MacroCollection(f)
self.assertEqual(len(result._macros), 0)
def testParseOne(self):
f = io.StringIO(u"""PDDM-DEFINE foo( )
body""")
result = pddm.MacroCollection(f)
self.assertEqual(len(result._macros), 1)
macro = result._macros.get('foo')
self.assertIsNotNone(macro)
self.assertEquals(macro.name, 'foo')
self.assertEquals(macro.args, tuple())
self.assertEquals(macro.body, 'body')
def testParseGeneral(self):
# Tests multiple defines, spaces in all places, etc.
f = io.StringIO(u"""
PDDM-DEFINE noArgs( )
body1
body2
PDDM-DEFINE-END
PDDM-DEFINE oneArg(foo)
body3
PDDM-DEFINE twoArgs( bar_ , baz )
body4
body5""")
result = pddm.MacroCollection(f)
self.assertEqual(len(result._macros), 3)
macro = result._macros.get('noArgs')
self.assertIsNotNone(macro)
self.assertEquals(macro.name, 'noArgs')
self.assertEquals(macro.args, tuple())
self.assertEquals(macro.body, 'body1\nbody2\n')
macro = result._macros.get('oneArg')
self.assertIsNotNone(macro)
self.assertEquals(macro.name, 'oneArg')
self.assertEquals(macro.args, ('foo',))
self.assertEquals(macro.body, 'body3')
macro = result._macros.get('twoArgs')
self.assertIsNotNone(macro)
self.assertEquals(macro.name, 'twoArgs')
self.assertEquals(macro.args, ('bar_', 'baz'))
self.assertEquals(macro.body, 'body4\nbody5')
# Add into existing collection
f = io.StringIO(u"""
PDDM-DEFINE another(a,b,c)
body1
body2""")
result.ParseInput(f)
self.assertEqual(len(result._macros), 4)
macro = result._macros.get('another')
self.assertIsNotNone(macro)
self.assertEquals(macro.name, 'another')
self.assertEquals(macro.args, ('a', 'b', 'c'))
self.assertEquals(macro.body, 'body1\nbody2')
def testParseDirectiveIssues(self):
test_list = [
# Unknown directive
(u'PDDM-DEFINE foo()\nbody\nPDDM-DEFINED foo\nbaz',
'Hit a line with an unknown directive: '),
# End without begin
(u'PDDM-DEFINE foo()\nbody\nPDDM-DEFINE-END\nPDDM-DEFINE-END\n',
'Got DEFINE-END directive without an active macro: '),
# Line not in macro block
(u'PDDM-DEFINE foo()\nbody\nPDDM-DEFINE-END\nmumble\n',
'Hit a line that wasn\'t a directive and no open macro definition: '),
# Redefine macro
(u'PDDM-DEFINE foo()\nbody\nPDDM-DEFINE foo(a)\nmumble\n',
'Attempt to redefine macro: '),
]
for idx, (input_str, expected_prefix) in enumerate(test_list, 1):
f = io.StringIO(input_str)
try:
result = pddm.MacroCollection(f)
self.fail('Should throw exception, entry %d' % idx)
except pddm.PDDMError as e:
self.assertTrue(e.message.startswith(expected_prefix),
'Entry %d failed: %r' % (idx, e))
def testParseBeginIssues(self):
test_list = [
# 1. No name
(u'PDDM-DEFINE\nmumble',
'Failed to parse macro definition: '),
# 2. No name (with spaces)
(u'PDDM-DEFINE \nmumble',
'Failed to parse macro definition: '),
# 3. No open paren
(u'PDDM-DEFINE foo\nmumble',
'Failed to parse macro definition: '),
# 4. No close paren
(u'PDDM-DEFINE foo(\nmumble',
'Failed to parse macro definition: '),
# 5. No close paren (with args)
(u'PDDM-DEFINE foo(a, b\nmumble',
'Failed to parse macro definition: '),
# 6. No name before args
(u'PDDM-DEFINE (a, b)\nmumble',
'Failed to parse macro definition: '),
# 7. No name before args
(u'PDDM-DEFINE foo bar(a, b)\nmumble',
'Failed to parse macro definition: '),
# 8. Empty arg name
(u'PDDM-DEFINE foo(a, ,b)\nmumble',
'Empty arg name in macro definition: '),
(u'PDDM-DEFINE foo(a,,b)\nmumble',
'Empty arg name in macro definition: '),
# 10. Duplicate name
(u'PDDM-DEFINE foo(a,b,a,c)\nmumble',
'Arg name "a" used more than once in macro definition: '),
# 11. Invalid arg name
(u'PDDM-DEFINE foo(a b,c)\nmumble',
'Invalid arg name "a b" in macro definition: '),
(u'PDDM-DEFINE foo(a.b,c)\nmumble',
'Invalid arg name "a.b" in macro definition: '),
(u'PDDM-DEFINE foo(a-b,c)\nmumble',
'Invalid arg name "a-b" in macro definition: '),
(u'PDDM-DEFINE foo(a,b,c.)\nmumble',
'Invalid arg name "c." in macro definition: '),
# 15. Extra stuff after the name
(u'PDDM-DEFINE foo(a,c) foo\nmumble',
'Failed to parse macro definition: '),
(u'PDDM-DEFINE foo(a,c) foo)\nmumble',
'Failed to parse macro definition: '),
]
for idx, (input_str, expected_prefix) in enumerate(test_list, 1):
f = io.StringIO(input_str)
try:
result = pddm.MacroCollection(f)
self.fail('Should throw exception, entry %d' % idx)
except pddm.PDDMError as e:
self.assertTrue(e.message.startswith(expected_prefix),
'Entry %d failed: %r' % (idx, e))
class TestExpandingMacros(unittest.TestCase):
def testExpandBasics(self):
f = io.StringIO(u"""
PDDM-DEFINE noArgs( )
body1
body2
PDDM-DEFINE-END
PDDM-DEFINE oneArg(a)
body3 a
PDDM-DEFINE-END
PDDM-DEFINE twoArgs(b,c)
body4 b c
body5
PDDM-DEFINE-END
""")
mc = pddm.MacroCollection(f)
test_list = [
(u'noArgs()',
'body1\nbody2\n'),
(u'oneArg(wee)',
'body3 wee\n'),
(u'twoArgs(having some, fun)',
'body4 having some fun\nbody5'),
# One arg, pass empty.
(u'oneArg()',
'body3 \n'),
# Two args, gets empty in each slot.
(u'twoArgs(, empty)',
'body4 empty\nbody5'),
(u'twoArgs(empty, )',
'body4 empty \nbody5'),
(u'twoArgs(, )',
'body4 \nbody5'),
]
for idx, (input_str, expected) in enumerate(test_list, 1):
result = mc.Expand(input_str)
self.assertEqual(result, expected,
'Entry %d --\n Result: %r\n Expected: %r' %
(idx, result, expected))
def testExpandArgOptions(self):
f = io.StringIO(u"""
PDDM-DEFINE bar(a)
a-a$S-a$l-a$L-a$u-a$U
PDDM-DEFINE-END
""")
mc = pddm.MacroCollection(f)
self.assertEqual(mc.Expand('bar(xYz)'), 'xYz- -xYz-xyz-XYz-XYZ')
self.assertEqual(mc.Expand('bar(MnoP)'), 'MnoP- -mnoP-mnop-MnoP-MNOP')
# Test empty
self.assertEqual(mc.Expand('bar()'), '-----')
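# From the expectations above, the arg options appear to behave as follows
# (an inference from these tests, not a statement of the pddm spec):
# a (plain) -> the argument as given; a$S -> the argument replaced by spaces
# (apparently for alignment); a$l -> first letter lowercased; a$L -> all
# lowercase; a$u -> first letter uppercased; a$U -> all uppercase.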
def testExpandSimpleMacroErrors(self):
f = io.StringIO(u"""
PDDM-DEFINE foo(a, b)
<a-z>
PDDM-DEFINE baz(a)
a - a$z
""")
mc = pddm.MacroCollection(f)
test_list = [
# 1. Unknown macro
(u'bar()',
'No macro named "bar".'),
(u'bar(a)',
'No macro named "bar".'),
# 3. Arg mismatch
(u'foo()',
'Expected 2 args, got: "foo()".'),
(u'foo(a b)',
'Expected 2 args, got: "foo(a b)".'),
(u'foo(a,b,c)',
'Expected 2 args, got: "foo(a,b,c)".'),
# 6. Unknown option in expansion
(u'baz(mumble)',
'Unknown arg option "a$z" while expanding "baz(mumble)".'),
]
for idx, (input_str, expected_err) in enumerate(test_list, 1):
try:
result = mc.Expand(input_str)
self.fail('Should throw exception, entry %d' % idx)
except pddm.PDDMError as e:
self.assertEqual(e.message, expected_err,
'Entry %d failed: %r' % (idx, e))
def testExpandReferences(self):
f = io.StringIO(u"""
PDDM-DEFINE StartIt()
foo(abc, def)
foo(ghi, jkl)
PDDM-DEFINE foo(a, b)
bar(a, int)
bar(b, NSString *)
PDDM-DEFINE bar(n, t)
- (t)n;
- (void)set##n$u##:(t)value;
""")
mc = pddm.MacroCollection(f)
expected = """- (int)abc;
- (void)setAbc:(int)value;
- (NSString *)def;
- (void)setDef:(NSString *)value;
- (int)ghi;
- (void)setGhi:(int)value;
- (NSString *)jkl;
- (void)setJkl:(NSString *)value;
"""
self.assertEqual(mc.Expand('StartIt()'), expected)
def testCatchRecursion(self):
f = io.StringIO(u"""
PDDM-DEFINE foo(a, b)
bar(1, a)
bar(2, b)
PDDM-DEFINE bar(x, y)
foo(x, y)
""")
mc = pddm.MacroCollection(f)
try:
result = mc.Expand('foo(A,B)')
self.fail('Should throw exception.')
except pddm.PDDMError as e:
self.assertEqual(e.message,
'Found macro recusion, invoking "foo(1, A)":\n...while expanding "bar(1, A)".\n...while expanding "foo(A,B)".')
class TestParsingSource(unittest.TestCase):
def testBasicParse(self):
test_list = [
# 1. no directives
(u'a\nb\nc',
(3,) ),
# 2. One define
(u'a\n//%PDDM-DEFINE foo()\n//%body\nc',
(1, 2, 1) ),
# 3. Two defines
(u'a\n//%PDDM-DEFINE foo()\n//%body\n//%PDDM-DEFINE bar()\n//%body2\nc',
(1, 4, 1) ),
# 4. Two defines with ends
(u'a\n//%PDDM-DEFINE foo()\n//%body\n//%PDDM-DEFINE-END\n'
u'//%PDDM-DEFINE bar()\n//%body2\n//%PDDM-DEFINE-END\nc',
(1, 6, 1) ),
# 5. One expand, one define (that runs to end of file)
(u'a\n//%PDDM-EXPAND foo()\nbody\n//%PDDM-EXPAND-END\n'
u'//%PDDM-DEFINE bar()\n//%body2\n',
(1, 1, 2) ),
# 6. One define ended with an expand.
(u'a\nb\n//%PDDM-DEFINE bar()\n//%body2\n'
u'//%PDDM-EXPAND bar()\nbody2\n//%PDDM-EXPAND-END\n',
(2, 2, 1) ),
# 7. Two expands (one end), one define.
(u'a\n//%PDDM-EXPAND foo(1)\nbody\n//%PDDM-EXPAND foo(2)\nbody2\n//%PDDM-EXPAND-END\n'
u'//%PDDM-DEFINE foo()\n//%body2\n',
(1, 2, 2) ),
]
for idx, (input_str, line_counts) in enumerate(test_list, 1):
f = io.StringIO(input_str)
sf = pddm.SourceFile(f)
sf._ParseFile()
self.assertEqual(len(sf._sections), len(line_counts),
'Entry %d -- %d != %d' %
(idx, len(sf._sections), len(line_counts)))
for idx2, (sec, expected) in enumerate(zip(sf._sections, line_counts), 1):
self.assertEqual(sec.num_lines_captured, expected,
'Entry %d, section %d -- %d != %d' %
(idx, idx2, sec.num_lines_captured, expected))
def testErrors(self):
test_list = [
# 1. Directive within expansion
(u'//%PDDM-EXPAND a()\n//%PDDM-BOGUS',
'Ran into directive ("//%PDDM-BOGUS", line 2) while in "//%PDDM-EXPAND a()".'),
(u'//%PDDM-EXPAND a()\n//%PDDM-DEFINE a()\n//%body\n',
'Ran into directive ("//%PDDM-DEFINE", line 2) while in "//%PDDM-EXPAND a()".'),
# 3. Expansion ran off end of file
(u'//%PDDM-EXPAND a()\na\nb\n',
'Hit the end of the file while in "//%PDDM-EXPAND a()".'),
# 4. Directive within define
(u'//%PDDM-DEFINE a()\n//%body\n//%PDDM-BOGUS',
'Ran into directive ("//%PDDM-BOGUS", line 3) while in "//%PDDM-DEFINE a()".'),
(u'//%PDDM-DEFINE a()\n//%body\n//%PDDM-EXPAND-END a()',
'Ran into directive ("//%PDDM-EXPAND-END", line 3) while in "//%PDDM-DEFINE a()".'),
# 6. Directives that shouldn't start sections
(u'a\n//%PDDM-DEFINE-END a()\n//a\n',
'Unexpected line 2: "//%PDDM-DEFINE-END a()".'),
(u'a\n//%PDDM-EXPAND-END a()\n//a\n',
'Unexpected line 2: "//%PDDM-EXPAND-END a()".'),
(u'//%PDDM-BOGUS\n//a\n',
'Unexpected line 1: "//%PDDM-BOGUS".'),
]
for idx, (input_str, expected_err) in enumerate(test_list, 1):
f = io.StringIO(input_str)
try:
pddm.SourceFile(f)._ParseFile()
self.fail('Should throw exception, entry %d' % idx)
except pddm.PDDMError as e:
self.assertEqual(e.message, expected_err,
'Entry %d failed: %r' % (idx, e))
class TestProcessingSource(unittest.TestCase):
def testBasics(self):
input_str = u"""
//%PDDM-IMPORT-DEFINES ImportFile
foo
//%PDDM-EXPAND mumble(abc)
//%PDDM-EXPAND-END
bar
//%PDDM-EXPAND mumble(def)
//%PDDM-EXPAND mumble(ghi)
//%PDDM-EXPAND-END
baz
//%PDDM-DEFINE mumble(a_)
//%a_: getName(a_)
"""
input_str2 = u"""
//%PDDM-DEFINE getName(x_)
//%do##x_$u##(int x_);
"""
expected = u"""
//%PDDM-IMPORT-DEFINES ImportFile
foo
//%PDDM-EXPAND mumble(abc)
// This block of code is generated, do not edit it directly.
abc: doAbc(int abc);
//%PDDM-EXPAND-END mumble(abc)
bar
//%PDDM-EXPAND mumble(def)
// This block of code is generated, do not edit it directly.
def: doDef(int def);
//%PDDM-EXPAND mumble(ghi)
// This block of code is generated, do not edit it directly.
ghi: doGhi(int ghi);
//%PDDM-EXPAND-END (2 expansions)
baz
//%PDDM-DEFINE mumble(a_)
//%a_: getName(a_)
"""
expected_stripped = u"""
//%PDDM-IMPORT-DEFINES ImportFile
foo
//%PDDM-EXPAND mumble(abc)
//%PDDM-EXPAND-END mumble(abc)
bar
//%PDDM-EXPAND mumble(def)
//%PDDM-EXPAND mumble(ghi)
//%PDDM-EXPAND-END (2 expansions)
baz
//%PDDM-DEFINE mumble(a_)
//%a_: getName(a_)
"""
def _Resolver(name):
self.assertEqual(name, 'ImportFile')
return io.StringIO(input_str2)
f = io.StringIO(input_str)
sf = pddm.SourceFile(f, _Resolver)
sf.ProcessContent()
self.assertEqual(sf.processed_content, expected)
# Feed it through and nothing should change.
f2 = io.StringIO(sf.processed_content)
sf2 = pddm.SourceFile(f2, _Resolver)
sf2.ProcessContent()
self.assertEqual(sf2.processed_content, expected)
self.assertEqual(sf2.processed_content, sf.processed_content)
# Test stripping (with the original input and expanded version).
f2 = io.StringIO(input_str)
sf2 = pddm.SourceFile(f2)
sf2.ProcessContent(strip_expansion=True)
self.assertEqual(sf2.processed_content, expected_stripped)
f2 = io.StringIO(sf.processed_content)
sf2 = pddm.SourceFile(f2, _Resolver)
sf2.ProcessContent(strip_expansion=True)
self.assertEqual(sf2.processed_content, expected_stripped)
def testProcessFileWithMacroParseError(self):
input_str = u"""
foo
//%PDDM-DEFINE mumble(a_)
//%body
//%PDDM-DEFINE mumble(x_)
//%body2
"""
f = io.StringIO(input_str)
sf = pddm.SourceFile(f)
try:
sf.ProcessContent()
self.fail('Should throw exception.')
except pddm.PDDMError as e:
self.assertEqual(e.message,
'Attempt to redefine macro: "PDDM-DEFINE mumble(x_)"\n'
'...while parsing section that started:\n'
' Line 3: //%PDDM-DEFINE mumble(a_)')
def testProcessFileWithExpandError(self):
input_str = u"""
foo
//%PDDM-DEFINE mumble(a_)
//%body
//%PDDM-EXPAND foobar(x_)
//%PDDM-EXPAND-END
"""
f = io.StringIO(input_str)
sf = pddm.SourceFile(f)
try:
sf.ProcessContent()
self.fail('Should throw exception.')
except pddm.PDDMError as e:
self.assertEqual(e.message,
'No macro named "foobar".\n'
'...while expanding "foobar(x_)" from the section that'
' started:\n Line 5: //%PDDM-EXPAND foobar(x_)')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
tianzhihen/python-mode | pymode/libs/pylama/lint/pylama_pyflakes/pyflakes/messages.py | 42 | 3808 | """
Provide the class Message and its subclasses.
"""
class Message(object):
message = ''
message_args = ()
def __init__(self, filename, loc):
self.filename = filename
self.lineno = loc.lineno
self.col = getattr(loc, 'col_offset', 0)
def __str__(self):
return '%s:%s: %s' % (self.filename, self.lineno,
self.message % self.message_args)
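# Rendered example (illustrative values): a message with filename 'foo.py',
# lineno 10 and message_args ('os',) for the UnusedImport subclass below
# would print as:
#   foo.py:10: 'os' imported but unused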
class UnusedImport(Message):
message = '%r imported but unused'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class RedefinedWhileUnused(Message):
message = 'redefinition of unused %r from line %r'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class RedefinedInListComp(Message):
message = 'list comprehension redefines %r from line %r'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class ImportShadowedByLoopVar(Message):
message = 'import %r from line %r shadowed by loop variable'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class ImportStarUsed(Message):
message = "'from %s import *' used; unable to detect undefined names"
def __init__(self, filename, loc, modname):
Message.__init__(self, filename, loc)
self.message_args = (modname,)
class UndefinedName(Message):
message = 'undefined name %r'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class DoctestSyntaxError(Message):
message = 'syntax error in doctest'
def __init__(self, filename, loc, position=None):
Message.__init__(self, filename, loc)
if position:
(self.lineno, self.col) = position
self.message_args = ()
class UndefinedExport(Message):
message = 'undefined name %r in __all__'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class UndefinedLocal(Message):
message = ('local variable %r (defined in enclosing scope on line %r) '
'referenced before assignment')
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class DuplicateArgument(Message):
message = 'duplicate argument %r in function definition'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class Redefined(Message):
message = 'redefinition of %r from line %r'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class LateFutureImport(Message):
message = 'future import(s) %r after other statements'
def __init__(self, filename, loc, names):
Message.__init__(self, filename, loc)
self.message_args = (names,)
class UnusedVariable(Message):
"""
Indicates that a variable has been explicitly assigned to but not actually
used.
"""
message = 'local variable %r is assigned to but never used'
def __init__(self, filename, loc, names):
Message.__init__(self, filename, loc)
self.message_args = (names,)
class ReturnWithArgsInsideGenerator(Message):
"""
Indicates a return statement with arguments inside a generator.
"""
message = '\'return\' with argument inside generator'
| lgpl-3.0 |
sienatime/python_koans | python2/libs/colorama/win32.py | 86 | 2730 |
# from winbase.h
STDOUT = -11
STDERR = -12
try:
from ctypes import windll
except ImportError:
windll = None
SetConsoleTextAttribute = lambda *_: None
else:
from ctypes import (
byref, Structure, c_char, c_short, c_uint32, c_ushort
)
handles = {
STDOUT: windll.kernel32.GetStdHandle(STDOUT),
STDERR: windll.kernel32.GetStdHandle(STDERR),
}
SHORT = c_short
WORD = c_ushort
DWORD = c_uint32
TCHAR = c_char
class COORD(Structure):
"""struct in wincon.h"""
_fields_ = [
('X', SHORT),
('Y', SHORT),
]
class SMALL_RECT(Structure):
"""struct in wincon.h."""
_fields_ = [
("Left", SHORT),
("Top", SHORT),
("Right", SHORT),
("Bottom", SHORT),
]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
def GetConsoleScreenBufferInfo(stream_id):
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = windll.kernel32.GetConsoleScreenBufferInfo(
handle, byref(csbi))
# This fails when imported via setup.py when installing using 'pip'
# presumably the fix is that running setup.py should not trigger all
# this activity.
# assert success
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id]
success = windll.kernel32.SetConsoleTextAttribute(handle, attrs)
assert success
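# Usage sketch (the attribute value is standard wincon.h knowledge, not
# defined in this file): 0x07 is grey text on a black background, so
#   SetConsoleTextAttribute(STDOUT, 0x07)
# would reset the console colors to the usual default.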
def SetConsoleCursorPosition(stream_id, position):
handle = handles[stream_id]
position = COORD(*position)
success = windll.kernel32.SetConsoleCursorPosition(handle, position)
assert success
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = handles[stream_id]
char = TCHAR(char)
length = DWORD(length)
start = COORD(*start)
num_written = DWORD(0)
# AttributeError: function 'FillConsoleOutputCharacter' not found
# could it just be that my types are wrong?
success = windll.kernel32.FillConsoleOutputCharacter(
handle, char, length, start, byref(num_written))
assert success
return num_written.value
if __name__ == '__main__':
x = GetConsoleScreenBufferInfo(STDOUT)
print(x.dwSize)
print(x.dwCursorPosition)
print(x.wAttributes)
print(x.srWindow)
print(x.dwMaximumWindowSize)
| mit |
2015fallproject/2015fallcase1 | static/Brython3.2.0-20150701-214155/Lib/heapq.py | 628 | 18065 | """Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
a usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, whose size is usually related to the amount of CPU memory),
followed by merging passes for these runs, which merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for fuzzily ordered input.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
from itertools import islice, count, tee, chain
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
else:
returnitem = lastelt
return returnitem
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop."""
if heap and heap[0] < item:
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item
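# Example (illustrative): with heap == [1, 3], heappushpop(heap, 2) returns 1
# and leaves the heap as [2, 3] -- the same result as heappush() followed by
# heappop(), but with a single sift.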
def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
for i in reversed(range(n//2)):
_siftup(x, i)
def _heappushpop_max(heap, item):
"""Maxheap version of a heappush followed by a heappop."""
if heap and item < heap[0]:
item, heap[0] = heap[0], item
_siftup_max(heap, 0)
return item
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
def nlargest(n, iterable):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, reverse=True)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
heapify(result)
_heappushpop = heappushpop
for elem in it:
_heappushpop(result, elem)
result.sort(reverse=True)
return result
def nsmallest(n, iterable):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
_heapify_max(result)
_heappushpop = _heappushpop_max
for elem in it:
_heappushpop(result, elem)
result.sort()
return result
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
'Maxheap variant of _siftup'
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the larger child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of larger child.
rightpos = childpos + 1
if rightpos < endpos and not heap[rightpos] < heap[childpos]:
childpos = rightpos
# Move the larger child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown_max(heap, startpos, pos)
# If available, use C implementation
#_heapq does not exist in brython, so lets just comment it out.
#try:
# from _heapq import *
#except ImportError:
# pass
def merge(*iterables):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
'''
_heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
_len = len
h = []
h_append = h.append
for itnum, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
h_append([next(), itnum, next])
except _StopIteration:
pass
heapify(h)
while _len(h) > 1:
try:
while True:
v, itnum, next = s = h[0]
yield v
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except _StopIteration:
_heappop(h) # remove empty iterator
if h:
# fast case when only a single iterator remains
v, itnum, next = h[0]
yield v
yield from next.__self__
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
# Short-cut for n==1 is to use min() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [min(chain(head, it))]
return [min(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key)[:n]
# When key is none, use simpler decoration
if key is None:
it = zip(iterable, count()) # decorate
result = _nsmallest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(), in2) # decorate
result = _nsmallest(n, it)
return [r[2] for r in result] # undecorate
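# Example (illustrative): nsmallest(2, ['pear', 'apple', 'fig'], key=len)
# returns ['fig', 'pear'] -- the two shortest words, found via the decorated
# (key, index, value) tuples built above.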
_nlargest = nlargest
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
# Short-cut for n==1 is to use max() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [max(chain(head, it))]
return [max(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n]
# When key is none, use simpler decoration
if key is None:
it = zip(iterable, count(0,-1)) # decorate
result = _nlargest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(0,-1), in2) # decorate
result = _nlargest(n, it)
return [r[2] for r in result] # undecorate
if __name__ == "__main__":
# Simple sanity test
heap = []
data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
for item in data:
heappush(heap, item)
sort = []
while heap:
sort.append(heappop(heap))
print(sort)
import doctest
doctest.testmod()
| agpl-3.0 |
RT-Thread/rt-thread | bsp/qemu-vexpress-a9/rtconfig.py | 11 | 2352 | import os
import uuid
def get_mac_address():
mac = uuid.UUID(int=uuid.getnode()).hex[-12:]
return "#define AUTOMAC".join([str(int(e/2) + 1) + ' 0x' + mac[e:e+2] + '\n' for e in range(5,11,2)])
header = '''
#ifndef __MAC_AUTO_GENERATE_H__
#define __MAC_AUTO_GENERATE_H__
/* Automatically generated file; DO NOT EDIT. */
/* mac configure file for RT-Thread qemu */
#define AUTOMAC0 0x52
#define AUTOMAC1 0x54
#define AUTOMAC2 0x00
#define AUTOMAC'''
end = '''
#endif
'''
automac_h_fn = os.path.join(os.path.dirname(__file__), 'drivers', 'automac.h')
with open(automac_h_fn, 'w') as f:
f.write(header + get_mac_address() + end)
# toolchains options
ARCH='arm'
CPU='cortex-a'
CROSS_TOOL='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
# only support GNU GCC compiler.
PLATFORM = 'gcc'
EXEC_PATH = '/usr/bin'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
STRIP = PREFIX + 'strip'
DEVICE = ' -march=armv7-a -marm -msoft-float'
CFLAGS = DEVICE + ' -Wall'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -D__ASSEMBLY__ -I.'
LINK_SCRIPT = 'link.lds'
LFLAGS = DEVICE + ' -nostartfiles -Wl,--gc-sections,-Map=rtthread.map,-cref,-u,system_vectors'+\
' -T %s' % LINK_SCRIPT
CPATH = ''
LPATH = ''
# generate debug info in all cases
AFLAGS += ' -gdwarf-2'
CFLAGS += ' -g -gdwarf-2'
if BUILD == 'debug':
CFLAGS += ' -O0'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS + ' -Woverloaded-virtual -fno-exceptions -fno-rtti'
M_CFLAGS = CFLAGS + ' -mlong-calls -fPIC '
M_CXXFLAGS = CXXFLAGS + ' -mlong-calls -fPIC'
M_LFLAGS = DEVICE + CXXFLAGS + ' -Wl,--gc-sections,-z,max-page-size=0x4' +\
' -shared -fPIC -nostartfiles -nostdlib -static-libgcc'
M_POST_ACTION = STRIP + ' -R .hash $TARGET\n' + SIZE + ' $TARGET \n'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' +\
SIZE + ' $TARGET \n'
| apache-2.0 |
2013Commons/hue | desktop/core/ext-py/tablib-develop/tablib/packages/openpyxl/shared/exc.py | 118 | 2259 | # file openpyxl/shared/exc.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Definitions for openpyxl shared exception classes."""
class CellCoordinatesException(Exception):
"""Error for converting between numeric and A1-style cell references."""
class ColumnStringIndexException(Exception):
"""Error for bad column names in A1-style cell references."""
class DataTypeException(Exception):
"""Error for any data type inconsistencies."""
class NamedRangeException(Exception):
"""Error for badly formatted named ranges."""
class SheetTitleException(Exception):
"""Error for bad sheet names."""
class InsufficientCoordinatesException(Exception):
"""Error for partially specified cell coordinates."""
class OpenModeError(Exception):
"""Error for fileobj opened in non-binary mode."""
class InvalidFileException(Exception):
"""Error for trying to open a non-ooxml file."""
class ReadOnlyWorkbookException(Exception):
"""Error for trying to modify a read-only workbook"""
class MissingNumberFormat(Exception):
"""Error when a referenced number format is not in the stylesheet"""
| apache-2.0 |
loic/django | tests/middleware/test_security.py | 4 | 7727 | from django.http import HttpResponse
from django.test import RequestFactory, SimpleTestCase
from django.test.utils import override_settings
class SecurityMiddlewareTest(SimpleTestCase):
@property
def middleware(self):
from django.middleware.security import SecurityMiddleware
return SecurityMiddleware()
@property
def secure_request_kwargs(self):
return {"wsgi.url_scheme": "https"}
def response(self, *args, **kwargs):
headers = kwargs.pop("headers", {})
response = HttpResponse(*args, **kwargs)
for k, v in headers.items():
response[k] = v
return response
def process_response(self, *args, **kwargs):
request_kwargs = {}
if kwargs.pop("secure", False):
request_kwargs.update(self.secure_request_kwargs)
request = (kwargs.pop("request", None) or
self.request.get("/some/url", **request_kwargs))
ret = self.middleware.process_request(request)
if ret:
return ret
return self.middleware.process_response(
request, self.response(*args, **kwargs))
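    # For example, self.process_response("ok", secure=True) runs the middleware
    # over HttpResponse("ok") for an https:// GET of /some/url and returns
    # whatever the middleware produces (a redirect or the original response).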
request = RequestFactory()
def process_request(self, method, *args, **kwargs):
if kwargs.pop("secure", False):
kwargs.update(self.secure_request_kwargs)
req = getattr(self.request, method.lower())(*args, **kwargs)
return self.middleware.process_request(req)
@override_settings(SECURE_HSTS_SECONDS=3600)
def test_sts_on(self):
"""
With HSTS_SECONDS=3600, the middleware adds
"strict-transport-security: max-age=3600" to the response.
"""
self.assertEqual(
self.process_response(secure=True)["strict-transport-security"],
"max-age=3600")
@override_settings(SECURE_HSTS_SECONDS=3600)
def test_sts_already_present(self):
"""
The middleware will not override a "strict-transport-security" header
already present in the response.
"""
response = self.process_response(
secure=True,
headers={"strict-transport-security": "max-age=7200"})
self.assertEqual(response["strict-transport-security"], "max-age=7200")
    @override_settings(SECURE_HSTS_SECONDS=3600)
def test_sts_only_if_secure(self):
"""
The "strict-transport-security" header is not added to responses going
over an insecure connection.
"""
self.assertNotIn("strict-transport-security", self.process_response(secure=False))
    @override_settings(SECURE_HSTS_SECONDS=0)
def test_sts_off(self):
"""
With HSTS_SECONDS of 0, the middleware does not add a
"strict-transport-security" header to the response.
"""
self.assertNotIn("strict-transport-security", self.process_response(secure=True))
@override_settings(
SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=True)
def test_sts_include_subdomains(self):
"""
With HSTS_SECONDS non-zero and HSTS_INCLUDE_SUBDOMAINS
True, the middleware adds a "strict-transport-security" header with the
"includeSubDomains" tag to the response.
"""
response = self.process_response(secure=True)
self.assertEqual(response["strict-transport-security"], "max-age=600; includeSubDomains")
@override_settings(
SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=False)
def test_sts_no_include_subdomains(self):
"""
With HSTS_SECONDS non-zero and HSTS_INCLUDE_SUBDOMAINS
False, the middleware adds a "strict-transport-security" header without
the "includeSubDomains" tag to the response.
"""
response = self.process_response(secure=True)
self.assertEqual(response["strict-transport-security"], "max-age=600")
@override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
def test_content_type_on(self):
"""
With CONTENT_TYPE_NOSNIFF set to True, the middleware adds
"x-content-type-options: nosniff" header to the response.
"""
self.assertEqual(self.process_response()["x-content-type-options"], "nosniff")
    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
def test_content_type_already_present(self):
"""
The middleware will not override an "x-content-type-options" header
already present in the response.
"""
response = self.process_response(secure=True, headers={"x-content-type-options": "foo"})
self.assertEqual(response["x-content-type-options"], "foo")
@override_settings(SECURE_CONTENT_TYPE_NOSNIFF=False)
def test_content_type_off(self):
"""
With CONTENT_TYPE_NOSNIFF False, the middleware does not add an
"x-content-type-options" header to the response.
"""
self.assertNotIn("x-content-type-options", self.process_response())
@override_settings(SECURE_BROWSER_XSS_FILTER=True)
def test_xss_filter_on(self):
"""
With BROWSER_XSS_FILTER set to True, the middleware adds
"s-xss-protection: 1; mode=block" header to the response.
"""
self.assertEqual(
self.process_response()["x-xss-protection"],
"1; mode=block")
@override_settings(SECURE_BROWSER_XSS_FILTER=True)
def test_xss_filter_already_present(self):
"""
The middleware will not override an "x-xss-protection" header
already present in the response.
"""
response = self.process_response(secure=True, headers={"x-xss-protection": "foo"})
self.assertEqual(response["x-xss-protection"], "foo")
    @override_settings(SECURE_BROWSER_XSS_FILTER=False)
def test_xss_filter_off(self):
"""
With BROWSER_XSS_FILTER set to False, the middleware does not add an
"x-xss-protection" header to the response.
"""
self.assertNotIn("x-xss-protection", self.process_response())
@override_settings(SECURE_SSL_REDIRECT=True)
def test_ssl_redirect_on(self):
"""
With SSL_REDIRECT True, the middleware redirects any non-secure
requests to the https:// version of the same URL.
"""
ret = self.process_request("get", "/some/url?query=string")
self.assertEqual(ret.status_code, 301)
self.assertEqual(
ret["Location"], "https://testserver/some/url?query=string")
@override_settings(SECURE_SSL_REDIRECT=True)
def test_no_redirect_ssl(self):
"""
The middleware does not redirect secure requests.
"""
ret = self.process_request("get", "/some/url", secure=True)
self.assertIsNone(ret)
@override_settings(
SECURE_SSL_REDIRECT=True, SECURE_REDIRECT_EXEMPT=["^insecure/"])
def test_redirect_exempt(self):
"""
The middleware does not redirect requests with URL path matching an
exempt pattern.
"""
ret = self.process_request("get", "/insecure/page")
self.assertIsNone(ret)
@override_settings(
SECURE_SSL_REDIRECT=True, SECURE_SSL_HOST="secure.example.com")
def test_redirect_ssl_host(self):
"""
The middleware redirects to SSL_HOST if given.
"""
ret = self.process_request("get", "/some/url")
self.assertEqual(ret.status_code, 301)
self.assertEqual(ret["Location"], "https://secure.example.com/some/url")
@override_settings(SECURE_SSL_REDIRECT=False)
def test_ssl_redirect_off(self):
"""
With SSL_REDIRECT False, the middleware does no redirect.
"""
ret = self.process_request("get", "/some/url")
self.assertIsNone(ret)
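
# --- Illustrative sketch (not part of the original test module) ---
# A project enabling the behaviors tested above would use settings like these
# (the setting names are real Django settings; the dict is only illustrative).
EXAMPLE_SECURE_SETTINGS = {
    'SECURE_HSTS_SECONDS': 3600,             # Strict-Transport-Security: max-age=3600
    'SECURE_HSTS_INCLUDE_SUBDOMAINS': True,  # ...; includeSubDomains
    'SECURE_CONTENT_TYPE_NOSNIFF': True,     # X-Content-Type-Options: nosniff
    'SECURE_BROWSER_XSS_FILTER': True,       # X-XSS-Protection: 1; mode=block
    'SECURE_SSL_REDIRECT': True,             # 301 http:// -> https://
}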
| bsd-3-clause |
collmot/ardupilot | Tools/LogAnalyzer/tests/TestDupeLogData.py | 21 | 2700 | from __future__ import print_function
from LogAnalyzer import Test,TestResult
import DataflashLog
class TestDupeLogData(Test):
'''test for duplicated data in log, which has been happening on PX4/Pixhawk'''
def __init__(self):
Test.__init__(self)
self.name = "Dupe Log Data"
def __matchSample(self, sample, sampleStartIndex, logdata):
'''return the line number where a match is found, otherwise return False'''
# ignore if all data in sample is the same value
nSame = 0
for s in sample:
if s[1] == sample[0][1]:
nSame += 1
if nSame == 20:
return False
        # compare this sample against the full ATT pitch series
data = logdata.channels["ATT"]["Pitch"].listData
for i in range(sampleStartIndex, len(data)):
#print("Checking against index %d" % i)
if i == sampleStartIndex:
continue # skip matching against ourselves
j = 0
while j<20 and (i+j)<len(data) and data[i+j][1] == sample[j][1]:
#print("### Match found, j=%d, data=%f, sample=%f, log data matched to sample at line %d" % (j,data[i+j][1],sample[j][1],data[i+j][0]))
j += 1
if j == 20: # all samples match
return data[i][0]
return False
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
# this could be made more flexible by not hard-coding to use ATT data, could make it dynamic based on whatever is available as long as it is highly variable
if "ATT" not in logdata.channels:
self.result.status = TestResult.StatusType.UNKNOWN
self.result.statusMessage = "No ATT log data"
return
# pick 10 sample points within the range of ATT data we have
sampleStartIndices = []
attStartIndex = 0
attEndIndex = len(logdata.channels["ATT"]["Pitch"].listData)-1
step = int(attEndIndex / 11)
for i in range(step,attEndIndex-step,step):
sampleStartIndices.append(i)
#print("Dupe data sample point index %d at line %d" % (i, logdata.channels["ATT"]["Pitch"].listData[i][0]))
# get 20 datapoints of pitch from each sample location and check for a match elsewhere
sampleIndex = 0
for i in range(sampleStartIndices[0], len(logdata.channels["ATT"]["Pitch"].listData)):
if i == sampleStartIndices[sampleIndex]:
#print("Checking sample %d" % i)
sample = logdata.channels["ATT"]["Pitch"].listData[i:i+20]
matchedLine = self.__matchSample(sample, i, logdata)
if matchedLine:
#print("Data from line %d found duplicated at line %d" % (sample[0][0],matchedLine))
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = "Duplicate data chunks found in log (%d and %d)" % (sample[0][0],matchedLine)
return
sampleIndex += 1
if sampleIndex >= len(sampleStartIndices):
break
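
# --- Illustrative sketch (not part of the original test) ---
# The test above slides a 20-sample window over the pitch data and looks for an
# identical window elsewhere; this hypothetical helper shows the same idea on a
# plain list of values.
def _example_find_dupe_window(values, window=20):
    '''Return (i, j) of the first identical non-overlapping windows, else None.'''
    for i in range(len(values) - 2 * window + 1):
        for j in range(i + window, len(values) - window + 1):
            if values[i:i + window] == values[j:j + window]:
                return (i, j)
    return None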
| gpl-3.0 |
2015fallproject/2015fallcase2 | static/Brython3.2.0-20150701-214155/Lib/decimal.py | 623 | 230510 | # Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz <aahz at pobox.com>
# and Tim Peters
# This module should be kept in sync with the latest updates of the
# IBM specification as it evolves. Those updates will be treated
# as bug fixes (deviation from the spec is a compatibility, usability
# bug) and will be backported. At this point the spec is stabilizing
# and the updates are becoming fewer, smaller, and less significant.
"""
This is an implementation of decimal floating point arithmetic based on
the General Decimal Arithmetic Specification:
http://speleotrove.com/decimal/decarith.html
and IEEE standard 854-1987:
http://en.wikipedia.org/wiki/IEEE_854-1987
Decimal floating point has finite precision with arbitrarily large bounds.
The purpose of this module is to support arithmetic using familiar
"schoolhouse" rules and to avoid some of the tricky representation
issues associated with binary floating point. The package is especially
useful for financial applications or for contexts where users have
expectations that are at odds with binary floating point (for instance,
in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected
Decimal('0.00')).
Here are some examples of using the decimal module:
>>> from decimal import *
>>> setcontext(ExtendedContext)
>>> Decimal(0)
Decimal('0')
>>> Decimal('1')
Decimal('1')
>>> Decimal('-.0123')
Decimal('-0.0123')
>>> Decimal(123456)
Decimal('123456')
>>> Decimal('123.45e12345678')
Decimal('1.2345E+12345680')
>>> Decimal('1.33') + Decimal('1.27')
Decimal('2.60')
>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41')
Decimal('-2.20')
>>> dig = Decimal(1)
>>> print(dig / Decimal(3))
0.333333333
>>> getcontext().prec = 18
>>> print(dig / Decimal(3))
0.333333333333333333
>>> print(dig.sqrt())
1
>>> print(Decimal(3).sqrt())
1.73205080756887729
>>> print(Decimal(3) ** 123)
4.85192780976896427E+58
>>> inf = Decimal(1) / Decimal(0)
>>> print(inf)
Infinity
>>> neginf = Decimal(-1) / Decimal(0)
>>> print(neginf)
-Infinity
>>> print(neginf + inf)
NaN
>>> print(neginf * inf)
-Infinity
>>> print(dig / 0)
Infinity
>>> getcontext().traps[DivisionByZero] = 1
>>> print(dig / 0)
Traceback (most recent call last):
...
...
...
decimal.DivisionByZero: x / 0
>>> c = Context()
>>> c.traps[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.divide(Decimal(0), Decimal(0))
Decimal('NaN')
>>> c.traps[InvalidOperation] = 1
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> print(c.divide(Decimal(0), Decimal(0)))
Traceback (most recent call last):
...
...
...
decimal.InvalidOperation: 0 / 0
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> c.traps[InvalidOperation] = 0
>>> print(c.divide(Decimal(0), Decimal(0)))
NaN
>>> print(c.flags[InvalidOperation])
1
>>>
"""
__all__ = [
# Two major classes
'Decimal', 'Context',
# Contexts
'DefaultContext', 'BasicContext', 'ExtendedContext',
# Exceptions
'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
'FloatOperation',
# Constants for use in setting up contexts
'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP',
# Functions for manipulating contexts
'setcontext', 'getcontext', 'localcontext',
# Limits for the C version for compatibility
'MAX_PREC', 'MAX_EMAX', 'MIN_EMIN', 'MIN_ETINY',
# C version: compile time choice that enables the thread local context
'HAVE_THREADS'
]
__version__ = '1.70' # Highest version of the spec this complies with
# See http://speleotrove.com/decimal/
import copy as _copy
import math as _math
import numbers as _numbers
import sys
try:
from collections import namedtuple as _namedtuple
DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent')
except ImportError:
DecimalTuple = lambda *args: args
# Rounding
ROUND_DOWN = 'ROUND_DOWN'
ROUND_HALF_UP = 'ROUND_HALF_UP'
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
ROUND_CEILING = 'ROUND_CEILING'
ROUND_FLOOR = 'ROUND_FLOOR'
ROUND_UP = 'ROUND_UP'
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
ROUND_05UP = 'ROUND_05UP'
# Compatibility with the C version
HAVE_THREADS = True
if sys.maxsize == 2**63-1:
MAX_PREC = 999999999999999999
MAX_EMAX = 999999999999999999
MIN_EMIN = -999999999999999999
else:
MAX_PREC = 425000000
MAX_EMAX = 425000000
MIN_EMIN = -425000000
MIN_ETINY = MIN_EMIN - (MAX_PREC-1)
# Errors
class DecimalException(ArithmeticError):
"""Base exception class.
Used exceptions derive from this.
If an exception derives from another exception besides this (such as
Underflow (Inexact, Rounded, Subnormal) that indicates that it is only
called if the others are present. This isn't actually used for
anything, though.
handle -- Called when context._raise_error is called and the
trap_enabler is not set. First argument is self, second is the
context. More arguments can be given, those being after
the explanation in _raise_error (For example,
context._raise_error(NewError, '(-x)!', self._sign) would
call NewError().handle(context, self._sign).)
To define a new exception, it should be sufficient to have it derive
from DecimalException.
"""
def handle(self, context, *args):
pass
class Clamped(DecimalException):
"""Exponent of a 0 changed to fit bounds.
This occurs and signals clamped if the exponent of a result has been
altered in order to fit the constraints of a specific concrete
representation. This may occur when the exponent of a zero result would
be outside the bounds of a representation, or when a large normal
number would have an encoded exponent that cannot be represented. In
this latter case, the exponent is reduced to fit and the corresponding
number of zero digits are appended to the coefficient ("fold-down").
"""
#brython fixme
pass
class InvalidOperation(DecimalException):
"""An invalid operation was performed.
Various bad things cause this:
Something creates a signaling NaN
-INF + INF
0 * (+-)INF
(+-)INF / (+-)INF
x % 0
(+-)INF % x
x._rescale( non-integer )
sqrt(-x) , x > 0
0 ** 0
x ** (non-integer)
x ** (+-)INF
An operand is invalid
The result of the operation after these is a quiet positive NaN,
except when the cause is a signaling NaN, in which case the result is
also a quiet NaN, but with the original sign, and an optional
diagnostic information.
"""
def handle(self, context, *args):
if args:
ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True)
return ans._fix_nan(context)
return _NaN
class ConversionSyntax(InvalidOperation):
"""Trying to convert badly formed string.
This occurs and signals invalid-operation if an string is being
converted to a number and it does not conform to the numeric string
syntax. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionByZero(DecimalException, ZeroDivisionError):
"""Division by 0.
This occurs and signals division-by-zero if division of a finite number
by zero was attempted (during a divide-integer or divide operation, or a
power operation with negative right-hand operand), and the dividend was
not zero.
The result of the operation is [sign,inf], where sign is the exclusive
or of the signs of the operands for divide, or is 1 for an odd power of
-0, for power.
"""
def handle(self, context, sign, *args):
return _SignedInfinity[sign]
class DivisionImpossible(InvalidOperation):
"""Cannot perform the division adequately.
This occurs and signals invalid-operation if the integer result of a
divide-integer or remainder operation had too many digits (would be
longer than precision). The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionUndefined(InvalidOperation, ZeroDivisionError):
"""Undefined result of division.
This occurs and signals invalid-operation if division by zero was
attempted (during a divide-integer, divide, or remainder operation), and
the dividend is also zero. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Inexact(DecimalException):
"""Had to round, losing information.
This occurs and signals inexact whenever the result of an operation is
not exact (that is, it needed to be rounded and any discarded digits
were non-zero), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The inexact signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) was inexact.
"""
#brython fix me
pass
class InvalidContext(InvalidOperation):
"""Invalid context. Unknown rounding, for example.
This occurs and signals invalid-operation if an invalid context was
detected during an operation. This can occur if contexts are not checked
on creation and either the precision exceeds the capability of the
underlying concrete representation or an unknown or unsupported rounding
was specified. These aspects of the context need only be checked when
the values are required to be used. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Rounded(DecimalException):
"""Number got rounded (not necessarily changed during rounding).
This occurs and signals rounded whenever the result of an operation is
rounded (that is, some zero or non-zero digits were discarded from the
coefficient), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The rounded signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) caused a loss of precision.
"""
#brython fix me
pass
class Subnormal(DecimalException):
"""Exponent < Emin before rounding.
This occurs and signals subnormal whenever the result of a conversion or
operation is subnormal (that is, its adjusted exponent is less than
Emin, before any rounding). The result in all cases is unchanged.
The subnormal signal may be tested (or trapped) to determine if a given
or operation (or sequence of operations) yielded a subnormal result.
"""
#brython fix me
pass
class Overflow(Inexact, Rounded):
"""Numerical overflow.
This occurs and signals overflow if the adjusted exponent of a result
(from a conversion or from an operation that is not an attempt to divide
by zero), after rounding, would be greater than the largest value that
can be handled by the implementation (the value Emax).
The result depends on the rounding mode:
For round-half-up and round-half-even (and for round-half-down and
round-up, if implemented), the result of the operation is [sign,inf],
where sign is the sign of the intermediate result. For round-down, the
result is the largest finite number that can be represented in the
current precision, with the sign of the intermediate result. For
round-ceiling, the result is the same as for round-down if the sign of
the intermediate result is 1, or is [0,inf] otherwise. For round-floor,
the result is the same as for round-down if the sign of the intermediate
result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded
will also be raised.
"""
def handle(self, context, sign, *args):
if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
ROUND_HALF_DOWN, ROUND_UP):
return _SignedInfinity[sign]
if sign == 0:
if context.rounding == ROUND_CEILING:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
if sign == 1:
if context.rounding == ROUND_FLOOR:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
class Underflow(Inexact, Rounded, Subnormal):
"""Numerical underflow with result rounded to 0.
This occurs and signals underflow if a result is inexact and the
adjusted exponent of the result would be smaller (more negative) than
the smallest value that can be handled by the implementation (the value
Emin). That is, the result is both inexact and subnormal.
The result after an underflow will be a subnormal number rounded, if
necessary, so that its exponent is not less than Etiny. This may result
in 0 with the sign of the intermediate result and an exponent of Etiny.
In all cases, Inexact, Rounded, and Subnormal will also be raised.
"""
#brython fix me
pass
class FloatOperation(DecimalException, TypeError):
"""Enable stricter semantics for mixing floats and Decimals.
If the signal is not trapped (default), mixing floats and Decimals is
permitted in the Decimal() constructor, context.create_decimal() and
all comparison operators. Both conversion and comparisons are exact.
Any occurrence of a mixed operation is silently recorded by setting
FloatOperation in the context flags. Explicit conversions with
Decimal.from_float() or context.create_decimal_from_float() do not
set the flag.
Otherwise (the signal is trapped), only equality comparisons and explicit
conversions are silent. All other mixed operations raise FloatOperation.
"""
#brython fix me
pass
# List of public traps and flags
_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
Underflow, InvalidOperation, Subnormal, FloatOperation]
# Map conditions (per the spec) to signals
_condition_map = {ConversionSyntax:InvalidOperation,
DivisionImpossible:InvalidOperation,
DivisionUndefined:InvalidOperation,
InvalidContext:InvalidOperation}
# Valid rounding modes
_rounding_modes = (ROUND_DOWN, ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_CEILING,
ROUND_FLOOR, ROUND_UP, ROUND_HALF_DOWN, ROUND_05UP)
##### Context Functions ##################################################
# The getcontext() and setcontext() function manage access to a thread-local
# current context. Py2.4 offers direct support for thread locals. If that
# is not available, use threading.current_thread() which is slower but will
# work for older Pythons. If threads are not part of the build, create a
# mock threading object with threading.local() returning the module namespace.
try:
import threading
except ImportError:
# Python was compiled without threads; create a mock object instead
class MockThreading(object):
def local(self, sys=sys):
return sys.modules[__name__]
threading = MockThreading()
del MockThreading
try:
threading.local
except AttributeError:
# To fix reloading, force it to create a new context
# Old contexts have different exceptions in their dicts, making problems.
if hasattr(threading.current_thread(), '__decimal_context__'):
del threading.current_thread().__decimal_context__
def setcontext(context):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
threading.current_thread().__decimal_context__ = context
def getcontext():
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return threading.current_thread().__decimal_context__
except AttributeError:
context = Context()
threading.current_thread().__decimal_context__ = context
return context
else:
local = threading.local()
if hasattr(local, '__decimal_context__'):
del local.__decimal_context__
def getcontext(_local=local):
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return _local.__decimal_context__
except AttributeError:
context = Context()
_local.__decimal_context__ = context
return context
def setcontext(context, _local=local):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
_local.__decimal_context__ = context
del threading, local # Don't contaminate the namespace
def localcontext(ctx=None):
"""Return a context manager for a copy of the supplied context
Uses a copy of the current context if no context is specified
The returned context manager creates a local decimal context
in a with statement:
def sin(x):
with localcontext() as ctx:
ctx.prec += 2
# Rest of sin calculation algorithm
# uses a precision 2 greater than normal
return +s # Convert result to normal precision
def sin(x):
with localcontext(ExtendedContext):
# Rest of sin calculation algorithm
# uses the Extended Context from the
# General Decimal Arithmetic Specification
return +s # Convert result to normal context
>>> setcontext(DefaultContext)
>>> print(getcontext().prec)
28
>>> with localcontext():
... ctx = getcontext()
... ctx.prec += 2
... print(ctx.prec)
...
30
>>> with localcontext(ExtendedContext):
... print(getcontext().prec)
...
9
>>> print(getcontext().prec)
28
"""
if ctx is None: ctx = getcontext()
return _ContextManager(ctx)
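
# Illustrative sketch (not part of the original module): typical use of the
# context functions defined above.
def _example_context_usage():
    getcontext().prec = 6             # thread-wide precision from here on
    with localcontext() as ctx:
        ctx.prec = 50                 # raised only inside this block
        one_seventh = Decimal(1) / Decimal(7)
    return +one_seventh               # unary plus re-rounds to prec 6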
##### Decimal class #######################################################
# Do not subclass Decimal from numbers.Real and do not register it as such
# (because Decimals are not interoperable with floats). See the notes in
# numbers.py for more detail.
class Decimal(object):
"""Floating point class for decimal arithmetic."""
__slots__ = ('_exp','_int','_sign', '_is_special')
# Generally, the value of the Decimal instance is given by
# (-1)**_sign * _int * 10**_exp
# Special values are signified by _is_special == True
# We're immutable, so use __new__ not __init__
def __new__(cls, value="0", context=None):
"""Create a decimal point instance.
>>> Decimal('3.14') # string input
Decimal('3.14')
>>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent)
Decimal('3.14')
>>> Decimal(314) # int
Decimal('314')
>>> Decimal(Decimal(314)) # another decimal instance
Decimal('314')
>>> Decimal(' 3.14 \\n') # leading and trailing whitespace okay
Decimal('3.14')
"""
# Note that the coefficient, self._int, is actually stored as
# a string rather than as a tuple of digits. This speeds up
# the "digits to integer" and "integer to digits" conversions
# that are used in almost every arithmetic operation on
# Decimals. This is an internal detail: the as_tuple function
# and the Decimal constructor still deal with tuples of
# digits.
self = object.__new__(cls)
# From a string
# REs insist on real strings, so we can too.
if isinstance(value, str):
value=value.strip().lower()
if value.startswith("-"):
self._sign = 1
value=value[1:]
else:
self._sign = 0
if value in ('', 'nan'):
self._is_special = True
self._int = ''
#if m.group('signal'): #figure out what a signaling NaN is later
# self._exp = 'N'
#else:
# self._exp = 'n'
self._exp='n'
return self
if value in ('inf', 'infinity'):
self._int = '0'
self._exp = 'F'
self._is_special = True
return self
import _jsre as re
            # accept digits, an optional fraction, and a signed exponent (e.g. '1.5e-3')
            _m = re.match(r"^\d*\.?\d*(e[+-]?\d+)?$", value)
if not _m:
self._is_special = True
self._int = ''
self._exp='n'
return self
if '.' in value:
intpart, fracpart=value.split('.')
if 'e' in fracpart:
fracpart, exp=fracpart.split('e')
exp=int(exp)
else:
exp=0
#self._int = str(int(intpart+fracpart))
self._int = intpart+fracpart
self._exp = exp - len(fracpart)
self._is_special = False
return self
else:
#is this a pure int?
self._is_special = False
if 'e' in value:
self._int, _exp=value.split('e')
self._exp=int(_exp)
#print(self._int, self._exp)
else:
self._int = value
self._exp = 0
return self
#m = _parser(value.strip())
#if m is None:
if context is None:
context = getcontext()
return context._raise_error(ConversionSyntax,
"Invalid literal for Decimal: %r" % value)
#if m.group('sign') == "-":
# self._sign = 1
#else:
# self._sign = 0
#intpart = m.group('int')
#if intpart is not None:
# # finite number
# fracpart = m.group('frac') or ''
# exp = int(m.group('exp') or '0')
# self._int = str(int(intpart+fracpart))
# self._exp = exp - len(fracpart)
# self._is_special = False
#else:
# diag = m.group('diag')
# if diag is not None:
# # NaN
# self._int = str(int(diag or '0')).lstrip('0')
# if m.group('signal'):
# self._exp = 'N'
# else:
# self._exp = 'n'
# else:
# # infinity
# self._int = '0'
# self._exp = 'F'
# self._is_special = True
#return self
# From an integer
if isinstance(value, int):
if value >= 0:
self._sign = 0
else:
self._sign = 1
self._exp = 0
self._int = str(abs(value))
self._is_special = False
return self
# From another decimal
if isinstance(value, Decimal):
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
# From an internal working value
if isinstance(value, _WorkRep):
self._sign = value.sign
self._int = str(value.int)
self._exp = int(value.exp)
self._is_special = False
return self
# tuple/list conversion (possibly from as_tuple())
if isinstance(value, (list,tuple)):
if len(value) != 3:
raise ValueError('Invalid tuple size in creation of Decimal '
'from list or tuple. The list or tuple '
'should have exactly three elements.')
# process sign. The isinstance test rejects floats
if not (isinstance(value[0], int) and value[0] in (0,1)):
raise ValueError("Invalid sign. The first value in the tuple "
"should be an integer; either 0 for a "
"positive number or 1 for a negative number.")
self._sign = value[0]
if value[2] == 'F':
# infinity: value[1] is ignored
self._int = '0'
self._exp = value[2]
self._is_special = True
else:
# process and validate the digits in value[1]
digits = []
for digit in value[1]:
if isinstance(digit, int) and 0 <= digit <= 9:
# skip leading zeros
if digits or digit != 0:
digits.append(digit)
else:
raise ValueError("The second value in the tuple must "
"be composed of integers in the range "
"0 through 9.")
if value[2] in ('n', 'N'):
# NaN: digits form the diagnostic
self._int = ''.join(map(str, digits))
self._exp = value[2]
self._is_special = True
elif isinstance(value[2], int):
# finite number: digits give the coefficient
self._int = ''.join(map(str, digits or [0]))
self._exp = value[2]
self._is_special = False
else:
raise ValueError("The third value in the tuple must "
"be an integer, or one of the "
"strings 'F', 'n', 'N'.")
return self
if isinstance(value, float):
if context is None:
context = getcontext()
context._raise_error(FloatOperation,
"strict semantics for mixing floats and Decimals are "
"enabled")
value = Decimal.from_float(value)
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
raise TypeError("Cannot convert %r to Decimal" % value)
# @classmethod, but @decorator is not valid Python 2.3 syntax, so
# don't use it (see notes on Py2.3 compatibility at top of file)
def from_float(cls, f):
"""Converts a float to a decimal number, exactly.
Note that Decimal.from_float(0.1) is not the same as Decimal('0.1').
Since 0.1 is not exactly representable in binary floating point, the
value is stored as the nearest representable value which is
0x1.999999999999ap-4. The exact equivalent of the value in decimal
is 0.1000000000000000055511151231257827021181583404541015625.
>>> Decimal.from_float(0.1)
Decimal('0.1000000000000000055511151231257827021181583404541015625')
>>> Decimal.from_float(float('nan'))
Decimal('NaN')
>>> Decimal.from_float(float('inf'))
Decimal('Infinity')
>>> Decimal.from_float(-float('inf'))
Decimal('-Infinity')
>>> Decimal.from_float(-0.0)
Decimal('-0')
"""
if isinstance(f, int): # handle integer inputs
return cls(f)
if not isinstance(f, float):
raise TypeError("argument must be int or float.")
if _math.isinf(f) or _math.isnan(f):
return cls(repr(f))
if _math.copysign(1.0, f) == 1.0:
sign = 0
else:
sign = 1
n, d = abs(f).as_integer_ratio()
k = d.bit_length() - 1
result = _dec_from_triple(sign, str(n*5**k), -k)
if cls is Decimal:
return result
else:
return cls(result)
from_float = classmethod(from_float)
def _isnan(self):
"""Returns whether the number is not actually one.
0 if a number
1 if NaN
2 if sNaN
"""
if self._is_special:
exp = self._exp
if exp == 'n':
return 1
elif exp == 'N':
return 2
return 0
def _isinfinity(self):
"""Returns whether the number is infinite
0 if finite or not a number
1 if +INF
-1 if -INF
"""
if self._exp == 'F':
if self._sign:
return -1
return 1
return 0
def _check_nans(self, other=None, context=None):
"""Returns whether the number is not actually one.
if self, other are sNaN, signal
if self, other are NaN return nan
return 0
Done before operations.
"""
self_is_nan = self._isnan()
if other is None:
other_is_nan = False
else:
other_is_nan = other._isnan()
if self_is_nan or other_is_nan:
if context is None:
context = getcontext()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if self_is_nan:
return self._fix_nan(context)
return other._fix_nan(context)
return 0
def _compare_check_nans(self, other, context):
"""Version of _check_nans used for the signaling comparisons
compare_signal, __le__, __lt__, __ge__, __gt__.
Signal InvalidOperation if either self or other is a (quiet
or signaling) NaN. Signaling NaNs take precedence over quiet
NaNs.
Return 0 if neither operand is a NaN.
"""
if context is None:
context = getcontext()
if self._is_special or other._is_special:
if self.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
self)
elif other.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
other)
elif self.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
self)
elif other.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
other)
return 0
def __bool__(self):
"""Return True if self is nonzero; otherwise return False.
NaNs and infinities are considered nonzero.
"""
return self._is_special or self._int != '0'
def _cmp(self, other):
"""Compare the two non-NaN decimal instances self and other.
Returns -1 if self < other, 0 if self == other and 1
if self > other. This routine is for internal use only."""
if self._is_special or other._is_special:
self_inf = self._isinfinity()
other_inf = other._isinfinity()
if self_inf == other_inf:
return 0
elif self_inf < other_inf:
return -1
else:
return 1
# check for zeros; Decimal('0') == Decimal('-0')
if not self:
if not other:
return 0
else:
return -((-1)**other._sign)
if not other:
return (-1)**self._sign
# If different signs, neg one is less
if other._sign < self._sign:
return -1
if self._sign < other._sign:
return 1
self_adjusted = self.adjusted()
other_adjusted = other.adjusted()
if self_adjusted == other_adjusted:
self_padded = self._int + '0'*(self._exp - other._exp)
other_padded = other._int + '0'*(other._exp - self._exp)
if self_padded == other_padded:
return 0
elif self_padded < other_padded:
return -(-1)**self._sign
else:
return (-1)**self._sign
elif self_adjusted > other_adjusted:
return (-1)**self._sign
else: # self_adjusted < other_adjusted
return -((-1)**self._sign)
# Note: The Decimal standard doesn't cover rich comparisons for
# Decimals. In particular, the specification is silent on the
# subject of what should happen for a comparison involving a NaN.
# We take the following approach:
#
# == comparisons involving a quiet NaN always return False
# != comparisons involving a quiet NaN always return True
# == or != comparisons involving a signaling NaN signal
# InvalidOperation, and return False or True as above if the
# InvalidOperation is not trapped.
# <, >, <= and >= comparisons involving a (quiet or signaling)
# NaN signal InvalidOperation, and return False if the
# InvalidOperation is not trapped.
#
# This behavior is designed to conform as closely as possible to
# that specified by IEEE 754.
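    # For example, with the InvalidOperation trap disabled:
    #   Decimal('NaN') == Decimal('1')  -> False
    #   Decimal('NaN') != Decimal('1')  -> True
    #   Decimal('NaN') <  Decimal('1')  -> False (InvalidOperation is flagged)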
def __eq__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return False
return self._cmp(other) == 0
def __ne__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return True
return self._cmp(other) != 0
def __lt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) < 0
def __le__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) <= 0
def __gt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) > 0
def __ge__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) >= 0
def compare(self, other, context=None):
"""Compares one to another.
-1 => a < b
0 => a = b
1 => a > b
NaN => one is NaN
Like __cmp__, but returns Decimal instances.
"""
other = _convert_other(other, raiseit=True)
# Compare(NaN, NaN) = NaN
if (self._is_special or other and other._is_special):
ans = self._check_nans(other, context)
if ans:
return ans
return Decimal(self._cmp(other))
def __hash__(self):
"""x.__hash__() <==> hash(x)"""
# In order to make sure that the hash of a Decimal instance
# agrees with the hash of a numerically equal integer, float
# or Fraction, we follow the rules for numeric hashes outlined
# in the documentation. (See library docs, 'Built-in Types').
if self._is_special:
if self.is_snan():
raise TypeError('Cannot hash a signaling NaN value.')
elif self.is_nan():
return _PyHASH_NAN
else:
if self._sign:
return -_PyHASH_INF
else:
return _PyHASH_INF
if self._exp >= 0:
exp_hash = pow(10, self._exp, _PyHASH_MODULUS)
else:
exp_hash = pow(_PyHASH_10INV, -self._exp, _PyHASH_MODULUS)
hash_ = int(self._int) * exp_hash % _PyHASH_MODULUS
ans = hash_ if self >= 0 else -hash_
return -2 if ans == -1 else ans
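    # For example, hash(Decimal('1.5')) == hash(1.5) and
    # hash(Decimal(100)) == hash(100), so numerically equal values hash alike.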
def as_tuple(self):
"""Represents the number as a triple tuple.
To show the internals exactly as they are.
"""
return DecimalTuple(self._sign, tuple(map(int, self._int)), self._exp)
def __repr__(self):
"""Represents the number as an instance of Decimal."""
# Invariant: eval(repr(d)) == d
return "Decimal('%s')" % str(self)
def __str__(self, eng=False, context=None):
"""Return string representation of the number in scientific notation.
Captures all of the information in the underlying representation.
"""
sign = ['', '-'][self._sign]
if self._is_special:
if self._exp == 'F':
return sign + 'Infinity'
elif self._exp == 'n':
return sign + 'NaN' + self._int
else: # self._exp == 'N'
return sign + 'sNaN' + self._int
# number of digits of self._int to left of decimal point
leftdigits = self._exp + len(self._int)
# dotplace is number of digits of self._int to the left of the
# decimal point in the mantissa of the output string (that is,
# after adjusting the exponent)
if self._exp <= 0 and leftdigits > -6:
# no exponent required
dotplace = leftdigits
elif not eng:
# usual scientific notation: 1 digit on left of the point
dotplace = 1
elif self._int == '0':
# engineering notation, zero
dotplace = (leftdigits + 1) % 3 - 1
else:
# engineering notation, nonzero
dotplace = (leftdigits - 1) % 3 + 1
if dotplace <= 0:
intpart = '0'
fracpart = '.' + '0'*(-dotplace) + self._int
elif dotplace >= len(self._int):
intpart = self._int+'0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace]
fracpart = '.' + self._int[dotplace:]
if leftdigits == dotplace:
exp = ''
else:
if context is None:
context = getcontext()
exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace)
return sign + intpart + fracpart + exp
def to_eng_string(self, context=None):
"""Convert to engineering-type string.
Engineering notation has an exponent which is a multiple of 3, so there
are up to 3 digits left of the decimal place.
Same rules for when in exponential and when as a value as in __str__.
"""
return self.__str__(eng=True, context=context)
def __neg__(self, context=None):
"""Returns a copy with the sign switched.
Rounds, if it has reason.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# -Decimal('0') is Decimal('0'), not Decimal('-0'), except
# in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = self.copy_negate()
return ans._fix(context)
def __pos__(self, context=None):
"""Returns a copy, unless it is a sNaN.
        Rounds the number (if it has more than precision digits)
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# + (-0) = 0, except in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = Decimal(self)
return ans._fix(context)
def __abs__(self, round=True, context=None):
"""Returns the absolute value of self.
If the keyword argument 'round' is false, do not round. The
expression self.__abs__(round=False) is equivalent to
self.copy_abs().
"""
if not round:
return self.copy_abs()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._sign:
ans = self.__neg__(context=context)
else:
ans = self.__pos__(context=context)
return ans
def __add__(self, other, context=None):
"""Returns self + other.
-INF + INF (or the reverse) cause InvalidOperation errors.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
# If both INF, same sign => same as both, opposite => error.
if self._sign != other._sign and other._isinfinity():
return context._raise_error(InvalidOperation, '-INF + INF')
return Decimal(self)
if other._isinfinity():
return Decimal(other) # Can't both be infinity here
exp = min(self._exp, other._exp)
negativezero = 0
if context.rounding == ROUND_FLOOR and self._sign != other._sign:
# If the answer is 0, the sign should be negative, in this case.
negativezero = 1
if not self and not other:
sign = min(self._sign, other._sign)
if negativezero:
sign = 1
ans = _dec_from_triple(sign, '0', exp)
ans = ans._fix(context)
return ans
if not self:
exp = max(exp, other._exp - context.prec-1)
ans = other._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
if not other:
exp = max(exp, self._exp - context.prec-1)
ans = self._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
op1, op2 = _normalize(op1, op2, context.prec)
result = _WorkRep()
if op1.sign != op2.sign:
# Equal and opposite
if op1.int == op2.int:
ans = _dec_from_triple(negativezero, '0', exp)
ans = ans._fix(context)
return ans
if op1.int < op2.int:
op1, op2 = op2, op1
# OK, now abs(op1) > abs(op2)
if op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = op2.sign, op1.sign
else:
result.sign = 0
# So we know the sign, and op1 > 0.
elif op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = (0, 0)
else:
result.sign = 0
# Now, op1 > abs(op2) > 0
if op2.sign == 0:
result.int = op1.int + op2.int
else:
result.int = op1.int - op2.int
result.exp = op1.exp
ans = Decimal(result)
ans = ans._fix(context)
return ans
__radd__ = __add__
def __sub__(self, other, context=None):
"""Return self - other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context=context)
if ans:
return ans
# self - other is computed as self + other.copy_negate()
return self.__add__(other.copy_negate(), context=context)
def __rsub__(self, other, context=None):
"""Return other - self"""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__sub__(self, context=context)
def __mul__(self, other, context=None):
"""Return self * other.
(+-) INF * 0 (or its reverse) raise InvalidOperation.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
resultsign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if not other:
return context._raise_error(InvalidOperation, '(+-)INF * 0')
return _SignedInfinity[resultsign]
if other._isinfinity():
if not self:
return context._raise_error(InvalidOperation, '0 * (+-)INF')
return _SignedInfinity[resultsign]
resultexp = self._exp + other._exp
# Special case for multiplying by zero
if not self or not other:
ans = _dec_from_triple(resultsign, '0', resultexp)
# Fixing in case the exponent is out of bounds
ans = ans._fix(context)
return ans
# Special case for multiplying by power of 10
if self._int == '1':
ans = _dec_from_triple(resultsign, other._int, resultexp)
ans = ans._fix(context)
return ans
if other._int == '1':
ans = _dec_from_triple(resultsign, self._int, resultexp)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp)
ans = ans._fix(context)
return ans
__rmul__ = __mul__
def __truediv__(self, other, context=None):
"""Return self / other."""
other = _convert_other(other)
if other is NotImplemented:
return NotImplemented
if context is None:
context = getcontext()
sign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity() and other._isinfinity():
return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')
if self._isinfinity():
return _SignedInfinity[sign]
if other._isinfinity():
context._raise_error(Clamped, 'Division by infinity')
return _dec_from_triple(sign, '0', context.Etiny())
# Special cases for zeroes
if not other:
if not self:
return context._raise_error(DivisionUndefined, '0 / 0')
return context._raise_error(DivisionByZero, 'x / 0', sign)
if not self:
exp = self._exp - other._exp
coeff = 0
else:
# OK, so neither = 0, INF or NaN
shift = len(other._int) - len(self._int) + context.prec + 1
exp = self._exp - other._exp - shift
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if shift >= 0:
coeff, remainder = divmod(op1.int * 10**shift, op2.int)
else:
coeff, remainder = divmod(op1.int, op2.int * 10**-shift)
if remainder:
# result is not exact; adjust to ensure correct rounding
if coeff % 5 == 0:
coeff += 1
else:
# result is exact; get as close to ideal exponent as possible
ideal_exp = self._exp - other._exp
while exp < ideal_exp and coeff % 10 == 0:
coeff //= 10
exp += 1
ans = _dec_from_triple(sign, str(coeff), exp)
return ans._fix(context)
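    # For example, with prec=9, Decimal(1) / Decimal(3) gives
    # Decimal('0.333333333'); the "coeff % 5" nudge above keeps an inexact
    # quotient from being mistaken for an exact halfway case during rounding.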
def _divide(self, other, context):
"""Return (self // other, self % other), to context.prec precision.
Assumes that neither self nor other is a NaN, that self is not
infinite and that other is nonzero.
"""
sign = self._sign ^ other._sign
if other._isinfinity():
ideal_exp = self._exp
else:
ideal_exp = min(self._exp, other._exp)
expdiff = self.adjusted() - other.adjusted()
if not self or other._isinfinity() or expdiff <= -2:
return (_dec_from_triple(sign, '0', 0),
self._rescale(ideal_exp, context.rounding))
if expdiff <= context.prec:
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
if q < 10**context.prec:
return (_dec_from_triple(sign, str(q), 0),
_dec_from_triple(self._sign, str(r), ideal_exp))
# Here the quotient is too large to be representable
ans = context._raise_error(DivisionImpossible,
'quotient too large in //, % or divmod')
return ans, ans
def __rtruediv__(self, other, context=None):
"""Swaps self/other and returns __truediv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__truediv__(self, context=context)
def __divmod__(self, other, context=None):
"""
Return (self // other, self % other)
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return (ans, ans)
sign = self._sign ^ other._sign
if self._isinfinity():
if other._isinfinity():
ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)')
return ans, ans
else:
return (_SignedInfinity[sign],
context._raise_error(InvalidOperation, 'INF % x'))
if not other:
if not self:
ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)')
return ans, ans
else:
return (context._raise_error(DivisionByZero, 'x // 0', sign),
context._raise_error(InvalidOperation, 'x % 0'))
quotient, remainder = self._divide(other, context)
remainder = remainder._fix(context)
return quotient, remainder
def __rdivmod__(self, other, context=None):
"""Swaps self/other and returns __divmod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__divmod__(self, context=context)
def __mod__(self, other, context=None):
"""
self % other
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
return context._raise_error(InvalidOperation, 'INF % x')
elif not other:
if self:
return context._raise_error(InvalidOperation, 'x % 0')
else:
return context._raise_error(DivisionUndefined, '0 % 0')
remainder = self._divide(other, context)[1]
remainder = remainder._fix(context)
return remainder
def __rmod__(self, other, context=None):
"""Swaps self/other and returns __mod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__mod__(self, context=context)
def remainder_near(self, other, context=None):
"""
        Remainder nearest to 0, such that abs(remainder_near) <= abs(other)/2
"""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
# self == +/-infinity -> InvalidOperation
if self._isinfinity():
return context._raise_error(InvalidOperation,
'remainder_near(infinity, x)')
# other == 0 -> either InvalidOperation or DivisionUndefined
if not other:
if self:
return context._raise_error(InvalidOperation,
'remainder_near(x, 0)')
else:
return context._raise_error(DivisionUndefined,
'remainder_near(0, 0)')
# other = +/-infinity -> remainder = self
if other._isinfinity():
ans = Decimal(self)
return ans._fix(context)
# self = 0 -> remainder = self, with ideal exponent
ideal_exponent = min(self._exp, other._exp)
if not self:
ans = _dec_from_triple(self._sign, '0', ideal_exponent)
return ans._fix(context)
# catch most cases of large or small quotient
expdiff = self.adjusted() - other.adjusted()
if expdiff >= context.prec + 1:
# expdiff >= prec+1 => abs(self/other) > 10**prec
return context._raise_error(DivisionImpossible)
if expdiff <= -2:
# expdiff <= -2 => abs(self/other) < 0.1
ans = self._rescale(ideal_exponent, context.rounding)
return ans._fix(context)
# adjust both arguments to have the same exponent, then divide
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
# remainder is r*10**ideal_exponent; other is +/-op2.int *
# 10**ideal_exponent. Apply correction to ensure that
# abs(remainder) <= abs(other)/2
if 2*r + (q&1) > op2.int:
r -= op2.int
q += 1
if q >= 10**context.prec:
return context._raise_error(DivisionImpossible)
# result has same sign as self unless r is negative
sign = self._sign
if r < 0:
sign = 1-sign
r = -r
ans = _dec_from_triple(sign, str(r), ideal_exponent)
return ans._fix(context)
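    # For example, Decimal(7).remainder_near(Decimal(4)) is Decimal('-1'):
    # the quotient rounds to 2, and 7 - 2*4 = -1 is the remainder nearest 0.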
def __floordiv__(self, other, context=None):
"""self // other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if other._isinfinity():
return context._raise_error(InvalidOperation, 'INF // INF')
else:
return _SignedInfinity[self._sign ^ other._sign]
if not other:
if self:
return context._raise_error(DivisionByZero, 'x // 0',
self._sign ^ other._sign)
else:
return context._raise_error(DivisionUndefined, '0 // 0')
return self._divide(other, context)[0]
def __rfloordiv__(self, other, context=None):
"""Swaps self/other and returns __floordiv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__floordiv__(self, context=context)
def __float__(self):
"""Float representation."""
if self._isnan():
if self.is_snan():
raise ValueError("Cannot convert signaling NaN to float")
s = "-nan" if self._sign else "nan"
else:
s = str(self)
return float(s)
def __int__(self):
"""Converts self to an int, truncating if necessary."""
if self._is_special:
if self._isnan():
raise ValueError("Cannot convert NaN to integer")
elif self._isinfinity():
raise OverflowError("Cannot convert infinity to integer")
s = (-1)**self._sign
if self._exp >= 0:
return s*int(self._int)*10**self._exp
else:
return s*int(self._int[:self._exp] or '0')
__trunc__ = __int__
def real(self):
return self
real = property(real)
def imag(self):
return Decimal(0)
imag = property(imag)
def conjugate(self):
return self
def __complex__(self):
return complex(float(self))
def _fix_nan(self, context):
"""Decapitate the payload of a NaN to fit the context"""
payload = self._int
# maximum length of payload is precision if clamp=0,
# precision-1 if clamp=1.
max_payload_len = context.prec - context.clamp
if len(payload) > max_payload_len:
payload = payload[len(payload)-max_payload_len:].lstrip('0')
return _dec_from_triple(self._sign, payload, self._exp, True)
return Decimal(self)
def _fix(self, context):
"""Round if it is necessary to keep self within prec precision.
Rounds and fixes the exponent. Does not raise on a sNaN.
Arguments:
self - Decimal instance
context - context used.
"""
if self._is_special:
if self._isnan():
# decapitate payload if necessary
return self._fix_nan(context)
else:
# self is +/-Infinity; return unaltered
return Decimal(self)
# if self is zero then exponent should be between Etiny and
# Emax if clamp==0, and between Etiny and Etop if clamp==1.
Etiny = context.Etiny()
Etop = context.Etop()
if not self:
exp_max = [context.Emax, Etop][context.clamp]
new_exp = min(max(self._exp, Etiny), exp_max)
if new_exp != self._exp:
context._raise_error(Clamped)
return _dec_from_triple(self._sign, '0', new_exp)
else:
return Decimal(self)
# exp_min is the smallest allowable exponent of the result,
# equal to max(self.adjusted()-context.prec+1, Etiny)
exp_min = len(self._int) + self._exp - context.prec
if exp_min > Etop:
# overflow: exp_min > Etop iff self.adjusted() > Emax
ans = context._raise_error(Overflow, 'above Emax', self._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
self_is_subnormal = exp_min < Etiny
if self_is_subnormal:
exp_min = Etiny
# round if self has too many digits
if self._exp < exp_min:
digits = len(self._int) + self._exp - exp_min
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp_min-1)
digits = 0
rounding_method = self._pick_rounding_function[context.rounding]
changed = rounding_method(self, digits)
coeff = self._int[:digits] or '0'
if changed > 0:
coeff = str(int(coeff)+1)
if len(coeff) > context.prec:
coeff = coeff[:-1]
exp_min += 1
# check whether the rounding pushed the exponent out of range
if exp_min > Etop:
ans = context._raise_error(Overflow, 'above Emax', self._sign)
else:
ans = _dec_from_triple(self._sign, coeff, exp_min)
# raise the appropriate signals, taking care to respect
# the precedence described in the specification
if changed and self_is_subnormal:
context._raise_error(Underflow)
if self_is_subnormal:
context._raise_error(Subnormal)
if changed:
context._raise_error(Inexact)
context._raise_error(Rounded)
if not ans:
# raise Clamped on underflow to 0
context._raise_error(Clamped)
return ans
if self_is_subnormal:
context._raise_error(Subnormal)
# fold down if clamp == 1 and self has too few digits
if context.clamp == 1 and self._exp > Etop:
context._raise_error(Clamped)
self_padded = self._int + '0'*(self._exp - Etop)
return _dec_from_triple(self._sign, self_padded, Etop)
# here self was representable to begin with; return unchanged
return Decimal(self)
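# Illustrative sketch (added commentary): _fix is what ties every result to
# a context. Context.plus applies it to a single operand, so with a
# three-digit context:
#
#   >>> Context(prec=3).plus(Decimal('1234'))   # signals Inexact and Rounded
#   Decimal('1.23E+3')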
# for each of the rounding functions below:
# self is a finite, nonzero Decimal
# prec is an integer satisfying 0 <= prec < len(self._int)
#
# each function returns either -1, 0, or 1, as follows:
# 1 indicates that self should be rounded up (away from zero)
# 0 indicates that self should be truncated, and that all the
# digits to be truncated are zeros (so the value is unchanged)
# -1 indicates that there are nonzero digits to be truncated
def _round_down(self, prec):
"""Also known as round-towards-0, truncate."""
if _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_up(self, prec):
"""Rounds away from 0."""
return -self._round_down(prec)
def _round_half_up(self, prec):
"""Rounds 5 up (away from 0)"""
if self._int[prec] in '56789':
return 1
elif _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_half_down(self, prec):
"""Round 5 down"""
if _exact_half(self._int, prec):
return -1
else:
return self._round_half_up(prec)
def _round_half_even(self, prec):
"""Round 5 to even, rest to nearest."""
if _exact_half(self._int, prec) and \
(prec == 0 or self._int[prec-1] in '02468'):
return -1
else:
return self._round_half_up(prec)
def _round_ceiling(self, prec):
"""Rounds up (not away from 0 if negative.)"""
if self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_floor(self, prec):
"""Rounds down (not towards 0 if negative)"""
if not self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_05up(self, prec):
"""Round down unless digit prec-1 is 0 or 5."""
if prec and self._int[prec-1] not in '05':
return self._round_down(prec)
else:
return -self._round_down(prec)
_pick_rounding_function = dict(
ROUND_DOWN = _round_down,
ROUND_UP = _round_up,
ROUND_HALF_UP = _round_half_up,
ROUND_HALF_DOWN = _round_half_down,
ROUND_HALF_EVEN = _round_half_even,
ROUND_CEILING = _round_ceiling,
ROUND_FLOOR = _round_floor,
ROUND_05UP = _round_05up,
)
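# Illustrative sketch (added commentary): the table above is consulted by
# _fix and _rescale. The difference between modes is easiest to see on an
# exact halfway case:
#
#   >>> Decimal('7.325').quantize(Decimal('.01'), rounding=ROUND_HALF_EVEN)
#   Decimal('7.32')
#   >>> Decimal('7.325').quantize(Decimal('.01'), rounding=ROUND_HALF_UP)
#   Decimal('7.33')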
def __round__(self, n=None):
"""Round self to the nearest integer, or to a given precision.
If only one argument is supplied, round a finite Decimal
instance self to the nearest integer. If self is infinite or
a NaN then a Python exception is raised. If self is finite
and lies exactly halfway between two integers then it is
rounded to the integer with even last digit.
>>> round(Decimal('123.456'))
123
>>> round(Decimal('-456.789'))
-457
>>> round(Decimal('-3.0'))
-3
>>> round(Decimal('2.5'))
2
>>> round(Decimal('3.5'))
4
>>> round(Decimal('Inf'))
Traceback (most recent call last):
...
OverflowError: cannot round an infinity
>>> round(Decimal('NaN'))
Traceback (most recent call last):
...
ValueError: cannot round a NaN
If a second argument n is supplied, self is rounded to n
decimal places using the rounding mode for the current
context.
For an integer n, round(self, -n) is exactly equivalent to
self.quantize(Decimal('1En')).
>>> round(Decimal('123.456'), 0)
Decimal('123')
>>> round(Decimal('123.456'), 2)
Decimal('123.46')
>>> round(Decimal('123.456'), -2)
Decimal('1E+2')
>>> round(Decimal('-Infinity'), 37)
Decimal('NaN')
>>> round(Decimal('sNaN123'), 0)
Decimal('NaN123')
"""
if n is not None:
# two-argument form: use the equivalent quantize call
if not isinstance(n, int):
raise TypeError('Second argument to round should be integral')
exp = _dec_from_triple(0, '1', -n)
return self.quantize(exp)
# one-argument form
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_HALF_EVEN))
def __floor__(self):
"""Return the floor of self, as an integer.
For a finite Decimal instance self, return the greatest
integer n such that n <= self. If self is infinite or a NaN
then a Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_FLOOR))
def __ceil__(self):
"""Return the ceiling of self, as an integer.
For a finite Decimal instance self, return the least integer n
such that n >= self. If self is infinite or a NaN then a
Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_CEILING))
def fma(self, other, third, context=None):
"""Fused multiply-add.
Returns self*other+third with no rounding of the intermediate
product self*other.
self and other are multiplied together, with no rounding of
the result. The third operand is then added to the result,
and a single final rounding is performed.
"""
other = _convert_other(other, raiseit=True)
third = _convert_other(third, raiseit=True)
# compute product; raise InvalidOperation if either operand is
# a signaling NaN or if the product is zero times infinity.
if self._is_special or other._is_special:
if context is None:
context = getcontext()
if self._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', self)
if other._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', other)
if self._exp == 'n':
product = self
elif other._exp == 'n':
product = other
elif self._exp == 'F':
if not other:
return context._raise_error(InvalidOperation,
'INF * 0 in fma')
product = _SignedInfinity[self._sign ^ other._sign]
elif other._exp == 'F':
if not self:
return context._raise_error(InvalidOperation,
'0 * INF in fma')
product = _SignedInfinity[self._sign ^ other._sign]
else:
product = _dec_from_triple(self._sign ^ other._sign,
str(int(self._int) * int(other._int)),
self._exp + other._exp)
return product.__add__(third, context)
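# Illustrative sketch (added commentary): because the product is formed
# exactly, fma performs one rounding where self*other + third would
# perform two:
#
#   >>> Decimal(2).fma(3, 5)
#   Decimal('11')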
def _power_modulo(self, other, modulo, context=None):
"""Three argument version of __pow__"""
other = _convert_other(other)
if other is NotImplemented:
return other
modulo = _convert_other(modulo)
if modulo is NotImplemented:
return modulo
if context is None:
context = getcontext()
# deal with NaNs: if there are any sNaNs then first one wins,
# (i.e. behaviour for NaNs is identical to that of fma)
self_is_nan = self._isnan()
other_is_nan = other._isnan()
modulo_is_nan = modulo._isnan()
if self_is_nan or other_is_nan or modulo_is_nan:
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if modulo_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
modulo)
if self_is_nan:
return self._fix_nan(context)
if other_is_nan:
return other._fix_nan(context)
return modulo._fix_nan(context)
# check inputs: we apply same restrictions as Python's pow()
if not (self._isinteger() and
other._isinteger() and
modulo._isinteger()):
return context._raise_error(InvalidOperation,
'pow() 3rd argument not allowed '
'unless all arguments are integers')
if other < 0:
return context._raise_error(InvalidOperation,
'pow() 2nd argument cannot be '
'negative when 3rd argument specified')
if not modulo:
return context._raise_error(InvalidOperation,
'pow() 3rd argument cannot be 0')
# additional restriction for decimal: the modulus must be less
# than 10**prec in absolute value
if modulo.adjusted() >= context.prec:
return context._raise_error(InvalidOperation,
'insufficient precision: pow() 3rd '
'argument must not have more than '
'precision digits')
# define 0**0 == NaN, for consistency with two-argument pow
# (even though it hurts!)
if not other and not self:
return context._raise_error(InvalidOperation,
'at least one of pow() 1st argument '
'and 2nd argument must be nonzero; '
'0**0 is not defined')
# compute sign of result
if other._iseven():
sign = 0
else:
sign = self._sign
# convert modulo to a Python integer, and self and other to
# Decimal integers (i.e. force their exponents to be >= 0)
modulo = abs(int(modulo))
base = _WorkRep(self.to_integral_value())
exponent = _WorkRep(other.to_integral_value())
# compute result using integer pow()
base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo
for i in range(exponent.exp):
base = pow(base, 10, modulo)
base = pow(base, exponent.int, modulo)
return _dec_from_triple(sign, str(base), 0)
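# Illustrative sketch (added commentary): _power_modulo backs the
# three-argument form of the built-in pow() for integral Decimals:
#
#   >>> pow(Decimal(2), Decimal(10), Decimal(100))
#   Decimal('24')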
def _power_exact(self, other, p):
"""Attempt to compute self**other exactly.
Given Decimals self and other and an integer p, attempt to
compute an exact result for the power self**other, with p
digits of precision. Return None if self**other is not
exactly representable in p digits.
Assumes that elimination of special cases has already been
performed: self and other must both be nonspecial; self must
be positive and not numerically equal to 1; other must be
nonzero. For efficiency, other._exp should not be too large,
so that 10**abs(other._exp) is a feasible calculation."""
# In the comments below, we write x for the value of self and y for the
# value of other. Write x = xc*10**xe and abs(y) = yc*10**ye, with xc
# and yc positive integers not divisible by 10.
# The main purpose of this method is to identify the *failure*
# of x**y to be exactly representable with as little effort as
# possible. So we look for cheap and easy tests that
# eliminate the possibility of x**y being exact. Only if all
# these tests are passed do we go on to actually compute x**y.
# Here's the main idea. Express y as a rational number m/n, with m and
# n relatively prime and n>0. Then for x**y to be exactly
# representable (at *any* precision), xc must be the nth power of a
# positive integer and xe must be divisible by n. If y is negative
# then additionally xc must be a power of either 2 or 5, hence a power
# of 2**n or 5**n.
#
# There's a limit to how small |y| can be: if y=m/n as above
# then:
#
# (1) if xc != 1 then for the result to be representable we
# need xc**(1/n) >= 2, and hence also xc**|y| >= 2. So
# if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <=
# 2**(1/|y|), hence xc**|y| < 2 and the result is not
# representable.
#
# (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1. Hence if
# |y| < 1/|xe| then the result is not representable.
#
# Note that since x is not equal to 1, at least one of (1) and
# (2) must apply. Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) <
# 10**-ye iff len(str(|yc|*nbits(xc))) <= -ye.
#
# There's also a limit to how large y can be, at least if it's
# positive: the normalized result will have coefficient xc**y,
# so if it's representable then xc**y < 10**p, and y <
# p/log10(xc). Hence if y*log10(xc) >= p then the result is
# not exactly representable.
# if len(str(abs(yc*xe))) <= -ye then abs(yc*xe) < 10**-ye,
# so |y| < 1/xe and the result is not representable.
# Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y|
# < 1/nbits(xc).
x = _WorkRep(self)
xc, xe = x.int, x.exp
while xc % 10 == 0:
xc //= 10
xe += 1
y = _WorkRep(other)
yc, ye = y.int, y.exp
while yc % 10 == 0:
yc //= 10
ye += 1
# case where xc == 1: result is 10**(xe*y), with xe*y
# required to be an integer
if xc == 1:
xe *= yc
# result is now 10**(xe * 10**ye); xe * 10**ye must be integral
while xe % 10 == 0:
xe //= 10
ye += 1
if ye < 0:
return None
exponent = xe * 10**ye
if y.sign == 1:
exponent = -exponent
# if other is a nonnegative integer, use ideal exponent
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(exponent-ideal_exponent, p-1)
else:
zeros = 0
return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros)
# case where y is negative: xc must be either a power
# of 2 or a power of 5.
if y.sign == 1:
last_digit = xc % 10
if last_digit in (2,4,6,8):
# quick test for power of 2
if xc & -xc != xc:
return None
# now xc is a power of 2; e is its exponent
e = _nbits(xc)-1
# We now have:
#
# x = 2**e * 10**xe, e > 0, and y < 0.
#
# The exact result is:
#
# x**y = 5**(-e*y) * 10**(e*y + xe*y)
#
# provided that both e*y and xe*y are integers. Note that if
# 5**(-e*y) >= 10**p, then the result can't be expressed
# exactly with p digits of precision.
#
# Using the above, we can guard against large values of ye.
# 93/65 is an upper bound for log(10)/log(5), so if
#
# ye >= len(str(93*p//65))
#
# then
#
# -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5),
#
# so 5**(-e*y) >= 10**p, and the coefficient of the result
# can't be expressed in p digits.
# emax >= largest e such that 5**e < 10**p.
emax = p*93//65
if ye >= len(str(emax)):
return None
# Find -e*y and -xe*y; both must be integers
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 5**e
elif last_digit == 5:
# e >= log_5(xc) if xc is a power of 5; we have
# equality all the way up to xc=5**2658
e = _nbits(xc)*28//65
xc, remainder = divmod(5**e, xc)
if remainder:
return None
while xc % 5 == 0:
xc //= 5
e -= 1
# Guard against large values of ye, using the same logic as in
# the 'xc is a power of 2' branch. 10/3 is an upper bound for
# log(10)/log(2).
emax = p*10//3
if ye >= len(str(emax)):
return None
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 2**e
else:
return None
if xc >= 10**p:
return None
xe = -e-xe
return _dec_from_triple(0, str(xc), xe)
# now y is positive; find m and n such that y = m/n
if ye >= 0:
m, n = yc*10**ye, 1
else:
if xe != 0 and len(str(abs(yc*xe))) <= -ye:
return None
xc_bits = _nbits(xc)
if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye:
return None
m, n = yc, 10**(-ye)
while m % 2 == n % 2 == 0:
m //= 2
n //= 2
while m % 5 == n % 5 == 0:
m //= 5
n //= 5
# compute nth root of xc*10**xe
if n > 1:
# if 1 < xc < 2**n then xc isn't an nth power
if xc != 1 and xc_bits <= n:
return None
xe, rem = divmod(xe, n)
if rem != 0:
return None
# compute nth root of xc using Newton's method
a = 1 << -(-_nbits(xc)//n) # initial estimate
while True:
q, r = divmod(xc, a**(n-1))
if a <= q:
break
else:
a = (a*(n-1) + q)//n
if not (a == q and r == 0):
return None
xc = a
# now xc*10**xe is the nth root of the original xc*10**xe
# compute mth power of xc*10**xe
# if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m >
# 10**p and the result is not representable.
if xc > 1 and m > p*100//_log10_lb(xc):
return None
xc = xc**m
xe *= m
if xc > 10**p:
return None
# by this point the result *is* exactly representable
# adjust the exponent to get as close as possible to the ideal
# exponent, if necessary
str_xc = str(xc)
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(xe-ideal_exponent, p-len(str_xc))
else:
zeros = 0
return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros)
def __pow__(self, other, modulo=None, context=None):
"""Return self ** other [ % modulo].
With two arguments, compute self**other.
With three arguments, compute (self**other) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- other must be nonnegative
- either self or other (or both) must be nonzero
- modulo must be nonzero and must have at most p digits,
where p is the context precision.
If any of these restrictions is violated the InvalidOperation
flag is raised.
The result of pow(self, other, modulo) is identical to the
result that would be obtained by computing (self**other) %
modulo with unbounded precision, but is computed more
efficiently. It is always exact.
"""
if modulo is not None:
return self._power_modulo(other, modulo, context)
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
# either argument is a NaN => result is NaN
ans = self._check_nans(other, context)
if ans:
return ans
# 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity)
if not other:
if not self:
return context._raise_error(InvalidOperation, '0 ** 0')
else:
return _One
# result has sign 1 iff self._sign is 1 and other is an odd integer
result_sign = 0
if self._sign == 1:
if other._isinteger():
if not other._iseven():
result_sign = 1
else:
# -ve**noninteger = NaN
# (-0)**noninteger = 0**noninteger
if self:
return context._raise_error(InvalidOperation,
'x ** y with x negative and y not an integer')
# negate self, without doing any unwanted rounding
self = self.copy_negate()
# 0**(+ve or Inf) = 0; 0**(-ve or -Inf) = Infinity
if not self:
if other._sign == 0:
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0
if self._isinfinity():
if other._sign == 0:
return _SignedInfinity[result_sign]
else:
return _dec_from_triple(result_sign, '0', 0)
# 1**other = 1, but the choice of exponent and the flags
# depend on the exponent of self, and on whether other is a
# positive integer, a negative integer, or neither
if self == _One:
if other._isinteger():
# exp = max(self._exp*max(int(other), 0),
# 1-context.prec) but evaluating int(other) directly
# is dangerous until we know other is small (other
# could be 1e999999999)
if other._sign == 1:
multiplier = 0
elif other > context.prec:
multiplier = context.prec
else:
multiplier = int(other)
exp = self._exp * multiplier
if exp < 1-context.prec:
exp = 1-context.prec
context._raise_error(Rounded)
else:
context._raise_error(Inexact)
context._raise_error(Rounded)
exp = 1-context.prec
return _dec_from_triple(result_sign, '1'+'0'*-exp, exp)
# compute adjusted exponent of self
self_adj = self.adjusted()
# self ** infinity is infinity if self > 1, 0 if self < 1
# self ** -infinity is infinity if self < 1, 0 if self > 1
if other._isinfinity():
if (other._sign == 0) == (self_adj < 0):
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# from here on, the result always goes through the call
# to _fix at the end of this function.
ans = None
exact = False
# crude test to catch cases of extreme overflow/underflow. If
# log10(self)*other >= 10**bound and bound >= len(str(Emax))
# then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence
# self**other >= 10**(Emax+1), so overflow occurs. The test
# for underflow is similar.
bound = self._log10_exp_bound() + other.adjusted()
if (self_adj >= 0) == (other._sign == 0):
# self > 1 and other +ve, or self < 1 and other -ve
# possibility of overflow
if bound >= len(str(context.Emax)):
ans = _dec_from_triple(result_sign, '1', context.Emax+1)
else:
# self > 1 and other -ve, or self < 1 and other +ve
# possibility of underflow to 0
Etiny = context.Etiny()
if bound >= len(str(-Etiny)):
ans = _dec_from_triple(result_sign, '1', Etiny-1)
# try for an exact result with precision +1
if ans is None:
ans = self._power_exact(other, context.prec + 1)
if ans is not None:
if result_sign == 1:
ans = _dec_from_triple(1, ans._int, ans._exp)
exact = True
# usual case: inexact result, x**y computed directly as exp(y*log(x))
if ans is None:
p = context.prec
x = _WorkRep(self)
xc, xe = x.int, x.exp
y = _WorkRep(other)
yc, ye = y.int, y.exp
if y.sign == 1:
yc = -yc
# compute correctly rounded result: start with precision +3,
# then increase precision until result is unambiguously roundable
extra = 3
while True:
coeff, exp = _dpower(xc, xe, yc, ye, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(result_sign, str(coeff), exp)
# unlike exp, ln and log10, the power function respects the
# rounding mode; no need to switch to ROUND_HALF_EVEN here
# There's a difficulty here when 'other' is not an integer and
# the result is exact. In this case, the specification
# requires that the Inexact flag be raised (in spite of
# exactness), but since the result is exact _fix won't do this
# for us. (Correspondingly, the Underflow signal should also
# be raised for subnormal results.) We can't directly raise
# these signals either before or after calling _fix, since
# that would violate the precedence for signals. So we wrap
# the ._fix call in a temporary context, and reraise
# afterwards.
if exact and not other._isinteger():
# pad with zeros up to length context.prec+1 if necessary; this
# ensures that the Rounded signal will be raised.
if len(ans._int) <= context.prec:
expdiff = context.prec + 1 - len(ans._int)
ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff,
ans._exp-expdiff)
# create a copy of the current context, with cleared flags/traps
newcontext = context.copy()
newcontext.clear_flags()
for exception in _signals:
newcontext.traps[exception] = 0
# round in the new context
ans = ans._fix(newcontext)
# raise Inexact, and if necessary, Underflow
newcontext._raise_error(Inexact)
if newcontext.flags[Subnormal]:
newcontext._raise_error(Underflow)
# propagate signals to the original context; _fix could
# have raised any of Overflow, Underflow, Subnormal,
# Inexact, Rounded, Clamped. Overflow needs the correct
# arguments. Note that the order of the exceptions is
# important here.
if newcontext.flags[Overflow]:
context._raise_error(Overflow, 'above Emax', ans._sign)
for exception in Underflow, Subnormal, Inexact, Rounded, Clamped:
if newcontext.flags[exception]:
context._raise_error(exception)
else:
ans = ans._fix(context)
return ans
def __rpow__(self, other, context=None):
"""Swaps self/other and returns __pow__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__pow__(self, context=context)
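# Illustrative sketch (added commentary), assuming the default context:
#
#   >>> Decimal(2) ** 8      # exact integer power
#   Decimal('256')
#   >>> 2 ** Decimal(8)      # __rpow__ handles the reflected case
#   Decimal('256')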
def normalize(self, context=None):
"""Normalize- strip trailing 0s, change anything equal to 0 to 0e0"""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
dup = self._fix(context)
if dup._isinfinity():
return dup
if not dup:
return _dec_from_triple(dup._sign, '0', 0)
exp_max = [context.Emax, context.Etop()][context.clamp]
end = len(dup._int)
exp = dup._exp
while dup._int[end-1] == '0' and exp < exp_max:
exp += 1
end -= 1
return _dec_from_triple(dup._sign, dup._int[:end], exp)
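# Illustrative sketch (added commentary):
#
#   >>> Decimal('32.100').normalize()
#   Decimal('32.1')
#   >>> Decimal('0.00').normalize()    # zeros collapse to exponent 0
#   Decimal('0')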
def quantize(self, exp, rounding=None, context=None, watchexp=True):
"""Quantize self so its exponent is the same as that of exp.
Similar to self._rescale(exp._exp) but with error checking.
"""
exp = _convert_other(exp, raiseit=True)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special or exp._is_special:
ans = self._check_nans(exp, context)
if ans:
return ans
if exp._isinfinity() or self._isinfinity():
if exp._isinfinity() and self._isinfinity():
return Decimal(self) # if both are inf, it is OK
return context._raise_error(InvalidOperation,
'quantize with one INF')
# if we're not watching exponents, do a simple rescale
if not watchexp:
ans = self._rescale(exp._exp, rounding)
# raise Inexact and Rounded where appropriate
if ans._exp > self._exp:
context._raise_error(Rounded)
if ans != self:
context._raise_error(Inexact)
return ans
# exp._exp should be between Etiny and Emax
if not (context.Etiny() <= exp._exp <= context.Emax):
return context._raise_error(InvalidOperation,
'target exponent out of bounds in quantize')
if not self:
ans = _dec_from_triple(self._sign, '0', exp._exp)
return ans._fix(context)
self_adjusted = self.adjusted()
if self_adjusted > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if self_adjusted - exp._exp + 1 > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
ans = self._rescale(exp._exp, rounding)
if ans.adjusted() > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if len(ans._int) > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
# raise appropriate flags
if ans and ans.adjusted() < context.Emin:
context._raise_error(Subnormal)
if ans._exp > self._exp:
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
# call to fix takes care of any necessary folddown, and
# signals Clamped if necessary
ans = ans._fix(context)
return ans
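# Illustrative sketch (added commentary): quantize is the idiomatic way to
# fix the number of decimal places:
#
#   >>> Decimal('1.41421356').quantize(Decimal('1.000'))
#   Decimal('1.414')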
def same_quantum(self, other, context=None):
"""Return True if self and other have the same exponent; otherwise
return False.
If either operand is a special value, the following rules are used:
* return True if both operands are infinities
* return True if both operands are NaNs
* otherwise, return False.
"""
other = _convert_other(other, raiseit=True)
if self._is_special or other._is_special:
return (self.is_nan() and other.is_nan() or
self.is_infinite() and other.is_infinite())
return self._exp == other._exp
def _rescale(self, exp, rounding):
"""Rescale self so that the exponent is exp, either by padding with zeros
or by truncating digits, using the given rounding mode.
Specials are returned without change. This operation is
quiet: it raises no flags, and uses no information from the
context.
exp = exp to scale to (an integer)
rounding = rounding mode
"""
if self._is_special:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', exp)
if self._exp >= exp:
# pad answer with zeros if necessary
return _dec_from_triple(self._sign,
self._int + '0'*(self._exp - exp), exp)
# too many digits; round and lose data. If self.adjusted() <
# exp-1, replace self by 10**(exp-1) before rounding
digits = len(self._int) + self._exp - exp
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp-1)
digits = 0
this_function = self._pick_rounding_function[rounding]
changed = this_function(self, digits)
coeff = self._int[:digits] or '0'
if changed == 1:
coeff = str(int(coeff)+1)
return _dec_from_triple(self._sign, coeff, exp)
def _round(self, places, rounding):
"""Round a nonzero, nonspecial Decimal to a fixed number of
significant figures, using the given rounding mode.
Infinities, NaNs and zeros are returned unaltered.
This operation is quiet: it raises no flags, and uses no
information from the context.
"""
if places <= 0:
raise ValueError("argument should be at least 1 in _round")
if self._is_special or not self:
return Decimal(self)
ans = self._rescale(self.adjusted()+1-places, rounding)
# it can happen that the rescale alters the adjusted exponent;
# for example when rounding 99.97 to 3 significant figures.
# When this happens we end up with an extra 0 at the end of
# the number; a second rescale fixes this.
if ans.adjusted() != self.adjusted():
ans = ans._rescale(ans.adjusted()+1-places, rounding)
return ans
def to_integral_exact(self, rounding=None, context=None):
"""Rounds to a nearby integer.
If no rounding mode is specified, take the rounding mode from
the context. This method raises the Rounded and Inexact flags
when appropriate.
See also: to_integral_value, which does exactly the same as
this method except that it doesn't raise Inexact or Rounded.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', 0)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
ans = self._rescale(0, rounding)
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
def to_integral_value(self, rounding=None, context=None):
"""Rounds to the nearest integer, without raising inexact, rounded."""
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
else:
return self._rescale(0, rounding)
# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
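# Illustrative sketch (added commentary): both methods round to an
# integer-valued Decimal; only to_integral_exact signals Inexact and
# Rounded. With the default ROUND_HALF_EVEN:
#
#   >>> Decimal('2.5').to_integral_value()
#   Decimal('2')
#   >>> Decimal('3.5').to_integral_value()
#   Decimal('4')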
def sqrt(self, context=None):
"""Return the square root of self."""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() and self._sign == 0:
return Decimal(self)
if not self:
# exponent = self._exp // 2. sqrt(-0) = -0
ans = _dec_from_triple(self._sign, '0', self._exp // 2)
return ans._fix(context)
if self._sign == 1:
return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')
# At this point self represents a positive number. Let p be
# the desired precision and express self in the form c*100**e
# with c a positive real number and e an integer, c and e
# being chosen so that 100**(p-1) <= c < 100**p. Then the
# (exact) square root of self is sqrt(c)*10**e, and 10**(p-1)
# <= sqrt(c) < 10**p, so the closest representable Decimal at
# precision p is n*10**e where n = round_half_even(sqrt(c)),
# the closest integer to sqrt(c) with the even integer chosen
# in the case of a tie.
#
# To ensure correct rounding in all cases, we use the
# following trick: we compute the square root to an extra
# place (precision p+1 instead of precision p), rounding down.
# Then, if the result is inexact and its last digit is 0 or 5,
# we increase the last digit to 1 or 6 respectively; if it's
# exact we leave the last digit alone. Now the final round to
# p places (or fewer in the case of underflow) will round
# correctly and raise the appropriate flags.
# use an extra digit of precision
prec = context.prec+1
# write argument in the form c*100**e where e = self._exp//2
# is the 'ideal' exponent, to be used if the square root is
# exactly representable. l is the number of 'digits' of c in
# base 100, so that 100**(l-1) <= c < 100**l.
op = _WorkRep(self)
e = op.exp >> 1
if op.exp & 1:
c = op.int * 10
l = (len(self._int) >> 1) + 1
else:
c = op.int
l = len(self._int)+1 >> 1
# rescale so that c has exactly prec base 100 'digits'
shift = prec-l
if shift >= 0:
c *= 100**shift
exact = True
else:
c, remainder = divmod(c, 100**-shift)
exact = not remainder
e -= shift
# find n = floor(sqrt(c)) using Newton's method
n = 10**prec
while True:
q = c//n
if n <= q:
break
else:
n = n + q >> 1
exact = exact and n*n == c
if exact:
# result is exact; rescale to use ideal exponent e
if shift >= 0:
# assert n % 10**shift == 0
n //= 10**shift
else:
n *= 10**-shift
e += shift
else:
# result is not exact; fix last digit as described above
if n % 5 == 0:
n += 1
ans = _dec_from_triple(0, str(n), e)
# round, and fit to current context
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
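# Illustrative sketch (added commentary): exact roots keep the ideal
# exponent, while inexact ones fill the context precision (28 by default):
#
#   >>> Decimal(16).sqrt()
#   Decimal('4')
#   >>> Decimal(2).sqrt()
#   Decimal('1.414213562373095048801688724')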
def max(self, other, context=None):
"""Returns the larger value.
Like max(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
# If both operands are finite and equal in numerical value
# then an ordering is applied:
#
# If the signs differ then max returns the operand with the
# positive sign and min returns the operand with the negative sign
#
# If the signs are the same then the exponent is used to select
# the result. This is exactly the ordering used in compare_total.
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min(self, other, context=None):
"""Returns the smaller value.
Like min(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
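# Illustrative sketch (added commentary): unlike the built-in max()/min(),
# a quiet NaN loses to a number, per the specification:
#
#   >>> Decimal(1).max(Decimal('NaN'))
#   Decimal('1')
#   >>> Decimal(1).min(Decimal('NaN'))
#   Decimal('1')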
def _isinteger(self):
"""Returns whether self is an integer"""
if self._is_special:
return False
if self._exp >= 0:
return True
rest = self._int[self._exp:]
return rest == '0'*len(rest)
def _iseven(self):
"""Returns True if self is even. Assumes self is an integer."""
if not self or self._exp > 0:
return True
return self._int[-1+self._exp] in '02468'
def adjusted(self):
"""Return the adjusted exponent of self"""
try:
return self._exp + len(self._int) - 1
# If self is a NaN or Infinity, self._exp is a string
except TypeError:
return 0
def canonical(self):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
"""
return self
def compare_signal(self, other, context=None):
"""Compares self to the other operand numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
"""
other = _convert_other(other, raiseit = True)
ans = self._compare_check_nans(other, context)
if ans:
return ans
return self.compare(other, context=context)
def compare_total(self, other, context=None):
"""Compares self to other using the abstract representations.
This is not like the standard compare, which uses the operands' numerical
value. Note that a total ordering is defined for all possible abstract
representations.
"""
other = _convert_other(other, raiseit=True)
# if one is negative and the other is positive, it's easy
if self._sign and not other._sign:
return _NegativeOne
if not self._sign and other._sign:
return _One
sign = self._sign
# let's handle both NaN types
self_nan = self._isnan()
other_nan = other._isnan()
if self_nan or other_nan:
if self_nan == other_nan:
# compare payloads as though they're integers
self_key = len(self._int), self._int
other_key = len(other._int), other._int
if self_key < other_key:
if sign:
return _One
else:
return _NegativeOne
if self_key > other_key:
if sign:
return _NegativeOne
else:
return _One
return _Zero
if sign:
if self_nan == 1:
return _NegativeOne
if other_nan == 1:
return _One
if self_nan == 2:
return _NegativeOne
if other_nan == 2:
return _One
else:
if self_nan == 1:
return _One
if other_nan == 1:
return _NegativeOne
if self_nan == 2:
return _One
if other_nan == 2:
return _NegativeOne
if self < other:
return _NegativeOne
if self > other:
return _One
if self._exp < other._exp:
if sign:
return _One
else:
return _NegativeOne
if self._exp > other._exp:
if sign:
return _NegativeOne
else:
return _One
return _Zero
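# Illustrative sketch (added commentary): numerically equal values are
# still ordered by their abstract representation (here, by exponent):
#
#   >>> Decimal('12.0').compare_total(Decimal('12'))
#   Decimal('-1')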
def compare_total_mag(self, other, context=None):
"""Compares self to other using abstract repr., ignoring sign.
Like compare_total, but with the operands' signs ignored and assumed to be 0.
"""
other = _convert_other(other, raiseit=True)
s = self.copy_abs()
o = other.copy_abs()
return s.compare_total(o)
def copy_abs(self):
"""Returns a copy with the sign set to 0. """
return _dec_from_triple(0, self._int, self._exp, self._is_special)
def copy_negate(self):
"""Returns a copy with the sign inverted."""
if self._sign:
return _dec_from_triple(0, self._int, self._exp, self._is_special)
else:
return _dec_from_triple(1, self._int, self._exp, self._is_special)
def copy_sign(self, other, context=None):
"""Returns self with the sign of other."""
other = _convert_other(other, raiseit=True)
return _dec_from_triple(other._sign, self._int,
self._exp, self._is_special)
def exp(self, context=None):
"""Returns e ** self."""
if context is None:
context = getcontext()
# exp(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# exp(-Infinity) = 0
if self._isinfinity() == -1:
return _Zero
# exp(0) = 1
if not self:
return _One
# exp(Infinity) = Infinity
if self._isinfinity() == 1:
return Decimal(self)
# the result is now guaranteed to be inexact (the true
# mathematical result is transcendental). There's no need to
# raise Rounded and Inexact here---they'll always be raised as
# a result of the call to _fix.
p = context.prec
adj = self.adjusted()
# we only need to do any computation for quite a small range
# of adjusted exponents---for example, -29 <= adj <= 10 for
# the default context. For smaller exponent the result is
# indistinguishable from 1 at the given precision, while for
# larger exponent the result either overflows or underflows.
if self._sign == 0 and adj > len(str((context.Emax+1)*3)):
# overflow
ans = _dec_from_triple(0, '1', context.Emax+1)
elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)):
# underflow to 0
ans = _dec_from_triple(0, '1', context.Etiny()-1)
elif self._sign == 0 and adj < -p:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p)
elif self._sign == 1 and adj < -p-1:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '9'*(p+1), -p-1)
# general case
else:
op = _WorkRep(self)
c, e = op.int, op.exp
if op.sign == 1:
c = -c
# compute correctly rounded result: increase precision by
# 3 digits at a time until we get an unambiguously
# roundable result
extra = 3
while True:
coeff, exp = _dexp(c, e, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(0, str(coeff), exp)
# at this stage, ans should round correctly with *any*
# rounding mode, not just with ROUND_HALF_EVEN
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
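# Illustrative sketch (added commentary), assuming the default prec=28:
#
#   >>> Decimal(1).exp()
#   Decimal('2.718281828459045235360287471')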
def is_canonical(self):
"""Return True if self is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
"""
return True
def is_finite(self):
"""Return True if self is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
"""
return not self._is_special
def is_infinite(self):
"""Return True if self is infinite; otherwise return False."""
return self._exp == 'F'
def is_nan(self):
"""Return True if self is a qNaN or sNaN; otherwise return False."""
return self._exp in ('n', 'N')
def is_normal(self, context=None):
"""Return True if self is a normal number; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return context.Emin <= self.adjusted()
def is_qnan(self):
"""Return True if self is a quiet NaN; otherwise return False."""
return self._exp == 'n'
def is_signed(self):
"""Return True if self is negative; otherwise return False."""
return self._sign == 1
def is_snan(self):
"""Return True if self is a signaling NaN; otherwise return False."""
return self._exp == 'N'
def is_subnormal(self, context=None):
"""Return True if self is subnormal; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return self.adjusted() < context.Emin
def is_zero(self):
"""Return True if self is a zero; otherwise return False."""
return not self._is_special and self._int == '0'
def _ln_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.ln().
In other words, compute r such that self.ln() >= 10**r. Assumes
that self is finite and positive and that self != 1.
"""
# for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1
adj = self._exp + len(self._int) - 1
if adj >= 1:
# argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)
return len(str(adj*23//10)) - 1
if adj <= -2:
# argument <= 0.1
return len(str((-1-adj)*23//10)) - 1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(c)
return len(num) - len(den) - (num < den)
# adj == -1, 0.1 <= self < 1
return e + len(str(10**-e - c)) - 1
def ln(self, context=None):
"""Returns the natural (base e) logarithm of self."""
if context is None:
context = getcontext()
# ln(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# ln(0.0) == -Infinity
if not self:
return _NegativeInfinity
# ln(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# ln(1.0) == 0.0
if self == _One:
return _Zero
# ln(negative) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'ln of a negative value')
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision by 3
# until we get an unambiguously roundable result
places = p - self._ln_exp_bound() + 2 # at least p+3 places
while True:
coeff = _dlog(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
def _log10_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.log10().
In other words, find r such that self.log10() >= 10**r.
Assumes that self is finite and positive and that self != 1.
"""
# For x >= 10 or x < 0.1 we only need a bound on the integer
# part of log10(self), and this comes directly from the
# exponent of x. For 0.1 <= x <= 10 we use the inequalities
# 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| >
# (1-1/x)/2.31 > 0. If x < 1 then |log10(x)| > (1-x)/2.31 > 0
adj = self._exp + len(self._int) - 1
if adj >= 1:
# self >= 10
return len(str(adj))-1
if adj <= -2:
# self < 0.1
return len(str(-1-adj))-1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(231*c)
return len(num) - len(den) - (num < den) + 2
# adj == -1, 0.1 <= self < 1
num = str(10**-e-c)
return len(num) + e - (num < "231") - 1
def log10(self, context=None):
"""Returns the base 10 logarithm of self."""
if context is None:
context = getcontext()
# log10(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# log10(0.0) == -Infinity
if not self:
return _NegativeInfinity
# log10(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# log10(negative or -Infinity) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'log10 of a negative value')
# log10(10**n) = n
if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1):
# answer may need rounding
ans = Decimal(self._exp + len(self._int) - 1)
else:
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision
# until result is unambiguously roundable
places = p-self._log10_exp_bound()+2
while True:
coeff = _dlog10(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
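# Illustrative sketch (added commentary): exact powers of ten take the
# shortcut above; everything else is correctly rounded:
#
#   >>> Decimal('100').log10()
#   Decimal('2')
#   >>> Decimal('1').ln()
#   Decimal('0')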
def logb(self, context=None):
""" Returns the exponent of the magnitude of self's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of self (as though it were truncated
to a single digit while maintaining the value of that digit and
without limiting the resulting exponent).
"""
# logb(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
# logb(+/-Inf) = +Inf
if self._isinfinity():
return _Infinity
# logb(0) = -Inf, DivisionByZero
if not self:
return context._raise_error(DivisionByZero, 'logb(0)', 1)
# otherwise, simply return the adjusted exponent of self, as a
# Decimal. Note that no attempt is made to fit the result
# into the current context.
ans = Decimal(self.adjusted())
return ans._fix(context)
def _islogical(self):
"""Return True if self is a logical operand.
To be logical, it must be a finite number with a sign of 0,
an exponent of 0, and a coefficient whose digits must all be
either 0 or 1.
"""
if self._sign != 0 or self._exp != 0:
return False
for dig in self._int:
if dig not in '01':
return False
return True
def _fill_logical(self, context, opa, opb):
dif = context.prec - len(opa)
if dif > 0:
opa = '0'*dif + opa
elif dif < 0:
opa = opa[-context.prec:]
dif = context.prec - len(opb)
if dif > 0:
opb = '0'*dif + opb
elif dif < 0:
opb = opb[-context.prec:]
return opa, opb
def logical_and(self, other, context=None):
"""Applies an 'and' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)&int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_invert(self, context=None):
"""Invert all its digits."""
if context is None:
context = getcontext()
return self.logical_xor(_dec_from_triple(0,'1'*context.prec,0),
context)
def logical_or(self, other, context=None):
"""Applies an 'or' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)|int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_xor(self, other, context=None):
"""Applies an 'xor' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)^int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
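# Illustrative sketch (added commentary): logical operands are finite
# Decimals whose digits are all 0s and 1s; the operations act digit by
# digit after padding to the context precision:
#
#   >>> Decimal('1100').logical_and(Decimal('1010'))
#   Decimal('1000')
#   >>> Decimal('1100').logical_xor(Decimal('1010'))
#   Decimal('110')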
def max_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
def next_minus(self, context=None):
"""Returns the largest representable number smaller than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == -1:
return _NegativeInfinity
if self._isinfinity() == 1:
return _dec_from_triple(0, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_FLOOR)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__sub__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_plus(self, context=None):
"""Returns the smallest representable number larger than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == 1:
return _Infinity
if self._isinfinity() == -1:
return _dec_from_triple(1, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_CEILING)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__add__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_toward(self, other, context=None):
"""Returns the number closest to self, in the direction towards other.
The result is the closest representable number to self
(excluding self) that is in the direction towards other,
unless both have the same value. If the two operands are
numerically equal, then the result is a copy of self with the
sign set to be the same as the sign of other.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
comparison = self._cmp(other)
if comparison == 0:
return self.copy_sign(other)
if comparison == -1:
ans = self.next_plus(context)
else: # comparison == 1
ans = self.next_minus(context)
# decide which flags to raise using value of ans
if ans._isinfinity():
context._raise_error(Overflow,
'Infinite result from next_toward',
ans._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
elif ans.adjusted() < context.Emin:
context._raise_error(Underflow)
context._raise_error(Subnormal)
context._raise_error(Inexact)
context._raise_error(Rounded)
# if precision == 1 then we don't raise Clamped for a
# result 0E-Etiny.
if not ans:
context._raise_error(Clamped)
return ans
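# Illustrative sketch (added commentary), assuming the default prec=28
# (the digit counts below depend on that assumption):
#
#   >>> Decimal(1).next_plus()       # one ulp above 1
#   Decimal('1.000000000000000000000000001')
#   >>> Decimal(1).next_toward(0)    # one ulp below 1
#   Decimal('0.9999999999999999999999999999')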
def number_class(self, context=None):
"""Returns an indication of the class of self.
The class is one of the following strings:
sNaN
NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
"""
if self.is_snan():
return "sNaN"
if self.is_qnan():
return "NaN"
inf = self._isinfinity()
if inf == 1:
return "+Infinity"
if inf == -1:
return "-Infinity"
if self.is_zero():
if self._sign:
return "-Zero"
else:
return "+Zero"
if context is None:
context = getcontext()
if self.is_subnormal(context=context):
if self._sign:
return "-Subnormal"
else:
return "+Subnormal"
# just a normal, regular, boring number, :)
if self._sign:
return "-Normal"
else:
return "+Normal"
def radix(self):
"""Just returns 10, as this is Decimal, :)"""
return Decimal(10)
def rotate(self, other, context=None):
"""Returns a rotated copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's rotate!
rotated = rotdig[torot:] + rotdig[:torot]
return _dec_from_triple(self._sign,
rotated.lstrip('0') or '0', self._exp)
def scaleb(self, other, context=None):
"""Returns self operand after adding the second value to its exp."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
liminf = -2 * (context.Emax + context.prec)
limsup = 2 * (context.Emax + context.prec)
if not (liminf <= int(other) <= limsup):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
d = _dec_from_triple(self._sign, self._int, self._exp + int(other))
d = d._fix(context)
return d
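# Illustrative sketch (added commentary): scaleb adjusts only the
# exponent, i.e. multiplies by a power of ten without touching the digits:
#
#   >>> Decimal('7.50').scaleb(2)
#   Decimal('750')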
def shift(self, other, context=None):
"""Returns a shifted copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's shift!
if torot < 0:
shifted = rotdig[:torot]
else:
shifted = rotdig + '0'*torot
shifted = shifted[-context.prec:]
return _dec_from_triple(self._sign,
shifted.lstrip('0') or '0', self._exp)
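# Illustrative sketch (added commentary): shift discards digits that move
# out of the precision window, whereas rotate wraps them around; within
# the window both resemble multiplication by a power of ten:
#
#   >>> Decimal('123456789').shift(2)
#   Decimal('12345678900')
#   >>> Decimal('123456789').shift(-2)
#   Decimal('1234567')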
# Support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) is Decimal:
return self # I'm immutable; therefore I am my own clone
return self.__class__(str(self))
def __deepcopy__(self, memo):
if type(self) is Decimal:
return self # My components are also immutable
return self.__class__(str(self))
# PEP 3101 support. The _localeconv keyword argument should be
# considered private: it's provided for ease of testing only.
def __format__(self, specifier, context=None, _localeconv=None):
"""Format a Decimal instance according to the given specifier.
The specifier should be a standard format specifier, with the
form described in PEP 3101. Formatting types 'e', 'E', 'f',
'F', 'g', 'G', 'n' and '%' are supported. If the formatting
type is omitted it defaults to 'g' or 'G', depending on the
value of context.capitals.
"""
# Note: PEP 3101 says that if the type is not present then
# there should be at least one digit after the decimal point.
# We take the liberty of ignoring this requirement for
# Decimal---it's presumably there to make sure that
# format(float, '') behaves similarly to str(float).
if context is None:
context = getcontext()
spec = _parse_format_specifier(specifier, _localeconv=_localeconv)
# special values don't care about the type or precision
if self._is_special:
sign = _format_sign(self._sign, spec)
body = str(self.copy_abs())
return _format_align(sign, body, spec)
# a type of None defaults to 'g' or 'G', depending on context
if spec['type'] is None:
spec['type'] = ['g', 'G'][context.capitals]
# if type is '%', adjust exponent of self accordingly
if spec['type'] == '%':
self = _dec_from_triple(self._sign, self._int, self._exp+2)
# round if necessary, taking rounding mode from the context
rounding = context.rounding
precision = spec['precision']
if precision is not None:
if spec['type'] in 'eE':
self = self._round(precision+1, rounding)
elif spec['type'] in 'fF%':
self = self._rescale(-precision, rounding)
elif spec['type'] in 'gG' and len(self._int) > precision:
self = self._round(precision, rounding)
# special case: zeros with a positive exponent can't be
# represented in fixed point; rescale them to 0e0.
if not self and self._exp > 0 and spec['type'] in 'fF%':
self = self._rescale(0, rounding)
# figure out placement of the decimal point
leftdigits = self._exp + len(self._int)
if spec['type'] in 'eE':
if not self and precision is not None:
dotplace = 1 - precision
else:
dotplace = 1
elif spec['type'] in 'fF%':
dotplace = leftdigits
elif spec['type'] in 'gG':
if self._exp <= 0 and leftdigits > -6:
dotplace = leftdigits
else:
dotplace = 1
# find digits before and after decimal point, and get exponent
if dotplace < 0:
intpart = '0'
fracpart = '0'*(-dotplace) + self._int
elif dotplace > len(self._int):
intpart = self._int + '0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace] or '0'
fracpart = self._int[dotplace:]
exp = leftdigits-dotplace
# done with the decimal-specific stuff; hand over the rest
# of the formatting to the _format_number function
return _format_number(self._sign, intpart, fracpart, exp, spec)
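    # Doctest-style sketch of the pipeline above (assumes the module's
    # public Decimal constructor, run under the default context):
    #
    #   >>> format(Decimal('1234.5678'), '.2f')   # rescaled to exponent -2
    #   '1234.57'
    #   >>> format(Decimal('1234.5678'), '.3e')   # rounded to precision+1 digits
    #   '1.235e+3'
    #   >>> format(Decimal('0.25'), '.1%')        # exponent bumped by 2 first
    #   '25.0%'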
def _dec_from_triple(sign, coefficient, exponent, special=False):
"""Create a decimal instance directly, without any validation,
normalization (e.g. removal of leading zeros) or argument
conversion.
This function is for *internal use only*.
"""
self = object.__new__(Decimal)
self._sign = sign
self._int = coefficient
self._exp = exponent
self._is_special = special
return self
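# For instance (comment only): _dec_from_triple(1, '15', -1) builds the
# value -1.5 directly; the sign is 0 or 1, the coefficient is a digit
# string, and the exponent is an int.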
# Register Decimal as a kind of Number (an abstract base class).
# However, do not register it as Real (because Decimals are not
# interoperable with floats).
_numbers.Number.register(Decimal)
##### Context class #######################################################
class _ContextManager(object):
"""Context manager class to support localcontext().
Sets a copy of the supplied context in __enter__() and restores
the previous decimal context in __exit__()
"""
def __init__(self, new_context):
self.new_context = new_context.copy()
def __enter__(self):
self.saved_context = getcontext()
setcontext(self.new_context)
return self.new_context
def __exit__(self, t, v, tb):
setcontext(self.saved_context)
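# Typical use is via the module's localcontext() helper (doctest-style
# sketch, assuming the top-level decimal API):
#
#   >>> from decimal import localcontext, Decimal
#   >>> with localcontext() as ctx:
#   ...     ctx.prec = 5
#   ...     Decimal(1) / Decimal(7)
#   Decimal('0.14286')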
class Context(object):
"""Contains the context for a Decimal instance.
Contains:
prec - precision (for use in rounding, division, square roots..)
rounding - rounding type (how you round)
traps - If traps[exception] = 1, then the exception is
raised when it is caused. Otherwise, a value is
substituted in.
    flags - When an exception is caused, flags[exception] is set
            (whether or not the trap_enabler is set). Should be
            reset by the user of the Decimal instance.
Emin - Minimum exponent
Emax - Maximum exponent
capitals - If 1, 1*10^1 is printed as 1E+1.
If 0, printed as 1e1
clamp - If 1, change exponents if too high (Default 0)
"""
def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
capitals=None, clamp=None, flags=None, traps=None,
_ignored_flags=None):
# Set defaults; for everything except flags and _ignored_flags,
# inherit from DefaultContext.
try:
dc = DefaultContext
except NameError:
pass
self.prec = prec if prec is not None else dc.prec
self.rounding = rounding if rounding is not None else dc.rounding
self.Emin = Emin if Emin is not None else dc.Emin
self.Emax = Emax if Emax is not None else dc.Emax
self.capitals = capitals if capitals is not None else dc.capitals
self.clamp = clamp if clamp is not None else dc.clamp
if _ignored_flags is None:
self._ignored_flags = []
else:
self._ignored_flags = _ignored_flags
if traps is None:
self.traps = dc.traps.copy()
elif not isinstance(traps, dict):
self.traps = dict((s, int(s in traps)) for s in _signals + traps)
else:
self.traps = traps
if flags is None:
self.flags = dict.fromkeys(_signals, 0)
elif not isinstance(flags, dict):
self.flags = dict((s, int(s in flags)) for s in _signals + flags)
else:
self.flags = flags
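    # Sketch of the traps/flags normalization above (doctest-style,
    # assuming the top-level decimal API): a *list* of signals becomes a
    # 0/1 dict over all signals, and a trapped signal raises.
    #
    #   >>> from decimal import Context, DivisionByZero, Decimal
    #   >>> c = Context(traps=[DivisionByZero])
    #   >>> c.traps[DivisionByZero]
    #   1
    #   >>> c.divide(Decimal(1), Decimal(0))
    #   Traceback (most recent call last):
    #     ...
    #   decimal.DivisionByZero: x / 0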
def _set_integer_check(self, name, value, vmin, vmax):
if not isinstance(value, int):
raise TypeError("%s must be an integer" % name)
if vmin == '-inf':
if value > vmax:
raise ValueError("%s must be in [%s, %d]. got: %s" % (name, vmin, vmax, value))
elif vmax == 'inf':
if value < vmin:
raise ValueError("%s must be in [%d, %s]. got: %s" % (name, vmin, vmax, value))
else:
if value < vmin or value > vmax:
raise ValueError("%s must be in [%d, %d]. got %s" % (name, vmin, vmax, value))
return object.__setattr__(self, name, value)
    def _set_signal_dict(self, name, d):
        if not isinstance(d, dict):
            raise TypeError("%s must be a signal dict" % name)
        for key in d:
            if not key in _signals:
                raise KeyError("%s is not a valid signal" % key)
        for key in _signals:
            if not key in d:
                raise KeyError("%s is missing from the signal dict" % key)
        return object.__setattr__(self, name, d)
def __setattr__(self, name, value):
if name == 'prec':
return self._set_integer_check(name, value, 1, 'inf')
elif name == 'Emin':
return self._set_integer_check(name, value, '-inf', 0)
elif name == 'Emax':
return self._set_integer_check(name, value, 0, 'inf')
elif name == 'capitals':
return self._set_integer_check(name, value, 0, 1)
elif name == 'clamp':
return self._set_integer_check(name, value, 0, 1)
elif name == 'rounding':
if not value in _rounding_modes:
# raise TypeError even for strings to have consistency
# among various implementations.
raise TypeError("%s: invalid rounding mode" % value)
return object.__setattr__(self, name, value)
elif name == 'flags' or name == 'traps':
return self._set_signal_dict(name, value)
elif name == '_ignored_flags':
return object.__setattr__(self, name, value)
else:
raise AttributeError(
"'decimal.Context' object has no attribute '%s'" % name)
def __delattr__(self, name):
raise AttributeError("%s cannot be deleted" % name)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
flags = [sig for sig, v in self.flags.items() if v]
traps = [sig for sig, v in self.traps.items() if v]
return (self.__class__,
(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, flags, traps))
def __repr__(self):
"""Show the current context."""
s = []
s.append('Context(prec=%(prec)d, rounding=%(rounding)s, '
'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, '
'clamp=%(clamp)d'
% vars(self))
names = [f.__name__ for f, v in self.flags.items() if v]
s.append('flags=[' + ', '.join(names) + ']')
names = [t.__name__ for t, v in self.traps.items() if v]
s.append('traps=[' + ', '.join(names) + ']')
return ', '.join(s) + ')'
def clear_flags(self):
"""Reset all flags to zero"""
for flag in self.flags:
self.flags[flag] = 0
def clear_traps(self):
"""Reset all traps to zero"""
for flag in self.traps:
self.traps[flag] = 0
def _shallow_copy(self):
"""Returns a shallow copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, self.flags, self.traps,
self._ignored_flags)
return nc
def copy(self):
"""Returns a deep copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp,
self.flags.copy(), self.traps.copy(),
self._ignored_flags)
return nc
__copy__ = copy
def _raise_error(self, condition, explanation = None, *args):
"""Handles an error
If the flag is in _ignored_flags, returns the default response.
Otherwise, it sets the flag, then, if the corresponding
trap_enabler is set, it reraises the exception. Otherwise, it returns
the default value after setting the flag.
"""
error = _condition_map.get(condition, condition)
if error in self._ignored_flags:
# Don't touch the flag
return error().handle(self, *args)
self.flags[error] = 1
if not self.traps[error]:
# The errors define how to handle themselves.
return condition().handle(self, *args)
# Errors should only be risked on copies of the context
# self._ignored_flags = []
raise error(explanation)
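    # Flag-versus-trap behaviour in practice (doctest-style sketch): an
    # untrapped signal only sets its flag, while a trapped one raises.
    #
    #   >>> from decimal import Context, Inexact, Decimal
    #   >>> c = Context(prec=3)          # Inexact is untrapped by default
    #   >>> c.divide(Decimal(1), Decimal(3))
    #   Decimal('0.333')
    #   >>> c.flags[Inexact]
    #   1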
def _ignore_all_flags(self):
"""Ignore all flags, if they are raised"""
return self._ignore_flags(*_signals)
def _ignore_flags(self, *flags):
"""Ignore the flags, if they are raised"""
# Do not mutate-- This way, copies of a context leave the original
# alone.
self._ignored_flags = (self._ignored_flags + list(flags))
return list(flags)
def _regard_flags(self, *flags):
"""Stop ignoring the flags, if they are raised"""
if flags and isinstance(flags[0], (tuple,list)):
flags = flags[0]
for flag in flags:
self._ignored_flags.remove(flag)
# We inherit object.__hash__, so we must deny this explicitly
__hash__ = None
def Etiny(self):
"""Returns Etiny (= Emin - prec + 1)"""
return int(self.Emin - self.prec + 1)
def Etop(self):
"""Returns maximum exponent (= Emax - prec + 1)"""
return int(self.Emax - self.prec + 1)
def _set_rounding(self, type):
"""Sets the rounding type.
Sets the rounding type, and returns the current (previous)
rounding type. Often used like:
context = context.copy()
# so you don't change the calling context
# if an error occurs in the middle.
rounding = context._set_rounding(ROUND_UP)
val = self.__sub__(other, context=context)
context._set_rounding(rounding)
This will make it round up for that operation.
"""
rounding = self.rounding
        self.rounding = type
return rounding
def create_decimal(self, num='0'):
"""Creates a new Decimal instance but using self as context.
This method implements the to-number operation of the
IBM Decimal specification."""
if isinstance(num, str) and num != num.strip():
return self._raise_error(ConversionSyntax,
"no trailing or leading whitespace is "
"permitted.")
d = Decimal(num, context=self)
if d._isnan() and len(d._int) > self.prec - self.clamp:
return self._raise_error(ConversionSyntax,
"diagnostic info too long in NaN")
return d._fix(self)
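    # Contrast with the plain constructor (doctest-style sketch): only the
    # context method applies rounding on the way in.
    #
    #   >>> from decimal import Context, Decimal
    #   >>> Context(prec=3).create_decimal('3.14159')
    #   Decimal('3.14')
    #   >>> Decimal('3.14159')           # Decimal() itself never rounds
    #   Decimal('3.14159')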
def create_decimal_from_float(self, f):
"""Creates a new Decimal instance from a float but rounding using self
as the context.
>>> context = Context(prec=5, rounding=ROUND_DOWN)
>>> context.create_decimal_from_float(3.1415926535897932)
Decimal('3.1415')
>>> context = Context(prec=5, traps=[Inexact])
>>> context.create_decimal_from_float(3.1415926535897932)
Traceback (most recent call last):
...
decimal.Inexact: None
"""
d = Decimal.from_float(f) # An exact conversion
return d._fix(self) # Apply the context rounding
# Methods
def abs(self, a):
"""Returns the absolute value of the operand.
If the operand is negative, the result is the same as using the minus
operation on the operand. Otherwise, the result is the same as using
the plus operation on the operand.
>>> ExtendedContext.abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.abs(Decimal('101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.__abs__(context=self)
def add(self, a, b):
"""Return the sum of the two operands.
>>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
Decimal('19.00')
>>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
Decimal('1.02E+4')
>>> ExtendedContext.add(1, Decimal(2))
Decimal('3')
>>> ExtendedContext.add(Decimal(8), 5)
Decimal('13')
>>> ExtendedContext.add(5, 5)
Decimal('10')
"""
a = _convert_other(a, raiseit=True)
r = a.__add__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def _apply(self, a):
return str(a._fix(self))
def canonical(self, a):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
>>> ExtendedContext.canonical(Decimal('2.50'))
Decimal('2.50')
"""
if not isinstance(a, Decimal):
raise TypeError("canonical requires a Decimal as an argument.")
return a.canonical()
def compare(self, a, b):
"""Compares values numerically.
If the signs of the operands differ, a value representing each operand
('-1' if the operand is less than zero, '0' if the operand is zero or
negative zero, or '1' if the operand is greater than zero) is used in
place of that operand for the comparison instead of the actual
operand.
The comparison is then effected by subtracting the second operand from
the first and then returning a value according to the result of the
subtraction: '-1' if the result is less than zero, '0' if the result is
zero or negative zero, or '1' if the result is greater than zero.
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
Decimal('-1')
>>> ExtendedContext.compare(1, 2)
Decimal('-1')
>>> ExtendedContext.compare(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare(b, context=self)
def compare_signal(self, a, b):
"""Compares the values of the two operands numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
>>> c = ExtendedContext
>>> c.compare_signal(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> c.compare_signal(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('NaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('sNaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.compare_signal(-1, 2)
Decimal('-1')
>>> c.compare_signal(Decimal(-1), 2)
Decimal('-1')
>>> c.compare_signal(-1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_signal(b, context=self)
def compare_total(self, a, b):
"""Compares two operands using their abstract representation.
        This is not like the standard compare, which uses their numerical
value. Note that a total ordering is defined for all possible abstract
representations.
>>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30'))
Decimal('0')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300'))
Decimal('1')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN'))
Decimal('-1')
>>> ExtendedContext.compare_total(1, 2)
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare_total(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_total(b)
def compare_total_mag(self, a, b):
"""Compares two operands using their abstract representation ignoring sign.
Like compare_total, but with operand's sign ignored and assumed to be 0.
"""
a = _convert_other(a, raiseit=True)
return a.compare_total_mag(b)
def copy_abs(self, a):
"""Returns a copy of the operand with the sign set to 0.
>>> ExtendedContext.copy_abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.copy_abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_abs()
def copy_decimal(self, a):
"""Returns a copy of the decimal object.
>>> ExtendedContext.copy_decimal(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_decimal(Decimal('-1.00'))
Decimal('-1.00')
>>> ExtendedContext.copy_decimal(1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return Decimal(a)
def copy_negate(self, a):
"""Returns a copy of the operand with the sign inverted.
>>> ExtendedContext.copy_negate(Decimal('101.5'))
Decimal('-101.5')
>>> ExtendedContext.copy_negate(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.copy_negate(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_negate()
def copy_sign(self, a, b):
"""Copies the second operand's sign to the first one.
In detail, it returns a copy of the first operand with the sign
equal to the sign of the second operand.
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(1, -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(Decimal(1), -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(1, Decimal(-2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_sign(b)
def divide(self, a, b):
"""Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
Decimal('0.333333333')
>>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
Decimal('0.666666667')
>>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
Decimal('2.5')
>>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
Decimal('0.1')
>>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
Decimal('1')
>>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
Decimal('4.00')
>>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
Decimal('1.20')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
Decimal('10')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
Decimal('1000')
>>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
Decimal('1.20E+6')
>>> ExtendedContext.divide(5, 5)
Decimal('1')
>>> ExtendedContext.divide(Decimal(5), 5)
Decimal('1')
>>> ExtendedContext.divide(5, Decimal(5))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__truediv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divide_int(self, a, b):
"""Divides two numbers and returns the integer part of the result.
>>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
Decimal('0')
>>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
Decimal('3')
>>> ExtendedContext.divide_int(10, 3)
Decimal('3')
>>> ExtendedContext.divide_int(Decimal(10), 3)
Decimal('3')
>>> ExtendedContext.divide_int(10, Decimal(3))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__floordiv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divmod(self, a, b):
"""Return (a // b, a % b).
>>> ExtendedContext.divmod(Decimal(8), Decimal(3))
(Decimal('2'), Decimal('2'))
>>> ExtendedContext.divmod(Decimal(8), Decimal(4))
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(Decimal(8), 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, Decimal(4))
(Decimal('2'), Decimal('0'))
"""
a = _convert_other(a, raiseit=True)
r = a.__divmod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def exp(self, a):
"""Returns e ** a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.exp(Decimal('-Infinity'))
Decimal('0')
>>> c.exp(Decimal('-1'))
Decimal('0.367879441')
>>> c.exp(Decimal('0'))
Decimal('1')
>>> c.exp(Decimal('1'))
Decimal('2.71828183')
>>> c.exp(Decimal('0.693147181'))
Decimal('2.00000000')
>>> c.exp(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.exp(10)
Decimal('22026.4658')
"""
        a = _convert_other(a, raiseit=True)
return a.exp(context=self)
def fma(self, a, b, c):
"""Returns a multiplied by b, plus c.
The first two operands are multiplied together, using multiply,
the third operand is then added to the result of that
multiplication, using add, all with only one final rounding.
>>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7'))
Decimal('22')
>>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7'))
Decimal('-8')
>>> ExtendedContext.fma(Decimal('888565290'), Decimal('1557.96930'), Decimal('-86087.7578'))
Decimal('1.38435736E+12')
>>> ExtendedContext.fma(1, 3, 4)
Decimal('7')
>>> ExtendedContext.fma(1, Decimal(3), 4)
Decimal('7')
>>> ExtendedContext.fma(1, 3, Decimal(4))
Decimal('7')
"""
a = _convert_other(a, raiseit=True)
return a.fma(b, c, context=self)
def is_canonical(self, a):
"""Return True if the operand is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
>>> ExtendedContext.is_canonical(Decimal('2.50'))
True
"""
if not isinstance(a, Decimal):
raise TypeError("is_canonical requires a Decimal as an argument.")
return a.is_canonical()
def is_finite(self, a):
"""Return True if the operand is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
>>> ExtendedContext.is_finite(Decimal('2.50'))
True
>>> ExtendedContext.is_finite(Decimal('-0.3'))
True
>>> ExtendedContext.is_finite(Decimal('0'))
True
>>> ExtendedContext.is_finite(Decimal('Inf'))
False
>>> ExtendedContext.is_finite(Decimal('NaN'))
False
>>> ExtendedContext.is_finite(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_finite()
def is_infinite(self, a):
"""Return True if the operand is infinite; otherwise return False.
>>> ExtendedContext.is_infinite(Decimal('2.50'))
False
>>> ExtendedContext.is_infinite(Decimal('-Inf'))
True
>>> ExtendedContext.is_infinite(Decimal('NaN'))
False
>>> ExtendedContext.is_infinite(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_infinite()
def is_nan(self, a):
"""Return True if the operand is a qNaN or sNaN;
otherwise return False.
>>> ExtendedContext.is_nan(Decimal('2.50'))
False
>>> ExtendedContext.is_nan(Decimal('NaN'))
True
>>> ExtendedContext.is_nan(Decimal('-sNaN'))
True
>>> ExtendedContext.is_nan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_nan()
def is_normal(self, a):
"""Return True if the operand is a normal number;
otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_normal(Decimal('2.50'))
True
>>> c.is_normal(Decimal('0.1E-999'))
False
>>> c.is_normal(Decimal('0.00'))
False
>>> c.is_normal(Decimal('-Inf'))
False
>>> c.is_normal(Decimal('NaN'))
False
>>> c.is_normal(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_normal(context=self)
def is_qnan(self, a):
"""Return True if the operand is a quiet NaN; otherwise return False.
>>> ExtendedContext.is_qnan(Decimal('2.50'))
False
>>> ExtendedContext.is_qnan(Decimal('NaN'))
True
>>> ExtendedContext.is_qnan(Decimal('sNaN'))
False
>>> ExtendedContext.is_qnan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_qnan()
def is_signed(self, a):
"""Return True if the operand is negative; otherwise return False.
>>> ExtendedContext.is_signed(Decimal('2.50'))
False
>>> ExtendedContext.is_signed(Decimal('-12'))
True
>>> ExtendedContext.is_signed(Decimal('-0'))
True
>>> ExtendedContext.is_signed(8)
False
>>> ExtendedContext.is_signed(-8)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_signed()
def is_snan(self, a):
"""Return True if the operand is a signaling NaN;
otherwise return False.
>>> ExtendedContext.is_snan(Decimal('2.50'))
False
>>> ExtendedContext.is_snan(Decimal('NaN'))
False
>>> ExtendedContext.is_snan(Decimal('sNaN'))
True
>>> ExtendedContext.is_snan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_snan()
def is_subnormal(self, a):
"""Return True if the operand is subnormal; otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_subnormal(Decimal('2.50'))
False
>>> c.is_subnormal(Decimal('0.1E-999'))
True
>>> c.is_subnormal(Decimal('0.00'))
False
>>> c.is_subnormal(Decimal('-Inf'))
False
>>> c.is_subnormal(Decimal('NaN'))
False
>>> c.is_subnormal(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_subnormal(context=self)
def is_zero(self, a):
"""Return True if the operand is a zero; otherwise return False.
>>> ExtendedContext.is_zero(Decimal('0'))
True
>>> ExtendedContext.is_zero(Decimal('2.50'))
False
>>> ExtendedContext.is_zero(Decimal('-0E+2'))
True
>>> ExtendedContext.is_zero(1)
False
>>> ExtendedContext.is_zero(0)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_zero()
def ln(self, a):
"""Returns the natural (base e) logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.ln(Decimal('0'))
Decimal('-Infinity')
>>> c.ln(Decimal('1.000'))
Decimal('0')
>>> c.ln(Decimal('2.71828183'))
Decimal('1.00000000')
>>> c.ln(Decimal('10'))
Decimal('2.30258509')
>>> c.ln(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.ln(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.ln(context=self)
def log10(self, a):
"""Returns the base 10 logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.log10(Decimal('0'))
Decimal('-Infinity')
>>> c.log10(Decimal('0.001'))
Decimal('-3')
>>> c.log10(Decimal('1.000'))
Decimal('0')
>>> c.log10(Decimal('2'))
Decimal('0.301029996')
>>> c.log10(Decimal('10'))
Decimal('1')
>>> c.log10(Decimal('70'))
Decimal('1.84509804')
>>> c.log10(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.log10(0)
Decimal('-Infinity')
>>> c.log10(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.log10(context=self)
def logb(self, a):
""" Returns the exponent of the magnitude of the operand's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of the operand (as though the
operand were truncated to a single digit while maintaining the
value of that digit and without limiting the resulting exponent).
>>> ExtendedContext.logb(Decimal('250'))
Decimal('2')
>>> ExtendedContext.logb(Decimal('2.50'))
Decimal('0')
>>> ExtendedContext.logb(Decimal('0.03'))
Decimal('-2')
>>> ExtendedContext.logb(Decimal('0'))
Decimal('-Infinity')
>>> ExtendedContext.logb(1)
Decimal('0')
>>> ExtendedContext.logb(10)
Decimal('1')
>>> ExtendedContext.logb(100)
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.logb(context=self)
def logical_and(self, a, b):
"""Applies the logical operation 'and' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010'))
Decimal('1000')
>>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10'))
Decimal('10')
>>> ExtendedContext.logical_and(110, 1101)
Decimal('100')
>>> ExtendedContext.logical_and(Decimal(110), 1101)
Decimal('100')
>>> ExtendedContext.logical_and(110, Decimal(1101))
Decimal('100')
"""
a = _convert_other(a, raiseit=True)
return a.logical_and(b, context=self)
def logical_invert(self, a):
"""Invert all the digits in the operand.
The operand must be a logical number.
>>> ExtendedContext.logical_invert(Decimal('0'))
Decimal('111111111')
>>> ExtendedContext.logical_invert(Decimal('1'))
Decimal('111111110')
>>> ExtendedContext.logical_invert(Decimal('111111111'))
Decimal('0')
>>> ExtendedContext.logical_invert(Decimal('101010101'))
Decimal('10101010')
>>> ExtendedContext.logical_invert(1101)
Decimal('111110010')
"""
a = _convert_other(a, raiseit=True)
return a.logical_invert(context=self)
def logical_or(self, a, b):
"""Applies the logical operation 'or' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010'))
Decimal('1110')
>>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10'))
Decimal('1110')
>>> ExtendedContext.logical_or(110, 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(Decimal(110), 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(110, Decimal(1101))
Decimal('1111')
"""
a = _convert_other(a, raiseit=True)
return a.logical_or(b, context=self)
def logical_xor(self, a, b):
"""Applies the logical operation 'xor' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010'))
Decimal('110')
>>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10'))
Decimal('1101')
>>> ExtendedContext.logical_xor(110, 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(Decimal(110), 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(110, Decimal(1101))
Decimal('1011')
"""
a = _convert_other(a, raiseit=True)
return a.logical_xor(b, context=self)
def max(self, a, b):
"""max compares two values numerically and returns the maximum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the maximum (closer to positive
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.max(Decimal('3'), Decimal('2'))
Decimal('3')
>>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.max(Decimal('1.0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max(1, 2)
Decimal('2')
>>> ExtendedContext.max(Decimal(1), 2)
Decimal('2')
>>> ExtendedContext.max(1, Decimal(2))
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.max(b, context=self)
def max_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10'))
Decimal('-10')
>>> ExtendedContext.max_mag(1, -2)
Decimal('-2')
>>> ExtendedContext.max_mag(Decimal(1), -2)
Decimal('-2')
>>> ExtendedContext.max_mag(1, Decimal(-2))
Decimal('-2')
"""
a = _convert_other(a, raiseit=True)
return a.max_mag(b, context=self)
def min(self, a, b):
"""min compares two values numerically and returns the minimum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the minimum (closer to negative
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.min(Decimal('3'), Decimal('2'))
Decimal('2')
>>> ExtendedContext.min(Decimal('-10'), Decimal('3'))
Decimal('-10')
>>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
Decimal('1.0')
>>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.min(1, 2)
Decimal('1')
>>> ExtendedContext.min(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.min(1, Decimal(29))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min(b, context=self)
def min_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2'))
Decimal('-2')
>>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN'))
Decimal('-3')
>>> ExtendedContext.min_mag(1, -2)
Decimal('1')
>>> ExtendedContext.min_mag(Decimal(1), -2)
Decimal('1')
>>> ExtendedContext.min_mag(1, Decimal(-2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min_mag(b, context=self)
def minus(self, a):
"""Minus corresponds to unary prefix minus in Python.
The operation is evaluated using the same rules as subtract; the
operation minus(a) is calculated as subtract('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.minus(Decimal('1.3'))
Decimal('-1.3')
>>> ExtendedContext.minus(Decimal('-1.3'))
Decimal('1.3')
>>> ExtendedContext.minus(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__neg__(context=self)
def multiply(self, a, b):
"""multiply multiplies two operands.
If either operand is a special value then the general rules apply.
Otherwise, the operands are multiplied together
('long multiplication'), resulting in a number which may be as long as
the sum of the lengths of the two operands.
>>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
Decimal('3.60')
>>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))
Decimal('21')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))
Decimal('0.72')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
Decimal('-0.0')
>>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
Decimal('4.28135971E+11')
>>> ExtendedContext.multiply(7, 7)
Decimal('49')
>>> ExtendedContext.multiply(Decimal(7), 7)
Decimal('49')
>>> ExtendedContext.multiply(7, Decimal(7))
Decimal('49')
"""
a = _convert_other(a, raiseit=True)
r = a.__mul__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def next_minus(self, a):
"""Returns the largest representable number smaller than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_minus(Decimal('1'))
Decimal('0.999999999')
>>> c.next_minus(Decimal('1E-1007'))
Decimal('0E-1007')
>>> ExtendedContext.next_minus(Decimal('-1.00000003'))
Decimal('-1.00000004')
>>> c.next_minus(Decimal('Infinity'))
Decimal('9.99999999E+999')
>>> c.next_minus(1)
Decimal('0.999999999')
"""
a = _convert_other(a, raiseit=True)
return a.next_minus(context=self)
def next_plus(self, a):
"""Returns the smallest representable number larger than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_plus(Decimal('1'))
Decimal('1.00000001')
>>> c.next_plus(Decimal('-1E-1007'))
Decimal('-0E-1007')
>>> ExtendedContext.next_plus(Decimal('-1.00000003'))
Decimal('-1.00000002')
>>> c.next_plus(Decimal('-Infinity'))
Decimal('-9.99999999E+999')
>>> c.next_plus(1)
Decimal('1.00000001')
"""
a = _convert_other(a, raiseit=True)
return a.next_plus(context=self)
def next_toward(self, a, b):
"""Returns the number closest to a, in direction towards b.
The result is the closest representable number from the first
operand (but not the first operand) that is in the direction
towards the second operand, unless the operands have the same
value.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.next_toward(Decimal('1'), Decimal('2'))
Decimal('1.00000001')
>>> c.next_toward(Decimal('-1E-1007'), Decimal('1'))
Decimal('-0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('0'))
Decimal('-1.00000002')
>>> c.next_toward(Decimal('1'), Decimal('0'))
Decimal('0.999999999')
>>> c.next_toward(Decimal('1E-1007'), Decimal('-100'))
Decimal('0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('-10'))
Decimal('-1.00000004')
>>> c.next_toward(Decimal('0.00'), Decimal('-0.0000'))
Decimal('-0.00')
>>> c.next_toward(0, 1)
Decimal('1E-1007')
>>> c.next_toward(Decimal(0), 1)
Decimal('1E-1007')
>>> c.next_toward(0, Decimal(1))
Decimal('1E-1007')
"""
a = _convert_other(a, raiseit=True)
return a.next_toward(b, context=self)
def normalize(self, a):
"""normalize reduces an operand to its simplest form.
Essentially a plus operation with all trailing zeros removed from the
result.
>>> ExtendedContext.normalize(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.normalize(Decimal('-2.0'))
Decimal('-2')
>>> ExtendedContext.normalize(Decimal('1.200'))
Decimal('1.2')
>>> ExtendedContext.normalize(Decimal('-120'))
Decimal('-1.2E+2')
>>> ExtendedContext.normalize(Decimal('120.00'))
Decimal('1.2E+2')
>>> ExtendedContext.normalize(Decimal('0.00'))
Decimal('0')
>>> ExtendedContext.normalize(6)
Decimal('6')
"""
a = _convert_other(a, raiseit=True)
return a.normalize(context=self)
def number_class(self, a):
"""Returns an indication of the class of the operand.
The class is one of the following strings:
-sNaN
-NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.number_class(Decimal('Infinity'))
'+Infinity'
>>> c.number_class(Decimal('1E-10'))
'+Normal'
>>> c.number_class(Decimal('2.50'))
'+Normal'
>>> c.number_class(Decimal('0.1E-999'))
'+Subnormal'
>>> c.number_class(Decimal('0'))
'+Zero'
>>> c.number_class(Decimal('-0'))
'-Zero'
>>> c.number_class(Decimal('-0.1E-999'))
'-Subnormal'
>>> c.number_class(Decimal('-1E-10'))
'-Normal'
>>> c.number_class(Decimal('-2.50'))
'-Normal'
>>> c.number_class(Decimal('-Infinity'))
'-Infinity'
>>> c.number_class(Decimal('NaN'))
'NaN'
>>> c.number_class(Decimal('-NaN'))
'NaN'
>>> c.number_class(Decimal('sNaN'))
'sNaN'
>>> c.number_class(123)
'+Normal'
"""
a = _convert_other(a, raiseit=True)
return a.number_class(context=self)
def plus(self, a):
"""Plus corresponds to unary prefix plus in Python.
The operation is evaluated using the same rules as add; the
operation plus(a) is calculated as add('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.plus(Decimal('1.3'))
Decimal('1.3')
>>> ExtendedContext.plus(Decimal('-1.3'))
Decimal('-1.3')
>>> ExtendedContext.plus(-1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__pos__(context=self)
def power(self, a, b, modulo=None):
"""Raises a to the power of b, to modulo if given.
With two arguments, compute a**b. If a is negative then b
must be integral. The result will be inexact unless b is
integral and the result is finite and can be expressed exactly
in 'precision' digits.
With three arguments, compute (a**b) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- b must be nonnegative
- at least one of a or b must be nonzero
- modulo must be nonzero and have at most 'precision' digits
The result of pow(a, b, modulo) is identical to the result
that would be obtained by computing (a**b) % modulo with
unbounded precision, but is computed more efficiently. It is
always exact.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.power(Decimal('2'), Decimal('3'))
Decimal('8')
>>> c.power(Decimal('-2'), Decimal('3'))
Decimal('-8')
>>> c.power(Decimal('2'), Decimal('-3'))
Decimal('0.125')
>>> c.power(Decimal('1.7'), Decimal('8'))
Decimal('69.7575744')
>>> c.power(Decimal('10'), Decimal('0.301029996'))
Decimal('2.00000000')
>>> c.power(Decimal('Infinity'), Decimal('-1'))
Decimal('0')
>>> c.power(Decimal('Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('Infinity'), Decimal('1'))
Decimal('Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('-1'))
Decimal('-0')
>>> c.power(Decimal('-Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('-Infinity'), Decimal('1'))
Decimal('-Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('2'))
Decimal('Infinity')
>>> c.power(Decimal('0'), Decimal('0'))
Decimal('NaN')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('16'))
Decimal('11')
>>> c.power(Decimal('-3'), Decimal('7'), Decimal('16'))
Decimal('-11')
>>> c.power(Decimal('-3'), Decimal('8'), Decimal('16'))
Decimal('1')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('-16'))
Decimal('11')
>>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789'))
Decimal('11729830')
>>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729'))
Decimal('-0')
>>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537'))
Decimal('1')
>>> ExtendedContext.power(7, 7)
Decimal('823543')
>>> ExtendedContext.power(Decimal(7), 7)
Decimal('823543')
>>> ExtendedContext.power(7, Decimal(7), 2)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__pow__(b, modulo, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def quantize(self, a, b):
"""Returns a value equal to 'a' (rounded), having the exponent of 'b'.
The coefficient of the result is derived from that of the left-hand
operand. It may be rounded using the current rounding setting (if the
exponent is being increased), multiplied by a positive power of ten (if
        the exponent is being decreased), or left unchanged (if the exponent is
already equal to that of the right-hand operand).
Unlike other operations, if the length of the coefficient after the
quantize operation would be greater than precision then an Invalid
operation condition is raised. This guarantees that, unless there is
an error condition, the exponent of the result of a quantize is always
equal to that of the right-hand operand.
Also unlike other operations, quantize will never raise Underflow, even
if the result is subnormal and inexact.
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
Decimal('2.170')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
Decimal('2.17')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
Decimal('2.2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
Decimal('2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
Decimal('0E+1')
>>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
Decimal('-Infinity')
>>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
Decimal('-0')
>>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
Decimal('-0E+5')
>>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
Decimal('217.0')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
Decimal('217')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
Decimal('2.2E+2')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
Decimal('2E+2')
>>> ExtendedContext.quantize(1, 2)
Decimal('1')
>>> ExtendedContext.quantize(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.quantize(1, Decimal(2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.quantize(b, context=self)
def radix(self):
"""Just returns 10, as this is Decimal, :)
>>> ExtendedContext.radix()
Decimal('10')
"""
return Decimal(10)
def remainder(self, a, b):
"""Returns the remainder from integer division.
The result is the residue of the dividend after the operation of
calculating integer division as described for divide-integer, rounded
to precision digits if necessary. The sign of the result, if
non-zero, is the same as that of the original dividend.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
Decimal('2.1')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
Decimal('1.0')
>>> ExtendedContext.remainder(22, 6)
Decimal('4')
>>> ExtendedContext.remainder(Decimal(22), 6)
Decimal('4')
>>> ExtendedContext.remainder(22, Decimal(6))
Decimal('4')
"""
a = _convert_other(a, raiseit=True)
r = a.__mod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def remainder_near(self, a, b):
"""Returns to be "a - b * n", where n is the integer nearest the exact
value of "x / b" (if two integers are equally near then the even one
is chosen). If the result is equal to 0 then its sign will be the
sign of a.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
Decimal('-0.9')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
Decimal('-2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
Decimal('-0.3')
>>> ExtendedContext.remainder_near(3, 11)
Decimal('3')
>>> ExtendedContext.remainder_near(Decimal(3), 11)
Decimal('3')
>>> ExtendedContext.remainder_near(3, Decimal(11))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
return a.remainder_near(b, context=self)
def rotate(self, a, b):
"""Returns a rotated copy of a, b times.
The coefficient of the result is a rotated copy of the digits in
the coefficient of the first operand. The number of places of
rotation is taken from the absolute value of the second operand,
with the rotation being to the left if the second operand is
positive or to the right otherwise.
>>> ExtendedContext.rotate(Decimal('34'), Decimal('8'))
Decimal('400000003')
>>> ExtendedContext.rotate(Decimal('12'), Decimal('9'))
Decimal('12')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2'))
Decimal('891234567')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2'))
Decimal('345678912')
>>> ExtendedContext.rotate(1333333, 1)
Decimal('13333330')
>>> ExtendedContext.rotate(Decimal(1333333), 1)
Decimal('13333330')
>>> ExtendedContext.rotate(1333333, Decimal(1))
Decimal('13333330')
"""
a = _convert_other(a, raiseit=True)
return a.rotate(b, context=self)
def same_quantum(self, a, b):
"""Returns True if the two operands have the same exponent.
The result is never affected by either the sign or the coefficient of
either operand.
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
False
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
True
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
False
>>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
True
>>> ExtendedContext.same_quantum(10000, -1)
True
>>> ExtendedContext.same_quantum(Decimal(10000), -1)
True
>>> ExtendedContext.same_quantum(10000, Decimal(-1))
True
"""
a = _convert_other(a, raiseit=True)
return a.same_quantum(b)
    def scaleb(self, a, b):
        """Returns the first operand after adding the second value to its exponent.
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2'))
Decimal('0.0750')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0'))
Decimal('7.50')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3'))
Decimal('7.50E+3')
>>> ExtendedContext.scaleb(1, 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(Decimal(1), 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(1, Decimal(4))
Decimal('1E+4')
"""
a = _convert_other(a, raiseit=True)
return a.scaleb(b, context=self)
def shift(self, a, b):
"""Returns a shifted copy of a, b times.
The coefficient of the result is a shifted copy of the digits
in the coefficient of the first operand. The number of places
to shift is taken from the absolute value of the second operand,
with the shift being to the left if the second operand is
positive or to the right otherwise. Digits shifted into the
coefficient are zeros.
>>> ExtendedContext.shift(Decimal('34'), Decimal('8'))
Decimal('400000000')
>>> ExtendedContext.shift(Decimal('12'), Decimal('9'))
Decimal('0')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2'))
Decimal('1234567')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2'))
Decimal('345678900')
>>> ExtendedContext.shift(88888888, 2)
Decimal('888888800')
>>> ExtendedContext.shift(Decimal(88888888), 2)
Decimal('888888800')
>>> ExtendedContext.shift(88888888, Decimal(2))
Decimal('888888800')
"""
a = _convert_other(a, raiseit=True)
return a.shift(b, context=self)
def sqrt(self, a):
"""Square root of a non-negative number to context precision.
If the result must be inexact, it is rounded using the round-half-even
algorithm.
>>> ExtendedContext.sqrt(Decimal('0'))
Decimal('0')
>>> ExtendedContext.sqrt(Decimal('-0'))
Decimal('-0')
>>> ExtendedContext.sqrt(Decimal('0.39'))
Decimal('0.624499800')
>>> ExtendedContext.sqrt(Decimal('100'))
Decimal('10')
>>> ExtendedContext.sqrt(Decimal('1'))
Decimal('1')
>>> ExtendedContext.sqrt(Decimal('1.0'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('1.00'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('7'))
Decimal('2.64575131')
>>> ExtendedContext.sqrt(Decimal('10'))
Decimal('3.16227766')
>>> ExtendedContext.sqrt(2)
Decimal('1.41421356')
>>> ExtendedContext.prec
9
"""
a = _convert_other(a, raiseit=True)
return a.sqrt(context=self)
def subtract(self, a, b):
"""Return the difference between the two operands.
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
Decimal('0.23')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
Decimal('0.00')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
Decimal('-0.77')
>>> ExtendedContext.subtract(8, 5)
Decimal('3')
>>> ExtendedContext.subtract(Decimal(8), 5)
Decimal('3')
>>> ExtendedContext.subtract(8, Decimal(5))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__sub__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def to_eng_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.to_eng_string(context=self)
def to_sci_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.__str__(context=self)
def to_integral_exact(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting; Inexact and Rounded flags
are allowed in this operation. The rounding mode is taken from the
context.
>>> ExtendedContext.to_integral_exact(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_exact(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_exact(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_exact(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_exact(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_exact(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_exact(context=self)
def to_integral_value(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting, except that no flags will
be set. The rounding mode is taken from the context.
>>> ExtendedContext.to_integral_value(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_value(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_value(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_value(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_value(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_value(context=self)
    # The method name changed, but we also provide the old one for compatibility.
to_integral = to_integral_value
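    # The two integral roundings differ only in the flags they set
    # (doctest-style sketch):
    #
    #   >>> from decimal import Context, Inexact, Decimal
    #   >>> c = Context()
    #   >>> c.to_integral_value(Decimal('2.1')); c.flags[Inexact]
    #   Decimal('2')
    #   0
    #   >>> c.to_integral_exact(Decimal('2.1')); c.flags[Inexact]
    #   Decimal('2')
    #   1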
class _WorkRep(object):
__slots__ = ('sign','int','exp')
# sign: 0 or 1
# int: int
# exp: None, int, or string
def __init__(self, value=None):
if value is None:
self.sign = None
self.int = 0
self.exp = None
elif isinstance(value, Decimal):
self.sign = value._sign
self.int = int(value._int)
self.exp = value._exp
else:
# assert isinstance(value, tuple)
self.sign = value[0]
self.int = value[1]
self.exp = value[2]
def __repr__(self):
return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
__str__ = __repr__
def _normalize(op1, op2, prec = 0):
"""Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
if op1.exp < op2.exp:
tmp = op2
other = op1
else:
tmp = op1
other = op2
# Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).
# Then adding 10**exp to tmp has the same effect (after rounding)
# as adding any positive quantity smaller than 10**exp; similarly
# for subtraction. So if other is smaller than 10**exp we replace
# it with 10**exp. This avoids tmp.exp - other.exp getting too large.
tmp_len = len(str(tmp.int))
other_len = len(str(other.int))
exp = tmp.exp + min(-1, tmp_len - prec - 2)
if other_len + other.exp - 1 < exp:
other.int = 1
other.exp = exp
tmp.int *= 10 ** (tmp.exp - other.exp)
tmp.exp = other.exp
return op1, op2
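# Worked example (comment only): aligning Decimal('5') with Decimal('1.23')
# at prec=5 scales the operand with the larger exponent down to the smaller
# exponent:
#
#   >>> op1, op2 = _WorkRep(Decimal('1.23')), _WorkRep(Decimal('5'))
#   >>> _normalize(op1, op2, prec=5)
#   ((0, 123, -2), (0, 500, -2))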
##### Integer arithmetic functions used by ln, log10, exp and __pow__ #####
_nbits = int.bit_length
def _decimal_lshift_exact(n, e):
""" Given integers n and e, return n * 10**e if it's an integer, else None.
The computation is designed to avoid computing large powers of 10
unnecessarily.
>>> _decimal_lshift_exact(3, 4)
30000
>>> _decimal_lshift_exact(300, -999999999) # returns None
"""
if n == 0:
return 0
elif e >= 0:
return n * 10**e
else:
# val_n = largest power of 10 dividing n.
str_n = str(abs(n))
val_n = len(str_n) - len(str_n.rstrip('0'))
return None if val_n < -e else n // 10**-e
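# The negative-exponent branch succeeds only when n carries enough
# trailing zeros (comment-only examples):
#
#   >>> _decimal_lshift_exact(30000, -3)
#   30
#   >>> _decimal_lshift_exact(123, -1)    # 12.3 is not an integer: None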
def _sqrt_nearest(n, a):
"""Closest integer to the square root of the positive integer n. a is
an initial approximation to the square root. Any positive integer
will do for a, but the closer a is to the square root of n the
faster convergence will be.
"""
if n <= 0 or a <= 0:
raise ValueError("Both arguments to _sqrt_nearest should be positive.")
    b = 0
    while a != b:
        b, a = a, (a - (-n // a)) >> 1    # i.e. (a + ceil(n/a)) // 2, one Newton step
return a
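# For example (comment only): _sqrt_nearest(10, 2) iterates 2 -> 3 -> 3 and
# returns 3, the closest integer to sqrt(10); _sqrt_nearest(16, 1) converges
# to 4 exactly.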
def _rshift_nearest(x, shift):
"""Given an integer x and a nonnegative integer shift, return closest
integer to x / 2**shift; use round-to-even in case of a tie.
"""
b, q = 1 << shift, x >> shift
return q + (2*(x & (b-1)) + (q&1) > b)
def _div_nearest(a, b):
"""Closest integer to a/b, a and b positive integers; rounds to even
in the case of a tie.
"""
q, r = divmod(a, b)
return q + (2*r + (q&1) > b)
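# Round-half-even in action (comment-only examples): _div_nearest(7, 2)
# gives 4 (3.5 rounds to the even neighbour 4), _div_nearest(5, 2) gives 2
# (2.5 rounds to the even neighbour 2), and _rshift_nearest(5, 1) likewise
# gives 2.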
def _ilog(x, M, L = 8):
"""Integer approximation to M*log(x/M), with absolute error boundable
in terms only of x/M.
Given positive integers x and M, return an integer approximation to
M * log(x/M). For L = 8 and 0.1 <= x/M <= 10 the difference
between the approximation and the exact result is at most 22. For
L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15. In
both cases these are upper bounds on the error; it will usually be
much smaller."""
# The basic algorithm is the following: let log1p be the function
# log1p(x) = log(1+x). Then log(x/M) = log1p((x-M)/M). We use
# the reduction
#
# log1p(y) = 2*log1p(y/(1+sqrt(1+y)))
#
# repeatedly until the argument to log1p is small (< 2**-L in
# absolute value). For small y we can use the Taylor series
# expansion
#
# log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T
#
# truncating at T such that y**T is small enough. The whole
# computation is carried out in a form of fixed-point arithmetic,
# with a real number z being represented by an integer
# approximation to z*M. To avoid loss of precision, the y below
# is actually an integer approximation to 2**R*y*M, where R is the
# number of reductions performed so far.
y = x-M
# argument reduction; R = number of reductions performed
R = 0
while (R <= L and abs(y) << L-R >= M or
R > L and abs(y) >> R-L >= M):
y = _div_nearest((M*y) << 1,
M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M))
R += 1
# Taylor series with T terms
T = -int(-10*len(str(M))//(3*L))
yshift = _rshift_nearest(y, R)
w = _div_nearest(M, T)
for k in range(T-1, 0, -1):
w = _div_nearest(M, k) - _div_nearest(yshift*w, M)
return _div_nearest(w*y, M)
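# Rough sanity check (comment only): _ilog(200000, 100000) approximates
# 10**5 * ln(2) = 69314.7..., so the result is an integer within 22 of
# 69315, per the error bound quoted above.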
def _dlog10(c, e, p):
"""Given integers c, e and p with c > 0, p >= 0, compute an integer
approximation to 10**p * log10(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# increase precision by 2; compensate for this by dividing
# final result by 100
p += 2
# write c*10**e as d*10**f with either:
# f >= 0 and 1 <= d <= 10, or
# f <= 0 and 0.1 <= d <= 1.
# Thus for c*10**e close to 1, f = 0
l = len(str(c))
f = e+l - (e+l >= 1)
if p > 0:
M = 10**p
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k)
log_d = _ilog(c, M) # error < 5 + 22 = 27
log_10 = _log10_digits(p) # error < 1
log_d = _div_nearest(log_d*M, log_10)
log_tenpower = f*M # exact
else:
log_d = 0 # error < 2.31
log_tenpower = _div_nearest(f, 10**-p) # error < 0.5
return _div_nearest(log_tenpower+log_d, 100)
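# Rough example (worked by hand): _dlog10(2, 0, 6) approximates
# 10**6 * log10(2) = 301029.995..., so it returns 301030 to within the
# advertised error of 1.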
def _dlog(c, e, p):
"""Given integers c, e and p with c > 0, compute an integer
approximation to 10**p * log(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# Increase precision by 2. The precision increase is compensated
# for at the end with a division by 100.
p += 2
# rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10,
# or f <= 0 and 0.1 <= d <= 1. Then we can compute 10**p * log(c*10**e)
# as 10**p * log(d) + 10**p*f * log(10).
l = len(str(c))
f = e+l - (e+l >= 1)
# compute approximation to 10**p*log(d), with error < 27
if p > 0:
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k) # error of <= 0.5 in c
# _ilog magnifies existing error in c by a factor of at most 10
log_d = _ilog(c, 10**p) # error < 5 + 22 = 27
else:
# p <= 0: just approximate the whole thing by 0; error < 2.31
log_d = 0
# compute approximation to f*10**p*log(10), with error < 11.
if f:
extra = len(str(abs(f)))-1
if p + extra >= 0:
# error in f * _log10_digits(p+extra) < |f| * 1 = |f|
# after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11
f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra)
else:
f_log_ten = 0
else:
f_log_ten = 0
# error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1
return _div_nearest(f_log_ten + log_d, 100)
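# Rough example (worked by hand): _dlog(2, 0, 6) approximates
# 10**6 * log(2) = 693147.18..., so it returns 693147 to within the
# advertised error of 1.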
class _Log10Memoize(object):
"""Class to compute, store, and allow retrieval of, digits of the
constant log(10) = 2.302585.... This constant is needed by
Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__."""
def __init__(self):
self.digits = "23025850929940456840179914546843642076011014886"
def getdigits(self, p):
"""Given an integer p >= 0, return floor(10**p)*log(10).
For example, self.getdigits(3) returns 2302.
"""
# digits are stored as a string, for quick conversion to
# integer in the case that we've already computed enough
# digits; the stored digits should always be correct
# (truncated, not rounded to nearest).
if p < 0:
raise ValueError("p should be nonnegative")
if p >= len(self.digits):
# compute p+3, p+6, p+9, ... digits; continue until at
# least one of the extra digits is nonzero
extra = 3
while True:
# compute p+extra digits, correct to within 1ulp
M = 10**(p+extra+2)
digits = str(_div_nearest(_ilog(10*M, M), 100))
if digits[-extra:] != '0'*extra:
break
extra += 3
# keep all reliable digits so far; remove trailing zeros
# and next nonzero digit
self.digits = digits.rstrip('0')[:-1]
return int(self.digits[:p+1])
_log10_digits = _Log10Memoize().getdigits
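# Rough example (worked by hand): _log10_digits(5) == 230258, the first six
# digits of log(10) = 2.302585..., i.e. floor(10**5 * log(10)).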
def _iexp(x, M, L=8):
"""Given integers x and M, M > 0, such that x/M is small in absolute
value, compute an integer approximation to M*exp(x/M). For 0 <=
x/M <= 2.4, the absolute error in the result is bounded by 60 (and
is usually much smaller)."""
# Algorithm: to compute exp(z) for a real number z, first divide z
# by a suitable power R of 2 so that |z/2**R| < 2**-L. Then
# compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor
# series
#
# expm1(x) = x + x**2/2! + x**3/3! + ...
#
# Now use the identity
#
# expm1(2x) = expm1(x)*(expm1(x)+2)
#
# R times to compute the sequence expm1(z/2**R),
# expm1(z/2**(R-1)), ... , exp(z/2), exp(z).
# Find R such that x/2**R/M <= 2**-L
R = _nbits((x<<L)//M)
# Taylor series. (2**L)**T > M
T = -int(-10*len(str(M))//(3*L))
y = _div_nearest(x, T)
Mshift = M<<R
for i in range(T-1, 0, -1):
y = _div_nearest(x*(Mshift + y), Mshift * i)
# Expansion
for k in range(R-1, -1, -1):
Mshift = M<<(k+2)
y = _div_nearest(y*(y+Mshift), Mshift)
return M+y
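# Rough example (worked by hand from the stated bound): with M = 10**6,
# _iexp(M, M) approximates M * exp(1) and returns a value within 60 of
# 2718282.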
def _dexp(c, e, p):
"""Compute an approximation to exp(c*10**e), with p decimal places of
precision.
Returns integers d, f such that:
10**(p-1) <= d <= 10**p, and
(d-1)*10**f < exp(c*10**e) < (d+1)*10**f
In other words, d*10**f is an approximation to exp(c*10**e) with p
digits of precision, and with an error in d of at most 1. This is
almost, but not quite, the same as the error being < 1ulp: when d
= 10**(p-1) the error could be up to 10 ulp."""
# we'll call iexp with M = 10**(p+2), giving p+3 digits of precision
p += 2
# compute log(10) with extra precision = adjusted exponent of c*10**e
extra = max(0, e + len(str(c)) - 1)
q = p + extra
# compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),
# rounding down
shift = e+q
if shift >= 0:
cshift = c*10**shift
else:
cshift = c//10**-shift
quot, rem = divmod(cshift, _log10_digits(q))
# reduce remainder back to original precision
rem = _div_nearest(rem, 10**extra)
# error in result of _iexp < 120; error after division < 0.62
return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3
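# Rough example (worked by hand): _dexp(1, 0, 5) approximates exp(1) to five
# digits, returning roughly (27183, -4), i.e. 2.7183.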
def _dpower(xc, xe, yc, ye, p):
"""Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and
y = yc*10**ye, compute x**y. Returns a pair of integers (c, e) such that:
10**(p-1) <= c <= 10**p, and
(c-1)*10**e < x**y < (c+1)*10**e
in other words, c*10**e is an approximation to x**y with p digits
of precision, and with an error in c of at most 1. (This is
almost, but not quite, the same as the error being < 1ulp: when c
== 10**(p-1) we can only guarantee error < 10ulp.)
We assume that: x is positive and not equal to 1, and y is nonzero.
"""
# Find b such that 10**(b-1) <= |y| <= 10**b
b = len(str(abs(yc))) + ye
# log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point
lxc = _dlog(xc, xe, p+b+1)
# compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1)
shift = ye-b
if shift >= 0:
pc = lxc*yc*10**shift
else:
pc = _div_nearest(lxc*yc, 10**-shift)
if pc == 0:
# we prefer a result that isn't exactly 1; this makes it
# easier to compute a correctly rounded result in __pow__
if ((len(str(xc)) + xe >= 1) == (yc > 0)): # if x**y > 1:
coeff, exp = 10**(p-1)+1, 1-p
else:
coeff, exp = 10**p-1, -p
else:
coeff, exp = _dexp(pc, -(p+1), p+1)
coeff = _div_nearest(coeff, 10)
exp += 1
return coeff, exp
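# Rough example (worked by hand): _dpower(2, 0, 3, 0, 4) computes 2**3 to
# four digits of precision and returns approximately (8000, -3), i.e. 8.000.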
def _log10_lb(c, correction = {
'1': 100, '2': 70, '3': 53, '4': 40, '5': 31,
'6': 23, '7': 16, '8': 10, '9': 5}):
"""Compute a lower bound for 100*log10(c) for a positive integer c."""
if c <= 0:
raise ValueError("The argument to _log10_lb should be nonnegative.")
str_c = str(c)
return 100*len(str_c) - correction[str_c[0]]
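# Rough examples (worked by hand): _log10_lb(2) == 30 <= 100*log10(2) =
# 30.10..., and _log10_lb(9) == 95 <= 95.42...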
##### Helper Functions ####################################################
def _convert_other(other, raiseit=False, allow_float=False):
"""Convert other to Decimal.
Verifies that it's ok to use in an implicit construction.
If allow_float is true, allow conversion from float; this
is used in the comparison methods (__eq__ and friends).
"""
if isinstance(other, Decimal):
return other
if isinstance(other, int):
return Decimal(other)
if allow_float and isinstance(other, float):
return Decimal.from_float(other)
if raiseit:
raise TypeError("Unable to convert %s to Decimal" % other)
return NotImplemented
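# Rough examples (worked by hand): _convert_other(5) returns Decimal(5);
# _convert_other(5.0) returns NotImplemented unless allow_float is true;
# _convert_other("5") returns NotImplemented, or raises TypeError when
# raiseit is true.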
def _convert_for_comparison(self, other, equality_op=False):
"""Given a Decimal instance self and a Python object other, return
a pair (s, o) of Decimal instances such that "s op o" is
equivalent to "self op other" for any of the 6 comparison
operators "op".
"""
if isinstance(other, Decimal):
return self, other
# Comparison with a Rational instance (also includes integers):
# self op n/d <=> self*d op n (for n and d integers, d positive).
# A NaN or infinity can be left unchanged without affecting the
# comparison result.
if isinstance(other, _numbers.Rational):
if not self._is_special:
self = _dec_from_triple(self._sign,
str(int(self._int) * other.denominator),
self._exp)
return self, Decimal(other.numerator)
# Comparisons with float and complex types. == and != comparisons
# with complex numbers should succeed, returning either True or False
# as appropriate. Other comparisons return NotImplemented.
if equality_op and isinstance(other, _numbers.Complex) and other.imag == 0:
other = other.real
if isinstance(other, float):
context = getcontext()
if equality_op:
context.flags[FloatOperation] = 1
else:
context._raise_error(FloatOperation,
"strict semantics for mixing floats and Decimals are enabled")
return self, Decimal.from_float(other)
return NotImplemented, NotImplemented
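# Rough example of the Rational rescaling above (worked by hand): comparing
# Decimal('0.3') with Fraction(1, 3) compares Decimal('0.9') with Decimal(1)
# instead, so 0.3 < 1/3 is decided exactly, with no rounding.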
##### Setup Specific Contexts ############################################
# The default context prototype used by Context().
# It is mutable, so that new contexts can have different default values.
DefaultContext = Context(
prec=17, rounding=ROUND_HALF_EVEN,
traps=[DivisionByZero, Overflow, InvalidOperation],
flags=[],
Emax=308,
Emin=-324,
capitals=1,
clamp=0
)
# Pre-made alternate contexts offered by the specification
# Don't change these; the user should be able to select these
# contexts and be able to reproduce results from other implementations
# of the spec.
BasicContext = Context(
prec=9, rounding=ROUND_HALF_UP,
traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
flags=[],
)
ExtendedContext = Context(
prec=9, rounding=ROUND_HALF_EVEN,
traps=[],
flags=[],
)
##### crud for parsing strings #############################################
#
# Regular expression used for parsing numeric strings. Additional
# comments:
#
# 1. Uncomment the two '\s*' lines to allow leading and/or trailing
# whitespace. But note that the specification disallows whitespace in
# a numeric string.
#
# 2. For finite numbers (not infinities and NaNs) the body of the
# number between the optional sign and the optional exponent must have
# at least one decimal digit, possibly after the decimal point. The
# lookahead expression '(?=\d|\.\d)' checks this.
#import re
#_parser = re.compile(r""" # A numeric string consists of:
# \s*
# (?P<sign>[-+])? # an optional sign, followed by either...
# (
# (?=\d|\.\d) # ...a number (with at least one digit)
# (?P<int>\d*) # having a (possibly empty) integer part
# (\.(?P<frac>\d*))? # followed by an optional fractional part
# (E(?P<exp>[-+]?\d+))? # followed by an optional exponent, or...
# |
# Inf(inity)? # ...an infinity, or...
# |
# (?P<signal>s)? # ...an (optionally signaling)
# NaN # NaN
# (?P<diag>\d*) # with (possibly empty) diagnostic info.
# )
# \s*
# \Z
#""", re.VERBOSE | re.IGNORECASE).match
import _jsre as re
_all_zeros = re.compile('0*$').match
_exact_half = re.compile('50*$').match
##### PEP3101 support functions ##############################################
# The functions in this section have little to do with the Decimal
# class, and could potentially be reused or adapted for other pure
# Python numeric classes that want to implement __format__
#
# A format specifier for Decimal looks like:
#
# [[fill]align][sign][#][0][minimumwidth][,][.precision][type]
#_parse_format_specifier_regex = re.compile(r"""\A
#(?:
# (?P<fill>.)?
# (?P<align>[<>=^])
#)?
#(?P<sign>[-+ ])?
#(?P<alt>\#)?
#(?P<zeropad>0)?
#(?P<minimumwidth>(?!0)\d+)?
#(?P<thousands_sep>,)?
#(?:\.(?P<precision>0|(?!0)\d+))?
#(?P<type>[eEfFgGn%])?
#\Z
#""", re.VERBOSE|re.DOTALL)
del re
# The locale module is only needed for the 'n' format specifier. The
# rest of the PEP 3101 code functions quite happily without it, so we
# don't care too much if locale isn't present.
try:
import locale as _locale
except ImportError:
pass
def _parse_format_specifier(format_spec, _localeconv=None):
"""Parse and validate a format specifier.
Turns a standard numeric format specifier into a dict, with the
following entries:
fill: fill character to pad field to minimum width
align: alignment type, either '<', '>', '=' or '^'
sign: either '+', '-' or ' '
minimumwidth: nonnegative integer giving minimum width
zeropad: boolean, indicating whether to pad with zeros
thousands_sep: string to use as thousands separator, or ''
grouping: grouping for thousands separators, in format
used by localeconv
decimal_point: string to use for decimal point
precision: nonnegative integer giving precision, or None
type: one of the characters 'eEfFgG%', or None
"""
m = _parse_format_specifier_regex.match(format_spec)
if m is None:
raise ValueError("Invalid format specifier: " + format_spec)
# get the dictionary
format_dict = m.groupdict()
# zeropad; defaults for fill and alignment. If zero padding
# is requested, the fill and align fields should be absent.
fill = format_dict['fill']
align = format_dict['align']
format_dict['zeropad'] = (format_dict['zeropad'] is not None)
if format_dict['zeropad']:
if fill is not None:
raise ValueError("Fill character conflicts with '0'"
" in format specifier: " + format_spec)
if align is not None:
raise ValueError("Alignment conflicts with '0' in "
"format specifier: " + format_spec)
format_dict['fill'] = fill or ' '
# PEP 3101 originally specified that the default alignment should
# be left; it was later agreed that right-aligned makes more sense
# for numeric types. See http://bugs.python.org/issue6857.
format_dict['align'] = align or '>'
# default sign handling: '-' for negative, '' for positive
if format_dict['sign'] is None:
format_dict['sign'] = '-'
# minimumwidth defaults to 0; precision remains None if not given
format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0')
if format_dict['precision'] is not None:
format_dict['precision'] = int(format_dict['precision'])
# if format type is 'g' or 'G' then a precision of 0 makes little
# sense; convert it to 1. Same if format type is unspecified.
if format_dict['precision'] == 0:
if format_dict['type'] is None or format_dict['type'] in 'gGn':
format_dict['precision'] = 1
# determine thousands separator, grouping, and decimal separator, and
# add appropriate entries to format_dict
if format_dict['type'] == 'n':
# apart from separators, 'n' behaves just like 'g'
format_dict['type'] = 'g'
if _localeconv is None:
_localeconv = _locale.localeconv()
if format_dict['thousands_sep'] is not None:
raise ValueError("Explicit thousands separator conflicts with "
"'n' type in format specifier: " + format_spec)
format_dict['thousands_sep'] = _localeconv['thousands_sep']
format_dict['grouping'] = _localeconv['grouping']
format_dict['decimal_point'] = _localeconv['decimal_point']
else:
if format_dict['thousands_sep'] is None:
format_dict['thousands_sep'] = ''
format_dict['grouping'] = [3, 0]
format_dict['decimal_point'] = '.'
return format_dict
def _format_align(sign, body, spec):
"""Given an unpadded, non-aligned numeric string 'body' and sign
string 'sign', add padding and alignment conforming to the given
format specifier dictionary 'spec' (as produced by
parse_format_specifier).
"""
# how much extra space do we have to play with?
minimumwidth = spec['minimumwidth']
fill = spec['fill']
padding = fill*(minimumwidth - len(sign) - len(body))
align = spec['align']
if align == '<':
result = sign + body + padding
elif align == '>':
result = padding + sign + body
elif align == '=':
result = sign + padding + body
elif align == '^':
half = len(padding)//2
result = padding[:half] + sign + body + padding[half:]
else:
raise ValueError('Unrecognised alignment field')
return result
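# Rough example (worked by hand): _format_align('-', '1.23',
# {'minimumwidth': 8, 'fill': '*', 'align': '='}) gives '-***1.23' -- with
# '=' alignment the padding sits between the sign and the body.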
def _group_lengths(grouping):
"""Convert a localeconv-style grouping into a (possibly infinite)
iterable of integers representing group lengths.
"""
# The result from localeconv()['grouping'], and the input to this
# function, should be a list of integers in one of the
# following three forms:
#
    #   (1) an empty list, or
    #   (2) a nonempty list of positive integers + [0], or
    #   (3) a list of positive integers + [locale.CHAR_MAX]
from itertools import chain, repeat
if not grouping:
return []
elif grouping[-1] == 0 and len(grouping) >= 2:
return chain(grouping[:-1], repeat(grouping[-2]))
elif grouping[-1] == _locale.CHAR_MAX:
return grouping[:-1]
else:
raise ValueError('unrecognised format for grouping')
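# Rough examples (worked by hand): _group_lengths([3, 0]) yields 3, 3, 3, ...
# (the last nonzero group repeats), while _group_lengths([3, 2, 0]) yields
# 3, then 2, 2, 2, ...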
def _insert_thousands_sep(digits, spec, min_width=1):
"""Insert thousands separators into a digit string.
spec is a dictionary whose keys should include 'thousands_sep' and
'grouping'; typically it's the result of parsing the format
specifier using _parse_format_specifier.
The min_width keyword argument gives the minimum length of the
result, which will be padded on the left with zeros if necessary.
If necessary, the zero padding adds an extra '0' on the left to
avoid a leading thousands separator. For example, inserting
commas every three digits in '123456', with min_width=8, gives
'0,123,456', even though that has length 9.
"""
sep = spec['thousands_sep']
grouping = spec['grouping']
groups = []
for l in _group_lengths(grouping):
if l <= 0:
raise ValueError("group length should be positive")
# max(..., 1) forces at least 1 digit to the left of a separator
l = min(max(len(digits), min_width, 1), l)
groups.append('0'*(l - len(digits)) + digits[-l:])
digits = digits[:-l]
min_width -= l
if not digits and min_width <= 0:
break
min_width -= len(sep)
else:
l = max(len(digits), min_width, 1)
groups.append('0'*(l - len(digits)) + digits[-l:])
return sep.join(reversed(groups))
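# Rough example (worked by hand): _insert_thousands_sep('1234567',
# {'thousands_sep': ',', 'grouping': [3, 0]}) returns '1,234,567'.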
def _format_sign(is_negative, spec):
"""Determine sign character."""
if is_negative:
return '-'
elif spec['sign'] in ' +':
return spec['sign']
else:
return ''
def _format_number(is_negative, intpart, fracpart, exp, spec):
"""Format a number, given the following data:
is_negative: true if the number is negative, else false
intpart: string of digits that must appear before the decimal point
fracpart: string of digits that must come after the point
exp: exponent, as an integer
spec: dictionary resulting from parsing the format specifier
This function uses the information in spec to:
insert separators (decimal separator and thousands separators)
format the sign
format the exponent
add trailing '%' for the '%' type
zero-pad if necessary
fill and align if necessary
"""
sign = _format_sign(is_negative, spec)
if fracpart or spec['alt']:
fracpart = spec['decimal_point'] + fracpart
if exp != 0 or spec['type'] in 'eE':
echar = {'E': 'E', 'e': 'e', 'G': 'E', 'g': 'e'}[spec['type']]
fracpart += "{0}{1:+}".format(echar, exp)
if spec['type'] == '%':
fracpart += '%'
if spec['zeropad']:
min_width = spec['minimumwidth'] - len(fracpart) - len(sign)
else:
min_width = 0
intpart = _insert_thousands_sep(intpart, spec, min_width)
return _format_align(sign, intpart+fracpart, spec)
##### Useful Constants (internal use only) ################################
# Reusable defaults
_Infinity = Decimal('Inf')
_NegativeInfinity = Decimal('-Inf')
_NaN = Decimal('NaN')
_Zero = Decimal(0)
_One = Decimal(1)
_NegativeOne = Decimal(-1)
# _SignedInfinity[sign] is infinity w/ that sign
_SignedInfinity = (_Infinity, _NegativeInfinity)
# Constants related to the hash implementation; hash(x) is based
# on the reduction of x modulo _PyHASH_MODULUS
_PyHASH_MODULUS = sys.hash_info.modulus
# hash values to use for positive and negative infinities, and nans
_PyHASH_INF = sys.hash_info.inf
_PyHASH_NAN = sys.hash_info.nan
# _PyHASH_10INV is the inverse of 10 modulo the prime _PyHASH_MODULUS
_PyHASH_10INV = pow(10, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
del sys
try:
import _decimal
except ImportError:
pass
else:
s1 = set(dir())
s2 = set(dir(_decimal))
for name in s1 - s2:
del globals()[name]
del s1, s2, name
from _decimal import *
if __name__ == '__main__':
import doctest, decimal
doctest.testmod(decimal)
| agpl-3.0 |
xcbat/vnpy | examples/CtaBacktesting/runBacktesting.py | 5 | 1089 | # encoding: UTF-8
"""
展示如何执行策略回测。
"""
from __future__ import division
from vnpy.trader.app.ctaStrategy.ctaBacktesting import BacktestingEngine, MINUTE_DB_NAME
if __name__ == '__main__':
from vnpy.trader.app.ctaStrategy.strategy.strategyAtrRsi import AtrRsiStrategy
    # Create the backtesting engine
engine = BacktestingEngine()
    # Set the engine's backtesting mode to bar (K-line) data
engine.setBacktestingMode(engine.BAR_MODE)
    # Set the start date of the backtest data
engine.setStartDate('20120101')
    # Set product-related parameters
    engine.setSlippage(0.2)     # one tick for index futures
    engine.setRate(0.3/10000)   # commission of 0.3 per 10,000 (0.003%)
    engine.setSize(300)         # contract multiplier for index futures
    engine.setPriceTick(0.2)    # minimum price tick for index futures
    # Set the historical database to use
engine.setDatabase(MINUTE_DB_NAME, 'IF0000')
    # Create the strategy object in the engine
d = {'atrLength': 11}
engine.initStrategy(AtrRsiStrategy, d)
    # Start the backtest
engine.runBacktesting()
    # Show the backtest results
engine.showBacktestingResult() | mit |
MiLk/ansible | lib/ansible/modules/network/avi/avi_networksecuritypolicy.py | 44 | 4118 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_networksecuritypolicy
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of NetworkSecurityPolicy Avi RESTful Object
description:
- This module is used to configure NetworkSecurityPolicy object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
cloud_config_cksum:
description:
- Checksum of cloud configuration for network sec policy.
- Internally set by cloud connector.
created_by:
description:
- Creator name.
description:
description:
- User defined description for the object.
name:
description:
- Name of the object.
rules:
description:
- List of networksecurityrule.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a network security policy to block clients represented by ip group known_attackers
avi_networksecuritypolicy:
controller: ''
username: ''
password: ''
name: vs-gurutest-ns
rules:
- action: NETWORK_SECURITY_POLICY_ACTION_TYPE_DENY
age: 0
enable: true
index: 1
log: false
match:
client_ip:
group_refs:
- Demo:known_attackers
match_criteria: IS_IN
name: Rule 1
tenant_ref: Demo
'''
RETURN = '''
obj:
description: NetworkSecurityPolicy (api/networksecuritypolicy) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
cloud_config_cksum=dict(type='str',),
created_by=dict(type='str',),
description=dict(type='str',),
name=dict(type='str',),
rules=dict(type='list',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'networksecuritypolicy',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 |
blowekamp/SimpleITK | Examples/AdvancedImageReading/AdvancedImageReading.py | 4 | 3257 | #!/usr/bin/env python
# =========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =========================================================================
import sys
import SimpleITK as sitk
import numpy as np
if len(sys.argv) < 2:
print('Wrong number of arguments.', file=sys.stderr)
print('Usage: ' + __file__ + ' image_file_name', file=sys.stderr)
sys.exit(1)
# Read image information without reading the bulk data.
file_reader = sitk.ImageFileReader()
file_reader.SetFileName(sys.argv[1])
file_reader.ReadImageInformation()
print(f'image size: {file_reader.GetSize()}\nimage spacing: {file_reader.GetSpacing()}')
# Some files have a rich meta-data dictionary (e.g. DICOM)
for key in file_reader.GetMetaDataKeys():
print(key + ': ' + file_reader.GetMetaData(key))
print('-' * 20)
# When low on memory, we can incrementally work on sub-images. The following
# subtracts two images (ok, the same image) by reading them as multiple
# sub-images.
image1_file_name = sys.argv[1]
image2_file_name = sys.argv[1]
parts = 5 # Number of sub-regions we use
file_reader = sitk.ImageFileReader()
file_reader.SetFileName(image1_file_name)
file_reader.ReadImageInformation()
image_size = file_reader.GetSize()
result_img = sitk.Image(file_reader.GetSize(), file_reader.GetPixelID(),
file_reader.GetNumberOfComponents())
result_img.SetSpacing(file_reader.GetSpacing())
result_img.SetOrigin(file_reader.GetOrigin())
result_img.SetDirection(file_reader.GetDirection())
extract_size = list(file_reader.GetSize())
extract_size[-1] = extract_size[-1] // parts
current_index = [0] * file_reader.GetDimension()
for i in range(parts):
if i == (parts - 1):
extract_size[-1] = image_size[-1] - current_index[-1]
file_reader.SetFileName(image1_file_name)
file_reader.SetExtractIndex(current_index)
file_reader.SetExtractSize(extract_size)
sub_image1 = file_reader.Execute()
file_reader.SetFileName(image2_file_name)
file_reader.SetExtractIndex(current_index)
file_reader.SetExtractSize(extract_size)
sub_image2 = file_reader.Execute()
idx = [slice(None,None)]*file_reader.GetDimension()
idx[-1] = current_index[-1]
result_img[idx] = sub_image1 - sub_image2
current_index[-1] += extract_size[-1]
del sub_image1
del sub_image2
# Check that our iterative approach is equivalent to reading the whole images.
if np.any(sitk.GetArrayViewFromImage(result_img
- sitk.ReadImage(image1_file_name)
+ sitk.ReadImage(image2_file_name))):
print('Subtraction error.')
sys.exit(1)
sys.exit(0)
| apache-2.0 |
kmod/icbd | stdlib/python2.5/ctypes/test/__init__.py | 15 | 6870 | import glob, os, sys, unittest, getopt, time
use_resources = []
class ResourceDenied(Exception):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
has not be enabled. Resources are defined by test modules.
"""
def is_resource_enabled(resource):
"""Test whether a resource is enabled.
If the caller's module is __main__ then automatically return True."""
if sys._getframe().f_back.f_globals.get("__name__") == "__main__":
return True
result = use_resources is not None and \
(resource in use_resources or "*" in use_resources)
if not result:
_unavail[resource] = None
return result
_unavail = {}
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available.
If the caller's module is __main__ then automatically return True."""
# see if the caller's module is __main__ - if so, treat as if
# the resource was set
if sys._getframe().f_back.f_globals.get("__name__") == "__main__":
return
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the `%s' resource not enabled" % resource
raise ResourceDenied(msg)
def find_package_modules(package, mask):
import fnmatch
if hasattr(package, "__loader__"):
path = package.__name__.replace(".", os.path.sep)
mask = os.path.join(path, mask)
for fnm in package.__loader__._files.iterkeys():
if fnmatch.fnmatchcase(fnm, mask):
yield os.path.splitext(fnm)[0].replace(os.path.sep, ".")
else:
path = package.__path__[0]
for fnm in os.listdir(path):
if fnmatch.fnmatchcase(fnm, mask):
yield "%s.%s" % (package.__name__, os.path.splitext(fnm)[0])
def get_tests(package, mask, verbosity):
"""Return a list of skipped test modules, and a list of test cases."""
tests = []
skipped = []
for modname in find_package_modules(package, mask):
try:
mod = __import__(modname, globals(), locals(), ['*'])
except ResourceDenied, detail:
skipped.append(modname)
if verbosity > 1:
print >> sys.stderr, "Skipped %s: %s" % (modname, detail)
continue
except Exception, detail:
print >> sys.stderr, "Warning: could not import %s: %s" % (modname, detail)
continue
for name in dir(mod):
if name.startswith("_"):
continue
o = getattr(mod, name)
if type(o) is type(unittest.TestCase) and issubclass(o, unittest.TestCase):
tests.append(o)
return skipped, tests
def usage():
print __doc__
return 1
def test_with_refcounts(runner, verbosity, testcase):
"""Run testcase several times, tracking reference counts."""
import gc
import ctypes
ptc = ctypes._pointer_type_cache.copy()
cfc = ctypes._c_functype_cache.copy()
wfc = ctypes._win_functype_cache.copy()
# when searching for refcount leaks, we have to manually reset any
# caches that ctypes has.
def cleanup():
ctypes._pointer_type_cache = ptc.copy()
ctypes._c_functype_cache = cfc.copy()
ctypes._win_functype_cache = wfc.copy()
gc.collect()
test = unittest.makeSuite(testcase)
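    # Warm-up runs: trigger any one-time allocations (caches, interned
    # objects) before the measured iterations below, so they are not
    # reported as leaks.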
for i in range(5):
rc = sys.gettotalrefcount()
runner.run(test)
cleanup()
COUNT = 5
refcounts = [None] * COUNT
for i in range(COUNT):
rc = sys.gettotalrefcount()
runner.run(test)
cleanup()
refcounts[i] = sys.gettotalrefcount() - rc
if filter(None, refcounts):
print "%s leaks:\n\t" % testcase, refcounts
elif verbosity:
print "%s: ok." % testcase
class TestRunner(unittest.TextTestRunner):
def run(self, test, skipped):
"Run the given test case or test suite."
# Same as unittest.TextTestRunner.run, except that it reports
# skipped tests.
result = self._makeResult()
startTime = time.time()
test(result)
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
self.stream.writeln(result.separator2)
run = result.testsRun
if _unavail: #skipped:
requested = _unavail.keys()
requested.sort()
self.stream.writeln("Ran %d test%s in %.3fs (%s module%s skipped)" %
(run, run != 1 and "s" or "", timeTaken,
len(skipped),
len(skipped) != 1 and "s" or ""))
self.stream.writeln("Unavailable resources: %s" % ", ".join(requested))
else:
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
if not result.wasSuccessful():
self.stream.write("FAILED (")
failed, errored = map(len, (result.failures, result.errors))
if failed:
self.stream.write("failures=%d" % failed)
if errored:
if failed: self.stream.write(", ")
self.stream.write("errors=%d" % errored)
self.stream.writeln(")")
else:
self.stream.writeln("OK")
return result
def main(*packages):
try:
opts, args = getopt.getopt(sys.argv[1:], "rqvu:")
except getopt.error:
return usage()
verbosity = 1
search_leaks = False
for flag, value in opts:
if flag == "-q":
verbosity -= 1
elif flag == "-v":
verbosity += 1
elif flag == "-r":
try:
sys.gettotalrefcount
except AttributeError:
print >> sys.stderr, "-r flag requires Python debug build"
return -1
search_leaks = True
elif flag == "-u":
use_resources.extend(value.split(","))
mask = "test_*.py"
if args:
mask = args[0]
for package in packages:
run_tests(package, mask, verbosity, search_leaks)
def run_tests(package, mask, verbosity, search_leaks):
skipped, testcases = get_tests(package, mask, verbosity)
runner = TestRunner(verbosity=verbosity)
suites = [unittest.makeSuite(o) for o in testcases]
suite = unittest.TestSuite(suites)
result = runner.run(suite, skipped)
if search_leaks:
# hunt for refcount leaks
runner = BasicTestRunner()
for t in testcases:
test_with_refcounts(runner, verbosity, t)
return bool(result.errors)
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
| mit |
beezee/GAE-Django-site | django/db/backends/postgresql/creation.py | 247 | 3753 | from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.util import truncate_name
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'serial',
'BooleanField': 'boolean',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'inet',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer CHECK ("%(column)s" >= 0)',
'PositiveSmallIntegerField': 'smallint CHECK ("%(column)s" >= 0)',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
}
def sql_table_creation_suffix(self):
assert self.connection.settings_dict['TEST_COLLATION'] is None, "PostgreSQL does not support collation setting at database creation time."
if self.connection.settings_dict['TEST_CHARSET']:
return "WITH ENCODING '%s'" % self.connection.settings_dict['TEST_CHARSET']
return ''
def sql_indexes_for_field(self, model, f, style):
if f.db_index and not f.unique:
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
tablespace = f.db_tablespace or model._meta.db_tablespace
if tablespace:
sql = self.connection.ops.tablespace_sql(tablespace)
if sql:
tablespace_sql = ' ' + sql
else:
tablespace_sql = ''
else:
tablespace_sql = ''
def get_index_sql(index_name, opclass=''):
return (style.SQL_KEYWORD('CREATE INDEX') + ' ' +
style.SQL_TABLE(qn(truncate_name(index_name,self.connection.ops.max_name_length()))) + ' ' +
style.SQL_KEYWORD('ON') + ' ' +
style.SQL_TABLE(qn(db_table)) + ' ' +
"(%s%s)" % (style.SQL_FIELD(qn(f.column)), opclass) +
"%s;" % tablespace_sql)
output = [get_index_sql('%s_%s' % (db_table, f.column))]
# Fields with database column types of `varchar` and `text` need
# a second index that specifies their operator class, which is
# needed when performing correct LIKE queries outside the
# C locale. See #12234.
db_type = f.db_type(connection=self.connection)
if db_type.startswith('varchar'):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' varchar_pattern_ops'))
elif db_type.startswith('text'):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' text_pattern_ops'))
else:
output = []
return output
| bsd-3-clause |
SubhasisDutta/subhasisdutta.com | src/controller/WorkAdminController.py | 2 | 10845 | import webapp2
import os
from google.appengine.ext.webapp import template
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import ndb
from google.appengine.api import images
from src.model.WorkModels import Work,PhotoModel,WorkResourceAttribute
class WorkAdminCreate(webapp2.RequestHandler):
    # Render the create-project form page
def get(self):
user=users.get_current_user()
if user is None:
self.redirect(users.create_login_url(self.request.uri))
else:
self.response.headers["Content-Type"]="text/html"
admins=['subhasisdutta300887','subhasistubai','[email protected]']
if user.nickname() not in admins:
self.response.out.write(callAccessDeniedPage())
else:
template_values={
'pageTitle':"Create New Work Project",
}
path=os.path.join(os.path.dirname(__file__),'../../template/createWork.html')
page=template.render(path,template_values)
self.response.out.write(page)
    # Handle the create-project form submission
def post(self):
handle=self.request.get("handle")
category=self.request.get("category")
title=self.request.get("title")
description=self.request.get("description")
icon=self.request.get("iconImage")
largeImage=self.request.get("largeImage")
iconImg=db.Blob(icon)
largeImg=db.Blob(largeImage)
workItem=Work(id=handle)
workItem.category=category
workItem.title=title
workItem.order=int(self.request.get("order"))
workItem.description=description
workItem.iconImage=iconImg
workItem.bigImage=largeImg
workItem.put()
self.response.headers["Content-Type"]="text/html"
self.response.out.write("""
<html>
<head>
<title>New WORK Created</title>
</head>
<body>
<h3> New WORK Created</h3>
</body></html>
""")
class WorkAdminEdit(webapp2.RequestHandler):
    # Render the edit form for an existing work item
def get(self):
user=users.get_current_user()
if user is None:
self.redirect(users.create_login_url(self.request.uri))
else:
self.response.headers["Content-Type"]="text/html"
admins=['subhasisdutta300887','subhasistubai','[email protected]']
if user.nickname() not in admins:
self.response.out.write(callAccessDeniedPage())
else:
work_name=self.request.get("name")
work_key=ndb.Key('Work',work_name)
work=work_key.get()
if work is None:
self.response.out.write(callNoSuchWorkPage())
else:
attrList=ndb.get_multi(work.attributes)
photoList=ndb.get_multi(work.photoGallery)
template_values={
'pageTitle':"Edit Work",
'work':work,
'attrList': attrList,
'photoList': photoList
}
path=os.path.join(os.path.dirname(__file__),'../../template/editWork.html')
page=template.render(path,template_values)
self.response.out.write(page)
def post(self):
user=users.get_current_user()
if user is None:
self.redirect(users.create_login_url(self.request.uri))
else:
self.response.headers["Content-Type"]="text/html"
admins=['subhasisdutta300887','subhasistubai','[email protected]']
if user.nickname() not in admins:
self.response.out.write(callAccessDeniedPage())
else:
work_name=self.request.get("name")
work_key=ndb.Key('Work',work_name)
work=work_key.get()
if work is None:
self.response.out.write(callNoSuchWorkPage())
else:
title=self.request.get("title")
description=self.request.get("description")
work.title=title
work.description=description
work.order=int(self.request.get("order"))
work.publish=bool(self.request.get("publish"))
work.put()
self.response.headers["Content-Type"]="text/html"
self.response.out.write("""
<html>
<head>
<title>Project main Updated</title>
</head>
<body>
<h3> Project main Updated</h3>
</body></html>
""")
class AddWorkAttribute(webapp2.RequestHandler):
def post(self):
attribute=WorkResourceAttribute(name=self.request.get("name"),htmlDescription=self.request.get("htmlDescription"),order=int(self.request.get("order")))
attribute.put()
work_key=ndb.Key('Work',self.request.get("workHandle"))
work=work_key.get()
work.attributes.append(attribute.key)
work.put()
self.response.headers["Content-Type"]="text/html"
self.response.out.write("""
<html>
<head>
<title>New Attribute Added</title>
</head>
<body>
<h3> New Attribute Added</h3>
</body></html>
""")
class AddPhotoTOCollection(webapp2.RequestHandler):
def get(self):
user=users.get_current_user()
if user is None:
self.redirect(users.create_login_url(self.request.uri))
else:
self.response.headers["Content-Type"]="text/html"
admins=['subhasisdutta300887','subhasistubai','[email protected]']
if user.nickname() not in admins:
self.response.out.write(callAccessDeniedPage())
else:
template_values={
'pageTitle':"Create New Photo",
}
path=os.path.join(os.path.dirname(__file__),'../../template/createPhoto.html')
page=template.render(path,template_values)
self.response.out.write(page)
def post(self):
photoImage=self.request.get("image")
photoImg=db.Blob(photoImage)
thumbnail=images.resize(photoImage, 250, 170)
thumbnailImg=db.Blob(thumbnail)
photo=PhotoModel(title=self.request.get("title"),image=photoImg,type=self.request.get("type"),thumbnail=thumbnailImg,caption=self.request.get("caption"))
photo.put()
self.response.headers["Content-Type"]="text/html"
self.response.out.write("""
<html>
<head>
<title>Photo Added</title>
</head>
<body>
<h3> New Photo Added</h3>
</body></html>
""")
class MapPhotoToWork(webapp2.RequestHandler):
def get(self):
user=users.get_current_user()
if user is None:
self.redirect(users.create_login_url(self.request.uri))
else:
self.response.headers["Content-Type"]="text/html"
admins=['subhasisdutta300887','subhasistubai','[email protected]']
if user.nickname() not in admins:
self.response.out.write(callAccessDeniedPage())
else:
work_name=self.request.get("name")
work_key=ndb.Key('Work',work_name)
work=work_key.get()
if work is None:
self.response.out.write(callNoSuchWorkPage())
else:
attrList=ndb.get_multi(work.attributes)
photoList=ndb.get_multi(work.photoGallery)
photoCollection= PhotoModel.query()
template_values={
'pageTitle':"Map Photo To Work : ",
'work':work,
'attrList': attrList,
'photoList': photoList,
'photoCollection': photoCollection
}
path=os.path.join(os.path.dirname(__file__),'../../template/mapPhotoWork.html')
page=template.render(path,template_values)
self.response.out.write(page)
def post(self):
workPhoto_key=ndb.Key(urlsafe=self.request.get("photoKey"))
work_key=ndb.Key('Work',self.request.get("name"))
work=work_key.get()
work.photoGallery.append(workPhoto_key)
work.put()
self.response.headers["Content-Type"]="text/html"
self.response.out.write("""
<html>
<head>
<title>New Photo Added</title>
</head>
<body>
<h3> New Photo Added</h3>
</body></html>
""")
def callNoSuchWorkPage():
template_parameters={
'pageTitle':'No Such Work!!!!',
'title':"ERROR! Requested Work cannot be found",
'message':"ERROR!! The requested work was not found. Please check the name again."
}
error_template=os.path.join(os.path.dirname(__file__),'../../template/error.html')
page=template.render(error_template,template_parameters)
return page
def callAccessDeniedPage():
template_parameters={
'pageTitle':'Access Denied!!!!',
'title':"ERROR! You don't have access to this page",
'message':"ERROR!! You don't have access to this page."
}
error_template=os.path.join(os.path.dirname(__file__),'../../template/error.html')
page=template.render(error_template,template_parameters)
return page | mit |
jarussi/riotpy | riotpy/managers/match_history.py | 1 | 1786 | # coding: utf-8
from riotpy.resources.match import MatchResource
from base import Manager
class MatchHistoryManager(Manager):
"""
We are spliting this one and MatchManager because Riot did it.
That way, our lib is closer to their documentation.
"""
def get_summoner_match_history(self, summoner_id, begin_index=None, end_index=None,
champion_ids=None, ranked_queues=None):
"""
Get a summoner match history
:param summoner_id: The ID of the summoner.
:param begin_index: The begin index to use for fetching games.
:param end_index: The end index to use for fetching games.
:param champion_ids: Comma-separated list of champion IDs to use
for fetching games.
        :param ranked_queues: Comma-separated list of ranked queue types
        to use for fetching games. Non-ranked queue types will be ignored.
        Ex: riotpy.constants.RankedQueues
:return: A resources.match.MatchResource list
"""
extra = {}
if begin_index:
try:
extra['beginIndex'] = int(begin_index)
except:
pass
if end_index:
try:
extra['endIndex'] = int(end_index)
except:
pass
if champion_ids:
extra['championIds'] = champion_ids
if ranked_queues:
extra['rankedQueues'] = ranked_queues
content = self._get('api/lol/{}/{}/matchhistory/{}'.format(
self.api.region,
self.version,
summoner_id), extra=extra
)
return self._dict_to_resource(content['matches'], resource_class=MatchResource)
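    # Hedged usage sketch (the manager's construction details live in the
    # base Manager class and are assumed here):
    #   manager = MatchHistoryManager(api)
    #   matches = manager.get_summoner_match_history(12345, begin_index=0,
    #                                                end_index=10)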
| bsd-3-clause |
proxysh/Safejumper-for-Desktop | buildlinux/env64/lib/python2.7/site-packages/Crypto/PublicKey/__init__.py | 124 | 1876 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Public-key encryption and signature algorithms.
Public-key encryption uses two different keys, one for encryption and
one for decryption. The encryption key can be made public, and the
decryption key is kept private. Many public-key algorithms can also
be used to sign messages, and some can *only* be used for signatures.
======================== =============================================
Module Description
======================== =============================================
Crypto.PublicKey.DSA Digital Signature Algorithm (Signature only)
Crypto.PublicKey.ElGamal (Signing and encryption)
Crypto.PublicKey.RSA (Signing, encryption, and blinding)
======================== =============================================
:undocumented: _DSA, _RSA, _fastmath, _slowmath, pubkey
"""
__all__ = ['RSA', 'DSA', 'ElGamal']
__revision__ = "$Id$"
| gpl-2.0 |
10clouds/edx-platform | cms/djangoapps/course_creators/models.py | 62 | 4060 | """
Table for storing information about whether or not Studio users have course creation privileges.
"""
from django.db import models
from django.db.models.signals import post_init, post_save
from django.dispatch import receiver, Signal
from django.contrib.auth.models import User
from django.utils import timezone
from django.utils.translation import ugettext as _
# A signal that will be sent when users should be added or removed from the creator group
update_creator_state = Signal(providing_args=["caller", "user", "state"])
# A signal that will be sent when admin should be notified of a pending user request
send_admin_notification = Signal(providing_args=["user"])
# A signal that will be sent when user should be notified of change in course creator privileges
send_user_notification = Signal(providing_args=["user", "state"])
class CourseCreator(models.Model):
"""
Creates the database table model.
"""
UNREQUESTED = 'unrequested'
PENDING = 'pending'
GRANTED = 'granted'
DENIED = 'denied'
# Second value is the "human-readable" version.
STATES = (
(UNREQUESTED, _(u'unrequested')),
(PENDING, _(u'pending')),
(GRANTED, _(u'granted')),
(DENIED, _(u'denied')),
)
user = models.OneToOneField(User, help_text=_("Studio user"))
state_changed = models.DateTimeField('state last updated', auto_now_add=True,
help_text=_("The date when state was last updated"))
state = models.CharField(max_length=24, blank=False, choices=STATES, default=UNREQUESTED,
help_text=_("Current course creator state"))
note = models.CharField(max_length=512, blank=True, help_text=_("Optional notes about this user (for example, "
"why course creation access was denied)"))
def __unicode__(self):
return u"{0} | {1} [{2}]".format(self.user, self.state, self.state_changed)
@receiver(post_init, sender=CourseCreator)
def post_init_callback(sender, **kwargs):
"""
Extend to store previous state.
"""
instance = kwargs['instance']
instance.orig_state = instance.state
@receiver(post_save, sender=CourseCreator)
def post_save_callback(sender, **kwargs):
"""
Extend to update state_changed time and fire event to update course creator group, if appropriate.
"""
instance = kwargs['instance']
# We only wish to modify the state_changed time if the state has been modified. We don't wish to
# modify it for changes to the notes field.
if instance.state != instance.orig_state:
granted_state_change = instance.state == CourseCreator.GRANTED or instance.orig_state == CourseCreator.GRANTED
# If either old or new state is 'granted', we must manipulate the course creator
# group maintained by authz. That requires staff permissions (stored admin).
if granted_state_change:
assert hasattr(instance, 'admin'), 'Must have stored staff user to change course creator group'
update_creator_state.send(
sender=sender,
caller=instance.admin,
user=instance.user,
state=instance.state
)
# If user has been denied access, granted access, or previously granted access has been
# revoked, send a notification message to the user.
if instance.state == CourseCreator.DENIED or granted_state_change:
send_user_notification.send(
sender=sender,
user=instance.user,
state=instance.state
)
# If the user has gone into the 'pending' state, send a notification to interested admin.
if instance.state == CourseCreator.PENDING:
send_admin_notification.send(
sender=sender,
user=instance.user
)
instance.state_changed = timezone.now()
instance.orig_state = instance.state
instance.save()
| agpl-3.0 |
Gamebasis/3DGamebasisServer | GameData/blender-2.71-windows64/2.71/scripts/addons/game_engine_save_as_runtime.py | 5 | 8665 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
"name": "Save As Game Engine Runtime",
"author": "Mitchell Stokes (Moguri)",
"version": (0, 3, 1),
"blender": (2, 61, 0),
"location": "File > Export",
"description": "Bundle a .blend file with the Blenderplayer",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Game_Engine/Save_As_Runtime",
"category": "Game Engine",
}
import bpy
import os
import sys
import shutil
import tempfile
def CopyPythonLibs(dst, overwrite_lib, report=print):
import platform
    # use the platform module's location to find python's library path
src = os.path.dirname(platform.__file__)
# dst points to lib/, but src points to current python's library path, eg:
# '/usr/lib/python3.2' vs '/usr/lib'
# append python's library dir name to destination, so only python's
# libraries would be copied
if os.name == 'posix':
dst = os.path.join(dst, os.path.basename(src))
if os.path.exists(src):
write = False
if os.path.exists(dst):
if overwrite_lib:
shutil.rmtree(dst)
write = True
else:
write = True
if write:
shutil.copytree(src, dst, ignore=lambda dir, contents: [i for i in contents if i == '__pycache__'])
else:
report({'WARNING'}, "Python not found in %r, skipping pythn copy" % src)
def WriteAppleRuntime(player_path, output_path, copy_python, overwrite_lib):
# Enforce the extension
if not output_path.endswith('.app'):
output_path += '.app'
# Use the system's cp command to preserve some meta-data
os.system('cp -R "%s" "%s"' % (player_path, output_path))
bpy.ops.wm.save_as_mainfile(filepath=os.path.join(output_path, "Contents/Resources/game.blend"),
relative_remap=False,
compress=False,
copy=True,
)
# Python doesn't need to be copied for OS X since it's already inside blenderplayer.app
def WriteRuntime(player_path, output_path, copy_python, overwrite_lib, copy_dlls, report=print):
import struct
# Check the paths
if not os.path.isfile(player_path) and not(os.path.exists(player_path) and player_path.endswith('.app')):
report({'ERROR'}, "The player could not be found! Runtime not saved")
return
# Check if we're bundling a .app
if player_path.endswith('.app'):
WriteAppleRuntime(player_path, output_path, copy_python, overwrite_lib)
return
# Enforce "exe" extension on Windows
if player_path.endswith('.exe') and not output_path.endswith('.exe'):
output_path += '.exe'
# Get the player's binary and the offset for the blend
file = open(player_path, 'rb')
player_d = file.read()
offset = file.tell()
file.close()
# Create a tmp blend file (Blenderplayer doesn't like compressed blends)
tempdir = tempfile.mkdtemp()
blend_path = os.path.join(tempdir, bpy.path.clean_name(output_path))
bpy.ops.wm.save_as_mainfile(filepath=blend_path,
relative_remap=False,
compress=False,
copy=True,
)
# Get the blend data
blend_file = open(blend_path, 'rb')
blend_d = blend_file.read()
blend_file.close()
# Get rid of the tmp blend, we're done with it
os.remove(blend_path)
os.rmdir(tempdir)
# Create a new file for the bundled runtime
output = open(output_path, 'wb')
# Write the player and blend data to the new runtime
print("Writing runtime...", end=" ")
output.write(player_d)
output.write(blend_d)
# Store the offset (an int is 4 bytes, so we split it up into 4 bytes and save it)
output.write(struct.pack('B', (offset>>24)&0xFF))
output.write(struct.pack('B', (offset>>16)&0xFF))
output.write(struct.pack('B', (offset>>8)&0xFF))
output.write(struct.pack('B', (offset>>0)&0xFF))
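    # (Equivalently, the four writes above encode one big-endian unsigned
    # 32-bit integer: output.write(struct.pack('>I', offset)).)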
# Stuff for the runtime
output.write(b'BRUNTIME')
output.close()
print("done")
# Make the runtime executable on Linux
if os.name == 'posix':
os.chmod(output_path, 0o755)
# Copy bundled Python
blender_dir = os.path.dirname(bpy.app.binary_path)
runtime_dir = os.path.dirname(output_path)
if copy_python:
print("Copying Python files...", end=" ")
py_folder = os.path.join(bpy.app.version_string.split()[0], "python", "lib")
dst = os.path.join(runtime_dir, py_folder)
CopyPythonLibs(dst, overwrite_lib, report)
print("done")
# And DLLs
if copy_dlls:
print("Copying DLLs...", end=" ")
for file in [i for i in os.listdir(blender_dir) if i.lower().endswith('.dll')]:
src = os.path.join(blender_dir, file)
dst = os.path.join(runtime_dir, file)
shutil.copy2(src, dst)
print("done")
from bpy.props import *
class SaveAsRuntime(bpy.types.Operator):
bl_idname = "wm.save_as_runtime"
bl_label = "Save As Game Engine Runtime"
bl_options = {'REGISTER'}
if sys.platform == 'darwin':
# XXX, this line looks suspicious, could be done better?
blender_bin_dir = '/' + os.path.join(*bpy.app.binary_path.split('/')[0:-4])
ext = '.app'
else:
blender_bin_path = bpy.app.binary_path
blender_bin_dir = os.path.dirname(blender_bin_path)
ext = os.path.splitext(blender_bin_path)[-1].lower()
default_player_path = os.path.join(blender_bin_dir, 'blenderplayer' + ext)
player_path = StringProperty(
name="Player Path",
description="The path to the player to use",
default=default_player_path,
subtype='FILE_PATH',
)
filepath = StringProperty(
subtype='FILE_PATH',
)
copy_python = BoolProperty(
name="Copy Python",
description="Copy bundle Python with the runtime",
default=True,
)
overwrite_lib = BoolProperty(
name="Overwrite 'lib' folder",
description="Overwrites the lib folder (if one exists) with the bundled Python lib folder",
default=False,
)
# Only Windows has dlls to copy
if ext == '.exe':
copy_dlls = BoolProperty(
name="Copy DLLs",
description="Copy all needed DLLs with the runtime",
default=True,
)
else:
copy_dlls = False
def execute(self, context):
import time
start_time = time.clock()
print("Saving runtime to %r" % self.filepath)
WriteRuntime(self.player_path,
self.filepath,
self.copy_python,
self.overwrite_lib,
self.copy_dlls,
self.report,
)
print("Finished in %.4fs" % (time.clock()-start_time))
return {'FINISHED'}
def invoke(self, context, event):
if not self.filepath:
ext = '.app' if sys.platform == 'darwin' else os.path.splitext(bpy.app.binary_path)[-1]
self.filepath = bpy.path.ensure_ext(bpy.data.filepath, ext)
wm = context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
def menu_func(self, context):
self.layout.operator(SaveAsRuntime.bl_idname)
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_export.append(menu_func)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_export.remove(menu_func)
if __name__ == "__main__":
register()
| gpl-3.0 |
wrightni/OSSP | segment.py | 1 | 6298 | # title: Watershed Transform
# author: Nick Wright
# adapted from: Justin Chen, Arnold Song
import numpy as np
import gc
import warnings
from skimage import filters, morphology, feature, img_as_ubyte
from scipy import ndimage
from ctypes import *
from lib import utils
# For Testing:
from skimage import segmentation
import matplotlib.image as mimg
def segment_image(input_data, image_type=False):
'''
Wrapper function that handles all of the processing to create watersheds
'''
#### Define segmentation parameters
    # High_threshold: Upper threshold for canny edge detection. Gradient values
    #       above this amount are immediately marked as "strong" edges.
# Low_threshold: Lower threshold for canny edge detection. Determines which "weak" edges to keep.
# Values above this amount that are connected to a strong edge will be marked as an edge.
# Gauss_sigma: sigma value to use in the gaussian blur applied to the image prior to segmentation.
# Value chosen here should be based on the quality and resolution of the image
# Feature_separation: minimum distance, in pixels, between the center point of multiple features. Use a lower value
    #       for lower resolution (.5m) images, and a higher value for higher resolution (~.1m) aerial images.
# These values are dependent on the type of imagery being processed, and are
# mostly empirically derived.
# band_list contains the three bands to be used for segmentation
if image_type == 'pan':
high_threshold = 0.15 * 255 ## Needs to be checked
low_threshold = 0.05 * 255 ## Needs to be checked
gauss_sigma = 1
feature_separation = 1
band_list = [0, 0, 0]
elif image_type == 'wv02_ms':
high_threshold = 0.20 * 255 ## Needs to be checked
low_threshold = 0.05 * 255 ## Needs to be checked
gauss_sigma = 1.5
feature_separation = 3
band_list = [4, 2, 1]
else: #image_type == 'srgb'
high_threshold = 0.15 * 255
low_threshold = 0.05 * 255
gauss_sigma = 2
feature_separation = 5
band_list = [0, 1, 2]
segmented_data = watershed_transformation(input_data, band_list, low_threshold, high_threshold,
gauss_sigma,feature_separation)
# Method that provides the user an option to view the original image
# side by side with the segmented image.
# print(np.amax(segmented_data))
# image_data = np.array([input_data[band_list[0]],
# input_data[band_list[1]],
# input_data[band_list[2]]],
# dtype=np.uint8)
# ws_bound = segmentation.find_boundaries(segmented_data)
# ws_display = utils.create_composite(image_data)
#
# # save_name = '/Users/nicholas/Desktop/original_{}.png'
# # mimg.imsave(save_name.format(np.random.randint(0,100)), ws_display, format='png')
#
# ws_display[:, :, 0][ws_bound] = 240
# ws_display[:, :, 1][ws_bound] = 80
# ws_display[:, :, 2][ws_bound] = 80
#
# save_name = '/Users/nicholas/Desktop/seg_{}.png'
# mimg.imsave(save_name.format(np.random.randint(0, 100)), ws_display, format='png')
return segmented_data
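# A minimal usage sketch (illustrative only): assumes `input_data` is a
# band-indexed uint8 array, e.g. one loaded elsewhere by the OSSP pipeline.
# The random array below simply stands in for a 3-band sRGB image; values
# start at 2 so the no-data check (max <= 1) is not triggered.
#
#   import numpy as np
#   rgb = np.random.randint(2, 255, (3, 512, 512)).astype(np.uint8)
#   labels = segment_image(rgb, image_type='srgb')
#   print(np.amax(labels))   # roughly the number of watershed segments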
def watershed_transformation(image_data, band_list, low_threshold, high_threshold, gauss_sigma, feature_separation):
'''
Runs a watershed transform on the main dataset
1. Create a gradient image using the sobel algorithm
2. Adjust the gradient image based on given threshold and amplification.
3. Find the local minimum gradient values and place a marker
4. Construct watersheds on top of the gradient image starting at the
markers.
'''
# If this block has no data, return a placeholder watershed.
if np.amax(image_data[0]) <= 1:
# We just need the dimensions from one band
return np.zeros(np.shape(image_data[0]))
# Build a raster of detected edges to inform the creation of watershed seed points
edge_image = edge_detect(image_data, band_list, gauss_sigma, low_threshold, high_threshold)
# Build a raster of image gradient that will be the base for watershed expansion.
grad_image = build_gradient(image_data, band_list, gauss_sigma)
image_data = None
# Find local minimum values in the edge image by inverting
# edge_image and finding the local maximum values
inv_edge = np.empty_like(edge_image, dtype=np.uint8)
np.subtract(255, edge_image, out=inv_edge)
edge_image = None
# Distance to the nearest detected edge
distance_image = ndimage.distance_transform_edt(inv_edge)
inv_edge = None
# Local maximum distance
local_min = feature.peak_local_max(distance_image, min_distance=feature_separation,
exclude_border=False, indices=False, num_peaks_per_label=1)
distance_image = None
markers = ndimage.label(local_min)[0]
local_min = None
    # Build a watershed from the markers on top of the gradient image
im_watersheds = morphology.watershed(grad_image, markers)
grad_image = None
# Set all values outside of the image area (empty pixels, usually caused by
# orthorectification) to one value, at the end of the watershed list.
# im_watersheds[empty_pixels] = np.amax(im_watersheds)+1
gc.collect()
return im_watersheds
def edge_detect(image_data, band_list, gauss_sigma, low_threshold, high_threshold):
# Detect edges in the image with a canny edge detector
with warnings.catch_warnings():
warnings.simplefilter("ignore")
edge_image = img_as_ubyte(feature.canny(image_data[band_list[1]], sigma=gauss_sigma,
low_threshold=low_threshold, high_threshold=high_threshold))
return edge_image
def build_gradient(image_data, band_list, gauss_sigma):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
smooth_im_blue = ndimage.filters.gaussian_filter(image_data[band_list[2]], sigma=gauss_sigma)
grad_image = img_as_ubyte(filters.scharr(smooth_im_blue))
# Prevent the watersheds from 'leaking' along the sides of the image
grad_image[:, 0] = grad_image[:, 1]
grad_image[:, -1] = grad_image[:, -2]
grad_image[0, :] = grad_image[1, :]
grad_image[-1, :] = grad_image[-2, :]
return grad_image | mit |
shaistaansari/django | django/db/backends/base/schema.py | 339 | 43421 | import hashlib
import logging
from django.db.backends.utils import truncate_name
from django.db.transaction import atomic
from django.utils import six
from django.utils.encoding import force_bytes
logger = logging.getLogger('django.db.backends.schema')
def _related_non_m2m_objects(old_field, new_field):
# Filters out m2m objects from reverse relations.
# Returns (old_relation, new_relation) tuples.
return zip(
(obj for obj in old_field.model._meta.related_objects if not obj.field.many_to_many),
(obj for obj in new_field.model._meta.related_objects if not obj.field.many_to_many)
)
class BaseDatabaseSchemaEditor(object):
"""
This class (and its subclasses) are responsible for emitting schema-changing
statements to the databases - model creation/removal/alteration, field
renaming, index fiddling, and so on.
It is intended to eventually completely replace DatabaseCreation.
This class should be used by creating an instance for each set of schema
    changes (e.g. a migration file), and used as a context manager - deferred
    SQL (such as FK constraints and indexes) is collected while the context is
    open and executed when it exits. This is necessary to allow things like
    circular foreign key references - FKs will only be created once the
    deferred SQL runs on exit.
"""
# Overrideable SQL templates
sql_create_table = "CREATE TABLE %(table)s (%(definition)s)"
sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s"
sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s"
sql_delete_table = "DROP TABLE %(table)s CASCADE"
sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s"
sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL"
sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE"
sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s"
sql_update_with_default = "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"
sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)"
sql_delete_check = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)"
sql_delete_unique = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_fk = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) "
"REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED"
)
sql_create_inline_fk = None
sql_delete_fk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"
sql_delete_index = "DROP INDEX %(name)s"
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
def __init__(self, connection, collect_sql=False):
self.connection = connection
self.collect_sql = collect_sql
if self.collect_sql:
self.collected_sql = []
# State-managing methods
def __enter__(self):
self.deferred_sql = []
if self.connection.features.can_rollback_ddl:
self.atomic = atomic(self.connection.alias)
self.atomic.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
for sql in self.deferred_sql:
self.execute(sql)
if self.connection.features.can_rollback_ddl:
self.atomic.__exit__(exc_type, exc_value, traceback)
# Core utility functions
def execute(self, sql, params=[]):
"""
Executes the given SQL statement, with optional parameters.
"""
# Log the command we're running, then run it
logger.debug("%s; (params %r)" % (sql, params))
if self.collect_sql:
ending = "" if sql.endswith(";") else ";"
if params is not None:
self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ending)
else:
self.collected_sql.append(sql + ending)
else:
with self.connection.cursor() as cursor:
cursor.execute(sql, params)
def quote_name(self, name):
return self.connection.ops.quote_name(name)
@classmethod
def _digest(cls, *args):
"""
Generates a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
"""
h = hashlib.md5()
for arg in args:
h.update(force_bytes(arg))
return h.hexdigest()[:8]
# Field <-> database mapping functions
def column_sql(self, model, field, include_default=False):
"""
Takes a field and returns its column definition.
The field must already have had set_attributes_from_name called.
"""
# Get the column's type and use that as the basis of the SQL
db_params = field.db_parameters(connection=self.connection)
sql = db_params['type']
params = []
# Check for fields that aren't actually columns (e.g. M2M)
if sql is None:
return None, None
# Work out nullability
null = field.null
# If we were told to include a default value, do so
include_default = include_default and not self.skip_default(field)
if include_default:
default_value = self.effective_default(field)
if default_value is not None:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
sql += " DEFAULT %s" % self.prepare_default(default_value)
else:
sql += " DEFAULT %s"
params += [default_value]
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (field.empty_strings_allowed and not field.primary_key and
self.connection.features.interprets_empty_strings_as_nulls):
null = True
if null and not self.connection.features.implied_column_null:
sql += " NULL"
elif not null:
sql += " NOT NULL"
# Primary key/unique outputs
if field.primary_key:
sql += " PRIMARY KEY"
elif field.unique:
sql += " UNIQUE"
# Optionally add the tablespace if it's an implicitly indexed column
tablespace = field.db_tablespace or model._meta.db_tablespace
if tablespace and self.connection.features.supports_tablespaces and field.unique:
sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True)
# Return the sql
return sql, params
def skip_default(self, field):
"""
Some backends don't accept default values for certain columns types
(i.e. MySQL longtext and longblob).
"""
return False
def prepare_default(self, value):
"""
Only used for backends which have requires_literal_defaults feature
"""
raise NotImplementedError(
'subclasses of BaseDatabaseSchemaEditor for backends which have '
'requires_literal_defaults must provide a prepare_default() method'
)
def effective_default(self, field):
"""
Returns a field's effective database default value
"""
if field.has_default():
default = field.get_default()
elif not field.null and field.blank and field.empty_strings_allowed:
if field.get_internal_type() == "BinaryField":
default = six.binary_type()
else:
default = six.text_type()
else:
default = None
# If it's a callable, call it
if six.callable(default):
default = default()
# Run it through the field's get_db_prep_save method so we can send it
# to the database.
default = field.get_db_prep_save(default, self.connection)
return default
def quote_value(self, value):
"""
Returns a quoted version of the value so it's safe to use in an SQL
string. This is not safe against injection from user code; it is
intended only for use in making SQL scripts or preparing default values
for particularly tricky backends (defaults are not user-defined, though,
so this is safe).
"""
raise NotImplementedError()
# Actions
def create_model(self, model):
"""
Takes a model and creates a table for it in the database.
Will also create any accompanying indexes or unique constraints.
"""
# Create column SQL, add FK deferreds if needed
column_sqls = []
params = []
for field in model._meta.local_fields:
# SQL
definition, extra_params = self.column_sql(model, field)
if definition is None:
continue
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Autoincrement SQL (for backends with inline variant)
col_type_suffix = field.db_type_suffix(connection=self.connection)
if col_type_suffix:
definition += " %s" % col_type_suffix
params.extend(extra_params)
# FK
if field.remote_field and field.db_constraint:
to_table = field.remote_field.model._meta.db_table
to_column = field.remote_field.model._meta.get_field(field.remote_field.field_name).column
if self.connection.features.supports_foreign_keys:
self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
elif self.sql_create_inline_fk:
definition += " " + self.sql_create_inline_fk % {
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
# Add the SQL to our big list
column_sqls.append("%s %s" % (
self.quote_name(field.column),
definition,
))
# Autoincrement SQL (for backends with post table definition variant)
if field.get_internal_type() == "AutoField":
autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column)
if autoinc_sql:
self.deferred_sql.extend(autoinc_sql)
# Add any unique_togethers (always deferred, as some fields might be
# created afterwards, like geometry fields with some backends)
for fields in model._meta.unique_together:
columns = [model._meta.get_field(field).column for field in fields]
self.deferred_sql.append(self._create_unique_sql(model, columns))
# Make the table
sql = self.sql_create_table % {
"table": self.quote_name(model._meta.db_table),
"definition": ", ".join(column_sqls)
}
if model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
if tablespace_sql:
sql += ' ' + tablespace_sql
# Prevent using [] as params, in the case a literal '%' is used in the definition
self.execute(sql, params or None)
# Add any field index and index_together's (deferred as SQLite3 _remake_table needs it)
self.deferred_sql.extend(self._model_indexes_sql(model))
# Make M2M tables
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.create_model(field.remote_field.through)
def delete_model(self, model):
"""
Deletes a model from the database.
"""
# Handle auto-created intermediary models
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.delete_model(field.remote_field.through)
# Delete the table
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deals with a model changing its unique_together.
Note: The input unique_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_unique_together)
news = set(tuple(fields) for fields in new_unique_together)
# Deleted uniques
for fields in olds.difference(news):
self._delete_composed_index(model, fields, {'unique': True}, self.sql_delete_unique)
# Created uniques
for fields in news.difference(olds):
columns = [model._meta.get_field(field).column for field in fields]
self.execute(self._create_unique_sql(model, columns))
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deals with a model changing its index_together.
Note: The input index_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_index_together)
news = set(tuple(fields) for fields in new_index_together)
# Deleted indexes
for fields in olds.difference(news):
self._delete_composed_index(model, fields, {'index': True}, self.sql_delete_index)
# Created indexes
for field_names in news.difference(olds):
fields = [model._meta.get_field(field) for field in field_names]
self.execute(self._create_index_sql(model, fields, suffix="_idx"))
def _delete_composed_index(self, model, fields, constraint_kwargs, sql):
columns = [model._meta.get_field(field).column for field in fields]
constraint_names = self._constraint_names(model, columns, **constraint_kwargs)
if len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
))
self.execute(self._delete_constraint_sql(sql, model, constraint_names[0]))
def alter_db_table(self, model, old_db_table, new_db_table):
"""
Renames the table a model points to.
"""
if old_db_table == new_db_table:
return
self.execute(self.sql_rename_table % {
"old_table": self.quote_name(old_db_table),
"new_table": self.quote_name(new_db_table),
})
def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
"""
Moves a model's table between tablespaces
"""
self.execute(self.sql_retablespace_table % {
"table": self.quote_name(model._meta.db_table),
"old_tablespace": self.quote_name(old_db_tablespace),
"new_tablespace": self.quote_name(new_db_tablespace),
})
def add_field(self, model, field):
"""
Creates a field on a model.
Usually involves adding a column, but may involve adding a
table instead (for M2M fields)
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.create_model(field.remote_field.through)
# Get the column's definition
definition, params = self.column_sql(model, field, include_default=True)
# It might not actually have a column behind it
if definition is None:
return
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Build the SQL and run it
sql = self.sql_create_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"definition": definition,
}
self.execute(sql, params)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if not self.skip_default(field) and field.default is not None:
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": self.sql_alter_column_no_default % {
"column": self.quote_name(field.column),
}
}
self.execute(sql)
# Add an index, if required
if field.db_index and not field.unique:
self.deferred_sql.append(self._create_index_sql(model, [field]))
# Add any FK constraints later
if field.remote_field and self.connection.features.supports_foreign_keys and field.db_constraint:
self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def remove_field(self, model, field):
"""
Removes a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.delete_model(field.remote_field.through)
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
# Drop any FK constraints, MySQL requires explicit deletion
if field.remote_field:
fk_names = self._constraint_names(model, [field.column], foreign_key=True)
for fk_name in fk_names:
self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
# Delete the column
sql = self.sql_delete_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def alter_field(self, model, old_field, new_field, strict=False):
"""
Allows a field's type, uniqueness, nullability, default, column,
constraints etc. to be modified.
Requires a copy of the old field as well so we can only perform
changes that are required.
If strict is true, raises errors if the old column does not match old_field precisely.
"""
# Ensure this field is even column-based
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params['type']
new_db_params = new_field.db_parameters(connection=self.connection)
new_type = new_db_params['type']
if ((old_type is None and old_field.remote_field is None) or
(new_type is None and new_field.remote_field is None)):
raise ValueError(
"Cannot alter field %s into %s - they do not properly define "
"db_type (are you using a badly-written custom field?)" %
(old_field, new_field),
)
elif old_type is None and new_type is None and (
old_field.remote_field.through and new_field.remote_field.through and
old_field.remote_field.through._meta.auto_created and
new_field.remote_field.through._meta.auto_created):
return self._alter_many_to_many(model, old_field, new_field, strict)
elif old_type is None and new_type is None and (
old_field.remote_field.through and new_field.remote_field.through and
not old_field.remote_field.through._meta.auto_created and
not new_field.remote_field.through._meta.auto_created):
# Both sides have through models; this is a no-op.
return
elif old_type is None or new_type is None:
raise ValueError(
"Cannot alter field %s into %s - they are not compatible types "
"(you cannot alter to or from M2M fields, or add or remove "
"through= on M2M fields)" % (old_field, new_field)
)
self._alter_field(model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict)
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
"""Actually perform a "physical" (non-ManyToMany) field update."""
# Drop any FK constraints, we'll remake them later
fks_dropped = set()
if old_field.remote_field and old_field.db_constraint:
fk_names = self._constraint_names(model, [old_field.column], foreign_key=True)
if strict and len(fk_names) != 1:
raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % (
len(fk_names),
model._meta.db_table,
old_field.column,
))
for fk_name in fk_names:
fks_dropped.add((old_field.column,))
self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
# Has unique been removed?
if old_field.unique and (not new_field.unique or (not old_field.primary_key and new_field.primary_key)):
# Find the unique constraint for this field
constraint_names = self._constraint_names(model, [old_field.column], unique=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_unique, model, constraint_name))
# Drop incoming FK constraints if we're a primary key and things are going
# to change.
if old_field.primary_key and new_field.primary_key and old_type != new_type:
            # '_meta.related_objects' also contains M2M reverse fields; these
            # will be filtered out
for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field):
rel_fk_names = self._constraint_names(
new_rel.related_model, [new_rel.field.column], foreign_key=True
)
for fk_name in rel_fk_names:
self.execute(self._delete_constraint_sql(self.sql_delete_fk, new_rel.related_model, fk_name))
# Removed an index? (no strict check, as multiple indexes are possible)
if (old_field.db_index and not new_field.db_index and
not old_field.unique and not
(not new_field.unique and old_field.unique)):
# Find the index for this field
index_names = self._constraint_names(model, [old_field.column], index=True)
for index_name in index_names:
self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_name))
# Change check constraints?
if old_db_params['check'] != new_db_params['check'] and old_db_params['check']:
constraint_names = self._constraint_names(model, [old_field.column], check=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_check, model, constraint_name))
# Have they renamed the column?
if old_field.column != new_field.column:
self.execute(self._rename_field_sql(model._meta.db_table, old_field, new_field, new_type))
# Next, start accumulating actions to do
actions = []
null_actions = []
post_actions = []
# Type change?
if old_type != new_type:
fragment, other_actions = self._alter_column_type_sql(
model._meta.db_table, old_field, new_field, new_type
)
actions.append(fragment)
post_actions.extend(other_actions)
# When changing a column NULL constraint to NOT NULL with a given
# default value, we need to perform 4 steps:
# 1. Add a default for new incoming writes
# 2. Update existing NULL rows with new default
# 3. Replace NULL constraint with NOT NULL
# 4. Drop the default again.
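        # For example (a sketch, PostgreSQL-flavoured SQL): altering a nullable
        # column "c" on table "t" to NOT NULL with default 0 emits roughly:
        #   ALTER TABLE "t" ALTER COLUMN "c" SET DEFAULT 0;
        #   UPDATE "t" SET "c" = 0 WHERE "c" IS NULL;
        #   ALTER TABLE "t" ALTER COLUMN "c" SET NOT NULL;
        #   ALTER TABLE "t" ALTER COLUMN "c" DROP DEFAULT;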
# Default change?
old_default = self.effective_default(old_field)
new_default = self.effective_default(new_field)
needs_database_default = (
old_default != new_default and
new_default is not None and
not self.skip_default(new_field)
)
if needs_database_default:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": self.prepare_default(new_default),
},
[],
))
else:
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
))
# Nullability change?
if old_field.null != new_field.null:
if (self.connection.features.interprets_empty_strings_as_nulls and
new_field.get_internal_type() in ("CharField", "TextField")):
# The field is nullable in the database anyway, leave it alone
pass
elif new_field.null:
null_actions.append((
self.sql_alter_column_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
else:
null_actions.append((
self.sql_alter_column_not_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
# Only if we have a default and there is a change from NULL to NOT NULL
four_way_default_alteration = (
new_field.has_default() and
(old_field.null and not new_field.null)
)
if actions or null_actions:
if not four_way_default_alteration:
# If we don't have to do a 4-way default alteration we can
# directly run a (NOT) NULL alteration
actions = actions + null_actions
# Combine actions together if we can (e.g. postgres)
if self.connection.features.supports_combined_alters and actions:
sql, params = tuple(zip(*actions))
actions = [(", ".join(sql), sum(params, []))]
# Apply those actions
for sql, params in actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if four_way_default_alteration:
# Update existing rows with default value
self.execute(
self.sql_update_with_default % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
)
# Since we didn't run a NOT NULL change before we need to do it
# now
for sql, params in null_actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if post_actions:
for sql, params in post_actions:
self.execute(sql, params)
# Added a unique?
if (not old_field.unique and new_field.unique) or (
old_field.primary_key and not new_field.primary_key and new_field.unique
):
self.execute(self._create_unique_sql(model, [new_field.column]))
# Added an index?
if (not old_field.db_index and new_field.db_index and
not new_field.unique and not
(not old_field.unique and new_field.unique)):
self.execute(self._create_index_sql(model, [new_field], suffix="_uniq"))
# Type alteration on primary key? Then we need to alter the column
# referring to us.
rels_to_update = []
if old_field.primary_key and new_field.primary_key and old_type != new_type:
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Changed to become primary key?
# Note that we don't detect unsetting of a PK, as we assume another field
# will always come along and replace it.
if not old_field.primary_key and new_field.primary_key:
# First, drop the old PK
constraint_names = self._constraint_names(model, primary_key=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of PK constraints for %s" % (
len(constraint_names),
model._meta.db_table,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_pk, model, constraint_name))
# Make the new one
self.execute(
self.sql_create_pk % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_pk")),
"columns": self.quote_name(new_field.column),
}
)
# Update all referencing columns
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Handle our type alters on the other end of rels from the PK stuff above
for old_rel, new_rel in rels_to_update:
rel_db_params = new_rel.field.db_parameters(connection=self.connection)
rel_type = rel_db_params['type']
fragment, other_actions = self._alter_column_type_sql(
new_rel.related_model._meta.db_table, old_rel.field, new_rel.field, rel_type
)
self.execute(
self.sql_alter_column % {
"table": self.quote_name(new_rel.related_model._meta.db_table),
"changes": fragment[0],
},
fragment[1],
)
for sql, params in other_actions:
self.execute(sql, params)
# Does it have a foreign key?
if (new_field.remote_field and
(fks_dropped or not old_field.remote_field or not old_field.db_constraint) and
new_field.db_constraint):
self.execute(self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s"))
# Rebuild FKs that pointed to us if we previously had to drop them
if old_field.primary_key and new_field.primary_key and old_type != new_type:
for rel in new_field.model._meta.related_objects:
if not rel.many_to_many:
self.execute(self._create_fk_sql(rel.related_model, rel.field, "_fk"))
# Does it have check constraints we need to add?
if old_db_params['check'] != new_db_params['check'] and new_db_params['check']:
self.execute(
self.sql_create_check % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_check")),
"column": self.quote_name(new_field.column),
"check": new_db_params['check'],
}
)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if needs_database_default:
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": self.sql_alter_column_no_default % {
"column": self.quote_name(new_field.column),
}
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def _alter_column_type_sql(self, table, old_field, new_field, new_type):
"""
Hook to specialize column type alteration for different backends,
for cases when a creation type is different to an alteration type
(e.g. SERIAL in PostgreSQL, PostGIS fields).
        Should return two things: an SQL fragment of (sql, params) to insert
into an ALTER TABLE statement, and a list of extra (sql, params) tuples
to run once the field is altered.
"""
return (
(
self.sql_alter_column_type % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
),
[],
)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""
Alters M2Ms to repoint their to= endpoints.
"""
# Rename the through table
if old_field.remote_field.through._meta.db_table != new_field.remote_field.through._meta.db_table:
self.alter_db_table(old_field.remote_field.through, old_field.remote_field.through._meta.db_table,
new_field.remote_field.through._meta.db_table)
# Repoint the FK to the other side
self.alter_field(
new_field.remote_field.through,
# We need the field that points to the target model, so we can tell alter_field to change it -
# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()),
)
self.alter_field(
new_field.remote_field.through,
# for self-referential models we need to alter field from the other end too
old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()),
)
def _create_index_name(self, model, column_names, suffix=""):
"""
Generates a unique name for an index/unique constraint.
"""
# If there is just one column in the index, use a default algorithm from Django
if len(column_names) == 1 and not suffix:
return truncate_name(
'%s_%s' % (model._meta.db_table, self._digest(column_names[0])),
self.connection.ops.max_name_length()
)
# Else generate the name for the index using a different algorithm
table_name = model._meta.db_table.replace('"', '').replace('.', '_')
index_unique_name = '_%s' % self._digest(table_name, *column_names)
max_length = self.connection.ops.max_name_length() or 200
# If the index name is too long, truncate it
index_name = ('%s_%s%s%s' % (
table_name, column_names[0], index_unique_name, suffix,
)).replace('"', '').replace('.', '_')
if len(index_name) > max_length:
part = ('_%s%s%s' % (column_names[0], index_unique_name, suffix))
index_name = '%s%s' % (table_name[:(max_length - len(part))], part)
# It shouldn't start with an underscore (Oracle hates this)
if index_name[0] == "_":
index_name = index_name[1:]
# If it's STILL too long, just hash it down
if len(index_name) > max_length:
index_name = hashlib.md5(force_bytes(index_name)).hexdigest()[:max_length]
# It can't start with a number on Oracle, so prepend D if we need to
if index_name[0].isdigit():
index_name = "D%s" % index_name[:-1]
return index_name
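    # Worked example (illustrative): for table "myapp_book", columns
    # ["author_id"] and a suffix like "_fk_...", the generated name comes out
    # roughly as "myapp_book_author_id_<8-char-md5>_fk_...", then gets
    # truncated (or, failing that, hashed) to fit max_name_length().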
def _create_index_sql(self, model, fields, suffix="", sql=None):
"""
Return the SQL statement to create the index for one or several fields.
`sql` can be specified if the syntax differs from the standard (GIS
indexes, ...).
"""
if len(fields) == 1 and fields[0].db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
elif model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
else:
tablespace_sql = ""
if tablespace_sql:
tablespace_sql = " " + tablespace_sql
columns = [field.column for field in fields]
sql_create_index = sql or self.sql_create_index
return sql_create_index % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, columns, suffix=suffix)),
"columns": ", ".join(self.quote_name(column) for column in columns),
"extra": tablespace_sql,
}
def _model_indexes_sql(self, model):
"""
Return all index SQL statements (field indexes, index_together) for the
specified model, as a list.
"""
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
output = []
for field in model._meta.local_fields:
if field.db_index and not field.unique:
output.append(self._create_index_sql(model, [field], suffix=""))
for field_names in model._meta.index_together:
fields = [model._meta.get_field(field) for field in field_names]
output.append(self._create_index_sql(model, fields, suffix="_idx"))
return output
def _rename_field_sql(self, table, old_field, new_field, new_type):
return self.sql_rename_column % {
"table": self.quote_name(table),
"old_column": self.quote_name(old_field.column),
"new_column": self.quote_name(new_field.column),
"type": new_type,
}
def _create_fk_sql(self, model, field, suffix):
from_table = model._meta.db_table
from_column = field.column
to_table = field.target_field.model._meta.db_table
to_column = field.target_field.column
suffix = suffix % {
"to_table": to_table,
"to_column": to_column,
}
return self.sql_create_fk % {
"table": self.quote_name(from_table),
"name": self.quote_name(self._create_index_name(model, [from_column], suffix=suffix)),
"column": self.quote_name(from_column),
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
def _create_unique_sql(self, model, columns):
return self.sql_create_unique % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, columns, suffix="_uniq")),
"columns": ", ".join(self.quote_name(column) for column in columns),
}
def _delete_constraint_sql(self, template, model, name):
return template % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(name),
}
def _constraint_names(self, model, column_names=None, unique=None,
primary_key=None, index=None, foreign_key=None,
check=None):
"""
Returns all constraint names matching the columns and conditions
"""
column_names = list(column_names) if column_names else None
with self.connection.cursor() as cursor:
constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table)
result = []
for name, infodict in constraints.items():
if column_names is None or column_names == infodict['columns']:
if unique is not None and infodict['unique'] != unique:
continue
if primary_key is not None and infodict['primary_key'] != primary_key:
continue
if index is not None and infodict['index'] != index:
continue
if check is not None and infodict['check'] != check:
continue
if foreign_key is not None and not infodict['foreign_key']:
continue
result.append(name)
return result
| bsd-3-clause |
toshywoshy/ansible | lib/ansible/modules/cloud/digital_ocean/digital_ocean_certificate_info.py | 25 | 3570 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: digital_ocean_certificate_info
short_description: Gather information about DigitalOcean certificates
description:
- This module can be used to gather information about DigitalOcean provided certificates.
- This module was called C(digital_ocean_certificate_facts) before Ansible 2.9. The usage did not change.
author: "Abhijeet Kasurde (@Akasurde)"
version_added: "2.6"
options:
certificate_id:
description:
- Certificate ID that can be used to identify and reference a certificate.
required: false
requirements:
- "python >= 2.6"
extends_documentation_fragment: digital_ocean.documentation
'''
EXAMPLES = '''
- name: Gather information about all certificates
digital_ocean_certificate_info:
oauth_token: "{{ oauth_token }}"
- name: Gather information about certificate with given id
digital_ocean_certificate_info:
oauth_token: "{{ oauth_token }}"
certificate_id: "892071a0-bb95-49bc-8021-3afd67a210bf"
- name: Get not after information about certificate
digital_ocean_certificate_info:
register: resp_out
- set_fact:
not_after_date: "{{ item.not_after }}"
loop: "{{ resp_out.data|json_query(name) }}"
vars:
name: "[?name=='web-cert-01']"
- debug: var=not_after_date
'''
RETURN = '''
data:
description: DigitalOcean certificate information
returned: success
type: list
sample: [
{
"id": "892071a0-bb95-49bc-8021-3afd67a210bf",
"name": "web-cert-01",
"not_after": "2017-02-22T00:23:00Z",
"sha1_fingerprint": "dfcc9f57d86bf58e321c2c6c31c7a971be244ac7",
"created_at": "2017-02-08T16:02:37Z"
},
]
'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.digital_ocean import DigitalOceanHelper
from ansible.module_utils._text import to_native
def core(module):
certificate_id = module.params.get('certificate_id', None)
rest = DigitalOceanHelper(module)
base_url = 'certificates?'
if certificate_id is not None:
response = rest.get("%s/%s" % (base_url, certificate_id))
status_code = response.status_code
if status_code != 200:
module.fail_json(msg="Failed to retrieve certificates for DigitalOcean")
resp_json = response.json
certificate = resp_json['certificate']
else:
certificate = rest.get_paginated_data(base_url=base_url, data_key_name='certificates')
module.exit_json(changed=False, data=certificate)
def main():
argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
argument_spec.update(
certificate_id=dict(type='str', required=False),
)
module = AnsibleModule(argument_spec=argument_spec)
if module._name == 'digital_ocean_certificate_facts':
module.deprecate("The 'digital_ocean_certificate_facts' module has been renamed to 'digital_ocean_certificate_info'", version='2.13')
try:
core(module)
except Exception as e:
module.fail_json(msg=to_native(e), exception=format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 |
FRidh/Sea | Sea/adapter/couplings/Coupling.py | 2 | 3004 | import abc
import logging
import Sea
import numpy as np
from ..base import Base
class Coupling(Base):
"""
Abstract base class for all :mod:`Sea.adapter.couplings` classes.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, obj, connection, subsystem_from, subsystem_to):
Base.__init__(self, obj)
obj.addProperty("App::PropertyLink", "Connection", "Coupling", "Connection this coupling is part of.")
obj.Connection = connection
obj.setEditorMode("Connection", 1)
obj.Frequency = connection.Frequency
obj.addProperty("App::PropertyFloatList", "CLF", "Coupling", "Coupling loss factor.")
obj.setEditorMode("CLF", 1)
obj.addProperty("App::PropertyLink", "SubsystemFrom", "Coupling", "Subsystem from")
obj.setEditorMode("SubsystemFrom", 1)
obj.addProperty("App::PropertyLink", "SubsystemTo", "Coupling", "Subsystem to")
obj.setEditorMode("SubsystemTo", 1)
obj.addProperty("App::PropertyFloatList", "ImpedanceFrom", "Subsystem From", "Impedance of connection corrected From subsystem.")
obj.setEditorMode("ImpedanceFrom", 1)
obj.addProperty("App::PropertyFloatList", "ResistanceFrom", "Subsystem From", "Resistance of connection corrected From subsystem.")
obj.setEditorMode("ResistanceFrom", 1)
obj.addProperty("App::PropertyFloatList", "ImpedanceTo", "Subsystem To", "Impedance of connection corrected To subsystem.")
obj.setEditorMode("ImpedanceTo", 1)
obj.addProperty("App::PropertyFloatList", "ResistanceTo", "Subsystem To", "Resistance of connection corrected To subsystem.")
obj.setEditorMode("ResistanceTo", 1)
"""How or more specifically, when to update the size of the coupling?"""
#obj.addProperty("App::PropertyFloat", "Size", "Coupling", "Size of the junction.")
#subsystem_from.CouplingsFrom = subsystem_from.CouplingsFrom + [obj]
#subsystem_to.CouplingsTo = subsystem_to.CouplingsTo + [obj]
obj.SubsystemFrom = subsystem_from
obj.SubsystemTo = subsystem_to
def onChanged(self, obj, prop):
Base.onChanged(self, obj, prop)
if prop == 'SubsystemFrom':
obj.Proxy.subsystem_from = obj.SubsystemFrom.Proxy
if prop == 'SubsystemTo':
obj.Proxy.subsystem_to = obj.SubsystemTo.Proxy
def execute(self, obj):
Base.execute(self, obj)
obj.CLF = obj.Proxy.clf.tolist()
obj.ImpedanceFrom = obj.Proxy.impedance_from.tolist()
obj.ImpedanceTo = obj.Proxy.impedance_to.tolist()
obj.ResistanceFrom = obj.Proxy.resistance_from.tolist()
obj.ResistanceTo = obj.Proxy.resistance_to.tolist()
#@abc.abstractmethod
#def size(self, obj):
#"""
#Return the size of the coupling.
#"""
#return
| bsd-3-clause |
gtacoin-dev/gtacoin | qa/rpc-tests/test_framework/comptool.py | 1 | 18090 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Gtacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from .mininode import *
from .blockstore import BlockStore, TxStore
from .util import p2p_port
'''
This is a tool for comparing two or more gtacoinds to each other
using a script provided.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
'''
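# A minimal sketch of a test generator (illustrative only; build_block() is a
# hypothetical helper standing in for however a concrete test constructs its
# CBlock objects):
#
#   class ExampleTest(object):
#       def get_tests(self):
#           block = build_block()  # hypothetical helper
#           # expect acceptance; the tip should become this block
#           yield TestInstance([[block, True]])
#           # expect rejection with code 16 and a reason prefix:
#           # yield TestInstance([[bad_block, RejectResult(16, b'bad-blk')]])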
# TestNode behaves as follows:
# Configure with a BlockStore and TxStore
# on_inv: log the message but don't request
# on_headers: log the chain tip
# on_pong: update ping response map (for synchronization)
# on_getheaders: provide headers via BlockStore
# on_getdata: provide blocks via BlockStore
global mininode_lock
class RejectResult(object):
'''
Outcome that expects rejection of a transaction or block.
'''
def __init__(self, code, reason=b''):
self.code = code
self.reason = reason
def match(self, other):
if self.code != other.code:
return False
return other.reason.startswith(self.reason)
def __repr__(self):
return '%i:%s' % (self.code,self.reason or '*')
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
NodeConnCB.__init__(self)
self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
self.block_reject_map = {}
self.tx_reject_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
self.closed = False
def on_close(self, conn):
self.closed = True
def add_connection(self, conn):
self.conn = conn
def on_headers(self, conn, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, conn, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
conn.send_message(response)
def on_getdata(self, conn, message):
[conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1:
self.tx_request_map[i.hash] = True
elif i.type == 2:
self.block_request_map[i.hash] = True
def on_inv(self, conn, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, conn, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def on_reject(self, conn, message):
if message.message == b'tx':
self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
if message.message == b'block':
self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.conn.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
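# For example (a sketch): assuming b1, b2 are CBlocks and tx1 a CTransaction
# built elsewhere, a batched instance that inv's everything at the end and
# only checks the final tip plus tx1's mempool acceptance could look like:
#
#   TestInstance([[b1, None], [b2, True], [tx1, True]],
#                sync_every_block=False, sync_every_tx=False)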
class TestInstance(object):
def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects if objects else []
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
class TestManager(object):
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.test_nodes = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
test_node = TestNode(self.block_store, self.tx_store)
self.test_nodes.append(test_node)
self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
test_node.add_connection(self.connections[-1])
def clear_all_connections(self):
self.connections = []
self.test_nodes = []
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.test_nodes)
return wait_until(disconnected, timeout=10)
def wait_for_verack(self):
def veracked():
return all(node.verack_received for node in self.test_nodes)
return wait_until(veracked, timeout=10)
def wait_for_pings(self, counter):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.test_nodes)
return wait_until(received_pongs)
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(blocks_requested, attempts=20*num_blocks):
# print [ c.cb.block_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested block")
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Analogous to sync_block (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(transaction_requested, attempts=20*num_events):
# print [ c.cb.tx_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested transaction")
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
if c.cb.bestblockhash == blockhash:
return False
if blockhash not in c.cb.block_reject_map:
print('Block not in reject map: %064x' % (blockhash))
return False
if not outcome.match(c.cb.block_reject_map[blockhash]):
print('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash))
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
# print c.cb.bestblockhash, blockhash, outcome
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
# print c.rpc.getrawmempool()
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
if txhash in c.cb.lastInv:
return False
if txhash not in c.cb.tx_reject_map:
print('Tx not in reject map: %064x' % (txhash))
return False
if not outcome.match(c.cb.tx_reject_map[txhash]):
print('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash))
return False
elif ((txhash in c.cb.lastInv) != outcome):
# print c.rpc.getrawmempool(), c.cb.lastInv
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 1
for test_instance in self.test_generator.get_tests():
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome, tip ] = [ None, None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for test_obj in test_instance.blocks_and_transactions:
b_or_t = test_obj[0]
outcome = test_obj[1]
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
tip = block.sha256
# each test_obj can have an optional third argument
# to specify the tip we should compare with
# (default is to use the block being tested)
if len(test_obj) >= 3:
tip = test_obj[2]
# Add to shared block_store, set as current block
# If there was an open getdata request for the block
# previously, and we didn't have an entry in the
# block_store, then immediately deliver, because the
# node wouldn't send another getdata request while
# the earlier one is outstanding.
first_block_with_hash = True
if self.block_store.get(block.sha256) is not None:
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
for c in self.connections:
if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.connections:
c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
print("Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ])
test_number += 1
[ c.disconnect_node() for c in self.connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
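
# --- Illustrative sketch (editor addition, not part of the original file) ---
# Shows, under assumptions about the surrounding framework, the shape of the
# generator that feeds TestManager.run(): get_tests() yields test instances
# whose blocks_and_transactions is a list of [block_or_tx, outcome(, tip)]
# entries. 'ExampleTestGen' and the build_* helpers are hypothetical.
#
#     class ExampleTestGen(object):
#         def get_tests(self):
#             block = build_block()                  # hypothetical helper
#             # expect the block to be accepted, syncing every connection
#             yield TestInstance([[block, True]], sync_every_block=True)
#             tx = build_transaction()               # hypothetical helper
#             # expect the tx to be rejected with code 16
#             yield TestInstance([[tx, RejectResult(16)]], sync_every_tx=True)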
| mit |
NL66278/odoo | addons/resource/faces/plocale.py | 433 | 1910 | ############################################################################
# Copyright (C) 2005 by Reithinger GmbH
# [email protected]
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
import gettext
import os.path
import locale
import sys
def _get_translation():
try:
return gettext.translation("faces")
except:
try:
if sys.frozen:
path = os.path.dirname(sys.argv[0])
path = os.path.join(path, "resources", "faces", "locale")
else:
path = os.path.split(__file__)[0]
path = os.path.join(path, "locale")
return gettext.translation("faces", path)
except Exception, e:
return None
def get_gettext():
trans = _get_translation()
if trans: return trans.ugettext
return lambda msg: msg
def get_encoding():
trans = _get_translation()
if trans: return trans.charset()
return locale.getpreferredencoding()
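
# Illustrative usage sketch (editor addition, not part of the original file):
# how a consumer module would typically wire up the helpers above. Demo only.
def _example_usage():  # never called by this module
    _ = get_gettext()
    encoding = get_encoding()
    return _("faces"), encoding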
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
arrabito/DIRAC | FrameworkSystem/private/standardLogging/Formatter/ColoredBaseFormatter.py | 4 | 2106 | """
ColoredBaseFormatter
"""
__RCSID__ = "$Id$"
import sys
from DIRAC.FrameworkSystem.private.standardLogging.Formatter.BaseFormatter import BaseFormatter
class ColoredBaseFormatter(BaseFormatter):
"""
  ColoredBaseFormatter formats a log record into a string representing a log
  message. It extends BaseFormatter, which is based on the Formatter object of
  the standard logging library.
  This custom formatter is useful for formatting messages to match the gLogger
  format. It adds color to all messages coming from StdoutBackend and
  StderrBackend, coloring them according to their levels.
"""
COLOR_MAP = {
'black': 0,
'red': 1,
'green': 2,
'yellow': 3,
'blue': 4,
'magenta': 5,
'cyan': 6,
'white': 7
}
LEVEL_MAP = {
'ALWAYS': ('black', 'white', False),
'NOTICE': (None, 'magenta', False),
'INFO': (None, 'green', False),
'VERBOSE': (None, 'cyan', False),
'DEBUG': (None, 'blue', False),
'WARN': (None, 'yellow', False),
'ERROR': (None, 'red', False),
'FATAL': ('red', 'black', False)
}
def format(self, record):
"""
Overriding.
    format is the main method of the Formatter object: it is the method which
    transforms a log record into a string with colors.
    According to the level, the method gets colors from LEVEL_MAP and adds them
    to the message.
    :param record: the log record containing all the information about the log message: name, level, threadid...
"""
stringRecord = super(ColoredBaseFormatter, self).format(record)
# post treatment
if self._options['Color'] and sys.stdout.isatty() and sys.stderr.isatty():
params = []
bg, fg, bold = self.LEVEL_MAP[record.levelname]
if bg in self.COLOR_MAP:
params.append(str(self.COLOR_MAP[bg] + 40))
if fg in self.COLOR_MAP:
params.append(str(self.COLOR_MAP[fg] + 30))
if bold:
params.append('1')
stringRecord = ("".join(('\x1b[', ";".join(params), 'm', stringRecord, '\x1b[0m')))
return stringRecord
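
# Editor's note (illustrative, not from the original file): for an ERROR
# record, LEVEL_MAP yields (None, 'red', False); the only ANSI parameter
# emitted is str(COLOR_MAP['red'] + 30) == '31', so the final string is
# '\x1b[31m' + stringRecord + '\x1b[0m' -- red foreground, then reset.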
| gpl-3.0 |
eLBati/odoo | openerp/tools/amount_to_text_en.py | 441 | 5103 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from translate import _
_logger = logging.getLogger(__name__)
#-------------------------------------------------------------
#ENGLISH
#-------------------------------------------------------------
to_19 = ( 'Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six',
'Seven', 'Eight', 'Nine', 'Ten', 'Eleven', 'Twelve', 'Thirteen',
'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen' )
tens = ( 'Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety')
denom = ( '',
'Thousand', 'Million', 'Billion', 'Trillion', 'Quadrillion',
'Quintillion', 'Sextillion', 'Septillion', 'Octillion', 'Nonillion',
'Decillion', 'Undecillion', 'Duodecillion', 'Tredecillion', 'Quattuordecillion',
'Sexdecillion', 'Septendecillion', 'Octodecillion', 'Novemdecillion', 'Vigintillion' )
def _convert_nn(val):
"""convert a value < 100 to English.
"""
if val < 20:
return to_19[val]
for (dcap, dval) in ((k, 20 + (10 * v)) for (v, k) in enumerate(tens)):
if dval + 10 > val:
if val % 10:
return dcap + '-' + to_19[val % 10]
return dcap
def _convert_nnn(val):
"""
convert a value < 1000 to english, special cased because it is the level that kicks
off the < 100 special case. The rest are more general. This also allows you to
get strings in the form of 'forty-five hundred' if called directly.
"""
word = ''
(mod, rem) = (val % 100, val // 100)
if rem > 0:
word = to_19[rem] + ' Hundred'
if mod > 0:
word += ' '
if mod > 0:
word += _convert_nn(mod)
return word
def english_number(val):
if val < 100:
return _convert_nn(val)
if val < 1000:
return _convert_nnn(val)
for (didx, dval) in ((v - 1, 1000 ** v) for v in range(len(denom))):
if dval > val:
mod = 1000 ** didx
l = val // mod
r = val - (l * mod)
ret = _convert_nnn(l) + ' ' + denom[didx]
if r > 0:
ret = ret + ', ' + english_number(r)
return ret
def amount_to_text(number, currency):
number = '%.2f' % number
units_name = currency
list = str(number).split('.')
start_word = english_number(int(list[0]))
end_word = english_number(int(list[1]))
cents_number = int(list[1])
cents_name = (cents_number > 1) and 'Cents' or 'Cent'
return ' '.join(filter(None, [start_word, units_name, (start_word or units_name) and (end_word or cents_name) and 'and', end_word, cents_name]))
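
# Illustrative examples (editor addition, not part of the original file),
# assuming the helpers above run as written:
#   english_number(1654)           -> 'One Thousand, Six Hundred Fifty-Four'
#   amount_to_text(1654.0, 'euro') -> 'One Thousand, Six Hundred Fifty-Four euro and Zero Cent'
# Note: this two-argument amount_to_text() is shadowed by the generic
# amount_to_text(nbr, lang, currency) defined below.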
#-------------------------------------------------------------
# Generic functions
#-------------------------------------------------------------
_translate_funcs = {'en' : amount_to_text}
#TODO: we should use the country AND language (ex: septante VS soixante dix)
#TODO: we should use en by default, but the translation func is yet to be implemented
def amount_to_text(nbr, lang='en', currency='euro'):
""" Converts an integer to its textual representation, using the language set in the context if any.
Example::
        1654: one thousand, six hundred fifty-four.
"""
import openerp.loglevels as loglevels
# if nbr > 10000000:
# _logger.warning(_("Number too large '%d', can not translate it"))
# return str(nbr)
if not _translate_funcs.has_key(lang):
_logger.warning(_("no translation function found for lang: '%s'"), lang)
#TODO: (default should be en) same as above
lang = 'en'
return _translate_funcs[lang](abs(nbr), currency)
if __name__=='__main__':
from sys import argv
lang = 'nl'
if len(argv) < 2:
for i in range(1,200):
print i, ">>", int_to_text(i, lang)
for i in range(200,999999,139):
print i, ">>", int_to_text(i, lang)
else:
print int_to_text(int(argv[1]), lang)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
TheBigW/DRC | XMLSerializer.py | 1 | 4179 | # -*- coding: utf-8 -*-
import lxml.etree as ET
class Serializer(object):
def __init__(self):
return None
@staticmethod
def getSerializeMembers(SerializeObject):
strSerializeMembers = []
for member in dir(SerializeObject):
strMember = str(member)
strType = str(type(getattr(SerializeObject, strMember)))
print(strMember + " : " + strType)
if (strType.find("descriptor") == -1) and (
strType.find("function") == -1) and (
strType.find("method") == -1) and (
strMember.find("__") != 0):
strSerializeMembers.append(strMember)
# print( "Serialize considered members : " + str(strSerializeMembers) )
return strSerializeMembers
@staticmethod
def SerializeArray(XMLParent, arrayInst):
for arrayIndex, arrayItem in enumerate(arrayInst):
Serializer.SerializeMember(XMLParent, "elem" + str(arrayIndex),
arrayItem)
@staticmethod
def SerializeMember(XMLParent, MemberName, newValue):
strType = str(type(newValue))
# print( "serialize type : " + strType )
if strType.find("instance") != -1:
XMLParent = ET.SubElement(XMLParent, MemberName)
Serializer.SerializeClass(newValue, XMLParent)
elif strType.find("list") != -1:
newElem = ET.SubElement(XMLParent, MemberName)
Serializer.SerializeArray(newElem, newValue)
else:
newElem = ET.SubElement(XMLParent, MemberName)
newElem.text = str(newValue)
@staticmethod
def SerializeClass(SerializeObject, rootElem=None):
strSerMemberNames = Serializer.getSerializeMembers(SerializeObject)
for strElem in strSerMemberNames:
Serializer.SerializeMember(rootElem, strElem,
getattr(SerializeObject, strElem))
@staticmethod
def Serialize(SerializeObject):
strClassName = SerializeObject.__class__.__name__
rootElem = ET.Element(strClassName)
Serializer.SerializeClass(SerializeObject, rootElem)
return ET.tostring(rootElem)
@staticmethod
def read(fileName, SerializeObject):
root = ET.parse(fileName)
return Serializer.DeserializeClass(SerializeObject, root.getroot())
@staticmethod
def write(fileName, SerializeObject):
strClassName = SerializeObject.__class__.__name__
rootElem = ET.Element(strClassName)
Serializer.SerializeClass(SerializeObject, rootElem)
ET.ElementTree(rootElem).write(fileName)
@staticmethod
def DeserializeArray(XMLParent, value):
# array needs to have at least one value for correct type
# information, else values are read and treated as string
arrayInst = []
for arrayIndex, arrayNode in enumerate(XMLParent):
arrayInst.append(Serializer.DeserializeMember(arrayNode, value[0]))
return arrayInst
@staticmethod
def DeserializeMember(XMLElem, value):
theType = type(value)
strType = str(theType)
print("Deserializing : " + strType)
if strType.find("instance") != -1:
return Serializer.DeserializeClass(value, XMLElem)
elif strType.find("list") != -1:
return Serializer.DeserializeArray(XMLElem, value)
else:
return theType(XMLElem.text)
@staticmethod
def DeserializeClass(SerializeObject, rootElem):
strSerMemberNames = Serializer.getSerializeMembers(SerializeObject)
for strElem, xmlChildElem in zip(strSerMemberNames, rootElem):
setattr(SerializeObject, strElem,
Serializer.DeserializeMember(xmlChildElem,
getattr(SerializeObject,
strElem)))
return SerializeObject
@staticmethod
def DeSerialize(strXmlString, SerializeObject):
root = ET.fromstring(strXmlString)
return Serializer.DeserializeClass(SerializeObject, root)
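
# Illustrative round-trip sketch (editor addition, not part of the original
# file). 'Point' is a hypothetical class; note that list attributes must be
# non-empty so DeserializeArray() can infer element types on the way back in.
def _example_roundtrip():  # demo only, never called by this module
    class Point(object):
        def __init__(self):
            self.x = 1
            self.y = 2.5
    xml_string = Serializer.Serialize(Point())
    restored = Serializer.DeSerialize(xml_string, Point())
    return xml_string, restored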
| gpl-3.0 |
luiseduardohdbackup/odoo | openerp/modules/registry.py | 220 | 19731 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Models registries.
"""
from collections import Mapping, defaultdict
import logging
import os
import threading
import openerp
from .. import SUPERUSER_ID
from openerp.tools import assertion_report, lazy_property, classproperty, config
from openerp.tools.lru import LRU
_logger = logging.getLogger(__name__)
class Registry(Mapping):
""" Model registry for a particular database.
The registry is essentially a mapping between model names and model
instances. There is one registry instance per database.
"""
def __init__(self, db_name):
super(Registry, self).__init__()
self.models = {} # model name/model instance mapping
self._sql_error = {}
self._store_function = {}
self._pure_function_fields = {} # {model: [field, ...], ...}
self._init = True
self._init_parent = {}
self._assertion_report = assertion_report.assertion_report()
self._fields_by_model = None
# modules fully loaded (maintained during init phase by `loading` module)
self._init_modules = set()
self.db_name = db_name
self._db = openerp.sql_db.db_connect(db_name)
# special cursor for test mode; None means "normal" mode
self.test_cr = None
        # Indicates that the registry has been fully loaded and is ready for use.
self.ready = False
# Inter-process signaling (used only when openerp.multi_process is True):
# The `base_registry_signaling` sequence indicates the whole registry
# must be reloaded.
# The `base_cache_signaling sequence` indicates all caches must be
# invalidated (i.e. cleared).
self.base_registry_signaling_sequence = None
self.base_cache_signaling_sequence = None
self.cache = LRU(8192)
# Flag indicating if at least one model cache has been cleared.
# Useful only in a multi-process context.
self._any_cache_cleared = False
cr = self.cursor()
has_unaccent = openerp.modules.db.has_unaccent(cr)
if openerp.tools.config['unaccent'] and not has_unaccent:
_logger.warning("The option --unaccent was given but no unaccent() function was found in database.")
self.has_unaccent = openerp.tools.config['unaccent'] and has_unaccent
cr.close()
#
# Mapping abstract methods implementation
# => mixin provides methods keys, items, values, get, __eq__, and __ne__
#
def __len__(self):
""" Return the size of the registry. """
return len(self.models)
def __iter__(self):
""" Return an iterator over all model names. """
return iter(self.models)
def __getitem__(self, model_name):
""" Return the model with the given name or raise KeyError if it doesn't exist."""
return self.models[model_name]
def __call__(self, model_name):
""" Same as ``self[model_name]``. """
return self.models[model_name]
@lazy_property
def pure_function_fields(self):
""" Return the list of pure function fields (field objects) """
fields = []
for mname, fnames in self._pure_function_fields.iteritems():
model_fields = self[mname]._fields
for fname in fnames:
fields.append(model_fields[fname])
return fields
def clear_manual_fields(self):
""" Invalidate the cache for manual fields. """
self._fields_by_model = None
def get_manual_fields(self, cr, model_name):
""" Return the manual fields (as a dict) for the given model. """
if self._fields_by_model is None:
# Query manual fields for all models at once
self._fields_by_model = dic = defaultdict(dict)
cr.execute('SELECT * FROM ir_model_fields WHERE state=%s', ('manual',))
for field in cr.dictfetchall():
dic[field['model']][field['name']] = field
return self._fields_by_model[model_name]
def do_parent_store(self, cr):
for o in self._init_parent:
self.get(o)._parent_store_compute(cr)
self._init = False
def obj_list(self):
""" Return the list of model names in this registry."""
return self.keys()
def add(self, model_name, model):
""" Add or replace a model in the registry."""
self.models[model_name] = model
def load(self, cr, module):
""" Load a given module in the registry.
At the Python level, the modules are already loaded, but not yet on a
per-registry level. This method populates a registry with the given
        modules, i.e. it instantiates all the classes of the given module
        and registers them in the registry.
"""
from .. import models
models_to_load = [] # need to preserve loading order
lazy_property.reset_all(self)
# Instantiate registered classes (via the MetaModel automatic discovery
# or via explicit constructor call), and add them to the pool.
for cls in models.MetaModel.module_to_models.get(module.name, []):
# models register themselves in self.models
model = cls._build_model(self, cr)
if model._name not in models_to_load:
# avoid double-loading models whose declaration is split
models_to_load.append(model._name)
return [self.models[m] for m in models_to_load]
def setup_models(self, cr, partial=False):
""" Complete the setup of models.
This must be called after loading modules and before using the ORM.
:param partial: ``True`` if all models have not been loaded yet.
"""
lazy_property.reset_all(self)
# load custom models
ir_model = self['ir.model']
cr.execute('select model from ir_model where state=%s', ('manual',))
for (model_name,) in cr.fetchall():
ir_model.instanciate(cr, SUPERUSER_ID, model_name, {})
# prepare the setup on all models
for model in self.models.itervalues():
model._prepare_setup(cr, SUPERUSER_ID)
# do the actual setup from a clean state
self._m2m = {}
for model in self.models.itervalues():
model._setup_base(cr, SUPERUSER_ID, partial)
for model in self.models.itervalues():
model._setup_fields(cr, SUPERUSER_ID)
for model in self.models.itervalues():
model._setup_complete(cr, SUPERUSER_ID)
def clear_caches(self):
""" Clear the caches
This clears the caches associated to methods decorated with
``tools.ormcache`` or ``tools.ormcache_multi`` for all the models.
"""
for model in self.models.itervalues():
model.clear_caches()
# Special case for ir_ui_menu which does not use openerp.tools.ormcache.
ir_ui_menu = self.models.get('ir.ui.menu')
if ir_ui_menu is not None:
ir_ui_menu.clear_cache()
# Useful only in a multi-process context.
def reset_any_cache_cleared(self):
self._any_cache_cleared = False
# Useful only in a multi-process context.
def any_cache_cleared(self):
return self._any_cache_cleared
@classmethod
def setup_multi_process_signaling(cls, cr):
if not openerp.multi_process:
return None, None
# Inter-process signaling:
# The `base_registry_signaling` sequence indicates the whole registry
# must be reloaded.
# The `base_cache_signaling sequence` indicates all caches must be
# invalidated (i.e. cleared).
cr.execute("""SELECT sequence_name FROM information_schema.sequences WHERE sequence_name='base_registry_signaling'""")
if not cr.fetchall():
cr.execute("""CREATE SEQUENCE base_registry_signaling INCREMENT BY 1 START WITH 1""")
cr.execute("""SELECT nextval('base_registry_signaling')""")
cr.execute("""CREATE SEQUENCE base_cache_signaling INCREMENT BY 1 START WITH 1""")
cr.execute("""SELECT nextval('base_cache_signaling')""")
cr.execute("""
SELECT base_registry_signaling.last_value,
base_cache_signaling.last_value
FROM base_registry_signaling, base_cache_signaling""")
r, c = cr.fetchone()
_logger.debug("Multiprocess load registry signaling: [Registry: # %s] "\
"[Cache: # %s]",
r, c)
return r, c
def enter_test_mode(self):
""" Enter the 'test' mode, where one cursor serves several requests. """
assert self.test_cr is None
self.test_cr = self._db.test_cursor()
RegistryManager.enter_test_mode()
def leave_test_mode(self):
""" Leave the test mode. """
assert self.test_cr is not None
self.test_cr.force_close()
self.test_cr = None
RegistryManager.leave_test_mode()
def cursor(self):
""" Return a new cursor for the database. The cursor itself may be used
as a context manager to commit/rollback and close automatically.
"""
cr = self.test_cr
if cr is not None:
# While in test mode, we use one special cursor across requests. The
# test cursor uses a reentrant lock to serialize accesses. The lock
# is granted here by cursor(), and automatically released by the
# cursor itself in its method close().
cr.acquire()
return cr
return self._db.cursor()
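
# Editor's note (illustrative, not from the original file): Registry.cursor()
# returns a cursor usable as a context manager, so typical call sites read:
#
#     registry = RegistryManager.get('my_database')   # hypothetical db name
#     with registry.cursor() as cr:
#         cr.execute("SELECT 1")
#
# commit/rollback and close then happen automatically on exit.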
class DummyRLock(object):
""" Dummy reentrant lock, to be used while running rpc and js tests """
def acquire(self):
pass
def release(self):
pass
def __enter__(self):
self.acquire()
def __exit__(self, type, value, traceback):
self.release()
class RegistryManager(object):
""" Model registries manager.
The manager is responsible for creation and deletion of model
registries (essentially database connection/model registry pairs).
"""
_registries = None
_lock = threading.RLock()
_saved_lock = None
@classproperty
def registries(cls):
if cls._registries is None:
size = config.get('registry_lru_size', None)
if not size:
                # Size the LRU depending on the memory limits
                if os.name != 'posix':
                    # the soft memory limit cannot be specified on Windows...
size = 42
else:
# A registry takes 10MB of memory on average, so we reserve
# 10Mb (registry) + 5Mb (working memory) per registry
avgsz = 15 * 1024 * 1024
size = int(config['limit_memory_soft'] / avgsz)
cls._registries = LRU(size)
return cls._registries
@classmethod
def lock(cls):
""" Return the current registry lock. """
return cls._lock
@classmethod
def enter_test_mode(cls):
""" Enter the 'test' mode, where the registry is no longer locked. """
assert cls._saved_lock is None
cls._lock, cls._saved_lock = DummyRLock(), cls._lock
@classmethod
def leave_test_mode(cls):
""" Leave the 'test' mode. """
assert cls._saved_lock is not None
cls._lock, cls._saved_lock = cls._saved_lock, None
@classmethod
def get(cls, db_name, force_demo=False, status=None, update_module=False):
""" Return a registry for a given database name."""
with cls.lock():
try:
return cls.registries[db_name]
except KeyError:
return cls.new(db_name, force_demo, status,
update_module)
finally:
# set db tracker - cleaned up at the WSGI
# dispatching phase in openerp.service.wsgi_server.application
threading.current_thread().dbname = db_name
@classmethod
def new(cls, db_name, force_demo=False, status=None,
update_module=False):
""" Create and return a new registry for a given database name.
The (possibly) previous registry for that database name is discarded.
"""
import openerp.modules
with cls.lock():
with openerp.api.Environment.manage():
registry = Registry(db_name)
# Initializing a registry will call general code which will in
# turn call registries.get (this object) to obtain the registry
# being initialized. Make it available in the registries
# dictionary then remove it if an exception is raised.
cls.delete(db_name)
cls.registries[db_name] = registry
try:
with registry.cursor() as cr:
seq_registry, seq_cache = Registry.setup_multi_process_signaling(cr)
registry.base_registry_signaling_sequence = seq_registry
registry.base_cache_signaling_sequence = seq_cache
# This should be a method on Registry
openerp.modules.load_modules(registry._db, force_demo, status, update_module)
except Exception:
del cls.registries[db_name]
raise
# load_modules() above can replace the registry by calling
# indirectly new() again (when modules have to be uninstalled).
# Yeah, crazy.
registry = cls.registries[db_name]
cr = registry.cursor()
try:
registry.do_parent_store(cr)
cr.commit()
finally:
cr.close()
registry.ready = True
if update_module:
# only in case of update, otherwise we'll have an infinite reload loop!
cls.signal_registry_change(db_name)
return registry
@classmethod
def delete(cls, db_name):
"""Delete the registry linked to a given database. """
with cls.lock():
if db_name in cls.registries:
cls.registries[db_name].clear_caches()
del cls.registries[db_name]
@classmethod
def delete_all(cls):
"""Delete all the registries. """
with cls.lock():
for db_name in cls.registries.keys():
cls.delete(db_name)
@classmethod
def clear_caches(cls, db_name):
"""Clear caches
This clears the caches associated to methods decorated with
``tools.ormcache`` or ``tools.ormcache_multi`` for all the models
of the given database name.
This method is given to spare you a ``RegistryManager.get(db_name)``
        that would load the given database if it was not already loaded.
"""
with cls.lock():
if db_name in cls.registries:
cls.registries[db_name].clear_caches()
@classmethod
def check_registry_signaling(cls, db_name):
"""
        Check if the modules have changed and perform all necessary operations to update
        the registry of the corresponding database.
        :returns: True if changes have been detected in the database and False otherwise.
"""
changed = False
if openerp.multi_process and db_name in cls.registries:
registry = cls.get(db_name)
cr = registry.cursor()
try:
cr.execute("""
SELECT base_registry_signaling.last_value,
base_cache_signaling.last_value
FROM base_registry_signaling, base_cache_signaling""")
r, c = cr.fetchone()
_logger.debug("Multiprocess signaling check: [Registry - old# %s new# %s] "\
"[Cache - old# %s new# %s]",
registry.base_registry_signaling_sequence, r,
registry.base_cache_signaling_sequence, c)
# Check if the model registry must be reloaded (e.g. after the
# database has been updated by another process).
if registry.base_registry_signaling_sequence is not None and registry.base_registry_signaling_sequence != r:
changed = True
_logger.info("Reloading the model registry after database signaling.")
registry = cls.new(db_name)
                # Check if the model caches must be invalidated (e.g. after a write
                # occurred on another process). Don't clear right after a registry
                # has been reloaded.
elif registry.base_cache_signaling_sequence is not None and registry.base_cache_signaling_sequence != c:
changed = True
_logger.info("Invalidating all model caches after database signaling.")
registry.clear_caches()
registry.reset_any_cache_cleared()
registry.base_registry_signaling_sequence = r
registry.base_cache_signaling_sequence = c
finally:
cr.close()
return changed
@classmethod
def signal_caches_change(cls, db_name):
if openerp.multi_process and db_name in cls.registries:
# Check the registries if any cache has been cleared and signal it
# through the database to other processes.
registry = cls.get(db_name)
if registry.any_cache_cleared():
_logger.info("At least one model cache has been cleared, signaling through the database.")
cr = registry.cursor()
r = 1
try:
cr.execute("select nextval('base_cache_signaling')")
r = cr.fetchone()[0]
finally:
cr.close()
registry.base_cache_signaling_sequence = r
registry.reset_any_cache_cleared()
@classmethod
def signal_registry_change(cls, db_name):
if openerp.multi_process and db_name in cls.registries:
_logger.info("Registry changed, signaling through the database")
registry = cls.get(db_name)
cr = registry.cursor()
r = 1
try:
cr.execute("select nextval('base_registry_signaling')")
r = cr.fetchone()[0]
finally:
cr.close()
registry.base_registry_signaling_sequence = r
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
EDUlib/edx-platform | lms/djangoapps/mobile_api/tests/test_middleware.py | 5 | 7822 | """
Tests for Version Based App Upgrade Middleware
"""
from datetime import datetime
from unittest import mock
import ddt
from django.core.cache import caches
from django.http import HttpRequest, HttpResponse
from pytz import UTC
from lms.djangoapps.mobile_api.middleware import AppVersionUpgrade
from lms.djangoapps.mobile_api.models import AppVersionConfig
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
@ddt.ddt
class TestAppVersionUpgradeMiddleware(CacheIsolationTestCase):
"""
Tests for version based app upgrade middleware
"""
ENABLED_CACHES = ['default']
def setUp(self):
super().setUp()
self.middleware = AppVersionUpgrade()
self.set_app_version_config()
def set_app_version_config(self):
""" Creates configuration data for platform versions """
AppVersionConfig(platform="iOS", version="1.1.1", expire_at=None, enabled=True).save()
AppVersionConfig(
platform="iOS",
version="2.2.2",
expire_at=datetime(2014, 1, 1, tzinfo=UTC),
enabled=True
).save()
AppVersionConfig(
platform="iOS",
version="4.4.4",
expire_at=datetime(9000, 1, 1, tzinfo=UTC),
enabled=True
).save()
AppVersionConfig(platform="iOS", version="6.6.6", expire_at=None, enabled=True).save()
AppVersionConfig(platform="Android", version="1.1.1", expire_at=None, enabled=True).save()
AppVersionConfig(
platform="Android",
version="2.2.2",
expire_at=datetime(2014, 1, 1, tzinfo=UTC),
enabled=True
).save()
AppVersionConfig(
platform="Android",
version="4.4.4",
expire_at=datetime(5000, 1, 1, tzinfo=UTC),
enabled=True
).save()
AppVersionConfig(platform="Android", version="8.8.8", expire_at=None, enabled=True).save()
def process_middleware(self, user_agent, cache_get_many_calls_for_request=1):
""" Helper function that makes calls to middle process_request and process_response """
fake_request = HttpRequest()
fake_request.META['HTTP_USER_AGENT'] = user_agent
with mock.patch.object(caches['default'], 'get_many', wraps=caches['default'].get_many) as mocked_code:
request_response = self.middleware.process_request(fake_request)
assert cache_get_many_calls_for_request == mocked_code.call_count
with mock.patch.object(caches['default'], 'get_many', wraps=caches['default'].get_many) as mocked_code:
processed_response = self.middleware.process_response(fake_request, request_response or HttpResponse())
assert 0 == mocked_code.call_count
return request_response, processed_response
@ddt.data(
("Mozilla/5.0 (Linux; Android 5.1; Nexus 5 Build/LMY47I; wv) AppleWebKit/537.36 (KHTML, like Gecko) "
"Version/4.0 Chrome/47.0.2526.100 Mobile Safari/537.36 edX/org.edx.mobile/2.0.0"),
("Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) "
"Mobile/13C75 edX/org.edx.mobile/2.2.1"),
("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 "
"Safari/537.36"),
)
def test_non_mobile_app_requests(self, user_agent):
with self.assertNumQueries(0):
request_response, processed_response = self.process_middleware(user_agent, 0)
assert request_response is None
assert 200 == processed_response.status_code
assert AppVersionUpgrade.LATEST_VERSION_HEADER not in processed_response
assert AppVersionUpgrade.LAST_SUPPORTED_DATE_HEADER not in processed_response
@ddt.data(
"edX/org.edx.mobile (6.6.6; OS Version 9.2 (Build 13C75))",
"edX/org.edx.mobile (7.7.7; OS Version 9.2 (Build 13C75))",
"Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/8.8.8",
"Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/9.9.9",
)
def test_no_update(self, user_agent):
with self.assertNumQueries(2):
request_response, processed_response = self.process_middleware(user_agent)
assert request_response is None
assert 200 == processed_response.status_code
assert AppVersionUpgrade.LATEST_VERSION_HEADER not in processed_response
assert AppVersionUpgrade.LAST_SUPPORTED_DATE_HEADER not in processed_response
with self.assertNumQueries(0):
self.process_middleware(user_agent)
@ddt.data(
("edX/org.edx.mobile (5.1.1; OS Version 9.2 (Build 13C75))", "6.6.6"),
("edX/org.edx.mobile (5.1.1.RC; OS Version 9.2 (Build 13C75))", "6.6.6"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/5.1.1", "8.8.8"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/5.1.1.RC", "8.8.8"),
)
@ddt.unpack
def test_new_version_available(self, user_agent, latest_version):
with self.assertNumQueries(2):
request_response, processed_response = self.process_middleware(user_agent)
assert request_response is None
assert 200 == processed_response.status_code
assert latest_version == processed_response[AppVersionUpgrade.LATEST_VERSION_HEADER]
assert AppVersionUpgrade.LAST_SUPPORTED_DATE_HEADER not in processed_response
with self.assertNumQueries(0):
self.process_middleware(user_agent)
@ddt.data(
("edX/org.edx.mobile (1.0.1; OS Version 9.2 (Build 13C75))", "6.6.6"),
("edX/org.edx.mobile (1.1.1; OS Version 9.2 (Build 13C75))", "6.6.6"),
("edX/org.edx.mobile (2.0.5.RC; OS Version 9.2 (Build 13C75))", "6.6.6"),
("edX/org.edx.mobile (2.2.2; OS Version 9.2 (Build 13C75))", "6.6.6"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/1.0.1", "8.8.8"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/1.1.1", "8.8.8"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/2.0.5.RC", "8.8.8"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/2.2.2", "8.8.8"),
)
@ddt.unpack
def test_version_update_required(self, user_agent, latest_version):
with self.assertNumQueries(2):
request_response, processed_response = self.process_middleware(user_agent)
assert request_response is not None
assert 426 == processed_response.status_code
assert latest_version == processed_response[AppVersionUpgrade.LATEST_VERSION_HEADER]
with self.assertNumQueries(0):
self.process_middleware(user_agent)
@ddt.data(
("edX/org.edx.mobile (4.4.4; OS Version 9.2 (Build 13C75))", "6.6.6", '9000-01-01T00:00:00+00:00'),
(
"Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/4.4.4",
"8.8.8",
'5000-01-01T00:00:00+00:00',
),
)
@ddt.unpack
def test_version_update_available_with_deadline(self, user_agent, latest_version, upgrade_date):
with self.assertNumQueries(2):
request_response, processed_response = self.process_middleware(user_agent)
assert request_response is None
assert 200 == processed_response.status_code
assert latest_version == processed_response[AppVersionUpgrade.LATEST_VERSION_HEADER]
assert upgrade_date == processed_response[AppVersionUpgrade.LAST_SUPPORTED_DATE_HEADER]
with self.assertNumQueries(0):
self.process_middleware(user_agent)
| agpl-3.0 |
adamrp/qiime | scripts/compare_alpha_diversity.py | 15 | 12087 | #!/usr/bin/env python
# File created on 06 Jun 2011
from __future__ import division
__author__ = "William Van Treuren"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["William Van Treuren", "Greg Caporaso", "Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "William Van Treuren"
__email__ = "[email protected]"
import os
from os.path import join
from qiime.util import (parse_command_line_parameters,
make_option,
create_dir)
from qiime.compare_alpha_diversity import (compare_alpha_diversities,
_correct_compare_alpha_results,
test_types,
correction_types,
generate_alpha_diversity_boxplots)
script_info = {}
script_info[
'brief_description'] = """This script compares alpha diversities based on a two-sample t-test using either parametric or non-parametric (Monte Carlo) methods."""
script_info['script_description'] = """
This script compares the alpha diversity of samples found in a collated alpha
diversity file. The comparison is done not between samples, but between groups
of samples. The groupings are created via the input category passed via
-c/--category. Any samples which have the same value under the category will be
grouped.
For example, if your mapping file had a category called 'Treatment' that
separated your samples into three groups (Treatment='Control', Treatment='Drug',
Treatment='2xDose'), passing 'Treatment' to this script would cause it to
compare (Control,Drug), (Control,2xDose), (2xDose, Drug) alpha diversity
values. By default the two-sample t-test will be nonparametric (i.e. using
Monte Carlo permutations to calculate the p-value), though the user has the
option to make the test a parametric t-test.
The script creates an output file in tab-separated format where each row is a
different group comparison. The columns in each row denote which two groups of
samples are being compared, as well as the mean and standard deviation of each
group's alpha diversity. Finally, the t-statistic and p-value are reported for
the comparison. This file can be most easily viewed in a spreadsheet program
such as Excel.
Note: Any iterations of a rarefaction at a given depth will be averaged. For
instance, if your collated_alpha file had 10 iterations of the rarefaction at
depth 480, the scores for the alpha diversity metrics of those 10 iterations
would be averaged (within sample). The iterations are not controlled by this
script; when multiple_rarefactions.py is called, the -n option specifies the
number of iterations that have occurred. The multiple comparison correction
takes into account the number of between-group comparisons. If you do not know
the rarefaction depth available or you want to use the deepest rarefaction
level available then do not pass -d/--depth and it will default to using the
deepest available.
If t-statistics and/or p-values are None for any of your comparisons, there are
three possible reasons. The first is that there were undefined values in your
collated alpha diversity input file. This occurs if there were too few
sequences in one or more of the samples in the groups involved in those
comparisons to compute alpha diversity at that depth. You can either rerun
%prog passing a lower value for --depth, or you can re-run alpha diversity
after filtering samples with too few sequences. The second is that you had some
comparison where each treatment was represented by only a single sample. It is
not possible to perform a two-sample t-test on two samples each of length 1, so
None will be reported instead. The third possibility occurs when using the
nonparametric t-test with small datasets where the Monte Carlo permutations
don't return a p-value because the distribution of the data has no variance.
The multiple comparisons correction will not penalize you for comparisons that
return as None regardless of origin.
If the means/standard deviations are None for any treatment group, the likely
cause is that there is an 'n/a' value in the collated_alpha file that was
passed.
"""
script_info['script_usage'] = []
script_info['script_usage'].append(("Comparing alpha diversities",
"The following command takes the following input: a mapping file (which "
"associaties each sample with a number of characteristics), alpha diversity "
"metric (the results of collate_alpha for an alpha diverity metric, like "
"PD_whole_tree), depth (the rarefaction depth to use for comparison), "
"category (the category in the mapping file to determine which samples to "
"compare to each other), and output filepath (a path to the output file to be created). A "
"nonparametric two sample t-test is run to compare the alpha diversities "
"using the default number of Monte Carlo permutations (999).",
"%prog -i PD_whole_tree.txt -m mapping.txt -c Treatment -d 100 -o Treatment_PD100"))
script_info['script_usage'].append(("Comparing alpha diversities",
"Similar to above, but performs comparisons for two categories.",
"%prog -i PD_whole_tree.txt -m mapping.txt -c Treatment,DOB -d 100 -o Treatment_DOB_PD100"))
script_info['script_usage'].append(("Parametric t-test",
"The following command runs a parametric two sample t-test using the "
"t-distribution instead of Monte Carlo permutations at rarefaction depth 100.",
"%prog -i PD_whole_tree.txt -m mapping.txt -c Treatment -d 100 -o "
"PD_d100_parametric -t parametric"))
script_info['script_usage'].append(("Parametric t-test",
"The following command runs a parametric two sample t-test using the "
"t-distribution instead of Monte Carlo permutations at the greatest depth available.",
"%prog -i PD_whole_tree.txt -m mapping.txt -c Treatment -o "
"PD_dmax_parametric -t parametric"))
script_info['output_description'] = """
Generates a tsv stats file and pdf of boxplots for each input category.
Each row in the tsv file corresponds to a comparison between two groups of treatment values,
and includes the means and standard deviations of the two groups' alpha diversities,
along with the results of the two-sample t-test.
"""
script_info[
'script_usage_output_to_remove'] = [
'$PWD/PD_dmax_parametric.txt',
'$PWD/PD_d100_parametric.txt',
'$PWD/PD_d100.txt']
script_info['required_options'] = [
make_option('-i',
'--alpha_diversity_fp',
action='store',
type='existing_filepath',
help='path to collated alpha diversity file (as generated by '
'collate_alpha.py) [REQUIRED]'),
make_option('-m',
'--mapping_fp',
action='store',
type='existing_filepath',
help='path to the mapping file [REQUIRED]'),
make_option('-c',
'--categories',
action='store',
type='string',
help='comma-separated list of categories for comparison [REQUIRED]'),
make_option('-o',
'--output_dir',
action='store',
type='new_dirpath',
help='directory where output files should be stored [REQUIRED]')]
script_info['optional_options'] = [
make_option('-t', '--test_type', type='choice', choices=test_types,
help='the type of test to perform when calculating the p-values. Valid '
'choices: ' + ', '.join(test_types) + '. If test_type is '
'nonparametric, Monte Carlo permutations will be used to determine the '
'p-value. If test_type is parametric, the num_permutations option will '
'be ignored and the t-distribution will be used instead [default: '
'%default]', default='nonparametric'),
make_option('-n', '--num_permutations', type='int', default=999,
help='the number of permutations to perform when calculating the '
                'p-value. Must be at least 10. Only applies if test_type is '
'nonparametric [default: %default]'),
make_option('-p', '--correction_method', type='choice',
choices=correction_types, help='method to use for correcting multiple '
'comparisons. Available methods are bonferroni, fdr, or none. '
'[default: %default]', default='bonferroni'),
make_option('-d', '--depth', type='int', default=None,
help='depth of rarefaction file to use [default: greatest depth]')]
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
mapping_fp = opts.mapping_fp
alpha_diversity_fp = opts.alpha_diversity_fp
categories = opts.categories.split(',')
depth = opts.depth
output_dir = opts.output_dir
correction_method = opts.correction_method
test_type = opts.test_type
num_permutations = opts.num_permutations
if num_permutations < 10:
        option_parser.error('Number of permutations must be greater than or '
'equal to 10.')
create_dir(output_dir)
for category in categories:
stat_output_fp = join(output_dir, '%s_stats.txt' % category)
boxplot_output_fp = join(output_dir, '%s_boxplots.pdf' % category)
alpha_diversity_f = open(alpha_diversity_fp, 'U')
mapping_f = open(mapping_fp, 'U')
ttest_result, alphadiv_avgs = \
compare_alpha_diversities(alpha_diversity_f,
mapping_f,
category,
depth,
test_type,
num_permutations)
alpha_diversity_f.close()
mapping_f.close()
corrected_result = _correct_compare_alpha_results(ttest_result,
correction_method)
# write stats results
stat_output_f = open(stat_output_fp, 'w')
header = ('Group1\tGroup2\tGroup1 mean\tGroup1 std\tGroup2 mean\t'
'Group2 std\tt stat\tp-value')
lines = [header]
for (t0, t1), v in corrected_result.items():
lines.append('\t'.join(map(str, [t0,
t1,
alphadiv_avgs[t0][0],
alphadiv_avgs[t0][1],
alphadiv_avgs[t1][0],
alphadiv_avgs[t1][1],
v[0],
v[1]])))
stat_output_f.write('\n'.join(lines) + '\n')
stat_output_f.close()
# write box plots
alpha_diversity_f = open(alpha_diversity_fp, 'U')
mapping_f = open(mapping_fp, 'U')
boxplot = generate_alpha_diversity_boxplots(alpha_diversity_f,
mapping_f,
category,
depth)
alpha_diversity_f.close()
mapping_f.close()
boxplot.savefig(boxplot_output_fp)
if __name__ == "__main__":
main()
| gpl-2.0 |
GameKinger123x/mtasa-blue | vendor/google-breakpad/src/testing/gtest/test/gtest_test_utils.py | 408 | 10444 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide path\n'
'to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.')
print >> sys.stderr, message
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
      signal               Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
    # The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file object for the child's
# output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
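
# Illustrative usage sketch (editor addition, not part of the original file):
#
#   p = Subprocess(['python', '-c', 'print "hello"'])
#   if p.exited and p.exit_code == 0:
#     print p.output
#
# p.terminated_by_signal / p.signal cover the abnormal-exit cases.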
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO([email protected]): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
| gpl-3.0 |
ettrig/NIPAP | nipap/nipap/errors.py | 2 | 1301 |
class NipapError(Exception):
""" NIPAP base error class.
"""
error_code = 1000
class NipapInputError(NipapError):
""" Erroneous input.
A general input error.
"""
error_code = 1100
class NipapMissingInputError(NipapInputError):
""" Missing input.
Most input is passed in dicts, this could mean a missing key in a dict.
"""
error_code = 1110
class NipapExtraneousInputError(NipapInputError):
""" Extraneous input.
Most input is passed in dicts, this could mean an unknown key in a dict.
"""
error_code = 1120
class NipapNoSuchOperatorError(NipapInputError):
""" A non existent operator was specified.
"""
error_code = 1130
class NipapValueError(NipapError):
""" Something wrong with a value
For example, trying to send an integer when an IP address is expected.
"""
error_code = 1200
class NipapNonExistentError(NipapError):
""" A non existent object was specified
For example, try to get a prefix from a pool which doesn't exist.
"""
error_code = 1300
class NipapDuplicateError(NipapError):
""" The passed object violates unique constraints
For example, create a VRF with a name of an already existing one.
"""
error_code = 1400
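
# Illustrative usage sketch (editor addition, not part of the NIPAP API):
# callers typically catch the base class and map error_code onto a
# transport-level fault code.
def _example_handler():  # demo only, never called by this module
    try:
        raise NipapDuplicateError("VRF 'test' already exists")
    except NipapError as exc:
        return exc.error_code  # 1400 for duplicates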
| mit |
asser/django | tests/utils_tests/test_regex_helper.py | 448 | 1784 | from __future__ import unicode_literals
import unittest
from django.utils import regex_helper
class NormalizeTests(unittest.TestCase):
def test_empty(self):
pattern = r""
expected = [('', [])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_escape(self):
pattern = r"\\\^\$\.\|\?\*\+\(\)\["
expected = [('\\^$.|?*+()[', [])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_positional(self):
pattern = r"(.*)-(.+)"
expected = [('%(_0)s-%(_1)s', ['_0', '_1'])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_ignored(self):
pattern = r"(?i)(?L)(?m)(?s)(?u)(?#)"
expected = [('', [])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_noncapturing(self):
pattern = r"(?:non-capturing)"
expected = [('non-capturing', [])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_named(self):
pattern = r"(?P<first_group_name>.*)-(?P<second_group_name>.*)"
expected = [('%(first_group_name)s-%(second_group_name)s',
['first_group_name', 'second_group_name'])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_backreference(self):
pattern = r"(?P<first_group_name>.*)-(?P=first_group_name)"
expected = [('%(first_group_name)s-%(first_group_name)s',
['first_group_name'])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
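    def test_anchors(self):
        # Hedged addition (not in the original test file): '^' is assumed to
        # contribute nothing and '$' to terminate the pattern, leaving only
        # the literal text between them.
        pattern = r"^foo/$"
        expected = [('foo/', [])]
        result = regex_helper.normalize(pattern)
        self.assertEqual(result, expected)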
| bsd-3-clause |
sonofeft/PyHatch | pyhatch/examples/example_1.py | 1 | 1057 | import sys
import os
from pyhatch.hatch_supt import Hatch
SIMPLEDESC='''PyProTem acts as a temporary project for test purposes.'''
LONGDESC="""Use pyProTem to test tox usage locally, travis CI on checkin to
GitHub, tk_nosy to watch files locally and alert breakage, operation under
both python 2 and 3 on Windows and Linux."""
h = Hatch(projName='PyProTem',
mainPyFileName='main.py',
mainDefinesClass='Y',
mainClassName='ProTem',
mainFunctionName='my_function',
author='Some Guy',
github_user_name='somekindaguy',
proj_copyright='Copyright (c) 2015 Some Guy',
proj_license='GPL-3',
version='0.1.3',
email='[email protected]',
status='4 - Beta',
simpleDesc=SIMPLEDESC,
longDesc=LONGDESC,
year=None, # if None, set to this year
organization=None) # if None, set to author
# This example places project into user home directory
h.save_project_below_this_dir( os.path.expanduser('~/') )
| gpl-3.0 |
natanlailari/PennApps2015-Heartmates | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/response.py | 328 | 10347 | # urllib3/response.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
import zlib
import io
from ._collections import HTTPHeaderDict
from .exceptions import DecodeError
from .packages.six import string_types as basestring, binary_type
from .util import is_fp_closed
log = logging.getLogger(__name__)
class DeflateDecoder(object):
def __init__(self):
self._first_try = True
self._data = binary_type()
self._obj = zlib.decompressobj()
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
return self._obj.decompress(data)
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
def _get_decoder(mode):
if mode == 'gzip':
return zlib.decompressobj(16 + zlib.MAX_WBITS)
return DeflateDecoder()
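# Hedged sketch (not part of the original module): round-trip an in-memory
# gzip payload through _get_decoder, showing that the (16 + MAX_WBITS) window
# setting strips the gzip header transparently. Only stdlib modules are used.
def _demo_gzip_decoder():
    import gzip
    from io import BytesIO
    buf = BytesIO()
    f = gzip.GzipFile(fileobj=buf, mode='wb')
    f.write(b'hello world')
    f.close()
    decoder = _get_decoder('gzip')
    return decoder.decompress(buf.getvalue()) == b'hello world'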
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
    Backwards-compatible with httplib's HTTPResponse, but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
        If True, the body will be decoded according to the content-encodings
        named in the headers (like 'gzip' and 'deflate'). If False, decoding
        is skipped and the raw data is used instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
self.headers = HTTPHeaderDict()
if headers:
self.headers.update(headers)
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self._decoder = None
self._body = body if body and isinstance(body, basestring) else None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:`HTTPResponse.read` if bytes
        are encoded on the wire (e.g., compressed).
"""
return self._fp_bytes_read
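        # Hedged example (not in the original file): for a gzip-encoded
        # response, tell() reports the compressed byte count on the wire,
        # typically smaller than len(self.data) after decoding.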
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
        returned regardless of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
# Note: content-encoding value should be case-insensitive, per RFC 2616
# Section 3.5
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
try:
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt)
if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do not
# properly close the connection in all cases. There is no harm
# in redundantly calling close.
self._fp.close()
flush_decoder = True
self._fp_bytes_read += len(data)
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding,
e)
if flush_decoder and decode_content and self._decoder:
buf = self._decoder.decompress(binary_type())
data += buf + self._decoder.flush()
if cache_content:
self._body = data
return data
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
            ``amt`` bytes of data per iteration, but may return less. This is
            particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
headers = HTTPHeaderDict()
for k, v in r.getheaders():
headers.add(k, v)
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
return ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
return True
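    # Hedged usage sketch (not part of the original file); ``conn`` and the
    # host are hypothetical:
    #
    #   conn = httplib.HTTPConnection('example.com')
    #   conn.request('GET', '/')
    #   resp = HTTPResponse.from_httplib(conn.getresponse(),
    #                                    preload_content=False)
    #   for chunk in resp.stream(1024):
    #       handle(chunk)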
| apache-2.0 |
lokeshjindal15/pd-gem5 | src/arch/alpha/AlphaInterrupts.py | 69 | 1754 | # Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.SimObject import SimObject
class AlphaInterrupts(SimObject):
type = 'AlphaInterrupts'
cxx_class = 'AlphaISA::Interrupts'
cxx_header = "arch/alpha/interrupts.hh"
| bsd-3-clause |
henridwyer/scikit-learn | sklearn/gaussian_process/regression_models.py | 259 | 2166 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
"""
The built-in regression models submodule for the gaussian_process module.
"""
import numpy as np
def constant(x):
"""
Zero order polynomial (constant, p = 1) regression model.
x --> f(x) = 1
Parameters
----------
x : array_like
An array with shape (n_eval, n_features) giving the locations x at
which the regression model should be evaluated.
Returns
-------
f : array_like
An array with shape (n_eval, p) with the values of the regression
model.
"""
x = np.asarray(x, dtype=np.float)
n_eval = x.shape[0]
f = np.ones([n_eval, 1])
return f
def linear(x):
"""
First order polynomial (linear, p = n+1) regression model.
x --> f(x) = [ 1, x_1, ..., x_n ].T
Parameters
----------
x : array_like
An array with shape (n_eval, n_features) giving the locations x at
which the regression model should be evaluated.
Returns
-------
f : array_like
An array with shape (n_eval, p) with the values of the regression
model.
"""
x = np.asarray(x, dtype=np.float)
n_eval = x.shape[0]
f = np.hstack([np.ones([n_eval, 1]), x])
return f
def quadratic(x):
"""
    Second order polynomial (quadratic, p = n*(n+1)/2+n+1) regression model.
    x --> f(x) = [ 1, { x_i, i = 1,...,n }, { x_i * x_j, (i,j) = 1,...,n } ].T
                                                          i >= j
Parameters
----------
x : array_like
An array with shape (n_eval, n_features) giving the locations x at
which the regression model should be evaluated.
Returns
-------
f : array_like
An array with shape (n_eval, p) with the values of the regression
model.
"""
x = np.asarray(x, dtype=np.float)
n_eval, n_features = x.shape
f = np.hstack([np.ones([n_eval, 1]), x])
for k in range(n_features):
f = np.hstack([f, x[:, k, np.newaxis] * x[:, k:]])
return f
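# Hedged sanity check (not part of the original module): with n = 2 features
# the bases have 1, n + 1 and n*(n+1)/2 + n + 1 columns respectively.
if __name__ == '__main__':
    _x = np.array([[1.0, 2.0]])
    assert constant(_x).shape == (1, 1)
    assert linear(_x).shape == (1, 3)
    assert quadratic(_x).shape == (1, 6)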
| bsd-3-clause |
Arafatk/sympy | sympy/vector/coordsysrect.py | 9 | 24501 | from sympy.core.basic import Basic
from sympy.vector.scalar import BaseScalar
from sympy import eye, trigsimp, ImmutableMatrix as Matrix, Symbol
from sympy.core.compatibility import string_types, range
from sympy.core.cache import cacheit
from sympy.vector.orienters import (Orienter, AxisOrienter, BodyOrienter,
SpaceOrienter, QuaternionOrienter)
import sympy.vector
class CoordSysCartesian(Basic):
"""
Represents a coordinate system in 3-D space.
"""
def __new__(cls, name, location=None, rotation_matrix=None,
parent=None, vector_names=None, variable_names=None):
"""
The orientation/location parameters are necessary if this system
is being defined at a certain orientation or location wrt another.
Parameters
==========
name : str
The name of the new CoordSysCartesian instance.
location : Vector
The position vector of the new system's origin wrt the parent
instance.
rotation_matrix : SymPy ImmutableMatrix
The rotation matrix of the new coordinate system with respect
to the parent. In other words, the output of
new_system.rotation_matrix(parent).
parent : CoordSysCartesian
The coordinate system wrt which the orientation/location
(or both) is being defined.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
"""
name = str(name)
Vector = sympy.vector.Vector
BaseVector = sympy.vector.BaseVector
Point = sympy.vector.Point
if not isinstance(name, string_types):
raise TypeError("name should be a string")
#If orientation information has been provided, store
#the rotation matrix accordingly
if rotation_matrix is None:
parent_orient = Matrix(eye(3))
else:
if not isinstance(rotation_matrix, Matrix):
raise TypeError("rotation_matrix should be an Immutable" +
"Matrix instance")
parent_orient = rotation_matrix
#If location information is not given, adjust the default
#location as Vector.zero
if parent is not None:
if not isinstance(parent, CoordSysCartesian):
raise TypeError("parent should be a " +
"CoordSysCartesian/None")
if location is None:
location = Vector.zero
else:
if not isinstance(location, Vector):
raise TypeError("location should be a Vector")
#Check that location does not contain base
#scalars
for x in location.free_symbols:
if isinstance(x, BaseScalar):
raise ValueError("location should not contain" +
" BaseScalars")
origin = parent.origin.locate_new(name + '.origin',
location)
else:
location = Vector.zero
origin = Point(name + '.origin')
#All systems that are defined as 'roots' are unequal, unless
#they have the same name.
#Systems defined at same orientation/position wrt the same
#'parent' are equal, irrespective of the name.
#This is true even if the same orientation is provided via
#different methods like Axis/Body/Space/Quaternion.
#However, coincident systems may be seen as unequal if
#positioned/oriented wrt different parents, even though
#they may actually be 'coincident' wrt the root system.
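        #For example, CoordSysCartesian('A') == CoordSysCartesian('A'), and
        #N.orient_new_axis('B', q, N.k) == N.orient_new_axis('C', q, N.k)
        #despite the differing names, since parent and orientation match.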
if parent is not None:
obj = super(CoordSysCartesian, cls).__new__(
cls, Symbol(name), location, parent_orient, parent)
else:
obj = super(CoordSysCartesian, cls).__new__(
cls, Symbol(name), location, parent_orient)
obj._name = name
#Initialize the base vectors
if vector_names is None:
vector_names = (name + '.i', name + '.j', name + '.k')
latex_vects = [(r'\mathbf{\hat{i}_{%s}}' % name),
(r'\mathbf{\hat{j}_{%s}}' % name),
(r'\mathbf{\hat{k}_{%s}}' % name)]
pretty_vects = (name + '_i', name + '_j', name + '_k')
else:
_check_strings('vector_names', vector_names)
vector_names = list(vector_names)
latex_vects = [(r'\mathbf{\hat{%s}_{%s}}' % (x, name)) for
x in vector_names]
pretty_vects = [(name + '_' + x) for x in vector_names]
obj._i = BaseVector(vector_names[0], 0, obj,
pretty_vects[0], latex_vects[0])
obj._j = BaseVector(vector_names[1], 1, obj,
pretty_vects[1], latex_vects[1])
obj._k = BaseVector(vector_names[2], 2, obj,
pretty_vects[2], latex_vects[2])
#Initialize the base scalars
if variable_names is None:
variable_names = (name + '.x', name + '.y', name + '.z')
latex_scalars = [(r"\mathbf{{x}_{%s}}" % name),
(r"\mathbf{{y}_{%s}}" % name),
(r"\mathbf{{z}_{%s}}" % name)]
pretty_scalars = (name + '_x', name + '_y', name + '_z')
else:
            _check_strings('variable_names', variable_names)
variable_names = list(variable_names)
latex_scalars = [(r"\mathbf{{%s}_{%s}}" % (x, name)) for
x in variable_names]
pretty_scalars = [(name + '_' + x) for x in variable_names]
obj._x = BaseScalar(variable_names[0], 0, obj,
pretty_scalars[0], latex_scalars[0])
obj._y = BaseScalar(variable_names[1], 1, obj,
pretty_scalars[1], latex_scalars[1])
obj._z = BaseScalar(variable_names[2], 2, obj,
pretty_scalars[2], latex_scalars[2])
#Assign a Del operator instance
from sympy.vector.deloperator import Del
obj._delop = Del(obj)
#Assign params
obj._parent = parent
if obj._parent is not None:
obj._root = obj._parent._root
else:
obj._root = obj
obj._parent_rotation_matrix = parent_orient
obj._origin = origin
#Return the instance
return obj
def __str__(self, printer=None):
return self._name
__repr__ = __str__
_sympystr = __str__
def __iter__(self):
return iter([self.i, self.j, self.k])
@property
def origin(self):
return self._origin
@property
def delop(self):
return self._delop
@property
def i(self):
return self._i
@property
def j(self):
return self._j
@property
def k(self):
return self._k
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def z(self):
return self._z
def base_vectors(self):
return (self._i, self._j, self._k)
def base_scalars(self):
return (self._x, self._y, self._z)
@cacheit
def rotation_matrix(self, other):
"""
Returns the direction cosine matrix(DCM), also known as the
'rotation matrix' of this coordinate system with respect to
another system.
If v_a is a vector defined in system 'A' (in matrix format)
and v_b is the same vector defined in system 'B', then
v_a = A.rotation_matrix(B) * v_b.
A SymPy Matrix is returned.
Parameters
==========
other : CoordSysCartesian
The system which the DCM is generated to.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = CoordSysCartesian('N')
>>> A = N.orient_new_axis('A', q1, N.i)
>>> N.rotation_matrix(A)
Matrix([
[1, 0, 0],
[0, cos(q1), -sin(q1)],
[0, sin(q1), cos(q1)]])
"""
from sympy.vector.functions import _path
if not isinstance(other, CoordSysCartesian):
raise TypeError(str(other) +
" is not a CoordSysCartesian")
#Handle special cases
if other == self:
return eye(3)
elif other == self._parent:
return self._parent_rotation_matrix
elif other._parent == self:
return other._parent_rotation_matrix.T
#Else, use tree to calculate position
rootindex, path = _path(self, other)
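        #_path returns the index of the common root and the list of systems
        #from self up to that root and down to other. Going up, multiply by
        #each parent rotation matrix; coming down, multiply by the
        #transposes (their inverses, as rotation matrices are orthogonal).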
result = eye(3)
i = -1
for i in range(rootindex):
result *= path[i]._parent_rotation_matrix
i += 2
while i < len(path):
result *= path[i]._parent_rotation_matrix.T
i += 1
return result
@cacheit
def position_wrt(self, other):
"""
Returns the position vector of the origin of this coordinate
system with respect to another Point/CoordSysCartesian.
Parameters
==========
other : Point/CoordSysCartesian
If other is a Point, the position of this system's origin
        wrt it is returned. If it's an instance of CoordSysCartesian,
the position wrt its origin is returned.
Examples
========
>>> from sympy.vector import Point, CoordSysCartesian
>>> N = CoordSysCartesian('N')
>>> N1 = N.locate_new('N1', 10 * N.i)
>>> N.position_wrt(N1)
(-10)*N.i
"""
return self.origin.position_wrt(other)
def scalar_map(self, other):
"""
Returns a dictionary which expresses the coordinate variables
(base scalars) of this frame in terms of the variables of
otherframe.
Parameters
==========
otherframe : CoordSysCartesian
The other system to map the variables to.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy import Symbol
>>> A = CoordSysCartesian('A')
>>> q = Symbol('q')
>>> B = A.orient_new_axis('B', q, A.k)
>>> A.scalar_map(B)
{A.x: B.x*cos(q) - B.y*sin(q), A.y: B.x*sin(q) + B.y*cos(q), A.z: B.z}
"""
relocated_scalars = []
origin_coords = tuple(self.position_wrt(other).to_matrix(other))
for i, x in enumerate(other.base_scalars()):
relocated_scalars.append(x - origin_coords[i])
vars_matrix = (self.rotation_matrix(other) *
Matrix(relocated_scalars))
mapping = {}
for i, x in enumerate(self.base_scalars()):
mapping[x] = trigsimp(vars_matrix[i])
return mapping
def locate_new(self, name, position, vector_names=None,
variable_names=None):
"""
Returns a CoordSysCartesian with its origin located at the given
position wrt this coordinate system's origin.
Parameters
==========
name : str
The name of the new CoordSysCartesian instance.
position : Vector
The position vector of the new system's origin wrt this
one.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> A = CoordSysCartesian('A')
>>> B = A.locate_new('B', 10 * A.i)
>>> B.origin.position_wrt(A.origin)
10*A.i
"""
return CoordSysCartesian(name, location=position,
vector_names=vector_names,
variable_names=variable_names,
parent=self)
def orient_new(self, name, orienters, location=None,
vector_names=None, variable_names=None):
"""
Creates a new CoordSysCartesian oriented in the user-specified way
with respect to this system.
Please refer to the documentation of the orienter classes
for more information about the orientation procedure.
Parameters
==========
name : str
The name of the new CoordSysCartesian instance.
orienters : iterable/Orienter
An Orienter or an iterable of Orienters for orienting the
new coordinate system.
If an Orienter is provided, it is applied to get the new
system.
If an iterable is provided, the orienters will be applied
in the order in which they appear in the iterable.
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy import symbols
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = CoordSysCartesian('N')
Using an AxisOrienter
>>> from sympy.vector import AxisOrienter
>>> axis_orienter = AxisOrienter(q1, N.i + 2 * N.j)
>>> A = N.orient_new('A', (axis_orienter, ))
Using a BodyOrienter
>>> from sympy.vector import BodyOrienter
>>> body_orienter = BodyOrienter(q1, q2, q3, '123')
>>> B = N.orient_new('B', (body_orienter, ))
Using a SpaceOrienter
>>> from sympy.vector import SpaceOrienter
>>> space_orienter = SpaceOrienter(q1, q2, q3, '312')
>>> C = N.orient_new('C', (space_orienter, ))
Using a QuaternionOrienter
>>> from sympy.vector import QuaternionOrienter
>>> q_orienter = QuaternionOrienter(q0, q1, q2, q3)
>>> D = N.orient_new('D', (q_orienter, ))
"""
if isinstance(orienters, Orienter):
if isinstance(orienters, AxisOrienter):
final_matrix = orienters.rotation_matrix(self)
else:
final_matrix = orienters.rotation_matrix()
else:
final_matrix = Matrix(eye(3))
for orienter in orienters:
if isinstance(orienter, AxisOrienter):
final_matrix *= orienter.rotation_matrix(self)
else:
final_matrix *= orienter.rotation_matrix()
return CoordSysCartesian(name, rotation_matrix=final_matrix,
vector_names=vector_names,
variable_names=variable_names,
                                 location=location,
parent=self)
def orient_new_axis(self, name, angle, axis, location=None,
vector_names=None, variable_names=None):
"""
Axis rotation is a rotation about an arbitrary axis by
some angle. The angle is supplied as a SymPy expr scalar, and
the axis is supplied as a Vector.
Parameters
==========
name : string
The name of the new coordinate system
angle : Expr
The angle by which the new system is to be rotated
axis : Vector
The axis around which the rotation has to be performed
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = CoordSysCartesian('N')
>>> B = N.orient_new_axis('B', q1, N.i + 2 * N.j)
"""
orienter = AxisOrienter(angle, axis)
return self.orient_new(name, orienter,
location=location,
vector_names=vector_names,
variable_names=variable_names)
def orient_new_body(self, name, angle1, angle2, angle3,
rotation_order, location=None,
vector_names=None, variable_names=None):
"""
Body orientation takes this coordinate system through three
successive simple rotations.
Body fixed rotations include both Euler Angles and
Tait-Bryan Angles, see http://en.wikipedia.org/wiki/Euler_angles.
Parameters
==========
name : string
The name of the new coordinate system
angle1, angle2, angle3 : Expr
Three successive angles to rotate the coordinate system by
rotation_order : string
String defining the order of axes for rotation
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy import symbols
>>> q1, q2, q3 = symbols('q1 q2 q3')
>>> N = CoordSysCartesian('N')
A 'Body' fixed rotation is described by three angles and
three body-fixed rotation axes. To orient a coordinate system D
with respect to N, each sequential rotation is always about
the orthogonal unit vectors fixed to D. For example, a '123'
rotation will specify rotations about N.i, then D.j, then
        D.k. (Initially, D.i is the same as N.i)
Therefore,
>>> D = N.orient_new_body('D', q1, q2, q3, '123')
        is the same as
>>> D = N.orient_new_axis('D', q1, N.i)
>>> D = D.orient_new_axis('D', q2, D.j)
>>> D = D.orient_new_axis('D', q3, D.k)
Acceptable rotation orders are of length 3, expressed in XYZ or
        123, and cannot have a rotation about an axis twice in a row.
>>> B = N.orient_new_body('B', q1, q2, q3, '123')
>>> B = N.orient_new_body('B', q1, q2, 0, 'ZXZ')
>>> B = N.orient_new_body('B', 0, 0, 0, 'XYX')
"""
orienter = BodyOrienter(angle1, angle2, angle3, rotation_order)
return self.orient_new(name, orienter,
location=location,
vector_names=vector_names,
variable_names=variable_names)
def orient_new_space(self, name, angle1, angle2, angle3,
rotation_order, location=None,
vector_names=None, variable_names=None):
"""
Space rotation is similar to Body rotation, but the rotations
are applied in the opposite order.
Parameters
==========
name : string
The name of the new coordinate system
angle1, angle2, angle3 : Expr
Three successive angles to rotate the coordinate system by
rotation_order : string
String defining the order of axes for rotation
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
See Also
========
CoordSysCartesian.orient_new_body : method to orient via Euler
angles
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy import symbols
>>> q1, q2, q3 = symbols('q1 q2 q3')
>>> N = CoordSysCartesian('N')
To orient a coordinate system D with respect to N, each
sequential rotation is always about N's orthogonal unit vectors.
For example, a '123' rotation will specify rotations about
N.i, then N.j, then N.k.
Therefore,
>>> D = N.orient_new_space('D', q1, q2, q3, '312')
        is the same as
>>> B = N.orient_new_axis('B', q1, N.i)
>>> C = B.orient_new_axis('C', q2, N.j)
>>> D = C.orient_new_axis('D', q3, N.k)
"""
orienter = SpaceOrienter(angle1, angle2, angle3, rotation_order)
return self.orient_new(name, orienter,
location=location,
vector_names=vector_names,
variable_names=variable_names)
def orient_new_quaternion(self, name, q0, q1, q2, q3, location=None,
vector_names=None, variable_names=None):
"""
Quaternion orientation orients the new CoordSysCartesian with
Quaternions, defined as a finite rotation about lambda, a unit
vector, by some amount theta.
This orientation is described by four parameters:
q0 = cos(theta/2)
q1 = lambda_x sin(theta/2)
q2 = lambda_y sin(theta/2)
q3 = lambda_z sin(theta/2)
Quaternion does not take in a rotation order.
Parameters
==========
name : string
The name of the new coordinate system
q0, q1, q2, q3 : Expr
The quaternions to rotate the coordinate system by
location : Vector(optional)
The location of the new coordinate system's origin wrt this
system's origin. If not specified, the origins are taken to
be coincident.
vector_names, variable_names : iterable(optional)
Iterables of 3 strings each, with custom names for base
vectors and base scalars of the new system respectively.
Used for simple str printing.
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy import symbols
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = CoordSysCartesian('N')
>>> B = N.orient_new_quaternion('B', q0, q1, q2, q3)
"""
orienter = QuaternionOrienter(q0, q1, q2, q3)
return self.orient_new(name, orienter,
location=location,
vector_names=vector_names,
variable_names=variable_names)
def __init__(self, name, location=None, rotation_matrix=None,
parent=None, vector_names=None, variable_names=None,
latex_vects=None, pretty_vects=None, latex_scalars=None,
pretty_scalars=None):
#Dummy initializer for setting docstring
pass
__init__.__doc__ = __new__.__doc__
def _check_strings(arg_name, arg):
errorstr = arg_name + " must be an iterable of 3 string-types"
if len(arg) != 3:
raise ValueError(errorstr)
try:
for s in arg:
if not isinstance(s, string_types):
raise TypeError(errorstr)
except:
raise TypeError(errorstr)
| bsd-3-clause |
jhg/django | tests/utils_tests/test_baseconv.py | 326 | 1787 | from unittest import TestCase
from django.utils.baseconv import (
BaseConverter, base2, base16, base36, base56, base62, base64,
)
from django.utils.six.moves import range
class TestBaseConv(TestCase):
def test_baseconv(self):
nums = [-10 ** 10, 10 ** 10] + list(range(-100, 100))
for converter in [base2, base16, base36, base56, base62, base64]:
for i in nums:
self.assertEqual(i, converter.decode(converter.encode(i)))
def test_base11(self):
base11 = BaseConverter('0123456789-', sign='$')
self.assertEqual(base11.encode(1234), '-22')
self.assertEqual(base11.decode('-22'), 1234)
self.assertEqual(base11.encode(-1234), '$-22')
self.assertEqual(base11.decode('$-22'), -1234)
def test_base20(self):
base20 = BaseConverter('0123456789abcdefghij')
self.assertEqual(base20.encode(1234), '31e')
self.assertEqual(base20.decode('31e'), 1234)
self.assertEqual(base20.encode(-1234), '-31e')
self.assertEqual(base20.decode('-31e'), -1234)
def test_base64(self):
self.assertEqual(base64.encode(1234), 'JI')
self.assertEqual(base64.decode('JI'), 1234)
self.assertEqual(base64.encode(-1234), '$JI')
self.assertEqual(base64.decode('$JI'), -1234)
def test_base7(self):
base7 = BaseConverter('cjdhel3', sign='g')
self.assertEqual(base7.encode(1234), 'hejd')
self.assertEqual(base7.decode('hejd'), 1234)
self.assertEqual(base7.encode(-1234), 'ghejd')
self.assertEqual(base7.decode('ghejd'), -1234)
def test_exception(self):
self.assertRaises(ValueError, BaseConverter, 'abc', sign='a')
self.assertIsInstance(BaseConverter('abc', sign='d'), BaseConverter)
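    def test_base36(self):
        # Hedged addition (not in the original test file): spot-check two
        # well-known base36 values, assuming digits '0'-'9' then 'a'-'z'.
        self.assertEqual(base36.encode(35), 'z')
        self.assertEqual(base36.decode('z'), 35)
        self.assertEqual(base36.encode(36), '10')
        self.assertEqual(base36.decode('10'), 36)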
| bsd-3-clause |
abhishekgahlot/scikit-learn | examples/applications/topics_extraction_with_nmf.py | 106 | 2313 | """
========================================================
Topics extraction with Non-Negative Matrix Factorization
========================================================
This is a proof of concept application of Non Negative Matrix
Factorization of the term frequency matrix of a corpus of documents so
as to extract an additive model of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a few tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time complexity
is polynomial.
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting TF-IDF features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data[:n_samples])
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
feature_names = vectorizer.get_feature_names()
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
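# Hedged extension (not part of the original example): the per-document topic
# weights are the NMF transform of the same TF-IDF matrix.
# doc_topic = nmf.transform(tfidf)  # shape (n_samples, n_topics)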
| bsd-3-clause |
RoshaneH/INFO3180lab4 | lib/werkzeug/testsuite/datastructures.py | 97 | 27488 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.datastructures
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the functionality of the provided Werkzeug
datastructures.
TODO:
- FileMultiDict
- Immutable types undertested
- Split up dict tests
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import unittest
import pickle
from contextlib import contextmanager
from copy import copy
from werkzeug import datastructures
from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \
iterlistvalues, text_type
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.exceptions import BadRequestKeyError
class NativeItermethodsTestCase(WerkzeugTestCase):
def test_basic(self):
@datastructures.native_itermethods(['keys', 'values', 'items'])
class StupidDict(object):
def keys(self, multi=1):
return iter(['a', 'b', 'c'] * multi)
def values(self, multi=1):
return iter([1, 2, 3] * multi)
def items(self, multi=1):
return iter(zip(iterkeys(self, multi=multi),
itervalues(self, multi=multi)))
d = StupidDict()
expected_keys = ['a', 'b', 'c']
expected_values = [1, 2, 3]
expected_items = list(zip(expected_keys, expected_values))
self.assert_equal(list(iterkeys(d)), expected_keys)
self.assert_equal(list(itervalues(d)), expected_values)
self.assert_equal(list(iteritems(d)), expected_items)
self.assert_equal(list(iterkeys(d, 2)), expected_keys * 2)
self.assert_equal(list(itervalues(d, 2)), expected_values * 2)
self.assert_equal(list(iteritems(d, 2)), expected_items * 2)
class MutableMultiDictBaseTestCase(WerkzeugTestCase):
storage_class = None
def test_pickle(self):
cls = self.storage_class
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
d = cls()
d.setlist(b'foo', [1, 2, 3, 4])
d.setlist(b'bar', b'foo bar baz'.split())
s = pickle.dumps(d, protocol)
ud = pickle.loads(s)
self.assert_equal(type(ud), type(d))
self.assert_equal(ud, d)
self.assert_equal(pickle.loads(
s.replace(b'werkzeug.datastructures', b'werkzeug')), d)
ud[b'newkey'] = b'bla'
self.assert_not_equal(ud, d)
def test_basic_interface(self):
md = self.storage_class()
assert isinstance(md, dict)
mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3),
('a', 1), ('a', 3), ('d', 4), ('c', 3)]
md = self.storage_class(mapping)
# simple getitem gives the first value
self.assert_equal(md['a'], 1)
self.assert_equal(md['c'], 3)
with self.assert_raises(KeyError):
md['e']
self.assert_equal(md.get('a'), 1)
# list getitem
self.assert_equal(md.getlist('a'), [1, 2, 1, 3])
self.assert_equal(md.getlist('d'), [3, 4])
# do not raise if key not found
self.assert_equal(md.getlist('x'), [])
# simple setitem overwrites all values
md['a'] = 42
self.assert_equal(md.getlist('a'), [42])
# list setitem
md.setlist('a', [1, 2, 3])
self.assert_equal(md['a'], 1)
self.assert_equal(md.getlist('a'), [1, 2, 3])
# verify that it does not change original lists
l1 = [1, 2, 3]
md.setlist('a', l1)
del l1[:]
self.assert_equal(md['a'], 1)
# setdefault, setlistdefault
self.assert_equal(md.setdefault('u', 23), 23)
self.assert_equal(md.getlist('u'), [23])
del md['u']
md.setlist('u', [-1, -2])
# delitem
del md['u']
with self.assert_raises(KeyError):
md['u']
del md['d']
self.assert_equal(md.getlist('d'), [])
# keys, values, items, lists
self.assert_equal(list(sorted(md.keys())), ['a', 'b', 'c'])
self.assert_equal(list(sorted(iterkeys(md))), ['a', 'b', 'c'])
self.assert_equal(list(sorted(itervalues(md))), [1, 2, 3])
self.assert_equal(list(sorted(itervalues(md))), [1, 2, 3])
self.assert_equal(list(sorted(md.items())),
[('a', 1), ('b', 2), ('c', 3)])
self.assert_equal(list(sorted(md.items(multi=True))),
[('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])
self.assert_equal(list(sorted(iteritems(md))),
[('a', 1), ('b', 2), ('c', 3)])
self.assert_equal(list(sorted(iteritems(md, multi=True))),
[('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])
self.assert_equal(list(sorted(md.lists())),
[('a', [1, 2, 3]), ('b', [2]), ('c', [3])])
self.assert_equal(list(sorted(iterlists(md))),
[('a', [1, 2, 3]), ('b', [2]), ('c', [3])])
# copy method
c = md.copy()
self.assert_equal(c['a'], 1)
self.assert_equal(c.getlist('a'), [1, 2, 3])
# copy method 2
c = copy(md)
self.assert_equal(c['a'], 1)
self.assert_equal(c.getlist('a'), [1, 2, 3])
# update with a multidict
od = self.storage_class([('a', 4), ('a', 5), ('y', 0)])
md.update(od)
self.assert_equal(md.getlist('a'), [1, 2, 3, 4, 5])
self.assert_equal(md.getlist('y'), [0])
# update with a regular dict
md = c
od = {'a': 4, 'y': 0}
md.update(od)
self.assert_equal(md.getlist('a'), [1, 2, 3, 4])
self.assert_equal(md.getlist('y'), [0])
# pop, poplist, popitem, popitemlist
self.assert_equal(md.pop('y'), 0)
assert 'y' not in md
self.assert_equal(md.poplist('a'), [1, 2, 3, 4])
assert 'a' not in md
self.assert_equal(md.poplist('missing'), [])
# remaining: b=2, c=3
popped = md.popitem()
assert popped in [('b', 2), ('c', 3)]
popped = md.popitemlist()
assert popped in [('b', [2]), ('c', [3])]
# type conversion
md = self.storage_class({'a': '4', 'b': ['2', '3']})
self.assert_equal(md.get('a', type=int), 4)
self.assert_equal(md.getlist('b', type=int), [2, 3])
# repr
md = self.storage_class([('a', 1), ('a', 2), ('b', 3)])
assert "('a', 1)" in repr(md)
assert "('a', 2)" in repr(md)
assert "('b', 3)" in repr(md)
# add and getlist
md.add('c', '42')
md.add('c', '23')
self.assert_equal(md.getlist('c'), ['42', '23'])
md.add('c', 'blah')
self.assert_equal(md.getlist('c', type=int), [42, 23])
# setdefault
md = self.storage_class()
md.setdefault('x', []).append(42)
md.setdefault('x', []).append(23)
self.assert_equal(md['x'], [42, 23])
# to dict
md = self.storage_class()
md['foo'] = 42
md.add('bar', 1)
md.add('bar', 2)
self.assert_equal(md.to_dict(), {'foo': 42, 'bar': 1})
self.assert_equal(md.to_dict(flat=False), {'foo': [42], 'bar': [1, 2]})
# popitem from empty dict
with self.assert_raises(KeyError):
self.storage_class().popitem()
with self.assert_raises(KeyError):
self.storage_class().popitemlist()
# key errors are of a special type
with self.assert_raises(BadRequestKeyError):
self.storage_class()[42]
# setlist works
md = self.storage_class()
md['foo'] = 42
md.setlist('foo', [1, 2])
self.assert_equal(md.getlist('foo'), [1, 2])
class ImmutableDictBaseTestCase(WerkzeugTestCase):
storage_class = None
def test_follows_dict_interface(self):
cls = self.storage_class
data = {'foo': 1, 'bar': 2, 'baz': 3}
d = cls(data)
self.assert_equal(d['foo'], 1)
self.assert_equal(d['bar'], 2)
self.assert_equal(d['baz'], 3)
self.assert_equal(sorted(d.keys()), ['bar', 'baz', 'foo'])
self.assert_true('foo' in d)
self.assert_true('foox' not in d)
self.assert_equal(len(d), 3)
def test_copies_are_mutable(self):
cls = self.storage_class
immutable = cls({'a': 1})
with self.assert_raises(TypeError):
immutable.pop('a')
mutable = immutable.copy()
mutable.pop('a')
self.assert_true('a' in immutable)
self.assert_true(mutable is not immutable)
self.assert_true(copy(immutable) is immutable)
def test_dict_is_hashable(self):
cls = self.storage_class
immutable = cls({'a': 1, 'b': 2})
immutable2 = cls({'a': 2, 'b': 2})
x = set([immutable])
self.assert_true(immutable in x)
self.assert_true(immutable2 not in x)
x.discard(immutable)
self.assert_true(immutable not in x)
self.assert_true(immutable2 not in x)
x.add(immutable2)
self.assert_true(immutable not in x)
self.assert_true(immutable2 in x)
x.add(immutable)
self.assert_true(immutable in x)
self.assert_true(immutable2 in x)
class ImmutableTypeConversionDictTestCase(ImmutableDictBaseTestCase):
storage_class = datastructures.ImmutableTypeConversionDict
class ImmutableMultiDictTestCase(ImmutableDictBaseTestCase):
storage_class = datastructures.ImmutableMultiDict
def test_multidict_is_hashable(self):
cls = self.storage_class
immutable = cls({'a': [1, 2], 'b': 2})
immutable2 = cls({'a': [1], 'b': 2})
x = set([immutable])
self.assert_true(immutable in x)
self.assert_true(immutable2 not in x)
x.discard(immutable)
self.assert_true(immutable not in x)
self.assert_true(immutable2 not in x)
x.add(immutable2)
self.assert_true(immutable not in x)
self.assert_true(immutable2 in x)
x.add(immutable)
self.assert_true(immutable in x)
self.assert_true(immutable2 in x)
class ImmutableDictTestCase(ImmutableDictBaseTestCase):
storage_class = datastructures.ImmutableDict
class ImmutableOrderedMultiDictTestCase(ImmutableDictBaseTestCase):
storage_class = datastructures.ImmutableOrderedMultiDict
def test_ordered_multidict_is_hashable(self):
a = self.storage_class([('a', 1), ('b', 1), ('a', 2)])
b = self.storage_class([('a', 1), ('a', 2), ('b', 1)])
self.assert_not_equal(hash(a), hash(b))
class MultiDictTestCase(MutableMultiDictBaseTestCase):
storage_class = datastructures.MultiDict
def test_multidict_pop(self):
make_d = lambda: self.storage_class({'foo': [1, 2, 3, 4]})
d = make_d()
self.assert_equal(d.pop('foo'), 1)
assert not d
d = make_d()
self.assert_equal(d.pop('foo', 32), 1)
assert not d
d = make_d()
self.assert_equal(d.pop('foos', 32), 32)
assert d
with self.assert_raises(KeyError):
d.pop('foos')
def test_setlistdefault(self):
md = self.storage_class()
self.assert_equal(md.setlistdefault('u', [-1, -2]), [-1, -2])
self.assert_equal(md.getlist('u'), [-1, -2])
self.assert_equal(md['u'], -1)
def test_iter_interfaces(self):
mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3),
('a', 1), ('a', 3), ('d', 4), ('c', 3)]
md = self.storage_class(mapping)
self.assert_equal(list(zip(md.keys(), md.listvalues())),
list(md.lists()))
self.assert_equal(list(zip(md, iterlistvalues(md))),
list(iterlists(md)))
self.assert_equal(list(zip(iterkeys(md), iterlistvalues(md))),
list(iterlists(md)))
class OrderedMultiDictTestCase(MutableMultiDictBaseTestCase):
storage_class = datastructures.OrderedMultiDict
def test_ordered_interface(self):
cls = self.storage_class
d = cls()
assert not d
d.add('foo', 'bar')
self.assert_equal(len(d), 1)
d.add('foo', 'baz')
self.assert_equal(len(d), 1)
self.assert_equal(list(iteritems(d)), [('foo', 'bar')])
self.assert_equal(list(d), ['foo'])
self.assert_equal(list(iteritems(d, multi=True)),
[('foo', 'bar'), ('foo', 'baz')])
del d['foo']
assert not d
self.assert_equal(len(d), 0)
self.assert_equal(list(d), [])
d.update([('foo', 1), ('foo', 2), ('bar', 42)])
d.add('foo', 3)
self.assert_equal(d.getlist('foo'), [1, 2, 3])
self.assert_equal(d.getlist('bar'), [42])
self.assert_equal(list(iteritems(d)), [('foo', 1), ('bar', 42)])
expected = ['foo', 'bar']
self.assert_sequence_equal(list(d.keys()), expected)
self.assert_sequence_equal(list(d), expected)
self.assert_sequence_equal(list(iterkeys(d)), expected)
self.assert_equal(list(iteritems(d, multi=True)),
[('foo', 1), ('foo', 2), ('bar', 42), ('foo', 3)])
self.assert_equal(len(d), 2)
self.assert_equal(d.pop('foo'), 1)
assert d.pop('blafasel', None) is None
self.assert_equal(d.pop('blafasel', 42), 42)
self.assert_equal(len(d), 1)
self.assert_equal(d.poplist('bar'), [42])
assert not d
        assert d.get('missingkey') is None
d.add('foo', 42)
d.add('foo', 23)
d.add('bar', 2)
d.add('foo', 42)
self.assert_equal(d, datastructures.MultiDict(d))
id = self.storage_class(d)
self.assert_equal(d, id)
d.add('foo', 2)
assert d != id
d.update({'blah': [1, 2, 3]})
self.assert_equal(d['blah'], 1)
self.assert_equal(d.getlist('blah'), [1, 2, 3])
# setlist works
d = self.storage_class()
d['foo'] = 42
d.setlist('foo', [1, 2])
self.assert_equal(d.getlist('foo'), [1, 2])
with self.assert_raises(BadRequestKeyError):
d.pop('missing')
with self.assert_raises(BadRequestKeyError):
d['missing']
# popping
d = self.storage_class()
d.add('foo', 23)
d.add('foo', 42)
d.add('foo', 1)
self.assert_equal(d.popitem(), ('foo', 23))
with self.assert_raises(BadRequestKeyError):
d.popitem()
assert not d
d.add('foo', 23)
d.add('foo', 42)
d.add('foo', 1)
self.assert_equal(d.popitemlist(), ('foo', [23, 42, 1]))
with self.assert_raises(BadRequestKeyError):
d.popitemlist()
def test_iterables(self):
a = datastructures.MultiDict((("key_a", "value_a"),))
b = datastructures.MultiDict((("key_b", "value_b"),))
ab = datastructures.CombinedMultiDict((a,b))
self.assert_equal(sorted(ab.lists()), [('key_a', ['value_a']), ('key_b', ['value_b'])])
self.assert_equal(sorted(ab.listvalues()), [['value_a'], ['value_b']])
self.assert_equal(sorted(ab.keys()), ["key_a", "key_b"])
self.assert_equal(sorted(iterlists(ab)), [('key_a', ['value_a']), ('key_b', ['value_b'])])
self.assert_equal(sorted(iterlistvalues(ab)), [['value_a'], ['value_b']])
self.assert_equal(sorted(iterkeys(ab)), ["key_a", "key_b"])
class CombinedMultiDictTestCase(WerkzeugTestCase):
storage_class = datastructures.CombinedMultiDict
def test_basic_interface(self):
d1 = datastructures.MultiDict([('foo', '1')])
d2 = datastructures.MultiDict([('bar', '2'), ('bar', '3')])
d = self.storage_class([d1, d2])
# lookup
self.assert_equal(d['foo'], '1')
self.assert_equal(d['bar'], '2')
self.assert_equal(d.getlist('bar'), ['2', '3'])
self.assert_equal(sorted(d.items()),
[('bar', '2'), ('foo', '1')])
self.assert_equal(sorted(d.items(multi=True)),
[('bar', '2'), ('bar', '3'), ('foo', '1')])
assert 'missingkey' not in d
assert 'foo' in d
# type lookup
self.assert_equal(d.get('foo', type=int), 1)
self.assert_equal(d.getlist('bar', type=int), [2, 3])
# get key errors for missing stuff
with self.assert_raises(KeyError):
d['missing']
# make sure that they are immutable
with self.assert_raises(TypeError):
d['foo'] = 'blub'
# copies are immutable
d = d.copy()
with self.assert_raises(TypeError):
d['foo'] = 'blub'
# make sure lists merges
md1 = datastructures.MultiDict((("foo", "bar"),))
md2 = datastructures.MultiDict((("foo", "blafasel"),))
x = self.storage_class((md1, md2))
self.assert_equal(list(iterlists(x)), [('foo', ['bar', 'blafasel'])])
class HeadersTestCase(WerkzeugTestCase):
storage_class = datastructures.Headers
def test_basic_interface(self):
headers = self.storage_class()
headers.add('Content-Type', 'text/plain')
headers.add('X-Foo', 'bar')
assert 'x-Foo' in headers
assert 'Content-type' in headers
headers['Content-Type'] = 'foo/bar'
self.assert_equal(headers['Content-Type'], 'foo/bar')
self.assert_equal(len(headers.getlist('Content-Type')), 1)
# list conversion
self.assert_equal(headers.to_wsgi_list(), [
('Content-Type', 'foo/bar'),
('X-Foo', 'bar')
])
self.assert_equal(str(headers), (
"Content-Type: foo/bar\r\n"
"X-Foo: bar\r\n"
"\r\n"))
self.assert_equal(str(self.storage_class()), "\r\n")
# extended add
headers.add('Content-Disposition', 'attachment', filename='foo')
self.assert_equal(headers['Content-Disposition'],
'attachment; filename=foo')
headers.add('x', 'y', z='"')
self.assert_equal(headers['x'], r'y; z="\""')
def test_defaults_and_conversion(self):
# defaults
headers = self.storage_class([
('Content-Type', 'text/plain'),
('X-Foo', 'bar'),
('X-Bar', '1'),
('X-Bar', '2')
])
self.assert_equal(headers.getlist('x-bar'), ['1', '2'])
self.assert_equal(headers.get('x-Bar'), '1')
self.assert_equal(headers.get('Content-Type'), 'text/plain')
self.assert_equal(headers.setdefault('X-Foo', 'nope'), 'bar')
self.assert_equal(headers.setdefault('X-Bar', 'nope'), '1')
self.assert_equal(headers.setdefault('X-Baz', 'quux'), 'quux')
self.assert_equal(headers.setdefault('X-Baz', 'nope'), 'quux')
headers.pop('X-Baz')
# type conversion
self.assert_equal(headers.get('x-bar', type=int), 1)
self.assert_equal(headers.getlist('x-bar', type=int), [1, 2])
# list like operations
self.assert_equal(headers[0], ('Content-Type', 'text/plain'))
self.assert_equal(headers[:1], self.storage_class([('Content-Type', 'text/plain')]))
del headers[:2]
del headers[-1]
self.assert_equal(headers, self.storage_class([('X-Bar', '1')]))
def test_copying(self):
a = self.storage_class([('foo', 'bar')])
b = a.copy()
a.add('foo', 'baz')
self.assert_equal(a.getlist('foo'), ['bar', 'baz'])
self.assert_equal(b.getlist('foo'), ['bar'])
def test_popping(self):
headers = self.storage_class([('a', 1)])
self.assert_equal(headers.pop('a'), 1)
self.assert_equal(headers.pop('b', 2), 2)
with self.assert_raises(KeyError):
headers.pop('c')
def test_set_arguments(self):
a = self.storage_class()
a.set('Content-Disposition', 'useless')
a.set('Content-Disposition', 'attachment', filename='foo')
self.assert_equal(a['Content-Disposition'], 'attachment; filename=foo')
def test_reject_newlines(self):
h = self.storage_class()
for variation in 'foo\nbar', 'foo\r\nbar', 'foo\rbar':
with self.assert_raises(ValueError):
h['foo'] = variation
with self.assert_raises(ValueError):
h.add('foo', variation)
with self.assert_raises(ValueError):
h.add('foo', 'test', option=variation)
with self.assert_raises(ValueError):
h.set('foo', variation)
with self.assert_raises(ValueError):
h.set('foo', 'test', option=variation)
def test_slicing(self):
# there's nothing wrong with these being native strings
# Headers doesn't care about the data types
h = self.storage_class()
h.set('X-Foo-Poo', 'bleh')
h.set('Content-Type', 'application/whocares')
h.set('X-Forwarded-For', '192.168.0.123')
h[:] = [(k, v) for k, v in h if k.startswith(u'X-')]
self.assert_equal(list(h), [
('X-Foo-Poo', 'bleh'),
('X-Forwarded-For', '192.168.0.123')
])
def test_bytes_operations(self):
h = self.storage_class()
h.set('X-Foo-Poo', 'bleh')
h.set('X-Whoops', b'\xff')
self.assert_equal(h.get('x-foo-poo', as_bytes=True), b'bleh')
self.assert_equal(h.get('x-whoops', as_bytes=True), b'\xff')
class EnvironHeadersTestCase(WerkzeugTestCase):
storage_class = datastructures.EnvironHeaders
def test_basic_interface(self):
# this happens in multiple WSGI servers because they
        # use a very naive way to convert the headers.
broken_env = {
'HTTP_CONTENT_TYPE': 'text/html',
'CONTENT_TYPE': 'text/html',
'HTTP_CONTENT_LENGTH': '0',
'CONTENT_LENGTH': '0',
'HTTP_ACCEPT': '*',
'wsgi.version': (1, 0)
}
headers = self.storage_class(broken_env)
assert headers
self.assert_equal(len(headers), 3)
self.assert_equal(sorted(headers), [
('Accept', '*'),
('Content-Length', '0'),
('Content-Type', 'text/html')
])
assert not self.storage_class({'wsgi.version': (1, 0)})
self.assert_equal(len(self.storage_class({'wsgi.version': (1, 0)})), 0)
def test_return_type_is_unicode(self):
# environ contains native strings; we return unicode
headers = self.storage_class({
'HTTP_FOO': '\xe2\x9c\x93',
'CONTENT_TYPE': 'text/plain',
})
self.assert_equal(headers['Foo'], u"\xe2\x9c\x93")
assert isinstance(headers['Foo'], text_type)
assert isinstance(headers['Content-Type'], text_type)
iter_output = dict(iter(headers))
self.assert_equal(iter_output['Foo'], u"\xe2\x9c\x93")
assert isinstance(iter_output['Foo'], text_type)
assert isinstance(iter_output['Content-Type'], text_type)
def test_bytes_operations(self):
foo_val = '\xff'
h = self.storage_class({
'HTTP_X_FOO': foo_val
})
self.assert_equal(h.get('x-foo', as_bytes=True), b'\xff')
self.assert_equal(h.get('x-foo'), u'\xff')
class HeaderSetTestCase(WerkzeugTestCase):
storage_class = datastructures.HeaderSet
def test_basic_interface(self):
hs = self.storage_class()
hs.add('foo')
hs.add('bar')
assert 'Bar' in hs
self.assert_equal(hs.find('foo'), 0)
self.assert_equal(hs.find('BAR'), 1)
assert hs.find('baz') < 0
hs.discard('missing')
hs.discard('foo')
assert hs.find('foo') < 0
self.assert_equal(hs.find('bar'), 0)
with self.assert_raises(IndexError):
hs.index('missing')
self.assert_equal(hs.index('bar'), 0)
assert hs
hs.clear()
assert not hs
class ImmutableListTestCase(WerkzeugTestCase):
storage_class = datastructures.ImmutableList
def test_list_hashable(self):
t = (1, 2, 3, 4)
l = self.storage_class(t)
self.assert_equal(hash(t), hash(l))
self.assert_not_equal(t, l)
def make_call_asserter(assert_equal_func, func=None):
"""Utility to assert a certain number of function calls.
>>> assert_calls, func = make_call_asserter(self.assert_equal)
>>> with assert_calls(2):
func()
func()
"""
calls = [0]
@contextmanager
def asserter(count, msg=None):
calls[0] = 0
yield
assert_equal_func(calls[0], count, msg)
def wrapped(*args, **kwargs):
calls[0] += 1
if func is not None:
return func(*args, **kwargs)
return asserter, wrapped
class CallbackDictTestCase(WerkzeugTestCase):
storage_class = datastructures.CallbackDict
def test_callback_dict_reads(self):
assert_calls, func = make_call_asserter(self.assert_equal)
initial = {'a': 'foo', 'b': 'bar'}
dct = self.storage_class(initial=initial, on_update=func)
with assert_calls(0, 'callback triggered by read-only method'):
# read-only methods
dct['a']
dct.get('a')
self.assert_raises(KeyError, lambda: dct['x'])
'a' in dct
list(iter(dct))
dct.copy()
with assert_calls(0, 'callback triggered without modification'):
# methods that may write but don't
dct.pop('z', None)
dct.setdefault('a')
def test_callback_dict_writes(self):
assert_calls, func = make_call_asserter(self.assert_equal)
initial = {'a': 'foo', 'b': 'bar'}
dct = self.storage_class(initial=initial, on_update=func)
with assert_calls(8, 'callback not triggered by write method'):
# always-write methods
dct['z'] = 123
dct['z'] = 123 # must trigger again
del dct['z']
dct.pop('b', None)
dct.setdefault('x')
dct.popitem()
dct.update([])
dct.clear()
with assert_calls(0, 'callback triggered by failed del'):
self.assert_raises(KeyError, lambda: dct.__delitem__('x'))
with assert_calls(0, 'callback triggered by failed pop'):
self.assert_raises(KeyError, lambda: dct.pop('x'))
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(MultiDictTestCase))
suite.addTest(unittest.makeSuite(OrderedMultiDictTestCase))
suite.addTest(unittest.makeSuite(CombinedMultiDictTestCase))
suite.addTest(unittest.makeSuite(ImmutableTypeConversionDictTestCase))
suite.addTest(unittest.makeSuite(ImmutableMultiDictTestCase))
suite.addTest(unittest.makeSuite(ImmutableDictTestCase))
suite.addTest(unittest.makeSuite(ImmutableOrderedMultiDictTestCase))
suite.addTest(unittest.makeSuite(HeadersTestCase))
suite.addTest(unittest.makeSuite(EnvironHeadersTestCase))
suite.addTest(unittest.makeSuite(HeaderSetTestCase))
suite.addTest(unittest.makeSuite(NativeItermethodsTestCase))
suite.addTest(unittest.makeSuite(CallbackDictTestCase))
return suite
| apache-2.0 |
G33KS44n/mysql-5.6 | xtrabackup/test/python/testtools/runtest.py | 42 | 7648 | # Copyright (c) 2009-2010 testtools developers. See LICENSE for details.
"""Individual test case execution."""
__all__ = [
'MultipleExceptions',
'RunTest',
]
import sys
from testtools.testresult import ExtendedToOriginalDecorator
class MultipleExceptions(Exception):
"""Represents many exceptions raised from some operation.
:ivar args: The sys.exc_info() tuples for each exception.
"""
class RunTest(object):
"""An object to run a test.
RunTest objects are used to implement the internal logic involved in
running a test. TestCase.__init__ stores _RunTest as the class of RunTest
to execute. Passing the runTest= parameter to TestCase.__init__ allows a
different RunTest class to be used to execute the test.
Subclassing or replacing RunTest can be useful to add functionality to the
way that tests are run in a given project.
:ivar case: The test case that is to be run.
:ivar result: The result object a case is reporting to.
:ivar handlers: A list of (ExceptionClass, handler_function) for
exceptions that should be caught if raised from the user
code. Exceptions that are caught are checked against this list in
first to last order. There is a catch-all of 'Exception' at the end
of the list, so to add a new exception to the list, insert it at the
front (which ensures that it will be checked before any existing base
classes in the list). If you add multiple exceptions, some of which are
subclasses of each other, add the most specific exceptions last (so
they come before their parent classes in the list).
:ivar exception_caught: An object returned when _run_user catches an
exception.
:ivar _exceptions: A list of caught exceptions, used to do the single
reporting of error/failure/skip etc.
"""
def __init__(self, case, handlers=None):
"""Create a RunTest to run a case.
:param case: A testtools.TestCase test case object.
:param handlers: Exception handlers for this RunTest. These are stored
in self.handlers and can be modified later if needed.
"""
self.case = case
self.handlers = handlers or []
self.exception_caught = object()
self._exceptions = []
def run(self, result=None):
"""Run self.case reporting activity to result.
:param result: Optional testtools.TestResult to report activity to.
:return: The result object the test was run against.
"""
if result is None:
actual_result = self.case.defaultTestResult()
actual_result.startTestRun()
else:
actual_result = result
try:
return self._run_one(actual_result)
finally:
if result is None:
actual_result.stopTestRun()
def _run_one(self, result):
"""Run one test reporting to result.
:param result: A testtools.TestResult to report activity to.
This result object is decorated with an ExtendedToOriginalDecorator
to ensure that the latest TestResult API can be used with
confidence by client code.
:return: The result object the test was run against.
"""
return self._run_prepared_result(ExtendedToOriginalDecorator(result))
def _run_prepared_result(self, result):
"""Run one test reporting to result.
:param result: A testtools.TestResult to report activity to.
:return: The result object the test was run against.
"""
result.startTest(self.case)
self.result = result
try:
self._exceptions = []
self._run_core()
if self._exceptions:
# One or more caught exceptions, now trigger the test's
# reporting method for just one.
e = self._exceptions.pop()
for exc_class, handler in self.handlers:
if isinstance(e, exc_class):
handler(self.case, self.result, e)
break
finally:
result.stopTest(self.case)
return result
def _run_core(self):
"""Run the user supplied test code."""
if self.exception_caught == self._run_user(self.case._run_setup,
self.result):
# Don't run the test method if we failed getting here.
self._run_cleanups(self.result)
return
# Run everything from here on in. If any of the methods raise an
# exception we'll have failed.
failed = False
try:
if self.exception_caught == self._run_user(
self.case._run_test_method, self.result):
failed = True
finally:
try:
if self.exception_caught == self._run_user(
self.case._run_teardown, self.result):
failed = True
finally:
try:
if self.exception_caught == self._run_user(
self._run_cleanups, self.result):
failed = True
finally:
if not failed:
self.result.addSuccess(self.case,
details=self.case.getDetails())
def _run_cleanups(self, result):
"""Run the cleanups that have been added with addCleanup.
See the docstring for addCleanup for more information.
:return: None if all cleanups ran without error,
``exception_caught`` if there was an error.
"""
failing = False
while self.case._cleanups:
function, arguments, keywordArguments = self.case._cleanups.pop()
got_exception = self._run_user(
function, *arguments, **keywordArguments)
if got_exception == self.exception_caught:
failing = True
if failing:
return self.exception_caught
def _run_user(self, fn, *args, **kwargs):
"""Run a user supplied function.
Exceptions are processed by `_got_user_exception`.
:return: Either whatever 'fn' returns or ``exception_caught`` if
'fn' raised an exception.
"""
try:
return fn(*args, **kwargs)
except KeyboardInterrupt:
raise
except:
return self._got_user_exception(sys.exc_info())
def _got_user_exception(self, exc_info, tb_label='traceback'):
"""Called when user code raises an exception.
If 'exc_info' is a `MultipleExceptions`, then we recurse into it
unpacking the errors that it's made up from.
:param exc_info: A sys.exc_info() tuple for the user error.
:param tb_label: An optional string label for the error. If
not specified, will default to 'traceback'.
:return: 'exception_caught' if we catch one of the exceptions that
have handlers in 'handlers', otherwise raise the error.
"""
if exc_info[0] is MultipleExceptions:
for sub_exc_info in exc_info[1].args:
self._got_user_exception(sub_exc_info, tb_label)
return self.exception_caught
try:
e = exc_info[1]
self.case.onException(exc_info, tb_label=tb_label)
finally:
del exc_info
for exc_class, handler in self.handlers:
if isinstance(e, exc_class):
self._exceptions.append(e)
return self.exception_caught
raise e
| gpl-2.0 |
garoose/eecs494.p2 | jni/external/freetype2/src/tools/docmaker/docmaker.py | 463 | 2766 | #!/usr/bin/env python
#
# DocMaker (c) 2002, 2004, 2008 David Turner <[email protected]>
#
# This program is a re-write of the original DocMaker tool used
# to generate the API Reference of the FreeType font engine
# by converting in-source comments into structured HTML.
#
# This new version is capable of outputting XML data, as well
# as accepting more liberal formatting options.
#
# It also uses regular expression matching and substitution
# to speed things up significantly.
#
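# Example invocation (illustrative; the title and paths are hypothetical,
# the -t/-o options are those documented in usage() below):
#
#   python docmaker.py -t "FreeType 2" -o docs/reference include/freetype/*.h
#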
from sources import *
from content import *
from utils import *
from formatter import *
from tohtml import *
import utils
import sys, os, time, string, glob, getopt
def usage():
print "\nDocMaker Usage information\n"
print " docmaker [options] file1 [file2 ...]\n"
print "using the following options:\n"
print " -h : print this page"
print " -t : set project title, as in '-t \"My Project\"'"
print " -o : set output directory, as in '-o mydir'"
print " -p : set documentation prefix, as in '-p ft2'"
print ""
print " --title : same as -t, as in '--title=\"My Project\"'"
print " --output : same as -o, as in '--output=mydir'"
print " --prefix : same as -p, as in '--prefix=ft2'"
def main( argv ):
"""main program loop"""
global output_dir
try:
opts, args = getopt.getopt( sys.argv[1:], \
"ht:o:p:", \
["help", "title=", "output=", "prefix="] )
except getopt.GetoptError:
usage()
sys.exit( 2 )
if args == []:
usage()
sys.exit( 1 )
# process options
#
project_title = "Project"
project_prefix = None
output_dir = None
for opt in opts:
if opt[0] in ( "-h", "--help" ):
usage()
sys.exit( 0 )
if opt[0] in ( "-t", "--title" ):
project_title = opt[1]
if opt[0] in ( "-o", "--output" ):
utils.output_dir = opt[1]
if opt[0] in ( "-p", "--prefix" ):
project_prefix = opt[1]
check_output()
# create context and processor
source_processor = SourceProcessor()
content_processor = ContentProcessor()
# retrieve the list of files to process
file_list = make_file_list( args )
for filename in file_list:
source_processor.parse_file( filename )
content_processor.parse_sources( source_processor )
# process sections
content_processor.finish()
formatter = HtmlFormatter( content_processor, project_title, project_prefix )
formatter.toc_dump()
formatter.index_dump()
formatter.section_dump_all()
# if called from the command line
#
if __name__ == '__main__':
main( sys.argv )
# eof
| mit |
lindycoder/fake-switches | fake_switches/dell10g/command_processor/enabled.py | 4 | 9868 | # Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import namedtuple
from fake_switches import group_sequences
from fake_switches.dell.command_processor.enabled import DellEnabledCommandProcessor, to_vlan_ranges, _is_vlan_id, \
_assemble_elements_on_lines
from fake_switches.switch_configuration import VlanPort, AggregatedPort
class Dell10GEnabledCommandProcessor(DellEnabledCommandProcessor):
def __init__(self, config):
super(Dell10GEnabledCommandProcessor, self).__init__(config)
def get_port_configuration(self, port):
conf = []
if port.shutdown:
conf.append('shutdown')
if port.description:
conf.append("description \"{}\"".format(port.description))
if port.mode and port.mode != "access":
conf.append('switchport mode {}'.format(port.mode))
if port.access_vlan:
conf.append('switchport access vlan {}'.format(port.access_vlan))
if port.trunk_native_vlan:
conf.append('switchport general pvid {}'.format(port.trunk_native_vlan))
if port.trunk_vlans:
if port.mode == "general":
conf.append('switchport {} allowed vlan add {}'.format(port.mode, to_vlan_ranges(port.trunk_vlans)))
else:
conf.append('switchport trunk allowed vlan {}'.format(to_vlan_ranges(port.trunk_vlans)))
if port.spanning_tree is False:
conf.append("spanning-tree disable")
if port.spanning_tree_portfast:
conf.append("spanning-tree portfast")
if port.lldp_transmit is False:
conf.append('no lldp transmit')
if port.lldp_receive is False:
conf.append('no lldp receive')
if port.lldp_med is False:
conf.append('no lldp med')
if port.lldp_med_transmit_capabilities is False:
conf.append('no lldp med transmit-tlv capabilities')
if port.lldp_med_transmit_network_policy is False:
conf.append('no lldp med transmit-tlv network-policy')
return conf
def do_show(self, *args):
if "running-config".startswith(args[0]):
if len(args) == 1:
self.write_line('!Current Configuration:')
self.write_line('!System Description "............."')
self.write_line('!System Software Version 3.3.7.3')
self.write_line('!Cut-through mode is configured as disabled')
self.write_line('!')
self.write_line('configure')
self.write_vlans()
for port in self.switch_configuration.ports:
port_config = self.get_port_configuration(port)
if len(port_config) > 0:
self.write_line('interface %s' % port.name)
for item in port_config:
self.write_line(item)
self.write_line('exit')
self.write_line('!')
self.write_line('exit')
elif "interface".startswith(args[1]):
interface_name = ' '.join(args[2:])
port = self.switch_configuration.get_port_by_partial_name(interface_name)
if port:
if isinstance(port, VlanPort):
config = self.get_vlan_port_configuration(port)
else:
config = self.get_port_configuration(port)
if len(config) > 0:
for line in config:
self.write_line(line)
else:
self.write_line("")
self.write_line("")
else:
self.write_line("")
self.write_line("An invalid interface has been used for this function")
elif "vlan".startswith(args[0]):
if len(args) == 1:
self.show_vlans(self.switch_configuration.vlans)
elif args[1] == "id":
if len(args) < 3:
self.write_line("")
self.write_line("Command not found / Incomplete command. Use ? to list commands.")
self.write_line("")
elif not _is_vlan_id(args[2]):
self.write_line(" ^")
self.write_line("Invalid input. Please specify an integer in the range 1 to 4093.")
self.write_line("")
else:
vlan = self.switch_configuration.get_vlan(int(args[2]))
if vlan is None:
self.write_line("")
self.write_line("ERROR: This VLAN does not exist.")
self.write_line("")
else:
self.show_vlans([vlan])
elif "interfaces".startswith(args[0]) and "status".startswith(args[1]):
self.show_interfaces_status()
def write_vlans(self):
named_vlans = []
other_vlans = []
for v in self.switch_configuration.vlans:
if v.name is not None:
named_vlans.append(v)
else:
other_vlans.append(v)
for vlan in named_vlans:
self.write_line('vlan {}'.format(vlan.number))
if vlan.name is not None:
self.write_line('name {}'.format(vlan.name))
self.write_line('exit')
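# to_vlan_ranges collapses the numbers into comma/dash range syntax,
# e.g. [1, 2, 3, 7] -> "1-3,7" (illustrative example)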
self.write_line('vlan {}'.format(to_vlan_ranges([v.number for v in other_vlans])))
self.write_line('exit')
def show_interfaces_status(self):
self.write_line("")
self.write_line("Port Description Vlan Duplex Speed Neg Link Flow Ctrl")
self.write_line(" State Status")
self.write_line("--------- ------------------------- ----- ------ ------- ---- ------ ---------")
for port in self.switch_configuration.ports:
if not isinstance(port, AggregatedPort):
self.write_line(
"Te{name: <7} {desc: <25} {vlan: <5} {duplex: <6} {speed: <7} {neg: <4} {state: <6} {flow}".format(
name=port.name.split(" ")[-1], desc=port.description[:25] if port.description else "", vlan="",
duplex="Full", speed="10000", neg="Auto", state="Up", flow="Active"))
self.write_line("")
self.write_line("")
self.write_line("Port Description Vlan Link")
self.write_line("Channel State")
self.write_line("------- ------------------------------ ----- -------")
for port in self.switch_configuration.ports:
if isinstance(port, AggregatedPort):
self.write_line(
"Po{name: <7} {desc: <28} {vlan: <5} {state}".format(
name=port.name.split(" ")[-1], desc=port.description[:28] if port.description else "",
vlan="trnk", state="Up"))
self.write_line("")
def show_vlans(self, vlans):
self.write_line("")
self.write_line("VLAN Name Ports Type")
self.write_line("----- --------------- ------------- --------------")
for vlan in vlans:
ports_strings = self._build_port_strings(self.get_ports_for_vlan(vlan))
self.write_line("{number: <5} {name: <32} {ports: <13} {type}".format(
number=vlan.number, name=vlan_name(vlan), ports=ports_strings[0],
type="Default" if vlan.number == 1 else "Static"))
for port_string in ports_strings[1:]:
self.write_line("{number: <5} {name: <32} {ports: <13} {type}".format(
number="", name="", ports=port_string, type=""))
self.write_line("")
def _build_port_strings(self, ports):
port_range_list = group_sequences(ports, are_in_sequence=self._are_in_sequence)
port_list = []
for port_range in port_range_list:
first_details = self._get_interface_details(port_range[0].name)
if len(port_range) == 1:
port_list.append("Te{}{}".format(first_details.port_prefix, first_details.port))
else:
port_list.append("Te{0}{1}-{2}".format(first_details.port_prefix, first_details.port, self._get_interface_details(port_range[-1].name).port))
return _assemble_elements_on_lines(port_list, max_line_char=13)
def _get_interface_details(self, interface_name):
interface_descriptor = namedtuple('InterfaceDescriptor', "interface port_prefix port")
re_port_number = re.compile(r'(\d/\d/)(\d+)')
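# e.g. a slot descriptor like "1/0/10" yields port_prefix "1/0/" and
# port 10 (assumed unit/slot/port naming; illustrative example)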
interface, slot_descriptor = interface_name.split(" ")
port_prefix, port = re_port_number.match(slot_descriptor).groups()
return interface_descriptor(interface, port_prefix, int(port))
def do_terminal(self, *args):
self.write_line("")
def vlan_name(vlan):
if vlan.number == 1:
return "default"
elif vlan.name is not None:
return vlan.name
else:
return "VLAN{}".format(vlan.number)
| apache-2.0 |
rgommers/numpy | numpy/core/tests/test_shape_base.py | 4 | 27579 | import pytest
import numpy as np
from numpy.core import (
array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
newaxis, concatenate, stack
)
from numpy.core.shape_base import (_block_dispatcher, _block_setup,
_block_concatenate, _block_slicing)
from numpy.testing import (
assert_, assert_raises, assert_array_equal, assert_equal,
assert_raises_regex, assert_warns, IS_PYPY
)
class TestAtleast1d:
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1]), array([2])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1, 2]), array([2, 3])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_r1array(self):
""" Test to make sure equivalent Travis O's r1array function
"""
assert_(atleast_1d(3).shape == (1,))
assert_(atleast_1d(3j).shape == (1,))
assert_(atleast_1d(3.0).shape == (1,))
assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
class TestAtleast2d:
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_2d(a), atleast_2d(b)]
desired = [array([[1]]), array([[2]])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_2d(a), atleast_2d(b)]
desired = [array([[1, 2]]), array([[2, 3]])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_2d(a), atleast_2d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_2d(a), atleast_2d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_r2array(self):
""" Test to make sure equivalent Travis O's r2array function
"""
assert_(atleast_2d(3).shape == (1, 1))
assert_(atleast_2d([3j, 1]).shape == (1, 2))
assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
class TestAtleast3d:
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_3d(a), atleast_3d(b)]
desired = [array([[[1]]]), array([[[2]]])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_3d(a), atleast_3d(b)]
desired = [array([[[1], [2]]]), array([[[2], [3]]])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_3d(a), atleast_3d(b)]
desired = [a[:,:, newaxis], b[:,:, newaxis]]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_3d(a), atleast_3d(b)]
desired = [a, b]
assert_array_equal(res, desired)
class TestHstack:
def test_non_iterable(self):
assert_raises(TypeError, hstack, 1)
def test_empty_input(self):
assert_raises(ValueError, hstack, ())
def test_0D_array(self):
a = array(1)
b = array(2)
res = hstack([a, b])
desired = array([1, 2])
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1])
b = array([2])
res = hstack([a, b])
desired = array([1, 2])
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1], [2]])
b = array([[1], [2]])
res = hstack([a, b])
desired = array([[1, 1], [2, 2]])
assert_array_equal(res, desired)
def test_generator(self):
with assert_warns(FutureWarning):
hstack((np.arange(3) for _ in range(2)))
with assert_warns(FutureWarning):
hstack(map(lambda x: x, np.ones((3, 2))))
class TestVstack:
def test_non_iterable(self):
assert_raises(TypeError, vstack, 1)
def test_empty_input(self):
assert_raises(ValueError, vstack, ())
def test_0D_array(self):
a = array(1)
b = array(2)
res = vstack([a, b])
desired = array([[1], [2]])
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1])
b = array([2])
res = vstack([a, b])
desired = array([[1], [2]])
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1], [2]])
b = array([[1], [2]])
res = vstack([a, b])
desired = array([[1], [2], [1], [2]])
assert_array_equal(res, desired)
def test_2D_array2(self):
a = array([1, 2])
b = array([1, 2])
res = vstack([a, b])
desired = array([[1, 2], [1, 2]])
assert_array_equal(res, desired)
def test_generator(self):
with assert_warns(FutureWarning):
vstack((np.arange(3) for _ in range(2)))
class TestConcatenate:
def test_returns_copy(self):
a = np.eye(3)
b = np.concatenate([a])
b[0, 0] = 2
assert b[0, 0] != a[0, 0]
def test_exceptions(self):
# test axis must be in bounds
for ndim in [1, 2, 3]:
a = np.ones((1,)*ndim)
np.concatenate((a, a), axis=0) # OK
assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim)
assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1))
# Scalars cannot be concatenated
assert_raises(ValueError, concatenate, (0,))
assert_raises(ValueError, concatenate, (np.array(0),))
# dimensionality must match
assert_raises_regex(
ValueError,
r"all the input arrays must have same number of dimensions, but "
r"the array at index 0 has 1 dimension\(s\) and the array at "
r"index 1 has 2 dimension\(s\)",
np.concatenate, (np.zeros(1), np.zeros((1, 1))))
# test shapes must match except for concatenation axis
a = np.ones((1, 2, 3))
b = np.ones((2, 2, 3))
axis = list(range(3))
for i in range(3):
np.concatenate((a, b), axis=axis[0]) # OK
assert_raises_regex(
ValueError,
"all the input array dimensions for the concatenation axis "
"must match exactly, but along dimension {}, the array at "
"index 0 has size 1 and the array at index 1 has size 2"
.format(i),
np.concatenate, (a, b), axis=axis[1])
assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
a = np.moveaxis(a, -1, 0)
b = np.moveaxis(b, -1, 0)
axis.append(axis.pop(0))
# No arrays to concatenate raises ValueError
assert_raises(ValueError, concatenate, ())
def test_concatenate_axis_None(self):
a = np.arange(4, dtype=np.float64).reshape((2, 2))
b = list(range(3))
c = ['x']
r = np.concatenate((a, a), axis=None)
assert_equal(r.dtype, a.dtype)
assert_equal(r.ndim, 1)
r = np.concatenate((a, b), axis=None)
assert_equal(r.size, a.size + len(b))
assert_equal(r.dtype, a.dtype)
r = np.concatenate((a, b, c), axis=None, dtype="U")
d = array(['0.0', '1.0', '2.0', '3.0',
'0', '1', '2', 'x'])
assert_array_equal(r, d)
out = np.zeros(a.size + len(b))
r = np.concatenate((a, b), axis=None)
rout = np.concatenate((a, b), axis=None, out=out)
assert_(out is rout)
assert_equal(r, rout)
def test_large_concatenate_axis_None(self):
# When no axis is given, concatenate uses flattened versions.
# This also had a bug with many arrays (see gh-5979).
x = np.arange(1, 100)
r = np.concatenate(x, None)
assert_array_equal(x, r)
# This should probably be deprecated:
r = np.concatenate(x, 100) # axis is >= MAXDIMS
assert_array_equal(x, r)
def test_concatenate(self):
# Test concatenate function
# One sequence returns unmodified (but as array)
r4 = list(range(4))
assert_array_equal(concatenate((r4,)), r4)
# Any sequence
assert_array_equal(concatenate((tuple(r4),)), r4)
assert_array_equal(concatenate((array(r4),)), r4)
# 1D default concatenation
r3 = list(range(3))
assert_array_equal(concatenate((r4, r3)), r4 + r3)
# Mixed sequence types
assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3)
assert_array_equal(concatenate((array(r4), r3)), r4 + r3)
# Explicit axis specification
assert_array_equal(concatenate((r4, r3), 0), r4 + r3)
# Including negative
assert_array_equal(concatenate((r4, r3), -1), r4 + r3)
# 2D
a23 = array([[10, 11, 12], [13, 14, 15]])
a13 = array([[0, 1, 2]])
res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]])
assert_array_equal(concatenate((a23, a13)), res)
assert_array_equal(concatenate((a23, a13), 0), res)
assert_array_equal(concatenate((a23.T, a13.T), 1), res.T)
assert_array_equal(concatenate((a23.T, a13.T), -1), res.T)
# Array shapes must match
assert_raises(ValueError, concatenate, (a23.T, a13.T), 0)
# 3D
res = arange(2 * 3 * 7).reshape((2, 3, 7))
a0 = res[..., :4]
a1 = res[..., 4:6]
a2 = res[..., 6:]
assert_array_equal(concatenate((a0, a1, a2), 2), res)
assert_array_equal(concatenate((a0, a1, a2), -1), res)
assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
out = res.copy()
rout = concatenate((a0, a1, a2), 2, out=out)
assert_(out is rout)
assert_equal(res, rout)
@pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython")
def test_operator_concat(self):
import operator
a = array([1, 2])
b = array([3, 4])
n = [1,2]
res = array([1, 2, 3, 4])
assert_raises(TypeError, operator.concat, a, b)
assert_raises(TypeError, operator.concat, a, n)
assert_raises(TypeError, operator.concat, n, a)
assert_raises(TypeError, operator.concat, a, 1)
assert_raises(TypeError, operator.concat, 1, a)
def test_bad_out_shape(self):
a = array([1, 2])
b = array([3, 4])
assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1)))
assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
concatenate((a, b), out=np.empty(4))
@pytest.mark.parametrize("axis", [None, 0])
@pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8", "S4"])
@pytest.mark.parametrize("casting",
['no', 'equiv', 'safe', 'same_kind', 'unsafe'])
def test_out_and_dtype(self, axis, out_dtype, casting):
# Compare usage of `out=out` with `dtype=out.dtype`
out = np.empty(4, dtype=out_dtype)
to_concat = (array([1.1, 2.2]), array([3.3, 4.4]))
if not np.can_cast(to_concat[0], out_dtype, casting=casting):
with assert_raises(TypeError):
concatenate(to_concat, out=out, axis=axis, casting=casting)
with assert_raises(TypeError):
concatenate(to_concat, dtype=out.dtype,
axis=axis, casting=casting)
else:
res_out = concatenate(to_concat, out=out,
axis=axis, casting=casting)
res_dtype = concatenate(to_concat, dtype=out.dtype,
axis=axis, casting=casting)
assert res_out is out
assert_array_equal(out, res_dtype)
assert res_dtype.dtype == out_dtype
with assert_raises(TypeError):
concatenate(to_concat, out=out, dtype=out_dtype, axis=axis)
@pytest.mark.parametrize("axis", [None, 0])
@pytest.mark.parametrize("string_dt", ["S", "U", "S0", "U0"])
@pytest.mark.parametrize("arrs",
[([0.],), ([0.], [1]), ([0], ["string"], [1.])])
def test_dtype_with_promotion(self, arrs, string_dt, axis):
# Note that U0 and S0 should be deprecated eventually and changed to
# actually give the empty string result (together with `np.array`)
res = np.concatenate(arrs, axis=axis, dtype=string_dt, casting="unsafe")
# The actual dtype should be identical to a cast (of a double array):
assert res.dtype == np.array(1.).astype(string_dt).dtype
@pytest.mark.parametrize("axis", [None, 0])
def test_string_dtype_does_not_inspect(self, axis):
# The error here currently depends on NPY_USE_NEW_CASTINGIMPL as
# the new version rejects using the "default string length" of 64.
# The new behaviour is better, `np.array()` and `arr.astype()` would
# have to be used instead. (currently only raises due to unsafe cast)
with pytest.raises((ValueError, TypeError)):
np.concatenate(([None], [1]), dtype="S", axis=axis)
with pytest.raises((ValueError, TypeError)):
np.concatenate(([None], [1]), dtype="U", axis=axis)
@pytest.mark.parametrize("axis", [None, 0])
def test_subarray_error(self, axis):
with pytest.raises(TypeError, match=".*subarray dtype"):
np.concatenate(([1], [1]), dtype="(2,)i", axis=axis)
def test_stack():
# non-iterable input
assert_raises(TypeError, stack, 1)
# 0d input
for input_ in [(1, 2, 3),
[np.int32(1), np.int32(2), np.int32(3)],
[np.array(1), np.array(2), np.array(3)]]:
assert_array_equal(stack(input_), [1, 2, 3])
# 1d input examples
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
r1 = array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(np.stack((a, b)), r1)
assert_array_equal(np.stack((a, b), axis=1), r1.T)
# all input types
assert_array_equal(np.stack(list([a, b])), r1)
assert_array_equal(np.stack(array([a, b])), r1)
# all shapes for 1d input
arrays = [np.random.randn(3) for _ in range(10)]
axes = [0, 1, -1, -2]
expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)]
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2)
assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3)
# all shapes for 2d input
arrays = [np.random.randn(3, 4) for _ in range(10)]
axes = [0, 1, 2, -1, -2, -3]
expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10),
(3, 4, 10), (3, 10, 4), (10, 3, 4)]
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
# empty arrays
assert_(stack([[], [], []]).shape == (3, 0))
assert_(stack([[], [], []], axis=1).shape == (0, 3))
# out
out = np.zeros_like(r1)
np.stack((a, b), out=out)
assert_array_equal(out, r1)
# edge cases
assert_raises_regex(ValueError, 'need at least one array', stack, [])
assert_raises_regex(ValueError, 'must have the same shape',
stack, [1, np.arange(3)])
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(3), 1])
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(3), 1], axis=1)
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(2), np.arange(3)])
# generator is deprecated
with assert_warns(FutureWarning):
result = stack((x for x in range(3)))
assert_array_equal(result, np.array([0, 1, 2]))
class TestBlock:
@pytest.fixture(params=['block', 'force_concatenate', 'force_slicing'])
def block(self, request):
# blocking small arrays and large arrays go through different paths.
# the algorithm is triggered depending on the number of element
# copies required.
# We define a test fixture that forces most tests to go through
# both code paths.
# Ultimately, this should be removed if a single algorithm is found
# to be faster for both small and large arrays.
def _block_force_concatenate(arrays):
arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
return _block_concatenate(arrays, list_ndim, result_ndim)
def _block_force_slicing(arrays):
arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
return _block_slicing(arrays, list_ndim, result_ndim)
if request.param == 'force_concatenate':
return _block_force_concatenate
elif request.param == 'force_slicing':
return _block_force_slicing
elif request.param == 'block':
return block
else:
raise ValueError('Unknown blocking request. There is a typo in the tests.')
def test_returns_copy(self, block):
a = np.eye(3)
b = block(a)
b[0, 0] = 2
assert b[0, 0] != a[0, 0]
def test_block_total_size_estimate(self, block):
_, _, _, total_size = _block_setup([1])
assert total_size == 1
_, _, _, total_size = _block_setup([[1]])
assert total_size == 1
_, _, _, total_size = _block_setup([[1, 1]])
assert total_size == 2
_, _, _, total_size = _block_setup([[1], [1]])
assert total_size == 2
_, _, _, total_size = _block_setup([[1, 2], [3, 4]])
assert total_size == 4
def test_block_simple_row_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
desired = np.array([[1, 1, 2, 2],
[1, 1, 2, 2]])
result = block([a_2d, b_2d])
assert_equal(desired, result)
def test_block_simple_column_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
expected = np.array([[1, 1],
[1, 1],
[2, 2],
[2, 2]])
result = block([[a_2d], [b_2d]])
assert_equal(expected, result)
def test_block_with_1d_arrays_row_wise(self, block):
# 1-D vectors are treated as row arrays
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
expected = np.array([1, 2, 3, 2, 3, 4])
result = block([a, b])
assert_equal(expected, result)
def test_block_with_1d_arrays_multiple_rows(self, block):
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
expected = np.array([[1, 2, 3, 2, 3, 4],
[1, 2, 3, 2, 3, 4]])
result = block([[a, b], [a, b]])
assert_equal(expected, result)
def test_block_with_1d_arrays_column_wise(self, block):
# 1-D vectors are treated as row arrays
a_1d = np.array([1, 2, 3])
b_1d = np.array([2, 3, 4])
expected = np.array([[1, 2, 3],
[2, 3, 4]])
result = block([[a_1d], [b_1d]])
assert_equal(expected, result)
def test_block_mixed_1d_and_2d(self, block):
a_2d = np.ones((2, 2))
b_1d = np.array([2, 2])
result = block([[a_2d], [b_1d]])
expected = np.array([[1, 1],
[1, 1],
[2, 2]])
assert_equal(expected, result)
def test_block_complicated(self, block):
# a bit more complicated
one_2d = np.array([[1, 1, 1]])
two_2d = np.array([[2, 2, 2]])
three_2d = np.array([[3, 3, 3, 3, 3, 3]])
four_1d = np.array([4, 4, 4, 4, 4, 4])
five_0d = np.array(5)
six_1d = np.array([6, 6, 6, 6, 6])
zero_2d = np.zeros((2, 6))
expected = np.array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 3, 3, 3],
[4, 4, 4, 4, 4, 4],
[5, 6, 6, 6, 6, 6],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
result = block([[one_2d, two_2d],
[three_2d],
[four_1d],
[five_0d, six_1d],
[zero_2d]])
assert_equal(result, expected)
def test_nested(self, block):
one = np.array([1, 1, 1])
two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
three = np.array([3, 3, 3])
four = np.array([4, 4, 4])
five = np.array(5)
six = np.array([6, 6, 6, 6, 6])
zero = np.zeros((2, 6))
result = block([
[
block([
[one],
[three],
[four]
]),
two
],
[five, six],
[zero]
])
expected = np.array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 2, 2, 2],
[4, 4, 4, 2, 2, 2],
[5, 6, 6, 6, 6, 6],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
assert_equal(result, expected)
def test_3d(self, block):
a000 = np.ones((2, 2, 2), int) * 1
a100 = np.ones((3, 2, 2), int) * 2
a010 = np.ones((2, 3, 2), int) * 3
a001 = np.ones((2, 2, 3), int) * 4
a011 = np.ones((2, 3, 3), int) * 5
a101 = np.ones((3, 2, 3), int) * 6
a110 = np.ones((3, 3, 2), int) * 7
a111 = np.ones((3, 3, 3), int) * 8
result = block([
[
[a000, a001],
[a010, a011],
],
[
[a100, a101],
[a110, a111],
]
])
expected = array([[[1, 1, 4, 4, 4],
[1, 1, 4, 4, 4],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5]],
[[1, 1, 4, 4, 4],
[1, 1, 4, 4, 4],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]]])
assert_array_equal(result, expected)
def test_block_with_mismatched_shape(self, block):
a = np.array([0, 0])
b = np.eye(2)
assert_raises(ValueError, block, [a, b])
assert_raises(ValueError, block, [b, a])
to_block = [[np.ones((2,3)), np.ones((2,2))],
[np.ones((2,2)), np.ones((2,2))]]
assert_raises(ValueError, block, to_block)
def test_no_lists(self, block):
assert_equal(block(1), np.array(1))
assert_equal(block(np.eye(3)), np.eye(3))
def test_invalid_nesting(self, block):
msg = 'depths are mismatched'
assert_raises_regex(ValueError, msg, block, [1, [2]])
assert_raises_regex(ValueError, msg, block, [1, []])
assert_raises_regex(ValueError, msg, block, [[1], 2])
assert_raises_regex(ValueError, msg, block, [[], 2])
assert_raises_regex(ValueError, msg, block, [
[[1], [2]],
[[3, 4]],
[5] # missing brackets
])
def test_empty_lists(self, block):
assert_raises_regex(ValueError, 'empty', block, [])
assert_raises_regex(ValueError, 'empty', block, [[]])
assert_raises_regex(ValueError, 'empty', block, [[1], []])
def test_tuple(self, block):
assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4]))
assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)])
def test_different_ndims(self, block):
a = 1.
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 1, 3))
result = block([a, b, c])
expected = np.array([[[1., 2., 2., 3., 3., 3.]]])
assert_equal(result, expected)
def test_different_ndims_depths(self, block):
a = 1.
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 2, 3))
result = block([[a, b], [c]])
expected = np.array([[[1., 2., 2.],
[3., 3., 3.],
[3., 3., 3.]]])
assert_equal(result, expected)
def test_block_memory_order(self, block):
# 3D
arr_c = np.zeros((3,)*3, order='C')
arr_f = np.zeros((3,)*3, order='F')
b_c = [[[arr_c, arr_c],
[arr_c, arr_c]],
[[arr_c, arr_c],
[arr_c, arr_c]]]
b_f = [[[arr_f, arr_f],
[arr_f, arr_f]],
[[arr_f, arr_f],
[arr_f, arr_f]]]
assert block(b_c).flags['C_CONTIGUOUS']
assert block(b_f).flags['F_CONTIGUOUS']
arr_c = np.zeros((3, 3), order='C')
arr_f = np.zeros((3, 3), order='F')
# 2D
b_c = [[arr_c, arr_c],
[arr_c, arr_c]]
b_f = [[arr_f, arr_f],
[arr_f, arr_f]]
assert block(b_c).flags['C_CONTIGUOUS']
assert block(b_f).flags['F_CONTIGUOUS']
def test_block_dispatcher():
class ArrayLike:
pass
a = ArrayLike()
b = ArrayLike()
c = ArrayLike()
assert_equal(list(_block_dispatcher(a)), [a])
assert_equal(list(_block_dispatcher([a])), [a])
assert_equal(list(_block_dispatcher([a, b])), [a, b])
assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c])
# don't recurse into non-lists
assert_equal(list(_block_dispatcher((a, b))), [(a, b)])
| bsd-3-clause |
cole945/binutils-gdb | gdb/testsuite/gdb.python/py-frame-args.py | 41 | 2186 | # Copyright (C) 2013-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import gdb
class pp_s (object):
def __init__(self, val):
self.val = val
def to_string(self):
m = self.val["m"]
return "m=<" + str(self.val["m"]) + ">"
class pp_ss (object):
def __init__(self, val):
self.val = val
def to_string(self):
return "super struct"
def children (self):
yield 'a', self.val['a']
yield 'b', self.val['b']
def lookup_function (val):
"Look-up and return a pretty-printer that can print val."
# Get the type.
type = val.type
# If it points to a reference, get the reference.
if type.code == gdb.TYPE_CODE_REF:
type = type.target ()
# Get the unqualified type, stripped of typedefs.
type = type.unqualified ().strip_typedefs ()
# Get the type name.
typename = type.tag
if typename is None:
return None
# Iterate over local dictionary of types to determine
# if a printer is registered for that type. Return an
# instantiation of the printer if found.
for function in pretty_printers_dict:
if function.match (typename):
return pretty_printers_dict[function] (val)
# Cannot find a pretty printer. Return None.
return None
def register_pretty_printers ():
pretty_printers_dict[re.compile ('^s$')] = pp_s
pretty_printers_dict[re.compile ('^ss$')] = pp_ss
pretty_printers_dict = {}
register_pretty_printers ()
gdb.pretty_printers.append (lookup_function)
| gpl-2.0 |
spulec/moto | tests/test_resourcegroups/test_resourcegroups.py | 1 | 8542 | from __future__ import unicode_literals
import boto3
import json
import sure # noqa
from moto import mock_resourcegroups
@mock_resourcegroups
def test_create_group():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
response = resource_groups.create_group(
Name="test_resource_group",
Description="description",
ResourceQuery={
"Type": "TAG_FILTERS_1_0",
"Query": json.dumps(
{
"ResourceTypeFilters": ["AWS::AllSupported"],
"TagFilters": [
{"Key": "resources_tag_key", "Values": ["resources_tag_value"]}
],
}
),
},
Tags={"resource_group_tag_key": "resource_group_tag_value"},
)
response["Group"]["Name"].should.contain("test_resource_group")
response["ResourceQuery"]["Type"].should.contain("TAG_FILTERS_1_0")
response["Tags"]["resource_group_tag_key"].should.contain(
"resource_group_tag_value"
)
@mock_resourcegroups
def test_delete_group():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
test_create_group()
response = resource_groups.delete_group(GroupName="test_resource_group")
response["Group"]["Name"].should.contain("test_resource_group")
response = resource_groups.list_groups()
response["GroupIdentifiers"].should.have.length_of(0)
response["Groups"].should.have.length_of(0)
@mock_resourcegroups
def test_get_group():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
test_create_group()
response = resource_groups.get_group(GroupName="test_resource_group")
response["Group"]["Description"].should.contain("description")
return response
@mock_resourcegroups
def test_get_group_query():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
get_response = test_get_group()
response = resource_groups.get_group_query(GroupName="test_resource_group")
response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("TAG_FILTERS_1_0")
response_get = resource_groups.get_group_query(
Group=get_response.get("Group").get("GroupArn")
)
response_get["GroupQuery"]["ResourceQuery"]["Type"].should.contain(
"TAG_FILTERS_1_0"
)
@mock_resourcegroups
def test_get_tags():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
response = test_get_group()
response = resource_groups.get_tags(Arn=response["Group"]["GroupArn"])
response["Tags"].should.have.length_of(1)
response["Tags"]["resource_group_tag_key"].should.contain(
"resource_group_tag_value"
)
return response
@mock_resourcegroups
def test_list_groups():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
test_create_group()
response = resource_groups.list_groups()
response["GroupIdentifiers"].should.have.length_of(1)
response["Groups"].should.have.length_of(1)
@mock_resourcegroups
def test_tag():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
response = test_get_tags()
response = resource_groups.tag(
Arn=response["Arn"],
Tags={"resource_group_tag_key_2": "resource_group_tag_value_2"},
)
response["Tags"]["resource_group_tag_key_2"].should.contain(
"resource_group_tag_value_2"
)
response = resource_groups.get_tags(Arn=response["Arn"])
response["Tags"].should.have.length_of(2)
response["Tags"]["resource_group_tag_key_2"].should.contain(
"resource_group_tag_value_2"
)
@mock_resourcegroups
def test_untag():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
response = test_get_tags()
response = resource_groups.untag(
Arn=response["Arn"], Keys=["resource_group_tag_key"]
)
response["Keys"].should.contain("resource_group_tag_key")
response = resource_groups.get_tags(Arn=response["Arn"])
response["Tags"].should.have.length_of(0)
@mock_resourcegroups
def test_update_group():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
get_response = test_get_group()
response = resource_groups.update_group(
GroupName="test_resource_group", Description="description_2"
)
response["Group"]["Description"].should.contain("description_2")
response = resource_groups.get_group(GroupName="test_resource_group")
response["Group"]["Description"].should.contain("description_2")
@mock_resourcegroups
def test_get_group_configuration():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
group = test_get_group()
configuration = [
{
"Type": "AWS::ResourceGroups::Generic",
"Parameters": [
{"Name": "allowed-resource-types", "Values": ["AWS::EC2::Host"]},
{"Name": "deletion-protection", "Values": ["UNLESS_EMPTY"]},
],
}
]
resource_groups.put_group_configuration(
Group=group["Group"]["Name"], Configuration=configuration
)
configuration_resp = resource_groups.get_group_configuration(
Group=group["Group"]["Name"]
)
assert (
configuration_resp.get("GroupConfiguration").get("Configuration")
== configuration
)
@mock_resourcegroups
def test_create_group_with_configuration():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
configuration = [
{
"Type": "AWS::ResourceGroups::Generic",
"Parameters": [
{"Name": "allowed-resource-types", "Values": ["AWS::EC2::Host"]},
{"Name": "deletion-protection", "Values": ["UNLESS_EMPTY"]},
],
}
]
response = resource_groups.create_group(
Name="test_resource_group_new",
Description="description",
ResourceQuery={
"Type": "TAG_FILTERS_1_0",
"Query": json.dumps(
{
"ResourceTypeFilters": ["AWS::AllSupported"],
"TagFilters": [
{"Key": "resources_tag_key", "Values": ["resources_tag_value"]}
],
}
),
},
Configuration=configuration,
Tags={"resource_group_tag_key": "resource_group_tag_value"},
)
response["Group"]["Name"].should.contain("test_resource_group_new")
assert response["GroupConfiguration"]["Configuration"] == configuration
response["Tags"]["resource_group_tag_key"].should.contain(
"resource_group_tag_value"
)
@mock_resourcegroups
def test_update_group_query():
resource_groups = boto3.client("resource-groups", region_name="us-east-1")
group_response = test_get_group()
response = resource_groups.update_group_query(
GroupName="test_resource_group",
ResourceQuery={
"Type": "CLOUDFORMATION_STACK_1_0",
"Query": json.dumps(
{
"ResourceTypeFilters": ["AWS::AllSupported"],
"StackIdentifier": (
"arn:aws:cloudformation:eu-west-1:012345678912:stack/"
"test_stack/c223eca0-e744-11e8-8910-500c41f59083"
),
}
),
},
)
response["GroupQuery"]["ResourceQuery"]["Type"].should.contain(
"CLOUDFORMATION_STACK_1_0"
)
response = resource_groups.get_group_query(GroupName="test_resource_group")
response["GroupQuery"]["ResourceQuery"]["Type"].should.contain(
"CLOUDFORMATION_STACK_1_0"
)
response = resource_groups.update_group_query(
Group=group_response.get("Group").get("GroupArn"),
ResourceQuery={
"Type": "TAG_FILTERS_1_0",
"Query": json.dumps(
{
"ResourceTypeFilters": ["AWS::AllSupported"],
"TagFilters": [
{"Key": "resources_tag_key", "Values": ["resources_tag_value"]}
],
}
),
},
)
response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("TAG_FILTERS_1_0")
response = resource_groups.get_group_query(
Group=group_response.get("Group").get("GroupArn")
)
response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("TAG_FILTERS_1_0")
| apache-2.0 |
kisel/trex-core | scripts/external_libs/nose-1.3.4/python3/nose/tools/nontrivial.py | 11 | 4170 | """Tools not exempt from being descended into in tracebacks"""
import time
__all__ = ['make_decorator', 'raises', 'set_trace', 'timed', 'with_setup',
'TimeExpired', 'istest', 'nottest']
class TimeExpired(AssertionError):
pass
def make_decorator(func):
"""
Wraps a test decorator so as to properly replicate metadata
of the decorated function, including nose's additional stuff
(namely, setup and teardown).
"""
def decorate(newfunc):
if hasattr(func, 'compat_func_name'):
name = func.compat_func_name
else:
name = func.__name__
newfunc.__dict__ = func.__dict__
newfunc.__doc__ = func.__doc__
newfunc.__module__ = func.__module__
if not hasattr(newfunc, 'compat_co_firstlineno'):
newfunc.compat_co_firstlineno = func.__code__.co_firstlineno
try:
newfunc.__name__ = name
except TypeError:
# can't set func name in 2.3
newfunc.compat_func_name = name
return newfunc
return decorate
def raises(*exceptions):
"""Test must raise one of expected exceptions to pass.
Example use::
@raises(TypeError, ValueError)
def test_raises_type_error():
raise TypeError("This test passes")
@raises(Exception)
def test_that_fails_by_passing():
pass
If you want to test many assertions about exceptions in a single test,
you may want to use `assert_raises` instead.
"""
valid = ' or '.join([e.__name__ for e in exceptions])
def decorate(func):
name = func.__name__
def newfunc(*arg, **kw):
try:
func(*arg, **kw)
except exceptions:
pass
except:
raise
else:
message = "%s() did not raise %s" % (name, valid)
raise AssertionError(message)
newfunc = make_decorator(func)(newfunc)
return newfunc
return decorate
def set_trace():
"""Call pdb.set_trace in the calling frame, first restoring
sys.stdout to the real output stream. Note that sys.stdout is NOT
reset to whatever it was before the call once pdb is done!
"""
import pdb
import sys
stdout = sys.stdout
sys.stdout = sys.__stdout__
pdb.Pdb().set_trace(sys._getframe().f_back)
def timed(limit):
"""Test must finish within specified time limit to pass.
Example use::
@timed(.1)
def test_that_fails():
time.sleep(.2)
"""
def decorate(func):
def newfunc(*arg, **kw):
start = time.time()
result = func(*arg, **kw)
end = time.time()
if end - start > limit:
raise TimeExpired("Time limit (%s) exceeded" % limit)
return result
newfunc = make_decorator(func)(newfunc)
return newfunc
return decorate
def with_setup(setup=None, teardown=None):
"""Decorator to add setup and/or teardown methods to a test function::
@with_setup(setup, teardown)
def test_something():
" ... "
Note that `with_setup` is useful *only* for test functions, not for test
methods or inside of TestCase subclasses.
"""
def decorate(func, setup=setup, teardown=teardown):
if setup:
if hasattr(func, 'setup'):
_old_s = func.setup
def _s():
setup()
_old_s()
func.setup = _s
else:
func.setup = setup
if teardown:
if hasattr(func, 'teardown'):
_old_t = func.teardown
def _t():
_old_t()
teardown()
func.teardown = _t
else:
func.teardown = teardown
return func
return decorate
def istest(func):
"""Decorator to mark a function or method as a test
"""
func.__test__ = True
return func
def nottest(func):
"""Decorator to mark a function or method as *not* a test
"""
func.__test__ = False
return func
| apache-2.0 |
hobarrera/django | django/contrib/gis/db/backends/mysql/introspection.py | 700 | 1771 | from MySQLdb.constants import FIELD_TYPE
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.mysql.introspection import DatabaseIntrospection
class MySQLIntrospection(DatabaseIntrospection):
# Updating the data_types_reverse dictionary with the appropriate
# type for Geometry fields.
data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
data_types_reverse[FIELD_TYPE.GEOMETRY] = 'GeometryField'
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# In order to get the specific geometry type of the field,
# we introspect on the table definition using `DESCRIBE`.
cursor.execute('DESCRIBE %s' %
self.connection.ops.quote_name(table_name))
# Increment over description info until we get to the geometry
# column.
for column, typ, null, key, default, extra in cursor.fetchall():
if column == geo_col:
# Using OGRGeomType to convert from OGC name to Django field.
# MySQL does not support 3D or SRIDs, so the field params
# are empty.
field_type = OGRGeomType(typ).django
field_params = {}
break
finally:
cursor.close()
return field_type, field_params
def supports_spatial_index(self, cursor, table_name):
# Supported with MyISAM, or InnoDB on MySQL 5.7.5+
storage_engine = self.get_storage_engine(cursor, table_name)
return (
(storage_engine == 'InnoDB' and self.connection.mysql_version >= (5, 7, 5)) or
storage_engine == 'MyISAM'
)
| bsd-3-clause |
cloudsigma/cgroupspy | cgroupspy/test/test_interfaces.py | 1 | 8905 | """
Copyright (c) 2014, CloudSigma AG
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the CloudSigma AG nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CloudSigma AG BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from unittest import TestCase
import mock
from ..contenttypes import DeviceAccess, DeviceThrottle
from ..interfaces import BaseFileInterface, FlagFile, BitFieldFile, CommaDashSetFile, DictFile, \
IntegerFile, IntegerListFile, ListFile, MultiLineIntegerFile, TypedFile
class FaceHolder(object):
face = None
def __init__(self, init_value):
self.val = init_value
self.last_filename = None
def get_property(self, filename):
self.last_filename = filename
return self.val
def set_property(self, filename, val):
self.last_filename = filename
self.val = str(val)
class InterfacesTest(TestCase):
def patch_face(self, **kwargs):
patch = mock.patch.multiple(FaceHolder, **kwargs)
patch.start()
self.addCleanup(patch.stop)
def test_base(self):
self.patch_face(face=BaseFileInterface("myfile1"))
fh = FaceHolder("23")
self.assertEqual(fh.face, "23")
fh.face = 44
self.assertEqual(fh.face, "44")
def test_flagfile(self):
self.patch_face(face=FlagFile("flagfile"))
fh = FaceHolder("1")
self.assertEqual(fh.face, True)
fh.face = False
self.assertEqual(fh.face, False)
self.assertEqual(fh.val, "0")
fh.face = None
self.assertEqual(fh.face, False)
self.assertEqual(fh.val, "0")
fh.face = 1
self.assertEqual(fh.face, True)
self.assertEqual(fh.val, "1")
def test_bitfieldfile(self):
self.patch_face(face=BitFieldFile("bitfieldfile"))
fh = FaceHolder("2")
self.assertEqual(fh.face, [False, True, False, False, False, False, False, False])
fh.face = [False]
self.assertEqual(fh.face, [False, False, False, False, False, False, False, False])
self.assertEqual(fh.val, "0")
fh.face = [False, True, True]
self.assertEqual(fh.face, [False, True, True, False, False, False, False, False])
self.assertEqual(fh.val, "6")
def test_comma_dash(self):
self.patch_face(face=CommaDashSetFile("commadash"))
fh = FaceHolder("")
self.assertEqual(fh.face, set())
fh = FaceHolder(" ")
self.assertEqual(fh.face, set())
fh.face = set()
self.assertEqual(fh.face, set())
fh.face = []
self.assertEqual(fh.face, set())
fh.face = {}
self.assertEqual(fh.face, set())
fh = FaceHolder("1,2,4-6,18-23,7")
expected = {1, 2, 4, 5, 6, 7, 18, 19, 20, 21, 22, 23}
self.assertEqual(fh.face, expected)
fh.face = {1, 2, 3}
self.assertEqual(fh.face, {1, 2, 3})
self.assertEqual(fh.val, "1-3")
fh.face = [1, 2, 3]
self.assertEqual(fh.face, {1, 2, 3})
self.assertEqual(fh.val, "1-3")
fh.face = [1, 2, 2, 3]
self.assertEqual(fh.face, {1, 2, 3})
self.assertEqual(fh.val, "1-3")
fh.face = {1}
self.assertEqual(fh.face, {1})
self.assertEqual(fh.val, "1")
fh.face = {}
self.assertEqual(fh.face, set([]))
self.assertEqual(fh.val, " ")
def test_dict_file(self):
self.patch_face(face=DictFile("dictfile"))
fh = FaceHolder("ala 123\nbala 123\nnica 456")
self.assertEqual(fh.face, {"ala": 123, "bala": 123, "nica": 456})
def test_int_file(self):
self.patch_face(face=IntegerFile("intfile"))
fh = FaceHolder("16")
self.assertEqual(fh.face, 16)
fh.face = 18
self.assertEqual(fh.face, 18)
self.assertEqual(fh.val, "18")
fh.face = None
self.assertEqual(fh.face, None)
self.assertEqual(fh.val, "-1")
def test_int_list(self):
self.patch_face(face=IntegerListFile("intlistfile"))
fh = FaceHolder("16 18 20")
self.assertEqual(fh.face, [16, 18, 20])
def test_list(self):
self.patch_face(face=ListFile("listfile"))
fh = FaceHolder("16 18 20")
self.assertEqual(fh.face, ["16", "18", "20"])
def test_multiline_int(self):
self.patch_face(face=MultiLineIntegerFile("multiint"))
fh = FaceHolder("16\n18\n20\n22")
self.assertEqual(fh.face, [16, 18, 20, 22])
def test_comma_dash_more():
pairs = [
("1-2,4-7,18-23", {1, 2, 4, 5, 6, 7, 18, 19, 20, 21, 22, 23}),
("1,3-4,6,8-9,11", {1, 3, 4, 6, 8, 9, 11}),
("4,8-10,12", {4, 8, 9, 10, 12}),
("1-3", {1, 2, 3}),
("1", {1}),
]
for encoded, data in pairs:
yield check_comma_dash_case, encoded, data
def check_comma_dash_case(encoded, data):
with mock.patch.multiple(FaceHolder, face=CommaDashSetFile("commadash")):
fh = FaceHolder(encoded)
assert fh.face == data
fh.face = data
assert fh.face == data
assert fh.val == encoded
def test_device_throttle():
pairs = [
("1:2 100", DeviceThrottle(major=1, minor=2, limit=100)),
("1:2 100", DeviceThrottle(major='1', minor='2', limit=100)),
("0:0 100", DeviceThrottle(major=0, minor=0, limit=100)),
("*:* 100", DeviceThrottle(major=None, minor=None, limit=100)),
("0:* 100", DeviceThrottle(major=0, minor=None, limit=100)),
]
for encoded, data in pairs:
yield check_device_throttle_case, encoded, data
yield check_device_throttle_many, " \n", []
yield check_device_throttle_many, "\n".join([p[0] for p in pairs]), [p[1] for p in pairs]
def check_device_throttle_many(encoded, data):
with mock.patch.multiple(FaceHolder, face=TypedFile("device_throttle", DeviceThrottle, many=True)):
fh = FaceHolder(encoded)
assert fh.face == data
def check_device_throttle_case(encoded, data):
with mock.patch.multiple(FaceHolder, face=TypedFile("device_throttle", DeviceThrottle, many=False)):
fh = FaceHolder(encoded)
assert fh.face == data
fh.face = data
assert fh.face == data
assert fh.val == encoded
def test_device_access():
pairs = [
("c 1:3 rwm", DeviceAccess(dev_type="c", major=1, minor=3, access="rwm")),
("c 1:3 rwm", DeviceAccess(dev_type="c", major='1', minor='3', access=7)),
("c 5:* rwm", DeviceAccess(dev_type="c", major=5, minor=None, access="rwm")),
("c 5:0 rwm", DeviceAccess(dev_type="c", major=5, minor=0, access="rwm")),
("b *:* rwm", DeviceAccess(dev_type="b", major=None, minor=None, access="rwm")),
("b 0:0 rwm", DeviceAccess(dev_type="b", major=0, minor=0, access="rwm")),
("c 136:* rw", DeviceAccess(dev_type="c", major=136, minor=None, access="rw")),
]
for encoded, data in pairs:
yield check_device_access_case, encoded, data
yield check_device_access_many, " \n", []
yield check_device_access_many, "\n".join([p[0] for p in pairs]), [p[1] for p in pairs]
def check_device_access_case(encoded, data):
with mock.patch.multiple(FaceHolder, face=TypedFile("device_access", DeviceAccess, many=False)):
fh = FaceHolder(encoded)
assert fh.face == data
fh.face = data
assert fh.face == data
assert fh.val == encoded
def check_device_access_many(encoded, data):
with mock.patch.multiple(FaceHolder, face=TypedFile("device_access", DeviceAccess, many=True)):
fh = FaceHolder(encoded)
assert fh.face == data
| bsd-3-clause |
imre-kerr/swarm-epuck | controllers/swarm_controller/webann.py | 2 | 1182 | # This uses the EpuckBasic code as the interface to webots, and the epuck2 code to connect an ANN
# to webots.
import epuck2
import epuck_basic as epb
import graph
import prims1
# The webann is a descendant of the Webots "controller" class, and it has the ANN as an attribute.
class WebAnn(epb.EpuckBasic):
def __init__(self, tempo = 1.0, e_thresh = 125, nvect = True, cvect = True, svect = True, band = 'bw', concol = 1.0, snapshow = True,
ann_cycles = 1, agent_cycles = 5, act_noise = 0.1, tfile = "redman4"):
epb.EpuckBasic.__init__(self)
self.basic_setup() # defined for EpuckBasic
self.ann = epuck2.annpuck2(agent = self, e_thresh = e_thresh, nvect = nvect, cvect = cvect, svect = svect, band = band, snapshow = snapshow,
concol = concol, ann_cycles = ann_cycles, agent_cycles = agent_cycles, act_noise = act_noise,
tfile = tfile)
def long_run(self,steps = 500):
self.ann.simsteps = steps
self.spin_angle(prims1.randab(0,360))
self.ann.redman_run()
#*** MAIN ***
# Webots expects a controller to be created and activated at the bottom of the controller file.
controller = WebAnn(tempo = 1.0, band = 'gray')
controller.long_run(40)
| mit |
terbolous/CouchPotatoServer | libs/pyutil/test/deprecated/test_picklesaver.py | 106 | 1340 | #!/usr/bin/env python
# Copyright (c) 2002 Luke 'Artimage' Nelson
# Copyright (c) 2005-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import os
try:
from twisted.trial import unittest
except ImportError, le:
print "Skipping %s since it requires Twisted and Twisted could not be imported: %s" % (__name__, le,)
else:
from pyutil import PickleSaver, fileutil
class Thingie(PickleSaver.PickleSaver):
def __init__(self, fname, delay=30):
PickleSaver.PickleSaver.__init__(self, fname=fname, attrs={'tmp_store':'False'}, DELAY=delay)
class PickleSaverTest(unittest.TestCase):
def _test_save_now(self, fname):
thingie = Thingie(fname, delay=0)
thingie.tmp_store = 'True'
        thingie.lazy_save() # Note: it was constructed with a save delay of 0.
def test_save_now(self):
"""
This test should create a lazy save object, save it with no delay and check if the file exists.
"""
tempdir = fileutil.NamedTemporaryDirectory()
fname = os.path.join(tempdir.name, "picklesavertest")
self._test_save_now(fname)
self.failUnless(os.path.isfile(fname), "The file [%s] does not exist." %(fname,))
tempdir.shutdown()
| gpl-3.0 |
colinbrislawn/bioconda-recipes | recipes/fgbio/fgbio.py | 7 | 2575 | #!/opt/anaconda1anaconda2anaconda3/bin/python
#
# Wrapper script for invoking the jar.
#
# This script is written for use with the Conda package manager and is ported
# from a bash script that does the same thing, adapting the style in
# the peptide-shaker wrapper
# (https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py)
import subprocess
import sys
from os import access, getenv, path, X_OK
# Expected name of the JAR file.
JAR_NAME = 'fgbio.jar'
# Default options passed to the `java` executable.
DEFAULT_JVM_MEM_OPTS = ['-Xms512m', '-Xmx4g']
def real_dirname(in_path):
"""Returns the symlink-resolved, canonicalized directory-portion of
the given path."""
return path.dirname(path.realpath(in_path))
def java_executable():
"""Returns the name of the Java executable.
Use JAVA_HOME, or local anaconda java, or just `java` in the PATH.
"""
java_home = getenv('JAVA_HOME')
java_bin = path.join('bin', 'java')
if java_home and access(path.join(java_home, java_bin), X_OK):
return path.join(java_home, java_bin)
conda_java_bin = path.join(real_dirname(sys.executable), "java")
if conda_java_bin and path.exists(conda_java_bin) and access(conda_java_bin, X_OK):
return conda_java_bin
return 'java'
def jvm_opts(argv, default_mem_opts=DEFAULT_JVM_MEM_OPTS):
"""Constructs a list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
The return value is a 3-tuple lists of strings of the form:
(memory_options, prop_options, passthrough_options)
"""
mem_opts, prop_opts, pass_args = [], [], []
for arg in argv:
if arg.startswith('-D') or arg.startswith('-XX'):
opts_list = prop_opts
elif arg.startswith('-Xm'):
opts_list = mem_opts
else:
opts_list = pass_args
opts_list.append(arg)
if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
mem_opts = default_mem_opts
return (mem_opts, prop_opts, pass_args)
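
# Illustrative sketch (not part of the original wrapper): how jvm_opts()
# splits a hypothetical mixed argument list:
#
#   jvm_opts(['-Xmx2g', '-Dsample.prop=x', '-XX:+UseG1GC', 'SomeTool', '-i', 'in.bam'])
#   -> (['-Xmx2g'],                            # memory options
#       ['-Dsample.prop=x', '-XX:+UseG1GC'],   # property options
#       ['SomeTool', '-i', 'in.bam'])          # passed through to the jar
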
def main():
java = java_executable()
jar_dir = real_dirname(sys.argv[0])
(mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
if pass_args != [] and pass_args[0].startswith('org'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = path.join(jar_dir, JAR_NAME)
java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == "__main__":
main()
| mit |
alexlo03/ansible | lib/ansible/modules/crypto/openssl_privatekey.py | 25 | 10503 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Yanis Guenane <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: openssl_privatekey
author: "Yanis Guenane (@Spredzy)"
version_added: "2.3"
short_description: Generate OpenSSL private keys.
description:
- "This module allows one to (re)generate OpenSSL private keys. It uses
the pyOpenSSL python library to interact with openssl. One can generate
either RSA or DSA private keys. Keys are generated in PEM format."
requirements:
- "python-pyOpenSSL"
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the private key should exist or not, taking action if the state is different from what is stated.
size:
required: false
default: 4096
description:
- Size (in bits) of the TLS/SSL key to generate
type:
required: false
default: "RSA"
choices: [ RSA, DSA ]
description:
- The algorithm used to generate the TLS/SSL private key
force:
required: false
default: False
type: bool
description:
- Should the key be regenerated even if it already exists
path:
required: true
description:
- Name of the file in which the generated TLS/SSL private key will be written. It will have 0600 mode.
passphrase:
required: false
description:
- The passphrase for the private key.
version_added: "2.4"
cipher:
required: false
description:
- The cipher to encrypt the private key. (cipher can be found by running `openssl list-cipher-algorithms`)
version_added: "2.4"
extends_documentation_fragment: files
'''
EXAMPLES = '''
# Generate an OpenSSL private key with the default values (4096 bits, RSA)
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
# Generate an OpenSSL private key with the default values (4096 bits, RSA)
# and a passphrase
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
passphrase: ansible
cipher: aes256
# Generate an OpenSSL private key with a different size (2048 bits)
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
size: 2048
# Force regenerate an OpenSSL private key if it already exists
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
force: True
# Generate an OpenSSL private key with a different algorithm (DSA)
- openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
type: DSA
'''
RETURN = '''
size:
description: Size (in bits) of the TLS/SSL private key
returned: changed or success
type: int
sample: 4096
type:
description: Algorithm used to generate the TLS/SSL private key
returned: changed or success
type: string
sample: RSA
filename:
description: Path to the generated TLS/SSL private key file
returned: changed or success
type: string
sample: /etc/ssl/private/ansible.com.pem
fingerprint:
description: The fingerprint of the public key. Fingerprint will be generated for each hashlib.algorithms available.
Requires PyOpenSSL >= 16.0 for meaningful output.
returned: changed or success
type: dict
sample:
md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
'''
import os
import traceback
try:
from OpenSSL import crypto
except ImportError:
pyopenssl_found = False
else:
pyopenssl_found = True
from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils._text import to_native, to_bytes
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import string_types
class PrivateKeyError(crypto_utils.OpenSSLObjectError):
pass
class PrivateKey(crypto_utils.OpenSSLObject):
def __init__(self, module):
super(PrivateKey, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.size = module.params['size']
self.passphrase = module.params['passphrase']
self.cipher = module.params['cipher']
self.privatekey = None
self.fingerprint = {}
self.mode = module.params.get('mode', None)
if self.mode is None:
self.mode = 0o600
self.type = crypto.TYPE_RSA
if module.params['type'] == 'DSA':
self.type = crypto.TYPE_DSA
def generate(self, module):
"""Generate a keypair."""
if not self.check(module, perms_required=False) or self.force:
self.privatekey = crypto.PKey()
try:
self.privatekey.generate_key(self.type, self.size)
except (TypeError, ValueError) as exc:
raise PrivateKeyError(exc)
try:
privatekey_file = os.open(self.path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.close(privatekey_file)
if isinstance(self.mode, string_types):
try:
self.mode = int(self.mode, 8)
except ValueError as e:
try:
st = os.lstat(self.path)
self.mode = AnsibleModule._symbolic_mode_to_octal(st, self.mode)
except ValueError as e:
module.fail_json(msg="%s" % to_native(e), exception=traceback.format_exc())
os.chmod(self.path, self.mode)
privatekey_file = os.open(self.path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, self.mode)
if self.cipher and self.passphrase:
os.write(privatekey_file, crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey,
self.cipher, to_bytes(self.passphrase)))
else:
os.write(privatekey_file, crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey))
os.close(privatekey_file)
self.changed = True
except IOError as exc:
self.remove()
raise PrivateKeyError(exc)
self.fingerprint = crypto_utils.get_fingerprint(self.path, self.passphrase)
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(PrivateKey, self).check(module, perms_required)
def _check_size(privatekey):
return self.size == privatekey.bits()
def _check_type(privatekey):
return self.type == privatekey.type()
def _check_passphrase():
try:
crypto_utils.load_privatekey(self.path, self.passphrase)
return True
except crypto.Error:
return False
if not state_and_perms or not _check_passphrase():
return False
privatekey = crypto_utils.load_privatekey(self.path, self.passphrase)
return _check_size(privatekey) and _check_type(privatekey)
def dump(self):
"""Serialize the object into a dictionary."""
result = {
'size': self.size,
'filename': self.path,
'changed': self.changed,
'fingerprint': self.fingerprint,
}
if self.type == crypto.TYPE_RSA:
result['type'] = 'RSA'
else:
result['type'] = 'DSA'
return result
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
size=dict(default=4096, type='int'),
type=dict(default='RSA', choices=['RSA', 'DSA'], type='str'),
force=dict(default=False, type='bool'),
path=dict(required=True, type='path'),
passphrase=dict(type='str', no_log=True),
cipher=dict(type='str'),
),
supports_check_mode=True,
add_file_common_args=True,
required_together=[['cipher', 'passphrase']],
)
if not pyopenssl_found:
module.fail_json(msg='the python pyOpenSSL module is required')
base_dir = os.path.dirname(module.params['path'])
if not os.path.isdir(base_dir):
module.fail_json(
name=base_dir,
            msg='The directory %s does not exist or is not a directory' % base_dir
)
private_key = PrivateKey(module)
if private_key.state == 'present':
if module.check_mode:
result = private_key.dump()
result['changed'] = module.params['force'] or not private_key.check(module)
module.exit_json(**result)
try:
private_key.generate(module)
except PrivateKeyError as exc:
module.fail_json(msg=to_native(exc))
else:
if module.check_mode:
result = private_key.dump()
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
try:
private_key.remove()
except PrivateKeyError as exc:
module.fail_json(msg=to_native(exc))
result = private_key.dump()
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
jhseu/tensorflow | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/shapes_for_arguments.py | 21 | 1840 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/shapes_for_arguments | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class TestModule(tf.Module):
# Check that we get shapes annotated on function arguments.
#
# Besides checking the shape on the function input argument, this test also
# checks that the shape on the input argument is propagated to the return
# value.
# We eventually want to move the shape inference to a pass separate from
# the initial import, in which case that aspect of this test doesn't make much
# sense and will be superseded by MLIR->MLIR shape inference tests.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(%arg0: tensor<f32> {{.*}}) -> (tensor<f32> {{.*}})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["some_function"]
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def some_function(self, x):
return x
if __name__ == '__main__':
common.do_test(TestModule)
| apache-2.0 |
Hasimir/pyjs | examples/widgets/__main__.py | 8 | 1045 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
TARGETS = [
'Widgets.py',
]
PACKAGE = {
'title': 'Widgets',
'desc': 'Widgets example',
}
def setup(targets):
'''Setup example for translation, MUST call util.setup(targets).'''
util.setup(targets)
def translate():
'''Translate example, MUST call util.translate().'''
util.translate()
def install(package):
'''Install and cleanup example module. MUST call util.install(package)'''
util.install(package)
##---------------------------------------##
# --------- (-: DO NOT EDIT :-) --------- #
##---------------------------------------##
import sys
import os
examples = head = os.path.abspath(os.path.dirname(__file__))
while os.path.split(examples)[1].lower() != 'examples':
examples = os.path.split(examples)[0]
if not examples:
raise ValueError("Cannot determine examples directory")
sys.path.insert(0, os.path.join(examples))
from _examples import util
sys.path.pop(0)
util.init(head)
setup(TARGETS)
translate()
install(PACKAGE)
| apache-2.0 |
vaporry/dapp-bin | serpent_gamble/prepare.py | 5 | 2914 | # This is an ultra-minimal "dapp framework" that simplifies the deployment
# process by generating a config.js file that specifies the ABI for all of
# the contracts that you need and also includes the ability to update
# addresses. By default, it simply scans through every .se and .sol file in
# the current directory and adds an ABI object for each one into a JS
# object stored under window.accounts, with the key of each ABI object being
# the filename not including the .se/.sol ending. For example, wallet.sol
# will create window.accounts.wallet = { abi: ... }
#
# You can also use the x=y syntax to set an address. For example, if you
# call python prepare.py admin=0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae,
# then in the JS object you will get window.accounts.admin =
# '0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae'; this persists if you call
# python prepare.py after without setting the argument again as the script
# always tries to read the value set from the previous time the config.js
# file was created.
#
# Example use:
#
# Step 1: serpent compile currency.se
# Step 2: in eth/geth/pyeth, send a transaction to create a contract whose
# code is the output of the previous line
# Step 3: get the contract address of the contract you created. Suppose that
# this address is 0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae
# Step 4: run
# python prepare.py currency=0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae
# Step 5: make sure to include config.js as a javascript file in your
# application.
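#
# For illustration only (the shape is inferred from the code below), a
# generated config.js might look like:
#
#   window.accounts = {
#       "currency": {
#           "abi": [...],
#           "address": "0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae"
#       }
#   }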
import sys
import json
import serpent
import os
solidity = None
accounts = {}
# Fill in contract ABI declarations
for f in os.listdir(os.getcwd()):
if f[-3:] == ".se":
accounts[f[:-3]] = {"abi": serpent.mk_full_signature(f)}
elif f[-4:] == ".sol":
if not solidity:
from ethereum import _solidity
solidity = _solidity.get_solidity()
accounts[f[:-4]] = {"abi": solidity.mk_full_signature(open(f).read())}
# Fill in previously known addresses
if 'config.js' in os.listdir(os.getcwd()):
data = open('config.js').read()
code = json.loads(data[data.find('{'):])
# For contracts (ie. objects that contain an 'abi' parameter), if
# we detect a .se or .sol file removed then we do not add the
# associated address from the registry. For regular accounts, we
# transfer all of them over
for k, v in code.items():
if 'address' in v and (k in accounts or 'abi' not in v):
if k not in accounts:
accounts[k] = {}
accounts[k]["address"] = v['address']
# Fill in addresses from sys.argv
for arg in sys.argv:
if '=' in arg:
k, v = arg.split('=')
if len(v) == 40:
v = '0x' + v
if k not in accounts:
accounts[k] = {}
accounts[k]["address"] = v
open('config.js', 'w').write("window.accounts = " + json.dumps(accounts, indent=4))
| mit |
Zhongqilong/mykbengineer | kbe/res/scripts/common/Lib/profile.py | 102 | 22021 | #! /usr/bin/env python3
#
# Class for profiling python code. rev 1.0 6/2/94
#
# Written by James Roskind
# Based on prior profile module by Sjoerd Mullender...
# which was hacked somewhat by: Guido van Rossum
"""Class for profiling Python code."""
# Copyright Disney Enterprises, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import sys
import os
import time
import marshal
from optparse import OptionParser
__all__ = ["run", "runctx", "Profile"]
# Sample timer for use with the integer dispatch routine (trace_dispatch_i):
#i_count = 0
#def integer_timer():
# global i_count
# i_count = i_count + 1
# return i_count
#itimes = integer_timer # replace with C coded timer returning integers
class _Utils:
"""Support class for utility functions which are shared by
profile.py and cProfile.py modules.
Not supposed to be used directly.
"""
def __init__(self, profiler):
self.profiler = profiler
def run(self, statement, filename, sort):
prof = self.profiler()
try:
prof.run(statement)
except SystemExit:
pass
finally:
self._show(prof, filename, sort)
def runctx(self, statement, globals, locals, filename, sort):
prof = self.profiler()
try:
prof.runctx(statement, globals, locals)
except SystemExit:
pass
finally:
self._show(prof, filename, sort)
def _show(self, prof, filename, sort):
if filename is not None:
prof.dump_stats(filename)
else:
prof.print_stats(sort)
#**************************************************************************
# The following are the static member functions for the profiler class
# Note that an instance of Profile() is *not* needed to call them.
#**************************************************************************
def run(statement, filename=None, sort=-1):
"""Run statement under profiler optionally saving results in filename
This function takes a single argument that can be passed to the
"exec" statement, and an optional file name. In all cases this
routine attempts to "exec" its first argument and gather profiling
statistics from the execution. If no file name is present, then this
function automatically prints a simple profiling report, sorted by the
standard name string (file/line/function-name) that is presented in
each line.
"""
return _Utils(Profile).run(statement, filename, sort)
def runctx(statement, globals, locals, filename=None, sort=-1):
"""Run statement under profiler, supplying your own globals and locals,
optionally saving results in filename.
statement and filename have the same semantics as profile.run
"""
return _Utils(Profile).runctx(statement, globals, locals, filename, sort)
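
# Hypothetical quick-start for the two helpers above (illustrative only,
# not executed by this module):
#
#   import profile
#   profile.run('fib(25)', sort='cumulative')    # print stats to stdout
#   profile.run('fib(25)', filename='fib.prof')  # save marshalled stats
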
class Profile:
"""Profiler class.
self.cur is always a tuple. Each such tuple corresponds to a stack
frame that is currently active (self.cur[-2]). The following are the
definitions of its members. We use this external "parallel stack" to
avoid contaminating the program that we are profiling. (old profiler
    used to write into the frame's local dictionary!!) Derived classes
can change the definition of some entries, as long as they leave
[-2:] intact (frame and previous tuple). In case an internal error is
detected, the -3 element is used as the function name.
[ 0] = Time that needs to be charged to the parent frame's function.
It is used so that a function call will not have to access the
timing data for the parent frame.
[ 1] = Total time spent in this frame's function, excluding time in
subfunctions (this latter is tallied in cur[2]).
[ 2] = Total time spent in subfunctions, excluding time executing the
frame's function (this latter is tallied in cur[1]).
[-3] = Name of the function that corresponds to this frame.
[-2] = Actual frame that we correspond to (used to sync exception handling).
[-1] = Our parent 6-tuple (corresponds to frame.f_back).
Timing data for each function is stored as a 5-tuple in the dictionary
self.timings[]. The index is always the name stored in self.cur[-3].
The following are the definitions of the members:
[0] = The number of times this function was called, not counting direct
or indirect recursion,
[1] = Number of times this function appears on the stack, minus one
[2] = Total time spent internal to this function
[3] = Cumulative time that this function was present on the stack. In
non-recursive functions, this is the total execution time from start
to finish of each invocation of a function, including time spent in
all subfunctions.
[4] = A dictionary indicating for each function name, the number of times
it was called by us.
"""
bias = 0 # calibration constant
def __init__(self, timer=None, bias=None):
self.timings = {}
self.cur = None
self.cmd = ""
self.c_func_name = ""
if bias is None:
bias = self.bias
self.bias = bias # Materialize in local dict for lookup speed.
if not timer:
self.timer = self.get_time = time.process_time
self.dispatcher = self.trace_dispatch_i
else:
self.timer = timer
t = self.timer() # test out timer function
try:
length = len(t)
except TypeError:
self.get_time = timer
self.dispatcher = self.trace_dispatch_i
else:
if length == 2:
self.dispatcher = self.trace_dispatch
else:
self.dispatcher = self.trace_dispatch_l
# This get_time() implementation needs to be defined
# here to capture the passed-in timer in the parameter
# list (for performance). Note that we can't assume
# the timer() result contains two values in all
# cases.
def get_time_timer(timer=timer, sum=sum):
return sum(timer())
self.get_time = get_time_timer
self.t = self.get_time()
self.simulate_call('profiler')
# Heavily optimized dispatch routine for os.times() timer
def trace_dispatch(self, frame, event, arg):
timer = self.timer
t = timer()
t = t[0] + t[1] - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame,t):
t = timer()
self.t = t[0] + t[1]
else:
r = timer()
self.t = r[0] + r[1] - t # put back unrecorded delta
# Dispatch routine for best timer program (return = scalar, fastest if
# an integer but float works too -- and time.clock() relies on that).
def trace_dispatch_i(self, frame, event, arg):
timer = self.timer
t = timer() - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = timer()
else:
self.t = timer() - t # put back unrecorded delta
# Dispatch routine for macintosh (timer returns time in ticks of
# 1/60th second)
def trace_dispatch_mac(self, frame, event, arg):
timer = self.timer
t = timer()/60.0 - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = timer()/60.0
else:
self.t = timer()/60.0 - t # put back unrecorded delta
# SLOW generic dispatch routine for timer returning lists of numbers
def trace_dispatch_l(self, frame, event, arg):
get_time = self.get_time
t = get_time() - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = get_time()
else:
self.t = get_time() - t # put back unrecorded delta
# In the event handlers, the first 3 elements of self.cur are unpacked
# into vrbls w/ 3-letter names. The last two characters are meant to be
# mnemonic:
# _pt self.cur[0] "parent time" time to be charged to parent frame
# _it self.cur[1] "internal time" time spent directly in the function
# _et self.cur[2] "external time" time spent in subfunctions
def trace_dispatch_exception(self, frame, t):
rpt, rit, ret, rfn, rframe, rcur = self.cur
if (rframe is not frame) and rcur:
return self.trace_dispatch_return(rframe, t)
self.cur = rpt, rit+t, ret, rfn, rframe, rcur
return 1
def trace_dispatch_call(self, frame, t):
if self.cur and frame.f_back is not self.cur[-2]:
rpt, rit, ret, rfn, rframe, rcur = self.cur
if not isinstance(rframe, Profile.fake_frame):
assert rframe.f_back is frame.f_back, ("Bad call", rfn,
rframe, rframe.f_back,
frame, frame.f_back)
self.trace_dispatch_return(rframe, 0)
assert (self.cur is None or \
frame.f_back is self.cur[-2]), ("Bad call",
self.cur[-3])
fcode = frame.f_code
fn = (fcode.co_filename, fcode.co_firstlineno, fcode.co_name)
self.cur = (t, 0, 0, fn, frame, self.cur)
timings = self.timings
if fn in timings:
cc, ns, tt, ct, callers = timings[fn]
timings[fn] = cc, ns + 1, tt, ct, callers
else:
timings[fn] = 0, 0, 0, 0, {}
return 1
def trace_dispatch_c_call (self, frame, t):
fn = ("", 0, self.c_func_name)
self.cur = (t, 0, 0, fn, frame, self.cur)
timings = self.timings
if fn in timings:
cc, ns, tt, ct, callers = timings[fn]
timings[fn] = cc, ns+1, tt, ct, callers
else:
timings[fn] = 0, 0, 0, 0, {}
return 1
def trace_dispatch_return(self, frame, t):
if frame is not self.cur[-2]:
assert frame is self.cur[-2].f_back, ("Bad return", self.cur[-3])
self.trace_dispatch_return(self.cur[-2], 0)
# Prefix "r" means part of the Returning or exiting frame.
# Prefix "p" means part of the Previous or Parent or older frame.
rpt, rit, ret, rfn, frame, rcur = self.cur
rit = rit + t
frame_total = rit + ret
ppt, pit, pet, pfn, pframe, pcur = rcur
self.cur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
timings = self.timings
cc, ns, tt, ct, callers = timings[rfn]
if not ns:
# This is the only occurrence of the function on the stack.
# Else this is a (directly or indirectly) recursive call, and
# its cumulative time will get updated when the topmost call to
# it returns.
ct = ct + frame_total
cc = cc + 1
if pfn in callers:
callers[pfn] = callers[pfn] + 1 # hack: gather more
# stats such as the amount of time added to ct courtesy
# of this specific call, and the contribution to cc
# courtesy of this call.
else:
callers[pfn] = 1
timings[rfn] = cc, ns - 1, tt + rit, ct, callers
return 1
dispatch = {
"call": trace_dispatch_call,
"exception": trace_dispatch_exception,
"return": trace_dispatch_return,
"c_call": trace_dispatch_c_call,
"c_exception": trace_dispatch_return, # the C function returned
"c_return": trace_dispatch_return,
}
# The next few functions play with self.cmd. By carefully preloading
# our parallel stack, we can force the profiled result to include
# an arbitrary string as the name of the calling function.
# We use self.cmd as that string, and the resulting stats look
# very nice :-).
def set_cmd(self, cmd):
if self.cur[-1]: return # already set
self.cmd = cmd
self.simulate_call(cmd)
class fake_code:
def __init__(self, filename, line, name):
self.co_filename = filename
self.co_line = line
self.co_name = name
self.co_firstlineno = 0
def __repr__(self):
return repr((self.co_filename, self.co_line, self.co_name))
class fake_frame:
def __init__(self, code, prior):
self.f_code = code
self.f_back = prior
def simulate_call(self, name):
code = self.fake_code('profile', 0, name)
if self.cur:
pframe = self.cur[-2]
else:
pframe = None
frame = self.fake_frame(code, pframe)
self.dispatch['call'](self, frame, 0)
# collect stats from pending stack, including getting final
# timings for self.cmd frame.
def simulate_cmd_complete(self):
get_time = self.get_time
t = get_time() - self.t
while self.cur[-1]:
# We *can* cause assertion errors here if
# dispatch_trace_return checks for a frame match!
self.dispatch['return'](self, self.cur[-2], t)
t = 0
self.t = get_time() - t
def print_stats(self, sort=-1):
import pstats
pstats.Stats(self).strip_dirs().sort_stats(sort). \
print_stats()
def dump_stats(self, file):
with open(file, 'wb') as f:
self.create_stats()
marshal.dump(self.stats, f)
def create_stats(self):
self.simulate_cmd_complete()
self.snapshot_stats()
def snapshot_stats(self):
self.stats = {}
for func, (cc, ns, tt, ct, callers) in self.timings.items():
callers = callers.copy()
nc = 0
for callcnt in callers.values():
nc += callcnt
self.stats[func] = cc, nc, tt, ct, callers
# The following two methods can be called by clients to use
# a profiler to profile a statement, given as a string.
def run(self, cmd):
import __main__
dict = __main__.__dict__
return self.runctx(cmd, dict, dict)
def runctx(self, cmd, globals, locals):
self.set_cmd(cmd)
sys.setprofile(self.dispatcher)
try:
exec(cmd, globals, locals)
finally:
sys.setprofile(None)
return self
# This method is more useful to profile a single function call.
def runcall(self, func, *args, **kw):
self.set_cmd(repr(func))
sys.setprofile(self.dispatcher)
try:
return func(*args, **kw)
finally:
sys.setprofile(None)
#******************************************************************
# The following calculates the overhead for using a profiler. The
# problem is that it takes a fair amount of time for the profiler
# to stop the stopwatch (from the time it receives an event).
# Similarly, there is a delay from the time that the profiler
# re-starts the stopwatch before the user's code really gets to
# continue. The following code tries to measure the difference on
# a per-event basis.
#
# Note that this difference is only significant if there are a lot of
# events, and relatively little user code per event. For example,
# code with small functions will typically benefit from having the
# profiler calibrated for the current platform. This *could* be
# done on the fly during init() time, but it is not worth the
    # effort. Also note that if too large a value is specified, then
# execution time on some functions will actually appear as a
# negative number. It is *normal* for some functions (with very
# low call counts) to have such negative stats, even if the
# calibration figure is "correct."
#
# One alternative to profile-time calibration adjustments (i.e.,
# adding in the magic little delta during each event) is to track
# more carefully the number of events (and cumulatively, the number
# of events during sub functions) that are seen. If this were
# done, then the arithmetic could be done after the fact (i.e., at
# display time). Currently, we track only call/return events.
# These values can be deduced by examining the callees and callers
# vectors for each functions. Hence we *can* almost correct the
# internal time figure at print time (note that we currently don't
# track exception event processing counts). Unfortunately, there
# is currently no similar information for cumulative sub-function
# time. It would not be hard to "get all this info" at profiler
# time. Specifically, we would have to extend the tuples to keep
# counts of this in each frame, and then extend the defs of timing
# tuples to include the significant two figures. I'm a bit fearful
# that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, for a very
# low "value added" feature.)
#**************************************************************
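
    # A minimal calibration sketch (illustrative only; the numbers are
    # hypothetical and machine-dependent, and this is not executed here):
    #
    #   import profile
    #   pr = profile.Profile()
    #   overhead = sorted(pr.calibrate(10000) for _ in range(5))[2]
    #   profile.Profile.bias = overhead   # e.g. ~1.2e-05 on one machine
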
def calibrate(self, m, verbose=0):
if self.__class__ is not Profile:
raise TypeError("Subclasses must override .calibrate().")
saved_bias = self.bias
self.bias = 0
try:
return self._calibrate_inner(m, verbose)
finally:
self.bias = saved_bias
def _calibrate_inner(self, m, verbose):
get_time = self.get_time
# Set up a test case to be run with and without profiling. Include
# lots of calls, because we're trying to quantify stopwatch overhead.
# Do not raise any exceptions, though, because we want to know
# exactly how many profile events are generated (one call event, +
# one return event, per Python-level call).
def f1(n):
for i in range(n):
x = 1
def f(m, f1=f1):
for i in range(m):
f1(100)
f(m) # warm up the cache
# elapsed_noprofile <- time f(m) takes without profiling.
t0 = get_time()
f(m)
t1 = get_time()
elapsed_noprofile = t1 - t0
if verbose:
print("elapsed time without profiling =", elapsed_noprofile)
# elapsed_profile <- time f(m) takes with profiling. The difference
# is profiling overhead, only some of which the profiler subtracts
# out on its own.
p = Profile()
t0 = get_time()
p.runctx('f(m)', globals(), locals())
t1 = get_time()
elapsed_profile = t1 - t0
if verbose:
print("elapsed time with profiling =", elapsed_profile)
# reported_time <- "CPU seconds" the profiler charged to f and f1.
total_calls = 0.0
reported_time = 0.0
for (filename, line, funcname), (cc, ns, tt, ct, callers) in \
p.timings.items():
if funcname in ("f", "f1"):
total_calls += cc
reported_time += tt
if verbose:
print("'CPU seconds' profiler reported =", reported_time)
print("total # calls =", total_calls)
if total_calls != m + 1:
raise ValueError("internal error: total calls = %d" % total_calls)
# reported_time - elapsed_noprofile = overhead the profiler wasn't
# able to measure. Divide by twice the number of calls (since there
# are two profiler events per call in this test) to get the hidden
# overhead per event.
mean = (reported_time - elapsed_noprofile) / 2.0 / total_calls
if verbose:
print("mean stopwatch overhead per profile event =", mean)
return mean
#****************************************************************************
def main():
usage = "profile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('-o', '--outfile', dest="outfile",
help="Save stats to <outfile>", default=None)
parser.add_option('-s', '--sort', dest="sort",
help="Sort order when printing to stdout, based on pstats.Stats class",
default=-1)
if not sys.argv[1:]:
parser.print_usage()
sys.exit(2)
(options, args) = parser.parse_args()
sys.argv[:] = args
if len(args) > 0:
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
'__cached__': None,
}
runctx(code, globs, None, options.outfile, options.sort)
else:
parser.print_usage()
return parser
# When invoked as main program, invoke the profiler on a script
if __name__ == '__main__':
main()
| lgpl-3.0 |
alu0100207385/dsi_3Django | django/contrib/gis/db/backends/spatialite/creation.py | 117 | 5699 | import os
from django.conf import settings
from django.core.cache import get_cache
from django.core.cache.backends.db import BaseDatabaseCache
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.creation import DatabaseCreation
class SpatiaLiteCreation(DatabaseCreation):
def create_test_db(self, verbosity=1, autoclobber=False):
"""
Creates a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
This method is overloaded to load up the SpatiaLite initialization
SQL prior to calling the `syncdb` command.
"""
# Don't import django.core.management if it isn't needed.
from django.core.management import call_command
test_database_name = self._get_test_db_name()
if verbosity >= 1:
test_db_repr = ''
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
print("Creating test database for alias '%s'%s..." % (self.connection.alias, test_db_repr))
self._create_test_db(verbosity, autoclobber)
self.connection.close()
self.connection.settings_dict["NAME"] = test_database_name
# Need to load the SpatiaLite initialization SQL before running `syncdb`.
self.load_spatialite_sql()
# Report syncdb messages at one level lower than that requested.
# This ensures we don't get flooded with messages during testing
# (unless you really ask to be flooded)
call_command('syncdb',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias,
load_initial_data=False)
# We need to then do a flush to ensure that any data installed by
# custom SQL has been removed. The only test data should come from
# test fixtures, or autogenerated from post_syncdb triggers.
# This has the side effect of loading initial data (which was
# intentionally skipped in the syncdb).
call_command('flush',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias)
from django.core.cache import get_cache
from django.core.cache.backends.db import BaseDatabaseCache
for cache_alias in settings.CACHES:
cache = get_cache(cache_alias)
if isinstance(cache, BaseDatabaseCache):
call_command('createcachetable', cache._table, database=self.connection.alias)
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database.
cursor = self.connection.cursor()
return test_database_name
def sql_indexes_for_field(self, model, f, style):
"Return any spatial index creation SQL for the field."
from django.contrib.gis.db.models.fields import GeometryField
output = super(SpatiaLiteCreation, self).sql_indexes_for_field(model, f, style)
if isinstance(f, GeometryField):
gqn = self.connection.ops.geo_quote_name
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('AddGeometryColumn') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ', ' +
style.SQL_FIELD(str(f.srid)) + ', ' +
style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
style.SQL_KEYWORD(str(f.dim)) + ', ' +
style.SQL_KEYWORD(str(int(not f.null))) +
');')
if f.spatial_index:
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('CreateSpatialIndex') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ');')
return output
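
    # For illustration only: for a hypothetical GeometryField named 'geom'
    # (srid=4326, geom_type='POINT', dim=2, null=False, spatial_index=True)
    # on table 'myapp_city', the SQL generated above would include:
    #   SELECT AddGeometryColumn('myapp_city', 'geom', 4326, 'POINT', 2, 1);
    #   SELECT CreateSpatialIndex('myapp_city', 'geom');
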
def load_spatialite_sql(self):
"""
This routine loads up the SpatiaLite SQL file.
"""
if self.connection.ops.spatial_version[:2] >= (2, 4):
# Spatialite >= 2.4 -- No need to load any SQL file, calling
# InitSpatialMetaData() transparently creates the spatial metadata
# tables
cur = self.connection._cursor()
cur.execute("SELECT InitSpatialMetaData()")
else:
# Spatialite < 2.4 -- Load the initial SQL
# Getting the location of the SpatiaLite SQL file, and confirming
# it exists.
spatialite_sql = self.spatialite_init_file()
if not os.path.isfile(spatialite_sql):
raise ImproperlyConfigured('Could not find the required SpatiaLite initialization '
'SQL file (necessary for testing): %s' % spatialite_sql)
# Opening up the SpatiaLite SQL initialization file and executing
# as a script.
with open(spatialite_sql, 'r') as sql_fh:
cur = self.connection._cursor()
cur.executescript(sql_fh.read())
def spatialite_init_file(self):
# SPATIALITE_SQL may be placed in settings to tell GeoDjango
        # to use a specific path to the SpatiaLite initialization SQL.
return getattr(settings, 'SPATIALITE_SQL',
'init_spatialite-%s.%s.sql' %
self.connection.ops.spatial_version[:2])
| bsd-3-clause |
0x19/werkzeug | bench/wzbench.py | 4 | 12441 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
wzbench
~~~~~~~
    A Werkzeug-internal benchmark module. It's used in combination with
    hg bisect to find out how the performance of some of Werkzeug's
    internal core parts changes over time.
:copyright: 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import division
import os
import gc
import sys
import subprocess
from cStringIO import StringIO
from timeit import default_timer as timer
from types import FunctionType
# create a new module where we later store all the werkzeug attributes.
wz = type(sys)('werkzeug_nonlazy')
sys.path.insert(0, '<DUMMY>')
null_out = open(os.devnull, 'w')
# ±4% are ignored
TOLERANCE = 0.04
MIN_RESOLUTION = 0.002
# we run each test 5 times
TEST_RUNS = 5
def find_hg_tag(path):
"""Returns the current node or tag for the given path."""
tags = {}
try:
client = subprocess.Popen(['hg', 'cat', '-r', 'tip', '.hgtags'],
stdout=subprocess.PIPE, cwd=path)
for line in client.communicate()[0].splitlines():
line = line.strip()
if not line:
continue
hash, tag = line.split()
tags[hash] = tag
except OSError:
return
client = subprocess.Popen(['hg', 'parent', '--template', '#node#'],
stdout=subprocess.PIPE, cwd=path)
tip = client.communicate()[0].strip()
tag = tags.get(tip)
if tag is not None:
return tag
return tip
def load_werkzeug(path):
"""Load werkzeug."""
sys.path[0] = path
# get rid of already imported stuff
wz.__dict__.clear()
for key in sys.modules.keys():
if key.startswith('werkzeug.') or key == 'werkzeug':
sys.modules.pop(key, None)
# import werkzeug again.
import werkzeug
for key in werkzeug.__all__:
setattr(wz, key, getattr(werkzeug, key))
# get the hg tag
hg_tag = find_hg_tag(path)
# get the real version from the setup file
try:
f = open(os.path.join(path, 'setup.py'))
except IOError:
pass
else:
try:
for line in f:
line = line.strip()
if line.startswith('version='):
return line[8:].strip(' \t,')[1:-1], hg_tag
finally:
f.close()
print >> sys.stderr, 'Unknown werkzeug version loaded'
sys.exit(2)
def median(seq):
seq = sorted(seq)
if not seq:
return 0.0
return seq[len(seq) // 2]
def format_func(func):
if type(func) is FunctionType:
name = func.__name__
else:
name = func
if name.startswith('time_'):
name = name[5:]
return name.replace('_', ' ').title()
def bench(func):
"""Times a single function."""
sys.stdout.write('%44s ' % format_func(func))
sys.stdout.flush()
# figure out how many times we have to run the function to
# get reliable timings.
for i in xrange(3, 10):
rounds = 1 << i
t = timer()
for x in xrange(rounds):
func()
if timer() - t >= 0.2:
break
# now run the tests without gc TEST_RUNS times and use the median
# value of these runs.
def _run():
gc.collect()
gc.disable()
try:
t = timer()
for x in xrange(rounds):
func()
return (timer() - t) / rounds * 1000
finally:
gc.enable()
delta = median(_run() for x in xrange(TEST_RUNS))
sys.stdout.write('%.4f\n' % delta)
sys.stdout.flush()
return delta
def main():
"""The main entrypoint."""
from optparse import OptionParser
parser = OptionParser(usage='%prog [options]')
parser.add_option('--werkzeug-path', '-p', dest='path', default='..',
help='the path to the werkzeug package. defaults to cwd')
parser.add_option('--compare', '-c', dest='compare', nargs=2,
default=False, help='compare two hg nodes of Werkzeug')
parser.add_option('--init-compare', dest='init_compare',
action='store_true', default=False,
help='Initializes the comparison feature')
options, args = parser.parse_args()
if args:
parser.error('Script takes no arguments')
if options.compare:
compare(*options.compare)
elif options.init_compare:
init_compare()
else:
run(options.path)
def init_compare():
"""Initializes the comparison feature."""
print 'Initializing comparison feature'
subprocess.Popen(['hg', 'clone', '..', 'a']).wait()
subprocess.Popen(['hg', 'clone', '..', 'b']).wait()
def compare(node1, node2):
"""Compares two Werkzeug hg versions."""
if not os.path.isdir('a'):
print >> sys.stderr, 'error: comparison feature not initialized'
sys.exit(4)
print '=' * 80
print 'WERKZEUG INTERNAL BENCHMARK -- COMPARE MODE'.center(80)
print '-' * 80
delim = '-' * 20
def _error(msg):
print >> sys.stderr, 'error:', msg
sys.exit(1)
def _hg_update(repo, node):
hg = lambda *x: subprocess.call(['hg'] + list(x), cwd=repo,
stdout=null_out, stderr=null_out)
hg('revert', '-a', '--no-backup')
client = subprocess.Popen(['hg', 'status', '--unknown', '-n', '-0'],
stdout=subprocess.PIPE, cwd=repo)
unknown = client.communicate()[0]
if unknown:
client = subprocess.Popen(['xargs', '-0', 'rm', '-f'], cwd=repo,
stdout=null_out, stdin=subprocess.PIPE)
client.communicate(unknown)
hg('pull', '../..')
hg('update', node)
if node == 'tip':
diff = subprocess.Popen(['hg', 'diff'], cwd='..',
stdout=subprocess.PIPE).communicate()[0]
if diff:
client = subprocess.Popen(['hg', 'import', '--no-commit', '-'],
cwd=repo, stdout=null_out,
stdin=subprocess.PIPE)
client.communicate(diff)
_hg_update('a', node1)
_hg_update('b', node2)
d1 = run('a', no_header=True)
d2 = run('b', no_header=True)
print 'DIRECT COMPARISON'.center(80)
print '-' * 80
for key in sorted(d1):
delta = d1[key] - d2[key]
if abs(1 - d1[key] / d2[key]) < TOLERANCE or \
abs(delta) < MIN_RESOLUTION:
delta = '=='
else:
delta = '%+.4f (%+d%%)' % \
(delta, round(d2[key] / d1[key] * 100 - 100))
print '%36s %.4f %.4f %s' % \
(format_func(key), d1[key], d2[key], delta)
print '-' * 80
def run(path, no_header=False):
path = os.path.abspath(path)
wz_version, hg_tag = load_werkzeug(path)
result = {}
if not no_header:
print '=' * 80
print 'WERKZEUG INTERNAL BENCHMARK'.center(80)
print '-' * 80
print 'Path: %s' % path
print 'Version: %s' % wz_version
if hg_tag is not None:
print 'HG Tag: %s' % hg_tag
print '-' * 80
for key, value in sorted(globals().items()):
if key.startswith('time_'):
before = globals().get('before_' + key[5:])
if before:
before()
result[key] = bench(value)
after = globals().get('after_' + key[5:])
if after:
after()
print '-' * 80
return result
URL_DECODED_DATA = dict((str(x), str(x)) for x in xrange(100))
URL_ENCODED_DATA = '&'.join('%s=%s' % x for x in URL_DECODED_DATA.items())
MULTIPART_ENCODED_DATA = '\n'.join((
'--foo',
'Content-Disposition: form-data; name=foo',
'',
'this is just bar',
'--foo',
'Content-Disposition: form-data; name=bar',
'',
'blafasel',
'--foo',
'Content-Disposition: form-data; name=foo; filename=wzbench.py',
'Content-Type: text/plain',
'',
open(__file__.rstrip('c')).read(),
'--foo--'
))
MULTIDICT = None
REQUEST = None
TEST_ENV = None
LOCAL = None
LOCAL_MANAGER = None
def time_url_decode():
wz.url_decode(URL_ENCODED_DATA)
def time_url_encode():
wz.url_encode(URL_DECODED_DATA)
def time_parse_form_data_multipart():
# use a hand written env creator so that we don't bench
# from_values which is known to be slowish in 0.5.1 and higher.
# we don't want to bench two things at once.
environ = {
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=foo',
'wsgi.input': StringIO(MULTIPART_ENCODED_DATA),
'CONTENT_LENGTH': str(len(MULTIPART_ENCODED_DATA))
}
request = wz.Request(environ)
request.form
def before_multidict_lookup_hit():
global MULTIDICT
MULTIDICT = wz.MultiDict({'foo': 'bar'})
def time_multidict_lookup_hit():
MULTIDICT['foo']
def after_multidict_lookup_hit():
global MULTIDICT
MULTIDICT = None
def before_multidict_lookup_miss():
global MULTIDICT
MULTIDICT = wz.MultiDict()
def time_multidict_lookup_miss():
try:
MULTIDICT['foo']
except KeyError:
pass
def after_multidict_lookup_miss():
global MULTIDICT
MULTIDICT = None
def time_cached_property():
class Foo(object):
@wz.cached_property
def x(self):
return 42
f = Foo()
for x in xrange(60):
f.x
def before_request_form_access():
global REQUEST
data = 'foo=bar&blah=blub'
REQUEST = wz.Request({
'CONTENT_LENGTH': str(len(data)),
'wsgi.input': StringIO(data),
'REQUEST_METHOD': 'POST',
'wsgi.version': (1, 0),
'QUERY_STRING': data,
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'PATH_INFO': '/',
'SCRIPT_NAME': ''
})
def time_request_form_access():
for x in xrange(30):
REQUEST.path
REQUEST.script_root
REQUEST.args['foo']
REQUEST.form['foo']
def after_request_form_access():
global REQUEST
REQUEST = None
def time_request_from_values():
wz.Request.from_values(base_url='http://www.google.com/',
query_string='foo=bar&blah=blaz',
input_stream=StringIO(MULTIPART_ENCODED_DATA),
content_length=len(MULTIPART_ENCODED_DATA),
content_type='multipart/form-data; '
'boundary=foo', method='POST')
def before_request_shallow_init():
global TEST_ENV
TEST_ENV = wz.create_environ()
def time_request_shallow_init():
wz.Request(TEST_ENV, shallow=True)
def after_request_shallow_init():
global TEST_ENV
TEST_ENV = None
def time_response_iter_performance():
resp = wz.Response(u'Hällo Wörld ' * 1000,
mimetype='text/html')
for item in resp({'REQUEST_METHOD': 'GET'}, lambda *s: None):
pass
def time_response_iter_head_performance():
resp = wz.Response(u'Hällo Wörld ' * 1000,
mimetype='text/html')
for item in resp({'REQUEST_METHOD': 'HEAD'}, lambda *s: None):
pass
def before_local_manager_dispatch():
global LOCAL_MANAGER, LOCAL
LOCAL = wz.Local()
LOCAL_MANAGER = wz.LocalManager([LOCAL])
def time_local_manager_dispatch():
for x in xrange(10):
LOCAL.x = 42
for x in xrange(10):
LOCAL.x
def after_local_manager_dispatch():
global LOCAL_MANAGER, LOCAL
LOCAL = LOCAL_MANAGER = None
def before_html_builder():
global TABLE
TABLE = [['col 1', 'col 2', 'col 3', '4', '5', '6'] for x in range(10)]
def time_html_builder():
html_rows = []
for row in TABLE:
html_cols = [wz.html.td(col, class_='col') for col in row]
html_rows.append(wz.html.tr(class_='row', *html_cols))
table = wz.html.table(*html_rows)
def after_html_builder():
global TABLE
TABLE = None
if __name__ == '__main__':
os.chdir(os.path.dirname(__file__) or os.path.curdir)
try:
main()
except KeyboardInterrupt:
print >> sys.stderr, 'interrupted!'
| bsd-3-clause |
ovnicraft/edx-platform | common/djangoapps/course_action_state/tests/test_managers.py | 126 | 7219 | # pylint: disable=invalid-name, attribute-defined-outside-init
"""
Tests for basic common operations related to Course Action State managers
"""
from ddt import ddt, data
from django.test import TestCase
from collections import namedtuple
from opaque_keys.edx.locations import CourseLocator
from course_action_state.models import CourseRerunState
from course_action_state.managers import CourseActionStateItemNotFoundError
# Sequence of Action models to be tested with ddt.
COURSE_ACTION_STATES = (CourseRerunState, )
class TestCourseActionStateManagerBase(TestCase):
"""
Base class for testing Course Action State Managers.
"""
def setUp(self):
super(TestCourseActionStateManagerBase, self).setUp()
self.course_key = CourseLocator("test_org", "test_course_num", "test_run")
@ddt
class TestCourseActionStateManager(TestCourseActionStateManagerBase):
"""
Test class for testing the CourseActionStateManager.
"""
@data(*COURSE_ACTION_STATES)
def test_update_state_allow_not_found_is_false(self, action_class):
with self.assertRaises(CourseActionStateItemNotFoundError):
action_class.objects.update_state(self.course_key, "fake_state", allow_not_found=False)
@data(*COURSE_ACTION_STATES)
def test_update_state_allow_not_found(self, action_class):
action_class.objects.update_state(self.course_key, "initial_state", allow_not_found=True)
self.assertIsNotNone(
action_class.objects.find_first(course_key=self.course_key)
)
@data(*COURSE_ACTION_STATES)
def test_delete(self, action_class):
obj = action_class.objects.update_state(self.course_key, "initial_state", allow_not_found=True)
action_class.objects.delete(obj.id)
with self.assertRaises(CourseActionStateItemNotFoundError):
action_class.objects.find_first(course_key=self.course_key)
@ddt
class TestCourseActionUIStateManager(TestCourseActionStateManagerBase):
"""
Test class for testing the CourseActionUIStateManager.
"""
def init_course_action_states(self, action_class):
"""
Creates course action state entries with different states for the given action model class.
Creates both displayable (should_display=True) and non-displayable (should_display=False) entries.
"""
def create_course_states(starting_course_num, ending_course_num, state, should_display=True):
"""
            Creates a list of course state tuples, generating unique course locators whose
            run names use the numbers from starting_course_num up to ending_course_num.
"""
CourseState = namedtuple('CourseState', 'course_key, state, should_display')
return [
CourseState(CourseLocator("org", "course", "run" + str(num)), state, should_display)
for num in range(starting_course_num, ending_course_num)
]
NUM_COURSES_WITH_STATE1 = 3
NUM_COURSES_WITH_STATE2 = 3
NUM_COURSES_WITH_STATE3 = 3
NUM_COURSES_NON_DISPLAYABLE = 3
# courses with state1 and should_display=True
self.courses_with_state1 = create_course_states(
0,
NUM_COURSES_WITH_STATE1,
'state1'
)
# courses with state2 and should_display=True
self.courses_with_state2 = create_course_states(
NUM_COURSES_WITH_STATE1,
NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2,
'state2'
)
# courses with state3 and should_display=True
self.courses_with_state3 = create_course_states(
NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2,
NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2 + NUM_COURSES_WITH_STATE3,
'state3'
)
# all courses with should_display=True
self.course_actions_displayable_states = (
self.courses_with_state1 + self.courses_with_state2 + self.courses_with_state3
)
# courses with state3 and should_display=False
self.courses_with_state3_non_displayable = create_course_states(
NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2 + NUM_COURSES_WITH_STATE3,
NUM_COURSES_WITH_STATE1 + NUM_COURSES_WITH_STATE2 + NUM_COURSES_WITH_STATE3 + NUM_COURSES_NON_DISPLAYABLE,
'state3',
should_display=False,
)
# create course action states for all courses
for CourseState in self.course_actions_displayable_states + self.courses_with_state3_non_displayable:
action_class.objects.update_state(
CourseState.course_key,
CourseState.state,
should_display=CourseState.should_display,
allow_not_found=True
)
def assertCourseActionStatesEqual(self, expected, found):
"""Asserts that the set of course keys in the expected state equal those that are found"""
self.assertSetEqual(
set(course_action_state.course_key for course_action_state in expected),
set(course_action_state.course_key for course_action_state in found))
@data(*COURSE_ACTION_STATES)
def test_find_all_for_display(self, action_class):
self.init_course_action_states(action_class)
self.assertCourseActionStatesEqual(
self.course_actions_displayable_states,
action_class.objects.find_all(should_display=True),
)
@data(*COURSE_ACTION_STATES)
def test_find_all_for_display_filter_exclude(self, action_class):
self.init_course_action_states(action_class)
for course_action_state, filter_state, exclude_state in (
(self.courses_with_state1, 'state1', None), # filter for state1
(self.courses_with_state2, 'state2', None), # filter for state2
(self.courses_with_state2 + self.courses_with_state3, None, 'state1'), # exclude state1
(self.courses_with_state1 + self.courses_with_state3, None, 'state2'), # exclude state2
(self.courses_with_state1, 'state1', 'state2'), # filter for state1, exclude state2
([], 'state1', 'state1'), # filter for state1, exclude state1
):
self.assertCourseActionStatesEqual(
course_action_state,
action_class.objects.find_all(
exclude_args=({'state': exclude_state} if exclude_state else None),
should_display=True,
**({'state': filter_state} if filter_state else {})
)
)
def test_kwargs_in_update_state(self):
destination_course_key = CourseLocator("org", "course", "run")
source_course_key = CourseLocator("source_org", "source_course", "source_run")
CourseRerunState.objects.update_state(
course_key=destination_course_key,
new_state='state1',
allow_not_found=True,
source_course_key=source_course_key,
)
found_action_state = CourseRerunState.objects.find_first(course_key=destination_course_key)
self.assertEquals(source_course_key, found_action_state.source_course_key)
| agpl-3.0 |
nanolearningllc/edx-platform-cypress-2 | cms/djangoapps/contentstore/courseware_index.py | 60 | 26790 | """ Code to allow module store to interface with courseware index """
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
from datetime import timedelta
import logging
import re
from six import add_metaclass
from django.conf import settings
from django.utils.translation import ugettext as _
from django.core.urlresolvers import resolve
from contentstore.utils import course_image_url
from contentstore.course_group_config import GroupConfiguration
from course_modes.models import CourseMode
from eventtracking import tracker
from search.search_engine_base import SearchEngine
from xmodule.annotator_mixin import html_to_text
from xmodule.modulestore import ModuleStoreEnum
from xmodule.library_tools import normalize_key_for_search
# REINDEX_AGE is the default amount of time that we look back for changes
# that might have happened. If we are provided with a time at which the
# indexing was triggered, then it is safe to index only the items changed
# recently relative to that time. REINDEX_AGE is the period that determines
# how far back from the trigger point we look when deciding what to index.
REINDEX_AGE = timedelta(0, 60) # 60 seconds
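# Illustrative reading (see the skip check in `prepare_item_index` below):
# an item keeps its existing index entry, rather than being re-indexed, when
#   triggered_at is not None and (triggered_at - item.subtree_edited_on) > REINDEX_AGE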
log = logging.getLogger('edx.modulestore')
def strip_html_content_to_text(html_content):
""" Gets only the textual part for html content - useful for building text to be searched """
# Removing HTML-encoded non-breaking space characters
text_content = re.sub(r"(\s| |//)+", " ", html_to_text(html_content))
# Removing HTML CDATA
text_content = re.sub(r"<!\[CDATA\[.*\]\]>", "", text_content)
# Removing HTML comments
text_content = re.sub(r"<!--.*-->", "", text_content)
return text_content
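# Rough usage sketch (hypothetical input, not part of the original module):
#   strip_html_content_to_text(u'<p>Hello&nbsp;world<!-- note --></p>')
# should come back as plain text along the lines of u"Hello world", once
# html_to_text has dropped the markup and the substitutions above have
# collapsed encoded spaces, CDATA sections and comments.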
def indexing_is_enabled():
"""
Checks to see if the indexing feature is enabled
"""
return settings.FEATURES.get('ENABLE_COURSEWARE_INDEX', False)
class SearchIndexingError(Exception):
""" Indicates some error(s) occured during indexing """
def __init__(self, message, error_list):
super(SearchIndexingError, self).__init__(message)
self.error_list = error_list
@add_metaclass(ABCMeta)
class SearchIndexerBase(object):
"""
Base class to perform indexing for courseware or library search from different modulestores
"""
__metaclass__ = ABCMeta
INDEX_NAME = None
DOCUMENT_TYPE = None
ENABLE_INDEXING_KEY = None
INDEX_EVENT = {
'name': None,
'category': None
}
@classmethod
def indexing_is_enabled(cls):
"""
Checks to see if the indexing feature is enabled
"""
return settings.FEATURES.get(cls.ENABLE_INDEXING_KEY, False)
@classmethod
@abstractmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
@classmethod
@abstractmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
@classmethod
@abstractmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
@classmethod
def _id_modifier(cls, usage_id):
""" Modifies usage_id to submit to index """
return usage_id
@classmethod
def remove_deleted_items(cls, searcher, structure_key, exclude_items):
"""
        remove any item that is present in the search index but not present in the
        updated list of indexed items; as we find items, we can shorten the set of
        items to keep
"""
response = searcher.search(
doc_type=cls.DOCUMENT_TYPE,
field_dictionary=cls._get_location_info(structure_key),
exclude_dictionary={"id": list(exclude_items)}
)
result_ids = [result["data"]["id"] for result in response["results"]]
searcher.remove(cls.DOCUMENT_TYPE, result_ids)
@classmethod
def index(cls, modulestore, structure_key, triggered_at=None, reindex_age=REINDEX_AGE):
"""
Process course for indexing
Arguments:
modulestore - modulestore object to use for operations
structure_key (CourseKey|LibraryKey) - course or library identifier
        triggered_at (datetime) - time at which indexing was triggered;
            useful for index updates - only items changed within REINDEX_AGE
            of that time will have their index entries updated, while other
            items skip re-indexing but are still walked through in order to
            identify which items may need to be removed from the index.
            If None, then a full reindex takes place
Returns:
Number of items that have been added to the index
"""
error_list = []
searcher = SearchEngine.get_search_engine(cls.INDEX_NAME)
if not searcher:
return
structure_key = cls.normalize_structure_key(structure_key)
location_info = cls._get_location_info(structure_key)
        # Wrap the counter in a dictionary - Python 2 has no `nonlocal`, so a plain
        # integer could not be rebound inside the embedded function `prepare_item_index`
indexed_count = {
"count": 0
}
# indexed_items is a list of all the items that we wish to remain in the
# index, whether or not we are planning to actually update their index.
# This is used in order to build a query to remove those items not in this
# list - those are ready to be destroyed
indexed_items = set()
        # items_index is a list of the index dictionaries for all items.
        # It collects every document so that they can be indexed with a single
        # bulk API call instead of one index API call per item.
items_index = []
def get_item_location(item):
"""
Gets the version agnostic item location
"""
return item.location.version_agnostic().replace(branch=None)
def prepare_item_index(item, skip_index=False, groups_usage_info=None):
"""
Add this item to the items_index and indexed_items list
Arguments:
item - item to add to index, its children will be processed recursively
skip_index - simply walk the children in the tree, the content change is
older than the REINDEX_AGE window and would have been already indexed.
This should really only be passed from the recursive child calls when
this method has determined that it is safe to do so
Returns:
item_content_groups - content groups assigned to indexed item
"""
is_indexable = hasattr(item, "index_dictionary")
item_index_dictionary = item.index_dictionary() if is_indexable else None
# if it's not indexable and it does not have children, then ignore
if not item_index_dictionary and not item.has_children:
return
item_content_groups = None
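            # For split_test items, record which experiment group displays each
            # child (and each component within that child), keyed by the
            # version-agnostic location of the block.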
if item.category == "split_test":
split_partition = item.get_selected_partition()
for split_test_child in item.get_children():
if split_partition:
for group in split_partition.groups:
group_id = unicode(group.id)
child_location = item.group_id_to_child.get(group_id, None)
if child_location == split_test_child.location:
groups_usage_info.update({
unicode(get_item_location(split_test_child)): [group_id],
})
for component in split_test_child.get_children():
groups_usage_info.update({
unicode(get_item_location(component)): [group_id]
})
if groups_usage_info:
item_location = get_item_location(item)
item_content_groups = groups_usage_info.get(unicode(item_location), None)
item_id = unicode(cls._id_modifier(item.scope_ids.usage_id))
indexed_items.add(item_id)
if item.has_children:
                # Determine whether it is okay to skip indexing the children, based on
                # how recently the subtree was edited relative to the reindex window
skip_child_index = skip_index or \
(triggered_at is not None and (triggered_at - item.subtree_edited_on) > reindex_age)
children_groups_usage = []
for child_item in item.get_children():
if modulestore.has_published_version(child_item):
children_groups_usage.append(
prepare_item_index(
child_item,
skip_index=skip_child_index,
groups_usage_info=groups_usage_info
)
)
if None in children_groups_usage:
item_content_groups = None
if skip_index or not item_index_dictionary:
return
item_index = {}
# if it has something to add to the index, then add it
try:
item_index.update(location_info)
item_index.update(item_index_dictionary)
item_index['id'] = item_id
if item.start:
item_index['start_date'] = item.start
item_index['content_groups'] = item_content_groups if item_content_groups else None
item_index.update(cls.supplemental_fields(item))
items_index.append(item_index)
indexed_count["count"] += 1
return item_content_groups
except Exception as err: # pylint: disable=broad-except
# broad exception so that index operation does not fail on one item of many
log.warning('Could not index item: %s - %r', item.location, err)
error_list.append(_('Could not index item: {}').format(item.location))
try:
with modulestore.branch_setting(ModuleStoreEnum.RevisionOption.published_only):
structure = cls._fetch_top_level(modulestore, structure_key)
groups_usage_info = cls.fetch_group_usage(modulestore, structure)
# First perform any additional indexing from the structure object
cls.supplemental_index_information(modulestore, structure)
# Now index the content
for item in structure.get_children():
prepare_item_index(item, groups_usage_info=groups_usage_info)
searcher.index(cls.DOCUMENT_TYPE, items_index)
cls.remove_deleted_items(searcher, structure_key, indexed_items)
except Exception as err: # pylint: disable=broad-except
# broad exception so that index operation does not prevent the rest of the application from working
log.exception(
"Indexing error encountered, courseware index may be out of date %s - %r",
structure_key,
err
)
error_list.append(_('General indexing error occurred'))
if error_list:
raise SearchIndexingError('Error(s) present during indexing', error_list)
return indexed_count["count"]
@classmethod
def _do_reindex(cls, modulestore, structure_key):
"""
(Re)index all content within the given structure (course or library),
tracking the fact that a full reindex has taken place
"""
indexed_count = cls.index(modulestore, structure_key)
if indexed_count:
cls._track_index_request(cls.INDEX_EVENT['name'], cls.INDEX_EVENT['category'], indexed_count)
return indexed_count
@classmethod
def _track_index_request(cls, event_name, category, indexed_count):
"""Track content index requests.
Arguments:
event_name (str): Name of the event to be logged.
category (str): category of indexed items
indexed_count (int): number of indexed items
Returns:
None
"""
data = {
"indexed_count": indexed_count,
'category': category,
}
tracker.emit(
event_name,
data
)
@classmethod
def fetch_group_usage(cls, modulestore, structure): # pylint: disable=unused-argument
"""
Base implementation of fetch group usage on course/library.
"""
return None
@classmethod
def supplemental_index_information(cls, modulestore, structure):
"""
Perform any supplemental indexing given that the structure object has
already been loaded. Base implementation performs no operation.
Arguments:
modulestore - modulestore object used during the indexing operation
structure - structure object loaded during the indexing job
Returns:
None
"""
pass
@classmethod
def supplemental_fields(cls, item): # pylint: disable=unused-argument
"""
Any supplemental fields that get added to the index for the specified
item. Base implementation returns an empty dictionary
"""
return {}
class CoursewareSearchIndexer(SearchIndexerBase):
"""
Class to perform indexing for courseware search from different modulestores
"""
INDEX_NAME = "courseware_index"
DOCUMENT_TYPE = "courseware_content"
ENABLE_INDEXING_KEY = 'ENABLE_COURSEWARE_INDEX'
INDEX_EVENT = {
'name': 'edx.course.index.reindexed',
'category': 'courseware_index'
}
UNNAMED_MODULE_NAME = _("(Unnamed)")
@classmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
return structure_key
@classmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
return modulestore.get_course(structure_key, depth=None)
@classmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
return {"course": unicode(normalized_structure_key), "org": normalized_structure_key.org}
@classmethod
def do_course_reindex(cls, modulestore, course_key):
"""
(Re)index all content within the given course, tracking the fact that a full reindex has taken place
"""
return cls._do_reindex(modulestore, course_key)
@classmethod
def fetch_group_usage(cls, modulestore, structure):
groups_usage_dict = {}
groups_usage_info = GroupConfiguration.get_content_groups_usage_info(modulestore, structure).items()
groups_usage_info.extend(
GroupConfiguration.get_content_groups_items_usage_info(
modulestore,
structure
).items()
)
if groups_usage_info:
for name, group in groups_usage_info:
for module in group:
view, args, kwargs = resolve(module['url']) # pylint: disable=unused-variable
usage_key_string = unicode(kwargs['usage_key_string'])
if groups_usage_dict.get(usage_key_string, None):
groups_usage_dict[usage_key_string].append(name)
else:
groups_usage_dict[usage_key_string] = [name]
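        # Hypothetical shape of the result: a mapping from usage-key strings to
        # the content-group names associated with them, e.g.
        #   {u'<usage_key_string>': ['Group A', 'Group B']}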
return groups_usage_dict
@classmethod
def supplemental_index_information(cls, modulestore, structure):
"""
Perform additional indexing from loaded structure object
"""
CourseAboutSearchIndexer.index_about_information(modulestore, structure)
@classmethod
def supplemental_fields(cls, item):
"""
Add location path to the item object
Once we've established the path of names, the first name is the course
name, and the next 3 names are the navigable path within the edx
application. Notice that we stop at that level because a full path to
deep children would be confusing.
"""
location_path = []
parent = item
while parent is not None:
path_component_name = parent.display_name
if not path_component_name:
path_component_name = cls.UNNAMED_MODULE_NAME
location_path.append(path_component_name)
parent = parent.get_parent()
location_path.reverse()
return {
"course_name": location_path[0],
"location": location_path[1:4]
}
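    # Hypothetical example of the result: for an item whose ancestry reads
    #   "Demo Course" > "Week 1" > "Lesson 2" > "Unit 3" > "Problem 4"
    # this method would return
    #   {"course_name": "Demo Course", "location": ["Week 1", "Lesson 2", "Unit 3"]}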
class LibrarySearchIndexer(SearchIndexerBase):
"""
Base class to perform indexing for library search from different modulestores
"""
INDEX_NAME = "library_index"
DOCUMENT_TYPE = "library_content"
ENABLE_INDEXING_KEY = 'ENABLE_LIBRARY_INDEX'
INDEX_EVENT = {
'name': 'edx.library.index.reindexed',
'category': 'library_index'
}
@classmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
return normalize_key_for_search(structure_key)
@classmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
return modulestore.get_library(structure_key, depth=None)
@classmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
return {"library": unicode(normalized_structure_key)}
@classmethod
def _id_modifier(cls, usage_id):
""" Modifies usage_id to submit to index """
return usage_id.replace(library_key=(usage_id.library_key.replace(version_guid=None, branch=None)))
@classmethod
def do_library_reindex(cls, modulestore, library_key):
"""
(Re)index all content within the given library, tracking the fact that a full reindex has taken place
"""
return cls._do_reindex(modulestore, library_key)
class AboutInfo(object):
""" About info structure to contain
1) Property name to use
    2) Where to add in the index (using the flags below)
    3) Where to source the property's value
"""
# Bitwise Flags for where to index the information
#
    # ANALYSE - states that the property text contains content within which we wish to find matches
    #    e.g. "joe" should yield a result for "I'd like to drink a cup of joe"
#
# PROPERTY - states that the property text should be a property of the indexed document, to be returned with the
# results: search matches will only be made on exact string matches
# e.g. "joe" will only match on "joe"
#
# We are using bitwise flags because one may want to add the property to EITHER or BOTH parts of the index
# e.g. university name is desired to be analysed, so that a search on "Oxford" will match
# property values "University of Oxford" and "Oxford Brookes University",
# but it is also a useful property, because within a (future) filtered search a user
# may have chosen to filter courses from "University of Oxford"
#
# see https://wiki.python.org/moin/BitwiseOperators for information about bitwise shift operator used below
#
ANALYSE = 1 << 0 # Add the information to the analysed content of the index
PROPERTY = 1 << 1 # Add the information as a property of the object being indexed (not analysed)
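    # Illustrative sketch of how the flags combine and are tested (mirroring
    # their use in CourseAboutSearchIndexer.index_about_information below):
    #   index_flags = ANALYSE | PROPERTY   # 0b01 | 0b10 == 0b11 -> both parts
    #   index_flags & ANALYSE              # non-zero -> add to analysed content
    #   index_flags & PROPERTY             # non-zero -> add as a plain property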
def __init__(self, property_name, index_flags, source_from):
self.property_name = property_name
self.index_flags = index_flags
self.source_from = source_from
def get_value(self, **kwargs):
""" get the value for this piece of information, using the correct source """
return self.source_from(self, **kwargs)
def from_about_dictionary(self, **kwargs):
""" gets the value from the kwargs provided 'about_dictionary' """
about_dictionary = kwargs.get('about_dictionary', None)
if not about_dictionary:
raise ValueError("Context dictionary does not contain expected argument 'about_dictionary'")
return about_dictionary.get(self.property_name, None)
def from_course_property(self, **kwargs):
""" gets the value from the kwargs provided 'course' """
course = kwargs.get('course', None)
if not course:
raise ValueError("Context dictionary does not contain expected argument 'course'")
return getattr(course, self.property_name, None)
def from_course_mode(self, **kwargs):
""" fetches the available course modes from the CourseMode model """
course = kwargs.get('course', None)
if not course:
raise ValueError("Context dictionary does not contain expected argument 'course'")
return [mode.slug for mode in CourseMode.modes_for_course(course.id)]
# Source location options - either from the course or the about info
FROM_ABOUT_INFO = from_about_dictionary
FROM_COURSE_PROPERTY = from_course_property
FROM_COURSE_MODE = from_course_mode
class CourseAboutSearchIndexer(object):
"""
Class to perform indexing of about information from course object
"""
DISCOVERY_DOCUMENT_TYPE = "course_info"
INDEX_NAME = CoursewareSearchIndexer.INDEX_NAME
# List of properties to add to the index - each item in the list is an instance of AboutInfo object
ABOUT_INFORMATION_TO_INCLUDE = [
AboutInfo("advertised_start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("announcement", AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("end", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("effort", AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("display_name", AboutInfo.ANALYSE, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("overview", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("title", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("university", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("number", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("short_description", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("description", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("key_dates", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("video", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("course_staff_short", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("course_staff_extended", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("requirements", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("syllabus", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("textbook", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("faq", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("more_info", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("ocw_links", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("enrollment_start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("enrollment_end", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("org", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("modes", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_MODE),
AboutInfo("language", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
]
@classmethod
def index_about_information(cls, modulestore, course):
"""
Add the given course to the course discovery index
Arguments:
modulestore - modulestore object to use for operations
course - course object from which to take properties, locate about information
"""
searcher = SearchEngine.get_search_engine(cls.INDEX_NAME)
if not searcher:
return
course_id = unicode(course.id)
course_info = {
'id': course_id,
'course': course_id,
'content': {},
'image_url': course_image_url(course),
}
# load data for all of the 'about' modules for this course into a dictionary
about_dictionary = {
item.location.name: item.data
for item in modulestore.get_items(course.id, qualifiers={"category": "about"})
}
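        # Hypothetical shape of about_dictionary, keyed by the about item names
        # listed in ABOUT_INFORMATION_TO_INCLUDE:
        #   {'overview': '<p>About this course...</p>', 'effort': '4 hours/week'}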
about_context = {
"course": course,
"about_dictionary": about_dictionary,
}
for about_information in cls.ABOUT_INFORMATION_TO_INCLUDE:
# Broad exception handler so that a single bad property does not scupper the collection of others
try:
section_content = about_information.get_value(**about_context)
except: # pylint: disable=bare-except
section_content = None
log.warning(
"Course discovery could not collect property %s for course %s",
about_information.property_name,
course_id,
exc_info=True,
)
if section_content:
if about_information.index_flags & AboutInfo.ANALYSE:
analyse_content = section_content
if isinstance(section_content, basestring):
analyse_content = strip_html_content_to_text(section_content)
course_info['content'][about_information.property_name] = analyse_content
if about_information.index_flags & AboutInfo.PROPERTY:
course_info[about_information.property_name] = section_content
# Broad exception handler to protect around and report problems with indexing
try:
searcher.index(cls.DISCOVERY_DOCUMENT_TYPE, [course_info])
except: # pylint: disable=bare-except
log.exception(
"Course discovery indexing error encountered, course discovery index may be out of date %s",
course_id,
)
raise
log.debug(
"Successfully added %s course to the course discovery index",
course_id
)
| agpl-3.0 |
dongjoon-hyun/tensorflow | tensorflow/python/saved_model/utils_test.py | 62 | 5133 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModel utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import utils
class UtilsTest(test.TestCase):
def testBuildTensorInfoDense(self):
x = array_ops.placeholder(dtypes.float32, 1, name="x")
x_tensor_info = utils.build_tensor_info(x)
self.assertEqual("x:0", x_tensor_info.name)
self.assertEqual(types_pb2.DT_FLOAT, x_tensor_info.dtype)
self.assertEqual(1, len(x_tensor_info.tensor_shape.dim))
self.assertEqual(1, x_tensor_info.tensor_shape.dim[0].size)
def testBuildTensorInfoSparse(self):
x = array_ops.sparse_placeholder(dtypes.float32, [42, 69], name="x")
x_tensor_info = utils.build_tensor_info(x)
self.assertEqual(x.values.name,
x_tensor_info.coo_sparse.values_tensor_name)
self.assertEqual(x.indices.name,
x_tensor_info.coo_sparse.indices_tensor_name)
self.assertEqual(x.dense_shape.name,
x_tensor_info.coo_sparse.dense_shape_tensor_name)
self.assertEqual(types_pb2.DT_FLOAT, x_tensor_info.dtype)
self.assertEqual(2, len(x_tensor_info.tensor_shape.dim))
self.assertEqual(42, x_tensor_info.tensor_shape.dim[0].size)
self.assertEqual(69, x_tensor_info.tensor_shape.dim[1].size)
def testGetTensorFromInfoDense(self):
expected = array_ops.placeholder(dtypes.float32, 1, name="x")
tensor_info = utils.build_tensor_info(expected)
actual = utils.get_tensor_from_tensor_info(tensor_info)
self.assertIsInstance(actual, ops.Tensor)
self.assertEqual(expected.name, actual.name)
def testGetTensorFromInfoSparse(self):
expected = array_ops.sparse_placeholder(dtypes.float32, name="x")
tensor_info = utils.build_tensor_info(expected)
actual = utils.get_tensor_from_tensor_info(tensor_info)
self.assertIsInstance(actual, sparse_tensor.SparseTensor)
self.assertEqual(expected.values.name, actual.values.name)
self.assertEqual(expected.indices.name, actual.indices.name)
self.assertEqual(expected.dense_shape.name, actual.dense_shape.name)
def testGetTensorFromInfoInOtherGraph(self):
with ops.Graph().as_default() as expected_graph:
expected = array_ops.placeholder(dtypes.float32, 1, name="right")
tensor_info = utils.build_tensor_info(expected)
with ops.Graph().as_default(): # Some other graph.
array_ops.placeholder(dtypes.float32, 1, name="other")
actual = utils.get_tensor_from_tensor_info(tensor_info,
graph=expected_graph)
self.assertIsInstance(actual, ops.Tensor)
self.assertIs(actual.graph, expected_graph)
self.assertEqual(expected.name, actual.name)
def testGetTensorFromInfoInScope(self):
# Build a TensorInfo with name "bar/x:0".
with ops.Graph().as_default():
with ops.name_scope("bar"):
unscoped = array_ops.placeholder(dtypes.float32, 1, name="x")
tensor_info = utils.build_tensor_info(unscoped)
self.assertEqual("bar/x:0", tensor_info.name)
# Build a graph with node "foo/bar/x:0", akin to importing into scope foo.
with ops.Graph().as_default():
with ops.name_scope("foo"):
with ops.name_scope("bar"):
expected = array_ops.placeholder(dtypes.float32, 1, name="x")
self.assertEqual("foo/bar/x:0", expected.name)
# Test that tensor is found by prepending the import scope.
actual = utils.get_tensor_from_tensor_info(tensor_info,
import_scope="foo")
self.assertEqual(expected.name, actual.name)
def testGetTensorFromInfoRaisesErrors(self):
expected = array_ops.placeholder(dtypes.float32, 1, name="x")
tensor_info = utils.build_tensor_info(expected)
    tensor_info.name = "blah:0"  # Nonexistent name.
with self.assertRaises(KeyError):
utils.get_tensor_from_tensor_info(tensor_info)
tensor_info.ClearField("name") # Malformed (missing encoding).
with self.assertRaises(ValueError):
utils.get_tensor_from_tensor_info(tensor_info)
if __name__ == "__main__":
test.main()
| apache-2.0 |
gymnasium/edx-platform | lms/djangoapps/certificates/tests/tests.py | 9 | 10197 | """
Tests for the certificates models.
"""
from datetime import datetime, timedelta
from ddt import data, ddt, unpack
from pytz import UTC
from django.conf import settings
from milestones.tests.utils import MilestonesTestCaseMixin
from mock import patch
from nose.plugins.attrib import attr
from badges.tests.factories import CourseCompleteImageConfigurationFactory
from lms.djangoapps.certificates.models import (
CertificateStatuses,
GeneratedCertificate,
certificate_info_for_user,
certificate_status_for_student
)
from lms.djangoapps.certificates.tests.factories import GeneratedCertificateFactory
from student.models import CourseEnrollment
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from util.milestones_helpers import milestones_achieved_by_user, set_prerequisite_courses
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@attr(shard=1)
@ddt
class CertificatesModelTest(ModuleStoreTestCase, MilestonesTestCaseMixin):
"""
Tests for the GeneratedCertificate model
"""
def setUp(self):
super(CertificatesModelTest, self).setUp()
today = datetime.now(UTC)
self.instructor_paced_course = CourseFactory.create(
org='edx', number='instructor', display_name='Instructor Paced Course',
start=today - timedelta(days=30),
end=today - timedelta(days=2),
certificate_available_date=today - timedelta(days=1),
self_paced=False
)
self.self_paced_course = CourseFactory.create(
org='edx', number='self',
display_name='Self Paced Course', self_paced=True
)
def test_certificate_status_for_student(self):
student = UserFactory()
course = CourseFactory.create(org='edx', number='verified', display_name='Verified Course')
certificate_status = certificate_status_for_student(student, course.id)
self.assertEqual(certificate_status['status'], CertificateStatuses.unavailable)
self.assertEqual(certificate_status['mode'], GeneratedCertificate.MODES.honor)
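    # In the ddt rows below, each `output` triple is read as [certificate
    # eligible, certificate delivered, certificate type] - a labelling assumed
    # from how certificate_info_for_user feeds the instructor grade reports.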
@unpack
@data(
{'allow_certificate': False, 'whitelisted': False, 'grade': None, 'output': ['N', 'N', 'N/A']},
{'allow_certificate': True, 'whitelisted': True, 'grade': None, 'output': ['Y', 'N', 'N/A']},
{'allow_certificate': True, 'whitelisted': False, 'grade': 0.9, 'output': ['N', 'N', 'N/A']},
{'allow_certificate': False, 'whitelisted': True, 'grade': 0.8, 'output': ['N', 'N', 'N/A']},
{'allow_certificate': False, 'whitelisted': None, 'grade': 0.8, 'output': ['N', 'N', 'N/A']}
)
def test_certificate_info_for_user(self, allow_certificate, whitelisted, grade, output):
"""
Verify that certificate_info_for_user works.
"""
student = UserFactory()
student.profile.allow_certificate = allow_certificate
student.profile.save()
# for instructor paced course
certificate_info = certificate_info_for_user(
student, self.instructor_paced_course.id, grade,
whitelisted, user_certificate=None
)
self.assertEqual(certificate_info, output)
# for self paced course
certificate_info = certificate_info_for_user(
student, self.self_paced_course.id, grade,
whitelisted, user_certificate=None
)
self.assertEqual(certificate_info, output)
@unpack
@data(
{'allow_certificate': False, 'whitelisted': False, 'grade': None, 'output': ['N', 'Y', 'honor']},
{'allow_certificate': True, 'whitelisted': True, 'grade': None, 'output': ['Y', 'Y', 'honor']},
{'allow_certificate': True, 'whitelisted': False, 'grade': 0.9, 'output': ['Y', 'Y', 'honor']},
{'allow_certificate': False, 'whitelisted': True, 'grade': 0.8, 'output': ['N', 'Y', 'honor']},
{'allow_certificate': False, 'whitelisted': None, 'grade': 0.8, 'output': ['N', 'Y', 'honor']},
{'allow_certificate': True, 'whitelisted': None, 'grade': None, 'output': ['Y', 'Y', 'honor']},
{'allow_certificate': False, 'whitelisted': True, 'grade': None, 'output': ['N', 'Y', 'honor']}
)
def test_certificate_info_for_user_when_grade_changes(self, allow_certificate, whitelisted, grade, output):
"""
        Verify that certificate_info_for_user works as expected in the scenario where the
        grading of problems changes after certificates have already been generated. In that
        scenario, `Certificate delivered` should not depend on the student's eligibility for
        a certificate, since eligibility can change over time.
"""
student = UserFactory()
student.profile.allow_certificate = allow_certificate
student.profile.save()
certificate1 = GeneratedCertificateFactory.create(
user=student,
course_id=self.instructor_paced_course.id,
status=CertificateStatuses.downloadable,
mode='honor'
)
certificate2 = GeneratedCertificateFactory.create(
user=student,
course_id=self.self_paced_course.id,
status=CertificateStatuses.downloadable,
mode='honor'
)
# for instructor paced course
certificate_info = certificate_info_for_user(
student, self.instructor_paced_course.id, grade,
whitelisted, certificate1
)
self.assertEqual(certificate_info, output)
# for self paced course
certificate_info = certificate_info_for_user(
student, self.self_paced_course.id, grade,
whitelisted, certificate2
)
self.assertEqual(certificate_info, output)
@unpack
@data(
{'allow_certificate': True, 'whitelisted': False, 'grade': 0.8, 'mode': 'audit', 'output': ['N', 'N', 'N/A']},
{'allow_certificate': True, 'whitelisted': True, 'grade': 0.8, 'mode': 'audit', 'output': ['Y', 'N', 'N/A']},
{'allow_certificate': True, 'whitelisted': False, 'grade': 0.8, 'mode': 'verified', 'output': ['Y', 'N', 'N/A']}
)
def test_certificate_info_for_user_with_course_modes(self, allow_certificate, whitelisted, grade, mode, output):
"""
Verify that certificate_info_for_user works with course modes.
"""
user = UserFactory.create()
user.profile.allow_certificate = allow_certificate
user.profile.save()
_ = CourseEnrollment.enroll(user, self.instructor_paced_course.id, mode)
certificate_info = certificate_info_for_user(
user, self.instructor_paced_course.id, grade,
whitelisted, user_certificate=None
)
self.assertEqual(certificate_info, output)
def test_course_ids_with_certs_for_user(self):
# Create one user with certs and one without
student_no_certs = UserFactory()
student_with_certs = UserFactory()
student_with_certs.profile.allow_certificate = True
student_with_certs.profile.save()
# Set up a couple of courses
course_1 = CourseFactory.create()
course_2 = CourseFactory.create()
# Generate certificates
GeneratedCertificateFactory.create(
user=student_with_certs,
course_id=course_1.id,
status=CertificateStatuses.downloadable,
mode='honor'
)
GeneratedCertificateFactory.create(
user=student_with_certs,
course_id=course_2.id,
status=CertificateStatuses.downloadable,
mode='honor'
)
# User with no certs should return an empty set.
self.assertSetEqual(
GeneratedCertificate.course_ids_with_certs_for_user(student_no_certs),
set()
)
# User with certs should return a set with the two course_ids
self.assertSetEqual(
GeneratedCertificate.course_ids_with_certs_for_user(student_with_certs),
{course_1.id, course_2.id}
)
@patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True})
def test_course_milestone_collected(self):
student = UserFactory()
course = CourseFactory.create(org='edx', number='998', display_name='Test Course')
pre_requisite_course = CourseFactory.create(org='edx', number='999', display_name='Pre requisite Course')
# set pre-requisite course
set_prerequisite_courses(course.id, [unicode(pre_requisite_course.id)])
# get milestones collected by user before completing the pre-requisite course
completed_milestones = milestones_achieved_by_user(student, unicode(pre_requisite_course.id))
self.assertEqual(len(completed_milestones), 0)
GeneratedCertificateFactory.create(
user=student,
course_id=pre_requisite_course.id,
status=CertificateStatuses.generating,
mode='verified'
)
# get milestones collected by user after user has completed the pre-requisite course
completed_milestones = milestones_achieved_by_user(student, unicode(pre_requisite_course.id))
self.assertEqual(len(completed_milestones), 1)
self.assertEqual(completed_milestones[0]['namespace'], unicode(pre_requisite_course.id))
@patch.dict(settings.FEATURES, {'ENABLE_OPENBADGES': True})
@patch('badges.backends.badgr.BadgrBackend', spec=True)
def test_badge_callback(self, handler):
student = UserFactory()
course = CourseFactory.create(org='edx', number='998', display_name='Test Course', issue_badges=True)
CourseCompleteImageConfigurationFactory()
CourseEnrollmentFactory(user=student, course_id=course.location.course_key, mode='honor')
cert = GeneratedCertificateFactory.create(
user=student,
course_id=course.id,
status=CertificateStatuses.generating,
mode='verified'
)
cert.status = CertificateStatuses.downloadable
cert.save()
self.assertTrue(handler.return_value.award.called)
| agpl-3.0 |
oihane/odoo | addons/product/report/__init__.py | 452 | 1080 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_pricelist
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |