repo_name stringlengths 6–100 | path stringlengths 4–294 | copies stringlengths 1–5 | size stringlengths 4–6 | content stringlengths 606–896k | license stringclasses 15 values
---|---|---|---|---|---|
haxwithaxe/supybot | src/utils/str.py | 5 | 14491 | ###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2008-2009, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Simple utility functions related to strings.
"""
import re
import new
import sys
import time
import string
import textwrap
from .iter import all, any
from .structures import TwoWayDictionary
curry = new.instancemethod
chars = string.maketrans('', '')
def rsplit(s, sep=None, maxsplit=-1):
"""Equivalent to str.split, except splitting from the right."""
if sys.version_info < (2, 4, 0):
if sep is not None:
sep = sep[::-1]
L = s[::-1].split(sep, maxsplit)
L.reverse()
return [s[::-1] for s in L]
else:
return s.rsplit(sep, maxsplit)
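# Illustration (added; not in the original module): splitting from the
# right keeps the tail intact, unlike str.split:
#   rsplit('a.b.c', '.', 1)  =>  ['a.b', 'c']
#   'a.b.c'.split('.', 1)    =>  ['a', 'b.c']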
def normalizeWhitespace(s):
"""Normalizes the whitespace in a string; \s+ becomes one space."""
return ' '.join(s.split())
def distance(s, t):
"""Returns the levenshtein edit distance between two strings."""
n = len(s)
m = len(t)
if n == 0:
return m
elif m == 0:
return n
d = []
for i in xrange(n+1):
d.append([])
for j in xrange(m+1):
d[i].append(0)
d[0][j] = j
d[i][0] = i
for i in xrange(1, n+1):
cs = s[i-1]
for j in xrange(1, m+1):
ct = t[j-1]
cost = int(cs != ct)
d[i][j] = min(d[i-1][j]+1, d[i][j-1]+1, d[i-1][j-1]+cost)
return d[n][m]
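# Illustration (added): the classic textbook pair differs by three edits
# (substitute k->s, substitute e->i, append g):
#   distance('kitten', 'sitting')  =>  3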
_soundextrans = string.maketrans(string.ascii_uppercase,
'01230120022455012623010202')
_notUpper = chars.translate(chars, string.ascii_uppercase)
def soundex(s, length=4):
"""Returns the soundex hash of a given string.
length=0 doesn't truncate the hash.
"""
s = s.upper() # Make everything uppercase.
s = s.translate(chars, _notUpper) # Delete non-letters.
if not s:
        raise ValueError, 'Invalid string for soundex: %s' % s
firstChar = s[0] # Save the first character.
s = s.translate(_soundextrans) # Convert to soundex numbers.
s = s.lstrip(s[0]) # Remove all repeated first characters.
L = [firstChar]
for c in s:
if c != L[-1]:
L.append(c)
L = [c for c in L if c != '0']
s = ''.join(L)
if length:
s = s.ljust(length, '0')[:length]
return s
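# Illustration (added): names that sound alike hash alike, which is the
# whole point of soundex:
#   soundex('Robert')  =>  'R163'
#   soundex('Rupert')  =>  'R163'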
def dqrepr(s):
"""Returns a repr() of s guaranteed to be in double quotes."""
# The wankers-that-be decided not to use double-quotes anymore in 2.3.
# return '"' + repr("'\x00" + s)[6:]
return '"%s"' % s.encode('string_escape').replace('"', '\\"')
def quoted(s):
"""Returns a quoted s."""
return '"%s"' % s
_openers = '{[(<'
_closers = '}])>'
def _getSep(s, allowBraces=False):
if len(s) < 2:
raise ValueError, 'string given to _getSep is too short: %r' % s
if allowBraces:
braces = _closers
else:
braces = _openers + _closers
if s.startswith('m') or s.startswith('s'):
separator = s[1]
else:
separator = s[0]
if separator.isalnum() or separator in braces:
raise ValueError, \
'Invalid separator: separator must not be alphanumeric or in ' \
'"%s"' % braces
return separator
def perlReToPythonRe(s):
"""Converts a string representation of a Perl regular expression (i.e.,
m/^foo$/i or /foo|bar/) to a Python regular expression.
"""
opener = closer = _getSep(s, True)
if opener in '{[(<':
closer = _closers[_openers.index(opener)]
opener = re.escape(opener)
closer = re.escape(closer)
matcher = re.compile(r'm?%s((?:\\.|[^\\])*)%s(.*)' % (opener, closer))
try:
(regexp, flags) = matcher.match(s).groups()
    except AttributeError: # match() returned None, so .groups() failed.
raise ValueError, 'Must be of the form m/.../ or /.../'
regexp = regexp.replace('\\'+opener, opener)
if opener != closer:
regexp = regexp.replace('\\'+closer, closer)
flag = 0
try:
for c in flags.upper():
flag |= getattr(re, c)
except AttributeError:
raise ValueError, 'Invalid flag: %s' % c
try:
return re.compile(regexp, flag)
except re.error, e:
raise ValueError, str(e)
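# Illustration (added):
#   perlReToPythonRe('m/foo|bar/i') behaves like re.compile('foo|bar', re.I)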
def perlReToReplacer(s):
"""Converts a string representation of a Perl regular expression (i.e.,
s/foo/bar/g or s/foo/bar/i) to a Python function doing the equivalent
replacement.
"""
sep = _getSep(s)
escaped = re.escape(sep)
matcher = re.compile(r's%s((?:\\.|[^\\])*)%s((?:\\.|[^\\])*)%s(.*)'
% (escaped, escaped, escaped))
try:
(regexp, replace, flags) = matcher.match(s).groups()
    except AttributeError: # match() returned None, so .groups() failed.
raise ValueError, 'Must be of the form s/.../.../'
regexp = regexp.replace('\x08', r'\b')
replace = replace.replace('\\'+sep, sep)
for i in xrange(10):
replace = replace.replace(chr(i), r'\%s' % i)
g = False
if 'g' in flags:
g = True
flags = filter('g'.__ne__, flags)
r = perlReToPythonRe(sep.join(('', regexp, flags)))
if g:
return curry(r.sub, replace)
else:
return lambda s: r.sub(replace, s, 1)
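# Illustration (added): without the 'g' flag only the first match is
# replaced, mirroring Perl:
#   perlReToReplacer('s/foo/bar/')('foofoo')   =>  'barfoo'
#   perlReToReplacer('s/foo/bar/g')('foofoo')  =>  'barbar'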
_perlVarSubstituteRe = re.compile(r'\$\{([^}]+)\}|\$([a-zA-Z][a-zA-Z0-9]*)')
def perlVariableSubstitute(vars, text):
def replacer(m):
(braced, unbraced) = m.groups()
var = braced or unbraced
try:
x = vars[var]
if callable(x):
return x()
else:
return str(x)
except KeyError:
if braced:
return '${%s}' % braced
else:
return '$' + unbraced
return _perlVarSubstituteRe.sub(replacer, text)
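# Illustration (added): unknown variables are left untouched.
#   perlVariableSubstitute({'who': 'world'}, 'hello $who, $other')
#       =>  'hello world, $other'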
def commaAndify(seq, comma=',', And='and'):
"""Given a a sequence, returns an English clause for that sequence.
I.e., given [1, 2, 3], returns '1, 2, and 3'
"""
L = list(seq)
if len(L) == 0:
return ''
elif len(L) == 1:
        return ''.join(L) # join (not L[0]) so non-strings raise TypeError.
elif len(L) == 2:
L.insert(1, And)
return ' '.join(L)
else:
L[-1] = '%s %s' % (And, L[-1])
sep = '%s ' % comma
return sep.join(L)
_unCommaTheRe = re.compile(r'(.*),\s*(the)$', re.I)
def unCommaThe(s):
"""Takes a string of the form 'foo, the' and turns it into 'the foo'."""
m = _unCommaTheRe.match(s)
if m is not None:
return '%s %s' % (m.group(2), m.group(1))
else:
return s
def ellipsisify(s, n):
"""Returns a shortened version of s. Produces up to the first n chars at
the nearest word boundary.
"""
if len(s) <= n:
return s
else:
return (textwrap.wrap(s, n-3)[0] + '...')
plurals = TwoWayDictionary({})
def matchCase(s1, s2):
"""Matches the case of s1 in s2"""
if s1.isupper():
return s2.upper()
else:
L = list(s2)
for (i, char) in enumerate(s1[:len(s2)]):
if char.isupper():
L[i] = L[i].upper()
return ''.join(L)
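# Illustration (added):
#   matchCase('Fish', 'fishes')  =>  'Fishes'
#   matchCase('FISH', 'fishes')  =>  'FISHES'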
consonants = 'bcdfghjklmnpqrstvwxz'
_pluralizeRegex = re.compile('[%s]y$' % consonants)
def pluralize(s):
"""Returns the plural of s. Put any exceptions to the general English
rule of appending 's' in the plurals dictionary.
"""
lowered = s.lower()
# Exception dictionary
if lowered in plurals:
return matchCase(s, plurals[lowered])
# Words ending with 'ch', 'sh' or 'ss' such as 'punch(es)', 'fish(es)
# and miss(es)
elif any(lowered.endswith, ['x', 'ch', 'sh', 'ss']):
return matchCase(s, s+'es')
# Words ending with a consonant followed by a 'y' such as
# 'try (tries)' or 'spy (spies)'
elif _pluralizeRegex.search(lowered):
return matchCase(s, s[:-1] + 'ies')
# In all other cases, we simply add an 's' to the base word
else:
return matchCase(s, s+'s')
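# Illustration (added), one example per branch above:
#   pluralize('punch')  =>  'punches'
#   pluralize('spy')    =>  'spies'
#   pluralize('clock')  =>  'clocks'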
_depluralizeRegex = re.compile('[%s]ies' % consonants)
def depluralize(s):
"""Returns the singular of s."""
lowered = s.lower()
if lowered in plurals:
return matchCase(s, plurals[lowered])
elif any(lowered.endswith, ['ches', 'shes', 'sses']):
return s[:-2]
elif re.search(_depluralizeRegex, lowered):
return s[:-3] + 'y'
else:
if lowered.endswith('s'):
return s[:-1] # Chop off 's'.
else:
return s # Don't know what to do.
def nItems(n, item, between=None):
"""Works like this:
>>> nItems(1, 'clock')
'1 clock'
>>> nItems(10, 'clock')
'10 clocks'
>>> nItems(10, 'clock', between='grandfather')
'10 grandfather clocks'
"""
    assert isinstance(n, (int, long)), \
'The order of the arguments to nItems changed again, sorry.'
if between is None:
if n != 1:
return format('%s %p', n, item)
else:
return format('%s %s', n, item)
else:
if n != 1:
return format('%s %s %p', n, between, item)
else:
return format('%s %s %s', n, between, item)
def ordinal(i):
"""Returns i + the ordinal indicator for the number.
Example: ordinal(3) => '3rd'
"""
i = int(i)
if i % 100 in (11,12,13):
return '%sth' % i
ord = 'th'
test = i % 10
if test == 1:
ord = 'st'
elif test == 2:
ord = 'nd'
elif test == 3:
ord = 'rd'
return '%s%s' % (i, ord)
def be(i):
"""Returns the form of the verb 'to be' based on the number i."""
if i == 1:
return 'is'
else:
return 'are'
def has(i):
"""Returns the form of the verb 'to have' based on the number i."""
if i == 1:
return 'has'
else:
return 'have'
def toBool(s):
s = s.strip().lower()
if s in ('true', 'on', 'enable', 'enabled', '1'):
return True
elif s in ('false', 'off', 'disable', 'disabled', '0'):
return False
else:
raise ValueError, 'Invalid string for toBool: %s' % quoted(s)
# When used with Supybot, this is overridden when supybot.conf is loaded
def timestamp(t):
if t is None:
t = time.time()
return time.ctime(t)
_formatRe = re.compile('%((?:\d+)?\.\d+f|[bfhiLnpqrstu%])')
def format(s, *args, **kwargs):
"""w00t.
%: literal %.
i: integer
s: string
f: float
r: repr
b: form of the verb 'to be' (takes an int)
h: form of the verb 'to have' (takes an int)
L: commaAndify (takes a list of strings or a tuple of ([strings], and))
p: pluralize (takes a string)
q: quoted (takes a string)
n: nItems (takes a 2-tuple of (n, item) or a 3-tuple of (n, between, item))
t: time, formatted (takes an int)
u: url, wrapped in braces (this should be configurable at some point)
"""
args = list(args)
args.reverse() # For more efficient popping.
def sub(match):
char = match.group(1)
if char == 's':
return str(args.pop())
elif char == 'i':
# XXX Improve me!
return str(args.pop())
elif char.endswith('f'):
return ('%'+char) % args.pop()
elif char == 'b':
return be(args.pop())
elif char == 'h':
return has(args.pop())
elif char == 'L':
t = args.pop()
if isinstance(t, list):
return commaAndify(t)
elif isinstance(t, tuple) and len(t) == 2:
if not isinstance(t[0], list):
raise ValueError, \
'Invalid list for %%L in format: %s' % t
if not isinstance(t[1], basestring):
raise ValueError, \
'Invalid string for %%L in format: %s' % t
return commaAndify(t[0], And=t[1])
else:
raise ValueError, 'Invalid value for %%L in format: %s' % t
elif char == 'p':
return pluralize(args.pop())
elif char == 'q':
return quoted(args.pop())
elif char == 'r':
return repr(args.pop())
elif char == 'n':
t = args.pop()
if not isinstance(t, (tuple, list)):
raise ValueError, 'Invalid value for %%n in format: %s' % t
if len(t) == 2:
return nItems(*t)
elif len(t) == 3:
return nItems(t[0], t[2], between=t[1])
else:
raise ValueError, 'Invalid value for %%n in format: %s' % t
elif char == 't':
return timestamp(args.pop())
elif char == 'u':
return '<%s>' % args.pop()
elif char == '%':
return '%'
else:
raise ValueError, 'Invalid char in sub (in format).'
try:
return _formatRe.sub(sub, s)
except IndexError:
raise ValueError, 'Extra format chars in format spec: %r' % s
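# Illustration (added), exercising a few of the specifiers defined above:
#   format('There %b %n.', 2, (2, 'clock'))
#       =>  'There are 2 clocks.'
#   format('Give me %n.', (3, 'grandfather', 'clock'))
#       =>  'Give me 3 grandfather clocks.'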
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause |
wrigri/libcloud | libcloud/common/openstack.py | 14 | 17421 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common utilities for OpenStack
"""
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import httplib
from libcloud.common.base import ConnectionUserAndKey, Response
from libcloud.common.types import ProviderError
from libcloud.compute.types import (LibcloudError, MalformedResponseError)
from libcloud.compute.types import KeyPairDoesNotExistError
from libcloud.common.openstack_identity import get_class_for_auth_version
# Imports for backward compatibility reasons
from libcloud.common.openstack_identity import (OpenStackServiceCatalog,
OpenStackIdentityTokenScope)
try:
import simplejson as json
except ImportError:
import json
AUTH_API_VERSION = '1.1'
# Auth versions which contain token expiration information.
AUTH_VERSIONS_WITH_EXPIRES = [
'1.1',
'2.0',
'2.0_apikey',
'2.0_password',
'3.x',
'3.x_password'
]
__all__ = [
'OpenStackBaseConnection',
'OpenStackResponse',
'OpenStackException',
'OpenStackDriverMixin'
]
class OpenStackBaseConnection(ConnectionUserAndKey):
"""
Base class for OpenStack connections.
:param user_id: User name to use when authenticating
:type user_id: ``str``
:param key: Secret to use when authenticating.
:type key: ``str``
:param secure: Use HTTPS? (True by default.)
:type secure: ``bool``
:param ex_force_base_url: Base URL for connection requests. If
not specified, this will be determined by
authenticating.
:type ex_force_base_url: ``str``
:param ex_force_auth_url: Base URL for authentication requests.
:type ex_force_auth_url: ``str``
:param ex_force_auth_version: Authentication version to use. If
not specified, defaults to AUTH_API_VERSION.
:type ex_force_auth_version: ``str``
:param ex_force_auth_token: Authentication token to use for connection
requests. If specified, the connection will
not attempt to authenticate, and the value
of ex_force_base_url will be used to
determine the base request URL. If
ex_force_auth_token is passed in,
ex_force_base_url must also be provided.
:type ex_force_auth_token: ``str``
:param token_scope: Whether to scope a token to a "project", a
"domain" or "unscoped".
:type token_scope: ``str``
:param ex_domain_name: When authenticating, provide this domain name to
the identity service. A scoped token will be
returned. Some cloud providers require the domain
name to be provided at authentication time. Others
will use a default domain if none is provided.
:type ex_domain_name: ``str``
:param ex_tenant_name: When authenticating, provide this tenant name to the
identity service. A scoped token will be returned.
Some cloud providers require the tenant name to be
provided at authentication time. Others will use a
default tenant if none is provided.
:type ex_tenant_name: ``str``
    :param ex_force_service_type: Service type to use when selecting a
                                  service. If not specified, a provider
specific default will be used.
:type ex_force_service_type: ``str``
    :param ex_force_service_name: Service name to use when selecting a
                                  service. If not specified, a provider
specific default will be used.
:type ex_force_service_name: ``str``
    :param ex_force_service_region: Region to use when selecting a service.
If not specified, a provider specific
default will be used.
:type ex_force_service_region: ``str``
"""
auth_url = None
auth_token = None
auth_token_expires = None
auth_user_info = None
service_catalog = None
service_type = None
service_name = None
service_region = None
accept_format = None
_auth_version = None
def __init__(self, user_id, key, secure=True,
host=None, port=None, timeout=None, proxy_url=None,
ex_force_base_url=None,
ex_force_auth_url=None,
ex_force_auth_version=None,
ex_force_auth_token=None,
ex_token_scope=OpenStackIdentityTokenScope.PROJECT,
ex_domain_name='Default',
ex_tenant_name=None,
ex_force_service_type=None,
ex_force_service_name=None,
ex_force_service_region=None,
retry_delay=None, backoff=None):
super(OpenStackBaseConnection, self).__init__(
user_id, key, secure=secure, timeout=timeout,
retry_delay=retry_delay, backoff=backoff, proxy_url=proxy_url)
if ex_force_auth_version:
self._auth_version = ex_force_auth_version
self._ex_force_base_url = ex_force_base_url
self._ex_force_auth_url = ex_force_auth_url
self._ex_force_auth_token = ex_force_auth_token
self._ex_token_scope = ex_token_scope
self._ex_domain_name = ex_domain_name
self._ex_tenant_name = ex_tenant_name
self._ex_force_service_type = ex_force_service_type
self._ex_force_service_name = ex_force_service_name
self._ex_force_service_region = ex_force_service_region
self._osa = None
if ex_force_auth_token and not ex_force_base_url:
raise LibcloudError(
'Must also provide ex_force_base_url when specifying '
'ex_force_auth_token.')
if ex_force_auth_token:
self.auth_token = ex_force_auth_token
if not self._auth_version:
self._auth_version = AUTH_API_VERSION
auth_url = self._get_auth_url()
if not auth_url:
raise LibcloudError('OpenStack instance must ' +
'have auth_url set')
def get_auth_class(self):
"""
Retrieve identity / authentication class instance.
:rtype: :class:`OpenStackIdentityConnection`
"""
if not self._osa:
auth_url = self._get_auth_url()
cls = get_class_for_auth_version(auth_version=self._auth_version)
self._osa = cls(auth_url=auth_url,
user_id=self.user_id,
key=self.key,
tenant_name=self._ex_tenant_name,
domain_name=self._ex_domain_name,
token_scope=self._ex_token_scope,
timeout=self.timeout,
parent_conn=self)
return self._osa
def request(self, action, params=None, data='', headers=None,
method='GET', raw=False):
headers = headers or {}
params = params or {}
# Include default content-type for POST and PUT request (if available)
default_content_type = getattr(self, 'default_content_type', None)
if method.upper() in ['POST', 'PUT'] and default_content_type:
headers = {'Content-Type': default_content_type}
return super(OpenStackBaseConnection, self).request(action=action,
params=params,
data=data,
method=method,
headers=headers,
raw=raw)
def _get_auth_url(self):
"""
        Retrieve auth url for this instance using either the
        "ex_force_auth_url" constructor kwarg or the "auth_url" class
        variable.
"""
auth_url = self.auth_url
if self._ex_force_auth_url is not None:
auth_url = self._ex_force_auth_url
return auth_url
def get_service_catalog(self):
if self.service_catalog is None:
self._populate_hosts_and_request_paths()
return self.service_catalog
def get_service_name(self):
"""
Gets the service name used to look up the endpoint in the service
catalog.
:return: name of the service in the catalog
"""
if self._ex_force_service_name:
return self._ex_force_service_name
return self.service_name
def get_endpoint(self):
"""
Selects the endpoint to use based on provider specific values,
or overrides passed in by the user when setting up the driver.
:returns: url of the relevant endpoint for the driver
"""
service_type = self.service_type
service_name = self.service_name
service_region = self.service_region
if self._ex_force_service_type:
service_type = self._ex_force_service_type
if self._ex_force_service_name:
service_name = self._ex_force_service_name
if self._ex_force_service_region:
service_region = self._ex_force_service_region
endpoint = self.service_catalog.get_endpoint(service_type=service_type,
name=service_name,
region=service_region)
url = endpoint.url
if not url:
raise LibcloudError('Could not find specified endpoint')
return url
def add_default_headers(self, headers):
headers['X-Auth-Token'] = self.auth_token
headers['Accept'] = self.accept_format
return headers
def morph_action_hook(self, action):
self._populate_hosts_and_request_paths()
return super(OpenStackBaseConnection, self).morph_action_hook(action)
def _set_up_connection_info(self, url):
result = self._tuple_from_url(url)
(self.host, self.port, self.secure, self.request_path) = result
def _populate_hosts_and_request_paths(self):
"""
OpenStack uses a separate host for API calls which is only provided
after an initial authentication request.
"""
osa = self.get_auth_class()
if self._ex_force_auth_token:
# If ex_force_auth_token is provided we always hit the api directly
# and never try to authenticate.
#
# Note: When ex_force_auth_token is provided, ex_force_base_url
# must be provided as well.
self._set_up_connection_info(url=self._ex_force_base_url)
return
if not osa.is_token_valid():
# Token is not available or it has expired. Need to retrieve a
# new one.
if self._auth_version == '2.0_apikey':
kwargs = {'auth_type': 'api_key'}
elif self._auth_version == '2.0_password':
kwargs = {'auth_type': 'password'}
else:
kwargs = {}
osa = osa.authenticate(**kwargs) # may throw InvalidCreds
self.auth_token = osa.auth_token
self.auth_token_expires = osa.auth_token_expires
self.auth_user_info = osa.auth_user_info
# Pull out and parse the service catalog
osc = OpenStackServiceCatalog(service_catalog=osa.urls,
auth_version=self._auth_version)
self.service_catalog = osc
url = self._ex_force_base_url or self.get_endpoint()
self._set_up_connection_info(url=url)
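# Illustrative sketch (added comment; the class and URL below are
# hypothetical, not libcloud API): a driver's connection subclass
# typically just sets auth_url and a service type, and authentication
# happens lazily on the first request:
#
#   class ExampleConnection(OpenStackBaseConnection):
#       auth_url = 'https://keystone.example.com/v2.0/'
#       service_type = 'compute'
#
#   conn = ExampleConnection('user', 'apikey',
#                            ex_force_auth_version='2.0_apikey')
#   conn.request('/servers')  # triggers auth + service catalog lookup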
class OpenStackException(ProviderError):
pass
class OpenStackResponse(Response):
node_driver = None
def success(self):
i = int(self.status)
        return 200 <= i <= 299
def has_content_type(self, content_type):
content_type_value = self.headers.get('content-type') or ''
content_type_value = content_type_value.lower()
return content_type_value.find(content_type.lower()) > -1
def parse_body(self):
if self.status == httplib.NO_CONTENT or not self.body:
return None
if self.has_content_type('application/xml'):
try:
return ET.XML(self.body)
except:
raise MalformedResponseError(
'Failed to parse XML',
body=self.body,
driver=self.node_driver)
elif self.has_content_type('application/json'):
try:
return json.loads(self.body)
except:
raise MalformedResponseError(
'Failed to parse JSON',
body=self.body,
driver=self.node_driver)
else:
return self.body
def parse_error(self):
text = None
body = self.parse_body()
if self.has_content_type('application/xml'):
text = '; '.join([err.text or '' for err in body.getiterator()
if err.text])
elif self.has_content_type('application/json'):
values = list(body.values())
context = self.connection.context
driver = self.connection.driver
key_pair_name = context.get('key_pair_name', None)
if len(values) > 0 and values[0]['code'] == 404 and key_pair_name:
raise KeyPairDoesNotExistError(name=key_pair_name,
driver=driver)
elif len(values) > 0 and 'message' in values[0]:
text = ';'.join([fault_data['message'] for fault_data
in values])
else:
text = body
else:
            # While we hope a response is always XML or JSON, we have
            # seen HTML or plain text in the past. It's not clear we can
            # make that more readable here, so we just pass the whole
            # response body along in the text variable.
text = body
return '%s %s %s' % (self.status, self.error, text)
class OpenStackDriverMixin(object):
def __init__(self, *args, **kwargs):
self._ex_force_base_url = kwargs.get('ex_force_base_url', None)
self._ex_force_auth_url = kwargs.get('ex_force_auth_url', None)
self._ex_force_auth_version = kwargs.get('ex_force_auth_version', None)
self._ex_force_auth_token = kwargs.get('ex_force_auth_token', None)
self._ex_token_scope = kwargs.get('ex_token_scope', None)
self._ex_domain_name = kwargs.get('ex_domain_name', None)
self._ex_tenant_name = kwargs.get('ex_tenant_name', None)
self._ex_force_service_type = kwargs.get('ex_force_service_type', None)
self._ex_force_service_name = kwargs.get('ex_force_service_name', None)
self._ex_force_service_region = kwargs.get('ex_force_service_region',
None)
def openstack_connection_kwargs(self):
"""
:rtype: ``dict``
"""
rv = {}
if self._ex_force_base_url:
rv['ex_force_base_url'] = self._ex_force_base_url
if self._ex_force_auth_token:
rv['ex_force_auth_token'] = self._ex_force_auth_token
if self._ex_force_auth_url:
rv['ex_force_auth_url'] = self._ex_force_auth_url
if self._ex_force_auth_version:
rv['ex_force_auth_version'] = self._ex_force_auth_version
if self._ex_token_scope:
rv['ex_token_scope'] = self._ex_token_scope
if self._ex_domain_name:
rv['ex_domain_name'] = self._ex_domain_name
if self._ex_tenant_name:
rv['ex_tenant_name'] = self._ex_tenant_name
if self._ex_force_service_type:
rv['ex_force_service_type'] = self._ex_force_service_type
if self._ex_force_service_name:
rv['ex_force_service_name'] = self._ex_force_service_name
if self._ex_force_service_region:
rv['ex_force_service_region'] = self._ex_force_service_region
return rv
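# Usage sketch (added comment; ExampleDriver is hypothetical): drivers that
# mix this class in forward the collected kwargs to their connection, e.g.:
#
#   class ExampleDriver(OpenStackDriverMixin, NodeDriver):
#       def _ex_connection_class_kwargs(self):
#           return self.openstack_connection_kwargs()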
| apache-2.0 |
lgarren/spack | var/spack/repos/builtin/packages/vdt/package.py | 3 | 1735 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Vdt(CMakePackage):
"""Vectorised math. A collection of fast and inline implementations of
mathematical functions."""
homepage = "https://github.com/dpiparo/vdt"
url = "https://github.com/dpiparo/vdt/archive/v0.3.9.tar.gz"
version('0.3.9', '80a2d73a82f7ef8257a8206ca22dd145')
version('0.3.8', '25b07c72510aaa95fffc11e33579061c')
version('0.3.7', 'd2621d4c489894fd1fe8e056d9a0a67c')
version('0.3.6', '6eaff3bbbd5175332ccbd66cd71a741d')
| lgpl-2.1 |
simleo/openmicroscopy | components/tools/OmeroPy/test/integration/gatewaytest/test_annotation.py | 2 | 15621 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
gateway tests - Annotation Wrapper
Copyright 2009-2015 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
pytest fixtures used as defined in conftest.py:
- gatewaywrapper
- author_testimg_generated
"""
import time
import datetime
import os
from tempfile import NamedTemporaryFile
import omero.gateway
def _testAnnotation(obj, annclass, ns, value, sameOwner=False,
testOwner=None):
gateway = obj._conn
# Make sure it doesn't yet exist
obj.removeAnnotations(ns)
assert obj.getAnnotation(ns) is None
# Create new, link and check
ann = annclass(gateway)
ann.setNs(ns)
ann.setValue(value)
if sameOwner:
obj.linkAnnotation(ann, sameOwner=True)
else:
# checks that default sameOwner=False
obj.linkAnnotation(ann)
ann = obj.getAnnotation(ns)
# Make sure the group for the annotation is the same as the original
# object. (#120)
assert ann.getDetails().getGroup() == obj.getDetails().getGroup()
tval = hasattr(value, 'val') and value.val or value
assert ann.getValue() == value, '%s != %s' % (str(ann.getValue()),
str(tval))
assert ann.getNs() == ns, '%s != %s' % (str(ann.getNs()), str(ns))
if testOwner is not None:
testOwner(obj, ann)
# Remove and check
obj.removeAnnotations(ns)
assert obj.getAnnotation(ns) is None
# Same dance, createAndLink shortcut
if sameOwner:
annclass.createAndLink(target=obj, ns=ns, val=value, sameOwner=True)
else:
# checks that default sameOwner=False
annclass.createAndLink(target=obj, ns=ns, val=value)
ann = obj.getAnnotation(ns)
# Make sure the group for the annotation is the same as the original
# object. (#120)
assert ann.getDetails().getGroup() == obj.getDetails().getGroup()
tval = hasattr(value, 'val') and value.val or value
assert ann.getValue() == value, '%s != %s' % (str(ann.getValue()),
str(tval))
assert ann.getNs() == ns, '%s != %s' % (str(ann.getNs()), str(ns))
if testOwner is not None:
testOwner(obj, ann)
# Remove and check
obj.removeAnnotations(ns)
assert obj.getAnnotation(ns) is None
TESTANN_NS = 'omero.gateway.test_annotation'
def testSameOwner(gatewaywrapper):
"""
tests project.linkAnnotation(sameOwner=False)
Tests sameOwner default is False (was True for 4.4.x but False in 5.0)
"""
gatewaywrapper.loginAsAdmin()
gatewaywrapper.gateway.SERVICE_OPTS.setOmeroGroup("-1")
p = gatewaywrapper.getTestProject2()
p.getDetails().owner.id.val
def sameOwner(obj, ann):
assert obj.getDetails().owner.id.val == ann.getDetails().owner.id.val
def differentOwner(obj, ann):
assert obj.getDetails().owner.id.val != ann.getDetails().owner.id.val
_testAnnotation(p,
omero.gateway.CommentAnnotationWrapper,
TESTANN_NS, 'some value',
sameOwner=True, testOwner=sameOwner)
return _testAnnotation(
p, omero.gateway.CommentAnnotationWrapper,
TESTANN_NS, 'some value', sameOwner=False, testOwner=differentOwner)
def testCommentAnnotation(author_testimg_generated):
return _testAnnotation(author_testimg_generated,
omero.gateway.CommentAnnotationWrapper,
TESTANN_NS, 'some value')
def testNonDefGroupAnnotation(gatewaywrapper):
p = gatewaywrapper.getTestProject2()
return _testAnnotation(p,
omero.gateway.CommentAnnotationWrapper,
TESTANN_NS, 'some value')
def testTimestampAnnotation(author_testimg_generated):
now = time.time()
t = datetime.datetime.fromtimestamp(int(now))
_testAnnotation(author_testimg_generated,
omero.gateway.TimestampAnnotationWrapper,
TESTANN_NS, t)
# Now use RTime, but this one doesn't fit in the general test case
t = omero.rtypes.rtime(int(now))
omero.gateway.TimestampAnnotationWrapper.createAndLink(
target=author_testimg_generated, ns=TESTANN_NS, val=t)
t = datetime.datetime.fromtimestamp(t.val / 1000.0)
ann = author_testimg_generated.getAnnotation(TESTANN_NS)
assert ann.getValue() == t, '%s != %s' % (str(ann.getValue()), str(t))
assert ann.getNs() == TESTANN_NS, '%s != %s' % (str(ann.getNs()),
str(TESTANN_NS))
# Remove and check
author_testimg_generated.removeAnnotations(TESTANN_NS)
assert author_testimg_generated.getAnnotation(TESTANN_NS) is None
# A simple int stating secs since the epoch, also not fitting in the
# general test case
t = int(now)
omero.gateway.TimestampAnnotationWrapper.createAndLink(
target=author_testimg_generated, ns=TESTANN_NS, val=t)
t = datetime.datetime.fromtimestamp(t)
ann = author_testimg_generated.getAnnotation(TESTANN_NS)
assert ann.getValue() == t, '%s != %s' % (str(ann.getValue()), str(t))
assert ann.getNs() == TESTANN_NS, '%s != %s' % (str(ann.getNs()),
str(TESTANN_NS))
# Remove and check
author_testimg_generated.removeAnnotations(TESTANN_NS)
assert author_testimg_generated.getAnnotation(TESTANN_NS) is None
def testBooleanAnnotation(author_testimg_generated):
_testAnnotation(author_testimg_generated,
omero.gateway.BooleanAnnotationWrapper,
TESTANN_NS, True)
def testLongAnnotation(author_testimg_generated):
_testAnnotation(author_testimg_generated,
omero.gateway.LongAnnotationWrapper,
TESTANN_NS, 1000L)
def testMapAnnotation(author_testimg_generated):
data = [("foo", "bar"),
("test key", "test value")]
_testAnnotation(author_testimg_generated,
omero.gateway.MapAnnotationWrapper,
TESTANN_NS, data)
# test getObjects()
conn = author_testimg_generated._conn
ann = omero.gateway.MapAnnotationWrapper(conn)
ann.setValue(data)
ann.save()
aId = ann.getId()
ann2 = conn.getObject("MapAnnotation", aId)
assert ann2 is not None
assert ann2.getValue() == data
# delete to clean up
handle = conn.deleteObjects('/Annotation', [aId])
try:
conn._waitOnCmd(handle)
finally:
handle.close()
assert conn.getObject("MapAnnotation", aId) is None
def testDualLinkedAnnotation(author_testimg_generated):
""" Tests linking the same annotation to 2 separate objects """
dataset = author_testimg_generated.getParent()
assert dataset is not None
author_testimg_generated.removeAnnotations(TESTANN_NS)
assert author_testimg_generated.getAnnotation(TESTANN_NS) is None
dataset.removeAnnotations(TESTANN_NS)
assert dataset.getAnnotation(TESTANN_NS) is None
ann = omero.gateway.CommentAnnotationWrapper(dataset._conn)
ann.setNs(TESTANN_NS)
value = 'I suffer from multi link disorder'
ann.setValue(value)
author_testimg_generated.linkAnnotation(ann)
dataset.linkAnnotation(ann)
assert author_testimg_generated.getAnnotation(TESTANN_NS).getValue() == \
value
assert dataset.getAnnotation(TESTANN_NS).getValue() == value
author_testimg_generated.removeAnnotations(TESTANN_NS)
assert author_testimg_generated.getAnnotation(TESTANN_NS) is None
assert dataset.getAnnotation(TESTANN_NS).getValue() == value
dataset.removeAnnotations(TESTANN_NS)
assert dataset.getAnnotation(TESTANN_NS) is None
def testListAnnotations(author_testimg_generated):
""" Other small things that need to be tested """
ns1 = TESTANN_NS
ns2 = ns1 + '_2'
obj = author_testimg_generated
annclass = omero.gateway.CommentAnnotationWrapper
value = 'foo'
# Make sure it doesn't yet exist
obj.removeAnnotations(ns1)
obj.removeAnnotations(ns2)
assert obj.getAnnotation(ns1) is None
assert obj.getAnnotation(ns2) is None
# createAndLink
annclass.createAndLink(target=obj, ns=ns1, val=value)
annclass.createAndLink(target=obj, ns=ns2, val=value)
ann1 = obj.getAnnotation(ns1)
ann2 = obj.getAnnotation(ns2)
l = list(obj.listAnnotations())
assert ann1 in l
assert ann2 in l
l = list(obj.listAnnotations(ns=ns1))
assert ann1 in l
assert ann2 not in l
l = list(obj.listAnnotations(ns=ns2))
assert ann1 not in l
assert ann2 in l
l = list(obj.listAnnotations(ns='bogusns...bogusns...'))
assert ann1 not in l
assert ann2 not in l
# Remove and check
obj.removeAnnotations(ns1)
obj.removeAnnotations(ns2)
assert obj.getAnnotation(ns1) is None
assert obj.getAnnotation(ns2) is None
def testFileAnnotation(author_testimg_generated, gatewaywrapper):
""" Creates a file annotation from a local file """
tempFileName = "tempFile"
f = open(tempFileName, 'w')
fileText = "Test text for writing to file for upload"
f.write(fileText)
f.close()
ns = TESTANN_NS
image = author_testimg_generated
# use the same file to create various file annotations with different
# namespaces
fileAnn = gatewaywrapper.gateway.createFileAnnfromLocalFile(
tempFileName, mimetype='text/plain', ns=ns)
image.linkAnnotation(fileAnn)
compAnn = gatewaywrapper.gateway.createFileAnnfromLocalFile(
tempFileName, mimetype='text/plain',
ns=omero.constants.namespaces.NSCOMPANIONFILE)
image.linkAnnotation(compAnn)
os.remove(tempFileName)
# get user-id of another user to use below.
gatewaywrapper.loginAsAdmin()
adminId = gatewaywrapper.gateway.getUser().getId()
gatewaywrapper.loginAsAuthor()
# test listing of File Annotations. Should exclude companion files by
# default and all files should be loaded
gateway = gatewaywrapper.gateway
eid = gateway.getUser().getId()
fas = list(gateway.listFileAnnotations(eid=eid, toInclude=[ns]))
faIds = [fa.id for fa in fas]
assert fileAnn.getId() in faIds
assert compAnn.getId() not in faIds
for fa in fas:
assert fa.getNs() == ns, \
"All files should be filtered by this namespace"
assert fa._obj.file.loaded, \
"All file annotations should have files loaded"
# filtering by namespace
fas = list(gateway.listFileAnnotations(
toInclude=["nothing.with.this.namespace"], eid=eid))
assert len(fas) == 0, \
"No file annotations should exist with bogus namespace"
# filtering files by a different user should not return the annotations
# above.
fas = list(gateway.listFileAnnotations(eid=adminId))
faIds = [fa.id for fa in fas]
assert fileAnn.getId() not in faIds
assert compAnn.getId() not in faIds
# needs a fresh connection, original was closed already
image._conn = gatewaywrapper.gateway
ann = image.getAnnotation(ns)
annId = ann.getId()
assert ann.OMERO_TYPE == omero.model.FileAnnotationI
for t in ann.getFileInChunks():
assert str(t) == fileText # we get whole text in one chunk
# delete what we created
assert gateway.getObject("Annotation", annId) is not None
handle = gateway.deleteObjects("Annotation", [annId])
gateway._waitOnCmd(handle)
assert gateway.getObject("Annotation", annId) is None
def testFileAnnotationSpeed(author_testimg_generated, gatewaywrapper):
""" Tests speed of loading file annotations. See PR: 4176 """
try:
        f = NamedTemporaryFile()
        f.write("testFileAnnotationSpeed text")
        f.flush()  # make sure the bytes are on disk before the uploads below
ns = TESTANN_NS
image = author_testimg_generated
# use the same file to create many file annotations
for i in range(20):
fileAnn = gatewaywrapper.gateway.createFileAnnfromLocalFile(
f.name, mimetype='text/plain', ns=ns)
image.linkAnnotation(fileAnn)
finally:
f.close()
now = time.time()
for ann in image.listAnnotations():
if ann._obj.__class__ == omero.model.FileAnnotationI:
            # mimic behaviour of templates which call multiple times
print ann.getId()
print ann.getFileName()
print ann.getFileName()
print ann.getFileSize()
print ann.getFileSize()
print time.time() - now
def testFileAnnNonDefaultGroup(author_testimg_generated, gatewaywrapper):
""" Test conn.createFileAnnfromLocalFile() respects SERVICE_OPTS """
gatewaywrapper.loginAsAuthor()
userId = gatewaywrapper.gateway.getUser().getId()
ctx = gatewaywrapper.gateway.getAdminService().getEventContext()
uuid = ctx.sessionUuid
# Admin creates a new group with user
gatewaywrapper.loginAsAdmin()
gid = gatewaywrapper.gateway.createGroup(
"testFileAnnNonDefaultGroup-%s" % uuid, member_Ids=[userId])
# login as Author again (into 'default' group)
gatewaywrapper.loginAsAuthor()
conn = gatewaywrapper.gateway
# Try to create fileAnn in another group
conn.SERVICE_OPTS.setOmeroGroup(gid)
tempFileName = "tempFile"
f = open(tempFileName, 'w')
fileText = "Test text for writing to file for upload"
f.write(fileText)
f.close()
ns = TESTANN_NS
fileAnn = conn.createFileAnnfromLocalFile(
tempFileName, mimetype='text/plain', ns=ns)
os.remove(tempFileName)
assert fileAnn.getDetails().group.id.val == gid
def testUnlinkAnnotation(author_testimg_generated):
""" Tests the use of unlinkAnnotations. See #7301 """
# Setup test dataset
dataset = author_testimg_generated.getParent()
assert dataset is not None
gateway = dataset._conn
# Make really sure there are no annotations
dataset.removeAnnotations(TESTANN_NS)
assert dataset.getAnnotation(TESTANN_NS) is None
# Add an annotation
ann = omero.gateway.CommentAnnotationWrapper(gateway)
ann.setNs(TESTANN_NS)
dataset.linkAnnotation(ann)
assert dataset.getAnnotation(TESTANN_NS).getNs() == TESTANN_NS
# Unlink annotations
dataset.unlinkAnnotations(TESTANN_NS)
assert dataset.getAnnotation(TESTANN_NS) is None
def testAnnoationCount(author_testimg_generated):
""" Test get annotations counts """
img = author_testimg_generated
gateway = img._conn
ann = omero.gateway.CommentAnnotationWrapper(gateway)
img.linkAnnotation(ann)
# LongAnnotation without NS, == OtherAnnotation
ann = omero.gateway.LongAnnotationWrapper(gateway)
img.linkAnnotation(ann)
# LongAnnotation with rating NS
ann = omero.gateway.LongAnnotationWrapper(gateway)
ann.setNs(omero.constants.metadata.NSINSIGHTRATING)
img.linkAnnotation(ann)
ann = omero.gateway.DoubleAnnotationWrapper(gateway)
img.linkAnnotation(ann)
ann = omero.gateway.BooleanAnnotationWrapper(gateway)
img.linkAnnotation(ann)
ann = omero.gateway.MapAnnotationWrapper(gateway)
img.linkAnnotation(ann)
ann = omero.gateway.TagAnnotationWrapper(gateway)
img.linkAnnotation(ann)
ann = omero.gateway.FileAnnotationWrapper(gateway)
img.linkAnnotation(ann)
counts = img.getAnnotationCounts()
assert counts['CommentAnnotation'] == 1
assert counts['TagAnnotation'] == 1
assert counts['LongAnnotation'] == 1
assert counts['MapAnnotation'] == 1
assert counts['FileAnnotation'] == 1
assert counts['OtherAnnotation'] == 3
| gpl-2.0 |
calvinsettachatgul/athena | athena_env/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/ordered_dict.py | 2040 | 8935 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
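if __name__ == '__main__':
    # Small usage sketch (added for illustration): insertion order is
    # preserved and popitem() defaults to LIFO order.
    d = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
    assert d.keys() == ['a', 'b', 'c']
    assert d.popitem() == ('c', 3)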
| mit |
GeoffEvans/aol_model | aol_model/tests/test_ray.py | 1 | 2020 | from aol_model.ray import Ray
from numpy import allclose, array
import pytest
from aol_model.vector_utils import normalise
position = [0,0,2]
wavevector_unit = [1,0,0]
wavelength = 10
energy = 1
def test_setting_non_unit_vector():
with pytest.raises(ValueError):
Ray(position, [1,0,0.1], wavelength, energy)
def test_wavevectors():
r = Ray(position, wavevector_unit, wavelength, energy)
assert allclose(r.wavevector_unit * r.wavevector_vac_mag, r.wavevector_vac)
def test_propagating_normal_to_plane():
r1 = Ray(position, wavevector_unit, wavelength, energy)
r1.propagate_free_space(10)
r2 = Ray(position, wavevector_unit, wavelength, energy)
r2.propagate_to_plane([10,10,10], wavevector_unit)
assert allclose(r1.position, r2.position)
def test_propagating_angle_to_plane():
r = Ray(position, [3./5,4./5,0], wavelength, energy)
r.propagate_to_plane([12,0,0], [1,0,0])
assert allclose(r.position, position + array([12,16,0]))
def test_setting_wavevector_property():
r = Ray(position, [3./5,4./5,0], wavelength, energy)
r.wavevector_vac = [144,0,17]
mag_correct = allclose(r.wavevector_vac_mag, 145)
dir_correct = allclose(r.wavevector_unit, [144./145, 0, 17./145])
assert mag_correct and dir_correct
def test_propagate_from_plane_to_plane_z():
r = Ray([1,0,0], [0,0,1], wavelength, energy)
r.propagate_from_plane_to_plane(10, normalise([1,0,1]), normalise([-1,0,1]))
assert allclose(r.position, [1,0,12])
def test_align_to_plane():
r = Ray([1,0,0], [0,0,1], wavelength, energy)
r.propagate_from_plane_to_plane(0, array([0,0,1]), normalise([-1,0,1]))
assert allclose(r.position, [1,0,1])
def test_propagate_from_plane_to_plane_reverse():
r = Ray([1,0,0], [0,0,1], wavelength, energy)
r.propagate_from_plane_to_plane( 10, normalise([ 1,2,1]), normalise([-1,3,1]))
r.propagate_from_plane_to_plane(-10, normalise([-1,3,1]), normalise([ 1,2,1]))
assert allclose(r.position, [1,0,0])
| gpl-3.0 |
byterom/android_external_chromium_org | tools/perf/benchmarks/robohornet_pro.py | 27 | 1798 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Microsoft's RoboHornet Pro benchmark."""
import os
from metrics import power
from telemetry import benchmark
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.value import scalar
class _RobohornetProMeasurement(page_test.PageTest):
def __init__(self):
super(_RobohornetProMeasurement, self).__init__()
self._power_metric = None
def CustomizeBrowserOptions(self, options):
power.PowerMetric.CustomizeBrowserOptions(options)
def WillStartBrowser(self, platform):
self._power_metric = power.PowerMetric(platform)
def DidNavigateToPage(self, page, tab):
self._power_metric.Start(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
tab.ExecuteJavaScript('ToggleRoboHornet()')
tab.WaitForJavaScriptExpression(
'document.getElementById("results").innerHTML.indexOf("Total") != -1',
600)
self._power_metric.Stop(page, tab)
self._power_metric.AddResults(tab, results)
result = int(tab.EvaluateJavaScript('stopTime - startTime'))
results.AddValue(
scalar.ScalarValue(results.current_page, 'Total', 'ms', result))
class RobohornetPro(benchmark.Benchmark):
test = _RobohornetProMeasurement
def CreatePageSet(self, options):
ps = page_set.PageSet(
archive_data_file='../page_sets/data/robohornet_pro.json',
# Measurement require use of real Date.now() for measurement.
make_javascript_deterministic=False,
file_path=os.path.abspath(__file__))
ps.AddPageWithDefaultRunNavigate(
'http://ie.microsoft.com/testdrive/performance/robohornetpro/')
return ps
| bsd-3-clause |
LadZone/LadZone.github.io | blog/Adafruit_PWM_Servo_Driver.py | 33 | 3163 | #!/usr/bin/python
import time
import math
from Adafruit_I2C import Adafruit_I2C
# ============================================================================
# Adafruit PCA9685 16-Channel PWM Servo Driver
# ============================================================================
class PWM :
# Registers/etc.
__MODE1 = 0x00
__MODE2 = 0x01
__SUBADR1 = 0x02
__SUBADR2 = 0x03
__SUBADR3 = 0x04
__PRESCALE = 0xFE
__LED0_ON_L = 0x06
__LED0_ON_H = 0x07
__LED0_OFF_L = 0x08
__LED0_OFF_H = 0x09
__ALL_LED_ON_L = 0xFA
__ALL_LED_ON_H = 0xFB
__ALL_LED_OFF_L = 0xFC
__ALL_LED_OFF_H = 0xFD
# Bits
__RESTART = 0x80
__SLEEP = 0x10
__ALLCALL = 0x01
__INVRT = 0x10
__OUTDRV = 0x04
general_call_i2c = Adafruit_I2C(0x00)
@classmethod
def softwareReset(cls):
"Sends a software reset (SWRST) command to all the servo drivers on the bus"
cls.general_call_i2c.writeRaw8(0x06) # SWRST
def __init__(self, address=0x40, debug=False):
self.i2c = Adafruit_I2C(address)
self.i2c.debug = debug
self.address = address
self.debug = debug
if (self.debug):
print "Reseting PCA9685 MODE1 (without SLEEP) and MODE2"
self.setAllPWM(0, 0)
self.i2c.write8(self.__MODE2, self.__OUTDRV)
self.i2c.write8(self.__MODE1, self.__ALLCALL)
time.sleep(0.005) # wait for oscillator
mode1 = self.i2c.readU8(self.__MODE1)
mode1 = mode1 & ~self.__SLEEP # wake up (reset sleep)
self.i2c.write8(self.__MODE1, mode1)
time.sleep(0.005) # wait for oscillator
def setPWMFreq(self, freq):
"Sets the PWM frequency"
prescaleval = 25000000.0 # 25MHz
prescaleval /= 4096.0 # 12-bit
prescaleval /= float(freq)
prescaleval -= 1.0
if (self.debug):
print "Setting PWM frequency to %d Hz" % freq
print "Estimated pre-scale: %d" % prescaleval
prescale = math.floor(prescaleval + 0.5)
if (self.debug):
print "Final pre-scale: %d" % prescale
oldmode = self.i2c.readU8(self.__MODE1);
newmode = (oldmode & 0x7F) | 0x10 # sleep
self.i2c.write8(self.__MODE1, newmode) # go to sleep
self.i2c.write8(self.__PRESCALE, int(math.floor(prescale)))
self.i2c.write8(self.__MODE1, oldmode)
time.sleep(0.005)
self.i2c.write8(self.__MODE1, oldmode | 0x80)
def setPWM(self, channel, on, off):
"Sets a single PWM channel"
self.i2c.write8(self.__LED0_ON_L+4*channel, on & 0xFF)
self.i2c.write8(self.__LED0_ON_H+4*channel, on >> 8)
self.i2c.write8(self.__LED0_OFF_L+4*channel, off & 0xFF)
self.i2c.write8(self.__LED0_OFF_H+4*channel, off >> 8)
def setAllPWM(self, on, off):
"Sets a all PWM channels"
self.i2c.write8(self.__ALL_LED_ON_L, on & 0xFF)
self.i2c.write8(self.__ALL_LED_ON_H, on >> 8)
self.i2c.write8(self.__ALL_LED_OFF_L, off & 0xFF)
self.i2c.write8(self.__ALL_LED_OFF_H, off >> 8)
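# Usage sketch (added comment; requires a PCA9685 on the I2C bus at 0x40):
#   pwm = PWM(0x40)
#   pwm.setPWMFreq(60)       # typical analog-servo update rate
#   pwm.setPWM(0, 0, 300)    # channel 0: on at tick 0, off at tick 300
# At 60 Hz one period is ~16.7 ms, so 300/4096 of it is a ~1.2 ms pulse.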
| agpl-3.0 |
lujji/JXD-7800b-KK-kernel | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
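# Test files are colon-separated lines of the form (illustrative, derived
# from the parser below):
#   <cmd>: <opcode>: <thread-id>: <data>
# e.g.
#   C: schedfifo: 0: 80     # set thread 0 to SCHED_FIFO priority 80
#   C: locknowait: 0: 0     # thread 0 takes lock 0 without waiting
#   W: locked: 0: 0         # wait until thread 0 reports the lock held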
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
    try:
        # Field access is inside the try block so that malformed lines with
        # fewer than four ':'-separated fields are reported as syntax errors
        # below instead of crashing the script
        cmd = parts[0].strip().lower()
        opc = parts[1].strip().lower()
        tid = parts[2].strip()
        dat = parts[3].strip()
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
thiriel/maps | venv/lib/python2.7/site-packages/django/contrib/gis/tests/relatedapp/tests.py | 198 | 14731 | from __future__ import absolute_import
from datetime import date
from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.tests.utils import mysql, oracle, no_mysql, no_oracle, no_spatialite
from django.test import TestCase
from .models import City, Location, DirectoryEntry, Parcel, Book, Author, Article
class RelatedGeoModelTest(TestCase):
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.all()
qs2 = City.objects.select_related()
qs3 = City.objects.select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@no_mysql
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
        # Doing this implicitly sets `select_related` to select the location.
# TODO: Fix why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@no_mysql
@no_spatialite
def test04a_related_extent_aggregate(self):
"Testing the `extent` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes New Mexico (Roswell).
all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
e1 = City.objects.extent(field_name='location__point')
e2 = City.objects.exclude(state='NM').extent(field_name='location__point')
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol)
@no_mysql
def test04b_related_union_aggregate(self):
"Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Union('location__point'))
        # These are the points that are components of the aggregate geographic
        # union that is returned.  Each point corresponds to a City PK.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
p4 = Point(-96.801611, 32.782057)
p5 = Point(-95.363151, 29.763374)
# Creating the reference union geometry depending on the spatial backend,
# as Oracle will have a different internal ordering of the component
# geometries than PostGIS. The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
# words a `.filter()` precedes the call to `.unionagg()`).
if oracle:
ref_u1 = MultiPoint(p4, p5, p3, p1, p2, srid=4326)
ref_u2 = MultiPoint(p3, p2, srid=4326)
else:
            # Looks like PostGIS orders points by longitude value.
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.unionagg(field_name='location__point')
u2 = City.objects.exclude(name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth')).unionagg(field_name='location__point')
u3 = aggs['location__point__union']
self.assertEqual(ref_u1, u1)
self.assertEqual(ref_u2, u2)
self.assertEqual(ref_u1, u3)
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
l = list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
"Testing F() expressions on GeometryFields."
# Constructing a dummy parcel border and getting the City instance for
# assigning the FK.
b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326)
pcity = City.objects.get(name='Aurora')
# First parcel has incorrect center point that is equal to the City;
# it also has a second border that is different from the first as a
# 100ft buffer around the City.
c1 = pcity.location.point
c2 = c1.transform(2276, clone=True)
b2 = c2.buffer(100)
p1 = Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
# Now creating a second Parcel where the borders are the same, just
# in different coordinate systems. The center points are also the
# same (but in different coordinate systems), and this time they
# actually correspond to the centroid of the border.
c1 = b1.centroid
c2 = c1.transform(2276, clone=True)
p2 = Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
# Should return the second Parcel, which has the center within the
# border.
qs = Parcel.objects.filter(center1__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
if not mysql:
# This time center2 is in a different coordinate system and needs
# to be wrapped in transformation SQL.
qs = Parcel.objects.filter(center2__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
# Should return the first Parcel, which has the center point equal
# to the point in the City ForeignKey.
qs = Parcel.objects.filter(center1=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
if not mysql:
# This time the city column should be wrapped in transformation SQL.
qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
def test07_values(self):
"Testing values() and values_list() and GeoQuerySets."
        # GeoQuerySet, GeoValuesQuerySet, and GeoValuesListQuerySet, respectively.
gqs = Location.objects.all()
gvqs = Location.objects.values()
gvlqs = Location.objects.values_list()
# Incrementing through each of the models, dictionaries, and tuples
# returned by the different types of GeoQuerySets.
for m, d, t in zip(gqs, gvqs, gvlqs):
# The values should be Geometry objects and not raw strings returned
# by the spatial database.
self.assertTrue(isinstance(d['point'], Geometry))
self.assertTrue(isinstance(t[1], Geometry))
self.assertEqual(m.point, d['point'])
self.assertEqual(m.point, t[1])
def test08_defer_only(self):
"Testing defer() and only() on Geographic models."
qs = Location.objects.all()
def_qs = Location.objects.defer('point')
for loc, def_loc in zip(qs, def_qs):
self.assertEqual(loc.point, def_loc.point)
def test09_pk_relations(self):
"Ensuring correct primary key column is selected across relations. See #10757."
# The expected ID values -- notice the last two location IDs
# are out of order. Dallas and Houston have location IDs that differ
# from their PKs -- this is done to ensure that the related location
# ID column is selected instead of ID column for the city.
city_ids = (1, 2, 3, 4, 5)
loc_ids = (1, 2, 3, 5, 4)
ids_qs = City.objects.order_by('id').values('id', 'location__id')
for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
self.assertEqual(val_dict['id'], c_id)
self.assertEqual(val_dict['location__id'], l_id)
def test10_combine(self):
"Testing the combination of two GeoQuerySets. See #10807."
buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
qs1 = City.objects.filter(location__point__within=buf1)
qs2 = City.objects.filter(location__point__within=buf2)
combined = qs1 | qs2
names = [c.name for c in combined]
self.assertEqual(2, len(names))
self.assertTrue('Aurora' in names)
self.assertTrue('Kecksburg' in names)
def test11_geoquery_pickle(self):
"Ensuring GeoQuery objects are unpickled correctly. See #10839."
import pickle
from django.contrib.gis.db.models.sql import GeoQuery
qs = City.objects.all()
q_str = pickle.dumps(qs.query)
q = pickle.loads(q_str)
self.assertEqual(GeoQuery, q.__class__)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
"Testing `Count` aggregate use with the `GeoManager` on geo-fields."
# The City, 'Fort Worth' uses the same location as Dallas.
dallas = City.objects.get(name='Dallas')
# Count annotation should be 2 for the Dallas location now.
loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
self.assertEqual(2, loc.num_cities)
def test12b_count(self):
"Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
# Should only be one author (Trevor Paglen) returned by this query, and
# the annotation should have 3 for the number of books, see #11087.
# Also testing with a `GeoValuesQuerySet`, see #11489.
qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
self.assertEqual(1, len(qs))
self.assertEqual(3, qs[0].num_books)
self.assertEqual(1, len(vqs))
self.assertEqual(3, vqs[0]['num_books'])
def test13c_count(self):
"Testing `Count` aggregate with `.values()`. See #15305."
qs = Location.objects.filter(id=5).annotate(num_cities=Count('city')).values('id', 'point', 'num_cities')
self.assertEqual(1, len(qs))
self.assertEqual(2, qs[0]['num_cities'])
self.assertTrue(isinstance(qs[0]['point'], GEOSGeometry))
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
"Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
no_author = Book.objects.create(title='Without Author')
b = Book.objects.select_related('author').get(title='Without Author')
# Should be `None`, and not a 'dummy' model.
self.assertEqual(None, b.author)
@no_mysql
@no_oracle
@no_spatialite
def test14_collect(self):
"Testing the `collect` GeoQuerySet method and `Collect` aggregate."
# Reference query:
# SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
# "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
# WHERE "relatedapp_city"."state" = 'TX';
ref_geom = GEOSGeometry('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)')
c1 = City.objects.filter(state='TX').collect(field_name='location__point')
c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
for coll in (c1, c2):
            # Even though Dallas and Ft. Worth share the same point, Collect doesn't
            # consolidate -- that's why there are 4 points in the MultiPoint.
self.assertEqual(4, len(coll))
self.assertEqual(ref_geom, coll)
def test15_invalid_select_related(self):
"Testing doing select_related on the related name manager of a unique FK. See #13934."
qs = Article.objects.select_related('author__article')
        # This triggers a TypeError when `get_default_columns` has no
        # `local_only` keyword.  The TypeError is swallowed if the QuerySet is
        # actually evaluated, as list generation swallows TypeError in CPython.
sql = str(qs.query)
def test16_annotated_date_queryset(self):
"Ensure annotated date querysets work if spatial backend is used. See #14648."
birth_years = [dt.year for dt in
list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))]
birth_years.sort()
self.assertEqual([1950, 1974], birth_years)
# TODO: Related tests for KML, GML, and distance lookups.
| bsd-3-clause |
LaboratoireMecaniqueLille/crappy | crappy/blocks/generator.py | 1 | 5313 | # coding: utf-8
from time import time, sleep
from .block import Block
from . import generator_path
from .._global import CrappyStop
class Generator(Block):
"""This block is used to generate a signal.
It can be used to drive a machine. This block can take inputs, and each path
can use these inputs to take decisions.
"""
def __init__(self,
path=None,
freq=200,
cmd_label='cmd',
cycle_label='cycle',
cmd=0,
repeat=False,
trig_link=None,
spam=False,
verbose=False,
end_delay=2):
"""Sets the args and initializes parent class.
Args:
path (:obj:`list`, optional): It must be a :obj:`list` of :obj:`dict`,
each dict providing the parameters to generate the path. Each dict MUST
have a key ``type``.
Note:
The Generator will then instantiate a :ref:`generator path` with all
the other keys as `kwargs`, adding the current ``cmd`` and the time.
        On each round, it will call the :meth:`Path.get_cmd` method,
        passing it data, until it raises :exc:`StopIteration`. It will then
        skip to the next path.
When all paths are over, it will stop Crappy by raising
:exc:`CrappyStop` unless ``repeat`` is set to :obj:`True`. If so, it
will start over indefinitely.
freq (:obj:`float`, optional): The frequency of the block. If set and
positive, the generator will try to send the command at this frequency
(in `Hz`). Else, it will go as fast as possible. It relies on the
:ref:`Block` `freq` control scheme.
      cmd_label (:obj:`str`, optional): The label of the command to send in
        the links.
      cycle_label (:obj:`str`, optional): The label under which the index of
        the current path is sent in the links.
cmd (:obj:`float`, optional): The first value of the command.
      repeat (:obj:`bool`, optional): Loop over the paths, or stop when done?
      trig_link (:obj:`str`, optional): If given, the block will wait until
        data is received through the input link with this label. If
        :obj:`None`, it will try to loop at ``freq``.
spam (:obj:`bool`, optional): If :obj:`True`, the value will be sent on
each loop. Else, it will only send it if it was updated or we reached a
new step.
      verbose (:obj:`bool`, optional): If :obj:`True`, displays a message when
        switching to the next path.
end_delay (:obj:`float`, optional): The delay to wait for before raising
the :exc:`CrappyStop` exception at the end of the path. This is meant
to let enough time to the other blocks to properly terminate.
"""
Block.__init__(self)
self.niceness = -5
self.freq = freq
self.cmd_label = cmd_label
self.cycle_label = cycle_label
self.cmd = cmd
self.repeat = repeat
self.trig_link = trig_link
self.spam = spam
self.verbose = verbose
self.end_delay = end_delay
if path is None:
path = []
self.path = path
    assert all(hasattr(generator_path, d['type']) for d in self.path), \
        "Invalid path in signal generator: " + \
        str([d for d in self.path if not hasattr(generator_path, d['type'])])
self.labels = ['t(s)', self.cmd_label, self.cycle_label]
def prepare(self):
self.path_id = -1 # Will be incremented to 0 on first next_path
if self.trig_link is not None:
self.to_get = list(range(len(self.inputs)))
self.to_get.remove(self.trig_link)
self.last_t = time()
self.last_path = -1
self.next_path()
def next_path(self):
self.path_id += 1
if self.path_id >= len(self.path):
if self.repeat:
self.path_id = 0
else:
print("Signal generator terminated!")
sleep(self.end_delay)
# Block.stop_all()
raise CrappyStop("Signal Generator terminated")
if self.verbose:
print("[Signal Generator] Next step({}):".format(self.path_id),
self.path[self.path_id])
kwargs = {'cmd': self.cmd, 'time': self.last_t}
kwargs.update(self.path[self.path_id])
del kwargs['type']
name = self.path[self.path_id]['type'].capitalize()
# Instantiating the new path class for the next step
self.current_path = getattr(generator_path, name)(**kwargs)
def begin(self):
self.send([self.last_t - self.t0, self.cmd, self.path_id])
self.current_path.t0 = self.t0
def loop(self):
if self.trig_link is not None:
da = self.inputs[self.trig_link].recv_chunk()
data = self.get_all_last(self.to_get)
data.update(da)
else:
data = self.get_all_last()
data[self.cmd_label] = [self.cmd] # Add my own cmd to the dict
try:
cmd = self.current_path.get_cmd(data)
except StopIteration:
self.next_path()
return
    # If get_cmd returns None, do not update the command
    if cmd is not None and cmd != self.cmd:
self.cmd = cmd
self.send([self.last_t - self.t0, self.cmd, self.path_id])
self.last_path = self.path_id
elif self.last_path != self.path_id:
self.send([self.last_t - self.t0, self.cmd, self.path_id])
self.last_path = self.path_id
elif self.spam:
self.send([self.last_t - self.t0, self.cmd, self.path_id])
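    # The branches above send a row when: (1) the command value changed,
    # (2) the path (step) changed even though the command did not, or
    # (3) `spam` is True, in which case the row is re-sent on every loop.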
self.last_t = time()
| gpl-2.0 |
shaufi/odoo | addons/point_of_sale/report/account_statement.py | 380 | 2031 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class account_statement(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_statement, self).__init__(cr, uid, name, context=context)
self.total = 0.0
self.localcontext.update({
'time': time,
'get_total': self._get_total,
'get_data': self._get_data,
})
def _get_data(self, statement):
lines = []
for line in statement.line_ids:
lines.append(line)
return lines
def _get_total(self, statement_line_ids):
total = 0.0
for line in statement_line_ids:
total += line.amount
return total
class report_account_statement(osv.AbstractModel):
_name = 'report.point_of_sale.report_statement'
_inherit = 'report.abstract_report'
_template = 'point_of_sale.report_statement'
_wrapped_report_class = account_statement
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
NaturalGIS/naturalgis_qgis | python/plugins/db_manager/table_viewer.py | 67 | 3659 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import QTableView, QAbstractItemView, QApplication, QAction
from qgis.PyQt.QtGui import QKeySequence, QCursor, QClipboard
from qgis.utils import OverrideCursor
from .db_plugins.plugin import DbError, Table
from .dlg_db_error import DlgDbError
class TableViewer(QTableView):
def __init__(self, parent=None):
QTableView.__init__(self, parent)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.item = None
self.dirty = False
# allow copying results
copyAction = QAction(QApplication.translate("DBManagerPlugin", "Copy"), self)
self.addAction(copyAction)
copyAction.setShortcuts(QKeySequence.Copy)
copyAction.triggered.connect(self.copySelectedResults)
self._clear()
def refresh(self):
self.dirty = True
self.loadData(self.item)
def loadData(self, item):
if item == self.item and not self.dirty:
return
self._clear()
if item is None:
return
if isinstance(item, Table):
self._loadTableData(item)
else:
return
self.item = item
self.item.aboutToChange.connect(self.setDirty)
def setDirty(self, val=True):
self.dirty = val
def _clear(self):
if self.item is not None:
try:
self.item.aboutToChange.disconnect(self.setDirty)
except:
# do not raise any error if self.item was deleted
pass
self.item = None
self.dirty = False
# delete the old model
model = self.model()
self.setModel(None)
if model:
model.deleteLater()
def _loadTableData(self, table):
with OverrideCursor(Qt.WaitCursor):
try:
# set the new model
self.setModel(table.tableDataModel(self))
except DbError as e:
DlgDbError.showError(e, self)
else:
self.update()
def copySelectedResults(self):
if len(self.selectedIndexes()) <= 0:
return
model = self.model()
# convert to string using tab as separator
text = model.headerToString("\t")
for idx in self.selectionModel().selectedRows():
text += "\n" + model.rowToString(idx.row(), "\t")
QApplication.clipboard().setText(text, QClipboard.Selection)
QApplication.clipboard().setText(text, QClipboard.Clipboard)
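    # The clipboard receives tab-separated text, header row first, e.g.
    # (illustrative):
    #   id<TAB>name
    #   1<TAB>Aurora
    # with one additional line per selected row.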
| gpl-2.0 |
LibraryHippo/LibraryHippo | Tests/test_wpl.py | 1 | 45343 | #!/usr/bin/env python
import pytest
import py.test
import datetime
from BeautifulSoup import BeautifulSoup
import wpl
import kpl
from data import Hold
from data import LoginError
from fakes import MyCard
from fakes import MyOpener
def test__parse_holds__numeric_position__reads_position():
response = BeautifulSoup(
"""<table>
<tr class="patFuncHeaders"><th>STATUS</th></tr>
<tr><td> 9 of 83 holds </td></tr>
</table>"""
)
w = wpl.LibraryAccount(MyCard(), MyOpener())
assert (9, 83) == w.parse_holds(response)[0].status
def test__parse_holds__title_with_slash__reads_title():
response = BeautifulSoup(
"""<table>
<tr class="patFuncHeaders"><th> TITLE </th></tr>
<tr><td align="left"><a href="/BLAH"> Either/Or / Boo! </a></td></tr>
</table>"""
)
w = wpl.LibraryAccount(MyCard(), MyOpener())
hold = w.parse_holds(response)[0]
assert ("Either/Or") == hold.title
def test__parse_holds__author_with_slash__reads_author():
response = BeautifulSoup(
"""<table>
<tr class="patFuncHeaders"><th> TITLE </th></tr>
<tr><td align="left"><a href="/BLAH"> JustOne / Bo/o! </a></td></tr>
</table>"""
)
w = wpl.LibraryAccount(MyCard(), MyOpener())
hold = w.parse_holds(response)[0]
assert ("Bo/o!") == hold.author
@pytest.mark.parametrize(
"hold_text,expected_status",
[
("Ready.", Hold.READY),
("IN TRANSIT", Hold.IN_TRANSIT),
("CHECK SHELVES", Hold.CHECK_SHELVES),
("TRACE", Hold.DELAYED),
],
)
def test__parse_holds__named_position__parses_position(hold_text, expected_status):
response = BeautifulSoup(
"""<table>
<tr class="patFuncHeaders"><th>STATUS</th></tr>
<tr><td> %s </td></tr>
</table>"""
% hold_text
)
w = wpl.LibraryAccount(MyCard(), MyOpener())
assert expected_status == w.parse_holds(response)[0].status
hold_with_pickup_dropdown = """<table lang="en" class="patFunc"><tr class="patFuncTitle">
<th colspan="6" class="patFuncTitle">
6 HOLDS
</th>
</tr>
<tr class="patFuncHeaders">
<th class="patFuncHeaders"> CANCEL </th>
<th class="patFuncHeaders"> TITLE </th>
<th class="patFuncHeaders"> STATUS </th>
<th class="patFuncHeaders">PICKUP LOCATION</th>
<th class="patFuncHeaders"> CANCEL IF NOT FILLED BY </th>
<th class="patFuncHeaders"> FREEZE </th>
</tr>
<tr class="patFuncEntry">
<td class="patFuncMark" align="center">
<input type="checkbox" name="cancelb2193902x00" /></td>
<td class="patFuncTitle">
<a href="/patroninfo~S3/1307788/item&2193902"> Stories </a>
<br />
</td>
<td class="patFuncStatus"> 1 of 1 holds </td>
<td class="patFuncPickup"><select name=locb2193902x00>
<option value="ch+++" >Country Hills Library-KPL</option>
<option value="fh+++" >Forest Heights Library-KPL</option>
<option value="g++++" >Grand River Stanley Pk Lib-KPL</option>
<option value="m++++" >Main Library-KPL</option>
<option value="pp+++" >Pioneer Park Library-KPL</option>
<option value="w++++" >WPL Main Library</option>
<option value="wm+++" selected="selected">WPL McCormick Branch</option>
</select>
</td>
<td class="patFuncCancel">04-03-11</td>
<td class="patFuncFreeze" align="center"><input type="checkbox" name="freezeb2193902" /></td>
</tr>
</table>
"""
def test__parse_holds___pickup_dropdown__pickup_is_read():
response = BeautifulSoup(hold_with_pickup_dropdown)
w = wpl.LibraryAccount(MyCard(), MyOpener())
assert "WPL McCormick Branch" == w.parse_holds(response)[0].pickup
def test__parse_holds___pickup_dropdown__pickup_is_string():
"""makes for better pickling"""
response = BeautifulSoup(hold_with_pickup_dropdown)
w = wpl.LibraryAccount(MyCard(), MyOpener())
assert str == type(
w.parse_holds(response)[0].pickup
) # noqa: E721 - need to check exact type
hold_with_unselected_pickup_dropdown = """<table lang="en" class="patFunc"><tr class="patFuncTitle">
<th colspan="6" class="patFuncTitle">
6 HOLDS
</th>
</tr>
<tr class="patFuncHeaders">
<th class="patFuncHeaders"> CANCEL </th>
<th class="patFuncHeaders"> TITLE </th>
<th class="patFuncHeaders"> STATUS </th>
<th class="patFuncHeaders">PICKUP LOCATION</th>
<th class="patFuncHeaders"> CANCEL IF NOT FILLED BY </th>
<th class="patFuncHeaders"> FREEZE </th>
</tr>
<tr class="patFuncEntry">
<td class="patFuncMark" align="center">
<input type="checkbox" name="cancelb2193902x00" /></td>
<td class="patFuncTitle">
<a href="/patroninfo~S3/1307788/item&2193902"> Stories </a>
<br />
</td>
<td class="patFuncStatus"> 1 of 1 holds </td>
<td class="patFuncPickup">
<div class="patFuncPickupLabel">
<label for="locb2677337x00">Pickup Location</label></div>
<select name="locb2677337x00" id="locb2677337x00">
<option value="mn+++">KPL Central Library CURBSIDE</option>
<option value="w++++">WPL Main Library CURBSIDE</option>
<option value="ww+++">WPL John M. Harper CURBSIDE</option>
</select>
</td>
<td class="patFuncCancel">04-03-11</td>
<td class="patFuncFreeze" align="center"><input type="checkbox" name="freezeb2193902" /></td>
</tr>
</table>
"""
def test__parse_holds___unselected_pickup_dropdown__pickup_is_empty():
response = BeautifulSoup(hold_with_unselected_pickup_dropdown)
w = wpl.LibraryAccount(MyCard(), MyOpener())
assert "" == w.parse_holds(response)[0].pickup
def test__parse_holds___unselected_pickup_dropdown__pickup_is_string():
"""makes for better pickling"""
response = BeautifulSoup(hold_with_unselected_pickup_dropdown)
w = wpl.LibraryAccount(MyCard(), MyOpener())
assert str == type(
w.parse_holds(response)[0].pickup
) # noqa: E721 - need to check exact type
def test__parse_holds___with_expiration_date__reads_expiration():
response = BeautifulSoup(
"""<table>
<tr class="patFuncHeaders"><th>CANCEL IF NOT FILLED BY</th></tr>
<tr><td>04-03-11</td></tr>
</table>"""
)
w = wpl.LibraryAccount(MyCard(), MyOpener())
hold = w.parse_holds(response)[0]
assert datetime.date(2011, 4, 3) == hold.expires
def test__parse_holds___frozen__added_to_status_notes():
response = BeautifulSoup(
"""<table>
<tr class="patFuncHeaders">
<th> FREEZE
</th>
</tr>
<tr>
<td class="patFuncFreeze" align="center">
<input type="checkbox" name="freezeb2186875" checked />
</td>
</tr>
</table>"""
)
w = wpl.LibraryAccount(MyCard(), MyOpener())
assert w.parse_holds(response)[0].is_frozen()
def test__parse_holds___empty_freeze_field__is_not_frozen():
response = BeautifulSoup(
"""<table>
<tr class="patFuncHeaders"><th> FREEZE </th></tr>
<tr><td class="patFuncFreeze" align="center"> </td></tr>
</table>"""
)
w = wpl.LibraryAccount(MyCard(), MyOpener())
assert not w.parse_holds(response)[0].is_frozen()
def test__parse_holds___hold_for_waterloo__finds_correct_url():
response = BeautifulSoup(
"""<table>
<tr class="patFuncHeaders">
<th> TITLE
</th>
</tr>
<tr class="patFuncEntry">
<td class="patFuncTitle">
<label for="canceli3337880x00">
<a href="/record=b2247789~S3"> The profession : a thriller / Steven Pressfield
</a>
</label>
<br />
</td>
</tr>
</table>"""
)
w = wpl.LibraryAccount(MyCard(), MyOpener())
assert "https://books.kpl.org/record=b2247789~S3" == w.parse_holds(response)[0].url
def test__parse_holds___hold_for_kitchener__finds_correct_url():
response = BeautifulSoup(
"""<table>
<tr class="patFuncHeaders"><th> TITLE </th></tr>
<tr class="patFuncEntry">
<td class="patFuncTitle">
<label for="cancelb2232976x09"><a href="/record=b2232976~S1"> Live wire / Harlan Coben. -- </a></label>
<br />
</td>
</tr>
</table>"""
)
k = kpl.LibraryAccount(MyCard(), MyOpener())
assert "https://books.kpl.org/record=b2232976~S1" == k.parse_holds(response)[0].url
def test__parse_items__title_has_slash__parses_title():
response = BeautifulSoup(
"""
<table lang="en">
<tr class="patFuncHeaders">
<th> RENEW
</th>
<th> TITLE
</th>
<th > BARCODE
</th>
<th> STATUS
</th>
<th > CALL NUMBER
</th>
</tr>
<tr>
<td align="left">
<input type="checkbox" name="renew0" value="i3103561" />
</td>
<td align="left">
<a href="/patroninfo~S3/1307788/item&2160792"> The city/the city / China Mi\u00E9ville
</a>
</td>
<td align="left"> 33420011304806
</td>
<td align="left"> DUE 07-20-09
<span >Renewed 1 time
</span>
</td>
<td align="left"> MIEVI
</td>
</tr>
</table>
"""
)
w = wpl.LibraryAccount(MyCard(), MyOpener())
item = w.parse_items(response)[0]
assert "The city/the city" == item.title
def test__parse_items__author_has_accent__parses_author():
response = BeautifulSoup(
"""
<table lang="en">
<tr class="patFuncHeaders">
<th> RENEW
</th>
<th> TITLE
</th>
<th > BARCODE
</th>
<th> STATUS
</th>
<th > CALL NUMBER
</th>
</tr>
<tr>
<td align="left">
<input type="checkbox" name="renew0" value="i3103561" />
</td>
<td align="left">
<a href="/patroninfo~S3/1307788/item&2160792"> The city/the city / China Mi\u00E9ville
</a>
</td>
<td align="left"> 33420011304806
</td>
<td align="left"> DUE 07-20-09
<span >Renewed 1 time
</span>
</td>
<td align="left"> MIEVI
</td>
</tr>
</table>
"""
)
w = wpl.LibraryAccount(MyCard(), MyOpener())
item = w.parse_items(response)[0]
assert "China Mi\u00E9ville" == item.author
def test__parse_items__with_status_notes__finds_status_notes():
response = BeautifulSoup(
"""
<table lang="en">
<tr class="patFuncHeaders">
<th> RENEW
</th>
<th> TITLE
</th>
<th > BARCODE
</th>
<th> STATUS
</th>
<th > CALL NUMBER
</th>
</tr>
<tr>
<td align="left">
<input type="checkbox" name="renew0" value="i3103561" />
</td>
<td align="left">
<a href="/patroninfo~S3/1307788/item&2160792"> The city/the city / China Mi\u00E9ville
</a>
</td>
<td align="left"> 33420011304806
</td>
<td align="left"> DUE 07-20-09
<span >Renewed 1 time
</span>
</td>
<td align="left"> MIEVI
</td>
</tr>
</table>
"""
)
w = wpl.LibraryAccount(MyCard(), MyOpener())
item = w.parse_items(response)[0]
assert ["Renewed 1 time"] == item.status_notes
def test__parse_items__span_in_title__all_text_in_title():
response = BeautifulSoup(
"""
<table lang="en">
<tr class="patFuncHeaders">
<th> RENEW
</th>
<th> TITLE
</th>
<th > BARCODE
</th>
<th> STATUS
</th>
<th > CALL NUMBER
</th>
</tr>
<tr class="patFuncEntry">
<td align="left" class="patFuncMark">
<input type="checkbox" name="renew3" id="renew3" value="i2626300" />
</td>
<td align="left" class="patFuncTitle">
<label for="renew3">
<a href="/record=b1945079~S3"> Hiking the redwood coast -- <span class="patFuncVol">2004</span>
</a>
</label>
<br />
</td>
<td align="left" class="patFuncBarcode"> 33420007964514
</td>
<td align="left" class="patFuncStatus"> DUE 05-29-10
</td>
<td align="left" class="patFuncCallNo"> 917.9404 Hik
</td>
</tr>
</table>
"""
)
w = wpl.LibraryAccount(MyCard(), MyOpener())
item = w.parse_items(response)[0]
assert """Hiking the redwood coast -- 2004""" == item.title
def test__parse_items__no_author__author_blank():
response = BeautifulSoup(
"""
<table lang="en">
<tr class="patFuncHeaders">
<th> RENEW
</th>
<th> TITLE
</th>
<th > BARCODE
</th>
<th> STATUS
</th>
<th > CALL NUMBER
</th>
</tr>
<tr class="patFuncEntry">
<td align="left" class="patFuncMark">
<input type="checkbox" name="renew3" id="renew3" value="i2626300" />
</td>
<td align="left" class="patFuncTitle">
<label for="renew3">
<a href="/record=b1945079~S3"> Hiking the redwood coast
</a>
</label>
<br />
</td>
<td align="left" class="patFuncBarcode"> 33420007964514
</td>
<td align="left" class="patFuncStatus"> DUE 05-29-10
</td>
<td align="left" class="patFuncCallNo"> 917.9404 Hik
</td>
</tr>
</table>
"""
)
w = wpl.LibraryAccount(MyCard(), MyOpener())
item = w.parse_items(response)[0]
assert "" == item.author
def test__parse_status__status_notes_jammed_up_against_date__date_parsed():
response = BeautifulSoup(
"""
<table lang="en">
<tr class="patFuncHeaders">
<th> RENEW
</th>
<th> TITLE
</th>
<th > BARCODE
</th>
<th> STATUS
</th>
<th > CALL NUMBER
</th>
</tr>
<tr>
<td align="left">
<input type="checkbox" name="renew0" value="i3103561" />
</td>
<td align="left">
<a href="/patroninfo~S3/1307788/item&2160792"> The city/the city / China Mi\u00E9ville
</a>
</td>
<td align="left"> 33420011304806
</td>
<td align="left"> DUE 10-07-09IN LIBRARY USE
</span>
</td>
<td align="left"> MIEVI
</td>
</tr>
</table>
"""
)
w = wpl.LibraryAccount(MyCard(), MyOpener())
item = w.parse_items(response)[0]
assert "The city/the city" == item.title
assert "China Mi\u00E9ville" == item.author
assert datetime.date(2009, 10, 7) == item.status
def test__parse_status__status_notes_jammed_up_against_date__status_notes_found():
response = BeautifulSoup(
"""
<table lang="en">
<tr class="patFuncHeaders">
<th> RENEW
</th>
<th> TITLE
</th>
<th > BARCODE
</th>
<th> STATUS
</th>
<th > CALL NUMBER
</th>
</tr>
<tr>
<td align="left">
<input type="checkbox" name="renew0" value="i3103561" />
</td>
<td align="left">
<a href="/patroninfo~S3/1307788/item&2160792"> The city/the city / China Mi\u00E9ville
</a>
</td>
<td align="left"> 33420011304806
</td>
<td align="left"> DUE 10-07-09IN LIBRARY USE
</span>
</td>
<td align="left"> MIEVI
</td>
</tr>
</table>
"""
)
w = wpl.LibraryAccount(MyCard(), MyOpener())
item = w.parse_items(response)[0]
assert ["IN LIBRARY USE"] == item.status_notes
failing_login_response = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<link rel="stylesheet" type="text/css" href="/scripts/ProStyles.css" />
<link rel="stylesheet" type="text/css" href="/screens/w-stylesheet3.css" />
<title>Library Log in
</title>
<meta http-equiv="X-UA-Compatible" content="IE=8;FF=3;OtherUA=4" />
<meta http-equiv="Content-type" content="text/html;charset=UTF-8" />
<meta name="viewport" content="width=device-width,user-scalable=no" />
<script type="text/javascript" src="/scripts/common.js">
</script>
<!--<link rel="stylesheet" type="text/css" href="/apps/CAS/resources/login_mobile.css">
<link rel="stylesheet" type="text/css"
href="/apps/CAS/resources/login_s3_html.css" media="screen and (min-device-width: 481px)"> -->
<link rel="stylesheet" type="text/css" href="/apps/CAS/resources/login_mobile_s3.css" />
<style type="text/css" media="screen and (min-width: 481px)">
<!--
@import url("/apps/CAS/resources/login_s3_html.css");
-->
</style>
</head>
<body id="cas">
<!--[if IE]>
<div id="ie">
<![endif]-->
<div class="loginPage">
<div class="loginTop">
<!-- Prevent div collapse -->
<div class="loginTopLogo">
<a href="http://www.wpl.ca" tabindex="0">
<img src="/screens/w-logo-137x60.gif" alt="">
</a>
</div>
</div>
<!-- Use for library-only authentication: -->
<div class="loginArea loginArea1Col">
<!--<div style="text-align:center; background:#FF0077; color:black;" >
<strong>Please Note:</strong>
Holds placed online are currently not working.
Please call us at 519-886-1310 to have staff help you place holds.
</div>-->
<div class="clearfloats">
</div>
<!--end theForm1-->
<form id="fm1" class="fm-v clearfix" method="post"
action="/iii/cas/login?service=https%3A%2F%2Fencore.kpl.org%3A443%2Fiii%2Fencore...">
<!--display any errors-->
<div id="status" class="errors">Sorry, the information you submitted was invalid. Please try again.
</div>
<!-- Message from client webapp to be displayed on the CAS login screen -->
<div id="clientmessage">
<!--display any errors-->
</div>
<!-- end clientmessage -->
<!--start theForm2-->
<!-- Message from client webapp to be displayed on the CAS login screen -->
<div id="clientmessage">
<!--display any errors-->
</div>
<!-- end clientmessage -->
<!--display login form-->
<span style="padding-left:1.8em;">
<h3>Library Account Login
</h3>
</span>
<div id="login">
<fieldset>
<label for="name">First and Last Name:
</label>
<div class="loginField">
<input id="name" name="name" class="required" tabindex="3" accesskey="n"
type="text" value="" size="20" maxlength="40"/>
</div>
<fieldset class="barcodeAltChoice">
<!--<legend>Enter your barcode or login name</legend>-->
<label for="code">Library card number
<br />(no spaces):
</label>
<div class="loginField">
<input id="code" name="code" class="required" tabindex="4" accesskey="b"
type="text" size="20" maxlength="40" />
</div>
</fieldset>
<!--<div id="ipssopinentry">
<label for="pin">Personal Identification Number (PIN):</label>
<div class="loginFieldBg">
<input id="pin" name="pin" class="required" tabindex="6" accesskey="p"
type="password" value="" size="20" maxlength="40" />
</div>
</div>-->
<!--end theForm2-->
<!--start theForm3-->
<!-- This button is hidden unless using mobile devices. Even if hidden it enables Enter key to submit. -->
<input type="submit" name="Log In" class="loginSubmit" tabindex="35" />
</fieldset>
</div>
<!-- end login -->
<div class="clearfloats">
</div>
<div class="formButtons">
<a href="#" onclick="document.forms['fm1'].submit();" tabindex="40">
<div onmousedown="this.className='pressedState';" onmouseout="this.className='';"
onmouseup="this.className='';">
<div class="buttonSpriteDiv">
<span class="buttonSpriteSpan1">
<span class="buttonSpriteSpan2">Submit
</span>
</span>
</div>
</div>
</a>
</div>
<br />
<div style="display:none;">
<!--Enable form focus-->
<script type="text/javascript">
<!--//-->
<![CDATA[//>
<!--
//Hide the main PIN entry if the new pin section is active.
//try { if ( document.getElementById("ipssonewpin") ) {
// document.getElementById("ipssopinentry").style.display="none"; } }
//catch(err) {}
//Look for the first field in the external patron part of the form. This field will get cursor focus.
var ipssoFirstField;
try { ipssoFirstField = document.forms[0].extpatid; }
catch(err) {
}
//If we still don't have a field, look for the name field in the library account part.
if ( ipssoFirstField==undefined ) { ipssoFirstField = document.forms[0].name; }
//Set focus. Ignore errors.
try { ipssoFirstField.focus(); }
catch(err) {}
document.onkeydown = enterSubmit
function enterSubmit(e) {
var keycode;
if (window.event) keycode = window.event.keyCode;
else if (e) keycode = e.which;
if (keycode==13)
document.forms[0].submit();
}
//-->
<!]]>
</script>
<!--end theForm3-->
<!-- Spring Web Flow requirements must be in a certain place -->
<input type="hidden" name="lt"
value="_c761F6248-082B-2453-47FE-DEBB4500C8AD_kF7718391-1925-2239-9B69-01CE8B941744" />
<input type="hidden" name="_eventId" value="submit" />
</form>
<!--start theForm4-->
</div>
</div>
<!-- end loginArea -->
<div class="loginActions">
<!--
<span class="loginActionText">New to the library?</span>
<span class="loginActionScreenOnly"><a href="/selfreg">Create an account</a></span>
<span class="loginActionSeparator"></span>
-->
</div>
</div>
<!-- loginPage -->
<!--[if IE]>
</div>
<![endif]-->
<!-- IPSSO html form updated 2010 June 29 -->
</body>
</html>
<!--this is customized </iiidb/http/apps//CAS/resources/ipsso_s3.html>-->
<!--end theForm4-->
"""
def test__login__login_fails__throws():
w = wpl.LibraryAccount(MyCard(), MyOpener("", failing_login_response))
py.test.raises(LoginError, w.login)
def test__login__new_kpl_format__passes():
w = wpl.LibraryAccount(
MyCard(),
MyOpener(
"",
"""
<!-- Rel 2007 "Skyline" Example Set -->
<!-- This File Last Changed: 02 September 2008 -->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>Kitchener and Waterloo Public Libraries /KPL</title>
<base target="_self"/>
<link rel="stylesheet" type="text/css" href="/scripts/ProStyles.css" />
<link rel="stylesheet" type="text/css" href="/screens/k-stylesheet1.css" />
<link rel="shortcut icon" type="ximage/icon" href="/screens/favicon.ico" />
<script type="text/javascript" src="/scripts/common.js"></script>
<script type="text/javascript" src="/scripts/features.js"></script>
<script type="text/javascript" src="/scripts/elcontent.js"></script>
<link rel="icon" href="/screens/favicon.ico"><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
</head>
<body >
<body>
<div id="wrapper">
<div id="header">
<a href="http://www.kpl.org/">
<img src="/screens/kpl_logo.png" alt="Kitchener Public Library logo"/></a>
<div id="nav">
<ul id="navmenu">
<li><a href="http://books.kpl.org/search~S1/" title="Library Catalogue" class="selected">Catalogue</a></li>
</ul>
</div>
<div id="nav2"> </div>
</div>
<font color="purple">You are logged in to Kitchener and Waterloo Public Libraries
/KPL as: </font><font color="purple" size="+2">Hippo, Librarian</font><br />
<br />
<script type="text/javascript">
function SetHTML1(type) {
document.getElementById("a1").style.display = "none"
document.getElementById("b1").style.display = "none"
// Using style.display="block" instead of style.display="" leaves a carriage return
document.getElementById(type).style.display = ""
}
</script>
<div align="center">
<span id="a1" style="">
<form method="get" action="http://encore.kpl.org/iii/encore_kpl/Home,$Search.form.sdirect" name="form" id="form">
<input name="formids" value="target" type="hidden">
<input name="lang" value="eng" type="hidden">
<input name="suite" value="def" type="hidden">
<input name="reservedids" value="lang,suite" type="hidden">
<input name="submitmode" value="" type="hidden">
<input name="submitname" value="" type="hidden">
<table>
<tr>
<td style="padding-right:10px;">
<span style="font-family:'Times New Roman', Times, serif; font-size:1.4em;">Search:</span>
</td>
<td><input name="target" value="" id="target" type="text"
style="border:1px solid #555; width:410px; height:30px; font-size:100%;">
</td>
<td style="padding-left:10px;">
<input type="image" src="http://www.kpl.org/_images/catalogue/go_button.png" value="submit"/>
</td>
</tr>
<tr><td colspan="3" style="font-size:12px;"> </td></tr>
</table>
</form>
</span>
<span id="b1" style="display:none;">
<div class="bibSearchtool" style="margin-top:5px;"><form target="_self" action="/search~S2/">
<label for="searchtype" style="display:none;">Search Type1</label><select name="searchtype" id="searchtype">
<option value="t"> TITLE</option>
<option value="a"> AUTHOR</option>
<option value="s"> SERIES</option>
<option value="d"> SUBJECT</option>
<option value="c"> CALL NO</option>
<option value="i"> ISBN/ISSN</option>
<option value="Y" selected="selected"> KEYWORD</option>
</select>
<label for="searcharg" style="display:none;">Search</label>
<input type="text" name="searcharg" id="searcharg" size="30" maxlength="75" value="" />
<label for="searchscope" style="display:none;">Search Scope</label><select name="searchscope" id="searchscope">
<option value="2" selected>Kitchener Public Library</option>
<option value="3">Waterloo Public Library</option>
<option value="5">King Digital Collection</option>
</select>
<input type="hidden" name="SORT" value="D" />
<input type="hidden" name="extended" value="0" /><input type="submit" name="SUBMIT" value="Search" />
<div style="margin-top:6px;">
<input type="checkbox" name="availlim" value="1" />
<span class="limit-to-available">Limit results to available items<br/><br/></span>
</div>
</form></div>
</span>
<div align="center" style=" font-family: Arial, Helvetica, sans-serif; font-size:14px;">
<input style="margin-top:5px;" id="multisearch" name="br"
type="radio" onClick="SetHTML1('a1')" checked>Search New KPL Catalogue
<input style="margin-top:5px;" id="multisearch" name="br"
type="radio" onClick="SetHTML1('b1')">Search Classic Catalogue
</div>
<br /><br />
<p style="font-size:0.85em;">
<span style="color:#990000; font-weight:bold;">Note:</span>
Please remember to <strong>LOG OUT</strong> of your library account when you are finished using the catalogue.<br />
The logout option can be found at the bottom of this page, or in the top right corner of the catalogue.</p>
<br />
</div>
<!--{patron}-->
<br/><br/>
<div align="center">
<table>
<tr>
<td>
<div class="patNameAddress">
<strong>Hippo, Librarian</strong><br />
100 Regina Street S<br />
Waterloo ON N2V 4A8<br />
519-885-1550 (E)<br />
EXP DATE:08-01-2013<br />
<br/>
<div>
</div>
<div>
<a href="/patroninfo~S1/XXXXXXXX/holds" target="_self">4 requests (holds).</a>
</div>
<br><br>
</div>
</td>
<td>
<div class="patActionsLinks">
<div>
<a href="#" onClick="return open_new_window( '/patroninfo~S1/XXXXXXXX/modpinfo' )">Modify Personal Information</a>
</div>
<div><p>
<a href="/patroninfo~S1/XXXXXXXX/readinghistory" target="_self">My Reading History</a>
</p></div>
<br>
Classic catalogue only:
<div><p>
<a href="/patroninfo~S1/XXXXXXXX/getpsearches" target="_self">Preferred Searches</a>
</p></div>
<div>
<a href="/patroninfo~S1/XXXXXXXX/mylists" target="_self">My Lists</a>
</div>
<br>
<p><a href="http://encore.kpl.org/iii/encore_kpl/home?...">
<img src="/screens/b-logout.gif" alt="Log Out" border="0" />
</a></p>
<!--
<p valign=top><a href="/logout?" target="_self"><img src="/screens/b-logout.gif" alt="Log Out" border="0" /></a></p>
-->
</div></td>
</tr>
</table>
</div>
<br/><br/>
<div class="patFuncArea" style="border:1px solid #555555;">
</div>
<br />
<div class="footer"></div>
</div>
</body>
</html>
<!--this is customized <screens/patronview_web_s1.html>-->
""",
),
)
w.login()
def test__get_status__with_card_expiry_date__reads_date():
w = wpl.LibraryAccount(
MyCard(),
MyOpener(
"",
"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>Kitchener and Waterloo Public Libraries /WPL</title>
<base target="_self"/>
<link rel="stylesheet" type="text/css" href="/scripts/ProStyles.css" />
<link rel="stylesheet" type="text/css" href="/screens/w-stylesheet3.css" />
<link rel="shortcut icon" type="ximage/icon" href="/screens/favicon.ico" />
<script type="text/javascript" src="/scripts/common.js"></script>
<script type="text/javascript" src="/scripts/features.js"></script>
<script type="text/javascript" src="/scripts/elcontent.js"></script>
<link rel="icon" href="/screens/favicon.ico"><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
</head>
<body >
<script language="JavaScript" type="text/javascript">
var min=8;
var max=22;
function increaseFontSize() {
var p = document.getElementsByTagName('*')
for(i=0;i<p.length;i++) {
if(p[i].style.fontSize) {
var s = parseInt(p[i].style.fontSize.replace("px",""));
} else {
var s = 14;
}
if(s!=max) {
s += 2;
}
p[i].style.fontSize = s+"px"
}
}
function decreaseFontSize() {
var p = document.getElementsByTagName('*');
for(i=0;i<p.length;i++) {
if(p[i].style.fontSize) {
var s = parseInt(p[i].style.fontSize.replace("px",""));
} else {
var s = 14;
}
if(s!=min) {
s -= 2;
}
p[i].style.fontSize = s+"px"
}
}
</script>
<script language="JavaScript" type="text/javascript">
<!-- Hide the JS
startTimeout(600000, "/search~S3/");
-->
</script>
<!-- begin toplogo.html file -->
<!-- HEADER -->
<a class="linkskip" href="#content">Skip over navigation</a>
<div id="container-header">
<div id="header-main1-background">
<div id="container-header-content">
<div id="header-logo">
<a href="http://www.wpl.ca">
<img src="/screens/wpl-logo-main1.jpg" alt="Waterloo Public Library"/>
</a></div>
<div id="header-nav" align=center>
<ul>
<li><a href="http://books.kpl.org/selfreg~S3/">Get a Card</a></li>
<li><a href="https://books.kpl.org/iii/cas/login?service=http://books.kpl.org/patroninfo~S3/..."
class="navline">My Account</a></li>
<li><a href="http://www.wpl.ca/location">Hours & Locations</a></li>
<li><a href="http://www.wpl.ca/contact">Contact Us</a></li>
</ul>
</div>
<div id="header-main1-utility">
<div id="header-title" class=title1><a href="/search~S3/">Catalogue</a></div>
<div id="font-size">
<a href="javascript:decreaseFontSize();">
<img src="/screens/wpl-font-smaller.gif" alt="Font Smaller" width="15" height="38"/>
</a>
<a href="javascript:increaseFontSize();" >
<img src="/screens/wpl-font-larger.gif" alt="Font Larger" width="19" height="38"/>
</a>
</div>
</div>
</div>
</div>
</div>
<!-- NAV -->
<div id="container-nav" align=center>
<div id="nav">
<ul>
<li><a href="http://www.wpl.ca" class="navline">Home</a></li>
<li><a href="http://books.kpl.org/search~S3">Catalogue</a></li>
<li><a href="http://www.wpl.ca/ebooks">eBooks</a></li>
<li><a href="http://www.wpl.ca/ebranch">eBranch</a></li>
<li><a href="http://www.wpl.ca/book-a-computer">Book a Computer</a></li>
<li><a href="http://www.wpl.ca/blogs-more">Blogs</a></li>
<li><a href="http://www.wpl.ca/ebranch/diy/">DIY</a></li>
</ul>
</div>
</div>
<div align=center>
<a href="http://wplreads.wpl.ca">WPL Reads</a> |
<a href="http://books.kpl.org/screens/newitems.html">New Items</a>
| <a href="http://www.wpl.ca/about/borrowing/interlibrary-loan-form/">Interlibrary Loan</a>
| <a href="http://www.wpl.ca/ebranch/databases-and-weblinks">Databases and WebLinks</a>
| <a href="http://www.wpl.ca/services/ask-us/">Ask Us</a>
</div>
<!--end toplogo.html-->
<br />
<p align=center><font size=4 color=#0066cc>Kitchener and Waterloo Public Libraries/WPL <br />
You are logged in as HIPPO, LIBRARIAN.</font><p><br />
<br />
<div class="srchhelpHeader" align="center">
<form method="get" action="http://encore.kpl.org/iii/encore_wpl/Home,$Search.form.sdirect" name="form" id="form">
<input name="formids" value="target" type="hidden">
<input name="lang" value="eng" type="hidden">
<input name="suite" value="def" type="hidden">
<input name="reservedids" value="lang,suite" type="hidden">
<input name="submitmode" value="" type="hidden">
<input name="submitname" value="" type="hidden">
<table>
<tr>
<td style="padding-right:10px;">
<span style="font-family:'Times New Roman', Times, serif; font-size:1.7em;">Search:</span>
</td>
<td><input name="target" value="" id="target" type="text"
style="border:1px solid #555; width:410px; height:30px; font-size:1.4em;"></td>
<td style="padding-left:10px;"><input type="image" src="/screens/go_button.png" value="submit"/></td>
</tr>
<tr>
<td></td>
<td align="right">
<p><a href="http://encore.kpl.org/iii/encore_wpl/home?lang=eng&suite=kpl&advancedSearch=true&searchString=">
Advanced Search</a></p></td>
<td></td></tr>
</table>
</form>
<br />
<a name="content" id="content"></a>
<!--<form name="searchtool" action="/search~S3/">
<select tabindex="1" name="searchtype" id="searchtype" onChange="initSort()">
<option value="X" selected>Keyword</option>
<option value="t">Title</option>
<option value="a">Author</option>
<option value="s">Series</option>
<option value="d">Subject</option>
<option value="c">Call Number</option>
<option value="i">ISBN/ISSN</option>
</select>
<input tabindex="2" type="text" name="searcharg" size="50" maxlength="75">
<input type="hidden" name="searchscope" value="3">
<input tabindex="3" type="submit" value="Search">
</div>
<div class="media">
<div align="center">Media (book, DVD, etc.):
<select tabindex="4" name="searchlimits">
<option value="" selected>Any</option>
<option value="m=d">DVD</option>
<option value="m=j">CD Audio</option>
<option value="m=m">CD-ROM</option>
<option value="m=z">E-audio Book</option>
<option value="m=e">E-book</option>
<option value="m=a">Book</option>
<option value="m=l">Large Print Book</option>
<option value="m=v">Government Document</option>
<option value="m=c">Magazine/Newspaper</option>
<option value="m=o">Kit</option>
</select>
</div>
</div>
<label class="limit-to-available">
<div align="center">
<input tabindex="5" type="checkbox" name="availlim" value="1">
Limit results to available items
</div>
</label>
</form>
<br />-->
<!--{patron}-->
<table>
<tr>
<td valign=top>
<div class="patNameAddress">
<strong>HIPPO, LIBRARIAN.</strong><br />
100 Regina Steet S<br />
WATERLOO, ON N2V 4A8<br />
519-885-1550<br />
EXP DATE:12-04-2009<br />
<br/>
<div>
</div>
<div>
<a href="/patroninfo~S3/1307788/holds" target="_self">14 requests (holds).</a>
</div>
<div>
<a href="/patroninfo~S3/1307788/items" target="_self">8 Items currently checked out</a>
</div>
</div>
</td>
<td>
<div style="text-align:left;">
<div>
<a href="#" onClick="return open_new_window( '/patroninfo~S3/1307788/modpinfo' )">Modify Personal Information</a>
</div>
<div><p>
<a href="/patroninfo~S3/1307788/readinghistory" target="_self">My Reading History</a>
</p></div>
<div><p>
<p> </p>
Classic Catalogue Features:
</p></div>
<div><p>
<a href="/patroninfo~S3/1307788/getpsearches" target="_self">Preferred Searches</a>
</p></div>
<div style="display:none;">
<a href="/patroninfo~S3/1307788/patreview" target="_self">My Reviews</a>
</div>
<div>
<a href="/patroninfo~S3/1307788/mylists" target="_self">My Lists</a>
</div>
</div>
</td>
</tr>
</table>
<table>
<tr>
<td>
<div class="patActionsLinks">
<!--
<p valign=top><a href="http://encore.kpl.org/iii/encore_wpl/..."
target="_self"><img src="/screens/b-logout.gif" alt="Log Out" border="0" /></a></p>-->
<p valign=top>
<a href="http://encore.kpl.org/iii/encore_wpl/home?component=..."
target="_self">
<img src="/screens/b-logout.gif" alt="Log Out" border="0" /></a></p>
</div></td>
</tr>
</table>
<br/><br/>
<div class="patFuncArea" style="border:0px #555555;">
</div>
<br />
</div>
<div class="botlogo">
<br />
Your library account may not be available during scheduled system maintenance
10:00pm - 12:00am Mon to Thu, & 6pm - 8pm Fri to Sun.
<br />
</div>
</body>
</html>
<!--this is customized <screens/patronview_web_s3.html>-->
""",
"",
"",
"",
),
)
card_info = w.get_status()
assert datetime.date(2009, 12, 4) == card_info.expires
def test__get_status__wpl_login__finds_correct_holds_url():
w = wpl.LibraryAccount(
MyCard(),
MyOpener(
"#login",
"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<body >
<table>
<tr>
<td valign=top>
<div class="patNameAddress">
<div>
<a href="/patroninfo~S3/XXXXXXX/holds" target="_self">4 requests (holds).</a>
</div>
<div>
<a href="/patroninfo~S3/XXXXXXX/items" target="_self">5 Items currently checked out</a>
</div>
</div>
</td>
<td>
<div class="patActionsLinks">
<div>
<a href="#" onClick="return open_new_window( '/patroninfo~S3/XXXXXXX/newpin' )">Modify your PIN</a>
</div>
<div><p>
<a href="/patroninfo~S3/XXXXXXX/readinghistory" target="_self">My Reading History</a>
</p></div>
<div><p>
<a href="/patroninfo~S3/XXXXXXX/getpsearches" target="_self">Preferred Searches</a>
</p></div>
</div>
</td>
</tr>
</table>
</body>
</html>""",
"""<table>
<tr class="patFuncHeaders"><th> TITLE </th></tr>
<tr><td align="left"><a href="/BLAH"> Either/Or / Bo/o! </a></td></tr>
</table>""",
"#items",
"#logout",
),
)
status = w.get_status()
assert (
"https://books.kpl.org/patroninfo~S3/XXXXXXX/holds" == status.holds[0].holds_url
)
def test__get_status__wpl_login_no_holds__finds_no_holds():
w = wpl.LibraryAccount(
MyCard(),
MyOpener(
"#login",
"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<body >
<table>
<tr>
<td valign=top>
<div class="patNameAddress">
<div>
<a href="/patroninfo~S3/XXXXXXX/items" target="_self">5 Items currently checked out</a>
</div>
</div>
</td>
<td>
<div class="patActionsLinks">
<div>
<a href="#" onClick="return open_new_window( '/patroninfo~S3/XXXXXXX/newpin' )">Modify your PIN</a>
</div>
<div><p>
<a href="/patroninfo~S3/XXXXXXX/readinghistory" target="_self">My Reading History</a>
</p></div>
<div><p>
<a href="/patroninfo~S3/XXXXXXX/getpsearches" target="_self">Preferred Searches</a>
</p></div>
</div>
</td>
</tr>
</table>
</body>
</html>""",
"#holds",
"#items",
"#logout",
),
)
status = w.get_status()
assert status.holds == []
def test__get_status__wpl_login_no_items__finds_no_items():
w = wpl.LibraryAccount(
MyCard(),
MyOpener(
"#login",
"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<body >
<table>
<tr>
<td valign=top>
<div class="patNameAddress">
<div>
<a href="/patroninfo~S3/XXXXXXX/holds" target="_self">4 requests (holds).</a>
</div>
</div>
</td>
<td>
<div class="patActionsLinks">
<div>
<a href="#" onClick="return open_new_window( '/patroninfo~S3/XXXXXXX/newpin' )">Modify your PIN</a>
</div>
<div><p>
<a href="/patroninfo~S3/XXXXXXX/readinghistory" target="_self">My Reading History</a>
</p></div>
<div><p>
<a href="/patroninfo~S3/XXXXXXX/getpsearches" target="_self">Preferred Searches</a>
</p></div>
</div>
</td>
</tr>
</table>
</body>
</html>""",
"#holds",
"#items",
"#logout",
),
)
status = w.get_status()
assert status.items == []
def test__login_wpl_format_2013_06_07__can_parse_the_login_screen():
w = wpl.LibraryAccount(
MyCard(),
MyOpener(
"""
<form id="fm1" class="fm-v clearfix" method="post"
action="/iii/cas/login?service=https://books.kpl.org/patroninfo~S3/j_acegi_cas_security_check&lang=eng&scope=3">
<!--display any errors-->
<!-- Message from client webapp to be displayed on the CAS login screen -->
<div id="clientmessage">
<!--display any errors-->
</div>
<!-- end clientmessage -->
<!--start theForm2-->
<!-- Message from client webapp to be displayed on the CAS login screen -->
<div id="clientmessage">
<!--display any errors-->
</div>
<!-- end clientmessage -->
<!--display login form-->
<span style="padding-left:1.8em;">
<h3>Library Account Login
</h3>
</span>
<div id="login">
<fieldset>
<label for="name">First and Last Name:
</label>
<div class="loginField">
<input id="name" name="name" class="required" tabindex="3"
accesskey="n" type="text" value="" size="20" maxlength="40"/>
</div>
<fieldset class="barcodeAltChoice">
<!--<legend>Enter your barcode or login name</legend>-->
<label for="code">Library card number
<br />(no spaces):
</label>
<div class="loginField">
<input id="code" name="code" class="required" tabindex="4"
accesskey="b" type="text" size="20" maxlength="40" />
</div>
</fieldset>
<!--<div id="ipssopinentry">
<label for="pin">Personal Identification Number (PIN):</label>
<div class="loginFieldBg">
<input id="pin" name="pin" class="required" tabindex="6"
accesskey="p" type="password" value="" size="20" maxlength="40" />
</div>
</div>-->
<!--end theForm2-->
<!--start theForm3-->
<!-- This button is hidden unless using mobile devices. Even if hidden it enables Enter key to submit. -->
<input type="submit" name="Log In" class="loginSubmit" tabindex="35" />
</fieldset>
</div>
<!-- end login -->
<div class="clearfloats">
</div>
<div class="formButtons">
<a href="#" onclick="document.forms['fm1'].submit();" tabindex="40">
<div onmousedown="this.className='pressedState';"
onmouseout="this.className='';" onmouseup="this.className='';">
<div class="buttonSpriteDiv">
<span class="buttonSpriteSpan1">
<span class="buttonSpriteSpan2">Submit
</span>
</span>
</div>
</div>
</a>
</div>
<!--end theForm3-->
<!-- Spring Web Flow requirements must be in a certain place -->
<input type="hidden" name="lt"
value="_cF3646058-103E-2F3B-C9DB-0C9931EDB267_k24CDA5F8-E174-085D-7570-0D56ADBFE0E7" />
<input type="hidden" name="_eventId" value="submit" />
</form>""",
# "patNameAddress" is enough to make the login think it worked
'''"patNameAddress"''',
),
)
w.login()
| mit |
effa/flocs | practice/models/practice_session.py | 2 | 2200 | from django.db import models
from practice.models import TaskInstanceModel
from practice.models import StudentModel
from datetime import datetime
class PracticeSession(models.Model):
"""
Representation of a practice session.
The session keeps a counter of the tasks in the session.
"""
# time for practice session expiration
EXPIRATION = 12 * 60 * 60 # half a day
# student, owner of the session
student = models.ForeignKey(StudentModel, null=True)
# counter of the session tasks
task_counter = models.PositiveSmallIntegerField(default=1)
# last task instance started in the session
last_task = models.ForeignKey(TaskInstanceModel, null=True)
# duration of the session in sec, valid only after session termination
duration = models.PositiveIntegerField(default=0)
# active (private field)
_active = models.BooleanField(default=True)
# active (public property)
def _get_active(self):
# deactivate the session if it is older than the EXPIRATION parameter
# keep duration = 0 if expired
session_instances = self.task_instances_set.order_by('order')
if len(session_instances) > 0:
delta = datetime.now() - session_instances[0].task_instance.time_start
if delta.total_seconds() > self.EXPIRATION:
self._active = False
self.save()
return self._active
def _set_active(self, value):
self._active = value
active = property(_get_active, _set_active)
def get_task_instances(self):
"""
Return the list of task instances in this session in the order they were taken
by the student.
"""
session_instances = self.task_instances_set.order_by('order')
task_instances = [si.task_instance for si in session_instances]
return task_instances
def __str__(self):
templ = ('session_id={session_id}, task_counter={task_counter}, '
'last_task={last_task}, active={active}')
return templ.format(
session_id=self.pk,
task_counter=self.task_counter,
last_task=self.last_task.pk,
active=self.active
)
| gpl-2.0 |
xperroni/DevAI2014MOOC | agent03.py | 1 | 6019 | #!/usr/bin/env python
r'''
Implementation of the agent algorithm in tables 33-1 and 33-2 of the IDEAL MOOC
lessons, with modifications required in the programming assignment.
For details see http://liris.cnrs.fr/ideal/mooc/lesson.php?n=033
'''
from collections import namedtuple
from itertools import cycle
# Default experiments
E1 = 'e1'
E2 = 'e2'
# Default results
R1 = 'r1'
R2 = 'r2'
# Default moods
Bored = 'BORED'
Pained = 'PAINED'
Pleased = 'PLEASED'
class Interaction(namedtuple('_Interaction', ['experiment', 'result', 'valence'])):
r'''Describes an agent interaction in terms of an experiment, expected result, and valence.
'''
pass
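# For example, Interaction(E1, R1, -1) pairs experiment e1 with its expected
# result r1 and a negative valence; the default interaction set built in
# Agent.__init__ below consists of exactly such tuples.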
def Environment10(experiment):
r'''A stateless environment where there are fixed relations between experiments
and results.
'''
return R1 if experiment == E1 else R2
class Environment30(object):
r'''An environment where an experiment's result depends on whether it was already performed
in the previous interaction turn.
'''
def __init__(self):
self.previous = E2
def __call__(self, experiment):
result = R1 if experiment == self.previous else R2
self.previous = experiment
return result
class Environment31(object):
r'''An environment where the relation between experiments and results is switched
after a number of turns, and then switched back after some more turns.
'''
def __init__(self, t1 = 8, t2 = 15):
self.clock = 0
self.t1 = t1
self.t2 = t2
def __call__(self, experiment):
self.clock += 1
if self.clock <= self.t1 or self.clock > self.t2:
return R1 if experiment == E1 else R2
else:
return R1 if experiment == E2 else R2
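# With the defaults (t1=8, t2=15), turns 1-8 and 16 onwards use the normal
# mapping (e1 -> r1, e2 -> r2) while turns 9-15 use the swapped one
# (e2 -> r1, e1 -> r2), so the agent has to adapt twice over a 20-turn run.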
class Agent(object):
r'''A simple embodied agent.
'''
def __init__(self, environment, *interactions):
r'''Creates a new agent, configured to interact with the given environment through
the given set of interactions, described as tuples (experiment, result, valence).
If no interactions are passed to the constructor, a default interaction set is used instead.
'''
if len(interactions) == 0:
interactions = [(E1, R1, -1), (E1, R2, 1), (E2, R1, -1), (E2, R2, 1)]
self.__experiments = [i[0] for i in interactions]
self.environment = (lambda s, e: environment(e)).__get__(self, Agent)
self.primitives = dict((i[0:2], Interaction(*i)) for i in interactions)
self.composites = dict()
self.context = None
self.mood = Pleased
def another(self, experiments, fallback = None):
r'''Returns an experiment not in the given collection, or the fallback experiment
if one couldn't be found among this agent's known experiments.
'''
for e in self.__experiments:
if not e in experiments:
return e
return fallback
def anticipate(self):
r'''Returns a list of next likely interactions (according to this agent's own previous
experience) given the last enacted interaction, or None if such a list does not
yet exist.
'''
return self.composites.get(self.context, None)
def select(self, anticipations):
r'''Given a list of anticipated interactions, looks for an interaction of positive
valence and returns its associated experiment. If none could be found, returns
an experiment not associated to any of the anticipated interactions.
'''
for anticipated in anticipations:
print 'afforded %s%s,%d' % (anticipated.experiment, anticipated.result, anticipated.valence)
anticipated = anticipations[0]
if anticipated.valence > 0:
return anticipated.experiment
return self.another({a.experiment for a in anticipations}, anticipated.experiment)
def experiment(self):
r'''Selects the next experiment to perform.
'''
anticipations = self.anticipate()
if anticipations != None:
return self.select(anticipations)
elif self.context == None:
return self.__experiments[0]
elif self.mood == Pleased:
return self.context.experiment
else:
return self.another({self.context.experiment})
def learn(self, enacted):
r'''Records a new composite interaction composed of the just-enacted primitive
interaction and its predecessor.
'''
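# e.g. enacting e2r2 right after e1r1 appends e2r2 to the anticipation list
# stored under e1r1; the list is kept sorted by decreasing valence so that
# select() can favour its first entry.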
context = self.context
self.context = enacted
if context == None:
return
anticipated = self.composites.setdefault(context, [])
if not enacted in anticipated:
anticipated.append(enacted)
anticipated.sort(lambda x, y: y.valence - x.valence)
print 'learn %s%s%s%s' % (context.experiment, context.result, enacted.experiment, enacted.result)
def run(self, turns = 10):
r'''Runs the agent for the specified number of turns.
The agent starts by trying the first experiment on its list, moving to alternatives
only if it gets negative results. Over time however, it builds a list of composite
interactions that enable it to decide what experiment to try next based on last
turn's enacted interaction.
'''
for i in range(0, turns):
experiment = self.experiment()
result = self.environment(experiment)
enacted = self.primitives[experiment, result]
print 'Enacted %s%s,%d' % enacted
self.learn(enacted)
mood = Pleased if enacted.valence >= 0 else Pained
self.mood = mood
print '%d: %s' % (i, mood)
def main():
#agent = Agent(Environment10)
#agent = Agent(Environment30())
agent = Agent(Environment31())
agent.run(20)
if __name__ == '__main__':
main()
| gpl-3.0 |
lukeiwanski/tensorflow | tensorflow/contrib/factorization/python/kernel_tests/clustering_ops_test.py | 38 | 7459 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for clustering_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.python.platform import test
class KmeansPlusPlusInitializationTest(test.TestCase):
# All but one input point are close to (101, 1). With uniform random sampling,
# it is highly improbable for (-1, -1) to be selected.
def setUp(self):
self._points = np.array([[100., 0.],
[101., 2.],
[102., 0.],
[100., 1.],
[100., 2.],
[101., 0.],
[101., 0.],
[101., 1.],
[102., 0.],
[-1., -1.]]).astype(np.float32)
def runTestWithSeed(self, seed):
with self.test_session():
sampled_points = clustering_ops.kmeans_plus_plus_initialization(
self._points, 3, seed, (seed % 5) - 1)
self.assertAllClose(
sorted(sampled_points.eval().tolist()), [[-1., -1.],
[101., 1.],
[101., 1.]],
atol=1.0)
def testBasic(self):
for seed in range(100):
self.runTestWithSeed(seed)
class KMC2InitializationTest(test.TestCase):
def runTestWithSeed(self, seed):
with self.test_session():
distances = np.zeros(1000).astype(np.float32)
distances[6] = 10e7
distances[4] = 10e3
sampled_point = clustering_ops.kmc2_chain_initialization(distances, seed)
self.assertEquals(sampled_point.eval(), 6)
distances[6] = 0.0
sampled_point = clustering_ops.kmc2_chain_initialization(distances, seed)
self.assertEquals(sampled_point.eval(), 4)
def testBasic(self):
for seed in range(100):
self.runTestWithSeed(seed)
class KMC2InitializationLargeTest(test.TestCase):
def setUp(self):
self._distances = np.zeros(1001)
self._distances[500] = 100.0
self._distances[1000] = 50.0
def testBasic(self):
with self.test_session():
counts = {}
seed = 0
for i in range(50):
sample = clustering_ops.kmc2_chain_initialization(
self._distances, seed + i).eval()
counts[sample] = counts.get(sample, 0) + 1
self.assertEquals(len(counts), 2)
self.assertTrue(500 in counts)
self.assertTrue(1000 in counts)
self.assertGreaterEqual(counts[500], 5)
self.assertGreaterEqual(counts[1000], 5)
class KMC2InitializationCornercaseTest(test.TestCase):
def setUp(self):
self._distances = np.zeros(10)
def runTestWithSeed(self, seed):
with self.test_session():
sampled_point = clustering_ops.kmc2_chain_initialization(
self._distances, seed)
self.assertEquals(sampled_point.eval(), 0)
def testBasic(self):
for seed in range(100):
self.runTestWithSeed(seed)
# A simple test that can be verified by hand.
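# For instance, the point [101, 2] is at squared distance (101-100)**2 +
# (2-0)**2 = 5 from center [100, 0] and likewise 5 from center [99, 1];
# the op reports the lower index first here, as testNearest2 asserts.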
class NearestCentersTest(test.TestCase):
def setUp(self):
self._points = np.array([[100., 0.],
[101., 2.],
[99., 2.],
[1., 1.]]).astype(np.float32)
self._centers = np.array([[100., 0.],
[99., 1.],
[50., 50.],
[0., 0.],
[1., 1.]]).astype(np.float32)
def testNearest1(self):
with self.test_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 1)
self.assertAllClose(indices.eval(), [[0], [0], [1], [4]])
self.assertAllClose(distances.eval(), [[0.], [5.], [1.], [0.]])
def testNearest2(self):
with self.test_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 2)
self.assertAllClose(indices.eval(), [[0, 1], [0, 1], [1, 0], [4, 3]])
self.assertAllClose(distances.eval(),
[[0., 2.], [5., 5.], [1., 5.], [0., 2.]])
# A test with large inputs.
class NearestCentersLargeTest(test.TestCase):
def setUp(self):
num_points = 1000
num_centers = 2000
num_dim = 100
max_k = 5
# Construct a small number of random points and later tile them.
points_per_tile = 10
assert num_points % points_per_tile == 0
points = np.random.standard_normal(
[points_per_tile, num_dim]).astype(np.float32)
# Construct random centers.
self._centers = np.random.standard_normal(
[num_centers, num_dim]).astype(np.float32)
# Exhaustively compute expected nearest neighbors.
def squared_distance(x, y):
return np.linalg.norm(x - y, ord=2)**2
nearest_neighbors = [
sorted([(squared_distance(point, self._centers[j]), j)
for j in range(num_centers)])[:max_k] for point in points
]
expected_nearest_neighbor_indices = np.array(
[[i for _, i in nn] for nn in nearest_neighbors])
expected_nearest_neighbor_squared_distances = np.array(
[[dist for dist, _ in nn] for nn in nearest_neighbors])
# Tile points and expected results to reach requested size (num_points)
(self._points, self._expected_nearest_neighbor_indices,
self._expected_nearest_neighbor_squared_distances) = (
np.tile(x, (int(num_points / points_per_tile), 1))
for x in (points, expected_nearest_neighbor_indices,
expected_nearest_neighbor_squared_distances))
def testNearest1(self):
with self.test_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 1)
self.assertAllClose(indices.eval(),
self._expected_nearest_neighbor_indices[:, [0]])
self.assertAllClose(
distances.eval(),
self._expected_nearest_neighbor_squared_distances[:, [0]])
def testNearest5(self):
with self.test_session():
[indices, distances] = clustering_ops.nearest_neighbors(self._points,
self._centers, 5)
self.assertAllClose(indices.eval(),
self._expected_nearest_neighbor_indices[:, 0:5])
self.assertAllClose(
distances.eval(),
self._expected_nearest_neighbor_squared_distances[:, 0:5])
if __name__ == "__main__":
np.random.seed(0)
test.main()
| apache-2.0 |
40223151/2015cd0505 | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_functiontestcase.py | 791 | 5478 | import unittest
from .support import LoggingResult
class Test_FunctionTestCase(unittest.TestCase):
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
test = unittest.FunctionTestCase(lambda: None)
self.assertEqual(test.countTestCases(), 1)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
raise RuntimeError('raised by setUp')
def test():
events.append('test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'addError', 'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
raise RuntimeError('raised by test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
self.fail('raised by test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
def tearDown():
events.append('tearDown')
raise RuntimeError('raised by tearDown')
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-bit or unicode -- again, because the docs
# just say "string")
def test_id(self):
test = unittest.FunctionTestCase(lambda: None)
self.assertIsInstance(test.id(), str)
# "Returns a one-line description of the test, or None if no description
# has been provided. The default implementation of this method returns
# the first line of the test method's docstring, if available, or None."
def test_shortDescription__no_docstring(self):
test = unittest.FunctionTestCase(lambda: None)
self.assertEqual(test.shortDescription(), None)
# "Returns a one-line description of the test, or None if no description
# has been provided. The default implementation of this method returns
# the first line of the test method's docstring, if available, or None."
def test_shortDescription__singleline_docstring(self):
desc = "this tests foo"
test = unittest.FunctionTestCase(lambda: None, description=desc)
self.assertEqual(test.shortDescription(), "this tests foo")
| agpl-3.0 |
sadanandb/pmt | 3rd_party/CherryPy/cherrypy/test/test_json.py | 7 | 2560 | from cherrypy.test import test, helper
test.prefer_parent_path()
import cherrypy
from cherrypy.lib.jsontools import json
if json is None:
print "skipped (simplejson not found) "
else:
def setup_server():
class Root(object):
def plain(self):
return 'hello'
plain.exposed = True
def json_string(self):
return 'hello'
json_string.exposed = True
json_string._cp_config = {'tools.json_out.on': True}
def json_list(self):
return ['a', 'b', 42]
json_list.exposed = True
json_list._cp_config = {'tools.json_out.on': True}
def json_dict(self):
return {'answer': 42}
json_dict.exposed = True
json_dict._cp_config = {'tools.json_out.on': True}
def json_post(self):
if cherrypy.request.json == [13, 'c']:
return 'ok'
else:
return 'nok'
json_post.exposed = True
json_post._cp_config = {'tools.json_in.on': True}
root = Root()
cherrypy.tree.mount(root)
class JsonTest(helper.CPWebCase):
def test_json_output(self):
self.getPage("/plain")
self.assertBody("hello")
self.getPage("/json_string")
self.assertBody('"hello"')
self.getPage("/json_list")
self.assertBody('["a", "b", 42]')
self.getPage("/json_dict")
self.assertBody('{"answer": 42}')
def test_json_input(self):
body = '[13, "c"]'
headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(body)))]
self.getPage("/json_post", method="POST", headers=headers, body=body)
self.assertBody('ok')
body = '[13, "c"]'
headers = [('Content-Type', 'text/plain'),
('Content-Length', str(len(body)))]
self.getPage("/json_post", method="POST", headers=headers, body=body)
self.assertStatus(415, 'Expected an application/json content type')
body = '[13, -]'
headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(body)))]
self.getPage("/json_post", method="POST", headers=headers, body=body)
self.assertStatus(400, 'Invalid JSON document')
if __name__ == '__main__':
helper.testmain()
| epl-1.0 |
ghber/My-Django-Nonrel | django/contrib/gis/gdal/tests/test_driver.py | 330 | 1207 | import os, os.path, unittest
from django.contrib.gis.gdal import Driver, OGRException
valid_drivers = ('ESRI Shapefile', 'MapInfo File', 'TIGER', 'S57', 'DGN',
'Memory', 'CSV', 'GML', 'KML')
invalid_drivers = ('Foo baz', 'clucka', 'ESRI Shp')
aliases = {'eSrI' : 'ESRI Shapefile',
'TigER/linE' : 'TIGER',
'SHAPE' : 'ESRI Shapefile',
'sHp' : 'ESRI Shapefile',
}
class DriverTest(unittest.TestCase):
def test01_valid_driver(self):
"Testing valid OGR Data Source Drivers."
for d in valid_drivers:
dr = Driver(d)
self.assertEqual(d, str(dr))
def test02_invalid_driver(self):
"Testing invalid OGR Data Source Drivers."
for i in invalid_drivers:
self.assertRaises(OGRException, Driver, i)
def test03_aliases(self):
"Testing driver aliases."
for alias, full_name in aliases.items():
dr = Driver(alias)
self.assertEqual(full_name, str(dr))
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(DriverTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| bsd-3-clause |
ajinabraham/Mobile-Security-Framework-MobSF | StaticAnalyzer/tools/enjarify/enjarify/jvm/optimization/registers.py | 30 | 8923 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from .. import ir
from .. import scalartypes as scalars
from ..jvmops import *
# Copy propagation - when one register is moved to another, keep track and replace
# all loads with loads from the original register (as long as it hasn't since been
# overwritten). Note that stores won't be removed, since they may still be needed
# in some cases, but if they are unused, they'll be removed in a subsequent pass
# As usual, assume no iincs
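# A small worked example with hypothetical register keys:
# load r1; store r2 -> r2 joins r1's copy set (both instructions kept for now)
# load r2 -> rewritten below as load r1, the set's root
# store r1 -> r1 is clobbered, and the oldest surviving member of the set
# (here r2) is promoted to be the new root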
# A set of registers that currently are copies of each other.
class _CopySet:
def __init__(self, key):
self.root = key
self.set = {key}
self.q = [] # keep track of insertion order in case root is overwritten
def add(self, key):
assert(self.set)
self.set.add(key)
self.q.append(key)
def remove(self, key):
self.set.remove(key)
# Heuristic - use oldest element still in set as new root
while self.q and self.root not in self.set:
self.root = self.q.pop(0)
def copy(self):
new = _CopySet(self.root)
new.set = self.set.copy()
new.q = self.q[:]
return new
# Map registers to CopySets
class _CopySetsMap:
def __init__(self):
self.lookup = {}
def _get(self, key): return self.lookup.setdefault(key, _CopySet(key))
def clobber(self, key):
self._get(key).remove(key)
del self.lookup[key]
def move(self, dest, src):
# return false if the corresponding instructions should be removed
s_set = self._get(src)
d_set = self._get(dest)
if s_set is d_set:
# src and dest are copies of same value, so we can remove
return False
d_set.remove(dest)
s_set.add(dest)
self.lookup[dest] = s_set
return True
def load(self, key):
return self._get(key).root
def copy(self):
copies = {}
new = _CopySetsMap()
for k, v in self.lookup.items():
if v not in copies:
copies[v] = v.copy()
new.lookup[k] = copies[v]
return new
def copyPropagation(irdata):
instrs = irdata.flat_instructions
replace = {}
single_pred_infos = {}
prev = None
current = _CopySetsMap()
for instr in instrs:
# reset all info when control flow is merged
if instr in irdata.jump_targets:
# try to use info if this was a single predecessor forward jump
if prev and not prev.fallsthrough() and irdata.target_pred_counts.get(instr) == 1:
current = single_pred_infos.get(instr, _CopySetsMap())
else:
current = _CopySetsMap()
elif isinstance(instr, ir.RegAccess):
key = instr.key
if instr.store:
# check if previous instr was a load
if isinstance(prev, ir.RegAccess) and not prev.store:
if not current.move(dest=key, src=prev.key):
replace[prev] = []
replace[instr] = []
else:
current.clobber(key)
else:
root_key = current.load(key)
if key != root_key:
assert(instr not in replace)
# replace with load from root register instead
replace[instr] = [ir.RegAccess(root_key[0], root_key[1], False)]
else:
for target in instr.targets():
label = irdata.labels[target]
if irdata.target_pred_counts.get(label) == 1:
single_pred_infos[label] = current.copy()
prev = instr
irdata.replaceInstrs(replace)
def _isRemoveable(instr):
# can remove if load or const since we know there are no side effects
# note - instr may be None
if isinstance(instr, ir.RegAccess) and not instr.store:
return True
return isinstance(instr, (ir.PrimConstant, ir.OtherConstant))
def removeUnusedRegisters(irdata):
# Remove stores to registers that are not read from anywhere in the method
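# e.g. a "const; store" pair targeting a never-read register is deleted
# outright, while a store fed by anything with possible side effects is
# replaced by a pop (or pop2 for wide values) instead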
instrs = irdata.flat_instructions
used = set()
for instr in instrs:
if isinstance(instr, ir.RegAccess) and not instr.store:
used.add(instr.key)
replace = {}
prev = None
for instr in instrs:
if isinstance(instr, ir.RegAccess) and instr.key not in used:
assert(instr.store)
# if prev instruction is load or const, just remove it and the store
# otherwise, replace the store with a pop
if _isRemoveable(prev):
replace[prev] = []
replace[instr] = []
else:
op = POP2 if instr.wide else POP
replace[instr] = [ir.Other(bytecode=bytes([op]))]
prev = instr
irdata.replaceInstrs(replace)
# Allocate registers to JVM registers on a first come, first serve basis
# For simplicity, parameter registers are preserved as is
def simpleAllocateRegisters(irdata):
instrs = irdata.flat_instructions
regmap = {v:i for i,v in enumerate(irdata.initial_args)}
nextreg = len(irdata.initial_args)
for instr in instrs:
if isinstance(instr, ir.RegAccess):
if instr.key not in regmap:
regmap[instr.key] = nextreg
nextreg += 1
if instr.wide:
nextreg += 1
instr.calcBytecode(regmap[instr.key])
irdata.numregs = nextreg
# Sort registers by number of uses so that more frequently used registers will
# end up in slots 0-3 or 4-255 and benefit from the shorter instruction forms
# For simplicity, parameter registers are still preserved as is with one exception
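# (On the JVM, loads and stores of slots 0-3 have dedicated one-byte forms and
# slots 4-255 fit the two-byte form, while higher slots need the four-byte
# wide-prefixed form, hence the payoff from sorting by use count.)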
def sortAllocateRegisters(irdata):
instrs = irdata.flat_instructions
use_counts = collections.Counter()
for instr in instrs:
if isinstance(instr, ir.RegAccess):
use_counts[instr.key] += 1
regs = irdata.initial_args[:]
rest = sorted(use_counts, key=lambda k:(-use_counts[k], k))
for key in rest:
# If key is a param, it was already added at the beginning
if key not in irdata.initial_args:
regs.append(key)
if scalars.iswide(key[1]):
regs.append(None)
# Sometimes the non-param registers are used more times than the param registers
# and it is beneficial to swap them (which requires inserting code at the
# beginning of the method to move the value if the param is actually used).
# This is very complicated to do in general, so the following code only does
# this in one specific circumstance, which should nevertheless be sufficient
# to capture the majority of the benefit.
# Specifically, it only swaps at most one register, and only in the case that
# it is nonwide and there is a nonwide parameter in the first 4 slots that
# it can be swapped with. Also, it doesn't bother to check if the param is unused.
candidate_i = max(4, len(irdata.initial_args))
# make sure candidate is valid, nonwide register
if len(regs) > candidate_i and regs[candidate_i] is not None:
candidate = regs[candidate_i]
if not scalars.iswide(candidate[1]) and use_counts[candidate] >= 3:
for i in range(min(4, len(irdata.initial_args))):
# make sure target is not wide
if regs[i] is None or regs[i+1] is None:
continue
target = regs[i]
if use_counts[candidate] > use_counts[target] + 3:
# swap register assignments
regs[i], regs[candidate_i] = candidate, target
# add move instructions at beginning of method
load = ir.RegAccess.raw(i, target[1], False)
store = ir.RegAccess(target[0], target[1], True)
instrs = [load, store] + instrs
irdata.flat_instructions = instrs
break
# Now generate bytecode from the selected register allocations
irdata.numregs = len(regs)
regmap = {v:i for i,v in enumerate(regs) if v is not None}
for instr in instrs:
if instr.bytecode is None and isinstance(instr, ir.RegAccess):
instr.calcBytecode(regmap[instr.key])
| gpl-3.0 |
programadorjc/django | tests/postgres_tests/models.py | 231 | 3562 | from django.db import connection, models
from .fields import (
ArrayField, BigIntegerRangeField, DateRangeField, DateTimeRangeField,
FloatRangeField, HStoreField, IntegerRangeField, JSONField,
)
class PostgreSQLModel(models.Model):
class Meta:
abstract = True
required_db_vendor = 'postgresql'
class IntegerArrayModel(PostgreSQLModel):
field = ArrayField(models.IntegerField())
class NullableIntegerArrayModel(PostgreSQLModel):
field = ArrayField(models.IntegerField(), blank=True, null=True)
class CharArrayModel(PostgreSQLModel):
field = ArrayField(models.CharField(max_length=10))
class DateTimeArrayModel(PostgreSQLModel):
datetimes = ArrayField(models.DateTimeField())
dates = ArrayField(models.DateField())
times = ArrayField(models.TimeField())
class NestedIntegerArrayModel(PostgreSQLModel):
field = ArrayField(ArrayField(models.IntegerField()))
class OtherTypesArrayModel(PostgreSQLModel):
ips = ArrayField(models.GenericIPAddressField())
uuids = ArrayField(models.UUIDField())
decimals = ArrayField(models.DecimalField(max_digits=5, decimal_places=2))
class HStoreModel(PostgreSQLModel):
field = HStoreField(blank=True, null=True)
class CharFieldModel(models.Model):
field = models.CharField(max_length=16)
class TextFieldModel(models.Model):
field = models.TextField()
# Only create this model for postgres >= 9.2
if connection.vendor == 'postgresql' and connection.pg_version >= 90200:
class RangesModel(PostgreSQLModel):
ints = IntegerRangeField(blank=True, null=True)
bigints = BigIntegerRangeField(blank=True, null=True)
floats = FloatRangeField(blank=True, null=True)
timestamps = DateTimeRangeField(blank=True, null=True)
dates = DateRangeField(blank=True, null=True)
class RangeLookupsModel(PostgreSQLModel):
parent = models.ForeignKey(RangesModel, models.SET_NULL, blank=True, null=True)
integer = models.IntegerField(blank=True, null=True)
big_integer = models.BigIntegerField(blank=True, null=True)
float = models.FloatField(blank=True, null=True)
timestamp = models.DateTimeField(blank=True, null=True)
date = models.DateField(blank=True, null=True)
else:
# create an object with this name so we don't have failing imports
class RangesModel(object):
pass
class RangeLookupsModel(object):
pass
# Only create this model for postgres >= 9.4
if connection.vendor == 'postgresql' and connection.pg_version >= 90400:
class JSONModel(models.Model):
field = JSONField(blank=True, null=True)
else:
# create an object with this name so we don't have failing imports
class JSONModel(object):
pass
class ArrayFieldSubclass(ArrayField):
def __init__(self, *args, **kwargs):
super(ArrayFieldSubclass, self).__init__(models.IntegerField())
class AggregateTestModel(models.Model):
"""
To test postgres-specific general aggregation functions
"""
char_field = models.CharField(max_length=30, blank=True)
integer_field = models.IntegerField(null=True)
boolean_field = models.NullBooleanField()
class StatTestModel(models.Model):
"""
To test postgres-specific aggregation functions for statistics
"""
int1 = models.IntegerField()
int2 = models.IntegerField()
related_field = models.ForeignKey(AggregateTestModel, models.SET_NULL, null=True)
class NowTestModel(models.Model):
when = models.DateTimeField(null=True, default=None)
| bsd-3-clause |
pavelchristof/gomoku-ai | tensorflow/python/framework/proto_test.py | 178 | 1704 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Protobuf related tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class ProtoTest(test.TestCase):
# TODO(vrv): re-enable this test once we figure out how this can
# pass the pip install test (where the user is expected to have
# protobuf installed).
def _testLargeProto(self):
# create a constant of size > 64MB.
a = constant_op.constant(np.zeros([1024, 1024, 17]))
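# (1024 * 1024 * 17 float64 values comes to roughly 136 MiB, comfortably
# past the 64MB threshold mentioned above.)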
# Serialize the resulting graph def.
gdef = a.op.graph.as_graph_def()
serialized = gdef.SerializeToString()
unserialized = ops.Graph().as_graph_def()
# Deserialize back. Protobuf python library should support
# protos larger than 64MB.
unserialized.ParseFromString(serialized)
self.assertProtoEquals(unserialized, gdef)
if __name__ == "__main__":
test.main()
| apache-2.0 |
wakatime/komodo-wakatime | components/wakatime/packages/chardet/mbcssm.py | 289 | 25481 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .enums import MachineState
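# Each *_SM_MODEL dict below is consumed by this package's CodingStateMachine:
# 'class_table' maps every byte value to a character class, 'state_table' is a
# flattened transition matrix indexed by state * 'class_factor' + class, and
# 'char_len_table' gives the expected total length of a multi-byte character
# whose first byte falls into the corresponding class.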
# BIG5
BIG5_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
4,4,4,4,4,4,4,4, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
4,3,3,3,3,3,3,3, # a0 - a7
3,3,3,3,3,3,3,3, # a8 - af
3,3,3,3,3,3,3,3, # b0 - b7
3,3,3,3,3,3,3,3, # b8 - bf
3,3,3,3,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
BIG5_ST = (
MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17
)
BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0)
BIG5_SM_MODEL = {'class_table': BIG5_CLS,
'class_factor': 5,
'state_table': BIG5_ST,
'char_len_table': BIG5_CHAR_LEN_TABLE,
'name': 'Big5'}
# CP949
CP949_CLS = (
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
)
CP949_ST = (
#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START, 4, 5,MachineState.ERROR, 6, # MachineState.START
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, # MachineState.ERROR
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 3
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 4
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6
)
CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
CP949_SM_MODEL = {'class_table': CP949_CLS,
'class_factor': 10,
'state_table': CP949_ST,
'char_len_table': CP949_CHAR_LEN_TABLE,
'name': 'CP949'}
# EUC-JP
EUCJP_CLS = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5 # f8 - ff
)
EUCJP_ST = (
3, 4, 3, 5,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 3,MachineState.ERROR,#18-1f
3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27
)
EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0)
EUCJP_SM_MODEL = {'class_table': EUCJP_CLS,
'class_factor': 6,
'state_table': EUCJP_ST,
'char_len_table': EUCJP_CHAR_LEN_TABLE,
'name': 'EUC-JP'}
# EUC-KR
EUCKR_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0 # f8 - ff
)
EUCKR_ST = (
MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f
)
EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0)
EUCKR_SM_MODEL = {'class_table': EUCKR_CLS,
'class_factor': 4,
'state_table': EUCKR_ST,
'char_len_table': EUCKR_CHAR_LEN_TABLE,
'name': 'EUC-KR'}
# EUC-TW
EUCTW_CLS = (
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
EUCTW_ST = (
MachineState.ERROR,MachineState.ERROR,MachineState.START, 3, 3, 3, 4,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.ERROR,#10-17
MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27
MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
)
EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3)
EUCTW_SM_MODEL = {'class_table': EUCTW_CLS,
'class_factor': 7,
'state_table': EUCTW_ST,
'char_len_table': EUCTW_CHAR_LEN_TABLE,
'name': 'x-euc-tw'}
# GB2312
GB2312_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0 # f8 - ff
)
GB2312_ST = (
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, 3,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,#10-17
4,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to be
# 2 here.
GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2)
GB2312_SM_MODEL = {'class_table': GB2312_CLS,
'class_factor': 7,
'state_table': GB2312_ST,
'char_len_table': GB2312_CHAR_LEN_TABLE,
'name': 'GB2312'}
# Shift_JIS
SJIS_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,2,2,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
# 0xa0 is illegal in sjis encoding, but some pages do
# contain such bytes. We need to be more error-forgiving.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,0,0,0) # f8 - ff
SJIS_ST = (
MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17
)
SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0)
SJIS_SM_MODEL = {'class_table': SJIS_CLS,
'class_factor': 6,
'state_table': SJIS_ST,
'char_len_table': SJIS_CHAR_LEN_TABLE,
'name': 'Shift_JIS'}
# UCS2-BE
UCS2BE_CLS = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2BE_ST = (
5, 7, 7,MachineState.ERROR, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME, 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,#10-17
6, 6, 6, 6, 6,MachineState.ITS_ME, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,MachineState.ERROR,#20-27
5, 8, 6, 6,MachineState.ERROR, 6, 6, 6,#28-2f
6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37
)
UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2)
UCS2BE_SM_MODEL = {'class_table': UCS2BE_CLS,
'class_factor': 6,
'state_table': UCS2BE_ST,
'char_len_table': UCS2BE_CHAR_LEN_TABLE,
'name': 'UTF-16BE'}
# UCS2-LE
UCS2LE_CLS = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2LE_ST = (
6, 6, 7, 6, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME, 5, 5, 5,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#10-17
5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,MachineState.ERROR,#20-27
5, 5, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5,#28-2f
5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR,MachineState.START,MachineState.START #30-37
)
UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2)
UCS2LE_SM_MODEL = {'class_table': UCS2LE_CLS,
'class_factor': 6,
'state_table': UCS2LE_ST,
'char_len_table': UCS2LE_CHAR_LEN_TABLE,
'name': 'UTF-16LE'}
# UTF-8
UTF8_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0 # f8 - ff
)
UTF8_ST = (
MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#20-27
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#28-2f
MachineState.ERROR,MachineState.ERROR, 5, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#30-37
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#38-3f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#40-47
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#48-4f
MachineState.ERROR,MachineState.ERROR, 7, 7, 7, 7,MachineState.ERROR,MachineState.ERROR,#50-57
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#58-5f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 7, 7,MachineState.ERROR,MachineState.ERROR,#60-67
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#68-6f
MachineState.ERROR,MachineState.ERROR, 9, 9, 9, 9,MachineState.ERROR,MachineState.ERROR,#70-77
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#78-7f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 9,MachineState.ERROR,MachineState.ERROR,#80-87
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#88-8f
MachineState.ERROR,MachineState.ERROR, 12, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,#90-97
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#98-9f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12,MachineState.ERROR,MachineState.ERROR,#a0-a7
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#a8-af
MachineState.ERROR,MachineState.ERROR, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b0-b7
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b8-bf
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf
)
UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
UTF8_SM_MODEL = {'class_table': UTF8_CLS,
'class_factor': 16,
'state_table': UTF8_ST,
'char_len_table': UTF8_CHAR_LEN_TABLE,
'name': 'UTF-8'}
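# Note: these SM_MODEL dicts are consumed by chardet's CodingStateMachine
# (defined elsewhere in the package), roughly as:
#     byte_class = class_table[byte]
#     curr_state = state_table[curr_state * class_factor + byte_class]
# char_len_table maps each byte class to the expected length of the current
# multi-byte character; ITS_ME and ERROR are terminal states.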
| bsd-3-clause |
cmsdaq/hltd | lib/wstools-0.4.8/wstools/XMLname.py | 2 | 2575 | import re
from six import text_type
"""Translate strings to and from SOAP 1.2 XML name encoding
Implements the rules for mapping application-defined names to XML names
specified by the W3C SOAP working group for SOAP version 1.2 in
Appendix A of "SOAP Version 1.2 Part 2: Adjuncts", W3C Working Draft,
17 December 2001, <http://www.w3.org/TR/soap12-part2/#namemap>
Also see <http://www.w3.org/2000/xp/Group/xmlp-issues>.
Author: Gregory R. Warnes <[email protected]>
Date: 2002-04-25
Version 0.9.0
"""
ident = "$Id$"
def _NCNameChar(x):
return x.isalpha() or x.isdigit() or x == "." or x == '-' or x == "_"
def _NCNameStartChar(x):
return x.isalpha() or x == "_"
def _toUnicodeHex(x):
    hexval = hex(ord(x[0]))[2:]
    hexlen = len(hexval)
    # Pad hexval with leading 0's so it has either 4 or 8 digits
    if hexlen <= 4:
        hexval = hexval.zfill(4)
    elif hexlen <= 8:
        hexval = hexval.zfill(8)
    else:
        raise Exception("Illegal Value returned from hex(ord(x))")
    return "_x" + hexval + "_"
def _fromUnicodeHex(x):
return eval(r'u"\u' + x[2:-1] + '"')
def toXMLname(string):
"""Convert string to a XML name."""
if string.find(':') != -1:
(prefix, localname) = string.split(':', 1)
else:
prefix = None
localname = string
T = text_type(localname)
N = len(localname)
X = []
for i in range(N):
if i < N - 1 and T[i] == u'_' and T[i + 1] == u'x':
X.append(u'_x005F_')
elif i == 0 and N >= 3 and \
(T[0] == u'x' or T[0] == u'X') and \
(T[1] == u'm' or T[1] == u'M') and \
(T[2] == u'l' or T[2] == u'L'):
X.append(u'_xFFFF_' + T[0])
elif (not _NCNameChar(T[i])) or (i == 0 and not _NCNameStartChar(T[i])):
X.append(_toUnicodeHex(T[i]))
else:
X.append(T[i])
if prefix:
return "%s:%s" % (prefix, u''.join(X))
return u''.join(X)
def fromXMLname(string):
"""Convert XML name to unicode string."""
retval = re.sub(r'_xFFFF_', '', string)
def fun(matchobj):
return _fromUnicodeHex(matchobj.group(0))
retval = re.sub(r'_x[0-9A-Fa-f]{4}_', fun, retval)
return retval
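# Illustrative round trip (informal doctest; output shown for Python 2):
#
#     >>> toXMLname('with space')
#     u'with_x0020_space'
#     >>> toXMLname('xmlns')    # names beginning with [Xx][Mm][Ll] are escaped
#     u'_xFFFF_xmlns'
#     >>> fromXMLname(u'with_x0020_space')
#     u'with space'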
| lgpl-3.0 |
xuleiboy1234/autoTitle | tensorflow/tensorflow/contrib/keras/python/keras/losses_test.py | 27 | 3551 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.python.platform import test
ALL_LOSSES = [keras.losses.mean_squared_error,
keras.losses.mean_absolute_error,
keras.losses.mean_absolute_percentage_error,
keras.losses.mean_squared_logarithmic_error,
keras.losses.squared_hinge,
keras.losses.hinge,
keras.losses.categorical_crossentropy,
keras.losses.binary_crossentropy,
keras.losses.kullback_leibler_divergence,
keras.losses.poisson,
keras.losses.cosine_proximity,
keras.losses.logcosh,
keras.losses.categorical_hinge]
class KerasLossesTest(test.TestCase):
def test_objective_shapes_3d(self):
with self.test_session():
y_a = keras.backend.variable(np.random.random((5, 6, 7)))
y_b = keras.backend.variable(np.random.random((5, 6, 7)))
for obj in ALL_LOSSES:
objective_output = obj(y_a, y_b)
self.assertListEqual(objective_output.get_shape().as_list(), [5, 6])
def test_objective_shapes_2d(self):
with self.test_session():
y_a = keras.backend.variable(np.random.random((6, 7)))
y_b = keras.backend.variable(np.random.random((6, 7)))
for obj in ALL_LOSSES:
objective_output = obj(y_a, y_b)
self.assertListEqual(objective_output.get_shape().as_list(), [6,])
def test_cce_one_hot(self):
with self.test_session():
y_a = keras.backend.variable(np.random.randint(0, 7, (5, 6)))
y_b = keras.backend.variable(np.random.random((5, 6, 7)))
objective_output = keras.losses.sparse_categorical_crossentropy(y_a, y_b)
assert keras.backend.eval(objective_output).shape == (5, 6)
y_a = keras.backend.variable(np.random.randint(0, 7, (6,)))
y_b = keras.backend.variable(np.random.random((6, 7)))
objective_output = keras.losses.sparse_categorical_crossentropy(y_a, y_b)
assert keras.backend.eval(objective_output).shape == (6,)
def test_serialization(self):
fn = keras.losses.get('mse')
config = keras.losses.serialize(fn)
new_fn = keras.losses.deserialize(config)
self.assertEqual(fn, new_fn)
def test_categorical_hinge(self):
y_pred = keras.backend.variable(np.array([[0.3, 0.2, 0.1],
[0.1, 0.2, 0.7]]))
y_true = keras.backend.variable(np.array([[0, 1, 0], [1, 0, 0]]))
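    # categorical hinge per sample: max(0, max(wrong-class scores) - true-class
    # score + 1); here sample 1 -> 0.3 - 0.2 + 1 and sample 2 -> 0.7 - 0.1 + 1,
    # averaged below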
expected_loss = ((0.3 - 0.2 + 1) + (0.7 - 0.1 + 1)) / 2.0
loss = keras.backend.eval(keras.losses.categorical_hinge(y_true, y_pred))
self.assertAllClose(expected_loss, np.mean(loss))
if __name__ == '__main__':
test.main()
| mit |
q40223241/2015cdb_g3_40223241 | cadb_g3_0420-master/static/Brython3.1.1-20150328-091302/Lib/http/cookies.py | 735 | 20810 | #!/usr/bin/env python3
#
####
# Copyright 2000 by Timothy O'Malley <[email protected]>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <[email protected]>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell ([email protected]) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy...
>>> from http import cookies
Most of the time you start by creating a cookie.
>>> C = cookies.SimpleCookie()
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = cookies.SimpleCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = cookies.SimpleCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print(C.output(header="Cookie:"))
Cookie: rocky=road; Path=/cookie
>>> print(C.output(attrs=[], header="Cookie:"))
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = cookies.SimpleCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = cookies.SimpleCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print(C)
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = cookies.SimpleCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print(C)
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = cookies.SimpleCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = cookies.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
Finis.
"""
#
# Import our required modules
#
import re
import string
__all__ = ["CookieError", "BaseCookie", "SimpleCookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a forward-slash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding '\' slash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
# Because of the way browsers really handle cookies (as opposed
# to what the RFC says) we also encode , and ;
',' : '\\054', ';' : '\\073',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
def _quote(str, LegalChars=_LegalChars):
r"""Quote a string for use in a cookie header.
If the string does not need to be double-quoted, then just return the
string. Otherwise, surround the string in doublequotes and quote
(with a \) special characters.
"""
if all(c in LegalChars for c in str):
return str
else:
return '"' + _nulljoin(_Translator.get(s, s) for s in str) + '"'
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
o_match = _OctalPatt.search(str, i)
q_match = _QuotePatt.search(str, i)
if not o_match and not q_match: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if o_match:
j = o_match.start(0)
if q_match:
k = q_match.start(0)
if q_match and (not o_match or k < j): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k + 2
else: # OctalPatt matched
res.append(str[i:j])
res.append(chr(int(str[j+1:j+4], 8)))
i = j + 4
return _nulljoin(res)
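# Illustrative behavior of the two quoting helpers (informal doctest):
#
#     >>> _quote('fig')             # only legal chars, returned unchanged
#     'fig'
#     >>> _quote('fig,newton')      # ',' is octal-escaped and the value quoted
#     '"fig\\054newton"'
#     >>> _unquote('"fig\\054newton"')
#     'fig,newton'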
# The _getdate() routine is used to set the expiration time in the cookie's HTTP
# header. By default, _getdate() returns the current time in the appropriate
# "expires" format for a Set-Cookie header. The one optional argument is an
# offset from now, in seconds. For example, an offset of -3600 means "one hour
# ago". The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
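# The resulting string has the form 'Wdy, DD Mon YYYY HH:MM:SS GMT',
# e.g. 'Thu, 01 Jan 1970 00:00:00 GMT'.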
class Morsel(dict):
"""A class to hold ONE (key, value) pair.
In a cookie, each such pair may have several attributes, so this class is
used to keep the attributes associated with the appropriate key,value pair.
This class also includes a coded_value attribute, which is used to hold
the network representation of the value. This is most useful when Python
objects are pickled for network transit.
"""
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This is an extension from Microsoft:
# httponly
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = {
"expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "secure",
"httponly" : "httponly",
"version" : "Version",
}
_flags = {'secure', 'httponly'}
def __init__(self):
# Set defaults
self.key = self.value = self.coded_value = None
# Set default attributes
for key in self._reserved:
dict.__setitem__(self, key, "")
def __setitem__(self, K, V):
K = K.lower()
if not K in self._reserved:
raise CookieError("Invalid Attribute %s" % K)
dict.__setitem__(self, K, V)
def isReservedKey(self, K):
return K.lower() in self._reserved
def set(self, key, val, coded_val, LegalChars=_LegalChars):
# First we verify that the key isn't a reserved word
# Second we make sure it only contains legal characters
if key.lower() in self._reserved:
raise CookieError("Attempt to set a reserved key: %s" % key)
if any(c not in LegalChars for c in key):
raise CookieError("Illegal key value: %s" % key)
# It's a good key, so save it.
self.key = key
self.value = val
self.coded_value = coded_val
def output(self, attrs=None, header="Set-Cookie:"):
return "%s %s" % (header, self.OutputString(attrs))
__str__ = output
def __repr__(self):
return '<%s: %s=%s>' % (self.__class__.__name__,
self.key, repr(self.value))
def js_output(self, attrs=None):
# Print javascript
return """
<script type="text/javascript">
<!-- begin hiding
document.cookie = \"%s\";
// end hiding -->
</script>
""" % (self.OutputString(attrs).replace('"', r'\"'))
def OutputString(self, attrs=None):
# Build up our result
#
result = []
append = result.append
# First, the key=value pair
append("%s=%s" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved
items = sorted(self.items())
for key, value in items:
if value == "":
continue
if key not in attrs:
continue
if key == "expires" and isinstance(value, int):
append("%s=%s" % (self._reserved[key], _getdate(value)))
elif key == "max-age" and isinstance(value, int):
append("%s=%d" % (self._reserved[key], value))
elif key == "secure":
append(str(self._reserved[key]))
elif key == "httponly":
append(str(self._reserved[key]))
else:
append("%s=%s" % (self._reserved[key], value))
# Return the result
return _semispacejoin(result)
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_CookiePattern = re.compile(r"""
(?x) # This is a verbose pattern
(?P<key> # Start of group 'key'
""" + _LegalCharsPatt + r"""+? # Any word of at least one letter
) # End of group 'key'
( # Optional group: there may not be a value.
\s*=\s* # Equal Sign
(?P<val> # Start of group 'val'
"(?:[^\\"]|\\.)*" # Any doublequoted string
| # or
\w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr
| # or
""" + _LegalCharsPatt + r"""* # Any word or empty string
) # End of group 'val'
)? # End of optional value group
\s* # Any number of spaces.
(\s+|;|$) # Ending either at space, semicolon, or EOS.
""", re.ASCII) # May be removed if safe.
# At long last, here is the cookie class. Using this class is almost just like
# using a dictionary. See this module's docstring for example usage.
#
class BaseCookie(dict):
"""A container class for a set of Morsels."""
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
def __init__(self, input=None):
if input:
self.load(input)
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
def __setitem__(self, key, value):
"""Dictionary style assignment."""
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
"""Return a string suitable for HTTP."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.output(attrs, header))
return sep.join(result)
__str__ = output
def __repr__(self):
l = []
items = sorted(self.items())
for key, value in items:
l.append('%s=%s' % (key, repr(value.value)))
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.js_output(attrs))
return _nulljoin(result)
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if isinstance(rawdata, str):
self.__parse_string(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for key, value in rawdata.items():
self[key] = value
return
def __parse_string(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
M = None # current morsel
while 0 <= i < n:
# Start looking for a cookie
match = patt.search(str, i)
if not match:
# No more cookies
break
key, value = match.group("key"), match.group("val")
i = match.end(0)
# Parse the key, value in case it's metainfo
if key[0] == "$":
# We ignore attributes which pertain to the cookie
# mechanism as a whole. See RFC 2109.
# (Does anyone care?)
if M:
M[key[1:]] = value
elif key.lower() in Morsel._reserved:
if M:
if value is None:
if key.lower() in Morsel._flags:
M[key] = True
else:
M[key] = _unquote(value)
elif value is not None:
rval, cval = self.value_decode(value)
self.__set(key, rval, cval)
M = self[key]
class SimpleCookie(BaseCookie):
"""
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote(val), val
def value_encode(self, val):
strval = str(val)
return strval, _quote(strval)
| gpl-3.0 |
artnez/faceoff | scripts/run-server.py | 1 | 1416 | #! /usr/bin/env python
"""
Runs Faceoff as a standalone web server. Do not use this in production.
Copyright: (c) 2012-2014 Artem Nezvigin <[email protected]>
License: MIT, see LICENSE for details
"""
from sys import path
from os import environ
from os.path import dirname, join
path.append(join(dirname(__file__), '..'))
from argparse import ArgumentParser
parser = ArgumentParser(usage='%(prog)s [options]', argument_default='')
parser.add_argument('--host', metavar='<string>', dest='FACEOFF_HOST', default='0')
parser.add_argument('--port', metavar='<string>', dest='FACEOFF_PORT', default='5000')
parser.add_argument('--config', metavar='<string>', dest='FACEOFF_CONFIG')
parser.add_argument('--debug', action='store_const', const='1', dest='FACEOFF_DEBUG')
parser.add_argument('--db-path', metavar='<string>', dest='FACEOFF_DB_PATH')
parser.add_argument('--db-fixtures', action='store_const', const='1', dest='FACEOFF_DB_FIXTURES')
parser.add_argument('--log-path', metavar='<string>', dest='FACEOFF_LOG_PATH')
parser.add_argument('--log-level', metavar='<string>', dest='FACEOFF_LOG_LEVEL')
parser.add_argument('--log-filter', metavar='<string>', dest='FACEOFF_LOG_FILTER')
parser.add_argument('--log-ignore', metavar='<string>', dest='FACEOFF_LOG_IGNORE')
args = parser.parse_args()
environ.update(vars(args))
from faceoff import app
app.run(args.FACEOFF_HOST, int(args.FACEOFF_PORT), threaded=True)
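# Example invocation (all flags are defined by the parser above):
#   python scripts/run-server.py --host 127.0.0.1 --port 8080 --debug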
| mit |
MicroTrustRepos/microkernel | src/l4/pkg/python/contrib/Mac/Modules/fm/fmsupport.py | 39 | 2467 | # This script generates a Python interface for an Apple Macintosh Manager.
# It uses the "bgen" package to generate C code.
# The function specifications are generated by scanning the manager's header file,
# using the "scantools" package (customized for this particular manager).
import string
# Declarations that change for each manager
MACHEADERFILE = 'Fonts.h' # The Apple header file
MODNAME = '_Fm' # The name of the module
# The following is *usually* unchanged but may still require tuning
MODPREFIX = 'Fm' # The prefix for module-wide routines
INPUTFILE = string.lower(MODPREFIX) + 'gen.py' # The file generated by the scanner
OUTPUTFILE = MODNAME + "module.c" # The file generated by this program
from macsupport import *
# Create the type objects
class RevVarInputBufferType(VarInputBufferType):
def passInput(self, name):
return "%s__len__, %s__in__" % (name, name)
TextBuffer = RevVarInputBufferType()
includestuff = includestuff + """
#include <Carbon/Carbon.h>
/*
** Parse/generate FMetricRec records
*/
static PyObject *
FMRec_New(FMetricRec *itself)
{
return Py_BuildValue("O&O&O&O&O&",
PyMac_BuildFixed, itself->ascent,
PyMac_BuildFixed, itself->descent,
PyMac_BuildFixed, itself->leading,
PyMac_BuildFixed, itself->widMax,
ResObj_New, itself->wTabHandle);
}
#if 0
/* Not needed... */
static int
FMRec_Convert(PyObject *v, FMetricRec *p_itself)
{
return PyArg_ParseTuple(v, "O&O&O&O&O&",
                        PyMac_GetFixed, &p_itself->ascent,
                        PyMac_GetFixed, &p_itself->descent,
                        PyMac_GetFixed, &p_itself->leading,
                        PyMac_GetFixed, &p_itself->widMax,
                        ResObj_Convert, &p_itself->wTabHandle);
}
#endif
"""
FMetricRecPtr = OpaqueType('FMetricRec', 'FMRec')
# Create the generator groups and link them
module = MacModule(MODNAME, MODPREFIX, includestuff, finalstuff, initstuff)
# Create the generator classes used to populate the lists
Function = OSErrWeakLinkFunctionGenerator
# Create and populate the lists
functions = []
execfile(INPUTFILE)
# add the populated lists to the generator groups
# (in a different world the scan program would generate this)
for f in functions: module.add(f)
# generate output (open the output file as late as possible)
SetOutputFileName(OUTPUTFILE)
module.generate()
| gpl-2.0 |
Denisolt/Tensorflow_Chat_Bot | local/lib/python2.7/site-packages/wheel/test/test_basic.py | 92 | 6509 | """
Basic wheel tests.
"""
import os
import pkg_resources
import json
import sys
from pkg_resources import resource_filename
import wheel.util
import wheel.tool
from wheel import egg2wheel
from wheel.install import WheelFile
from zipfile import ZipFile
from shutil import rmtree
test_distributions = ("complex-dist", "simple.dist", "headers.dist")
def teardown_module():
"""Delete eggs/wheels created by tests."""
base = pkg_resources.resource_filename('wheel.test', '')
for dist in test_distributions:
for subdir in ('build', 'dist'):
try:
rmtree(os.path.join(base, dist, subdir))
except OSError:
pass
def setup_module():
build_wheel()
build_egg()
def build_wheel():
"""Build wheels from test distributions."""
for dist in test_distributions:
pwd = os.path.abspath(os.curdir)
distdir = pkg_resources.resource_filename('wheel.test', dist)
os.chdir(distdir)
try:
sys.argv = ['', 'bdist_wheel']
exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
finally:
os.chdir(pwd)
def build_egg():
"""Build eggs from test distributions."""
for dist in test_distributions:
pwd = os.path.abspath(os.curdir)
distdir = pkg_resources.resource_filename('wheel.test', dist)
os.chdir(distdir)
try:
sys.argv = ['', 'bdist_egg']
exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
finally:
os.chdir(pwd)
def test_findable():
"""Make sure pkg_resources can find us."""
assert pkg_resources.working_set.by_key['wheel'].version
def test_egg_re():
"""Make sure egg_info_re matches."""
egg_names_path = pkg_resources.resource_filename('wheel', 'eggnames.txt')
with open(egg_names_path) as egg_names:
for line in egg_names:
line = line.strip()
if not line:
continue
assert egg2wheel.egg_info_re.match(line), line
def test_compatibility_tags():
"""Test compatibilty tags are working."""
wf = WheelFile("package-1.0.0-cp32.cp33-noabi-noarch.whl")
assert (list(wf.compatibility_tags) ==
[('cp32', 'noabi', 'noarch'), ('cp33', 'noabi', 'noarch')])
assert (wf.arity == 2)
wf2 = WheelFile("package-1.0.0-1st-cp33-noabi-noarch.whl")
wf2_info = wf2.parsed_filename.groupdict()
assert wf2_info['build'] == '1st', wf2_info
def test_convert_egg():
base = pkg_resources.resource_filename('wheel.test', '')
for dist in test_distributions:
distdir = os.path.join(base, dist, 'dist')
eggs = [e for e in os.listdir(distdir) if e.endswith('.egg')]
wheel.tool.convert(eggs, distdir, verbose=False)
def test_unpack():
"""
Make sure 'wheel unpack' works.
This also verifies the integrity of our testing wheel files.
"""
for dist in test_distributions:
distdir = pkg_resources.resource_filename('wheel.test',
os.path.join(dist, 'dist'))
for wheelfile in (w for w in os.listdir(distdir) if w.endswith('.whl')):
wheel.tool.unpack(os.path.join(distdir, wheelfile), distdir)
def test_no_scripts():
"""Make sure entry point scripts are not generated."""
dist = "complex-dist"
basedir = pkg_resources.resource_filename('wheel.test', dist)
for (dirname, subdirs, filenames) in os.walk(basedir):
for filename in filenames:
if filename.endswith('.whl'):
whl = ZipFile(os.path.join(dirname, filename))
for entry in whl.infolist():
assert not '.data/scripts/' in entry.filename
def test_pydist():
"""Make sure pydist.json exists and validates against our schema."""
# XXX this test may need manual cleanup of older wheels
import jsonschema
def open_json(filename):
with open(filename, 'rb') as json_file:
return json.loads(json_file.read().decode('utf-8'))
pymeta_schema = open_json(resource_filename('wheel.test',
'pydist-schema.json'))
valid = 0
for dist in ("simple.dist", "complex-dist"):
basedir = pkg_resources.resource_filename('wheel.test', dist)
for (dirname, subdirs, filenames) in os.walk(basedir):
for filename in filenames:
if filename.endswith('.whl'):
whl = ZipFile(os.path.join(dirname, filename))
for entry in whl.infolist():
if entry.filename.endswith('/metadata.json'):
pymeta = json.loads(whl.read(entry).decode('utf-8'))
jsonschema.validate(pymeta, pymeta_schema)
valid += 1
assert valid > 0, "No metadata.json found"
def test_util():
"""Test functions in util.py."""
for i in range(10):
before = b'*' * i
encoded = wheel.util.urlsafe_b64encode(before)
assert not encoded.endswith(b'=')
after = wheel.util.urlsafe_b64decode(encoded)
assert before == after
def test_pick_best():
"""Test the wheel ranking algorithm."""
def get_tags(res):
info = res[-1].parsed_filename.groupdict()
return info['pyver'], info['abi'], info['plat']
cand_tags = [('py27', 'noabi', 'noarch'), ('py26', 'noabi', 'noarch'),
('cp27', 'noabi', 'linux_i686'),
('cp26', 'noabi', 'linux_i686'),
('cp27', 'noabi', 'linux_x86_64'),
('cp26', 'noabi', 'linux_x86_64')]
cand_wheels = [WheelFile('testpkg-1.0-%s-%s-%s.whl' % t)
for t in cand_tags]
supported = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]
supported2 = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch'),
('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch')]
supported3 = [('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch'),
('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]
for supp in (supported, supported2, supported3):
context = lambda: list(supp)
for wheel in cand_wheels:
wheel.context = context
best = max(cand_wheels)
assert list(best.tags)[0] == supp[0]
# assert_equal(
# list(map(get_tags, pick_best(cand_wheels, supp, top=False))), supp)
| gpl-3.0 |
alistairlow/tensorflow | tensorflow/python/keras/losses/__init__.py | 75 | 2203 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Loss functions.
from tensorflow.python.keras._impl.keras.losses import binary_crossentropy
from tensorflow.python.keras._impl.keras.losses import categorical_crossentropy
from tensorflow.python.keras._impl.keras.losses import categorical_hinge
from tensorflow.python.keras._impl.keras.losses import cosine_proximity
from tensorflow.python.keras._impl.keras.losses import hinge
from tensorflow.python.keras._impl.keras.losses import kullback_leibler_divergence
from tensorflow.python.keras._impl.keras.losses import logcosh
from tensorflow.python.keras._impl.keras.losses import mean_absolute_error
from tensorflow.python.keras._impl.keras.losses import mean_absolute_percentage_error
from tensorflow.python.keras._impl.keras.losses import mean_squared_error
from tensorflow.python.keras._impl.keras.losses import mean_squared_logarithmic_error
from tensorflow.python.keras._impl.keras.losses import poisson
from tensorflow.python.keras._impl.keras.losses import sparse_categorical_crossentropy
from tensorflow.python.keras._impl.keras.losses import squared_hinge
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras._impl.keras.losses import deserialize
from tensorflow.python.keras._impl.keras.losses import serialize
from tensorflow.python.keras._impl.keras.losses import get
del absolute_import
del division
del print_function
| apache-2.0 |
aforalee/rally | tests/unit/test_log.py | 13 | 7028 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import mock
from rally.common import log
from tests.unit import test
class LogTestCase(test.TestCase):
@mock.patch("rally.common.log.CONF")
@mock.patch("rally.common.log.handlers")
@mock.patch("rally.common.log.oslogging")
def test_setup(self, mock_oslogging, mock_handlers, mock_conf):
proj = "fakep"
version = "fakev"
mock_handlers.ColorHandler.LEVEL_COLORS = {
logging.DEBUG: "debug_color"}
mock_conf.rally_debug = True
log.setup(proj, version)
self.assertIn(logging.RDEBUG, mock_handlers.ColorHandler.LEVEL_COLORS)
self.assertEqual(
mock_handlers.ColorHandler.LEVEL_COLORS[logging.DEBUG],
mock_handlers.ColorHandler.LEVEL_COLORS[logging.RDEBUG])
mock_oslogging.setup.assert_called_once_with(mock_conf, proj, version)
mock_oslogging.getLogger(None).logger.setLevel.assert_called_once_with(
logging.RDEBUG)
@mock.patch("rally.common.log.logging")
@mock.patch("rally.common.log.RallyContextAdapter")
@mock.patch("rally.common.log.oslogging")
def test_getLogger(self, mock_oslogging, mock_rally_context_adapter,
mock_logging):
name = "fake"
vers = "fake"
mock_oslogging._loggers = {}
returned_logger = log.getLogger(name, vers)
self.assertIn(name, mock_oslogging._loggers)
mock_rally_context_adapter.assert_called_once_with(
mock_logging.getLogger(name),
{"project": "rally", "version": vers})
self.assertEqual(mock_oslogging._loggers[name], returned_logger)
class LogRallyContaxtAdapter(test.TestCase):
@mock.patch("rally.common.log.logging")
@mock.patch("rally.common.log.oslogging.KeywordArgumentAdapter")
def test_debug(self, mock_keyword_argument_adapter, mock_logging):
mock_logging.RDEBUG = 123
fake_msg = "fake message"
radapter = log.RallyContextAdapter(mock.MagicMock(), "fakep")
radapter.log = mock.MagicMock()
radapter.debug(fake_msg)
radapter.log.assert_called_once_with(mock_logging.RDEBUG,
fake_msg)
class ExceptionLoggerTestCase(test.TestCase):
@mock.patch("rally.common.log.is_debug")
def test_context(self, mock_is_debug):
# Prepare
mock_is_debug.return_value = True
logger = mock.MagicMock()
exception = Exception()
# Run
with log.ExceptionLogger(logger, "foo") as e:
raise exception
# Assertions
logger.warning.assert_called_once_with("foo")
logger.exception.assert_called_once_with(exception)
logger.debug.assert_called_once_with(exception)
self.assertEqual(e.exception, exception)
class LogCatcherTestCase(test.TestCase):
# FIXME(pboldin): These are really functional tests and should be moved
# there when the infrastructure is ready
def test_logcatcher(self):
LOG = log.getLogger("testlogger")
LOG.logger.setLevel(log.INFO)
with log.LogCatcher(LOG) as catcher:
LOG.warning("Warning")
LOG.info("Info")
LOG.debug("Debug")
catcher.assertInLogs("Warning")
self.assertRaises(AssertionError, catcher.assertInLogs, "Error")
self.assertEqual(["Warning", "Info"], catcher.fetchLogs())
self.assertEqual(2, len(catcher.fetchLogRecords()))
class CatcherHandlerTestCase(test.TestCase):
@mock.patch("logging.handlers.BufferingHandler.__init__")
def test_init(self, mock_buffering_handler___init__):
catcher_handler = log.CatcherHandler()
mock_buffering_handler___init__.assert_called_once_with(
catcher_handler, 0)
def test_shouldFlush(self):
catcher_handler = log.CatcherHandler()
self.assertFalse(catcher_handler.shouldFlush())
def test_emit(self):
catcher_handler = log.CatcherHandler()
catcher_handler.buffer = mock.Mock()
catcher_handler.emit("foobar")
catcher_handler.buffer.append.assert_called_once_with("foobar")
class LogCatcherUnitTestCase(test.TestCase):
def setUp(self):
super(LogCatcherUnitTestCase, self).setUp()
patcher = mock.patch("rally.common.log.CatcherHandler")
self.catcher_handler = patcher.start()
self.catcher_handler.return_value.buffer = [
mock.Mock(msg="foo"), mock.Mock(msg="bar")]
self.addCleanup(patcher.stop)
self.logger = mock.Mock()
def test_init(self):
catcher = log.LogCatcher(self.logger)
self.assertEqual(self.logger.logger, catcher.logger)
self.assertEqual(self.catcher_handler.return_value, catcher.handler)
self.catcher_handler.assert_called_once_with()
def test_enter(self):
catcher = log.LogCatcher(self.logger)
self.assertEqual(catcher, catcher.__enter__())
self.logger.logger.addHandler.assert_called_once_with(
self.catcher_handler.return_value)
def test_exit(self):
catcher = log.LogCatcher(self.logger)
catcher.__exit__(None, None, None)
self.logger.logger.removeHandler.assert_called_once_with(
self.catcher_handler.return_value)
def test_assertInLogs(self):
catcher = log.LogCatcher(self.logger)
self.assertEqual(["foo"], catcher.assertInLogs("foo"))
self.assertEqual(["bar"], catcher.assertInLogs("bar"))
self.assertRaises(AssertionError, catcher.assertInLogs, "foobar")
def test_assertInLogs_contains(self):
catcher = log.LogCatcher(self.logger)
record_mock = mock.MagicMock()
self.catcher_handler.return_value.buffer = [record_mock]
record_mock.msg.__contains__.return_value = True
self.assertEqual([record_mock.msg], catcher.assertInLogs("foo"))
record_mock.msg.__contains__.assert_called_once_with("foo")
def test_fetchLogRecords(self):
catcher = log.LogCatcher(self.logger)
self.assertEqual(self.catcher_handler.return_value.buffer,
catcher.fetchLogRecords())
def test_fetchLogs(self):
catcher = log.LogCatcher(self.logger)
self.assertEqual(
[r.msg for r in self.catcher_handler.return_value.buffer],
catcher.fetchLogs())
| apache-2.0 |
varunagrawal/azure-services | varunagrawal/VarunWeb/env/Lib/site-packages/_markerlib/markers.py | 1769 | 3979 | # -*- coding: utf-8 -*-
"""Interpret PEP 345 environment markers.
EXPR [in|==|!=|not in] EXPR [or|and] ...
where EXPR belongs to any of those:
python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1])
python_full_version = sys.version.split()[0]
os.name = os.name
sys.platform = sys.platform
platform.version = platform.version()
platform.machine = platform.machine()
platform.python_implementation = platform.python_implementation()
a free string, like '2.6', or 'win32'
"""
__all__ = ['default_environment', 'compile', 'interpret']
import ast
import os
import platform
import sys
import weakref
_builtin_compile = compile
try:
from platform import python_implementation
except ImportError:
if os.name == "java":
# Jython 2.5 has ast module, but not platform.python_implementation() function.
def python_implementation():
return "Jython"
else:
raise
# restricted set of variables
_VARS = {'sys.platform': sys.platform,
'python_version': '%s.%s' % sys.version_info[:2],
# FIXME parsing sys.platform is not reliable, but there is no other
# way to get e.g. 2.7.2+, and the PEP is defined with sys.version
'python_full_version': sys.version.split(' ', 1)[0],
'os.name': os.name,
'platform.version': platform.version(),
'platform.machine': platform.machine(),
'platform.python_implementation': python_implementation(),
'extra': None # wheel extension
}
for var in list(_VARS.keys()):
if '.' in var:
_VARS[var.replace('.', '_')] = _VARS[var]
def default_environment():
"""Return copy of default PEP 385 globals dictionary."""
return dict(_VARS)
class ASTWhitelist(ast.NodeTransformer):
def __init__(self, statement):
self.statement = statement # for error messages
ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load, ast.Str)
# Bool operations
ALLOWED += (ast.And, ast.Or)
# Comparison operations
ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.NotEq, ast.NotIn)
def visit(self, node):
"""Ensure statement only contains allowed nodes."""
if not isinstance(node, self.ALLOWED):
raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
(self.statement,
(' ' * node.col_offset) + '^'))
return ast.NodeTransformer.visit(self, node)
def visit_Attribute(self, node):
"""Flatten one level of attribute access."""
new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx)
return ast.copy_location(new_node, node)
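# Note: flattening e.g. `os.name` into a single ast.Name with id "os.name"
# lets the later eval() resolve dotted marker variables as plain keys of the
# environment dict (cf. _VARS) rather than as attribute access.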
def parse_marker(marker):
tree = ast.parse(marker, mode='eval')
new_tree = ASTWhitelist(marker).generic_visit(tree)
return new_tree
def compile_marker(parsed_marker):
return _builtin_compile(parsed_marker, '<environment marker>', 'eval',
dont_inherit=True)
_cache = weakref.WeakValueDictionary()
def compile(marker):
"""Return compiled marker as a function accepting an environment dict."""
try:
return _cache[marker]
except KeyError:
pass
if not marker.strip():
def marker_fn(environment=None, override=None):
""""""
return True
else:
compiled_marker = compile_marker(parse_marker(marker))
def marker_fn(environment=None, override=None):
"""override updates environment"""
if override is None:
override = {}
if environment is None:
environment = default_environment()
environment.update(override)
return eval(compiled_marker, environment)
marker_fn.__doc__ = marker
_cache[marker] = marker_fn
return _cache[marker]
def interpret(marker, environment=None):
return compile(marker)(environment)
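# Illustrative usage (results depend on the running interpreter/platform):
#
#     >>> interpret("os.name == 'posix'")           # doctest: +SKIP
#     True
#     >>> interpret("python_version >= '2.6'")      # doctest: +SKIP
#     True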
| gpl-2.0 |
hzlf/openbroadcast | website/apps/alibrary/migrations/0070_auto__add_field_playlistitemplaylist_fade_cross.py | 1 | 44115 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'PlaylistItemPlaylist.fade_cross'
db.add_column('alibrary_playlistitemplaylist', 'fade_cross',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0, max_length=12),
keep_default=False)
def backwards(self, orm):
# Deleting field 'PlaylistItemPlaylist.fade_cross'
db.delete_column('alibrary_playlistitemplaylist', 'fade_cross')
models = {
'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': "orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'alibrary.apilookup': {
'Meta': {'ordering': "('created',)", 'object_name': 'APILookup'},
'api_data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'processed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'provider': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50'}),
'ressource_id': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'alibrary.artist': {
'Meta': {'ordering': "('name',)", 'object_name': 'Artist'},
'aliases': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'aliases_rel_+'", 'null': 'True', 'to': "orm['alibrary.Artist']"}),
'biography': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'disable_link': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_folder'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'listed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_main_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['alibrary.Artist']", 'through': "orm['alibrary.ArtistMembership']", 'symmetrical': 'False'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'placeholder_1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'professions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['alibrary.Profession']", 'through': "orm['alibrary.ArtistProfessions']", 'symmetrical': 'False'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.artistmembership': {
'Meta': {'object_name': 'ArtistMembership'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'artist_child'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'artist_parent'", 'to': "orm['alibrary.Artist']"}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'artist_membership_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"})
},
'alibrary.artistplugin': {
'Meta': {'object_name': 'ArtistPlugin', 'db_table': "'cmsplugin_artistplugin'", '_ormbases': ['cms.CMSPlugin']},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Artist']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'alibrary.artistprofessions': {
'Meta': {'object_name': 'ArtistProfessions'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Profession']"})
},
'alibrary.daypart': {
'Meta': {'ordering': "('day', 'time_start')", 'object_name': 'Daypart'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'day': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '1', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time_end': ('django.db.models.fields.TimeField', [], {}),
'time_start': ('django.db.models.fields.TimeField', [], {})
},
'alibrary.format': {
'Meta': {'ordering': "('format', 'version')", 'object_name': 'Format'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default_price': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': "'base'", 'max_length': '10'})
},
'alibrary.label': {
'Meta': {'ordering': "('name',)", 'object_name': 'Label'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email_main': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'label_folder'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labelcode': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'label_children'", 'null': 'True', 'to': "orm['alibrary.Label']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.license': {
'Meta': {'ordering': "('name',)", 'object_name': 'License'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'license_children'", 'null': 'True', 'to': "orm['alibrary.License']"}),
'restricted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'0231407e-e1a5-4c8d-b3d0-a8675a64c8c6'", 'max_length': '36'})
},
'alibrary.licensetranslation': {
'Meta': {'ordering': "('language_code',)", 'unique_together': "(('language_code', 'master'),)", 'object_name': 'LicenseTranslation', 'db_table': "'alibrary_license_translation'"},
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '15', 'blank': 'True'}),
'license_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['alibrary.License']"}),
'name_translated': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'alibrary.media': {
'Meta': {'ordering': "('tracknumber',)", 'object_name': 'Media'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_artist'", 'null': 'True', 'to': "orm['alibrary.Artist']"}),
'base_bitrate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'base_duration': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'base_filesize': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'base_format': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'base_samplerate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'conversion_status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'echoprint_status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'extra_artists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.Profession']", 'null': 'True', 'through': "orm['alibrary.MediaExtraartists']", 'blank': 'True'}),
'folder': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isrc': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_license'", 'null': 'True', 'to': "orm['alibrary.License']"}),
'lock': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '1'}),
'master': ('django.db.models.fields.files.FileField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'master_sha1': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'mediatype': ('django.db.models.fields.CharField', [], {'default': "'track'", 'max_length': '12'}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'processed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '2'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_release'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['alibrary.Release']"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'tracknumber': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.mediaextraartists': {
'Meta': {'object_name': 'MediaExtraartists'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'extraartist_artist'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'extraartist_media'", 'to': "orm['alibrary.Media']"}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'media_extraartist_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"})
},
'alibrary.mediaformat': {
'Meta': {'ordering': "('name',)", 'object_name': 'Mediaformat'},
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_listing': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'alibrary.mediaplugin': {
'Meta': {'object_name': 'MediaPlugin', 'db_table': "'cmsplugin_mediaplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'headline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Media']"})
},
'alibrary.playlist': {
'Meta': {'ordering': "('name',)", 'object_name': 'Playlist'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
            'dayparts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'daypart_playlists'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['alibrary.Daypart']"}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '12', 'null': 'True'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.PlaylistItem']", 'null': 'True', 'through': "orm['alibrary.PlaylistItemPlaylist']", 'blank': 'True'}),
'main_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'media': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.Media']", 'null': 'True', 'through': "orm['alibrary.PlaylistMedia']", 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'target_duration': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'other'", 'max_length': '12', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.playlistitem': {
'Meta': {'object_name': 'PlaylistItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.playlistitemplaylist': {
'Meta': {'object_name': 'PlaylistItemPlaylist'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'cue_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'cue_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_cross': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.PlaylistItem']"}),
'playlist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Playlist']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.playlistmedia': {
'Meta': {'object_name': 'PlaylistMedia'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'cue_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'cue_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_cross': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_in': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'fade_out': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Media']"}),
'playlist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Playlist']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.profession': {
'Meta': {'ordering': "('name',)", 'object_name': 'Profession'},
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_listing': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'alibrary.relation': {
'Meta': {'ordering': "('url',)", 'unique_together': "(('service', 'content_type', 'object_id'),)", 'object_name': 'Relation'},
'action': ('django.db.models.fields.CharField', [], {'default': "'information'", 'max_length': '50'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'service': ('django.db.models.fields.CharField', [], {'default': "'generic'", 'max_length': '50'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512'})
},
'alibrary.release': {
'Meta': {'ordering': "('-created',)", 'object_name': 'Release'},
'asin': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'catalognumber': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'cover_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_cover_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'd_tags': ('tagging.fields.TagField', [], {'null': 'True'}),
'description': ('lib.fields.extra.MarkdownTextField', [], {'null': 'True', 'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extra_artists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['alibrary.Artist']", 'null': 'True', 'through': "orm['alibrary.ReleaseExtraartists']", 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_folder'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['filer.Folder']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_label'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['alibrary.Label']"}),
'legacy_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_license'", 'null': 'True', 'to': "orm['alibrary.License']"}),
'main_format': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Mediaformat']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_main_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'migrated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_owner'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'placeholder_1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'pressings': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'max_length': '12'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_publisher'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'release_country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'releasedate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'releasestatus': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'releasetype': ('django.db.models.fields.CharField', [], {'default': "'other'", 'max_length': '12'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'True'}),
'totaltracks': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
'alibrary.releaseextraartists': {
'Meta': {'object_name': 'ReleaseExtraartists'},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_extraartist_artist'", 'to': "orm['alibrary.Artist']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_extraartist_profession'", 'null': 'True', 'to': "orm['alibrary.Profession']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_extraartist_release'", 'to': "orm['alibrary.Release']"})
},
'alibrary.releaseplugin': {
'Meta': {'object_name': 'ReleasePlugin', 'db_table': "'cmsplugin_releaseplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['alibrary.Release']"})
},
'alibrary.releaserelations': {
'Meta': {'object_name': 'ReleaseRelations'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_relation_relation'", 'to': "orm['alibrary.Relation']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'release_relation_release'", 'to': "orm['alibrary.Release']"})
},
'arating.vote': {
'Meta': {'unique_together': "(('user', 'content_type', 'object_id'),)", 'object_name': 'Vote'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
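    # South freezes a snapshot of every model this migration touches (the
    # ``models`` dict above) so the historical ORM can be rebuilt without
    # importing the live model classes; each field entry is a
    # (field class path, positional args, keyword args) triple.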
complete_apps = ['alibrary'] | gpl-3.0 |
Godiyos/python-for-android | python3-alpha/python3-src/Lib/test/test_colorsys.py | 98 | 2834 | import unittest, test.support
import colorsys
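# range() only steps over integers, so the tests use this small generator to
# yield evenly spaced floats from start to stop inclusive, e.g.
# list(frange(0.0, 1.0, 0.5)) == [0.0, 0.5, 1.0].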
def frange(start, stop, step):
while start <= stop:
yield start
start += step
class ColorsysTest(unittest.TestCase):
def assertTripleEqual(self, tr1, tr2):
self.assertEqual(len(tr1), 3)
self.assertEqual(len(tr2), 3)
self.assertAlmostEqual(tr1[0], tr2[0])
self.assertAlmostEqual(tr1[1], tr2[1])
self.assertAlmostEqual(tr1[2], tr2[2])
def test_hsv_roundtrip(self):
for r in frange(0.0, 1.0, 0.2):
for g in frange(0.0, 1.0, 0.2):
for b in frange(0.0, 1.0, 0.2):
rgb = (r, g, b)
self.assertTripleEqual(
rgb,
colorsys.hsv_to_rgb(*colorsys.rgb_to_hsv(*rgb))
)
def test_hsv_values(self):
values = [
# rgb, hsv
((0.0, 0.0, 0.0), ( 0 , 0.0, 0.0)), # black
((0.0, 0.0, 1.0), (4./6., 1.0, 1.0)), # blue
((0.0, 1.0, 0.0), (2./6., 1.0, 1.0)), # green
((0.0, 1.0, 1.0), (3./6., 1.0, 1.0)), # cyan
((1.0, 0.0, 0.0), ( 0 , 1.0, 1.0)), # red
((1.0, 0.0, 1.0), (5./6., 1.0, 1.0)), # purple
((1.0, 1.0, 0.0), (1./6., 1.0, 1.0)), # yellow
((1.0, 1.0, 1.0), ( 0 , 0.0, 1.0)), # white
((0.5, 0.5, 0.5), ( 0 , 0.0, 0.5)), # grey
]
for (rgb, hsv) in values:
self.assertTripleEqual(hsv, colorsys.rgb_to_hsv(*rgb))
self.assertTripleEqual(rgb, colorsys.hsv_to_rgb(*hsv))
def test_hls_roundtrip(self):
for r in frange(0.0, 1.0, 0.2):
for g in frange(0.0, 1.0, 0.2):
for b in frange(0.0, 1.0, 0.2):
rgb = (r, g, b)
self.assertTripleEqual(
rgb,
colorsys.hls_to_rgb(*colorsys.rgb_to_hls(*rgb))
)
def test_hls_values(self):
values = [
# rgb, hls
((0.0, 0.0, 0.0), ( 0 , 0.0, 0.0)), # black
((0.0, 0.0, 1.0), (4./6., 0.5, 1.0)), # blue
((0.0, 1.0, 0.0), (2./6., 0.5, 1.0)), # green
((0.0, 1.0, 1.0), (3./6., 0.5, 1.0)), # cyan
((1.0, 0.0, 0.0), ( 0 , 0.5, 1.0)), # red
((1.0, 0.0, 1.0), (5./6., 0.5, 1.0)), # purple
((1.0, 1.0, 0.0), (1./6., 0.5, 1.0)), # yellow
((1.0, 1.0, 1.0), ( 0 , 1.0, 0.0)), # white
((0.5, 0.5, 0.5), ( 0 , 0.5, 0.0)), # grey
]
for (rgb, hls) in values:
self.assertTripleEqual(hls, colorsys.rgb_to_hls(*rgb))
self.assertTripleEqual(rgb, colorsys.hls_to_rgb(*hls))
def test_main():
test.support.run_unittest(ColorsysTest)
if __name__ == "__main__":
test_main()
| apache-2.0 |
blacklin/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/html5lib/constants.py | 963 | 87346 | from __future__ import absolute_import, division, unicode_literals
import string
import gettext
_ = gettext.gettext
EOF = None
E = {
"null-character":
_("Null character in input stream, replaced with U+FFFD."),
"invalid-codepoint":
_("Invalid codepoint in stream."),
"incorrectly-placed-solidus":
_("Solidus (/) incorrectly placed in tag."),
"incorrect-cr-newline-entity":
_("Incorrect CR newline entity, replaced with LF."),
"illegal-windows-1252-entity":
_("Entity used with illegal number (windows-1252 reference)."),
"cant-convert-numeric-entity":
_("Numeric entity couldn't be converted to character "
"(codepoint U+%(charAsInt)08x)."),
"illegal-codepoint-for-numeric-entity":
_("Numeric entity represents an illegal codepoint: "
"U+%(charAsInt)08x."),
"numeric-entity-without-semicolon":
_("Numeric entity didn't end with ';'."),
"expected-numeric-entity-but-got-eof":
_("Numeric entity expected. Got end of file instead."),
"expected-numeric-entity":
_("Numeric entity expected but none found."),
"named-entity-without-semicolon":
_("Named entity didn't end with ';'."),
"expected-named-entity":
_("Named entity expected. Got none."),
"attributes-in-end-tag":
_("End tag contains unexpected attributes."),
'self-closing-flag-on-end-tag':
_("End tag contains unexpected self-closing flag."),
"expected-tag-name-but-got-right-bracket":
_("Expected tag name. Got '>' instead."),
"expected-tag-name-but-got-question-mark":
_("Expected tag name. Got '?' instead. (HTML doesn't "
"support processing instructions.)"),
"expected-tag-name":
_("Expected tag name. Got something else instead"),
"expected-closing-tag-but-got-right-bracket":
_("Expected closing tag. Got '>' instead. Ignoring '</>'."),
"expected-closing-tag-but-got-eof":
_("Expected closing tag. Unexpected end of file."),
"expected-closing-tag-but-got-char":
_("Expected closing tag. Unexpected character '%(data)s' found."),
"eof-in-tag-name":
_("Unexpected end of file in the tag name."),
"expected-attribute-name-but-got-eof":
_("Unexpected end of file. Expected attribute name instead."),
"eof-in-attribute-name":
_("Unexpected end of file in attribute name."),
"invalid-character-in-attribute-name":
_("Invalid character in attribute name"),
"duplicate-attribute":
_("Dropped duplicate attribute on tag."),
"expected-end-of-tag-name-but-got-eof":
_("Unexpected end of file. Expected = or end of tag."),
"expected-attribute-value-but-got-eof":
_("Unexpected end of file. Expected attribute value."),
"expected-attribute-value-but-got-right-bracket":
_("Expected attribute value. Got '>' instead."),
'equals-in-unquoted-attribute-value':
_("Unexpected = in unquoted attribute"),
'unexpected-character-in-unquoted-attribute-value':
_("Unexpected character in unquoted attribute"),
"invalid-character-after-attribute-name":
_("Unexpected character after attribute name."),
"unexpected-character-after-attribute-value":
_("Unexpected character after attribute value."),
"eof-in-attribute-value-double-quote":
_("Unexpected end of file in attribute value (\")."),
"eof-in-attribute-value-single-quote":
_("Unexpected end of file in attribute value (')."),
"eof-in-attribute-value-no-quotes":
_("Unexpected end of file in attribute value."),
"unexpected-EOF-after-solidus-in-tag":
_("Unexpected end of file in tag. Expected >"),
"unexpected-character-after-solidus-in-tag":
_("Unexpected character after / in tag. Expected >"),
"expected-dashes-or-doctype":
_("Expected '--' or 'DOCTYPE'. Not found."),
"unexpected-bang-after-double-dash-in-comment":
_("Unexpected ! after -- in comment"),
"unexpected-space-after-double-dash-in-comment":
_("Unexpected space after -- in comment"),
"incorrect-comment":
_("Incorrect comment."),
"eof-in-comment":
_("Unexpected end of file in comment."),
"eof-in-comment-end-dash":
_("Unexpected end of file in comment (-)"),
"unexpected-dash-after-double-dash-in-comment":
_("Unexpected '-' after '--' found in comment."),
"eof-in-comment-double-dash":
_("Unexpected end of file in comment (--)."),
"eof-in-comment-end-space-state":
_("Unexpected end of file in comment."),
"eof-in-comment-end-bang-state":
_("Unexpected end of file in comment."),
"unexpected-char-in-comment":
_("Unexpected character in comment found."),
"need-space-after-doctype":
_("No space after literal string 'DOCTYPE'."),
"expected-doctype-name-but-got-right-bracket":
_("Unexpected > character. Expected DOCTYPE name."),
"expected-doctype-name-but-got-eof":
_("Unexpected end of file. Expected DOCTYPE name."),
"eof-in-doctype-name":
_("Unexpected end of file in DOCTYPE name."),
"eof-in-doctype":
_("Unexpected end of file in DOCTYPE."),
"expected-space-or-right-bracket-in-doctype":
_("Expected space or '>'. Got '%(data)s'"),
"unexpected-end-of-doctype":
_("Unexpected end of DOCTYPE."),
"unexpected-char-in-doctype":
_("Unexpected character in DOCTYPE."),
"eof-in-innerhtml":
_("XXX innerHTML EOF"),
"unexpected-doctype":
_("Unexpected DOCTYPE. Ignored."),
"non-html-root":
_("html needs to be the first start tag."),
"expected-doctype-but-got-eof":
_("Unexpected End of file. Expected DOCTYPE."),
"unknown-doctype":
_("Erroneous DOCTYPE."),
"expected-doctype-but-got-chars":
_("Unexpected non-space characters. Expected DOCTYPE."),
"expected-doctype-but-got-start-tag":
_("Unexpected start tag (%(name)s). Expected DOCTYPE."),
"expected-doctype-but-got-end-tag":
_("Unexpected end tag (%(name)s). Expected DOCTYPE."),
"end-tag-after-implied-root":
_("Unexpected end tag (%(name)s) after the (implied) root element."),
"expected-named-closing-tag-but-got-eof":
_("Unexpected end of file. Expected end tag (%(name)s)."),
"two-heads-are-not-better-than-one":
_("Unexpected start tag head in existing head. Ignored."),
"unexpected-end-tag":
_("Unexpected end tag (%(name)s). Ignored."),
"unexpected-start-tag-out-of-my-head":
_("Unexpected start tag (%(name)s) that can be in head. Moved."),
"unexpected-start-tag":
_("Unexpected start tag (%(name)s)."),
"missing-end-tag":
_("Missing end tag (%(name)s)."),
"missing-end-tags":
_("Missing end tags (%(name)s)."),
"unexpected-start-tag-implies-end-tag":
_("Unexpected start tag (%(startName)s) "
"implies end tag (%(endName)s)."),
"unexpected-start-tag-treated-as":
_("Unexpected start tag (%(originalName)s). Treated as %(newName)s."),
"deprecated-tag":
_("Unexpected start tag %(name)s. Don't use it!"),
"unexpected-start-tag-ignored":
_("Unexpected start tag %(name)s. Ignored."),
"expected-one-end-tag-but-got-another":
_("Unexpected end tag (%(gotName)s). "
"Missing end tag (%(expectedName)s)."),
"end-tag-too-early":
_("End tag (%(name)s) seen too early. Expected other end tag."),
"end-tag-too-early-named":
_("Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s)."),
"end-tag-too-early-ignored":
_("End tag (%(name)s) seen too early. Ignored."),
"adoption-agency-1.1":
_("End tag (%(name)s) violates step 1, "
"paragraph 1 of the adoption agency algorithm."),
"adoption-agency-1.2":
_("End tag (%(name)s) violates step 1, "
"paragraph 2 of the adoption agency algorithm."),
"adoption-agency-1.3":
_("End tag (%(name)s) violates step 1, "
"paragraph 3 of the adoption agency algorithm."),
"adoption-agency-4.4":
_("End tag (%(name)s) violates step 4, "
"paragraph 4 of the adoption agency algorithm."),
"unexpected-end-tag-treated-as":
_("Unexpected end tag (%(originalName)s). Treated as %(newName)s."),
"no-end-tag":
_("This element (%(name)s) has no end tag."),
"unexpected-implied-end-tag-in-table":
_("Unexpected implied end tag (%(name)s) in the table phase."),
"unexpected-implied-end-tag-in-table-body":
_("Unexpected implied end tag (%(name)s) in the table body phase."),
"unexpected-char-implies-table-voodoo":
_("Unexpected non-space characters in "
"table context caused voodoo mode."),
"unexpected-hidden-input-in-table":
_("Unexpected input with type hidden in table context."),
"unexpected-form-in-table":
_("Unexpected form in table context."),
"unexpected-start-tag-implies-table-voodoo":
_("Unexpected start tag (%(name)s) in "
"table context caused voodoo mode."),
"unexpected-end-tag-implies-table-voodoo":
_("Unexpected end tag (%(name)s) in "
"table context caused voodoo mode."),
"unexpected-cell-in-table-body":
_("Unexpected table cell start tag (%(name)s) "
"in the table body phase."),
"unexpected-cell-end-tag":
_("Got table cell end tag (%(name)s) "
"while required end tags are missing."),
"unexpected-end-tag-in-table-body":
_("Unexpected end tag (%(name)s) in the table body phase. Ignored."),
"unexpected-implied-end-tag-in-table-row":
_("Unexpected implied end tag (%(name)s) in the table row phase."),
"unexpected-end-tag-in-table-row":
_("Unexpected end tag (%(name)s) in the table row phase. Ignored."),
"unexpected-select-in-select":
_("Unexpected select start tag in the select phase "
"treated as select end tag."),
"unexpected-input-in-select":
_("Unexpected input start tag in the select phase."),
"unexpected-start-tag-in-select":
_("Unexpected start tag token (%(name)s in the select phase. "
"Ignored."),
"unexpected-end-tag-in-select":
_("Unexpected end tag (%(name)s) in the select phase. Ignored."),
"unexpected-table-element-start-tag-in-select-in-table":
_("Unexpected table element start tag (%(name)s) in the select in table phase."),
"unexpected-table-element-end-tag-in-select-in-table":
_("Unexpected table element end tag (%(name)s) in the select in table phase."),
"unexpected-char-after-body":
_("Unexpected non-space characters in the after body phase."),
"unexpected-start-tag-after-body":
_("Unexpected start tag token (%(name)s)"
" in the after body phase."),
"unexpected-end-tag-after-body":
_("Unexpected end tag token (%(name)s)"
" in the after body phase."),
"unexpected-char-in-frameset":
_("Unexpected characters in the frameset phase. Characters ignored."),
"unexpected-start-tag-in-frameset":
_("Unexpected start tag token (%(name)s)"
" in the frameset phase. Ignored."),
"unexpected-frameset-in-frameset-innerhtml":
_("Unexpected end tag token (frameset) "
"in the frameset phase (innerHTML)."),
"unexpected-end-tag-in-frameset":
_("Unexpected end tag token (%(name)s)"
" in the frameset phase. Ignored."),
"unexpected-char-after-frameset":
_("Unexpected non-space characters in the "
"after frameset phase. Ignored."),
"unexpected-start-tag-after-frameset":
_("Unexpected start tag (%(name)s)"
" in the after frameset phase. Ignored."),
"unexpected-end-tag-after-frameset":
_("Unexpected end tag (%(name)s)"
" in the after frameset phase. Ignored."),
"unexpected-end-tag-after-body-innerhtml":
_("Unexpected end tag after body(innerHtml)"),
"expected-eof-but-got-char":
_("Unexpected non-space characters. Expected end of file."),
"expected-eof-but-got-start-tag":
_("Unexpected start tag (%(name)s)"
". Expected end of file."),
"expected-eof-but-got-end-tag":
_("Unexpected end tag (%(name)s)"
". Expected end of file."),
"eof-in-table":
_("Unexpected end of file. Expected table content."),
"eof-in-select":
_("Unexpected end of file. Expected select content."),
"eof-in-frameset":
_("Unexpected end of file. Expected frameset content."),
"eof-in-script-in-script":
_("Unexpected end of file. Expected script content."),
"eof-in-foreign-lands":
_("Unexpected end of file. Expected foreign content"),
"non-void-element-with-trailing-solidus":
_("Trailing solidus not allowed on element %(name)s"),
"unexpected-html-element-in-foreign-content":
_("Element %(name)s not allowed in a non-html context"),
"unexpected-end-tag-before-html":
_("Unexpected end tag (%(name)s) before html."),
"XXX-undefined-error":
_("Undefined error (this sucks and should be fixed)"),
}
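# Each message is %-formatted against a dict of token data when an error is
# reported, e.g. E["expected-doctype-but-got-start-tag"] % {"name": "div"}.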
namespaces = {
"html": "http://www.w3.org/1999/xhtml",
"mathml": "http://www.w3.org/1998/Math/MathML",
"svg": "http://www.w3.org/2000/svg",
"xlink": "http://www.w3.org/1999/xlink",
"xml": "http://www.w3.org/XML/1998/namespace",
"xmlns": "http://www.w3.org/2000/xmlns/"
}
scopingElements = frozenset((
(namespaces["html"], "applet"),
(namespaces["html"], "caption"),
(namespaces["html"], "html"),
(namespaces["html"], "marquee"),
(namespaces["html"], "object"),
(namespaces["html"], "table"),
(namespaces["html"], "td"),
(namespaces["html"], "th"),
(namespaces["mathml"], "mi"),
(namespaces["mathml"], "mo"),
(namespaces["mathml"], "mn"),
(namespaces["mathml"], "ms"),
(namespaces["mathml"], "mtext"),
(namespaces["mathml"], "annotation-xml"),
(namespaces["svg"], "foreignObject"),
(namespaces["svg"], "desc"),
(namespaces["svg"], "title"),
))
formattingElements = frozenset((
(namespaces["html"], "a"),
(namespaces["html"], "b"),
(namespaces["html"], "big"),
(namespaces["html"], "code"),
(namespaces["html"], "em"),
(namespaces["html"], "font"),
(namespaces["html"], "i"),
(namespaces["html"], "nobr"),
(namespaces["html"], "s"),
(namespaces["html"], "small"),
(namespaces["html"], "strike"),
(namespaces["html"], "strong"),
(namespaces["html"], "tt"),
(namespaces["html"], "u")
))
specialElements = frozenset((
(namespaces["html"], "address"),
(namespaces["html"], "applet"),
(namespaces["html"], "area"),
(namespaces["html"], "article"),
(namespaces["html"], "aside"),
(namespaces["html"], "base"),
(namespaces["html"], "basefont"),
(namespaces["html"], "bgsound"),
(namespaces["html"], "blockquote"),
(namespaces["html"], "body"),
(namespaces["html"], "br"),
(namespaces["html"], "button"),
(namespaces["html"], "caption"),
(namespaces["html"], "center"),
(namespaces["html"], "col"),
(namespaces["html"], "colgroup"),
(namespaces["html"], "command"),
(namespaces["html"], "dd"),
(namespaces["html"], "details"),
(namespaces["html"], "dir"),
(namespaces["html"], "div"),
(namespaces["html"], "dl"),
(namespaces["html"], "dt"),
(namespaces["html"], "embed"),
(namespaces["html"], "fieldset"),
(namespaces["html"], "figure"),
(namespaces["html"], "footer"),
(namespaces["html"], "form"),
(namespaces["html"], "frame"),
(namespaces["html"], "frameset"),
(namespaces["html"], "h1"),
(namespaces["html"], "h2"),
(namespaces["html"], "h3"),
(namespaces["html"], "h4"),
(namespaces["html"], "h5"),
(namespaces["html"], "h6"),
(namespaces["html"], "head"),
(namespaces["html"], "header"),
(namespaces["html"], "hr"),
(namespaces["html"], "html"),
(namespaces["html"], "iframe"),
# Note that image is commented out in the spec as "this isn't an
# element that can end up on the stack, so it doesn't matter,"
(namespaces["html"], "image"),
(namespaces["html"], "img"),
(namespaces["html"], "input"),
(namespaces["html"], "isindex"),
(namespaces["html"], "li"),
(namespaces["html"], "link"),
(namespaces["html"], "listing"),
(namespaces["html"], "marquee"),
(namespaces["html"], "menu"),
(namespaces["html"], "meta"),
(namespaces["html"], "nav"),
(namespaces["html"], "noembed"),
(namespaces["html"], "noframes"),
(namespaces["html"], "noscript"),
(namespaces["html"], "object"),
(namespaces["html"], "ol"),
(namespaces["html"], "p"),
(namespaces["html"], "param"),
(namespaces["html"], "plaintext"),
(namespaces["html"], "pre"),
(namespaces["html"], "script"),
(namespaces["html"], "section"),
(namespaces["html"], "select"),
(namespaces["html"], "style"),
(namespaces["html"], "table"),
(namespaces["html"], "tbody"),
(namespaces["html"], "td"),
(namespaces["html"], "textarea"),
(namespaces["html"], "tfoot"),
(namespaces["html"], "th"),
(namespaces["html"], "thead"),
(namespaces["html"], "title"),
(namespaces["html"], "tr"),
(namespaces["html"], "ul"),
(namespaces["html"], "wbr"),
(namespaces["html"], "xmp"),
(namespaces["svg"], "foreignObject")
))
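# Integration points: positions inside foreign (MathML/SVG) content where the
# tree construction stage switches back to ordinary HTML parsing rules.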
htmlIntegrationPointElements = frozenset((
(namespaces["mathml"], "annotaion-xml"),
(namespaces["svg"], "foreignObject"),
(namespaces["svg"], "desc"),
(namespaces["svg"], "title")
))
mathmlTextIntegrationPointElements = frozenset((
(namespaces["mathml"], "mi"),
(namespaces["mathml"], "mo"),
(namespaces["mathml"], "mn"),
(namespaces["mathml"], "ms"),
(namespaces["mathml"], "mtext")
))
adjustForeignAttributes = {
"xlink:actuate": ("xlink", "actuate", namespaces["xlink"]),
"xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]),
"xlink:href": ("xlink", "href", namespaces["xlink"]),
"xlink:role": ("xlink", "role", namespaces["xlink"]),
"xlink:show": ("xlink", "show", namespaces["xlink"]),
"xlink:title": ("xlink", "title", namespaces["xlink"]),
"xlink:type": ("xlink", "type", namespaces["xlink"]),
"xml:base": ("xml", "base", namespaces["xml"]),
"xml:lang": ("xml", "lang", namespaces["xml"]),
"xml:space": ("xml", "space", namespaces["xml"]),
"xmlns": (None, "xmlns", namespaces["xmlns"]),
"xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"])
}
unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in
adjustForeignAttributes.items()])
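# The inverse mapping recovers the serialized qualified name, e.g.
# unadjustForeignAttributes[(namespaces["xlink"], "href")] == "xlink:href".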
spaceCharacters = frozenset((
"\t",
"\n",
"\u000C",
" ",
"\r"
))
tableInsertModeElements = frozenset((
"table",
"tbody",
"tfoot",
"thead",
"tr"
))
asciiLowercase = frozenset(string.ascii_lowercase)
asciiUppercase = frozenset(string.ascii_uppercase)
asciiLetters = frozenset(string.ascii_letters)
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)
asciiUpper2Lower = dict([(ord(c), ord(c.lower()))
for c in string.ascii_uppercase])
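# A translate() table mapping uppercase code points to lowercase, e.g.
# "ISINDEX".translate(asciiUpper2Lower) == "isindex".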
# Heading elements need to be ordered
headingElements = (
"h1",
"h2",
"h3",
"h4",
"h5",
"h6"
)
voidElements = frozenset((
"base",
"command",
"event-source",
"link",
"meta",
"hr",
"br",
"img",
"embed",
"param",
"area",
"col",
"input",
"source",
"track"
))
cdataElements = frozenset(('title', 'textarea'))
rcdataElements = frozenset((
'style',
'script',
'xmp',
'iframe',
'noembed',
'noframes',
'noscript'
))
booleanAttributes = {
"": frozenset(("irrelevant",)),
"style": frozenset(("scoped",)),
"img": frozenset(("ismap",)),
"audio": frozenset(("autoplay", "controls")),
"video": frozenset(("autoplay", "controls")),
"script": frozenset(("defer", "async")),
"details": frozenset(("open",)),
"datagrid": frozenset(("multiple", "disabled")),
"command": frozenset(("hidden", "disabled", "checked", "default")),
"hr": frozenset(("noshade")),
"menu": frozenset(("autosubmit",)),
"fieldset": frozenset(("disabled", "readonly")),
"option": frozenset(("disabled", "readonly", "selected")),
"optgroup": frozenset(("disabled", "readonly")),
"button": frozenset(("disabled", "autofocus")),
"input": frozenset(("disabled", "readonly", "required", "autofocus", "checked", "ismap")),
"select": frozenset(("disabled", "readonly", "autofocus", "multiple")),
"output": frozenset(("disabled", "readonly")),
}
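# The "" key lists attributes treated as boolean on any element; the serializer
# consults this table when minimizing boolean attributes (e.g.
# ``disabled="disabled"`` collapses to ``disabled``).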
# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
# therefore can't be a frozenset.
entitiesWindows1252 = (
8364, # 0x80 0x20AC EURO SIGN
65533, # 0x81 UNDEFINED
8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK
402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK
8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK
8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS
8224, # 0x86 0x2020 DAGGER
8225, # 0x87 0x2021 DOUBLE DAGGER
710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT
8240, # 0x89 0x2030 PER MILLE SIGN
352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON
8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK
338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE
65533, # 0x8D UNDEFINED
381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON
65533, # 0x8F UNDEFINED
65533, # 0x90 UNDEFINED
8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK
8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK
8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK
8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK
8226, # 0x95 0x2022 BULLET
8211, # 0x96 0x2013 EN DASH
8212, # 0x97 0x2014 EM DASH
732, # 0x98 0x02DC SMALL TILDE
8482, # 0x99 0x2122 TRADE MARK SIGN
353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON
8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE
65533, # 0x9D UNDEFINED
382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON
376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS
)
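# Lookup is by index relative to 0x80, e.g. an illegal 0x93 reference maps to
# entitiesWindows1252[0x93 - 0x80] == 8220 (U+201C LEFT DOUBLE QUOTATION MARK).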
xmlEntities = frozenset(('lt;', 'gt;', 'amp;', 'apos;', 'quot;'))
entities = {
"AElig": "\xc6",
"AElig;": "\xc6",
"AMP": "&",
"AMP;": "&",
"Aacute": "\xc1",
"Aacute;": "\xc1",
"Abreve;": "\u0102",
"Acirc": "\xc2",
"Acirc;": "\xc2",
"Acy;": "\u0410",
"Afr;": "\U0001d504",
"Agrave": "\xc0",
"Agrave;": "\xc0",
"Alpha;": "\u0391",
"Amacr;": "\u0100",
"And;": "\u2a53",
"Aogon;": "\u0104",
"Aopf;": "\U0001d538",
"ApplyFunction;": "\u2061",
"Aring": "\xc5",
"Aring;": "\xc5",
"Ascr;": "\U0001d49c",
"Assign;": "\u2254",
"Atilde": "\xc3",
"Atilde;": "\xc3",
"Auml": "\xc4",
"Auml;": "\xc4",
"Backslash;": "\u2216",
"Barv;": "\u2ae7",
"Barwed;": "\u2306",
"Bcy;": "\u0411",
"Because;": "\u2235",
"Bernoullis;": "\u212c",
"Beta;": "\u0392",
"Bfr;": "\U0001d505",
"Bopf;": "\U0001d539",
"Breve;": "\u02d8",
"Bscr;": "\u212c",
"Bumpeq;": "\u224e",
"CHcy;": "\u0427",
"COPY": "\xa9",
"COPY;": "\xa9",
"Cacute;": "\u0106",
"Cap;": "\u22d2",
"CapitalDifferentialD;": "\u2145",
"Cayleys;": "\u212d",
"Ccaron;": "\u010c",
"Ccedil": "\xc7",
"Ccedil;": "\xc7",
"Ccirc;": "\u0108",
"Cconint;": "\u2230",
"Cdot;": "\u010a",
"Cedilla;": "\xb8",
"CenterDot;": "\xb7",
"Cfr;": "\u212d",
"Chi;": "\u03a7",
"CircleDot;": "\u2299",
"CircleMinus;": "\u2296",
"CirclePlus;": "\u2295",
"CircleTimes;": "\u2297",
"ClockwiseContourIntegral;": "\u2232",
"CloseCurlyDoubleQuote;": "\u201d",
"CloseCurlyQuote;": "\u2019",
"Colon;": "\u2237",
"Colone;": "\u2a74",
"Congruent;": "\u2261",
"Conint;": "\u222f",
"ContourIntegral;": "\u222e",
"Copf;": "\u2102",
"Coproduct;": "\u2210",
"CounterClockwiseContourIntegral;": "\u2233",
"Cross;": "\u2a2f",
"Cscr;": "\U0001d49e",
"Cup;": "\u22d3",
"CupCap;": "\u224d",
"DD;": "\u2145",
"DDotrahd;": "\u2911",
"DJcy;": "\u0402",
"DScy;": "\u0405",
"DZcy;": "\u040f",
"Dagger;": "\u2021",
"Darr;": "\u21a1",
"Dashv;": "\u2ae4",
"Dcaron;": "\u010e",
"Dcy;": "\u0414",
"Del;": "\u2207",
"Delta;": "\u0394",
"Dfr;": "\U0001d507",
"DiacriticalAcute;": "\xb4",
"DiacriticalDot;": "\u02d9",
"DiacriticalDoubleAcute;": "\u02dd",
"DiacriticalGrave;": "`",
"DiacriticalTilde;": "\u02dc",
"Diamond;": "\u22c4",
"DifferentialD;": "\u2146",
"Dopf;": "\U0001d53b",
"Dot;": "\xa8",
"DotDot;": "\u20dc",
"DotEqual;": "\u2250",
"DoubleContourIntegral;": "\u222f",
"DoubleDot;": "\xa8",
"DoubleDownArrow;": "\u21d3",
"DoubleLeftArrow;": "\u21d0",
"DoubleLeftRightArrow;": "\u21d4",
"DoubleLeftTee;": "\u2ae4",
"DoubleLongLeftArrow;": "\u27f8",
"DoubleLongLeftRightArrow;": "\u27fa",
"DoubleLongRightArrow;": "\u27f9",
"DoubleRightArrow;": "\u21d2",
"DoubleRightTee;": "\u22a8",
"DoubleUpArrow;": "\u21d1",
"DoubleUpDownArrow;": "\u21d5",
"DoubleVerticalBar;": "\u2225",
"DownArrow;": "\u2193",
"DownArrowBar;": "\u2913",
"DownArrowUpArrow;": "\u21f5",
"DownBreve;": "\u0311",
"DownLeftRightVector;": "\u2950",
"DownLeftTeeVector;": "\u295e",
"DownLeftVector;": "\u21bd",
"DownLeftVectorBar;": "\u2956",
"DownRightTeeVector;": "\u295f",
"DownRightVector;": "\u21c1",
"DownRightVectorBar;": "\u2957",
"DownTee;": "\u22a4",
"DownTeeArrow;": "\u21a7",
"Downarrow;": "\u21d3",
"Dscr;": "\U0001d49f",
"Dstrok;": "\u0110",
"ENG;": "\u014a",
"ETH": "\xd0",
"ETH;": "\xd0",
"Eacute": "\xc9",
"Eacute;": "\xc9",
"Ecaron;": "\u011a",
"Ecirc": "\xca",
"Ecirc;": "\xca",
"Ecy;": "\u042d",
"Edot;": "\u0116",
"Efr;": "\U0001d508",
"Egrave": "\xc8",
"Egrave;": "\xc8",
"Element;": "\u2208",
"Emacr;": "\u0112",
"EmptySmallSquare;": "\u25fb",
"EmptyVerySmallSquare;": "\u25ab",
"Eogon;": "\u0118",
"Eopf;": "\U0001d53c",
"Epsilon;": "\u0395",
"Equal;": "\u2a75",
"EqualTilde;": "\u2242",
"Equilibrium;": "\u21cc",
"Escr;": "\u2130",
"Esim;": "\u2a73",
"Eta;": "\u0397",
"Euml": "\xcb",
"Euml;": "\xcb",
"Exists;": "\u2203",
"ExponentialE;": "\u2147",
"Fcy;": "\u0424",
"Ffr;": "\U0001d509",
"FilledSmallSquare;": "\u25fc",
"FilledVerySmallSquare;": "\u25aa",
"Fopf;": "\U0001d53d",
"ForAll;": "\u2200",
"Fouriertrf;": "\u2131",
"Fscr;": "\u2131",
"GJcy;": "\u0403",
"GT": ">",
"GT;": ">",
"Gamma;": "\u0393",
"Gammad;": "\u03dc",
"Gbreve;": "\u011e",
"Gcedil;": "\u0122",
"Gcirc;": "\u011c",
"Gcy;": "\u0413",
"Gdot;": "\u0120",
"Gfr;": "\U0001d50a",
"Gg;": "\u22d9",
"Gopf;": "\U0001d53e",
"GreaterEqual;": "\u2265",
"GreaterEqualLess;": "\u22db",
"GreaterFullEqual;": "\u2267",
"GreaterGreater;": "\u2aa2",
"GreaterLess;": "\u2277",
"GreaterSlantEqual;": "\u2a7e",
"GreaterTilde;": "\u2273",
"Gscr;": "\U0001d4a2",
"Gt;": "\u226b",
"HARDcy;": "\u042a",
"Hacek;": "\u02c7",
"Hat;": "^",
"Hcirc;": "\u0124",
"Hfr;": "\u210c",
"HilbertSpace;": "\u210b",
"Hopf;": "\u210d",
"HorizontalLine;": "\u2500",
"Hscr;": "\u210b",
"Hstrok;": "\u0126",
"HumpDownHump;": "\u224e",
"HumpEqual;": "\u224f",
"IEcy;": "\u0415",
"IJlig;": "\u0132",
"IOcy;": "\u0401",
"Iacute": "\xcd",
"Iacute;": "\xcd",
"Icirc": "\xce",
"Icirc;": "\xce",
"Icy;": "\u0418",
"Idot;": "\u0130",
"Ifr;": "\u2111",
"Igrave": "\xcc",
"Igrave;": "\xcc",
"Im;": "\u2111",
"Imacr;": "\u012a",
"ImaginaryI;": "\u2148",
"Implies;": "\u21d2",
"Int;": "\u222c",
"Integral;": "\u222b",
"Intersection;": "\u22c2",
"InvisibleComma;": "\u2063",
"InvisibleTimes;": "\u2062",
"Iogon;": "\u012e",
"Iopf;": "\U0001d540",
"Iota;": "\u0399",
"Iscr;": "\u2110",
"Itilde;": "\u0128",
"Iukcy;": "\u0406",
"Iuml": "\xcf",
"Iuml;": "\xcf",
"Jcirc;": "\u0134",
"Jcy;": "\u0419",
"Jfr;": "\U0001d50d",
"Jopf;": "\U0001d541",
"Jscr;": "\U0001d4a5",
"Jsercy;": "\u0408",
"Jukcy;": "\u0404",
"KHcy;": "\u0425",
"KJcy;": "\u040c",
"Kappa;": "\u039a",
"Kcedil;": "\u0136",
"Kcy;": "\u041a",
"Kfr;": "\U0001d50e",
"Kopf;": "\U0001d542",
"Kscr;": "\U0001d4a6",
"LJcy;": "\u0409",
"LT": "<",
"LT;": "<",
"Lacute;": "\u0139",
"Lambda;": "\u039b",
"Lang;": "\u27ea",
"Laplacetrf;": "\u2112",
"Larr;": "\u219e",
"Lcaron;": "\u013d",
"Lcedil;": "\u013b",
"Lcy;": "\u041b",
"LeftAngleBracket;": "\u27e8",
"LeftArrow;": "\u2190",
"LeftArrowBar;": "\u21e4",
"LeftArrowRightArrow;": "\u21c6",
"LeftCeiling;": "\u2308",
"LeftDoubleBracket;": "\u27e6",
"LeftDownTeeVector;": "\u2961",
"LeftDownVector;": "\u21c3",
"LeftDownVectorBar;": "\u2959",
"LeftFloor;": "\u230a",
"LeftRightArrow;": "\u2194",
"LeftRightVector;": "\u294e",
"LeftTee;": "\u22a3",
"LeftTeeArrow;": "\u21a4",
"LeftTeeVector;": "\u295a",
"LeftTriangle;": "\u22b2",
"LeftTriangleBar;": "\u29cf",
"LeftTriangleEqual;": "\u22b4",
"LeftUpDownVector;": "\u2951",
"LeftUpTeeVector;": "\u2960",
"LeftUpVector;": "\u21bf",
"LeftUpVectorBar;": "\u2958",
"LeftVector;": "\u21bc",
"LeftVectorBar;": "\u2952",
"Leftarrow;": "\u21d0",
"Leftrightarrow;": "\u21d4",
"LessEqualGreater;": "\u22da",
"LessFullEqual;": "\u2266",
"LessGreater;": "\u2276",
"LessLess;": "\u2aa1",
"LessSlantEqual;": "\u2a7d",
"LessTilde;": "\u2272",
"Lfr;": "\U0001d50f",
"Ll;": "\u22d8",
"Lleftarrow;": "\u21da",
"Lmidot;": "\u013f",
"LongLeftArrow;": "\u27f5",
"LongLeftRightArrow;": "\u27f7",
"LongRightArrow;": "\u27f6",
"Longleftarrow;": "\u27f8",
"Longleftrightarrow;": "\u27fa",
"Longrightarrow;": "\u27f9",
"Lopf;": "\U0001d543",
"LowerLeftArrow;": "\u2199",
"LowerRightArrow;": "\u2198",
"Lscr;": "\u2112",
"Lsh;": "\u21b0",
"Lstrok;": "\u0141",
"Lt;": "\u226a",
"Map;": "\u2905",
"Mcy;": "\u041c",
"MediumSpace;": "\u205f",
"Mellintrf;": "\u2133",
"Mfr;": "\U0001d510",
"MinusPlus;": "\u2213",
"Mopf;": "\U0001d544",
"Mscr;": "\u2133",
"Mu;": "\u039c",
"NJcy;": "\u040a",
"Nacute;": "\u0143",
"Ncaron;": "\u0147",
"Ncedil;": "\u0145",
"Ncy;": "\u041d",
"NegativeMediumSpace;": "\u200b",
"NegativeThickSpace;": "\u200b",
"NegativeThinSpace;": "\u200b",
"NegativeVeryThinSpace;": "\u200b",
"NestedGreaterGreater;": "\u226b",
"NestedLessLess;": "\u226a",
"NewLine;": "\n",
"Nfr;": "\U0001d511",
"NoBreak;": "\u2060",
"NonBreakingSpace;": "\xa0",
"Nopf;": "\u2115",
"Not;": "\u2aec",
"NotCongruent;": "\u2262",
"NotCupCap;": "\u226d",
"NotDoubleVerticalBar;": "\u2226",
"NotElement;": "\u2209",
"NotEqual;": "\u2260",
"NotEqualTilde;": "\u2242\u0338",
"NotExists;": "\u2204",
"NotGreater;": "\u226f",
"NotGreaterEqual;": "\u2271",
"NotGreaterFullEqual;": "\u2267\u0338",
"NotGreaterGreater;": "\u226b\u0338",
"NotGreaterLess;": "\u2279",
"NotGreaterSlantEqual;": "\u2a7e\u0338",
"NotGreaterTilde;": "\u2275",
"NotHumpDownHump;": "\u224e\u0338",
"NotHumpEqual;": "\u224f\u0338",
"NotLeftTriangle;": "\u22ea",
"NotLeftTriangleBar;": "\u29cf\u0338",
"NotLeftTriangleEqual;": "\u22ec",
"NotLess;": "\u226e",
"NotLessEqual;": "\u2270",
"NotLessGreater;": "\u2278",
"NotLessLess;": "\u226a\u0338",
"NotLessSlantEqual;": "\u2a7d\u0338",
"NotLessTilde;": "\u2274",
"NotNestedGreaterGreater;": "\u2aa2\u0338",
"NotNestedLessLess;": "\u2aa1\u0338",
"NotPrecedes;": "\u2280",
"NotPrecedesEqual;": "\u2aaf\u0338",
"NotPrecedesSlantEqual;": "\u22e0",
"NotReverseElement;": "\u220c",
"NotRightTriangle;": "\u22eb",
"NotRightTriangleBar;": "\u29d0\u0338",
"NotRightTriangleEqual;": "\u22ed",
"NotSquareSubset;": "\u228f\u0338",
"NotSquareSubsetEqual;": "\u22e2",
"NotSquareSuperset;": "\u2290\u0338",
"NotSquareSupersetEqual;": "\u22e3",
"NotSubset;": "\u2282\u20d2",
"NotSubsetEqual;": "\u2288",
"NotSucceeds;": "\u2281",
"NotSucceedsEqual;": "\u2ab0\u0338",
"NotSucceedsSlantEqual;": "\u22e1",
"NotSucceedsTilde;": "\u227f\u0338",
"NotSuperset;": "\u2283\u20d2",
"NotSupersetEqual;": "\u2289",
"NotTilde;": "\u2241",
"NotTildeEqual;": "\u2244",
"NotTildeFullEqual;": "\u2247",
"NotTildeTilde;": "\u2249",
"NotVerticalBar;": "\u2224",
"Nscr;": "\U0001d4a9",
"Ntilde": "\xd1",
"Ntilde;": "\xd1",
"Nu;": "\u039d",
"OElig;": "\u0152",
"Oacute": "\xd3",
"Oacute;": "\xd3",
"Ocirc": "\xd4",
"Ocirc;": "\xd4",
"Ocy;": "\u041e",
"Odblac;": "\u0150",
"Ofr;": "\U0001d512",
"Ograve": "\xd2",
"Ograve;": "\xd2",
"Omacr;": "\u014c",
"Omega;": "\u03a9",
"Omicron;": "\u039f",
"Oopf;": "\U0001d546",
"OpenCurlyDoubleQuote;": "\u201c",
"OpenCurlyQuote;": "\u2018",
"Or;": "\u2a54",
"Oscr;": "\U0001d4aa",
"Oslash": "\xd8",
"Oslash;": "\xd8",
"Otilde": "\xd5",
"Otilde;": "\xd5",
"Otimes;": "\u2a37",
"Ouml": "\xd6",
"Ouml;": "\xd6",
"OverBar;": "\u203e",
"OverBrace;": "\u23de",
"OverBracket;": "\u23b4",
"OverParenthesis;": "\u23dc",
"PartialD;": "\u2202",
"Pcy;": "\u041f",
"Pfr;": "\U0001d513",
"Phi;": "\u03a6",
"Pi;": "\u03a0",
"PlusMinus;": "\xb1",
"Poincareplane;": "\u210c",
"Popf;": "\u2119",
"Pr;": "\u2abb",
"Precedes;": "\u227a",
"PrecedesEqual;": "\u2aaf",
"PrecedesSlantEqual;": "\u227c",
"PrecedesTilde;": "\u227e",
"Prime;": "\u2033",
"Product;": "\u220f",
"Proportion;": "\u2237",
"Proportional;": "\u221d",
"Pscr;": "\U0001d4ab",
"Psi;": "\u03a8",
"QUOT": "\"",
"QUOT;": "\"",
"Qfr;": "\U0001d514",
"Qopf;": "\u211a",
"Qscr;": "\U0001d4ac",
"RBarr;": "\u2910",
"REG": "\xae",
"REG;": "\xae",
"Racute;": "\u0154",
"Rang;": "\u27eb",
"Rarr;": "\u21a0",
"Rarrtl;": "\u2916",
"Rcaron;": "\u0158",
"Rcedil;": "\u0156",
"Rcy;": "\u0420",
"Re;": "\u211c",
"ReverseElement;": "\u220b",
"ReverseEquilibrium;": "\u21cb",
"ReverseUpEquilibrium;": "\u296f",
"Rfr;": "\u211c",
"Rho;": "\u03a1",
"RightAngleBracket;": "\u27e9",
"RightArrow;": "\u2192",
"RightArrowBar;": "\u21e5",
"RightArrowLeftArrow;": "\u21c4",
"RightCeiling;": "\u2309",
"RightDoubleBracket;": "\u27e7",
"RightDownTeeVector;": "\u295d",
"RightDownVector;": "\u21c2",
"RightDownVectorBar;": "\u2955",
"RightFloor;": "\u230b",
"RightTee;": "\u22a2",
"RightTeeArrow;": "\u21a6",
"RightTeeVector;": "\u295b",
"RightTriangle;": "\u22b3",
"RightTriangleBar;": "\u29d0",
"RightTriangleEqual;": "\u22b5",
"RightUpDownVector;": "\u294f",
"RightUpTeeVector;": "\u295c",
"RightUpVector;": "\u21be",
"RightUpVectorBar;": "\u2954",
"RightVector;": "\u21c0",
"RightVectorBar;": "\u2953",
"Rightarrow;": "\u21d2",
"Ropf;": "\u211d",
"RoundImplies;": "\u2970",
"Rrightarrow;": "\u21db",
"Rscr;": "\u211b",
"Rsh;": "\u21b1",
"RuleDelayed;": "\u29f4",
"SHCHcy;": "\u0429",
"SHcy;": "\u0428",
"SOFTcy;": "\u042c",
"Sacute;": "\u015a",
"Sc;": "\u2abc",
"Scaron;": "\u0160",
"Scedil;": "\u015e",
"Scirc;": "\u015c",
"Scy;": "\u0421",
"Sfr;": "\U0001d516",
"ShortDownArrow;": "\u2193",
"ShortLeftArrow;": "\u2190",
"ShortRightArrow;": "\u2192",
"ShortUpArrow;": "\u2191",
"Sigma;": "\u03a3",
"SmallCircle;": "\u2218",
"Sopf;": "\U0001d54a",
"Sqrt;": "\u221a",
"Square;": "\u25a1",
"SquareIntersection;": "\u2293",
"SquareSubset;": "\u228f",
"SquareSubsetEqual;": "\u2291",
"SquareSuperset;": "\u2290",
"SquareSupersetEqual;": "\u2292",
"SquareUnion;": "\u2294",
"Sscr;": "\U0001d4ae",
"Star;": "\u22c6",
"Sub;": "\u22d0",
"Subset;": "\u22d0",
"SubsetEqual;": "\u2286",
"Succeeds;": "\u227b",
"SucceedsEqual;": "\u2ab0",
"SucceedsSlantEqual;": "\u227d",
"SucceedsTilde;": "\u227f",
"SuchThat;": "\u220b",
"Sum;": "\u2211",
"Sup;": "\u22d1",
"Superset;": "\u2283",
"SupersetEqual;": "\u2287",
"Supset;": "\u22d1",
"THORN": "\xde",
"THORN;": "\xde",
"TRADE;": "\u2122",
"TSHcy;": "\u040b",
"TScy;": "\u0426",
"Tab;": "\t",
"Tau;": "\u03a4",
"Tcaron;": "\u0164",
"Tcedil;": "\u0162",
"Tcy;": "\u0422",
"Tfr;": "\U0001d517",
"Therefore;": "\u2234",
"Theta;": "\u0398",
"ThickSpace;": "\u205f\u200a",
"ThinSpace;": "\u2009",
"Tilde;": "\u223c",
"TildeEqual;": "\u2243",
"TildeFullEqual;": "\u2245",
"TildeTilde;": "\u2248",
"Topf;": "\U0001d54b",
"TripleDot;": "\u20db",
"Tscr;": "\U0001d4af",
"Tstrok;": "\u0166",
"Uacute": "\xda",
"Uacute;": "\xda",
"Uarr;": "\u219f",
"Uarrocir;": "\u2949",
"Ubrcy;": "\u040e",
"Ubreve;": "\u016c",
"Ucirc": "\xdb",
"Ucirc;": "\xdb",
"Ucy;": "\u0423",
"Udblac;": "\u0170",
"Ufr;": "\U0001d518",
"Ugrave": "\xd9",
"Ugrave;": "\xd9",
"Umacr;": "\u016a",
"UnderBar;": "_",
"UnderBrace;": "\u23df",
"UnderBracket;": "\u23b5",
"UnderParenthesis;": "\u23dd",
"Union;": "\u22c3",
"UnionPlus;": "\u228e",
"Uogon;": "\u0172",
"Uopf;": "\U0001d54c",
"UpArrow;": "\u2191",
"UpArrowBar;": "\u2912",
"UpArrowDownArrow;": "\u21c5",
"UpDownArrow;": "\u2195",
"UpEquilibrium;": "\u296e",
"UpTee;": "\u22a5",
"UpTeeArrow;": "\u21a5",
"Uparrow;": "\u21d1",
"Updownarrow;": "\u21d5",
"UpperLeftArrow;": "\u2196",
"UpperRightArrow;": "\u2197",
"Upsi;": "\u03d2",
"Upsilon;": "\u03a5",
"Uring;": "\u016e",
"Uscr;": "\U0001d4b0",
"Utilde;": "\u0168",
"Uuml": "\xdc",
"Uuml;": "\xdc",
"VDash;": "\u22ab",
"Vbar;": "\u2aeb",
"Vcy;": "\u0412",
"Vdash;": "\u22a9",
"Vdashl;": "\u2ae6",
"Vee;": "\u22c1",
"Verbar;": "\u2016",
"Vert;": "\u2016",
"VerticalBar;": "\u2223",
"VerticalLine;": "|",
"VerticalSeparator;": "\u2758",
"VerticalTilde;": "\u2240",
"VeryThinSpace;": "\u200a",
"Vfr;": "\U0001d519",
"Vopf;": "\U0001d54d",
"Vscr;": "\U0001d4b1",
"Vvdash;": "\u22aa",
"Wcirc;": "\u0174",
"Wedge;": "\u22c0",
"Wfr;": "\U0001d51a",
"Wopf;": "\U0001d54e",
"Wscr;": "\U0001d4b2",
"Xfr;": "\U0001d51b",
"Xi;": "\u039e",
"Xopf;": "\U0001d54f",
"Xscr;": "\U0001d4b3",
"YAcy;": "\u042f",
"YIcy;": "\u0407",
"YUcy;": "\u042e",
"Yacute": "\xdd",
"Yacute;": "\xdd",
"Ycirc;": "\u0176",
"Ycy;": "\u042b",
"Yfr;": "\U0001d51c",
"Yopf;": "\U0001d550",
"Yscr;": "\U0001d4b4",
"Yuml;": "\u0178",
"ZHcy;": "\u0416",
"Zacute;": "\u0179",
"Zcaron;": "\u017d",
"Zcy;": "\u0417",
"Zdot;": "\u017b",
"ZeroWidthSpace;": "\u200b",
"Zeta;": "\u0396",
"Zfr;": "\u2128",
"Zopf;": "\u2124",
"Zscr;": "\U0001d4b5",
"aacute": "\xe1",
"aacute;": "\xe1",
"abreve;": "\u0103",
"ac;": "\u223e",
"acE;": "\u223e\u0333",
"acd;": "\u223f",
"acirc": "\xe2",
"acirc;": "\xe2",
"acute": "\xb4",
"acute;": "\xb4",
"acy;": "\u0430",
"aelig": "\xe6",
"aelig;": "\xe6",
"af;": "\u2061",
"afr;": "\U0001d51e",
"agrave": "\xe0",
"agrave;": "\xe0",
"alefsym;": "\u2135",
"aleph;": "\u2135",
"alpha;": "\u03b1",
"amacr;": "\u0101",
"amalg;": "\u2a3f",
"amp": "&",
"amp;": "&",
"and;": "\u2227",
"andand;": "\u2a55",
"andd;": "\u2a5c",
"andslope;": "\u2a58",
"andv;": "\u2a5a",
"ang;": "\u2220",
"ange;": "\u29a4",
"angle;": "\u2220",
"angmsd;": "\u2221",
"angmsdaa;": "\u29a8",
"angmsdab;": "\u29a9",
"angmsdac;": "\u29aa",
"angmsdad;": "\u29ab",
"angmsdae;": "\u29ac",
"angmsdaf;": "\u29ad",
"angmsdag;": "\u29ae",
"angmsdah;": "\u29af",
"angrt;": "\u221f",
"angrtvb;": "\u22be",
"angrtvbd;": "\u299d",
"angsph;": "\u2222",
"angst;": "\xc5",
"angzarr;": "\u237c",
"aogon;": "\u0105",
"aopf;": "\U0001d552",
"ap;": "\u2248",
"apE;": "\u2a70",
"apacir;": "\u2a6f",
"ape;": "\u224a",
"apid;": "\u224b",
"apos;": "'",
"approx;": "\u2248",
"approxeq;": "\u224a",
"aring": "\xe5",
"aring;": "\xe5",
"ascr;": "\U0001d4b6",
"ast;": "*",
"asymp;": "\u2248",
"asympeq;": "\u224d",
"atilde": "\xe3",
"atilde;": "\xe3",
"auml": "\xe4",
"auml;": "\xe4",
"awconint;": "\u2233",
"awint;": "\u2a11",
"bNot;": "\u2aed",
"backcong;": "\u224c",
"backepsilon;": "\u03f6",
"backprime;": "\u2035",
"backsim;": "\u223d",
"backsimeq;": "\u22cd",
"barvee;": "\u22bd",
"barwed;": "\u2305",
"barwedge;": "\u2305",
"bbrk;": "\u23b5",
"bbrktbrk;": "\u23b6",
"bcong;": "\u224c",
"bcy;": "\u0431",
"bdquo;": "\u201e",
"becaus;": "\u2235",
"because;": "\u2235",
"bemptyv;": "\u29b0",
"bepsi;": "\u03f6",
"bernou;": "\u212c",
"beta;": "\u03b2",
"beth;": "\u2136",
"between;": "\u226c",
"bfr;": "\U0001d51f",
"bigcap;": "\u22c2",
"bigcirc;": "\u25ef",
"bigcup;": "\u22c3",
"bigodot;": "\u2a00",
"bigoplus;": "\u2a01",
"bigotimes;": "\u2a02",
"bigsqcup;": "\u2a06",
"bigstar;": "\u2605",
"bigtriangledown;": "\u25bd",
"bigtriangleup;": "\u25b3",
"biguplus;": "\u2a04",
"bigvee;": "\u22c1",
"bigwedge;": "\u22c0",
"bkarow;": "\u290d",
"blacklozenge;": "\u29eb",
"blacksquare;": "\u25aa",
"blacktriangle;": "\u25b4",
"blacktriangledown;": "\u25be",
"blacktriangleleft;": "\u25c2",
"blacktriangleright;": "\u25b8",
"blank;": "\u2423",
"blk12;": "\u2592",
"blk14;": "\u2591",
"blk34;": "\u2593",
"block;": "\u2588",
"bne;": "=\u20e5",
"bnequiv;": "\u2261\u20e5",
"bnot;": "\u2310",
"bopf;": "\U0001d553",
"bot;": "\u22a5",
"bottom;": "\u22a5",
"bowtie;": "\u22c8",
"boxDL;": "\u2557",
"boxDR;": "\u2554",
"boxDl;": "\u2556",
"boxDr;": "\u2553",
"boxH;": "\u2550",
"boxHD;": "\u2566",
"boxHU;": "\u2569",
"boxHd;": "\u2564",
"boxHu;": "\u2567",
"boxUL;": "\u255d",
"boxUR;": "\u255a",
"boxUl;": "\u255c",
"boxUr;": "\u2559",
"boxV;": "\u2551",
"boxVH;": "\u256c",
"boxVL;": "\u2563",
"boxVR;": "\u2560",
"boxVh;": "\u256b",
"boxVl;": "\u2562",
"boxVr;": "\u255f",
"boxbox;": "\u29c9",
"boxdL;": "\u2555",
"boxdR;": "\u2552",
"boxdl;": "\u2510",
"boxdr;": "\u250c",
"boxh;": "\u2500",
"boxhD;": "\u2565",
"boxhU;": "\u2568",
"boxhd;": "\u252c",
"boxhu;": "\u2534",
"boxminus;": "\u229f",
"boxplus;": "\u229e",
"boxtimes;": "\u22a0",
"boxuL;": "\u255b",
"boxuR;": "\u2558",
"boxul;": "\u2518",
"boxur;": "\u2514",
"boxv;": "\u2502",
"boxvH;": "\u256a",
"boxvL;": "\u2561",
"boxvR;": "\u255e",
"boxvh;": "\u253c",
"boxvl;": "\u2524",
"boxvr;": "\u251c",
"bprime;": "\u2035",
"breve;": "\u02d8",
"brvbar": "\xa6",
"brvbar;": "\xa6",
"bscr;": "\U0001d4b7",
"bsemi;": "\u204f",
"bsim;": "\u223d",
"bsime;": "\u22cd",
"bsol;": "\\",
"bsolb;": "\u29c5",
"bsolhsub;": "\u27c8",
"bull;": "\u2022",
"bullet;": "\u2022",
"bump;": "\u224e",
"bumpE;": "\u2aae",
"bumpe;": "\u224f",
"bumpeq;": "\u224f",
"cacute;": "\u0107",
"cap;": "\u2229",
"capand;": "\u2a44",
"capbrcup;": "\u2a49",
"capcap;": "\u2a4b",
"capcup;": "\u2a47",
"capdot;": "\u2a40",
"caps;": "\u2229\ufe00",
"caret;": "\u2041",
"caron;": "\u02c7",
"ccaps;": "\u2a4d",
"ccaron;": "\u010d",
"ccedil": "\xe7",
"ccedil;": "\xe7",
"ccirc;": "\u0109",
"ccups;": "\u2a4c",
"ccupssm;": "\u2a50",
"cdot;": "\u010b",
"cedil": "\xb8",
"cedil;": "\xb8",
"cemptyv;": "\u29b2",
"cent": "\xa2",
"cent;": "\xa2",
"centerdot;": "\xb7",
"cfr;": "\U0001d520",
"chcy;": "\u0447",
"check;": "\u2713",
"checkmark;": "\u2713",
"chi;": "\u03c7",
"cir;": "\u25cb",
"cirE;": "\u29c3",
"circ;": "\u02c6",
"circeq;": "\u2257",
"circlearrowleft;": "\u21ba",
"circlearrowright;": "\u21bb",
"circledR;": "\xae",
"circledS;": "\u24c8",
"circledast;": "\u229b",
"circledcirc;": "\u229a",
"circleddash;": "\u229d",
"cire;": "\u2257",
"cirfnint;": "\u2a10",
"cirmid;": "\u2aef",
"cirscir;": "\u29c2",
"clubs;": "\u2663",
"clubsuit;": "\u2663",
"colon;": ":",
"colone;": "\u2254",
"coloneq;": "\u2254",
"comma;": ",",
"commat;": "@",
"comp;": "\u2201",
"compfn;": "\u2218",
"complement;": "\u2201",
"complexes;": "\u2102",
"cong;": "\u2245",
"congdot;": "\u2a6d",
"conint;": "\u222e",
"copf;": "\U0001d554",
"coprod;": "\u2210",
"copy": "\xa9",
"copy;": "\xa9",
"copysr;": "\u2117",
"crarr;": "\u21b5",
"cross;": "\u2717",
"cscr;": "\U0001d4b8",
"csub;": "\u2acf",
"csube;": "\u2ad1",
"csup;": "\u2ad0",
"csupe;": "\u2ad2",
"ctdot;": "\u22ef",
"cudarrl;": "\u2938",
"cudarrr;": "\u2935",
"cuepr;": "\u22de",
"cuesc;": "\u22df",
"cularr;": "\u21b6",
"cularrp;": "\u293d",
"cup;": "\u222a",
"cupbrcap;": "\u2a48",
"cupcap;": "\u2a46",
"cupcup;": "\u2a4a",
"cupdot;": "\u228d",
"cupor;": "\u2a45",
"cups;": "\u222a\ufe00",
"curarr;": "\u21b7",
"curarrm;": "\u293c",
"curlyeqprec;": "\u22de",
"curlyeqsucc;": "\u22df",
"curlyvee;": "\u22ce",
"curlywedge;": "\u22cf",
"curren": "\xa4",
"curren;": "\xa4",
"curvearrowleft;": "\u21b6",
"curvearrowright;": "\u21b7",
"cuvee;": "\u22ce",
"cuwed;": "\u22cf",
"cwconint;": "\u2232",
"cwint;": "\u2231",
"cylcty;": "\u232d",
"dArr;": "\u21d3",
"dHar;": "\u2965",
"dagger;": "\u2020",
"daleth;": "\u2138",
"darr;": "\u2193",
"dash;": "\u2010",
"dashv;": "\u22a3",
"dbkarow;": "\u290f",
"dblac;": "\u02dd",
"dcaron;": "\u010f",
"dcy;": "\u0434",
"dd;": "\u2146",
"ddagger;": "\u2021",
"ddarr;": "\u21ca",
"ddotseq;": "\u2a77",
"deg": "\xb0",
"deg;": "\xb0",
"delta;": "\u03b4",
"demptyv;": "\u29b1",
"dfisht;": "\u297f",
"dfr;": "\U0001d521",
"dharl;": "\u21c3",
"dharr;": "\u21c2",
"diam;": "\u22c4",
"diamond;": "\u22c4",
"diamondsuit;": "\u2666",
"diams;": "\u2666",
"die;": "\xa8",
"digamma;": "\u03dd",
"disin;": "\u22f2",
"div;": "\xf7",
"divide": "\xf7",
"divide;": "\xf7",
"divideontimes;": "\u22c7",
"divonx;": "\u22c7",
"djcy;": "\u0452",
"dlcorn;": "\u231e",
"dlcrop;": "\u230d",
"dollar;": "$",
"dopf;": "\U0001d555",
"dot;": "\u02d9",
"doteq;": "\u2250",
"doteqdot;": "\u2251",
"dotminus;": "\u2238",
"dotplus;": "\u2214",
"dotsquare;": "\u22a1",
"doublebarwedge;": "\u2306",
"downarrow;": "\u2193",
"downdownarrows;": "\u21ca",
"downharpoonleft;": "\u21c3",
"downharpoonright;": "\u21c2",
"drbkarow;": "\u2910",
"drcorn;": "\u231f",
"drcrop;": "\u230c",
"dscr;": "\U0001d4b9",
"dscy;": "\u0455",
"dsol;": "\u29f6",
"dstrok;": "\u0111",
"dtdot;": "\u22f1",
"dtri;": "\u25bf",
"dtrif;": "\u25be",
"duarr;": "\u21f5",
"duhar;": "\u296f",
"dwangle;": "\u29a6",
"dzcy;": "\u045f",
"dzigrarr;": "\u27ff",
"eDDot;": "\u2a77",
"eDot;": "\u2251",
"eacute": "\xe9",
"eacute;": "\xe9",
"easter;": "\u2a6e",
"ecaron;": "\u011b",
"ecir;": "\u2256",
"ecirc": "\xea",
"ecirc;": "\xea",
"ecolon;": "\u2255",
"ecy;": "\u044d",
"edot;": "\u0117",
"ee;": "\u2147",
"efDot;": "\u2252",
"efr;": "\U0001d522",
"eg;": "\u2a9a",
"egrave": "\xe8",
"egrave;": "\xe8",
"egs;": "\u2a96",
"egsdot;": "\u2a98",
"el;": "\u2a99",
"elinters;": "\u23e7",
"ell;": "\u2113",
"els;": "\u2a95",
"elsdot;": "\u2a97",
"emacr;": "\u0113",
"empty;": "\u2205",
"emptyset;": "\u2205",
"emptyv;": "\u2205",
"emsp13;": "\u2004",
"emsp14;": "\u2005",
"emsp;": "\u2003",
"eng;": "\u014b",
"ensp;": "\u2002",
"eogon;": "\u0119",
"eopf;": "\U0001d556",
"epar;": "\u22d5",
"eparsl;": "\u29e3",
"eplus;": "\u2a71",
"epsi;": "\u03b5",
"epsilon;": "\u03b5",
"epsiv;": "\u03f5",
"eqcirc;": "\u2256",
"eqcolon;": "\u2255",
"eqsim;": "\u2242",
"eqslantgtr;": "\u2a96",
"eqslantless;": "\u2a95",
"equals;": "=",
"equest;": "\u225f",
"equiv;": "\u2261",
"equivDD;": "\u2a78",
"eqvparsl;": "\u29e5",
"erDot;": "\u2253",
"erarr;": "\u2971",
"escr;": "\u212f",
"esdot;": "\u2250",
"esim;": "\u2242",
"eta;": "\u03b7",
"eth": "\xf0",
"eth;": "\xf0",
"euml": "\xeb",
"euml;": "\xeb",
"euro;": "\u20ac",
"excl;": "!",
"exist;": "\u2203",
"expectation;": "\u2130",
"exponentiale;": "\u2147",
"fallingdotseq;": "\u2252",
"fcy;": "\u0444",
"female;": "\u2640",
"ffilig;": "\ufb03",
"fflig;": "\ufb00",
"ffllig;": "\ufb04",
"ffr;": "\U0001d523",
"filig;": "\ufb01",
"fjlig;": "fj",
"flat;": "\u266d",
"fllig;": "\ufb02",
"fltns;": "\u25b1",
"fnof;": "\u0192",
"fopf;": "\U0001d557",
"forall;": "\u2200",
"fork;": "\u22d4",
"forkv;": "\u2ad9",
"fpartint;": "\u2a0d",
"frac12": "\xbd",
"frac12;": "\xbd",
"frac13;": "\u2153",
"frac14": "\xbc",
"frac14;": "\xbc",
"frac15;": "\u2155",
"frac16;": "\u2159",
"frac18;": "\u215b",
"frac23;": "\u2154",
"frac25;": "\u2156",
"frac34": "\xbe",
"frac34;": "\xbe",
"frac35;": "\u2157",
"frac38;": "\u215c",
"frac45;": "\u2158",
"frac56;": "\u215a",
"frac58;": "\u215d",
"frac78;": "\u215e",
"frasl;": "\u2044",
"frown;": "\u2322",
"fscr;": "\U0001d4bb",
"gE;": "\u2267",
"gEl;": "\u2a8c",
"gacute;": "\u01f5",
"gamma;": "\u03b3",
"gammad;": "\u03dd",
"gap;": "\u2a86",
"gbreve;": "\u011f",
"gcirc;": "\u011d",
"gcy;": "\u0433",
"gdot;": "\u0121",
"ge;": "\u2265",
"gel;": "\u22db",
"geq;": "\u2265",
"geqq;": "\u2267",
"geqslant;": "\u2a7e",
"ges;": "\u2a7e",
"gescc;": "\u2aa9",
"gesdot;": "\u2a80",
"gesdoto;": "\u2a82",
"gesdotol;": "\u2a84",
"gesl;": "\u22db\ufe00",
"gesles;": "\u2a94",
"gfr;": "\U0001d524",
"gg;": "\u226b",
"ggg;": "\u22d9",
"gimel;": "\u2137",
"gjcy;": "\u0453",
"gl;": "\u2277",
"glE;": "\u2a92",
"gla;": "\u2aa5",
"glj;": "\u2aa4",
"gnE;": "\u2269",
"gnap;": "\u2a8a",
"gnapprox;": "\u2a8a",
"gne;": "\u2a88",
"gneq;": "\u2a88",
"gneqq;": "\u2269",
"gnsim;": "\u22e7",
"gopf;": "\U0001d558",
"grave;": "`",
"gscr;": "\u210a",
"gsim;": "\u2273",
"gsime;": "\u2a8e",
"gsiml;": "\u2a90",
"gt": ">",
"gt;": ">",
"gtcc;": "\u2aa7",
"gtcir;": "\u2a7a",
"gtdot;": "\u22d7",
"gtlPar;": "\u2995",
"gtquest;": "\u2a7c",
"gtrapprox;": "\u2a86",
"gtrarr;": "\u2978",
"gtrdot;": "\u22d7",
"gtreqless;": "\u22db",
"gtreqqless;": "\u2a8c",
"gtrless;": "\u2277",
"gtrsim;": "\u2273",
"gvertneqq;": "\u2269\ufe00",
"gvnE;": "\u2269\ufe00",
"hArr;": "\u21d4",
"hairsp;": "\u200a",
"half;": "\xbd",
"hamilt;": "\u210b",
"hardcy;": "\u044a",
"harr;": "\u2194",
"harrcir;": "\u2948",
"harrw;": "\u21ad",
"hbar;": "\u210f",
"hcirc;": "\u0125",
"hearts;": "\u2665",
"heartsuit;": "\u2665",
"hellip;": "\u2026",
"hercon;": "\u22b9",
"hfr;": "\U0001d525",
"hksearow;": "\u2925",
"hkswarow;": "\u2926",
"hoarr;": "\u21ff",
"homtht;": "\u223b",
"hookleftarrow;": "\u21a9",
"hookrightarrow;": "\u21aa",
"hopf;": "\U0001d559",
"horbar;": "\u2015",
"hscr;": "\U0001d4bd",
"hslash;": "\u210f",
"hstrok;": "\u0127",
"hybull;": "\u2043",
"hyphen;": "\u2010",
"iacute": "\xed",
"iacute;": "\xed",
"ic;": "\u2063",
"icirc": "\xee",
"icirc;": "\xee",
"icy;": "\u0438",
"iecy;": "\u0435",
"iexcl": "\xa1",
"iexcl;": "\xa1",
"iff;": "\u21d4",
"ifr;": "\U0001d526",
"igrave": "\xec",
"igrave;": "\xec",
"ii;": "\u2148",
"iiiint;": "\u2a0c",
"iiint;": "\u222d",
"iinfin;": "\u29dc",
"iiota;": "\u2129",
"ijlig;": "\u0133",
"imacr;": "\u012b",
"image;": "\u2111",
"imagline;": "\u2110",
"imagpart;": "\u2111",
"imath;": "\u0131",
"imof;": "\u22b7",
"imped;": "\u01b5",
"in;": "\u2208",
"incare;": "\u2105",
"infin;": "\u221e",
"infintie;": "\u29dd",
"inodot;": "\u0131",
"int;": "\u222b",
"intcal;": "\u22ba",
"integers;": "\u2124",
"intercal;": "\u22ba",
"intlarhk;": "\u2a17",
"intprod;": "\u2a3c",
"iocy;": "\u0451",
"iogon;": "\u012f",
"iopf;": "\U0001d55a",
"iota;": "\u03b9",
"iprod;": "\u2a3c",
"iquest": "\xbf",
"iquest;": "\xbf",
"iscr;": "\U0001d4be",
"isin;": "\u2208",
"isinE;": "\u22f9",
"isindot;": "\u22f5",
"isins;": "\u22f4",
"isinsv;": "\u22f3",
"isinv;": "\u2208",
"it;": "\u2062",
"itilde;": "\u0129",
"iukcy;": "\u0456",
"iuml": "\xef",
"iuml;": "\xef",
"jcirc;": "\u0135",
"jcy;": "\u0439",
"jfr;": "\U0001d527",
"jmath;": "\u0237",
"jopf;": "\U0001d55b",
"jscr;": "\U0001d4bf",
"jsercy;": "\u0458",
"jukcy;": "\u0454",
"kappa;": "\u03ba",
"kappav;": "\u03f0",
"kcedil;": "\u0137",
"kcy;": "\u043a",
"kfr;": "\U0001d528",
"kgreen;": "\u0138",
"khcy;": "\u0445",
"kjcy;": "\u045c",
"kopf;": "\U0001d55c",
"kscr;": "\U0001d4c0",
"lAarr;": "\u21da",
"lArr;": "\u21d0",
"lAtail;": "\u291b",
"lBarr;": "\u290e",
"lE;": "\u2266",
"lEg;": "\u2a8b",
"lHar;": "\u2962",
"lacute;": "\u013a",
"laemptyv;": "\u29b4",
"lagran;": "\u2112",
"lambda;": "\u03bb",
"lang;": "\u27e8",
"langd;": "\u2991",
"langle;": "\u27e8",
"lap;": "\u2a85",
"laquo": "\xab",
"laquo;": "\xab",
"larr;": "\u2190",
"larrb;": "\u21e4",
"larrbfs;": "\u291f",
"larrfs;": "\u291d",
"larrhk;": "\u21a9",
"larrlp;": "\u21ab",
"larrpl;": "\u2939",
"larrsim;": "\u2973",
"larrtl;": "\u21a2",
"lat;": "\u2aab",
"latail;": "\u2919",
"late;": "\u2aad",
"lates;": "\u2aad\ufe00",
"lbarr;": "\u290c",
"lbbrk;": "\u2772",
"lbrace;": "{",
"lbrack;": "[",
"lbrke;": "\u298b",
"lbrksld;": "\u298f",
"lbrkslu;": "\u298d",
"lcaron;": "\u013e",
"lcedil;": "\u013c",
"lceil;": "\u2308",
"lcub;": "{",
"lcy;": "\u043b",
"ldca;": "\u2936",
"ldquo;": "\u201c",
"ldquor;": "\u201e",
"ldrdhar;": "\u2967",
"ldrushar;": "\u294b",
"ldsh;": "\u21b2",
"le;": "\u2264",
"leftarrow;": "\u2190",
"leftarrowtail;": "\u21a2",
"leftharpoondown;": "\u21bd",
"leftharpoonup;": "\u21bc",
"leftleftarrows;": "\u21c7",
"leftrightarrow;": "\u2194",
"leftrightarrows;": "\u21c6",
"leftrightharpoons;": "\u21cb",
"leftrightsquigarrow;": "\u21ad",
"leftthreetimes;": "\u22cb",
"leg;": "\u22da",
"leq;": "\u2264",
"leqq;": "\u2266",
"leqslant;": "\u2a7d",
"les;": "\u2a7d",
"lescc;": "\u2aa8",
"lesdot;": "\u2a7f",
"lesdoto;": "\u2a81",
"lesdotor;": "\u2a83",
"lesg;": "\u22da\ufe00",
"lesges;": "\u2a93",
"lessapprox;": "\u2a85",
"lessdot;": "\u22d6",
"lesseqgtr;": "\u22da",
"lesseqqgtr;": "\u2a8b",
"lessgtr;": "\u2276",
"lesssim;": "\u2272",
"lfisht;": "\u297c",
"lfloor;": "\u230a",
"lfr;": "\U0001d529",
"lg;": "\u2276",
"lgE;": "\u2a91",
"lhard;": "\u21bd",
"lharu;": "\u21bc",
"lharul;": "\u296a",
"lhblk;": "\u2584",
"ljcy;": "\u0459",
"ll;": "\u226a",
"llarr;": "\u21c7",
"llcorner;": "\u231e",
"llhard;": "\u296b",
"lltri;": "\u25fa",
"lmidot;": "\u0140",
"lmoust;": "\u23b0",
"lmoustache;": "\u23b0",
"lnE;": "\u2268",
"lnap;": "\u2a89",
"lnapprox;": "\u2a89",
"lne;": "\u2a87",
"lneq;": "\u2a87",
"lneqq;": "\u2268",
"lnsim;": "\u22e6",
"loang;": "\u27ec",
"loarr;": "\u21fd",
"lobrk;": "\u27e6",
"longleftarrow;": "\u27f5",
"longleftrightarrow;": "\u27f7",
"longmapsto;": "\u27fc",
"longrightarrow;": "\u27f6",
"looparrowleft;": "\u21ab",
"looparrowright;": "\u21ac",
"lopar;": "\u2985",
"lopf;": "\U0001d55d",
"loplus;": "\u2a2d",
"lotimes;": "\u2a34",
"lowast;": "\u2217",
"lowbar;": "_",
"loz;": "\u25ca",
"lozenge;": "\u25ca",
"lozf;": "\u29eb",
"lpar;": "(",
"lparlt;": "\u2993",
"lrarr;": "\u21c6",
"lrcorner;": "\u231f",
"lrhar;": "\u21cb",
"lrhard;": "\u296d",
"lrm;": "\u200e",
"lrtri;": "\u22bf",
"lsaquo;": "\u2039",
"lscr;": "\U0001d4c1",
"lsh;": "\u21b0",
"lsim;": "\u2272",
"lsime;": "\u2a8d",
"lsimg;": "\u2a8f",
"lsqb;": "[",
"lsquo;": "\u2018",
"lsquor;": "\u201a",
"lstrok;": "\u0142",
"lt": "<",
"lt;": "<",
"ltcc;": "\u2aa6",
"ltcir;": "\u2a79",
"ltdot;": "\u22d6",
"lthree;": "\u22cb",
"ltimes;": "\u22c9",
"ltlarr;": "\u2976",
"ltquest;": "\u2a7b",
"ltrPar;": "\u2996",
"ltri;": "\u25c3",
"ltrie;": "\u22b4",
"ltrif;": "\u25c2",
"lurdshar;": "\u294a",
"luruhar;": "\u2966",
"lvertneqq;": "\u2268\ufe00",
"lvnE;": "\u2268\ufe00",
"mDDot;": "\u223a",
"macr": "\xaf",
"macr;": "\xaf",
"male;": "\u2642",
"malt;": "\u2720",
"maltese;": "\u2720",
"map;": "\u21a6",
"mapsto;": "\u21a6",
"mapstodown;": "\u21a7",
"mapstoleft;": "\u21a4",
"mapstoup;": "\u21a5",
"marker;": "\u25ae",
"mcomma;": "\u2a29",
"mcy;": "\u043c",
"mdash;": "\u2014",
"measuredangle;": "\u2221",
"mfr;": "\U0001d52a",
"mho;": "\u2127",
"micro": "\xb5",
"micro;": "\xb5",
"mid;": "\u2223",
"midast;": "*",
"midcir;": "\u2af0",
"middot": "\xb7",
"middot;": "\xb7",
"minus;": "\u2212",
"minusb;": "\u229f",
"minusd;": "\u2238",
"minusdu;": "\u2a2a",
"mlcp;": "\u2adb",
"mldr;": "\u2026",
"mnplus;": "\u2213",
"models;": "\u22a7",
"mopf;": "\U0001d55e",
"mp;": "\u2213",
"mscr;": "\U0001d4c2",
"mstpos;": "\u223e",
"mu;": "\u03bc",
"multimap;": "\u22b8",
"mumap;": "\u22b8",
"nGg;": "\u22d9\u0338",
"nGt;": "\u226b\u20d2",
"nGtv;": "\u226b\u0338",
"nLeftarrow;": "\u21cd",
"nLeftrightarrow;": "\u21ce",
"nLl;": "\u22d8\u0338",
"nLt;": "\u226a\u20d2",
"nLtv;": "\u226a\u0338",
"nRightarrow;": "\u21cf",
"nVDash;": "\u22af",
"nVdash;": "\u22ae",
"nabla;": "\u2207",
"nacute;": "\u0144",
"nang;": "\u2220\u20d2",
"nap;": "\u2249",
"napE;": "\u2a70\u0338",
"napid;": "\u224b\u0338",
"napos;": "\u0149",
"napprox;": "\u2249",
"natur;": "\u266e",
"natural;": "\u266e",
"naturals;": "\u2115",
"nbsp": "\xa0",
"nbsp;": "\xa0",
"nbump;": "\u224e\u0338",
"nbumpe;": "\u224f\u0338",
"ncap;": "\u2a43",
"ncaron;": "\u0148",
"ncedil;": "\u0146",
"ncong;": "\u2247",
"ncongdot;": "\u2a6d\u0338",
"ncup;": "\u2a42",
"ncy;": "\u043d",
"ndash;": "\u2013",
"ne;": "\u2260",
"neArr;": "\u21d7",
"nearhk;": "\u2924",
"nearr;": "\u2197",
"nearrow;": "\u2197",
"nedot;": "\u2250\u0338",
"nequiv;": "\u2262",
"nesear;": "\u2928",
"nesim;": "\u2242\u0338",
"nexist;": "\u2204",
"nexists;": "\u2204",
"nfr;": "\U0001d52b",
"ngE;": "\u2267\u0338",
"nge;": "\u2271",
"ngeq;": "\u2271",
"ngeqq;": "\u2267\u0338",
"ngeqslant;": "\u2a7e\u0338",
"nges;": "\u2a7e\u0338",
"ngsim;": "\u2275",
"ngt;": "\u226f",
"ngtr;": "\u226f",
"nhArr;": "\u21ce",
"nharr;": "\u21ae",
"nhpar;": "\u2af2",
"ni;": "\u220b",
"nis;": "\u22fc",
"nisd;": "\u22fa",
"niv;": "\u220b",
"njcy;": "\u045a",
"nlArr;": "\u21cd",
"nlE;": "\u2266\u0338",
"nlarr;": "\u219a",
"nldr;": "\u2025",
"nle;": "\u2270",
"nleftarrow;": "\u219a",
"nleftrightarrow;": "\u21ae",
"nleq;": "\u2270",
"nleqq;": "\u2266\u0338",
"nleqslant;": "\u2a7d\u0338",
"nles;": "\u2a7d\u0338",
"nless;": "\u226e",
"nlsim;": "\u2274",
"nlt;": "\u226e",
"nltri;": "\u22ea",
"nltrie;": "\u22ec",
"nmid;": "\u2224",
"nopf;": "\U0001d55f",
"not": "\xac",
"not;": "\xac",
"notin;": "\u2209",
"notinE;": "\u22f9\u0338",
"notindot;": "\u22f5\u0338",
"notinva;": "\u2209",
"notinvb;": "\u22f7",
"notinvc;": "\u22f6",
"notni;": "\u220c",
"notniva;": "\u220c",
"notnivb;": "\u22fe",
"notnivc;": "\u22fd",
"npar;": "\u2226",
"nparallel;": "\u2226",
"nparsl;": "\u2afd\u20e5",
"npart;": "\u2202\u0338",
"npolint;": "\u2a14",
"npr;": "\u2280",
"nprcue;": "\u22e0",
"npre;": "\u2aaf\u0338",
"nprec;": "\u2280",
"npreceq;": "\u2aaf\u0338",
"nrArr;": "\u21cf",
"nrarr;": "\u219b",
"nrarrc;": "\u2933\u0338",
"nrarrw;": "\u219d\u0338",
"nrightarrow;": "\u219b",
"nrtri;": "\u22eb",
"nrtrie;": "\u22ed",
"nsc;": "\u2281",
"nsccue;": "\u22e1",
"nsce;": "\u2ab0\u0338",
"nscr;": "\U0001d4c3",
"nshortmid;": "\u2224",
"nshortparallel;": "\u2226",
"nsim;": "\u2241",
"nsime;": "\u2244",
"nsimeq;": "\u2244",
"nsmid;": "\u2224",
"nspar;": "\u2226",
"nsqsube;": "\u22e2",
"nsqsupe;": "\u22e3",
"nsub;": "\u2284",
"nsubE;": "\u2ac5\u0338",
"nsube;": "\u2288",
"nsubset;": "\u2282\u20d2",
"nsubseteq;": "\u2288",
"nsubseteqq;": "\u2ac5\u0338",
"nsucc;": "\u2281",
"nsucceq;": "\u2ab0\u0338",
"nsup;": "\u2285",
"nsupE;": "\u2ac6\u0338",
"nsupe;": "\u2289",
"nsupset;": "\u2283\u20d2",
"nsupseteq;": "\u2289",
"nsupseteqq;": "\u2ac6\u0338",
"ntgl;": "\u2279",
"ntilde": "\xf1",
"ntilde;": "\xf1",
"ntlg;": "\u2278",
"ntriangleleft;": "\u22ea",
"ntrianglelefteq;": "\u22ec",
"ntriangleright;": "\u22eb",
"ntrianglerighteq;": "\u22ed",
"nu;": "\u03bd",
"num;": "#",
"numero;": "\u2116",
"numsp;": "\u2007",
"nvDash;": "\u22ad",
"nvHarr;": "\u2904",
"nvap;": "\u224d\u20d2",
"nvdash;": "\u22ac",
"nvge;": "\u2265\u20d2",
"nvgt;": ">\u20d2",
"nvinfin;": "\u29de",
"nvlArr;": "\u2902",
"nvle;": "\u2264\u20d2",
"nvlt;": "<\u20d2",
"nvltrie;": "\u22b4\u20d2",
"nvrArr;": "\u2903",
"nvrtrie;": "\u22b5\u20d2",
"nvsim;": "\u223c\u20d2",
"nwArr;": "\u21d6",
"nwarhk;": "\u2923",
"nwarr;": "\u2196",
"nwarrow;": "\u2196",
"nwnear;": "\u2927",
"oS;": "\u24c8",
"oacute": "\xf3",
"oacute;": "\xf3",
"oast;": "\u229b",
"ocir;": "\u229a",
"ocirc": "\xf4",
"ocirc;": "\xf4",
"ocy;": "\u043e",
"odash;": "\u229d",
"odblac;": "\u0151",
"odiv;": "\u2a38",
"odot;": "\u2299",
"odsold;": "\u29bc",
"oelig;": "\u0153",
"ofcir;": "\u29bf",
"ofr;": "\U0001d52c",
"ogon;": "\u02db",
"ograve": "\xf2",
"ograve;": "\xf2",
"ogt;": "\u29c1",
"ohbar;": "\u29b5",
"ohm;": "\u03a9",
"oint;": "\u222e",
"olarr;": "\u21ba",
"olcir;": "\u29be",
"olcross;": "\u29bb",
"oline;": "\u203e",
"olt;": "\u29c0",
"omacr;": "\u014d",
"omega;": "\u03c9",
"omicron;": "\u03bf",
"omid;": "\u29b6",
"ominus;": "\u2296",
"oopf;": "\U0001d560",
"opar;": "\u29b7",
"operp;": "\u29b9",
"oplus;": "\u2295",
"or;": "\u2228",
"orarr;": "\u21bb",
"ord;": "\u2a5d",
"order;": "\u2134",
"orderof;": "\u2134",
"ordf": "\xaa",
"ordf;": "\xaa",
"ordm": "\xba",
"ordm;": "\xba",
"origof;": "\u22b6",
"oror;": "\u2a56",
"orslope;": "\u2a57",
"orv;": "\u2a5b",
"oscr;": "\u2134",
"oslash": "\xf8",
"oslash;": "\xf8",
"osol;": "\u2298",
"otilde": "\xf5",
"otilde;": "\xf5",
"otimes;": "\u2297",
"otimesas;": "\u2a36",
"ouml": "\xf6",
"ouml;": "\xf6",
"ovbar;": "\u233d",
"par;": "\u2225",
"para": "\xb6",
"para;": "\xb6",
"parallel;": "\u2225",
"parsim;": "\u2af3",
"parsl;": "\u2afd",
"part;": "\u2202",
"pcy;": "\u043f",
"percnt;": "%",
"period;": ".",
"permil;": "\u2030",
"perp;": "\u22a5",
"pertenk;": "\u2031",
"pfr;": "\U0001d52d",
"phi;": "\u03c6",
"phiv;": "\u03d5",
"phmmat;": "\u2133",
"phone;": "\u260e",
"pi;": "\u03c0",
"pitchfork;": "\u22d4",
"piv;": "\u03d6",
"planck;": "\u210f",
"planckh;": "\u210e",
"plankv;": "\u210f",
"plus;": "+",
"plusacir;": "\u2a23",
"plusb;": "\u229e",
"pluscir;": "\u2a22",
"plusdo;": "\u2214",
"plusdu;": "\u2a25",
"pluse;": "\u2a72",
"plusmn": "\xb1",
"plusmn;": "\xb1",
"plussim;": "\u2a26",
"plustwo;": "\u2a27",
"pm;": "\xb1",
"pointint;": "\u2a15",
"popf;": "\U0001d561",
"pound": "\xa3",
"pound;": "\xa3",
"pr;": "\u227a",
"prE;": "\u2ab3",
"prap;": "\u2ab7",
"prcue;": "\u227c",
"pre;": "\u2aaf",
"prec;": "\u227a",
"precapprox;": "\u2ab7",
"preccurlyeq;": "\u227c",
"preceq;": "\u2aaf",
"precnapprox;": "\u2ab9",
"precneqq;": "\u2ab5",
"precnsim;": "\u22e8",
"precsim;": "\u227e",
"prime;": "\u2032",
"primes;": "\u2119",
"prnE;": "\u2ab5",
"prnap;": "\u2ab9",
"prnsim;": "\u22e8",
"prod;": "\u220f",
"profalar;": "\u232e",
"profline;": "\u2312",
"profsurf;": "\u2313",
"prop;": "\u221d",
"propto;": "\u221d",
"prsim;": "\u227e",
"prurel;": "\u22b0",
"pscr;": "\U0001d4c5",
"psi;": "\u03c8",
"puncsp;": "\u2008",
"qfr;": "\U0001d52e",
"qint;": "\u2a0c",
"qopf;": "\U0001d562",
"qprime;": "\u2057",
"qscr;": "\U0001d4c6",
"quaternions;": "\u210d",
"quatint;": "\u2a16",
"quest;": "?",
"questeq;": "\u225f",
"quot": "\"",
"quot;": "\"",
"rAarr;": "\u21db",
"rArr;": "\u21d2",
"rAtail;": "\u291c",
"rBarr;": "\u290f",
"rHar;": "\u2964",
"race;": "\u223d\u0331",
"racute;": "\u0155",
"radic;": "\u221a",
"raemptyv;": "\u29b3",
"rang;": "\u27e9",
"rangd;": "\u2992",
"range;": "\u29a5",
"rangle;": "\u27e9",
"raquo": "\xbb",
"raquo;": "\xbb",
"rarr;": "\u2192",
"rarrap;": "\u2975",
"rarrb;": "\u21e5",
"rarrbfs;": "\u2920",
"rarrc;": "\u2933",
"rarrfs;": "\u291e",
"rarrhk;": "\u21aa",
"rarrlp;": "\u21ac",
"rarrpl;": "\u2945",
"rarrsim;": "\u2974",
"rarrtl;": "\u21a3",
"rarrw;": "\u219d",
"ratail;": "\u291a",
"ratio;": "\u2236",
"rationals;": "\u211a",
"rbarr;": "\u290d",
"rbbrk;": "\u2773",
"rbrace;": "}",
"rbrack;": "]",
"rbrke;": "\u298c",
"rbrksld;": "\u298e",
"rbrkslu;": "\u2990",
"rcaron;": "\u0159",
"rcedil;": "\u0157",
"rceil;": "\u2309",
"rcub;": "}",
"rcy;": "\u0440",
"rdca;": "\u2937",
"rdldhar;": "\u2969",
"rdquo;": "\u201d",
"rdquor;": "\u201d",
"rdsh;": "\u21b3",
"real;": "\u211c",
"realine;": "\u211b",
"realpart;": "\u211c",
"reals;": "\u211d",
"rect;": "\u25ad",
"reg": "\xae",
"reg;": "\xae",
"rfisht;": "\u297d",
"rfloor;": "\u230b",
"rfr;": "\U0001d52f",
"rhard;": "\u21c1",
"rharu;": "\u21c0",
"rharul;": "\u296c",
"rho;": "\u03c1",
"rhov;": "\u03f1",
"rightarrow;": "\u2192",
"rightarrowtail;": "\u21a3",
"rightharpoondown;": "\u21c1",
"rightharpoonup;": "\u21c0",
"rightleftarrows;": "\u21c4",
"rightleftharpoons;": "\u21cc",
"rightrightarrows;": "\u21c9",
"rightsquigarrow;": "\u219d",
"rightthreetimes;": "\u22cc",
"ring;": "\u02da",
"risingdotseq;": "\u2253",
"rlarr;": "\u21c4",
"rlhar;": "\u21cc",
"rlm;": "\u200f",
"rmoust;": "\u23b1",
"rmoustache;": "\u23b1",
"rnmid;": "\u2aee",
"roang;": "\u27ed",
"roarr;": "\u21fe",
"robrk;": "\u27e7",
"ropar;": "\u2986",
"ropf;": "\U0001d563",
"roplus;": "\u2a2e",
"rotimes;": "\u2a35",
"rpar;": ")",
"rpargt;": "\u2994",
"rppolint;": "\u2a12",
"rrarr;": "\u21c9",
"rsaquo;": "\u203a",
"rscr;": "\U0001d4c7",
"rsh;": "\u21b1",
"rsqb;": "]",
"rsquo;": "\u2019",
"rsquor;": "\u2019",
"rthree;": "\u22cc",
"rtimes;": "\u22ca",
"rtri;": "\u25b9",
"rtrie;": "\u22b5",
"rtrif;": "\u25b8",
"rtriltri;": "\u29ce",
"ruluhar;": "\u2968",
"rx;": "\u211e",
"sacute;": "\u015b",
"sbquo;": "\u201a",
"sc;": "\u227b",
"scE;": "\u2ab4",
"scap;": "\u2ab8",
"scaron;": "\u0161",
"sccue;": "\u227d",
"sce;": "\u2ab0",
"scedil;": "\u015f",
"scirc;": "\u015d",
"scnE;": "\u2ab6",
"scnap;": "\u2aba",
"scnsim;": "\u22e9",
"scpolint;": "\u2a13",
"scsim;": "\u227f",
"scy;": "\u0441",
"sdot;": "\u22c5",
"sdotb;": "\u22a1",
"sdote;": "\u2a66",
"seArr;": "\u21d8",
"searhk;": "\u2925",
"searr;": "\u2198",
"searrow;": "\u2198",
"sect": "\xa7",
"sect;": "\xa7",
"semi;": ";",
"seswar;": "\u2929",
"setminus;": "\u2216",
"setmn;": "\u2216",
"sext;": "\u2736",
"sfr;": "\U0001d530",
"sfrown;": "\u2322",
"sharp;": "\u266f",
"shchcy;": "\u0449",
"shcy;": "\u0448",
"shortmid;": "\u2223",
"shortparallel;": "\u2225",
"shy": "\xad",
"shy;": "\xad",
"sigma;": "\u03c3",
"sigmaf;": "\u03c2",
"sigmav;": "\u03c2",
"sim;": "\u223c",
"simdot;": "\u2a6a",
"sime;": "\u2243",
"simeq;": "\u2243",
"simg;": "\u2a9e",
"simgE;": "\u2aa0",
"siml;": "\u2a9d",
"simlE;": "\u2a9f",
"simne;": "\u2246",
"simplus;": "\u2a24",
"simrarr;": "\u2972",
"slarr;": "\u2190",
"smallsetminus;": "\u2216",
"smashp;": "\u2a33",
"smeparsl;": "\u29e4",
"smid;": "\u2223",
"smile;": "\u2323",
"smt;": "\u2aaa",
"smte;": "\u2aac",
"smtes;": "\u2aac\ufe00",
"softcy;": "\u044c",
"sol;": "/",
"solb;": "\u29c4",
"solbar;": "\u233f",
"sopf;": "\U0001d564",
"spades;": "\u2660",
"spadesuit;": "\u2660",
"spar;": "\u2225",
"sqcap;": "\u2293",
"sqcaps;": "\u2293\ufe00",
"sqcup;": "\u2294",
"sqcups;": "\u2294\ufe00",
"sqsub;": "\u228f",
"sqsube;": "\u2291",
"sqsubset;": "\u228f",
"sqsubseteq;": "\u2291",
"sqsup;": "\u2290",
"sqsupe;": "\u2292",
"sqsupset;": "\u2290",
"sqsupseteq;": "\u2292",
"squ;": "\u25a1",
"square;": "\u25a1",
"squarf;": "\u25aa",
"squf;": "\u25aa",
"srarr;": "\u2192",
"sscr;": "\U0001d4c8",
"ssetmn;": "\u2216",
"ssmile;": "\u2323",
"sstarf;": "\u22c6",
"star;": "\u2606",
"starf;": "\u2605",
"straightepsilon;": "\u03f5",
"straightphi;": "\u03d5",
"strns;": "\xaf",
"sub;": "\u2282",
"subE;": "\u2ac5",
"subdot;": "\u2abd",
"sube;": "\u2286",
"subedot;": "\u2ac3",
"submult;": "\u2ac1",
"subnE;": "\u2acb",
"subne;": "\u228a",
"subplus;": "\u2abf",
"subrarr;": "\u2979",
"subset;": "\u2282",
"subseteq;": "\u2286",
"subseteqq;": "\u2ac5",
"subsetneq;": "\u228a",
"subsetneqq;": "\u2acb",
"subsim;": "\u2ac7",
"subsub;": "\u2ad5",
"subsup;": "\u2ad3",
"succ;": "\u227b",
"succapprox;": "\u2ab8",
"succcurlyeq;": "\u227d",
"succeq;": "\u2ab0",
"succnapprox;": "\u2aba",
"succneqq;": "\u2ab6",
"succnsim;": "\u22e9",
"succsim;": "\u227f",
"sum;": "\u2211",
"sung;": "\u266a",
"sup1": "\xb9",
"sup1;": "\xb9",
"sup2": "\xb2",
"sup2;": "\xb2",
"sup3": "\xb3",
"sup3;": "\xb3",
"sup;": "\u2283",
"supE;": "\u2ac6",
"supdot;": "\u2abe",
"supdsub;": "\u2ad8",
"supe;": "\u2287",
"supedot;": "\u2ac4",
"suphsol;": "\u27c9",
"suphsub;": "\u2ad7",
"suplarr;": "\u297b",
"supmult;": "\u2ac2",
"supnE;": "\u2acc",
"supne;": "\u228b",
"supplus;": "\u2ac0",
"supset;": "\u2283",
"supseteq;": "\u2287",
"supseteqq;": "\u2ac6",
"supsetneq;": "\u228b",
"supsetneqq;": "\u2acc",
"supsim;": "\u2ac8",
"supsub;": "\u2ad4",
"supsup;": "\u2ad6",
"swArr;": "\u21d9",
"swarhk;": "\u2926",
"swarr;": "\u2199",
"swarrow;": "\u2199",
"swnwar;": "\u292a",
"szlig": "\xdf",
"szlig;": "\xdf",
"target;": "\u2316",
"tau;": "\u03c4",
"tbrk;": "\u23b4",
"tcaron;": "\u0165",
"tcedil;": "\u0163",
"tcy;": "\u0442",
"tdot;": "\u20db",
"telrec;": "\u2315",
"tfr;": "\U0001d531",
"there4;": "\u2234",
"therefore;": "\u2234",
"theta;": "\u03b8",
"thetasym;": "\u03d1",
"thetav;": "\u03d1",
"thickapprox;": "\u2248",
"thicksim;": "\u223c",
"thinsp;": "\u2009",
"thkap;": "\u2248",
"thksim;": "\u223c",
"thorn": "\xfe",
"thorn;": "\xfe",
"tilde;": "\u02dc",
"times": "\xd7",
"times;": "\xd7",
"timesb;": "\u22a0",
"timesbar;": "\u2a31",
"timesd;": "\u2a30",
"tint;": "\u222d",
"toea;": "\u2928",
"top;": "\u22a4",
"topbot;": "\u2336",
"topcir;": "\u2af1",
"topf;": "\U0001d565",
"topfork;": "\u2ada",
"tosa;": "\u2929",
"tprime;": "\u2034",
"trade;": "\u2122",
"triangle;": "\u25b5",
"triangledown;": "\u25bf",
"triangleleft;": "\u25c3",
"trianglelefteq;": "\u22b4",
"triangleq;": "\u225c",
"triangleright;": "\u25b9",
"trianglerighteq;": "\u22b5",
"tridot;": "\u25ec",
"trie;": "\u225c",
"triminus;": "\u2a3a",
"triplus;": "\u2a39",
"trisb;": "\u29cd",
"tritime;": "\u2a3b",
"trpezium;": "\u23e2",
"tscr;": "\U0001d4c9",
"tscy;": "\u0446",
"tshcy;": "\u045b",
"tstrok;": "\u0167",
"twixt;": "\u226c",
"twoheadleftarrow;": "\u219e",
"twoheadrightarrow;": "\u21a0",
"uArr;": "\u21d1",
"uHar;": "\u2963",
"uacute": "\xfa",
"uacute;": "\xfa",
"uarr;": "\u2191",
"ubrcy;": "\u045e",
"ubreve;": "\u016d",
"ucirc": "\xfb",
"ucirc;": "\xfb",
"ucy;": "\u0443",
"udarr;": "\u21c5",
"udblac;": "\u0171",
"udhar;": "\u296e",
"ufisht;": "\u297e",
"ufr;": "\U0001d532",
"ugrave": "\xf9",
"ugrave;": "\xf9",
"uharl;": "\u21bf",
"uharr;": "\u21be",
"uhblk;": "\u2580",
"ulcorn;": "\u231c",
"ulcorner;": "\u231c",
"ulcrop;": "\u230f",
"ultri;": "\u25f8",
"umacr;": "\u016b",
"uml": "\xa8",
"uml;": "\xa8",
"uogon;": "\u0173",
"uopf;": "\U0001d566",
"uparrow;": "\u2191",
"updownarrow;": "\u2195",
"upharpoonleft;": "\u21bf",
"upharpoonright;": "\u21be",
"uplus;": "\u228e",
"upsi;": "\u03c5",
"upsih;": "\u03d2",
"upsilon;": "\u03c5",
"upuparrows;": "\u21c8",
"urcorn;": "\u231d",
"urcorner;": "\u231d",
"urcrop;": "\u230e",
"uring;": "\u016f",
"urtri;": "\u25f9",
"uscr;": "\U0001d4ca",
"utdot;": "\u22f0",
"utilde;": "\u0169",
"utri;": "\u25b5",
"utrif;": "\u25b4",
"uuarr;": "\u21c8",
"uuml": "\xfc",
"uuml;": "\xfc",
"uwangle;": "\u29a7",
"vArr;": "\u21d5",
"vBar;": "\u2ae8",
"vBarv;": "\u2ae9",
"vDash;": "\u22a8",
"vangrt;": "\u299c",
"varepsilon;": "\u03f5",
"varkappa;": "\u03f0",
"varnothing;": "\u2205",
"varphi;": "\u03d5",
"varpi;": "\u03d6",
"varpropto;": "\u221d",
"varr;": "\u2195",
"varrho;": "\u03f1",
"varsigma;": "\u03c2",
"varsubsetneq;": "\u228a\ufe00",
"varsubsetneqq;": "\u2acb\ufe00",
"varsupsetneq;": "\u228b\ufe00",
"varsupsetneqq;": "\u2acc\ufe00",
"vartheta;": "\u03d1",
"vartriangleleft;": "\u22b2",
"vartriangleright;": "\u22b3",
"vcy;": "\u0432",
"vdash;": "\u22a2",
"vee;": "\u2228",
"veebar;": "\u22bb",
"veeeq;": "\u225a",
"vellip;": "\u22ee",
"verbar;": "|",
"vert;": "|",
"vfr;": "\U0001d533",
"vltri;": "\u22b2",
"vnsub;": "\u2282\u20d2",
"vnsup;": "\u2283\u20d2",
"vopf;": "\U0001d567",
"vprop;": "\u221d",
"vrtri;": "\u22b3",
"vscr;": "\U0001d4cb",
"vsubnE;": "\u2acb\ufe00",
"vsubne;": "\u228a\ufe00",
"vsupnE;": "\u2acc\ufe00",
"vsupne;": "\u228b\ufe00",
"vzigzag;": "\u299a",
"wcirc;": "\u0175",
"wedbar;": "\u2a5f",
"wedge;": "\u2227",
"wedgeq;": "\u2259",
"weierp;": "\u2118",
"wfr;": "\U0001d534",
"wopf;": "\U0001d568",
"wp;": "\u2118",
"wr;": "\u2240",
"wreath;": "\u2240",
"wscr;": "\U0001d4cc",
"xcap;": "\u22c2",
"xcirc;": "\u25ef",
"xcup;": "\u22c3",
"xdtri;": "\u25bd",
"xfr;": "\U0001d535",
"xhArr;": "\u27fa",
"xharr;": "\u27f7",
"xi;": "\u03be",
"xlArr;": "\u27f8",
"xlarr;": "\u27f5",
"xmap;": "\u27fc",
"xnis;": "\u22fb",
"xodot;": "\u2a00",
"xopf;": "\U0001d569",
"xoplus;": "\u2a01",
"xotime;": "\u2a02",
"xrArr;": "\u27f9",
"xrarr;": "\u27f6",
"xscr;": "\U0001d4cd",
"xsqcup;": "\u2a06",
"xuplus;": "\u2a04",
"xutri;": "\u25b3",
"xvee;": "\u22c1",
"xwedge;": "\u22c0",
"yacute": "\xfd",
"yacute;": "\xfd",
"yacy;": "\u044f",
"ycirc;": "\u0177",
"ycy;": "\u044b",
"yen": "\xa5",
"yen;": "\xa5",
"yfr;": "\U0001d536",
"yicy;": "\u0457",
"yopf;": "\U0001d56a",
"yscr;": "\U0001d4ce",
"yucy;": "\u044e",
"yuml": "\xff",
"yuml;": "\xff",
"zacute;": "\u017a",
"zcaron;": "\u017e",
"zcy;": "\u0437",
"zdot;": "\u017c",
"zeetrf;": "\u2128",
"zeta;": "\u03b6",
"zfr;": "\U0001d537",
"zhcy;": "\u0436",
"zigrarr;": "\u21dd",
"zopf;": "\U0001d56b",
"zscr;": "\U0001d4cf",
"zwj;": "\u200d",
"zwnj;": "\u200c",
}
replacementCharacters = {
0x0: "\uFFFD",
0x0d: "\u000D",
0x80: "\u20AC",
0x81: "\u0081",
0x82: "\u201A",
0x83: "\u0192",
0x84: "\u201E",
0x85: "\u2026",
0x86: "\u2020",
0x87: "\u2021",
0x88: "\u02C6",
0x89: "\u2030",
0x8A: "\u0160",
0x8B: "\u2039",
0x8C: "\u0152",
0x8D: "\u008D",
0x8E: "\u017D",
0x8F: "\u008F",
0x90: "\u0090",
0x91: "\u2018",
0x92: "\u2019",
0x93: "\u201C",
0x94: "\u201D",
0x95: "\u2022",
0x96: "\u2013",
0x97: "\u2014",
0x98: "\u02DC",
0x99: "\u2122",
0x9A: "\u0161",
0x9B: "\u203A",
0x9C: "\u0153",
0x9D: "\u009D",
0x9E: "\u017E",
0x9F: "\u0178",
}
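# Per the HTML5 parsing spec, numeric character references in the 0x80-0x9F
# range are replaced with their windows-1252 equivalents rather than the raw
# C1 controls (e.g. &#x80; yields U+20AC, the euro sign); this table encodes
# that mapping.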
encodings = {
'437': 'cp437',
'850': 'cp850',
'852': 'cp852',
'855': 'cp855',
'857': 'cp857',
'860': 'cp860',
'861': 'cp861',
'862': 'cp862',
'863': 'cp863',
'865': 'cp865',
'866': 'cp866',
'869': 'cp869',
'ansix341968': 'ascii',
'ansix341986': 'ascii',
'arabic': 'iso8859-6',
'ascii': 'ascii',
'asmo708': 'iso8859-6',
'big5': 'big5',
'big5hkscs': 'big5hkscs',
'chinese': 'gbk',
'cp037': 'cp037',
'cp1026': 'cp1026',
'cp154': 'ptcp154',
'cp367': 'ascii',
'cp424': 'cp424',
'cp437': 'cp437',
'cp500': 'cp500',
'cp775': 'cp775',
'cp819': 'windows-1252',
'cp850': 'cp850',
'cp852': 'cp852',
'cp855': 'cp855',
'cp857': 'cp857',
'cp860': 'cp860',
'cp861': 'cp861',
'cp862': 'cp862',
'cp863': 'cp863',
'cp864': 'cp864',
'cp865': 'cp865',
'cp866': 'cp866',
'cp869': 'cp869',
'cp936': 'gbk',
'cpgr': 'cp869',
'cpis': 'cp861',
'csascii': 'ascii',
'csbig5': 'big5',
'cseuckr': 'cp949',
'cseucpkdfmtjapanese': 'euc_jp',
'csgb2312': 'gbk',
'cshproman8': 'hp-roman8',
'csibm037': 'cp037',
'csibm1026': 'cp1026',
'csibm424': 'cp424',
'csibm500': 'cp500',
'csibm855': 'cp855',
'csibm857': 'cp857',
'csibm860': 'cp860',
'csibm861': 'cp861',
'csibm863': 'cp863',
'csibm864': 'cp864',
'csibm865': 'cp865',
'csibm866': 'cp866',
'csibm869': 'cp869',
'csiso2022jp': 'iso2022_jp',
'csiso2022jp2': 'iso2022_jp_2',
'csiso2022kr': 'iso2022_kr',
'csiso58gb231280': 'gbk',
'csisolatin1': 'windows-1252',
'csisolatin2': 'iso8859-2',
'csisolatin3': 'iso8859-3',
'csisolatin4': 'iso8859-4',
'csisolatin5': 'windows-1254',
'csisolatin6': 'iso8859-10',
'csisolatinarabic': 'iso8859-6',
'csisolatincyrillic': 'iso8859-5',
'csisolatingreek': 'iso8859-7',
'csisolatinhebrew': 'iso8859-8',
'cskoi8r': 'koi8-r',
'csksc56011987': 'cp949',
'cspc775baltic': 'cp775',
'cspc850multilingual': 'cp850',
'cspc862latinhebrew': 'cp862',
'cspc8codepage437': 'cp437',
'cspcp852': 'cp852',
'csptcp154': 'ptcp154',
'csshiftjis': 'shift_jis',
'csunicode11utf7': 'utf-7',
'cyrillic': 'iso8859-5',
'cyrillicasian': 'ptcp154',
'ebcdiccpbe': 'cp500',
'ebcdiccpca': 'cp037',
'ebcdiccpch': 'cp500',
'ebcdiccphe': 'cp424',
'ebcdiccpnl': 'cp037',
'ebcdiccpus': 'cp037',
'ebcdiccpwt': 'cp037',
'ecma114': 'iso8859-6',
'ecma118': 'iso8859-7',
'elot928': 'iso8859-7',
'eucjp': 'euc_jp',
'euckr': 'cp949',
'extendedunixcodepackedformatforjapanese': 'euc_jp',
'gb18030': 'gb18030',
'gb2312': 'gbk',
'gb231280': 'gbk',
'gbk': 'gbk',
'greek': 'iso8859-7',
'greek8': 'iso8859-7',
'hebrew': 'iso8859-8',
'hproman8': 'hp-roman8',
'hzgb2312': 'hz',
'ibm037': 'cp037',
'ibm1026': 'cp1026',
'ibm367': 'ascii',
'ibm424': 'cp424',
'ibm437': 'cp437',
'ibm500': 'cp500',
'ibm775': 'cp775',
'ibm819': 'windows-1252',
'ibm850': 'cp850',
'ibm852': 'cp852',
'ibm855': 'cp855',
'ibm857': 'cp857',
'ibm860': 'cp860',
'ibm861': 'cp861',
'ibm862': 'cp862',
'ibm863': 'cp863',
'ibm864': 'cp864',
'ibm865': 'cp865',
'ibm866': 'cp866',
'ibm869': 'cp869',
'iso2022jp': 'iso2022_jp',
'iso2022jp2': 'iso2022_jp_2',
'iso2022kr': 'iso2022_kr',
'iso646irv1991': 'ascii',
'iso646us': 'ascii',
'iso88591': 'windows-1252',
'iso885910': 'iso8859-10',
'iso8859101992': 'iso8859-10',
'iso885911987': 'windows-1252',
'iso885913': 'iso8859-13',
'iso885914': 'iso8859-14',
'iso8859141998': 'iso8859-14',
'iso885915': 'iso8859-15',
'iso885916': 'iso8859-16',
'iso8859162001': 'iso8859-16',
'iso88592': 'iso8859-2',
'iso885921987': 'iso8859-2',
'iso88593': 'iso8859-3',
'iso885931988': 'iso8859-3',
'iso88594': 'iso8859-4',
'iso885941988': 'iso8859-4',
'iso88595': 'iso8859-5',
'iso885951988': 'iso8859-5',
'iso88596': 'iso8859-6',
'iso885961987': 'iso8859-6',
'iso88597': 'iso8859-7',
'iso885971987': 'iso8859-7',
'iso88598': 'iso8859-8',
'iso885981988': 'iso8859-8',
'iso88599': 'windows-1254',
'iso885991989': 'windows-1254',
'isoceltic': 'iso8859-14',
'isoir100': 'windows-1252',
'isoir101': 'iso8859-2',
'isoir109': 'iso8859-3',
'isoir110': 'iso8859-4',
'isoir126': 'iso8859-7',
'isoir127': 'iso8859-6',
'isoir138': 'iso8859-8',
'isoir144': 'iso8859-5',
'isoir148': 'windows-1254',
'isoir149': 'cp949',
'isoir157': 'iso8859-10',
'isoir199': 'iso8859-14',
'isoir226': 'iso8859-16',
'isoir58': 'gbk',
'isoir6': 'ascii',
'koi8r': 'koi8-r',
'koi8u': 'koi8-u',
'korean': 'cp949',
'ksc5601': 'cp949',
'ksc56011987': 'cp949',
'ksc56011989': 'cp949',
'l1': 'windows-1252',
'l10': 'iso8859-16',
'l2': 'iso8859-2',
'l3': 'iso8859-3',
'l4': 'iso8859-4',
'l5': 'windows-1254',
'l6': 'iso8859-10',
'l8': 'iso8859-14',
'latin1': 'windows-1252',
'latin10': 'iso8859-16',
'latin2': 'iso8859-2',
'latin3': 'iso8859-3',
'latin4': 'iso8859-4',
'latin5': 'windows-1254',
'latin6': 'iso8859-10',
'latin8': 'iso8859-14',
'latin9': 'iso8859-15',
'ms936': 'gbk',
'mskanji': 'shift_jis',
'pt154': 'ptcp154',
'ptcp154': 'ptcp154',
'r8': 'hp-roman8',
'roman8': 'hp-roman8',
'shiftjis': 'shift_jis',
'tis620': 'cp874',
'unicode11utf7': 'utf-7',
'us': 'ascii',
'usascii': 'ascii',
'utf16': 'utf-16',
'utf16be': 'utf-16-be',
'utf16le': 'utf-16-le',
'utf8': 'utf-8',
'windows1250': 'cp1250',
'windows1251': 'cp1251',
'windows1252': 'cp1252',
'windows1253': 'cp1253',
'windows1254': 'cp1254',
'windows1255': 'cp1255',
'windows1256': 'cp1256',
'windows1257': 'cp1257',
'windows1258': 'cp1258',
'windows936': 'gbk',
'x-x-big5': 'big5'}
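# Maps encoding labels (normalized to lowercase with punctuation stripped,
# e.g. "ISO-8859-1" -> "iso88591") to Python codec names. Several latin-1
# style labels deliberately resolve to windows-1252, matching how browsers
# treat those labels.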
tokenTypes = {
"Doctype": 0,
"Characters": 1,
"SpaceCharacters": 2,
"StartTag": 3,
"EndTag": 4,
"EmptyTag": 5,
"Comment": 6,
"ParseError": 7
}
tagTokenTypes = frozenset((tokenTypes["StartTag"], tokenTypes["EndTag"],
tokenTypes["EmptyTag"]))
prefixes = dict([(v, k) for k, v in namespaces.items()])
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
class DataLossWarning(UserWarning):
pass
class ReparseException(Exception):
pass
| lgpl-3.0 |
tumbl3w33d/ansible | lib/ansible/playbook/role/__init__.py | 22 | 21613 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleAssertionError
from ansible.module_utils.six import iteritems, binary_type, text_type
from ansible.module_utils.common._collections_compat import Container, Mapping, Set, Sequence
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.collectionsearch import CollectionSearch
from ansible.playbook.conditional import Conditional
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.role.metadata import RoleMetadata
from ansible.playbook.taggable import Taggable
from ansible.plugins.loader import add_all_plugin_dirs
from ansible.utils.collection_loader import AnsibleCollectionLoader
from ansible.utils.vars import combine_vars
__all__ = ['Role', 'hash_params']
# TODO: this should be a utility function, but can't be a member of
# the role class because that would require using self in a static
# method. This is also used in the base class for strategies
# (ansible/plugins/strategy/__init__.py)
def hash_params(params):
"""
Construct a data structure of parameters that is hashable.
This requires changing any mutable data structures into immutable ones.
We chose a frozenset because role parameters have to be unique.
.. warning:: this does not handle unhashable scalars. Two things
mitigate that limitation:
1) There shouldn't be any unhashable scalars specified in the yaml
2) Our only choice would be to return an error anyway.
"""
# Any container is unhashable if it contains unhashable items (for
# instance, tuple() is a Hashable subclass but if it contains a dict, it
# cannot be hashed)
if isinstance(params, Container) and not isinstance(params, (text_type, binary_type)):
if isinstance(params, Mapping):
try:
# Optimistically hope the contents are all hashable
new_params = frozenset(params.items())
except TypeError:
new_params = set()
for k, v in params.items():
# Hash each entry individually
new_params.add((k, hash_params(v)))
new_params = frozenset(new_params)
elif isinstance(params, (Set, Sequence)):
try:
# Optimistically hope the contents are all hashable
new_params = frozenset(params)
except TypeError:
new_params = set()
for v in params:
# Hash each entry individually
new_params.add(hash_params(v))
new_params = frozenset(new_params)
else:
# For other container types, guess that their contents are hashable.
new_params = frozenset(params)
return new_params
# Note: We do not handle unhashable scalars but our only choice would be
# to raise an error there anyway.
return frozenset((params,))
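# Illustrative behavior: nested mutable parameters collapse to a stable,
# order-insensitive key, so equal parameter dicts always hash alike:
#   hash_params({'a': [1, 2], 'b': {'c': 3}}) == \
#       hash_params({'b': {'c': 3}, 'a': [1, 2]})   # -> True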
class Role(Base, Conditional, Taggable, CollectionSearch):
_delegate_to = FieldAttribute(isa='string')
_delegate_facts = FieldAttribute(isa='bool')
def __init__(self, play=None, from_files=None, from_include=False):
self._role_name = None
self._role_path = None
self._role_collection = None
self._role_params = dict()
self._loader = None
self._metadata = None
self._play = play
self._parents = []
self._dependencies = []
self._task_blocks = []
self._handler_blocks = []
self._compiled_handler_blocks = None
self._default_vars = dict()
self._role_vars = dict()
self._had_task_run = dict()
self._completed = dict()
if from_files is None:
from_files = {}
self._from_files = from_files
# Indicates whether this role was included via include/import_role
self.from_include = from_include
super(Role, self).__init__()
def __repr__(self):
return self.get_name()
def get_name(self):
return self._role_name
@staticmethod
def load(role_include, play, parent_role=None, from_files=None, from_include=False):
if from_files is None:
from_files = {}
try:
# The ROLE_CACHE is a dictionary keyed by role name; each entry is
# another dictionary mapping a hashed set of role parameters to the
# corresponding Role() object. We use frozenset to make the parameter
# dict hashable.
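# Illustrative shape (the role name 'common' is hypothetical):
#   play.ROLE_CACHE == {
#       'common': {frozenset({('foo', 'bar'), ...}): <Role 'common'>},
#   }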
params = role_include.get_role_params()
if role_include.when is not None:
params['when'] = role_include.when
if role_include.tags is not None:
params['tags'] = role_include.tags
if from_files is not None:
params['from_files'] = from_files
if role_include.vars:
params['vars'] = role_include.vars
params['from_include'] = from_include
hashed_params = hash_params(params)
if role_include.role in play.ROLE_CACHE:
for (entry, role_obj) in iteritems(play.ROLE_CACHE[role_include.role]):
if hashed_params == entry:
if parent_role:
role_obj.add_parent(parent_role)
return role_obj
# TODO: need to fix cycle detection in role load (maybe use an empty dict
# in the role cache as an in-flight sentinel marking that we're already
# trying to load that role?)
# see https://github.com/ansible/ansible/issues/61527
r = Role(play=play, from_files=from_files, from_include=from_include)
r._load_role_data(role_include, parent_role=parent_role)
if role_include.role not in play.ROLE_CACHE:
play.ROLE_CACHE[role_include.role] = dict()
# FIXME: how to handle cache keys for collection-based roles, since they're technically adjustable per task?
play.ROLE_CACHE[role_include.role][hashed_params] = r
return r
except RuntimeError:
raise AnsibleError("A recursion loop was detected with the roles specified. Make sure child roles do not have dependencies on parent roles",
obj=role_include._ds)
def _load_role_data(self, role_include, parent_role=None):
self._role_name = role_include.role
self._role_path = role_include.get_role_path()
self._role_collection = role_include._role_collection
self._role_params = role_include.get_role_params()
self._variable_manager = role_include.get_variable_manager()
self._loader = role_include.get_loader()
if parent_role:
self.add_parent(parent_role)
# copy over all field attributes from the RoleInclude
# update self._attributes directly, to avoid squashing
for (attr_name, _) in iteritems(self._valid_attrs):
if attr_name in ('when', 'tags'):
self._attributes[attr_name] = self._extend_value(
self._attributes[attr_name],
role_include._attributes[attr_name],
)
else:
self._attributes[attr_name] = role_include._attributes[attr_name]
# vars and default vars are regular dictionaries
self._role_vars = self._load_role_yaml('vars', main=self._from_files.get('vars'), allow_dir=True)
if self._role_vars is None:
self._role_vars = dict()
elif not isinstance(self._role_vars, dict):
raise AnsibleParserError("The vars/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)
self._default_vars = self._load_role_yaml('defaults', main=self._from_files.get('defaults'), allow_dir=True)
if self._default_vars is None:
self._default_vars = dict()
elif not isinstance(self._default_vars, dict):
raise AnsibleParserError("The defaults/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)
# load the role's other files, if they exist
metadata = self._load_role_yaml('meta')
if metadata:
self._metadata = RoleMetadata.load(metadata, owner=self, variable_manager=self._variable_manager, loader=self._loader)
self._dependencies = self._load_dependencies()
else:
self._metadata = RoleMetadata()
# reset collections list; roles do not inherit collections from parents, just use the defaults
# FUTURE: use a private config default for this so we can allow it to be overridden later
self.collections = []
# configure plugin/collection loading; either prepend the current role's collection or configure legacy plugin loading
# FIXME: need exception for explicit ansible.legacy?
if self._role_collection: # this is a collection-hosted role
self.collections.insert(0, self._role_collection)
else: # this is a legacy role, but set the default collection if there is one
default_collection = AnsibleCollectionLoader().default_collection
if default_collection:
self.collections.insert(0, default_collection)
# legacy role, ensure all plugin dirs under the role are added to plugin search path
add_all_plugin_dirs(self._role_path)
# collections can be specified in metadata for legacy or collection-hosted roles
if self._metadata.collections:
self.collections.extend((c for c in self._metadata.collections if c not in self.collections))
# if any collections were specified, ensure that core or legacy synthetic collections are always included
if self.collections:
# default append collection is core for collection-hosted roles, legacy for others
default_append_collection = 'ansible.builtin' if self._role_collection else 'ansible.legacy'
if 'ansible.builtin' not in self.collections and 'ansible.legacy' not in self.collections:
self.collections.append(default_append_collection)
task_data = self._load_role_yaml('tasks', main=self._from_files.get('tasks'))
if task_data:
try:
self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader, variable_manager=self._variable_manager)
except AssertionError as e:
raise AnsibleParserError("The tasks/main.yml file for role '%s' must contain a list of tasks" % self._role_name,
obj=task_data, orig_exc=e)
handler_data = self._load_role_yaml('handlers', main=self._from_files.get('handlers'))
if handler_data:
try:
self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader,
variable_manager=self._variable_manager)
except AssertionError as e:
raise AnsibleParserError("The handlers/main.yml file for role '%s' must contain a list of tasks" % self._role_name,
obj=handler_data, orig_exc=e)
def _load_role_yaml(self, subdir, main=None, allow_dir=False):
file_path = os.path.join(self._role_path, subdir)
if self._loader.path_exists(file_path) and self._loader.is_directory(file_path):
            # Valid extensions and their ordering for roles are hard-coded to
            # maintain role portability
extensions = ['.yml', '.yaml', '.json']
# If no <main> is specified by the user, look for files with
# extensions before bare name. Otherwise, look for bare name first.
if main is None:
_main = 'main'
extensions.append('')
else:
_main = main
extensions.insert(0, '')
found_files = self._loader.find_vars_files(file_path, _main, extensions, allow_dir)
if found_files:
data = {}
for found in found_files:
new_data = self._loader.load_from_file(found)
if new_data and allow_dir:
data = combine_vars(data, new_data)
else:
data = new_data
return data
elif main is not None:
raise AnsibleParserError("Could not find specified file in role: %s/%s" % (subdir, main))
return None
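    # Lookup-order sketch for _load_role_yaml (assuming find_vars_files
    # honors the extension list order). With no user override:
    #   tasks/main.yml -> tasks/main.yaml -> tasks/main.json -> tasks/main
    # With from_files={'tasks': 'other'} the bare name is tried first:
    #   tasks/other -> tasks/other.yml -> tasks/other.yaml -> tasks/other.json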
def _load_dependencies(self):
'''
Recursively loads role dependencies from the metadata list of
dependencies, if it exists
'''
deps = []
if self._metadata:
for role_include in self._metadata.dependencies:
r = Role.load(role_include, play=self._play, parent_role=self)
deps.append(r)
return deps
# other functions
def add_parent(self, parent_role):
        ''' adds a role to the list of this role's parents '''
if not isinstance(parent_role, Role):
raise AnsibleAssertionError()
if parent_role not in self._parents:
self._parents.append(parent_role)
def get_parents(self):
return self._parents
def get_default_vars(self, dep_chain=None):
dep_chain = [] if dep_chain is None else dep_chain
default_vars = dict()
for dep in self.get_all_dependencies():
default_vars = combine_vars(default_vars, dep.get_default_vars())
if dep_chain:
for parent in dep_chain:
default_vars = combine_vars(default_vars, parent._default_vars)
default_vars = combine_vars(default_vars, self._default_vars)
return default_vars
def get_inherited_vars(self, dep_chain=None):
dep_chain = [] if dep_chain is None else dep_chain
inherited_vars = dict()
if dep_chain:
for parent in dep_chain:
inherited_vars = combine_vars(inherited_vars, parent._role_vars)
return inherited_vars
def get_role_params(self, dep_chain=None):
dep_chain = [] if dep_chain is None else dep_chain
params = {}
if dep_chain:
for parent in dep_chain:
params = combine_vars(params, parent._role_params)
params = combine_vars(params, self._role_params)
return params
def get_vars(self, dep_chain=None, include_params=True):
dep_chain = [] if dep_chain is None else dep_chain
all_vars = self.get_inherited_vars(dep_chain)
for dep in self.get_all_dependencies():
all_vars = combine_vars(all_vars, dep.get_vars(include_params=include_params))
all_vars = combine_vars(all_vars, self.vars)
all_vars = combine_vars(all_vars, self._role_vars)
if include_params:
all_vars = combine_vars(all_vars, self.get_role_params(dep_chain=dep_chain))
return all_vars
def get_direct_dependencies(self):
return self._dependencies[:]
def get_all_dependencies(self):
'''
Returns a list of all deps, built recursively from all child dependencies,
in the proper order in which they should be executed or evaluated.
'''
child_deps = []
for dep in self.get_direct_dependencies():
for child_dep in dep.get_all_dependencies():
child_deps.append(child_dep)
child_deps.append(dep)
return child_deps
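    # Ordering sketch: if this role depends on [B, C] and B depends on [D],
    # get_all_dependencies() returns [D, B, C] -- children first, so every
    # dependency is compiled/executed before the role that requires it.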
def get_task_blocks(self):
return self._task_blocks[:]
def get_handler_blocks(self, play, dep_chain=None):
# Do not recreate this list each time ``get_handler_blocks`` is called.
# Cache the results so that we don't potentially overwrite with copied duplicates
#
# ``get_handler_blocks`` may be called when handling ``import_role`` during parsing
# as well as with ``Play.compile_roles_handlers`` from ``TaskExecutor``
if self._compiled_handler_blocks:
return self._compiled_handler_blocks
self._compiled_handler_blocks = block_list = []
# update the dependency chain here
if dep_chain is None:
dep_chain = []
new_dep_chain = dep_chain + [self]
for dep in self.get_direct_dependencies():
dep_blocks = dep.get_handler_blocks(play=play, dep_chain=new_dep_chain)
block_list.extend(dep_blocks)
for task_block in self._handler_blocks:
new_task_block = task_block.copy()
new_task_block._dep_chain = new_dep_chain
new_task_block._play = play
block_list.append(new_task_block)
return block_list
def has_run(self, host):
'''
Returns true if this role has been iterated over completely and
at least one task was run
'''
return host.name in self._completed and not self._metadata.allow_duplicates
def compile(self, play, dep_chain=None):
'''
Returns the task list for this role, which is created by first
recursively compiling the tasks for all direct dependencies, and
then adding on the tasks for this role.
The role compile() also remembers and saves the dependency chain
with each task, so tasks know by which route they were found, and
can correctly take their parent's tags/conditionals into account.
'''
block_list = []
# update the dependency chain here
if dep_chain is None:
dep_chain = []
new_dep_chain = dep_chain + [self]
deps = self.get_direct_dependencies()
for dep in deps:
dep_blocks = dep.compile(play=play, dep_chain=new_dep_chain)
block_list.extend(dep_blocks)
for idx, task_block in enumerate(self._task_blocks):
new_task_block = task_block.copy()
new_task_block._dep_chain = new_dep_chain
new_task_block._play = play
if idx == len(self._task_blocks) - 1:
new_task_block._eor = True
block_list.append(new_task_block)
return block_list
def serialize(self, include_deps=True):
res = super(Role, self).serialize()
res['_role_name'] = self._role_name
res['_role_path'] = self._role_path
res['_role_vars'] = self._role_vars
res['_role_params'] = self._role_params
res['_default_vars'] = self._default_vars
res['_had_task_run'] = self._had_task_run.copy()
res['_completed'] = self._completed.copy()
if self._metadata:
res['_metadata'] = self._metadata.serialize()
if include_deps:
deps = []
for role in self.get_direct_dependencies():
deps.append(role.serialize())
res['_dependencies'] = deps
parents = []
for parent in self._parents:
parents.append(parent.serialize(include_deps=False))
res['_parents'] = parents
return res
def deserialize(self, data, include_deps=True):
self._role_name = data.get('_role_name', '')
self._role_path = data.get('_role_path', '')
self._role_vars = data.get('_role_vars', dict())
self._role_params = data.get('_role_params', dict())
self._default_vars = data.get('_default_vars', dict())
self._had_task_run = data.get('_had_task_run', dict())
self._completed = data.get('_completed', dict())
if include_deps:
deps = []
for dep in data.get('_dependencies', []):
r = Role()
r.deserialize(dep)
deps.append(r)
setattr(self, '_dependencies', deps)
parent_data = data.get('_parents', [])
parents = []
for parent in parent_data:
r = Role()
r.deserialize(parent, include_deps=False)
parents.append(r)
setattr(self, '_parents', parents)
metadata_data = data.get('_metadata')
if metadata_data:
m = RoleMetadata()
m.deserialize(metadata_data)
self._metadata = m
super(Role, self).deserialize(data)
def set_loader(self, loader):
self._loader = loader
for parent in self._parents:
parent.set_loader(loader)
for dep in self.get_direct_dependencies():
dep.set_loader(loader)
| gpl-3.0 |
mitchellrj/touchdown | docs/conf.py | 1 | 6509 | # -*- coding: utf-8 -*-
#
# touchdown documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 28 13:27:09 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, pkg_resources
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.graphviz',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'touchdown'
copyright = u'2014-2015, John Carr'
# The full version, including alpha/beta/rc tags.
release = pkg_resources.get_distribution('touchdown').version
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'touchdowndoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'touchdown.tex', u'touchdown documentation',
u'John Carr', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'fuselage': ('http://docs.yaybu.com/projects/fuselage/en/latest/', None),
}
| apache-2.0 |
amousset/ansible | lib/ansible/executor/task_result.py | 130 | 2534 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.parsing import DataLoader
class TaskResult:
'''
    This class is responsible for interpreting the resulting data
from an executed task, and provides helper methods for determining
the result of a given task.
'''
def __init__(self, host, task, return_data):
self._host = host
self._task = task
if isinstance(return_data, dict):
self._result = return_data.copy()
else:
self._result = DataLoader().load(return_data)
def is_changed(self):
return self._check_key('changed')
def is_skipped(self):
if 'results' in self._result and self._task.loop:
flag = True
for res in self._result.get('results', []):
if isinstance(res, dict):
flag &= res.get('skipped', False)
return flag
else:
return self._result.get('skipped', False)
def is_failed(self):
if 'failed_when_result' in self._result or \
'results' in self._result and True in [True for x in self._result['results'] if 'failed_when_result' in x]:
return self._check_key('failed_when_result')
else:
return self._check_key('failed') or self._result.get('rc', 0) != 0
def is_unreachable(self):
return self._check_key('unreachable')
def _check_key(self, key):
if 'results' in self._result and self._task.loop:
flag = False
for res in self._result.get('results', []):
if isinstance(res, dict):
flag |= res.get(key, False)
return flag
else:
return self._result.get(key, False)
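    # Aggregation sketch: for a task with a loop, _check_key OR-folds a flag
    # across the per-item results, e.g.
    #   {'results': [{'changed': False}, {'changed': True}]} => is_changed() is True
    # while is_skipped() AND-folds, so every item must have been skipped.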
| gpl-3.0 |
pombreda/django-hotclub | libs/external_libs/gdata.py-1.0.13/tests/gdata_tests/spreadsheet/service_test.py | 5 | 7826 | #!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '[email protected] (Laura Beth Lincoln)'
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import gdata.spreadsheet.service
import gdata.service
import atom.service
import gdata.spreadsheet
import atom
import getpass
username = ''
password = ''
ss_key = ''
ws_key = ''
class DocumentQueryTest(unittest.TestCase):
def setUp(self):
self.query = gdata.spreadsheet.service.DocumentQuery()
def testTitle(self):
self.query['title'] = 'my title'
self.assert_(self.query['title'] == 'my title')
self.assert_(self.query.ToUri() == '?title=my+title')
def testTitleExact(self):
self.query['title-exact'] = 'true'
self.assert_(self.query['title-exact'] == 'true')
self.assert_(self.query.ToUri() == '?title-exact=true')
class CellQueryTest(unittest.TestCase):
def setUp(self):
self.query = gdata.spreadsheet.service.CellQuery()
def testMinRow(self):
self.query['min-row'] = '1'
self.assert_(self.query['min-row'] == '1')
self.assert_(self.query.ToUri() == '?min-row=1')
def testMaxRow(self):
self.query['max-row'] = '100'
self.assert_(self.query['max-row'] == '100')
self.assert_(self.query.ToUri() == '?max-row=100')
def testMinCol(self):
self.query['min-col'] = '2'
self.assert_(self.query['min-col'] == '2')
self.assert_(self.query.ToUri() == '?min-col=2')
def testMaxCol(self):
self.query['max-col'] = '20'
self.assert_(self.query['max-col'] == '20')
self.assert_(self.query.ToUri() == '?max-col=20')
def testRange(self):
self.query['range'] = 'A1:B4'
self.assert_(self.query['range'] == 'A1:B4')
self.assert_(self.query.ToUri() == '?range=A1%3AB4')
def testReturnEmpty(self):
self.query['return-empty'] = 'false'
self.assert_(self.query['return-empty'] == 'false')
self.assert_(self.query.ToUri() == '?return-empty=false')
class ListQueryTest(unittest.TestCase):
def setUp(self):
self.query = gdata.spreadsheet.service.ListQuery()
def testSpreadsheetQuery(self):
self.query['sq'] = 'first=john&last=smith'
self.assert_(self.query['sq'] == 'first=john&last=smith')
self.assert_(self.query.ToUri() == '?sq=first%3Djohn%26last%3Dsmith')
def testOrderByQuery(self):
self.query['orderby'] = 'column:first'
self.assert_(self.query['orderby'] == 'column:first')
self.assert_(self.query.ToUri() == '?orderby=column%3Afirst')
def testReverseQuery(self):
self.query['reverse'] = 'true'
self.assert_(self.query['reverse'] == 'true')
self.assert_(self.query.ToUri() == '?reverse=true')
class SpreadsheetsServiceTest(unittest.TestCase):
def setUp(self):
self.key = ss_key
self.worksheet = ws_key
self.gd_client = gdata.spreadsheet.service.SpreadsheetsService()
self.gd_client.email = username
self.gd_client.password = password
self.gd_client.source = 'SpreadsheetsClient "Unit" Tests'
self.gd_client.ProgrammaticLogin()
def testGetSpreadsheetsFeed(self):
#feed = self.gd_client.GetSpreadsheetsFeed()
#self.assert_(isinstance(feed, gdata.spreadsheet.SpreadsheetsSpreadsheetsFeed))
entry = self.gd_client.GetSpreadsheetsFeed(self.key)
self.assert_(isinstance(entry, gdata.spreadsheet.SpreadsheetsSpreadsheet))
def testGetWorksheetsFeed(self):
feed = self.gd_client.GetWorksheetsFeed(self.key)
self.assert_(isinstance(feed, gdata.spreadsheet.SpreadsheetsWorksheetsFeed))
entry = self.gd_client.GetWorksheetsFeed(self.key, self.worksheet)
self.assert_(isinstance(entry, gdata.spreadsheet.SpreadsheetsWorksheet))
def testGetCellsFeed(self):
feed = self.gd_client.GetCellsFeed(self.key)
self.assert_(isinstance(feed, gdata.spreadsheet.SpreadsheetsCellsFeed))
entry = self.gd_client.GetCellsFeed(self.key, cell='R5C1')
self.assert_(isinstance(entry, gdata.spreadsheet.SpreadsheetsCell))
def testGetListFeed(self):
feed = self.gd_client.GetListFeed(self.key)
self.assert_(isinstance(feed, gdata.spreadsheet.SpreadsheetsListFeed))
entry = self.gd_client.GetListFeed(self.key, row_id='cokwr')
self.assert_(isinstance(entry, gdata.spreadsheet.SpreadsheetsList))
def testUpdateCell(self):
self.gd_client.UpdateCell(row='5', col='1', inputValue='', key=self.key)
self.gd_client.UpdateCell(row='5', col='1', inputValue='newer data',
key=self.key)
def testBatchUpdateCell(self):
cell_feed = self.gd_client.GetCellsFeed(key=self.key)
edit_cell = cell_feed.entry[0]
old_cell_value = 'a1'
# Create a batch request to change the contents of a cell.
batch_feed = gdata.spreadsheet.SpreadsheetsCellsFeed()
edit_cell.cell.inputValue = 'New Value'
batch_feed.AddUpdate(edit_cell)
result = self.gd_client.ExecuteBatch(batch_feed,
url=cell_feed.GetBatchLink().href)
self.assertEquals(len(result.entry), 1)
self.assertEquals(result.entry[0].cell.inputValue, 'New Value')
# Make a second batch request to change the cell's value back.
edit_cell = result.entry[0]
edit_cell.cell.inputValue = old_cell_value
batch_feed = gdata.spreadsheet.SpreadsheetsCellsFeed()
batch_feed.AddUpdate(edit_cell)
restored = self.gd_client.ExecuteBatch(batch_feed,
url=cell_feed.GetBatchLink().href)
self.assertEquals(len(restored.entry), 1)
self.assertEquals(restored.entry[0].cell.inputValue, old_cell_value)
def testInsertUpdateRow(self):
entry = self.gd_client.InsertRow({'a1':'new', 'b1':'row', 'c1':'was',
'd1':'here'}, self.key)
entry = self.gd_client.UpdateRow(entry, {'a1':'newer',
'b1':entry.custom['b1'].text, 'c1':entry.custom['c1'].text,
'd1':entry.custom['d1'].text})
self.gd_client.DeleteRow(entry)
def testWorksheetCRUD(self):
# Add a new worksheet.
new_worksheet = self.gd_client.AddWorksheet('worksheet_title_test_12', '2', 3, self.key)
self.assertEquals(new_worksheet.col_count.text, '3')
self.assertEquals(new_worksheet.row_count.text, '2')
self.assertEquals(new_worksheet.title.text, 'worksheet_title_test_12')
# Change the dimensions and title of the new worksheet.
new_worksheet.col_count.text = '1'
new_worksheet.title.text = 'edited worksheet test12'
edited_worksheet = self.gd_client.UpdateWorksheet(new_worksheet)
self.assertEquals(edited_worksheet.col_count.text, '1')
self.assertEquals(edited_worksheet.row_count.text, '2')
self.assertEquals(edited_worksheet.title.text, 'edited worksheet test12')
# Delete the new worksheet.
result = self.gd_client.DeleteWorksheet(edited_worksheet)
self.assertEquals(result, True)
if __name__ == '__main__':
print ('NOTE: Please run these tests only with a test account. ' +
'The tests may delete or update your data.')
username = raw_input('Please enter your username: ')
password = getpass.getpass()
ss_key = raw_input('Please enter your spreadsheet key: ')
ws_key = raw_input('Please enter your worksheet key: ')
unittest.main()
| mit |
KevinPike/pycache | pycache/factory.py | 1 | 1570 | from pycache.client import BaseClient, CacheClient
from twisted.internet.protocol import ClientFactory
class BaseFactory(ClientFactory):
def __init__(self, command, rest, version, headers, data, response):
self.response = response
self.command = command
self.rest = rest
self.headers = headers
self.data = data
self.version = version
def clientConnectionFailed(self, connector, reason):
self.response.setResponseCode(501, 'Gateway error')
self.response.responseHeaders.addRawHeader('Content-Type', 'text/html')
self.response.write('<H1>Could not connect</H1>')
self.response.finish()
class NoCacheClientFactory(BaseFactory):
protocol = BaseClient
def __init__(self, command, rest, version, headers, data, response):
BaseFactory.__init__(self, command, rest, version, headers, data, response)
def buildProtocol(self, _):
return self.protocol(self.command, self.rest, self.version,
self.headers, self.data, self.response)
class CacheClientFactory(BaseFactory):
"""Contains a cache and sends it to the protocol"""
protocol = CacheClient
def __init__(self, command, rest, version, headers, data, response, cache):
self.cache = cache
BaseFactory.__init__(self, command, rest, version, headers, data, response)
def buildProtocol(self, _):
return self.protocol(self.command, self.rest, self.version,
self.headers, self.data, self.response, self.cache)
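# Usage sketch (hedged; ``host``, ``port`` and ``cache`` are hypothetical):
# wiring the caching factory into a Twisted reactor.
#
#     from twisted.internet import reactor
#     factory = CacheClientFactory(command, rest, version, headers,
#                                  data, response, cache)
#     reactor.connectTCP(host, port, factory)
#     reactor.run()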
| mit |
jalexvig/tensorflow | tensorflow/python/kernel_tests/distributions/exponential_test.py | 11 | 6220 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import exponential as exponential_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
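# Pattern note: each test below guards its SciPy-backed reference computation
# with ``if not stats: return``, so the TensorFlow-side assertions still run
# (and only the scipy.stats comparison is skipped) when SciPy is unavailable.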
@test_util.run_all_in_graph_and_eager_modes
class ExponentialTest(test.TestCase):
def testExponentialLogPDF(self):
with session.Session():
batch_size = 6
lam = constant_op.constant([2.0] * batch_size)
lam_v = 2.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
exponential = exponential_lib.Exponential(rate=lam)
log_pdf = exponential.log_prob(x)
self.assertEqual(log_pdf.get_shape(), (6,))
pdf = exponential.prob(x)
self.assertEqual(pdf.get_shape(), (6,))
if not stats:
return
expected_log_pdf = stats.expon.logpdf(x, scale=1 / lam_v)
self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf)
self.assertAllClose(self.evaluate(pdf), np.exp(expected_log_pdf))
def testExponentialCDF(self):
with session.Session():
batch_size = 6
lam = constant_op.constant([2.0] * batch_size)
lam_v = 2.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
exponential = exponential_lib.Exponential(rate=lam)
cdf = exponential.cdf(x)
self.assertEqual(cdf.get_shape(), (6,))
if not stats:
return
expected_cdf = stats.expon.cdf(x, scale=1 / lam_v)
self.assertAllClose(self.evaluate(cdf), expected_cdf)
def testExponentialMean(self):
with session.Session():
lam_v = np.array([1.0, 4.0, 2.5])
exponential = exponential_lib.Exponential(rate=lam_v)
self.assertEqual(exponential.mean().get_shape(), (3,))
if not stats:
return
expected_mean = stats.expon.mean(scale=1 / lam_v)
self.assertAllClose(self.evaluate(exponential.mean()), expected_mean)
def testExponentialVariance(self):
with session.Session():
lam_v = np.array([1.0, 4.0, 2.5])
exponential = exponential_lib.Exponential(rate=lam_v)
self.assertEqual(exponential.variance().get_shape(), (3,))
if not stats:
return
expected_variance = stats.expon.var(scale=1 / lam_v)
self.assertAllClose(
self.evaluate(exponential.variance()), expected_variance)
def testExponentialEntropy(self):
with session.Session():
lam_v = np.array([1.0, 4.0, 2.5])
exponential = exponential_lib.Exponential(rate=lam_v)
self.assertEqual(exponential.entropy().get_shape(), (3,))
if not stats:
return
expected_entropy = stats.expon.entropy(scale=1 / lam_v)
self.assertAllClose(
self.evaluate(exponential.entropy()), expected_entropy)
def testExponentialSample(self):
with self.test_session():
lam = constant_op.constant([3.0, 4.0])
lam_v = [3.0, 4.0]
n = constant_op.constant(100000)
exponential = exponential_lib.Exponential(rate=lam)
samples = exponential.sample(n, seed=137)
sample_values = self.evaluate(samples)
self.assertEqual(sample_values.shape, (100000, 2))
self.assertFalse(np.any(sample_values < 0.0))
if not stats:
return
for i in range(2):
self.assertLess(
stats.kstest(
sample_values[:, i], stats.expon(scale=1.0 / lam_v[i]).cdf)[0],
0.01)
def testExponentialSampleMultiDimensional(self):
with self.test_session():
batch_size = 2
lam_v = [3.0, 22.0]
lam = constant_op.constant([lam_v] * batch_size)
exponential = exponential_lib.Exponential(rate=lam)
n = 100000
samples = exponential.sample(n, seed=138)
self.assertEqual(samples.get_shape(), (n, batch_size, 2))
sample_values = self.evaluate(samples)
self.assertFalse(np.any(sample_values < 0.0))
if not stats:
return
for i in range(2):
self.assertLess(
stats.kstest(
sample_values[:, 0, i],
stats.expon(scale=1.0 / lam_v[i]).cdf)[0],
0.01)
self.assertLess(
stats.kstest(
sample_values[:, 1, i],
stats.expon(scale=1.0 / lam_v[i]).cdf)[0],
0.01)
def testFullyReparameterized(self):
lam = constant_op.constant([0.1, 1.0])
with backprop.GradientTape() as tape:
tape.watch(lam)
exponential = exponential_lib.Exponential(rate=lam)
samples = exponential.sample(100)
grad_lam = tape.gradient(samples, lam)
self.assertIsNotNone(grad_lam)
def testExponentialWithSoftplusRate(self):
with self.test_session():
lam = [-2.2, -3.4]
exponential = exponential_lib.ExponentialWithSoftplusRate(rate=lam)
self.assertAllClose(
self.evaluate(nn_ops.softplus(lam)), self.evaluate(exponential.rate))
if __name__ == "__main__":
test.main()
| apache-2.0 |
aismail/proxysql-0.2 | test/proxysql_ping_thread.py | 6 | 2953 | from email.mime.text import MIMEText
import smtplib
from threading import Thread
import time
import MySQLdb
class ProxySQL_Ping_Thread(Thread):
"""ProxySQL_Ping_Thread's purpose is to do a continuous health check of the
ProxySQL daemon when tests are running against it. When it has crashed
or it's simply not responding anymore, it will send an e-mail to draw the
attention of the developer so that he or she will examine the situation.
This is because the test suite is designed to be long running and we want
    to find out as quickly as possible when the tests run into trouble without
continuously keeping an eye on the tests.
"""
def __init__(self, config, **kwargs):
self.username = config.get('ProxySQL', 'username')
self.password = config.get('ProxySQL', 'password')
self.hostname = config.get('ProxySQL', 'hostname')
self.port = int(config.get('ProxySQL', 'port'))
self.db = config.get('Ping', 'db')
self.ping_command = config.get('Ping', 'ping_command')
self.interval = int(config.get('Ping', 'ping_interval'))
self.max_failed_connections = int(config.get('Ping', 'failed_connections_before_alert'))
self.config=config
self.running = True
self.failed_connections = 0
super(ProxySQL_Ping_Thread, self).__init__(**kwargs)
def run(self):
while self.running:
time.sleep(self.interval)
if not self.running:
return
try:
connection = MySQLdb.connect(self.hostname,
self.username,
self.password,
port=self.port,
db=self.db,
connect_timeout=30)
cursor = connection.cursor()
cursor.execute(self.ping_command)
rows = cursor.fetchall()
cursor.close()
connection.close()
print("ProxySQL server @ %s:%d responded to query %s with %r" %
(self.hostname, self.port, self.ping_command, rows))
self.failed_connections = 0
except:
self.failed_connections = self.failed_connections + 1
if self.failed_connections >= self.max_failed_connections:
self.send_error_email()
self.running = False
return
def stop(self):
self.running = False
def send_error_email(self):
msg = MIMEText("ProxySQL daemon stopped responding during tests.\n"
"Please check if it has crashed and you have been left with a gdb console on!")
# me == the sender's email address
# you == the recipient's email address
msg['Subject'] = 'Daemon has stopped responding'
msg['From'] = self.config.get('Email', 'from')
msg['To'] = self.config.get('Email', 'to')
# Send the message via our own SMTP server, but don't include the
# envelope header.
s = smtplib.SMTP(self.config.get('Email', 'smtp_server'),
int(self.config.get('Email', 'smtp_port')))
s.ehlo()
s.starttls()
s.login(self.config.get('Email', 'username'),
self.config.get('Email', 'password'))
s.sendmail(self.config.get('Email', 'from'),
[self.config.get('Email', 'to')],
msg.as_string())
s.quit()
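# Usage sketch (assumes a ConfigParser-style ``config`` exposing the
# [ProxySQL], [Ping] and [Email] sections consumed by __init__):
#
#     ping = ProxySQL_Ping_Thread(config)
#     ping.start()
#     ...            # run the long-lived test suite
#     ping.stop()
#     ping.join()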
| gpl-3.0 |
scorphus/django | django/db/__init__.py | 376 | 2322 | from django.core import signals
from django.db.utils import (
DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, ConnectionHandler,
ConnectionRouter, DatabaseError, DataError, Error, IntegrityError,
InterfaceError, InternalError, NotSupportedError, OperationalError,
ProgrammingError,
)
__all__ = [
'backend', 'connection', 'connections', 'router', 'DatabaseError',
'IntegrityError', 'InternalError', 'ProgrammingError', 'DataError',
'NotSupportedError', 'Error', 'InterfaceError', 'OperationalError',
'DEFAULT_DB_ALIAS', 'DJANGO_VERSION_PICKLE_KEY'
]
connections = ConnectionHandler()
router = ConnectionRouter()
# `connection`, `DatabaseError` and `IntegrityError` are convenient aliases
# for backend bits.
# DatabaseWrapper.__init__() takes a dictionary, not a settings module, so we
# manually create the dictionary from the settings, passing only the settings
# that the database backends care about.
# We load all these up for backwards compatibility, you should use
# connections['default'] instead.
class DefaultConnectionProxy(object):
"""
Proxy for accessing the default DatabaseWrapper object's attributes. If you
need to access the DatabaseWrapper object itself, use
connections[DEFAULT_DB_ALIAS] instead.
"""
def __getattr__(self, item):
return getattr(connections[DEFAULT_DB_ALIAS], item)
def __setattr__(self, name, value):
return setattr(connections[DEFAULT_DB_ALIAS], name, value)
def __delattr__(self, name):
return delattr(connections[DEFAULT_DB_ALIAS], name)
def __eq__(self, other):
return connections[DEFAULT_DB_ALIAS] == other
def __ne__(self, other):
return connections[DEFAULT_DB_ALIAS] != other
connection = DefaultConnectionProxy()
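# Sketch: attribute access on ``connection`` is forwarded to the default
# alias, so the following two calls are equivalent:
#     connection.cursor()
#     connections[DEFAULT_DB_ALIAS].cursor()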
# Register an event to reset saved queries when a Django request is started.
def reset_queries(**kwargs):
for conn in connections.all():
conn.queries_log.clear()
signals.request_started.connect(reset_queries)
# Register an event to reset transaction state and close connections past
# their lifetime.
def close_old_connections(**kwargs):
for conn in connections.all():
conn.close_if_unusable_or_obsolete()
signals.request_started.connect(close_old_connections)
signals.request_finished.connect(close_old_connections)
| bsd-3-clause |
40223240/2015cdb_g3_40223240 | static/Brython3.1.1-20150328-091302/Lib/test/support.py | 603 | 68067 | """Supporting definitions for the Python regression tests."""
if __name__ != 'test.support':
raise ImportError('support must be imported from the test package')
import contextlib
import errno
import functools
import gc
import socket
import sys
import os
import platform
import shutil
import warnings
import unittest
import importlib
import collections.abc
import re
import subprocess
import imp
import time
import sysconfig
import fnmatch
import logging.handlers
import struct
import tempfile
import _testcapi
try:
import _thread, threading
except ImportError:
_thread = None
threading = None
# BCE fixme brython.
# causes an undefined is not a function error. Will track down later.
#try:
# import multiprocessing.process
#except ImportError:
# multiprocessing = None
multiprocessing = None
#try:
# import zlib
#except ImportError:
# zlib = None
#try:
# import bz2
#except ImportError:
# bz2 = None
#try:
# import lzma
#except ImportError:
# lzma = None
zlib = bz2 = lzma = None
__all__ = [
"Error", "TestFailed", "ResourceDenied", "import_module", "verbose",
"use_resources", "max_memuse", "record_original_stdout",
"get_original_stdout", "unload", "unlink", "rmtree", "forget",
"is_resource_enabled", "requires", "requires_freebsd_version",
"requires_linux_version", "requires_mac_ver", "find_unused_port",
"bind_port", "IPV6_ENABLED", "is_jython", "TESTFN", "HOST", "SAVEDCWD",
"temp_cwd", "findfile", "create_empty_file", "sortdict",
"check_syntax_error", "open_urlresource", "check_warnings", "CleanImport",
"EnvironmentVarGuard", "TransientResource", "captured_stdout",
"captured_stdin", "captured_stderr", "time_out", "socket_peer_reset",
"ioerror_peer_reset", "run_with_locale", 'temp_umask',
"transient_internet", "set_memlimit", "bigmemtest", "bigaddrspacetest",
"BasicTestRunner", "run_unittest", "run_doctest", "threading_setup",
"threading_cleanup", "reap_children", "cpython_only", "check_impl_detail",
"get_attribute", "swap_item", "swap_attr", "requires_IEEE_754",
"TestHandler", "Matcher", "can_symlink", "skip_unless_symlink",
"skip_unless_xattr", "import_fresh_module", "requires_zlib",
"PIPE_MAX_SIZE", "failfast", "anticipate_failure", "run_with_tz",
"requires_bz2", "requires_lzma", "suppress_crash_popup",
]
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class ResourceDenied(unittest.SkipTest):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
    has not been enabled. It is used to distinguish between expected
and unexpected skips.
"""
@contextlib.contextmanager
def _ignore_deprecated_imports(ignore=True):
"""Context manager to suppress package and module deprecation
warnings when importing them.
If ignore is False, this context manager has no effect."""
if ignore:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", ".+ (module|package)",
DeprecationWarning)
yield
else:
yield
def import_module(name, deprecated=False):
"""Import and return the module to be tested, raising SkipTest if
it is not available.
If deprecated is True, any module or package deprecation messages
will be suppressed."""
with _ignore_deprecated_imports(deprecated):
try:
return importlib.import_module(name)
except ImportError as msg:
raise unittest.SkipTest(str(msg))
def _save_and_remove_module(name, orig_modules):
"""Helper function to save and remove a module from sys.modules
Raise ImportError if the module can't be imported."""
# try to import the module and raise an error if it can't be imported
if name not in sys.modules:
__import__(name)
del sys.modules[name]
for modname in list(sys.modules):
if modname == name or modname.startswith(name + '.'):
orig_modules[modname] = sys.modules[modname]
del sys.modules[modname]
def _save_and_block_module(name, orig_modules):
"""Helper function to save and block a module in sys.modules
Return True if the module was in sys.modules, False otherwise."""
saved = True
try:
orig_modules[name] = sys.modules[name]
except KeyError:
saved = False
sys.modules[name] = None
return saved
def anticipate_failure(condition):
"""Decorator to mark a test that is known to be broken in some cases
Any use of this decorator should have a comment identifying the
associated tracker issue.
"""
if condition:
return unittest.expectedFailure
return lambda f: f
def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
"""Imports and returns a module, deliberately bypassing the sys.modules cache
and importing a fresh copy of the module. Once the import is complete,
the sys.modules cache is restored to its original state.
Modules named in fresh are also imported anew if needed by the import.
If one of these modules can't be imported, None is returned.
Importing of modules named in blocked is prevented while the fresh import
takes place.
If deprecated is True, any module or package deprecation messages
will be suppressed."""
# NOTE: test_heapq, test_json and test_warnings include extra sanity checks
# to make sure that this utility function is working as expected
with _ignore_deprecated_imports(deprecated):
# Keep track of modules saved for later restoration as well
# as those which just need a blocking entry removed
orig_modules = {}
names_to_remove = []
_save_and_remove_module(name, orig_modules)
        # Brython addition: import the browser console up front so the debug
        # logging in the except clause below cannot raise a NameError.
        from javascript import console
        try:
for fresh_name in fresh:
_save_and_remove_module(fresh_name, orig_modules)
for blocked_name in blocked:
if not _save_and_block_module(blocked_name, orig_modules):
names_to_remove.append(blocked_name)
fresh_module = importlib.import_module(name)
except ImportError:
console.log('ImportError')
fresh_module = None
finally:
for orig_name, module in orig_modules.items():
sys.modules[orig_name] = module
for name_to_remove in names_to_remove:
del sys.modules[name_to_remove]
        console.log("fresh_module", fresh_module)
return fresh_module
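# Usage sketch for import_fresh_module(): get the pure-Python heapq while
# blocking its C accelerator, without disturbing sys.modules for other tests.
#     py_heapq = import_fresh_module('heapq', blocked=['_heapq'])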
def get_attribute(obj, name):
"""Get an attribute, raising SkipTest if AttributeError is raised."""
try:
attribute = getattr(obj, name)
except AttributeError:
raise unittest.SkipTest("object %r has no attribute %r" % (obj, name))
else:
return attribute
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
# small sizes, to make sure they work.)
real_max_memuse = 0
failfast = False
match_tests = None
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
try:
del sys.modules[name]
except KeyError:
pass
if sys.platform.startswith("win"):
def _waitfor(func, pathname, waitall=False):
        # Perform the operation
func(pathname)
# Now setup the wait loop
if waitall:
dirname = pathname
else:
dirname, name = os.path.split(pathname)
dirname = dirname or '.'
# Check for `pathname` to be removed from the filesystem.
# The exponential backoff of the timeout amounts to a total
# of ~1 second after which the deletion is probably an error
# anyway.
        # Testing on an i7@4.3GHz shows that usually only 1 iteration is
# required when contention occurs.
timeout = 0.001
while timeout < 1.0:
            # Note we are only testing for the existence of the file(s) in
# the contents of the directory regardless of any security or
# access rights. If we have made it this far, we have sufficient
# permissions to do that much using Python's equivalent of the
# Windows API FindFirstFile.
# Other Windows APIs can fail or give incorrect results when
# dealing with files that are pending deletion.
L = os.listdir(dirname)
if not (L if waitall else name in L):
return
# Increase the timeout and try again
time.sleep(timeout)
timeout *= 2
warnings.warn('tests may fail, delete still pending for ' + pathname,
RuntimeWarning, stacklevel=4)
def _unlink(filename):
_waitfor(os.unlink, filename)
def _rmdir(dirname):
_waitfor(os.rmdir, dirname)
def _rmtree(path):
def _rmtree_inner(path):
for name in os.listdir(path):
fullname = os.path.join(path, name)
if os.path.isdir(fullname):
_waitfor(_rmtree_inner, fullname, waitall=True)
os.rmdir(fullname)
else:
os.unlink(fullname)
_waitfor(_rmtree_inner, path, waitall=True)
_waitfor(os.rmdir, path)
else:
_unlink = os.unlink
_rmdir = os.rmdir
_rmtree = shutil.rmtree
def unlink(filename):
try:
_unlink(filename)
except OSError as error:
# The filename need not exist.
if error.errno not in (errno.ENOENT, errno.ENOTDIR):
raise
def rmdir(dirname):
try:
_rmdir(dirname)
except OSError as error:
# The directory need not exist.
if error.errno != errno.ENOENT:
raise
def rmtree(path):
try:
_rmtree(path)
except OSError as error:
if error.errno != errno.ENOENT:
raise
def make_legacy_pyc(source):
"""Move a PEP 3147 pyc/pyo file to its legacy pyc/pyo location.
The choice of .pyc or .pyo extension is done based on the __debug__ flag
value.
:param source: The file system path to the source file. The source file
does not need to exist, however the PEP 3147 pyc file must exist.
:return: The file system path to the legacy pyc file.
"""
pyc_file = imp.cache_from_source(source)
up_one = os.path.dirname(os.path.abspath(source))
legacy_pyc = os.path.join(up_one, source + ('c' if __debug__ else 'o'))
os.rename(pyc_file, legacy_pyc)
return legacy_pyc
def forget(modname):
"""'Forget' a module was ever imported.
This removes the module from sys.modules and deletes any PEP 3147 or
legacy .pyc and .pyo files.
"""
unload(modname)
for dirname in sys.path:
source = os.path.join(dirname, modname + '.py')
# It doesn't matter if they exist or not, unlink all possible
# combinations of PEP 3147 and legacy pyc and pyo files.
unlink(source + 'c')
unlink(source + 'o')
unlink(imp.cache_from_source(source, debug_override=True))
unlink(imp.cache_from_source(source, debug_override=False))
# On some platforms, should not run gui test even if it is allowed
# in `use_resources'.
if sys.platform.startswith('win'):
import ctypes
import ctypes.wintypes
def _is_gui_available():
UOI_FLAGS = 1
WSF_VISIBLE = 0x0001
class USEROBJECTFLAGS(ctypes.Structure):
_fields_ = [("fInherit", ctypes.wintypes.BOOL),
("fReserved", ctypes.wintypes.BOOL),
("dwFlags", ctypes.wintypes.DWORD)]
dll = ctypes.windll.user32
h = dll.GetProcessWindowStation()
if not h:
raise ctypes.WinError()
uof = USEROBJECTFLAGS()
needed = ctypes.wintypes.DWORD()
res = dll.GetUserObjectInformationW(h,
UOI_FLAGS,
ctypes.byref(uof),
ctypes.sizeof(uof),
ctypes.byref(needed))
if not res:
raise ctypes.WinError()
return bool(uof.dwFlags & WSF_VISIBLE)
else:
def _is_gui_available():
return True
def is_resource_enabled(resource):
"""Test whether a resource is enabled. Known resources are set by
regrtest.py."""
return use_resources is not None and resource in use_resources
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available.
If the caller's module is __main__ then automatically return True. The
possibility of False being returned occurs when regrtest.py is
executing.
"""
if resource == 'gui' and not _is_gui_available():
raise unittest.SkipTest("Cannot use the 'gui' resource")
# see if the caller's module is __main__ - if so, treat as if
# the resource was set
if sys._getframe(1).f_globals.get("__name__") == "__main__":
return
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the %r resource not enabled" % resource
raise ResourceDenied(msg)
def _requires_unix_version(sysname, min_version):
"""Decorator raising SkipTest if the OS is `sysname` and the version is less
than `min_version`.
For example, @_requires_unix_version('FreeBSD', (7, 2)) raises SkipTest if
the FreeBSD version is less than 7.2.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if platform.system() == sysname:
version_txt = platform.release().split('-', 1)[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"%s version %s or higher required, not %s"
                            % (sysname, min_version_txt, version_txt))
            return func(*args, **kw)
        return wrapper
return decorator
def requires_freebsd_version(*min_version):
"""Decorator raising SkipTest if the OS is FreeBSD and the FreeBSD version is
less than `min_version`.
For example, @requires_freebsd_version(7, 2) raises SkipTest if the FreeBSD
version is less than 7.2.
"""
return _requires_unix_version('FreeBSD', min_version)
def requires_linux_version(*min_version):
"""Decorator raising SkipTest if the OS is Linux and the Linux version is
less than `min_version`.
For example, @requires_linux_version(2, 6, 32) raises SkipTest if the Linux
version is less than 2.6.32.
"""
return _requires_unix_version('Linux', min_version)
def requires_mac_ver(*min_version):
"""Decorator raising SkipTest if the OS is Mac OS X and the OS X
    version is less than min_version.
    For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version
    is less than 10.5.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if sys.platform == 'darwin':
version_txt = platform.mac_ver()[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"Mac OS X %s or higher required, not %s"
% (min_version_txt, version_txt))
return func(*args, **kw)
wrapper.min_version = min_version
return wrapper
return decorator
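# Usage sketch for the version-gate decorators above:
#     @requires_mac_ver(10, 5)
#     def test_kqueue_details(self): ...
#     @requires_linux_version(2, 6, 32)
#     def test_epoll_flag(self): ...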
HOST = 'localhost'
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the same family and type as
the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
Either this method or bind_port() should be used for any tests where a
server socket needs to be bound to a particular port for the duration of
the test. Which one to use depends on whether the calling code is creating
a python socket, or if an unused port needs to be provided in a constructor
or passed to an external program (i.e. the -accept argument to openssl's
s_server mode). Always prefer bind_port() over find_unused_port() where
possible. Hard coded ports should *NEVER* be used. As soon as a server
socket is bound to a hard coded port, the ability to run multiple instances
of the test simultaneously on the same host is compromised, which makes the
test a ticking time bomb in a buildbot environment. On Unix buildbots, this
may simply manifest as a failed test, which can be recovered from without
intervention in most cases, but on Windows, the entire python process can
completely and utterly wedge, requiring someone to log in to the buildbot
and manually kill the affected process.
(This is easy to reproduce on Windows, unfortunately, and can be traced to
the SO_REUSEADDR socket option having different semantics on Windows versus
Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
listen and then accept connections on identical host/ports. An EADDRINUSE
socket.error will be raised at some point (depending on the platform and
the order bind and listen were called on each socket).
However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
will ever be raised when attempting to bind two identical host/ports. When
accept() is called on each socket, the second caller's process will steal
the port from the first caller, leaving them both in an awkwardly wedged
state where they'll no longer respond to any signals or graceful kills, and
must be forcibly killed via OpenProcess()/TerminateProcess().
The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
instead of SO_REUSEADDR, which effectively affords the same semantics as
SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
Source world compared to Windows ones, this is a common mistake. A quick
look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
openssl.exe is called with the 's_server' option, for example. See
http://bugs.python.org/issue2550 for more info. The following site also
has a very thorough description about the implications of both REUSEADDR
and EXCLUSIVEADDRUSE on Windows:
http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
XXX: although this approach is a vast improvement on previous attempts to
elicit unused ports, it rests heavily on the assumption that the ephemeral
port returned to us by the OS won't immediately be dished back out to some
other process when we close and delete our temporary socket but before our
calling code has a chance to bind the returned port. We can deal with this
issue if/when we come across it.
"""
tempsock = socket.socket(family, socktype)
port = bind_port(tempsock)
tempsock.close()
del tempsock
return port
def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise TestFailed("tests should never set the SO_REUSEADDR " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise TestFailed("tests should never set the SO_REUSEPORT " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
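# Illustrative usage (a sketch added for clarity): the preferred pattern is
# to create the socket first and let bind_port() pick the ephemeral port.
#
#     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     port = bind_port(sock)      # bound to (HOST, <ephemeral port>)
#     sock.listen(1)
#     ...                         # connect a client to (HOST, port)
#     sock.close()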
def _is_ipv6_enabled():
"""Check whether IPv6 is enabled on this host."""
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind(('::1', 0))
return True
except (socket.error, socket.gaierror):
pass
finally:
if sock:
sock.close()
return False
IPV6_ENABLED = _is_ipv6_enabled()
# A constant likely larger than the underlying OS pipe buffer size, so that
# writes of this size are likely to block.
# The Windows limit seems to be around 512B, and many Unix kernels have a 64K
# pipe buffer size or 16*PAGE_SIZE: take a few megs to be sure.
PIPE_MAX_SIZE = 3 * 1000 * 1000
# decorator for skipping tests on non-IEEE 754 platforms
requires_IEEE_754 = unittest.skipUnless(
float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
requires_zlib = unittest.skipUnless(zlib, 'requires zlib')
requires_bz2 = unittest.skipUnless(bz2, 'requires bz2')
requires_lzma = unittest.skipUnless(lzma, 'requires lzma')
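# Illustrative usage of the skip decorators above (the test case is
# hypothetical, added for clarity):
#
#     class CompressionTest(unittest.TestCase):
#         @requires_zlib
#         def test_roundtrip(self):
#             data = b'x' * 1000
#             self.assertEqual(zlib.decompress(zlib.compress(data)), data)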
is_jython = sys.platform.startswith('java')
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
else:
TESTFN = '@test'
# Disambiguate TESTFN for parallel testing, while letting it remain a valid
# module name.
TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
# FS_NONASCII: non-ASCII character encodable by os.fsencode(),
# or None if there is no such character.
FS_NONASCII = None
for character in (
# First try printable and common characters to have a readable filename.
# For each character, the encodings listed are just examples of encodings
# able to encode the character (the list is not exhaustive).
# U+00E6 (Latin Small Letter Ae): cp1252, iso-8859-1
'\u00E6',
# U+0130 (Latin Capital Letter I With Dot Above): cp1254, iso8859_3
'\u0130',
# U+0141 (Latin Capital Letter L With Stroke): cp1250, cp1257
'\u0141',
# U+03C6 (Greek Small Letter Phi): cp1253
'\u03C6',
# U+041A (Cyrillic Capital Letter Ka): cp1251
'\u041A',
# U+05D0 (Hebrew Letter Alef): Encodable to cp424
'\u05D0',
# U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic
'\u060C',
# U+062A (Arabic Letter Teh): cp720
'\u062A',
# U+0E01 (Thai Character Ko Kai): cp874
'\u0E01',
# Then try more "special" characters. "special" because they may be
# interpreted or displayed differently depending on the exact locale
# encoding and the font.
# U+00A0 (No-Break Space)
'\u00A0',
# U+20AC (Euro Sign)
'\u20AC',
):
try:
os.fsdecode(os.fsencode(character))
except UnicodeError:
pass
else:
FS_NONASCII = character
break
# TESTFN_UNICODE is a non-ascii filename
TESTFN_UNICODE = TESTFN + "-\xe0\xf2\u0258\u0141\u011f"
if sys.platform == 'darwin':
# In Mac OS X's VFS API file names are, by definition, canonically
# decomposed Unicode, encoded using UTF-8. See QA1173:
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
import unicodedata
TESTFN_UNICODE = unicodedata.normalize('NFD', TESTFN_UNICODE)
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNENCODABLE is a filename (str type) that should *not* be able to be
# encoded by the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename.
TESTFN_UNENCODABLE = None
if os.name in ('nt', 'ce'):
# skip win32s (0) or Windows 9x/ME (1)
if sys.getwindowsversion().platform >= 2:
# Different kinds of characters from various languages to minimize the
# probability that the whole name is encodable to MBCS (issue #9819)
TESTFN_UNENCODABLE = TESTFN + "-\u5171\u0141\u2661\u0363\uDC80"
try:
TESTFN_UNENCODABLE.encode(TESTFN_ENCODING)
except UnicodeEncodeError:
pass
else:
print('WARNING: The filename %r CAN be encoded by the filesystem encoding (%s). '
'Unicode filename tests may not be effective'
% (TESTFN_UNENCODABLE, TESTFN_ENCODING))
TESTFN_UNENCODABLE = None
# Mac OS X denies unencodable filenames (invalid utf-8)
elif sys.platform != 'darwin':
try:
# ascii and utf-8 cannot encode the byte 0xff
b'\xff'.decode(TESTFN_ENCODING)
except UnicodeDecodeError:
# 0xff will be encoded using the surrogate character u+DCFF
TESTFN_UNENCODABLE = TESTFN \
+ b'-\xff'.decode(TESTFN_ENCODING, 'surrogateescape')
else:
# File system encoding (eg. ISO-8859-* encodings) can encode
# the byte 0xff. Skip some unicode filename tests.
pass
# TESTFN_UNDECODABLE is a filename (bytes type) that should *not* be able to be
# decoded from the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename (ex: the latin1 encoding can decode any byte
# sequence). On UNIX, TESTFN_UNDECODABLE can be decoded by os.fsdecode() thanks
# to the surrogateescape error handler (PEP 383), but not from the filesystem
# encoding in strict mode.
TESTFN_UNDECODABLE = None
''' #fixme brython
for name in (
# b'\xff' is not decodable by os.fsdecode() with code page 932. Windows
# accepts it when creating a file or a directory, but refuses to enter such
# a directory (when the bytes name is used). So test b'\xe7' first: it is
# not decodable from cp932.
b'\xe7w\xf0',
# undecodable from ASCII, UTF-8
b'\xff',
# undecodable from iso8859-3, iso8859-6, iso8859-7, cp424, iso8859-8, cp856
# and cp857
b'\xae\xd5'
# undecodable from UTF-8 (UNIX and Mac OS X)
b'\xed\xb2\x80', b'\xed\xb4\x80',
# undecodable from shift_jis, cp869, cp874, cp932, cp1250, cp1251, cp1252,
# cp1253, cp1254, cp1255, cp1257, cp1258
b'\x81\x98',
):
try:
name.decode(TESTFN_ENCODING)
except UnicodeDecodeError:
TESTFN_UNDECODABLE = os.fsencode(TESTFN) + name
break
'''
if FS_NONASCII:
TESTFN_NONASCII = TESTFN + '-' + FS_NONASCII
else:
TESTFN_NONASCII = None
# Save the initial cwd
SAVEDCWD = os.getcwd()
@contextlib.contextmanager
def temp_cwd(name='tempcwd', quiet=False, path=None):
"""
Context manager that temporarily changes the CWD.
An existing path may be provided as *path*, in which case this
function makes no changes to the file system.
Otherwise, the new CWD is created in the current directory and it's
named *name*. If *quiet* is False (default) and it's not possible to
create or change the CWD, an error is raised. If it's True, only a
warning is raised and the original CWD is used.
"""
saved_dir = os.getcwd()
is_temporary = False
if path is None:
path = name
try:
os.mkdir(name)
is_temporary = True
except OSError:
if not quiet:
raise
warnings.warn('tests may fail, unable to create temp CWD ' + name,
RuntimeWarning, stacklevel=3)
try:
os.chdir(path)
except OSError:
if not quiet:
raise
warnings.warn('tests may fail, unable to change the CWD to ' + path,
RuntimeWarning, stacklevel=3)
try:
yield os.getcwd()
finally:
os.chdir(saved_dir)
if is_temporary:
rmtree(name)
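# Illustrative usage (a sketch added for clarity): run a block of code in a
# scratch working directory that is removed afterwards.
#
#     with temp_cwd('scratch') as cwd:
#         create_empty_file('artifact.txt')   # created inside `cwd`
#     # back in the original CWD here; 'scratch' has been removed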
if hasattr(os, "umask"):
@contextlib.contextmanager
def temp_umask(umask):
"""Context manager that temporarily sets the process umask."""
oldmask = os.umask(umask)
try:
yield
finally:
os.umask(oldmask)
def findfile(file, here=__file__, subdir=None):
"""Try to find a file on sys.path and the working directory. If it is not
found the argument passed to the function is returned (this does not
necessarily signal failure; could still be the legitimate path)."""
if os.path.isabs(file):
return file
if subdir is not None:
file = os.path.join(subdir, file)
path = sys.path
path = [os.path.dirname(here)] + path
for dn in path:
fn = os.path.join(dn, file)
if os.path.exists(fn): return fn
return file
def create_empty_file(filename):
"""Create an empty file. If the file already exists, truncate it."""
fd = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.close(fd)
def sortdict(dict):
"Like repr(dict), but in sorted order."
items = sorted(dict.items())
reprpairs = ["%r: %r" % pair for pair in items]
withcommas = ", ".join(reprpairs)
return "{%s}" % withcommas
def make_bad_fd():
"""
Create an invalid file descriptor by opening and closing a file and return
its fd.
"""
file = open(TESTFN, "wb")
try:
return file.fileno()
finally:
file.close()
unlink(TESTFN)
def check_syntax_error(testcase, statement):
testcase.assertRaises(SyntaxError, compile, statement,
'<test string>', 'exec')
def open_urlresource(url, *args, **kw):
import urllib.request, urllib.parse
check = kw.pop('check', None)
filename = urllib.parse.urlparse(url)[2].split('/')[-1] # '/': it's URL!
fn = os.path.join(os.path.dirname(__file__), "data", filename)
def check_valid_file(fn):
f = open(fn, *args, **kw)
if check is None:
return f
elif check(f):
f.seek(0)
return f
f.close()
if os.path.exists(fn):
f = check_valid_file(fn)
if f is not None:
return f
unlink(fn)
# Verify the requirement before downloading the file
requires('urlfetch')
print('\tfetching %s ...' % url, file=get_original_stdout())
f = urllib.request.urlopen(url, timeout=15)
try:
with open(fn, "wb") as out:
s = f.read()
while s:
out.write(s)
s = f.read()
finally:
f.close()
f = check_valid_file(fn)
if f is not None:
return f
raise TestFailed('invalid resource %r' % fn)
class WarningsRecorder(object):
"""Convenience wrapper for the warnings list returned on
entry to the warnings.catch_warnings() context manager.
"""
def __init__(self, warnings_list):
self._warnings = warnings_list
self._last = 0
def __getattr__(self, attr):
if len(self._warnings) > self._last:
return getattr(self._warnings[-1], attr)
elif attr in warnings.WarningMessage._WARNING_DETAILS:
return None
raise AttributeError("%r has no attribute %r" % (self, attr))
@property
def warnings(self):
return self._warnings[self._last:]
def reset(self):
self._last = len(self._warnings)
def _filterwarnings(filters, quiet=False):
"""Catch the warnings, then check if all the expected
warnings have been raised and re-raise unexpected warnings.
If 'quiet' is True, only re-raise the unexpected warnings.
"""
# Clear the warning registry of the calling module
# in order to re-raise the warnings.
frame = sys._getframe(2)
registry = frame.f_globals.get('__warningregistry__')
if registry:
registry.clear()
with warnings.catch_warnings(record=True) as w:
# Set filter "always" to record all warnings. Because
# test_warnings swaps the module, we need to look it up in
# the sys.modules dictionary.
sys.modules['warnings'].simplefilter("always")
yield WarningsRecorder(w)
# Filter the recorded warnings
reraise = list(w)
missing = []
for msg, cat in filters:
seen = False
for w in reraise[:]:
warning = w.message
# Filter out the matching messages
if (re.match(msg, str(warning), re.I) and
issubclass(warning.__class__, cat)):
seen = True
reraise.remove(w)
if not seen and not quiet:
# This filter caught nothing
missing.append((msg, cat.__name__))
if reraise:
raise AssertionError("unhandled warning %s" % reraise[0])
if missing:
raise AssertionError("filter (%r, %s) did not catch any warning" %
missing[0])
@contextlib.contextmanager
def check_warnings(*filters, **kwargs):
"""Context manager to silence warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default True without argument,
default False if some filters are defined)
Without argument, it defaults to:
check_warnings(("", Warning), quiet=True)
"""
quiet = kwargs.get('quiet')
if not filters:
filters = (("", Warning),)
# Preserve backward compatibility
if quiet is None:
quiet = True
return _filterwarnings(filters, quiet)
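# Illustrative usage (a sketch added for clarity): assert that a block emits
# a warning matching the given (message regexp, category) filter; with
# quiet=False, a filter that catches nothing raises AssertionError.
#
#     with check_warnings(("deprecated", DeprecationWarning), quiet=False):
#         warnings.warn("this API is deprecated", DeprecationWarning)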
class CleanImport(object):
"""Context manager to force import to return a new module reference.
This is useful for testing module-level behaviours, such as
the emission of a DeprecationWarning on import.
Use like this:
with CleanImport("foo"):
importlib.import_module("foo") # new reference
"""
def __init__(self, *module_names):
self.original_modules = sys.modules.copy()
for module_name in module_names:
if module_name in sys.modules:
module = sys.modules[module_name]
# It is possible that module_name is just an alias for
# another module (e.g. stub for modules renamed in 3.x).
# In that case, we also need delete the real module to clear
# the import cache.
if module.__name__ != module_name:
del sys.modules[module.__name__]
del sys.modules[module_name]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.modules.update(self.original_modules)
class EnvironmentVarGuard(collections.abc.MutableMapping):
"""Class to help protect the environment variable properly. Can be used as
a context manager."""
def __init__(self):
self._environ = os.environ
self._changed = {}
def __getitem__(self, envvar):
return self._environ[envvar]
def __setitem__(self, envvar, value):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
self._environ[envvar] = value
def __delitem__(self, envvar):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
if envvar in self._environ:
del self._environ[envvar]
def keys(self):
return self._environ.keys()
def __iter__(self):
return iter(self._environ)
def __len__(self):
return len(self._environ)
def set(self, envvar, value):
self[envvar] = value
def unset(self, envvar):
del self[envvar]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
for (k, v) in self._changed.items():
if v is None:
if k in self._environ:
del self._environ[k]
else:
self._environ[k] = v
os.environ = self._environ
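# Illustrative usage (a sketch added for clarity): temporarily override and
# remove environment variables, restoring the originals on exit.
#
#     with EnvironmentVarGuard() as env:
#         env.set('LANG', 'C')
#         env.unset('HOME')
#         ...                     # code that reads os.environ
#     # original LANG/HOME values are restored here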
class DirsOnSysPath(object):
"""Context manager to temporarily add directories to sys.path.
This makes a copy of sys.path, appends any directories given
as positional arguments, then reverts sys.path to the copied
settings when the context ends.
Note that *all* sys.path modifications in the body of the
context manager, including replacement of the object,
will be reverted at the end of the block.
"""
def __init__(self, *paths):
self.original_value = sys.path[:]
self.original_object = sys.path
sys.path.extend(paths)
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.path = self.original_object
sys.path[:] = self.original_value
class TransientResource(object):
"""Raise ResourceDenied if an exception is raised while the context manager
is in effect that matches the specified exception and attributes."""
def __init__(self, exc, **kwargs):
self.exc = exc
self.attrs = kwargs
def __enter__(self):
return self
def __exit__(self, type_=None, value=None, traceback=None):
"""If type_ is a subclass of self.exc and value has attributes matching
self.attrs, raise ResourceDenied. Otherwise let the exception
propagate (if any)."""
if type_ is not None and issubclass(self.exc, type_):
for attr, attr_value in self.attrs.items():
if not hasattr(value, attr):
break
if getattr(value, attr) != attr_value:
break
else:
raise ResourceDenied("an optional resource is not available")
# Context managers that raise ResourceDenied when various issues
# with the Internet connection manifest themselves as exceptions.
# XXX deprecate these and use transient_internet() instead
time_out = TransientResource(IOError, errno=errno.ETIMEDOUT)
socket_peer_reset = TransientResource(socket.error, errno=errno.ECONNRESET)
ioerror_peer_reset = TransientResource(IOError, errno=errno.ECONNRESET)
@contextlib.contextmanager
def transient_internet(resource_name, *, timeout=30.0, errnos=()):
"""Return a context manager that raises ResourceDenied when various issues
with the Internet connection manifest themselves as exceptions."""
default_errnos = [
('ECONNREFUSED', 111),
('ECONNRESET', 104),
('EHOSTUNREACH', 113),
('ENETUNREACH', 101),
('ETIMEDOUT', 110),
]
default_gai_errnos = [
('EAI_AGAIN', -3),
('EAI_FAIL', -4),
('EAI_NONAME', -2),
('EAI_NODATA', -5),
# Encountered when trying to resolve IPv6-only hostnames
('WSANO_DATA', 11004),
]
denied = ResourceDenied("Resource %r is not available" % resource_name)
captured_errnos = errnos
gai_errnos = []
if not captured_errnos:
captured_errnos = [getattr(errno, name, num)
for (name, num) in default_errnos]
gai_errnos = [getattr(socket, name, num)
for (name, num) in default_gai_errnos]
def filter_error(err):
n = getattr(err, 'errno', None)
if (isinstance(err, socket.timeout) or
(isinstance(err, socket.gaierror) and n in gai_errnos) or
n in captured_errnos):
if not verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
old_timeout = socket.getdefaulttimeout()
try:
if timeout is not None:
socket.setdefaulttimeout(timeout)
yield
except IOError as err:
# urllib can wrap original socket errors multiple times (!), we must
# unwrap to get at the original error.
while True:
a = err.args
if len(a) >= 1 and isinstance(a[0], IOError):
err = a[0]
# The error can also be wrapped as args[1]:
# except socket.error as msg:
# raise IOError('socket error', msg).with_traceback(sys.exc_info()[2])
elif len(a) >= 2 and isinstance(a[1], IOError):
err = a[1]
else:
break
filter_error(err)
raise
# XXX should we catch generic exceptions and look for their
# __cause__ or __context__?
finally:
socket.setdefaulttimeout(old_timeout)
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO."""
import io
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, io.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as s:
print("hello")
self.assertEqual(s.getvalue(), "hello")
"""
return captured_output("stdout")
def captured_stderr():
return captured_output("stderr")
def captured_stdin():
return captured_output("stdin")
def gc_collect():
"""Force as many objects as possible to be collected.
In non-CPython implementations of Python, this is needed because timely
deallocation is not guaranteed by the garbage collector. (Even in CPython
this can be the case in case of reference cycles.) This means that __del__
methods may be called later than expected and weakrefs may remain alive for
longer than expected. This function tries its best to force all garbage
objects to disappear.
"""
gc.collect()
if is_jython:
time.sleep(0.1)
gc.collect()
gc.collect()
@contextlib.contextmanager
def disable_gc():
have_gc = gc.isenabled()
gc.disable()
try:
yield
finally:
if have_gc:
gc.enable()
def python_is_optimized():
"""Find if Python was built with optimizations."""
cflags = sysconfig.get_config_var('PY_CFLAGS') or ''
final_opt = ""
for opt in cflags.split():
if opt.startswith('-O'):
final_opt = opt
return final_opt != '' and final_opt != '-O0'
_header = 'nP'
_align = '0n'
if hasattr(sys, "gettotalrefcount"):
_header = '2P' + _header
_align = '0P'
_vheader = _header + 'n'
def calcobjsize(fmt):
return struct.calcsize(_header + fmt + _align)
def calcvobjsize(fmt):
return struct.calcsize(_vheader + fmt + _align)
_TPFLAGS_HAVE_GC = 1<<14
_TPFLAGS_HEAPTYPE = 1<<9
def check_sizeof(test, o, size):
result = sys.getsizeof(o)
# add GC header size
if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))):
size += _testcapi.SIZEOF_PYGC_HEAD
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
test.assertEqual(result, size, msg)
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
def run_with_locale(catstr, *locales):
def decorator(func):
def inner(*args, **kwds):
try:
import locale
category = getattr(locale, catstr)
orig_locale = locale.setlocale(category)
except AttributeError:
# if the test author gives us an invalid category string
raise
except:
# cannot retrieve original locale, so do nothing
locale = orig_locale = None
else:
for loc in locales:
try:
locale.setlocale(category, loc)
break
except:
pass
# now run the function, resetting the locale on exceptions
try:
return func(*args, **kwds)
finally:
if locale and orig_locale:
locale.setlocale(category, orig_locale)
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
return decorator
#=======================================================================
# Decorator for running a function in a specific timezone, correctly
# resetting it afterwards.
def run_with_tz(tz):
def decorator(func):
def inner(*args, **kwds):
try:
tzset = time.tzset
except AttributeError:
raise unittest.SkipTest("tzset required")
if 'TZ' in os.environ:
orig_tz = os.environ['TZ']
else:
orig_tz = None
os.environ['TZ'] = tz
tzset()
# now run the function, resetting the tz on exceptions
try:
return func(*args, **kwds)
finally:
if orig_tz is None:
del os.environ['TZ']
else:
os.environ['TZ'] = orig_tz
time.tzset()
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
return decorator
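# Illustrative usage of the locale/timezone decorators above (a sketch added
# for clarity; the locales and TZ string are examples only):
#
#     class StrftimeTest(unittest.TestCase):
#         @run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
#         @run_with_tz('STD-1DST')
#         def test_localized_time(self):
#             time.strftime('%c', time.localtime())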
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use
# should be configurable.
# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
_4G = 4 * _1G
MAX_Py_ssize_t = sys.maxsize
def set_memlimit(limit):
global max_memuse
global real_max_memuse
sizes = {
'k': 1024,
'm': _1M,
'g': _1G,
't': 1024*_1G,
}
m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
re.IGNORECASE | re.VERBOSE)
if m is None:
raise ValueError('Invalid memory limit %r' % (limit,))
memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
real_max_memuse = memlimit
if memlimit > MAX_Py_ssize_t:
memlimit = MAX_Py_ssize_t
if memlimit < _2G - 1:
raise ValueError('Memory limit %r too low to be useful' % (limit,))
max_memuse = memlimit
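# Illustrative usage (a sketch added for clarity): regrtest passes the value
# of its -M option here, e.g.
#
#     set_memlimit('2.5g')    # allow bigmem tests to use up to ~2.5 GiB
#     set_memlimit('512m')    # raises ValueError: limits below 2G are rejected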
class _MemoryWatchdog:
"""An object which periodically watches the process' memory consumption
and prints it out.
"""
def __init__(self):
self.procfile = '/proc/{pid}/statm'.format(pid=os.getpid())
self.started = False
def start(self):
try:
f = open(self.procfile, 'r')
except OSError as e:
warnings.warn('/proc not available for stats: {}'.format(e),
RuntimeWarning)
sys.stderr.flush()
return
watchdog_script = findfile("memory_watchdog.py")
self.mem_watchdog = subprocess.Popen([sys.executable, watchdog_script],
stdin=f, stderr=subprocess.DEVNULL)
f.close()
self.started = True
def stop(self):
if self.started:
self.mem_watchdog.terminate()
self.mem_watchdog.wait()
def bigmemtest(size, memuse, dry_run=True):
"""Decorator for bigmem tests.
'size' is a requested size for the test (in arbitrary,
test-interpreted units.) 'memuse' is the number of 'bytes per size' for
the test, or a good estimate of it.
if 'dry_run' is False, it means the test doesn't support dummy runs
when -M is not specified.
"""
def decorator(f):
def wrapper(self):
size = wrapper.size
memuse = wrapper.memuse
if not real_max_memuse:
maxsize = 5147
else:
maxsize = size
if ((real_max_memuse or not dry_run)
and real_max_memuse < maxsize * memuse):
raise unittest.SkipTest(
"not enough memory: %.1fG minimum needed"
% (size * memuse / (1024 ** 3)))
if real_max_memuse and verbose:
print()
print(" ... expected peak memory use: {peak:.1f}G"
.format(peak=size * memuse / (1024 ** 3)))
watchdog = _MemoryWatchdog()
watchdog.start()
else:
watchdog = None
try:
return f(self, maxsize)
finally:
if watchdog:
watchdog.stop()
wrapper.size = size
wrapper.memuse = memuse
return wrapper
return decorator
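# Illustrative usage (a sketch added for clarity): the decorated test method
# receives the scaled size as its second argument.
#
#     class StrTest(unittest.TestCase):
#         @bigmemtest(size=_2G, memuse=3)
#         def test_concat(self, size):
#             s = '.' * size          # peak use is roughly size * memuse bytes
#             self.assertEqual(len(s + s), size * 2)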
def bigaddrspacetest(f):
"""Decorator for tests that fill the address space."""
def wrapper(self):
if max_memuse < MAX_Py_ssize_t:
if MAX_Py_ssize_t >= 2**63 - 1 and max_memuse >= 2**31:
raise unittest.SkipTest(
"not enough memory: try a 32-bit build instead")
else:
raise unittest.SkipTest(
"not enough memory: %.1fG minimum needed"
% (MAX_Py_ssize_t / (1024 ** 3)))
else:
return f(self)
return wrapper
#=======================================================================
# unittest integration.
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def _id(obj):
return obj
def requires_resource(resource):
if resource == 'gui' and not _is_gui_available():
return unittest.skip("resource 'gui' is not available")
if is_resource_enabled(resource):
return _id
else:
return unittest.skip("resource {0!r} is not enabled".format(resource))
def cpython_only(test):
"""
Decorator for tests only applicable on CPython.
"""
return impl_detail(cpython=True)(test)
def impl_detail(msg=None, **guards):
if check_impl_detail(**guards):
return _id
if msg is None:
guardnames, default = _parse_guards(guards)
if default:
msg = "implementation detail not available on {0}"
else:
msg = "implementation detail specific to {0}"
guardnames = sorted(guardnames.keys())
msg = msg.format(' or '.join(guardnames))
return unittest.skip(msg)
def _parse_guards(guards):
# Returns a tuple ({platform_name: run_me}, default_value)
if not guards:
return ({'cpython': True}, False)
is_true = list(guards.values())[0]
assert list(guards.values()) == [is_true] * len(guards) # all True or all False
return (guards, not is_true)
# Use the following check to guard CPython's implementation-specific tests --
# or to run them only on the implementation(s) guarded by the arguments.
def check_impl_detail(**guards):
"""This function returns True or False depending on the host platform.
Examples:
if check_impl_detail(): # only on CPython (default)
if check_impl_detail(jython=True): # only on Jython
if check_impl_detail(cpython=False): # everywhere except on CPython
"""
guards, default = _parse_guards(guards)
return guards.get(platform.python_implementation().lower(), default)
def no_tracing(func):
"""Decorator to temporarily turn off tracing for the duration of a test."""
if not hasattr(sys, 'gettrace'):
return func
else:
@functools.wraps(func)
def wrapper(*args, **kwargs):
original_trace = sys.gettrace()
try:
sys.settrace(None)
return func(*args, **kwargs)
finally:
sys.settrace(original_trace)
return wrapper
def refcount_test(test):
"""Decorator for tests which involve reference counting.
To start, the decorator does not run the test if it is not run by CPython.
After that, any trace function is unset during the test to prevent
unexpected refcounts caused by the trace function.
"""
return no_tracing(cpython_only(test))
def _filter_suite(suite, pred):
"""Recursively filter test cases in a suite based on a predicate."""
newtests = []
for test in suite._tests:
if isinstance(test, unittest.TestSuite):
_filter_suite(test, pred)
newtests.append(test)
else:
if pred(test):
newtests.append(test)
suite._tests = newtests
def _run_suite(suite):
"""Run tests from a unittest.TestSuite-derived class."""
if verbose:
runner = unittest.TextTestRunner(sys.stdout, verbosity=2,
failfast=failfast)
else:
runner = BasicTestRunner()
result = runner.run(suite)
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
err = "multiple errors occurred"
if not verbose: err += "; run in verbose mode for details"
raise TestFailed(err)
def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
def case_pred(test):
if match_tests is None:
return True
for name in test.id().split("."):
if fnmatch.fnmatchcase(name, match_tests):
return True
return False
_filter_suite(suite, case_pred)
_run_suite(suite)
#=======================================================================
# Check for the presence of docstrings.
HAVE_DOCSTRINGS = (check_impl_detail(cpython=False) or
sys.platform == 'win32' or
sysconfig.get_config_var('WITH_DOC_STRINGS'))
requires_docstrings = unittest.skipUnless(HAVE_DOCSTRINGS,
"test requires docstrings")
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None, optionflags=0):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
import doctest
if verbosity is None:
verbosity = verbose
else:
verbosity = None
f, t = doctest.testmod(module, verbose=verbosity, optionflags=optionflags)
if f:
raise TestFailed("%d of %d doctests failed" % (f, t))
if verbose:
print('doctest (%s) ... %d tests with zero failures' %
(module.__name__, t))
return f, t
#=======================================================================
# Support for saving and restoring the imported modules.
def modules_setup():
return sys.modules.copy(),
def modules_cleanup(oldmodules):
# Encoders/decoders are registered permanently within the internal
# codec cache. If we destroy the corresponding modules their
# globals will be set to None which will trip up the cached functions.
encodings = [(k, v) for k, v in sys.modules.items()
if k.startswith('encodings.')]
sys.modules.clear()
sys.modules.update(encodings)
# XXX: This kind of problem can affect more than just encodings. In particular
# extension modules (such as _ssl) don't cope with reloading properly.
# Really, test modules should be cleaning out the test specific modules they
# know they added (ala test_runpy) rather than relying on this function (as
# test_importhooks and test_pkg do currently).
# Implicitly imported *real* modules should be left alone (see issue 10556).
sys.modules.update(oldmodules)
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
# unregistered from the threading module.
# thread._count(), on the other hand, only gets decremented *after* the
# __bootstrap() method has returned, which gives us reliable reference counts
# at the end of a test run.
def threading_setup():
if _thread:
return _thread._count(), threading._dangling.copy()
else:
return 1, ()
def threading_cleanup(*original_values):
if not _thread:
return
_MAX_COUNT = 10
for count in range(_MAX_COUNT):
values = _thread._count(), threading._dangling
if values == original_values:
break
time.sleep(0.1)
gc_collect()
# XXX print a warning in case of failure?
def reap_threads(func):
"""Use this function when threads are being used. This will
ensure that the threads are cleaned up even when the test fails.
If threading is unavailable this function does nothing.
"""
if not _thread:
return func
@functools.wraps(func)
def decorator(*args):
key = threading_setup()
try:
return func(*args)
finally:
threading_cleanup(*key)
return decorator
def reap_children():
"""Use this function at the end of test_main() whenever sub-processes
are started. This will help ensure that no extra children (zombies)
stick around to hog resources and create problems when looking
for refleaks.
"""
# Reap all our dead child processes so we don't leave zombies around.
# These hog resources and might be causing some of the buildbots to die.
if hasattr(os, 'waitpid'):
any_process = -1
while True:
try:
# This will raise an exception on Windows. That's ok.
pid, status = os.waitpid(any_process, os.WNOHANG)
if pid == 0:
break
except:
break
@contextlib.contextmanager
def swap_attr(obj, attr, new_val):
"""Temporary swap out an attribute with a new object.
Usage:
with swap_attr(obj, "attr", 5):
...
This will set obj.attr to 5 for the duration of the with: block,
restoring the old value at the end of the block. If `attr` doesn't
exist on `obj`, it will be created and then deleted at the end of the
block.
"""
if hasattr(obj, attr):
real_val = getattr(obj, attr)
setattr(obj, attr, new_val)
try:
yield
finally:
setattr(obj, attr, real_val)
else:
setattr(obj, attr, new_val)
try:
yield
finally:
delattr(obj, attr)
@contextlib.contextmanager
def swap_item(obj, item, new_val):
"""Temporary swap out an item with a new object.
Usage:
with swap_item(obj, "item", 5):
...
This will set obj["item"] to 5 for the duration of the with: block,
restoring the old value at the end of the block. If `item` doesn't
exist on `obj`, it will be created and then deleted at the end of the
block.
"""
if item in obj:
real_val = obj[item]
obj[item] = new_val
try:
yield
finally:
obj[item] = real_val
else:
obj[item] = new_val
try:
yield
finally:
del obj[item]
def strip_python_stderr(stderr):
"""Strip the stderr of a Python process from potential debug output
emitted by the interpreter.
This will typically be run on the result of the communicate() method
of a subprocess.Popen object.
"""
stderr = re.sub(br"\[\d+ refs\]\r?\n?", b"", stderr).strip()
return stderr
def args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions."""
return subprocess._args_from_interpreter_flags()
#============================================================
# Support for assertions about logging.
#============================================================
class TestHandler(logging.handlers.BufferingHandler):
def __init__(self, matcher):
# BufferingHandler takes a "capacity" argument
# so as to know when to flush. As we're overriding
# shouldFlush anyway, we can set a capacity of zero.
# You can call flush() manually to clear out the
# buffer.
logging.handlers.BufferingHandler.__init__(self, 0)
self.matcher = matcher
def shouldFlush(self):
return False
def emit(self, record):
self.format(record)
self.buffer.append(record.__dict__)
def matches(self, **kwargs):
"""
Look for a saved dict whose keys/values match the supplied arguments.
"""
result = False
for d in self.buffer:
if self.matcher.matches(d, **kwargs):
result = True
break
return result
class Matcher(object):
_partial_matches = ('msg', 'message')
def matches(self, d, **kwargs):
"""
Try to match a single dict with the supplied arguments.
Keys whose values are strings and which are in self._partial_matches
will be checked for partial (i.e. substring) matches. You can extend
this scheme to (for example) do regular expression matching, etc.
"""
result = True
for k in kwargs:
v = kwargs[k]
dv = d.get(k)
if not self.match_value(k, dv, v):
result = False
break
return result
def match_value(self, k, dv, v):
"""
Try to match a single stored value (dv) with a supplied value (v).
"""
if type(v) != type(dv):
result = False
elif type(dv) is not str or k not in self._partial_matches:
result = (v == dv)
else:
result = dv.find(v) >= 0
return result
_can_symlink = None
def can_symlink():
global _can_symlink
if _can_symlink is not None:
return _can_symlink
symlink_path = TESTFN + "can_symlink"
try:
os.symlink(TESTFN, symlink_path)
can = True
except (OSError, NotImplementedError, AttributeError):
can = False
else:
os.remove(symlink_path)
_can_symlink = can
return can
def skip_unless_symlink(test):
"""Skip decorator for tests that require functional symlink"""
ok = can_symlink()
msg = "Requires functional symlink implementation"
return test if ok else unittest.skip(msg)(test)
_can_xattr = None
def can_xattr():
global _can_xattr
if _can_xattr is not None:
return _can_xattr
if not hasattr(os, "setxattr"):
can = False
else:
tmp_fp, tmp_name = tempfile.mkstemp()
try:
with open(TESTFN, "wb") as fp:
try:
# TESTFN & tempfile may use different file systems with
# different capabilities
os.setxattr(tmp_fp, b"user.test", b"")
os.setxattr(fp.fileno(), b"user.test", b"")
# Kernels < 2.6.39 don't respect setxattr flags.
kernel_version = platform.release()
m = re.match("2.6.(\d{1,2})", kernel_version)
can = m is None or int(m.group(1)) >= 39
except OSError:
can = False
finally:
unlink(TESTFN)
unlink(tmp_name)
_can_xattr = can
return can
def skip_unless_xattr(test):
"""Skip decorator for tests that require functional extended attributes"""
ok = can_xattr()
msg = "no non-broken extended attribute support"
return test if ok else unittest.skip(msg)(test)
if sys.platform.startswith('win'):
@contextlib.contextmanager
def suppress_crash_popup():
"""Disable Windows Error Reporting dialogs using SetErrorMode."""
# see http://msdn.microsoft.com/en-us/library/windows/desktop/ms680621%28v=vs.85%29.aspx
# GetErrorMode is not available on Windows XP and Windows Server 2003,
# but SetErrorMode returns the previous value, so we can use that
import ctypes
k32 = ctypes.windll.kernel32
SEM_NOGPFAULTERRORBOX = 0x02
old_error_mode = k32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
k32.SetErrorMode(old_error_mode | SEM_NOGPFAULTERRORBOX)
try:
yield
finally:
k32.SetErrorMode(old_error_mode)
else:
# this is a no-op for other platforms
@contextlib.contextmanager
def suppress_crash_popup():
yield
def patch(test_instance, object_to_patch, attr_name, new_value):
"""Override 'object_to_patch'.'attr_name' with 'new_value'.
Also, add a cleanup procedure to 'test_instance' to restore
'object_to_patch' value for 'attr_name'.
The 'attr_name' should be a valid attribute for 'object_to_patch'.
"""
# check that 'attr_name' is a real attribute for 'object_to_patch'
# will raise AttributeError if it does not exist
getattr(object_to_patch, attr_name)
# keep a copy of the old value
attr_is_local = False
try:
old_value = object_to_patch.__dict__[attr_name]
except (AttributeError, KeyError):
old_value = getattr(object_to_patch, attr_name, None)
else:
attr_is_local = True
# restore the value when the test is done
def cleanup():
if attr_is_local:
setattr(object_to_patch, attr_name, old_value)
else:
delattr(object_to_patch, attr_name)
test_instance.addCleanup(cleanup)
# actually override the attribute
setattr(object_to_patch, attr_name, new_value)
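# Illustrative usage (a sketch added for clarity, assuming `io` is imported):
# override an attribute for the duration of a single test, with automatic
# restoration through the registered cleanup.
#
#     def test_quiet_mode(self):
#         patch(self, sys, 'stdout', io.StringIO())
#         print('swallowed')      # goes to the StringIO object
#         # sys.stdout is restored by the cleanup added to this test case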
| gpl-3.0 |
VincentCATILLON/navitia | source/jormungandr/jormungandr/interfaces/v1/make_links.py | 6 | 12361 | # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from flask import url_for
from collections import OrderedDict
from functools import wraps
from converters_collection_type import resource_type_to_collection,\
collections_to_resource_type
from flask.ext.restful.utils import unpack
def create_external_link(url, rel, _type=None, templated=False, description=None, **kwargs):
"""
:param url: url forwarded to flask's url_for
:param rel: relation of the link to the current object
:param _type: type of linked object
:param templated: if the link is templated ({} is the url)
:param description: description of the link
:param kwargs: args forwarded to url_for
:return: a dict representing a link
"""
#if no type, type is rel
if not _type:
_type = rel
d = {
"href": url_for(url, _external=True, **kwargs),
"templated": templated,
"rel": rel,
"type": _type
}
if description:
d['title'] = description
return d
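# Illustrative example (a sketch added for clarity; the endpoint and kwargs
# are hypothetical): inside a request context,
#
#     create_external_link("v1.coverage", rel="related", templated=True,
#                          region="{regions.id}")
#
# returns a dict like
#
#     {"href": "http://<host>/v1/coverage/%7Bregions.id%7D/",
#      "templated": True, "rel": "related", "type": "related"}
#
# (the percent-encoded braces are turned back into "{...}" by clean_links).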
def create_internal_link(rel, _type, id, templated=False, description=None):
"""
:param rel: relation of the link to the current object
:param _type: type of linked object
:param id: id of the link
:param templated: if the link is templated ({} is the url)
:return: a dict representing a link
"""
#if no type, type is rel
if not _type:
_type = rel
d = {
"templated": templated,
"rel": rel,
"internal": True,
"type": _type
}
if description:
d['title'] = description
if id:
d['id'] = id
return d
class generate_links(object):
def prepare_objects(self, objects, hasCollections=False):
if isinstance(objects, tuple):
objects = objects[0]
if "links" not in objects:
objects["links"] = []
elif hasattr(self, "collections"):
for link in objects["links"]:
if "type" in link.keys():
self.collections.remove(link["type"])
return objects
def prepare_kwargs(self, kwargs, objects):
if not "region" in kwargs.keys() and not "lon" in kwargs.keys()\
and "regions" in objects.keys():
kwargs["region"] = "{regions.id}"
if "uri" in kwargs:
del kwargs["uri"]
return kwargs
class add_pagination_links(object):
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
objects = f(*args, **kwargs)
if objects[1] != 200:
return objects
endpoint = None
pagination = None
if isinstance(objects, tuple):
data, code, header = unpack(objects)
else:
data = objects
for key, value in data.iteritems():
if key == "regions":
endpoint = "v1.coverage"
elif key == "pagination":
pagination = value
elif key in collections_to_resource_type.keys():
endpoint = "v1." + key + "."
endpoint += "id" if "id" in kwargs.keys() else "collection"
elif key in ["journeys", "stop_schedules", "route_schedules",
"departures", "arrivals", "places_nearby", "calendars"]:
endpoint = "v1." + key
if pagination and endpoint and "region" in kwargs:
pagination = data["pagination"]
if "start_page" in pagination.keys() and \
"items_on_page" in pagination.keys() and \
"items_per_page" in pagination.keys() and \
"total_result" in pagination.keys():
if not "links" in data.keys():
data["links"] = []
start_page = int(pagination["start_page"])
items_per_page = int(pagination["items_per_page"])
items_on_page = int(pagination["items_on_page"])
total_result = int(pagination["total_result"])
kwargs["_external"] = True
if start_page > 0:
kwargs["start_page"] = start_page - 1
data["links"].append({
"href": url_for(endpoint, **kwargs),
"type": "previous",
"templated": False
})
nb_next_page = items_per_page * start_page
nb_next_page += items_on_page
if total_result > nb_next_page:
kwargs["start_page"] = start_page + 1
data["links"].append({
"href": url_for(endpoint, **kwargs),
"type": "next",
"templated": False
})
if items_per_page == 0 or total_result == 0:
kwargs["start_page"] = 0
else:
nb_last_page = total_result - 1
nb_last_page = nb_last_page / items_per_page
kwargs["start_page"] = nb_last_page
data["links"].append({
"href": url_for(endpoint, **kwargs),
"type": "last",
"templated": False
})
del kwargs["start_page"]
data["links"].append({
"href": url_for(endpoint, **kwargs),
"type": "first",
"templated": False
})
if isinstance(objects, tuple):
return data, code, header
else:
return data
return wrapper
class add_coverage_link(generate_links):
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
objects = f(*args, **kwargs)
if objects[1] != 200:
return objects
if isinstance(objects, tuple):
data, code, header = unpack(objects)
else:
data = objects
if isinstance(data, OrderedDict):
data = self.prepare_objects(data)
kwargs = self.prepare_kwargs(kwargs, data)
data["links"].append(create_external_link("v1.coverage", rel='related', templated=True, **kwargs))
if isinstance(objects, tuple):
return data, code, header
else:
return data
return wrapper
class add_collection_links(generate_links):
def __init__(self, collections):
self.collections = collections
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
objects = f(*args, **kwargs)
if objects[1] != 200:
return objects
if isinstance(objects, tuple):
data, code, header = unpack(objects)
else:
data = objects
if isinstance(data, OrderedDict):
data = self.prepare_objects(objects, True)
kwargs = self.prepare_kwargs(kwargs, data)
for collection in self.collections:
data["links"].append(create_external_link("v1.{c}.collection".format(c=collection),
rel=collection, templated=True, **kwargs))
if isinstance(objects, tuple):
return data, code, header
else:
return data
return wrapper
class add_id_links(generate_links):
def __init__(self, *args, **kwargs):
self.data = set()
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
objects = f(*args, **kwargs)
if objects[1] != 200:
return objects
if isinstance(objects, tuple):
data, code, header = unpack(objects)
else:
data = objects
self.get_objects(data)
data = self.prepare_objects(objects, True)
kwargs = self.prepare_kwargs(kwargs, data)
uri_id = None
if "id" in kwargs.keys() and\
"collection" in kwargs.keys() and \
kwargs["collection"] in data.keys():
uri_id = kwargs["id"]
for obj in self.data:
if obj in resource_type_to_collection.keys():
kwargs["collection"] = resource_type_to_collection[obj]
else:
kwargs["collection"] = obj
if kwargs["collection"] in collections_to_resource_type.keys():
if not uri_id:
kwargs["id"] = "{" + obj + ".id}"
endpoint = "v1." + kwargs["collection"] + "."
endpoint += "id" if "region" in kwargs.keys() or\
"lon" in kwargs.keys()\
else "redirect"
collection = kwargs["collection"]
to_pass = {k:v for k,v in kwargs.iteritems() if k != "collection"}
data["links"].append(create_external_link(url=endpoint, rel=collection,
_type=obj, templated=True,
**to_pass))
if isinstance(objects, tuple):
return data, code, header
else:
return data
return wrapper
def get_objects(self, data, collection_name=None):
if hasattr(data, 'keys'):
if "id" in data.keys() \
and (not "href" in data.keys()) \
and collection_name:
self.data.add(collection_name)
for key, value in data.iteritems():
self.get_objects(value, key)
if isinstance(data, (list, tuple)):
for item in data:
self.get_objects(item, collection_name)
class clean_links(object):
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
response = f(*args, **kwargs)
if isinstance(response, tuple):
data, code, header = unpack(response)
if code != 200:
return data, code, header
else:
data = response
if isinstance(data, OrderedDict) and "links" in data.keys():
for link in data['links']:
link['href'] = link['href'].replace("%7B", "{")\
.replace("%7D", "}")\
.replace("%3B", ";")
if isinstance(response, tuple):
return data, code, header
else:
return data
return wrapper
| agpl-3.0 |
AdotDdot/sproxy | sproxy.py | 1 | 17349 | #!/usr/bin/env python
#
# sproxy.py
# Copyright (C) 2014 by A.D. <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import socket
import sys
import threading
import ssl
import os
import time
import urlparse
from OpenSSL import crypto
from _abcoll import *
from operator import eq as _eq
from itertools import imap as _imap
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
class Proxy:
def __init__(self, serv_port):
self.serv_host = ''
self.serv_port = serv_port
self.max_listen = 300
self.debug = False
self.browser_timeout = 0.5
self.web_timeout = 0.5
self.buffer_size = 4096
self._stdout_lock = threading.Lock()
self._certfactory = CertFactory()
self._init_localcert()
def modify_all(self, request):
'''Override to apply changes to every request'''
pass
def parse_response(self, response, host):
'''Override to handle received response - best used with concurrency'''
pass
def output_flow(self, request, response):
'''Override to change output'''
print '\n'+request.first_line
print response.first_line
def start(self):
'''Start the proxy server'''
cname = (self.serv_host, self.serv_port)  # fallback name for log messages if bind() fails
try:
serv_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serv_sock.bind((self.serv_host, self.serv_port))
serv_sock.listen(self.max_listen)
cname = serv_sock.getsockname()
time.sleep(0.5)
print '\nProxy running on port %d - listening'%self.serv_port
except socket.error, (value, message):
self._log(cname, 'Could not open server socket: error %d %s'%(value,message))
sys.exit(1)
#mainloop
while True:
try:
conn, addr = serv_sock.accept()
self._log(cname, 'server connected by %s %s'%addr)
conn_thread = threading.Thread(target = self._handle_conn, args = (conn,))
conn_thread.daemon = 1
try: conn_thread.start()
except: conn.close()
except KeyboardInterrupt:
if conn: conn.close()
self._certfactory.cleanup()
serv_sock.close()
exit(0)
def _init_localcert(self):
with open(os.path.join('sproxy_files', 'localcerts.txt'), 'rt') as loc:
self.certfile = loc.read()
def _handle_conn(self, conn):
#get request from browser
conn.settimeout(self.browser_timeout)
cname = conn.getsockname()
request = self._recv_pipe('browser', conn)
if not request:
self._log(cname, 'no request received from browser: closing socket')
conn.close()
sys.exit(1)
#process request to allow for user changes
request_obj = HTTPRequest(request)
self._handle_reqs(request_obj)
request = request_obj.whole
tunneling = request_obj.method == 'CONNECT'
http_port = 443 if tunneling else 80
http_host = request_obj.headers['Host']
self._log(cname, 'got host %s, port %d'%(http_host, http_port))
#get and send response
if tunneling: self._get_https_resp(http_host, http_port, conn)
else:
self._get_http_resp(http_host, http_port, conn, request, request_obj)
conn.close()
def _get_https_resp(self, host, port, conn):
cname = conn.getsockname()
conn.send(b'HTTP/1.1 200 Connection established\n\n')
wclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
wclient = ssl.wrap_socket(wclient, server_side = False, ca_certs = self.certfile, cert_reqs = ssl.CERT_REQUIRED)
try: wclient.connect((host, port))
except ssl.SSLError, m:
self._log(cname, 'could not connect to %s: %s'%(host, m))
wclient.close()
conn.close()
sys.exit(1)
except socket.error, (v, m):
self._log(cname, 'could not connect to %s: socket error %d %s'%(host, v, m))
wclient.close()
conn.close()
sys.exit(1)
wclient.settimeout(self.web_timeout)
#get server's certificate as pem
pem_data = ssl.DER_cert_to_PEM_cert(wclient.getpeercert(binary_form = True))
certfile, keyfile = self._certfactory.make_cert(pem_data)
try: conn = ssl.wrap_socket(conn, server_side = True, certfile = certfile, keyfile= keyfile)
except ssl.SSLError, m:
self._log(cname, 'could not complete ssl handshake with browser client: %s'%m)
wclient.close()
conn.close()
sys.exit(1)
except socket.error, (v, m):
self._log(cname, ('could not complete ssl handshake with browser client: socket error %d - %s'%(v, m)))
wclient.close()
conn.close()
sys.exit(1)
#get plain text data
request = self._recv_pipe(host, conn)
if not request:
wclient.close()
conn.close()
sys.exit(1)
request_obj = HTTPRequest(request, https=True)
self._handle_reqs(request_obj)
request = request_obj.whole
wclient.send(request)
response = self._recv_pipe(host, wclient, conn)
if response:
response_obj = HTTPResponse(response)
self._handle_response(request_obj, response_obj, host)
wclient.close()
conn.close()
def _get_http_resp(self, host, port, conn, req, req_obj):
cname = conn.getsockname()
wclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._log(cname, 'client to host %s initialized'%host)
wclient.settimeout(self.web_timeout)
try:
wclient.connect((host, port))
self._log(cname, 'client to host %s connected'%host)
except socket.timeout:
self._log(cname, 'could not connect to %s: socket timed out'%host)
wclient.close()
conn.close()
sys.exit(1)
except socket.error, (value, message):
self._log(cname, 'could not connect to %s: socket error error %d %s'%(host, value, message))
wclient.close()
conn.close()
sys.exit(1)
wclient.send(req)
self._log(cname, 'request sent to host %s'%host)
response = self._recv_pipe(host, wclient, conn)
if response:
response_obj = HTTPResponse(response)
self._handle_response(req_obj, response_obj, host)
wclient.close()
self._log(cname, 'connection to client and connection to host %s closed'%host)
def _recv_pipe(self, source, from_conn, to_conn = ''):
msg = []
cname = from_conn.getsockname()
gotnull = 0
while True:
try:
msg_pack = from_conn.recv(self.buffer_size)
except ssl.SSLError, m:
self._log(cname, 'ssl error occured while receiving data from %s: %s'%(source, m))
break
except socket.timeout:
break
except socket.error, (v, m):
self._log(cname, 'socket error %d occurred while receiving data from %s - %s'%(v, source, m))
break
if not msg_pack:
if gotnull:
break
else: gotnull = 1
else:
msg.append(msg_pack)
if to_conn:
try: to_conn.send(msg_pack)
except socket.error, (value, message):
self._log(cname, 'could not send response from %s to %s: socket error %d - %s'%(source, (to_conn.getsockname()), value, message))
from_conn.close()
to_conn.close()
sys.exit(1)
return b''.join(msg)
def _log(self, cname, content):
if self.debug:
self._stdout_lock.acquire()
print '%f '%time.time(), ('[%s %d]'%cname).ljust(25), content
self._stdout_lock.release()
def _handle_reqs(self, request):
#decrease value of the Max-Forwards header
if "Max-Forwards" in request.headers:
request.headers["Max-Forwards"] = str(int(request.headers["Max-Forwards"])-1)
#apply user-defined changes
self.modify_all(request)
#reset request
request.whole = request.make_raw()
def _handle_response(self, request, response, host):
'''After response has been received'''
self._stdout_lock.acquire()
self.output_flow(request, response)
self._stdout_lock.release()
self.parse_response(response, host)
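# Illustrative usage (a sketch added for clarity): subclass Proxy and
# override the hook methods to inspect or modify traffic.
#
#     class MyProxy(Proxy):
#         def modify_all(self, request):
#             request.set_header('User-Agent', 'sproxy')
#         def output_flow(self, request, response):
#             print request.method, request.url, '->', response.status
#
#     if __name__ == '__main__':
#         MyProxy(8080).start()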
class HTTPRequest:
def __init__(self, raw_req, https = False):
self.https = https
self.on_hold = False
self.whole = raw_req.replace('\r', '\n').replace('\n\n', '\n')
self._set_parts()
self._decode_body()
def _set_parts(self):
self.head, self.body = self.whole.split('\n\n', 1)
self.first_line = str(self.head).splitlines()[0]
self.headers = HeaderDict([x.split(': ', 1) for x in self.head.splitlines()[1:]])
self.method, self.url, self.protov = self.first_line.split(' ', 2)
if self.https: self.url = 'https://'+self.headers['host']+self.url
def _decode_body(self):
if self.body and 'Content-Type' in self.headers and 'application/x-www-form-urlencoded' in self.headers['Content-Type']:
self.decoded_body = '\n'.join(['[Url-encoded]']+[': '.join(t) for t in urlparse.parse_qsl(self.body.strip('\n'))])
else:
self.decoded_body = self.body
def set_header(self, header, value):
self.headers[header] = value
headers = '\n'.join([header+': '+self.headers[header] for header in self.headers])
self.head = '\n'.join([self.first_line, headers])
def make_raw(self):
#put all parts back together
parsed = urlparse.urlparse(self.url)
url = self.url.replace(parsed.scheme+'://'+parsed.netloc, '', 1)
first_line = ' '.join([self.method, url, self.protov])
headers = '\r\n'.join([header+': '+self.headers[header] for header in self.headers])
head = '\r\n'.join([first_line, headers])
return '\r\n\r\n'.join([head, self.body])
class HTTPResponse:
def __init__(self, raw_resp):
self.raw = raw_resp
self._set_parts()
def _set_parts(self):
self.head = str(self.raw.replace(b'\r\n\r\n', b'\n\n').replace(b'\n\r\n\r', b'\n\n')).split('\n\n', 2)[0]
self.body = self.raw.replace(self.head.encode(), b'').replace('\n\n', '')
self.first_line = self.head.splitlines()[0]
self.headers = HeaderDict(x.split(': ', 1) for x in self.head.splitlines()[1:])
self.protov, self.status, self.status_text = self.first_line.split(' ', 2)
class CertFactory:
def __init__(self):
self._files_dir = 'sproxy_files'
self._sid = os.path.join(self._files_dir,'sid.txt')
with open(self._sid, 'rt') as sid: self._count = int(sid.read())
self._count_lock = threading.Lock()
self.root_cert = crypto.load_certificate(crypto.FILETYPE_PEM, open(os.path.join(self._files_dir, 'sproxy.pem')).read())
self.root_key = crypto.load_privatekey(crypto.FILETYPE_PEM, open(os.path.join(self._files_dir, 'sproxy.key')).read())
        self.issuer = self.root_cert.get_subject()
def make_cert(self, pem_data):
old_cert = crypto.load_certificate(crypto.FILETYPE_PEM, pem_data)
common_name = old_cert.get_subject().CN
if os.path.isfile(os.path.join(self._files_dir, common_name+'.pem')):
certfile = os.path.join(self._files_dir, common_name+'.pem')
keyfile = os.path.join(self._files_dir, common_name+'.key')
return certfile, keyfile
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 2048)
new_cert = crypto.X509()
new_cert.gmtime_adj_notBefore(0)
new_cert.gmtime_adj_notAfter(10*365*24*60*60)
#set same subject of old cert
new_cert.set_subject(old_cert.get_subject())
        #look for and set the subjectAltName (SAN) of the old cert
for i in range(old_cert.get_extension_count()):
ext = old_cert.get_extension(i)
if ext.get_short_name() == 'subjectAltName':
new_cert.add_extensions([ext])
new_cert.set_issuer(self.issuer)
self._count_lock.acquire()
new_cert.set_serial_number(self._count)
self._count += 1
self._count_lock.release()
new_cert.set_pubkey(pkey)
new_cert.sign(self.root_key, 'sha1')
certfile = os.path.join( self._files_dir, common_name+'.pem',)
keyfile = os.path.join( self._files_dir, common_name+'.key')
#write key and cert
with open(certfile, "wt") as cf: cf.write(crypto.dump_certificate(crypto.FILETYPE_PEM, new_cert))
with open(keyfile, "wt") as kf: kf.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
#append root to cert chain
with open(certfile, 'at') as ccf: ccf.write(crypto.dump_certificate(crypto.FILETYPE_PEM, self.root_cert))
return certfile, keyfile
def cleanup(self):
#update count of last serial number used
with open(self._sid, 'wt') as sid:
self._count_lock.acquire()
sid.write(str(self._count))
self._count_lock.release()
class HeaderDict(dict):
    '''Caseless Ordered Dictionary
    Enables case-insensitive lookups and updates while preserving the original case when keys are listed.
    Combination of the code of collections.OrderedDict and CaselessDictionary (https://gist.github.com/bloomonkey/3003096) '''
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = []
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __contains__(self, key):
return dict.__contains__(self, key.lower())
def __getitem__(self, key):
return dict.__getitem__(self, key.lower())['val']
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
if key not in self:
root = self.__root
last = root[0]
            last[1] = root[0] = self.__map[key.lower()] = [last, root, key]
return dict.__setitem__(self, key.lower(), {'key': key, 'val': value})
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        # deletion must be caseless, like lookup and insertion
        dict_delitem(self, key.lower())
        link_prev, link_next, _ = self.__map.pop(key.lower())
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
dict.clear(self)
def keys(self):
return list(self)
def values(self):
return [self[key] for key in self]
def items(self):
return [(key, self[key]) for key in self]
def iterkeys(self):
return iter(self)
def itervalues(self):
for k in self:
yield self[k]
def iteritems(self):
for k in self:
yield (k, self[k])
def get(self, key, default=None):
try:
v = dict.__getitem__(self, key.lower())
except KeyError:
return default
else:
return v['val']
def has_key(self,key):
return key in self
update = MutableMapping.update
__update = update
__marker = object()
def pop(self, key, default=__marker):
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
if key in self:
return self[key]
self[key] = default
return default
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
key = next(reversed(self) if last else iter(self))
value = self.pop(key)
return key, value
def __repr__(self, _repr_running={}):
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
if isinstance(other, OrderedDict):
return dict.__eq__(self, other) and all(_imap(_eq, self, other))
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
def viewkeys(self):
return KeysView(self)
def viewvalues(self):
return ValuesView(self)
def viewitems(self):
return ItemsView(self)
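# Illustrative behaviour of HeaderDict (values are hypothetical):
#   h = HeaderDict()
#   h['Content-Type'] = 'text/html'
#   h['content-type']   -> 'text/html'        (caseless lookup)
#   list(h)             -> ['Content-Type']   (original case, insertion order)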
if __name__ == '__main__':
serv_port = int(sys.argv[1]) if len(sys.argv) > 1 else 50007
proxy = Proxy(serv_port)
proxy.start()
| gpl-3.0 |
Jonnymcc/ansible | lib/ansible/module_utils/rax.py | 14 | 12055 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their own
# license to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import re
from uuid import UUID
from ansible.module_utils.basic import BOOLEANS
FINAL_STATUSES = ('ACTIVE', 'ERROR')
VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
'error', 'error_deleting')
CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']
NON_CALLABLES = (basestring, bool, dict, int, list, type(None))
PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
def rax_slugify(value):
"""Prepend a key with rax_ and normalize the key name"""
return 'rax_%s' % (re.sub('[^\w-]', '_', value).lower().lstrip('_'))
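# e.g. rax_slugify('accessIPv4') -> 'rax_accessipv4'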
def rax_clb_node_to_dict(obj):
"""Function to convert a CLB Node object to a dict"""
if not obj:
return {}
node = obj.to_dict()
node['id'] = obj.id
node['weight'] = obj.weight
return node
def rax_to_dict(obj, obj_type='standard'):
"""Generic function to convert a pyrax object to a dict
obj_type values:
standard
clb
server
"""
instance = {}
for key in dir(obj):
value = getattr(obj, key)
if obj_type == 'clb' and key == 'nodes':
instance[key] = []
for node in value:
instance[key].append(rax_clb_node_to_dict(node))
elif (isinstance(value, list) and len(value) > 0 and
not isinstance(value[0], NON_CALLABLES)):
instance[key] = []
for item in value:
instance[key].append(rax_to_dict(item))
elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
if obj_type == 'server':
if key == 'image':
if not value:
instance['rax_boot_source'] = 'volume'
else:
instance['rax_boot_source'] = 'local'
key = rax_slugify(key)
instance[key] = value
if obj_type == 'server':
for attr in ['id', 'accessIPv4', 'name', 'status']:
instance[attr] = instance.get(rax_slugify(attr))
return instance
def rax_find_bootable_volume(module, rax_module, server, exit=True):
"""Find a servers bootable volume"""
cs = rax_module.cloudservers
cbs = rax_module.cloud_blockstorage
server_id = rax_module.utils.get_id(server)
volumes = cs.volumes.get_server_volumes(server_id)
bootable_volumes = []
for volume in volumes:
vol = cbs.get(volume)
if module.boolean(vol.bootable):
bootable_volumes.append(vol)
if not bootable_volumes:
if exit:
module.fail_json(msg='No bootable volumes could be found for '
'server %s' % server_id)
else:
return False
elif len(bootable_volumes) > 1:
if exit:
module.fail_json(msg='Multiple bootable volumes found for server '
'%s' % server_id)
else:
return False
return bootable_volumes[0]
def rax_find_image(module, rax_module, image, exit=True):
"""Find a server image by ID or Name"""
cs = rax_module.cloudservers
try:
UUID(image)
except ValueError:
try:
image = cs.images.find(human_id=image)
except(cs.exceptions.NotFound,
cs.exceptions.NoUniqueMatch):
try:
image = cs.images.find(name=image)
except (cs.exceptions.NotFound,
cs.exceptions.NoUniqueMatch):
if exit:
module.fail_json(msg='No matching image found (%s)' %
image)
else:
return False
return rax_module.utils.get_id(image)
def rax_find_volume(module, rax_module, name):
"""Find a Block storage volume by ID or name"""
cbs = rax_module.cloud_blockstorage
try:
UUID(name)
volume = cbs.get(name)
except ValueError:
try:
volume = cbs.find(name=name)
except rax_module.exc.NotFound:
volume = None
except Exception as e:
module.fail_json(msg='%s' % e)
return volume
def rax_find_network(module, rax_module, network):
"""Find a cloud network by ID or name"""
cnw = rax_module.cloud_networks
try:
UUID(network)
except ValueError:
if network.lower() == 'public':
return cnw.get_server_networks(PUBLIC_NET_ID)
elif network.lower() == 'private':
return cnw.get_server_networks(SERVICE_NET_ID)
else:
try:
network_obj = cnw.find_network_by_label(network)
except (rax_module.exceptions.NetworkNotFound,
rax_module.exceptions.NetworkLabelNotUnique):
module.fail_json(msg='No matching network found (%s)' %
network)
else:
return cnw.get_server_networks(network_obj)
else:
return cnw.get_server_networks(network)
def rax_find_server(module, rax_module, server):
"""Find a Cloud Server by ID or name"""
cs = rax_module.cloudservers
try:
UUID(server)
server = cs.servers.get(server)
except ValueError:
servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
if not servers:
module.fail_json(msg='No Server was matched by name, '
'try using the Server ID instead')
if len(servers) > 1:
module.fail_json(msg='Multiple servers matched by name, '
'try using the Server ID instead')
# We made it this far, grab the first and hopefully only server
# in the list
server = servers[0]
return server
def rax_find_loadbalancer(module, rax_module, loadbalancer):
"""Find a Cloud Load Balancer by ID or name"""
clb = rax_module.cloud_loadbalancers
try:
found = clb.get(loadbalancer)
except:
found = []
for lb in clb.list():
if loadbalancer == lb.name:
found.append(lb)
if not found:
module.fail_json(msg='No loadbalancer was matched')
if len(found) > 1:
module.fail_json(msg='Multiple loadbalancers matched')
# We made it this far, grab the first and hopefully only item
# in the list
found = found[0]
return found
def rax_argument_spec():
"""Return standard base dictionary used for the argument_spec
argument in AnsibleModule
"""
return dict(
api_key=dict(type='str', aliases=['password'], no_log=True),
auth_endpoint=dict(type='str'),
credentials=dict(type='str', aliases=['creds_file']),
env=dict(type='str'),
identity_type=dict(type='str', default='rackspace'),
region=dict(type='str'),
tenant_id=dict(type='str'),
tenant_name=dict(type='str'),
username=dict(type='str'),
verify_ssl=dict(choices=BOOLEANS, type='bool'),
)
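# A minimal usage sketch (the extra 'name' option is hypothetical):
#   argument_spec = rax_argument_spec()
#   argument_spec.update(dict(name=dict(type='str')))
#   module = AnsibleModule(argument_spec=argument_spec,
#                          required_together=rax_required_together())
#   setup_rax_module(module, pyrax)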
def rax_required_together():
"""Return the default list used for the required_together argument to
AnsibleModule"""
return [['api_key', 'username']]
def setup_rax_module(module, rax_module, region_required=True):
"""Set up pyrax in a standard way for all modules"""
rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version,
rax_module.USER_AGENT)
api_key = module.params.get('api_key')
auth_endpoint = module.params.get('auth_endpoint')
credentials = module.params.get('credentials')
env = module.params.get('env')
identity_type = module.params.get('identity_type')
region = module.params.get('region')
tenant_id = module.params.get('tenant_id')
tenant_name = module.params.get('tenant_name')
username = module.params.get('username')
verify_ssl = module.params.get('verify_ssl')
if env is not None:
rax_module.set_environment(env)
rax_module.set_setting('identity_type', identity_type)
if verify_ssl is not None:
rax_module.set_setting('verify_ssl', verify_ssl)
if auth_endpoint is not None:
rax_module.set_setting('auth_endpoint', auth_endpoint)
if tenant_id is not None:
rax_module.set_setting('tenant_id', tenant_id)
if tenant_name is not None:
rax_module.set_setting('tenant_name', tenant_name)
try:
username = username or os.environ.get('RAX_USERNAME')
if not username:
username = rax_module.get_setting('keyring_username')
if username:
api_key = 'USE_KEYRING'
if not api_key:
api_key = os.environ.get('RAX_API_KEY')
credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
os.environ.get('RAX_CREDS_FILE'))
region = (region or os.environ.get('RAX_REGION') or
rax_module.get_setting('region'))
except KeyError as e:
module.fail_json(msg='Unable to load %s' % e.message)
try:
if api_key and username:
if api_key == 'USE_KEYRING':
rax_module.keyring_auth(username, region=region)
else:
rax_module.set_credentials(username, api_key=api_key,
region=region)
elif credentials:
credentials = os.path.expanduser(credentials)
rax_module.set_credential_file(credentials, region=region)
else:
raise Exception('No credentials supplied!')
except Exception as e:
if e.message:
msg = str(e.message)
else:
msg = repr(e)
module.fail_json(msg=msg)
if region_required and region not in rax_module.regions:
module.fail_json(msg='%s is not a valid region, must be one of: %s' %
(region, ','.join(rax_module.regions)))
return rax_module
| gpl-3.0 |
JonasFranzDEV/gitea | vendor/github.com/ugorji/go/codec/test.py | 1516 | 4019 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
# Ensure all "string" keys are utf strings (else encoded as bytes)
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464.0,
6464646464.0,
False,
True,
u"null",
None,
u"someday",
1328176922000002000,
u"",
-2206187877999998000,
u"bytestring",
270,
u"none",
-2013855847999995777,
#-6795364578871345152,
]
l1 = [
{ "true": True,
"false": False },
{ "true": u"True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": u"1234567890" },
{ True: "true", 138: False, "false": 200 }
]
l = []
l.extend(l0)
l.append(l0)
l.append(1)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
# packer = msgpack.Packer()
serialized = msgpack.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
f.write(serialized)
f.close()
serialized = cbor.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
f.write(serialized)
f.close()
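# e.g. "python test.py testdata /tmp/golden" writes 0.msgpack.golden,
# 0.cbor.golden, ... into /tmp/golden (the directory must already exist).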
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
| mit |
MrNuggles/HeyBoet-Telegram-Bot | temboo/Library/WolframAlpha/Query.py | 5 | 12491 | # -*- coding: utf-8 -*-
###############################################################################
#
# Query
# Allows your application to submit free-form queries similar to the queries one might enter at the Wolfram|Alpha website.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Query(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Query Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(Query, self).__init__(temboo_session, '/Library/WolframAlpha/Query')
def new_input_set(self):
return QueryInputSet()
def _make_result_set(self, result, path):
return QueryResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return QueryChoreographyExecution(session, exec_id, path)
class QueryInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the Query
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AppID(self, value):
"""
Set the value of the AppID input for this Choreo. ((required, string) The App ID provided by Wolfram|Alpha.)
"""
super(QueryInputSet, self)._set_input('AppID', value)
def set_Assumption(self, value):
"""
        Set the value of the Assumption input for this Choreo. ((optional, string) Up to 10 comma-separated assumptions to narrow a query. Wolfram|Alpha provides you with a list of assumptions in the response of a previous query. Please consult the documentation for more details.)
"""
super(QueryInputSet, self)._set_input('Assumption', value)
def set_Async(self, value):
"""
Set the value of the Async input for this Choreo. ((optional, boolean) Set to true to specify that asynchronous mode should be used. This allows partial results to come back before all the pods are computed.)
"""
super(QueryInputSet, self)._set_input('Async', value)
def set_ExcludePodID(self, value):
"""
        Set the value of the ExcludePodID input for this Choreo. ((optional, string) Specifies the IDs of the pod(s) to exclude from the response. All pod IDs are returned by default.)
"""
super(QueryInputSet, self)._set_input('ExcludePodID', value)
def set_FormatTimeout(self, value):
"""
Set the value of the FormatTimeout input for this Choreo. ((optional, decimal) The number of seconds to allow Wolfram Alpha to spend in the "format" stage for the entire collection of pods. Default value is 8.0.)
"""
super(QueryInputSet, self)._set_input('FormatTimeout', value)
def set_Format(self, value):
"""
Set the value of the Format input for this Choreo. ((optional, string) The desired result formats separated by commas. Valid values are image, plaintext, minput, moutput, cell, mathml, imagemap, sound, wav. Defaults to "plaintext,image".)
"""
super(QueryInputSet, self)._set_input('Format', value)
def set_IgnoreCase(self, value):
"""
Set the value of the IgnoreCase input for this Choreo. ((optional, boolean) Whether to force Wolfram Alpha to ignore case in queries. Defaults to false.)
"""
super(QueryInputSet, self)._set_input('IgnoreCase', value)
def set_IncludePodID(self, value):
"""
Set the value of the IncludePodID input for this Choreo. ((optional, string) Specifies the IDs of the pod(s) to include in the response. All pod IDs are returned by default.)
"""
super(QueryInputSet, self)._set_input('IncludePodID', value)
def set_Input(self, value):
"""
Set the value of the Input input for this Choreo. ((required, string) Specifies the input string (e.g., "5 largest countries").)
"""
super(QueryInputSet, self)._set_input('Input', value)
def set_Latitude(self, value):
"""
Set the value of the Latitude input for this Choreo. ((optional, decimal) When query results depend on your location, use this parameter to specify a latitude point.)
"""
super(QueryInputSet, self)._set_input('Latitude', value)
def set_Location(self, value):
"""
Set the value of the Location input for this Choreo. ((optional, string) When query results depend on your location, use this parameter to specify a location such as "Los Angeles, CA", or "Madrid".)
"""
super(QueryInputSet, self)._set_input('Location', value)
def set_Longitude(self, value):
"""
Set the value of the Longitude input for this Choreo. ((optional, decimal) When query results depend on your location, use this parameter to specify a longitude point.)
"""
super(QueryInputSet, self)._set_input('Longitude', value)
def set_Magnification(self, value):
"""
Set the value of the Magnification input for this Choreo. ((optional, decimal) Controls the magnification of pod images. The default value is 1.0, indicating no magnification.)
"""
super(QueryInputSet, self)._set_input('Magnification', value)
def set_MaxWidth(self, value):
"""
Set the value of the MaxWidth input for this Choreo. ((optional, integer) Used to change the default width of pod images. Width and MaxWidth apply to images of text and tables. This can be used to avoid undesirable line breaks if the value of Width is too small.)
"""
super(QueryInputSet, self)._set_input('MaxWidth', value)
def set_ParseTimeout(self, value):
"""
Set the value of the ParseTimeout input for this Choreo. ((optional, decimal) The number of seconds to allow Wolfram Alpha to spend in the "parsing" stage of processing. Default value is 5.0.)
"""
super(QueryInputSet, self)._set_input('ParseTimeout', value)
def set_PlotWidth(self, value):
"""
Set the value of the PlotWidth input for this Choreo. ((optional, integer) Controls the width at which plots and graphics are rendered. The default value is 200 pixels.)
"""
super(QueryInputSet, self)._set_input('PlotWidth', value)
def set_PodIndex(self, value):
"""
Set the value of the PodIndex input for this Choreo. ((optional, string) Specifies the index of the pod(s) to return. This is an alternative to specifying pods by title or ID. You can give a single number or a sequence like "2,3,5".)
"""
super(QueryInputSet, self)._set_input('PodIndex', value)
def set_PodState(self, value):
"""
Set the value of the PodState input for this Choreo. ((optional, string) Specifies a pod state change, which replaces a pod with a modified version, such as a switch from Imperial to metric units.)
"""
super(QueryInputSet, self)._set_input('PodState', value)
def set_PodTimeout(self, value):
"""
Set the value of the PodTimeout input for this Choreo. ((optional, decimal) The number of seconds to allow Wolfram Alpha to spend in the "format" stage for any one pod. Default value is 4.0.)
"""
super(QueryInputSet, self)._set_input('PodTimeout', value)
def set_PodTitle(self, value):
"""
Set the value of the PodTitle input for this Choreo. ((optional, string) Specifies the titles of the pod(s) to include in the response. All pod titles are returned by default. You can use * as a wildcard to match zero or more characters in pod titles.)
"""
super(QueryInputSet, self)._set_input('PodTitle', value)
def set_Reinterpret(self, value):
"""
Set the value of the Reinterpret input for this Choreo. ((optional, boolean) Whether to allow Wolfram Alpha to reinterpret queries that would otherwise not be understood. Defaults to false.)
"""
super(QueryInputSet, self)._set_input('Reinterpret', value)
def set_ResponseFormat(self, value):
"""
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format for the response. Valid values are JSON and XML. This will be ignored when providing an XPath query because results are returned as a string or JSON depending on the Mode specified.)
"""
super(QueryInputSet, self)._set_input('ResponseFormat', value)
def set_ScanTimeout(self, value):
"""
Set the value of the ScanTimeout input for this Choreo. ((optional, decimal) The number of seconds to allow Wolfram Alpha to compute results in the "scan" stage of processing. Default value is 3.0.)
"""
super(QueryInputSet, self)._set_input('ScanTimeout', value)
def set_Scanner(self, value):
"""
Set the value of the Scanner input for this Choreo. ((optional, string) Specifies that only pods produced by the given scanner should be returned. (e.g. Numeric, Music). Defaults to all pods.)
"""
super(QueryInputSet, self)._set_input('Scanner', value)
def set_Translation(self, value):
"""
Set the value of the Translation input for this Choreo. ((optional, boolean) Whether to allow Wolfram Alpha to try to translate simple queries into English. Defaults to true.)
"""
super(QueryInputSet, self)._set_input('Translation', value)
def set_Units(self, value):
"""
Set the value of the Units input for this Choreo. ((optional, string) Lets you specify the preferred measurement system, either "metric" or "nonmetric" (U.S. customary units).)
"""
super(QueryInputSet, self)._set_input('Units', value)
def set_Width(self, value):
"""
Set the value of the Width input for this Choreo. ((optional, integer) Used to change the default width of pod images. The default is 500 pixels. Width and MaxWidth apply to images of text and tables.)
"""
super(QueryInputSet, self)._set_input('Width', value)
def set_XPathMode(self, value):
"""
Set the value of the XPathMode input for this Choreo. ((optional, string) Valid values are "select" (the default) or "recursive". Recursive mode will iterate using the provided XPath. Select mode will return the first match at the position indicated by the provided XPath.)
"""
super(QueryInputSet, self)._set_input('XPathMode', value)
def set_XPathRegex(self, value):
"""
Set the value of the XPathRegex input for this Choreo. ((optional, string) A regular expression that can be applied to the result of the XPath query provided.)
"""
super(QueryInputSet, self)._set_input('XPathRegex', value)
def set_XPath(self, value):
"""
Set the value of the XPath input for this Choreo. ((optional, string) An XPath query to apply to the API results.)
"""
super(QueryInputSet, self)._set_input('XPath', value)
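# A minimal usage sketch (the session and App ID are assumed to exist; method
# names follow the Temboo SDK conventions used elsewhere in this file):
#   choreo = Query(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AppID('YOUR_APP_ID')
#   inputs.set_Input('5 largest countries')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())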
class QueryResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Query Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Wolfram Alpha.)
"""
return self._output.get('Response', None)
class QueryChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return QueryResultSet(response, path)
| gpl-3.0 |
noba3/KoTos | addons/Navi-X/init/default.py | 7 | 2491 | #############################################################################
#
# Navi-X Playlist browser
#
#############################################################################
import xbmc, xbmcgui, xbmcaddon
import re, os, time, datetime, traceback
import urllib2
import zipfile
import shutil
#############################################################################
# directory settings
#############################################################################
addon = xbmcaddon.Addon(id='script.navi-x')
RootDir = addon.getAddonInfo('path')
if RootDir[-1]==';': RootDir=RootDir[0:-1]
if RootDir[0] == '/':
if RootDir[-1] != '/': RootDir = RootDir+'/'
else:
if RootDir[-1]!='\\': RootDir = RootDir+'\\'
import xbmc
version = xbmc.getInfoLabel("System.BuildVersion")[:1]
#version = xbmc.getInfoLabel("System.BuildVersion")[:1]
if xbmc.getInfoLabel("System.BuildVersion")[:2] == '10':
scriptDir = "special://home/addons/"
pluginDir = "special://home/addons/"
skinDir = "special://home/skin/"
NaviXDir = scriptDir + "Navi-X/"
elif xbmc.getInfoLabel("System.BuildVersion")[:1] == '9':
scriptDir = "special://home/scripts/"
pluginDir = "special://home/plugins/"
skinDir = "special://home/skin/"
NaviXDir = scriptDir + "Navi-X/"
else:
scriptDir = "Q:\\scripts\\"
pluginDir = "Q:\\plugins\\"
skinDir = "Q:\\skin\\"
NaviXDir = scriptDir + "Navi-X\\"
#############################################################################
def Trace(string):
f = open(RootDir + "trace.txt", "a")
f.write(string + '\n')
f.close()
######################################################################
def get_system_platform():
platform = "unknown"
if xbmc.getCondVisibility( "system.platform.linux" ):
platform = "linux"
elif xbmc.getCondVisibility( "system.platform.xbox" ):
platform = "xbox"
elif xbmc.getCondVisibility( "system.platform.windows" ):
platform = "windows"
elif xbmc.getCondVisibility( "system.platform.osx" ):
platform = "osx"
# Trace("Platform: %s"%platform)
return platform
#############################################################################
#############################################################################
#retrieve the platform.
#platform = get_system_platform()
shutil.copyfile(RootDir + 'startup.plx', NaviXDir + 'startup.plx')
xbmc.executescript(NaviXDir + 'default.py')
#xbmc.sleep(1000)
| gpl-2.0 |
pyladieshre/pyladies | profiles/migrations/0002_auto_20170301_1823.py | 1 | 1075 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-03-01 16:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='phone_number',
),
migrations.AddField(
model_name='profile',
name='birth_date',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='profile',
name='contact_number',
field=models.CharField(blank=True, max_length=16, null=True),
),
migrations.AddField(
model_name='profile',
name='location',
field=models.CharField(blank=True, max_length=30),
),
migrations.AlterField(
model_name='profile',
name='bio',
field=models.TextField(blank=True, max_length=500),
),
]
| mit |
spzala/heat-translator | translator/hot/translate_outputs.py | 1 | 1734 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from translator.hot.syntax.hot_output import HotOutput
TOSCA_TO_HOT_GET_ATTRS = {'ip_address': 'first_address'}
class TranslateOutputs():
'''Translate TOSCA Outputs to Heat Outputs.'''
def __init__(self, outputs):
self.outputs = outputs
def translate(self):
return self._translate_outputs()
def _translate_outputs(self):
hot_outputs = []
for output in self.outputs:
hot_value = {}
if 'get_property' in output.value:
get_parameters = output.value['get_property']
if get_parameters[1] in TOSCA_TO_HOT_GET_ATTRS:
get_parameters[1] = \
TOSCA_TO_HOT_GET_ATTRS[get_parameters[1]]
hot_value['get_attr'] = get_parameters
hot_outputs.append(HotOutput(output.name,
hot_value,
output.description))
else:
hot_outputs.append(HotOutput(output.name,
output.value,
output.description))
return hot_outputs
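# Example mapping (a sketch; the resource name is hypothetical): a TOSCA output
# value of
#   {'get_property': ['my_server', 'ip_address']}
# becomes the HOT attribute lookup
#   {'get_attr': ['my_server', 'first_address']}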
| apache-2.0 |
shakybones/omaha | enterprise/build_group_policy_template.py | 63 | 2024 | #!/usr/bin/python2.4
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""A Hammer-specific wrapper for generate_group_policy_template."""
from omaha.enterprise import generate_group_policy_template
def BuildGroupPolicyTemplate(env, target, apps, apps_file_path=None):
"""Builds a Group Policy ADM template file, handling dependencies.
Causes WriteGroupPolicyTemplate() to be called at build time instead of as
part of the processing stage.
Args:
env: The environment.
target: ADM output file.
apps: A list of tuples containing information about each app. See
generate_group_policy_template for details.
apps_file_path: Optional path to the file that defines apps. Used to enforce
dependencies.
"""
def _WriteAdmFile(target, source, env):
"""Called during the build phase to generate and write the ADM file."""
source = source # Avoid PyLint warning.
generate_group_policy_template.WriteGroupPolicyTemplate(
env.File(target[0]).abspath,
env['public_apps'])
return 0
adm_output = env.Command(
target=target,
source=[],
action=_WriteAdmFile,
public_apps=apps
)
# Force ADM file to rebuild whenever the script or apps data change.
dependencies = ['$MAIN_DIR/enterprise/generate_group_policy_template.py']
if apps_file_path:
dependencies.append(apps_file_path)
env.Depends(adm_output, dependencies)
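# A usage sketch from a SConscript (target path and app list are hypothetical):
#   BuildGroupPolicyTemplate(env, '$STAGING_DIR/GoogleUpdate.adm', MY_APPS,
#                            apps_file_path='$MAIN_DIR/enterprise/apps.py')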
| apache-2.0 |
Lilywei123/tempest | tempest/api/compute/admin/test_flavors_access.py | 3 | 4324 | # Copyright 2013 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import test
class FlavorsAccessTestJSON(base.BaseV2ComputeAdminTest):
"""
Tests Flavor Access API extension.
Add and remove Flavor Access require admin privileges.
"""
@classmethod
def resource_setup(cls):
super(FlavorsAccessTestJSON, cls).resource_setup()
if not test.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
msg = "OS-FLV-EXT-DATA extension not enabled."
raise cls.skipException(msg)
# Compute admin flavor client
cls.client = cls.os_adm.flavors_client
# Non admin tenant ID
cls.tenant_id = cls.flavors_client.tenant_id
# Compute admin tenant ID
cls.adm_tenant_id = cls.client.tenant_id
cls.flavor_name_prefix = 'test_flavor_access_'
cls.ram = 512
cls.vcpus = 1
cls.disk = 10
@test.attr(type='gate')
def test_flavor_access_list_with_private_flavor(self):
# Test to make sure that list flavor access on a newly created
# private flavor will return an empty access list
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public='False')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
self.assertEqual(resp.status, 200)
resp, flavor_access = self.client.list_flavor_access(new_flavor_id)
self.assertEqual(resp.status, 200)
self.assertEqual(len(flavor_access), 0, str(flavor_access))
@test.attr(type='gate')
def test_flavor_access_add_remove(self):
# Test to add and remove flavor access to a given tenant.
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public='False')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
# Add flavor access to a tenant.
resp_body = {
"tenant_id": str(self.tenant_id),
"flavor_id": str(new_flavor['id']),
}
add_resp, add_body = \
self.client.add_flavor_access(new_flavor['id'], self.tenant_id)
self.assertEqual(add_resp.status, 200)
self.assertIn(resp_body, add_body)
# The flavor is present in list.
resp, flavors = self.flavors_client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
self.assertIn(new_flavor['id'], map(lambda x: x['id'], flavors))
# Remove flavor access from a tenant.
remove_resp, remove_body = \
self.client.remove_flavor_access(new_flavor['id'], self.tenant_id)
self.assertEqual(remove_resp.status, 200)
self.assertNotIn(resp_body, remove_body)
# The flavor is not present in list.
resp, flavors = self.flavors_client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
self.assertNotIn(new_flavor['id'], map(lambda x: x['id'], flavors))
| apache-2.0 |
ujjvala-addsol/addsol_hr | openerp/addons/crm/report/crm_opportunity_report.py | 309 | 4879 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.crm import crm
from openerp.osv import fields, osv
from openerp import tools
class crm_opportunity_report(osv.Model):
""" CRM Opportunity Analysis """
_name = "crm.opportunity.report"
_auto = False
_description = "CRM Opportunity Analysis"
_rec_name = 'date_deadline'
_inherit = ["crm.tracking.mixin"]
_columns = {
'date_deadline': fields.date('Exp. Closing', readonly=True, help="Expected Closing"),
'create_date': fields.datetime('Creation Date', readonly=True),
'opening_date': fields.datetime('Assignation Date', readonly=True),
'date_closed': fields.datetime('Close Date', readonly=True),
'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True),
'nbr_cases': fields.integer("# of Cases", readonly=True),
# durations
'delay_open': fields.float('Delay to Assign',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to open the case"),
'delay_close': fields.float('Delay to Close',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to close the case"),
'delay_expected': fields.float('Overpassed Deadline',digits=(16,2),readonly=True, group_operator="avg"),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'section_id':fields.many2one('crm.case.section', 'Sales Team', readonly=True),
'country_id':fields.many2one('res.country', 'Country', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'probability': fields.float('Probability',digits=(16,2),readonly=True, group_operator="avg"),
'total_revenue': fields.float('Total Revenue',digits=(16,2),readonly=True),
'expected_revenue': fields.float('Expected Revenue', digits=(16,2),readonly=True),
'stage_id': fields.many2one ('crm.case.stage', 'Stage', readonly=True, domain="[('section_ids', '=', section_id)]"),
'partner_id': fields.many2one('res.partner', 'Partner' , readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority'),
'type':fields.selection([
('lead','Lead'),
('opportunity','Opportunity'),
],'Type', help="Type is used to separate Leads and Opportunities"),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'crm_opportunity_report')
cr.execute("""
CREATE OR REPLACE VIEW crm_opportunity_report AS (
SELECT
id,
c.date_deadline,
count(id) as nbr_cases,
c.date_open as opening_date,
c.date_closed as date_closed,
c.date_last_stage_update as date_last_stage_update,
c.user_id,
c.probability,
c.stage_id,
c.type,
c.company_id,
c.priority,
c.section_id,
c.campaign_id,
c.source_id,
c.medium_id,
c.partner_id,
c.country_id,
c.planned_revenue as total_revenue,
c.planned_revenue*(c.probability/100) as expected_revenue,
c.create_date as create_date,
extract('epoch' from (c.date_closed-c.create_date))/(3600*24) as delay_close,
abs(extract('epoch' from (c.date_deadline - c.date_closed))/(3600*24)) as delay_expected,
extract('epoch' from (c.date_open-c.create_date))/(3600*24) as delay_open
FROM
crm_lead c
WHERE c.active = 'true'
GROUP BY c.id
)""")
| agpl-3.0 |
jumpstarter-io/nova | nova/api/openstack/compute/plugins/v3/certificates.py | 15 | 3057 | # Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack import extensions
import nova.cert.rpcapi
from nova import exception
from nova.i18n import _
from nova import network
ALIAS = "os-certificates"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def _translate_certificate_view(certificate, private_key=None):
return {
'data': certificate,
'private_key': private_key,
}
class CertificatesController(object):
"""The x509 Certificates API controller for the OpenStack API."""
def __init__(self):
self.network_api = network.API()
self.cert_rpcapi = nova.cert.rpcapi.CertAPI()
super(CertificatesController, self).__init__()
@extensions.expected_errors((404, 501))
def show(self, req, id):
"""Return certificate information."""
context = req.environ['nova.context']
authorize(context, action='show')
if id != 'root':
msg = _("Only root certificate can be retrieved.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
try:
cert = self.cert_rpcapi.fetch_ca(context,
project_id=context.project_id)
except exception.CryptoCAFileNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return {'certificate': _translate_certificate_view(cert)}
# NOTE(gmann): Here should be 201 instead of 200 by v2.1
# +microversions because the resource certificate has been created
# completely when returning a response.
@extensions.expected_errors(())
def create(self, req, body=None):
"""Create a certificate."""
context = req.environ['nova.context']
authorize(context, action='create')
pk, cert = self.cert_rpcapi.generate_x509_cert(context,
user_id=context.user_id, project_id=context.project_id)
return {'certificate': _translate_certificate_view(cert, pk)}
class Certificates(extensions.V3APIExtensionBase):
"""Certificates support."""
name = "Certificates"
alias = ALIAS
version = 1
def get_resources(self):
resources = [
extensions.ResourceExtension('os-certificates',
CertificatesController(),
member_actions={})]
return resources
def get_controller_extensions(self):
return []
| apache-2.0 |
byterom/android_external_skia | platform_tools/android/gyp_gen/generate_user_config.py | 67 | 3795 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Function for generating the SkUserConfig file, customized for Android."""
import os
import shutil
AUTOGEN_WARNING = (
"""
///////////////////////////////////////////////////////////////////////////////
//
// THIS FILE IS AUTOGENERATED BY GYP_TO_ANDROID.PY. DO NOT EDIT.
//
// This file contains Skia's upstream include/config/SkUserConfig.h as a
// reference, followed by the actual defines set for Android.
//
///////////////////////////////////////////////////////////////////////////////
"""
)
BUILD_GUARD = 'SkUserConfig_Android_DEFINED'
def generate_user_config(original_sk_user_config, require_sk_user_config,
target_dir, ordered_set):
"""Generate the SkUserConfig file specific to the Android framework.
Android needs its #defines in its skia/include/core directory, so that other
libraries which use Skia's headers get the right definitions. This function
takes the existing sample version of SkUserConfig, checked into Skia, and
appends the defines from ordered_set, which is expected to be a
vars_dict_lib.OrderedSet containing the defines. The result is written to
target_dir/SkUserConfig.h
Args:
original_sk_user_config: Path to original SkUserConfig.h
require_sk_user_config: If True, raise an AssertionError if
SkUserConfig.h does not exist. Either way, if it does exist, copy it
into the new file.
target_dir: Directory within which the modified SkUserConfig.h will be
written. Its name will be the same basename as
original_sk_user_config. If None, the new file will be written to the
working directory.
ordered_set: A vars_dict_lib.OrderedSet, containing a list of defines to
be appended to SkUserConfig.
Raises:
AssertionError: If original_sk_user_config does not exist.
"""
sk_user_config_exists = os.path.exists(original_sk_user_config)
if require_sk_user_config:
assert sk_user_config_exists
dst_filename = os.path.basename(original_sk_user_config)
if target_dir:
dst_filename = os.path.join(target_dir, dst_filename)
with open(dst_filename, 'w') as dst:
dst.write(AUTOGEN_WARNING)
# Copy the original exactly. This is merely for reference. Many of the
# defines written to the file below, either manually or generated from the
# gyp files, have explanations in the original SkUserConfig.h
if sk_user_config_exists:
with open(original_sk_user_config, 'r') as original:
shutil.copyfileobj(original, dst)
# Now add the defines specific to Android. Write a custom build guard to
# ensure they don't get defined more than once.
dst.write('\n// Android defines:\n')
dst.write('#ifndef ' + BUILD_GUARD + '\n')
dst.write('#define ' + BUILD_GUARD + '\n')
# Add conditional defines manually:
# do this build check for other tools that still read this header
dst.write('#ifdef ANDROID\n')
dst.write(' #include <utils/misc.h>\n')
dst.write('#endif\n\n')
dst.write('#if __BYTE_ORDER == __BIG_ENDIAN\n')
dst.write(' #define SK_CPU_BENDIAN\n')
dst.write(' #undef SK_CPU_LENDIAN\n')
dst.write('#else\n')
dst.write(' #define SK_CPU_LENDIAN\n')
dst.write(' #undef SK_CPU_BENDIAN\n')
dst.write('#endif\n\n')
# Now add the defines from the gyp files.
for item in ordered_set:
# Although our defines may have '=' in them, when written to the header
# there should be a space between the macro and what it replaces.
dst.write('#define ' + item.replace('=', ' ') + '\n')
dst.write('\n#endif // ' + BUILD_GUARD + '\n')
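# A usage sketch (paths and the defines set are hypothetical):
#   generate_user_config('include/config/SkUserConfig.h',
#                        require_sk_user_config=True,
#                        target_dir='android/include/core',
#                        ordered_set=defines)  # e.g. contains 'SK_SUPPORT_GPU=1'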
| bsd-3-clause |
ethertricity/bluesky | bluesky/traffic/route.py | 1 | 40143 | """ Route implementation for the BlueSky FMS."""
from os import path
from numpy import *
import bluesky as bs
from bluesky.tools import geo
from bluesky.tools.aero import ft, kts, g0, nm, mach2cas
from bluesky.tools.misc import degto180
from bluesky.tools.position import txt2pos
from bluesky import stack
from bluesky.stack import Argparser
# Register settings defaults
bs.settings.set_variable_defaults(log_path='output')
class Route:
"""
Route class definition : Route data for an aircraft
(basic FMS functionality)
addwpt(name,wptype,lat,lon,alt) :
    Add waypoint (closest to lat/lon when from navdb)
For lat/lon waypoints: use call sign as wpname, number will be added
Created by : Jacco M. Hoekstra
"""
# Waypoint types:
wplatlon = 0 # lat/lon waypoint
wpnav = 1 # VOR/nav database waypoint
orig = 2 # Origin airport
dest = 3 # Destination airport
calcwp = 4 # Calculated waypoint (T/C, T/D, A/C)
runway = 5 # Runway: Copy name and positions
def __init__(self):
self.nwp = 0
# Waypoint data
self.wpname = []
self.wptype = []
self.wplat = []
self.wplon = []
self.wpalt = [] # [m] negative value means not specified
self.wpspd = [] # [m/s] negative value means not specified
self.wpflyby = [] # Flyby (True)/flyover(False) switch
# Current actual waypoint
self.iactwp = -1
self.swflyby = True # Default waypoints are flyby waypoint
# if the aircraft lands on a runway, the aircraft should keep the
# runway heading
# default: False
self.flag_landed_runway = False
self.iac = self.wpdirfrom = self.wpdistto = self.wpialt = \
self.wptoalt = self.wpxtoalt = None
@staticmethod
def get_available_name(data, name_, len_=2):
"""
Check if name already exists, if so add integer 01, 02, 03 etc.
"""
appi = 0 # appended integer to name starts at zero (=nothing)
nameorg = name_
while data.count(name_) > 0:
appi += 1
format_ = "%s%0" + str(len_) + "d"
name_ = format_ % (nameorg, appi)
return name_
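    # e.g. get_available_name(['WP', 'WP01'], 'WP') -> 'WP02'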
def addwptStack(self, idx, *args): # args: all arguments of addwpt
"""ADDWPT acid, (wpname/lat,lon),[alt],[spd],[afterwp],[beforewp]"""
# print "addwptStack:",args
# Check FLYBY or FLYOVER switch, instead of adding a waypoint
if len(args) == 1:
isflyby = args[0].replace('-', '')
if isflyby == "FLYBY":
self.swflyby = True
return True
elif isflyby == "FLYOVER":
self.swflyby = False
return True
# Convert to positions
name = args[0].upper().strip()
        # Choose reference position to look up VOR and waypoints
# First waypoint: own position
if self.nwp == 0:
reflat = bs.traf.lat[idx]
reflon = bs.traf.lon[idx]
# Or last waypoint before destination
else:
if self.wptype[-1] != Route.dest or self.nwp == 1:
reflat = self.wplat[-1]
reflon = self.wplon[-1]
else:
reflat = self.wplat[-2]
reflon = self.wplon[-2]
# Default altitude, speed and afterwp
alt = -999.
spd = -999.
afterwp = ""
beforewp = ""
        # Is it a special take-off waypoint?
takeoffwpt = name.replace('-', '') == "TAKEOFF"
# Normal waypoint (no take-off waypoint => see else)
if not takeoffwpt:
# Get waypoint position
success, posobj = txt2pos(name, reflat, reflon)
if success:
lat = posobj.lat
lon = posobj.lon
if posobj.type == "nav" or posobj.type == "apt":
wptype = Route.wpnav
elif posobj.type == "rwy":
wptype = Route.runway
else: # treat as lat/lon
name = bs.traf.id[idx]
wptype = Route.wplatlon
if len(args) > 1 and args[1]:
alt = args[1]
if len(args) > 2 and args[2]:
spd = args[2]
if len(args) > 3 and args[3]:
afterwp = args[3]
if len(args) > 4 and args[4]:
beforewp = args[4]
else:
return False, "Waypoint " + name + " not found."
        # Take-off waypoint: positioned a fixed distance beyond the runway threshold
else:
# Look up runway in route
rwyrteidx = -1
i = 0
while i<self.nwp and rwyrteidx<0:
if self.wpname[i].count("/") >0:
# print (self.wpname[i])
rwyrteidx = i
i += 1
            # Only TAKEOFF is specified without a waypoint/runway
if len(args) == 1 or not args[1]:
# No runway given: use first in route or current position
# print ("rwyrteidx =",rwyrteidx)
# We find a runway in the route, so use it
if rwyrteidx>0:
rwylat = self.wplat[rwyrteidx]
rwylon = self.wplon[rwyrteidx]
aptidx = bs.navdb.getapinear(rwylat,rwylon)
aptname = bs.navdb.aptname[aptidx]
rwyname = self.wpname[rwyrteidx].split("/")[1]
rwyid = rwyname.replace("RWY","").replace("RW","")
rwyhdg = bs.navdb.rwythresholds[aptname][rwyid][2]
else:
rwylat = bs.traf.lat[idx]
rwylon = bs.traf.lon[idx]
rwyhdg = bs.traf.trk[idx]
elif args[1].count("/") > 0 or len(args) > 2 and args[2]: # we need apt,rwy
# Take care of both EHAM/RW06 as well as EHAM,RWY18L (so /&, and RW/RWY)
if args[1].count("/")>0:
aptid,rwyname = args[1].split("/")
else:
# Runway specified
aptid = args[1]
rwyname = args[2]
rwyid = rwyname.replace("RWY", "").replace("RW", "") # take away RW or RWY
# print ("apt,rwy=",aptid,rwyid)
                # TODO: Add finding the runway heading with rwyrteidx>0 and navdb
# Try to get it from the database
try:
rwyhdg = bs.navdb.rwythresholds[aptid][rwyid][2]
except:
rwydir = rwyid.replace("L","").replace("R","").replace("C","")
try:
rwyhdg = float(rwydir)*10.
except:
return False,name+" not found."
success, posobj = txt2pos(aptid+"/RW"+rwyid, reflat, reflon)
if success:
rwylat,rwylon = posobj.lat,posobj.lon
else:
rwylat = bs.traf.lat[idx]
rwylon = bs.traf.lon[idx]
else:
return False,"Use ADDWPT TAKEOFF,AIRPORTID,RWYNAME"
# Create a waypoint 2 nm away from current point
rwydist = 2.0 # [nm] use default distance away from threshold
lat,lon = geo.qdrpos(rwylat, rwylon, rwyhdg, rwydist) #[deg,deg
wptype = Route.wplatlon
            # Add after the runway in the route
if rwyrteidx > 0:
afterwp = self.wpname[rwyrteidx]
elif self.wptype and self.wptype[0] == Route.orig:
afterwp = self.wpname[0]
else:
# Assume we're called before other waypoints are added
afterwp = ""
name = "T/O-" + bs.traf.id[idx] # Use lat/lon naming convention
# Add waypoint
wpidx = self.addwpt(idx, name, wptype, lat, lon, alt, spd, afterwp, beforewp)
        # Check for success by checking that the inserted location in the flight plan is >= 0
if wpidx < 0:
return False, "Waypoint " + name + " not added."
        # Check for presence of orig/dest
norig = int(bs.traf.ap.orig[idx] != "")
ndest = int(bs.traf.ap.dest[idx] != "")
        # Check whether this is the first 'real' waypoint (not orig & dest),
# And if so, make active
if self.nwp - norig - ndest == 1: # first waypoint: make active
self.direct(idx, self.wpname[norig]) # 0 if no orig
bs.traf.swlnav[idx] = True
if afterwp and self.wpname.count(afterwp) == 0:
return True, "Waypoint " + afterwp + " not found" + \
"waypoint added at end of route"
else:
return True
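    # Illustrative stack syntax handled by addwptStack (hypothetical call
    # signs and fixes, not from the original source):
    #   ADDWPT KL204 SPY FL100 250        (navaid with alt & spd constraints)
    #   ADDWPT KL204 EHAM/RW06            (runway waypoint)
    #   ADDWPT KL204 TAKEOFF EHAM RWY18L  (take-off point beyond the threshold)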
def afteraddwptStack(self, idx, *args): # args: all arguments of addwpt
# AFTER acid, wpinroute ADDWPT acid, (wpname/lat,lon),[alt],[spd]"
if len(args) < 3:
return False, "AFTER needs more arguments"
# Change order of arguments
arglst = [args[2], None, None, args[0]] # postxt,,,afterwp
# Add alt when given
if len(args) > 3:
arglst[1] = args[3] # alt
# Add speed when given
if len(args) > 4:
arglst[2] = args[4] # spd
result = self.addwptStack(idx, *arglst) # args: all arguments of addwpt
return result
def atwptStack(self, idx, *args): # args: all arguments of addwpt
# AT acid, wpinroute [DEL] ALT/SPD spd/alt"
# args = wpname,SPD/ALT, spd/alt(string)
if len(args) < 1:
return False, "AT needs at least an aicraft id and a waypoint name"
else:
name = args[0]
if name in self.wpname:
wpidx = self.wpname.index(name)
# acid AT wpinroute: show alt & spd constraints at this waypoint
# acid AT wpinroute SPD: show spd constraint at this waypoint
# acid AT wpinroute ALT: show alt constraint at this waypoint
if len(args) == 1 or \
(len(args) == 2 and not args[1].count("/") == 1):
txt = name + " : "
# Select what to show
if len(args)==1:
swalt = True
swspd = True
else:
swalt = args[1].upper()=="ALT"
swspd = args[1].upper() in ("SPD","SPEED")
# To be safe show both when we do not know what
if not (swalt or swspd):
swalt = True
swspd = True
# Show altitude
if swalt:
if self.wpalt[wpidx] < 0:
txt += "-----"
elif self.wpalt[wpidx] > 4500 * ft:
fl = int(round((self.wpalt[wpidx] / (100. * ft))))
txt += "FL" + str(fl)
else:
txt += str(int(round(self.wpalt[wpidx] / ft)))
if swspd:
txt += "/"
# Show speed
if swspd:
if self.wpspd[wpidx] < 0:
txt += "---"
else:
txt += str(int(round(self.wpspd[wpidx] / kts)))
# Type
if swalt and swspd:
if self.wptype[wpidx] == Route.orig:
txt += "[orig]"
elif self.wptype[wpidx] == Route.dest:
txt += "[dest]"
return bs.SIMPLE_ECHO, txt
elif args[1].count("/")==1:
# acid AT wpinroute alt"/"spd
success = True
# Use parse from stack.py to interpret alt & speed
alttxt, spdtxt = args[1].split('/')
# Edit waypoint altitude constraint
if alttxt.count('-') > 1: # "----" = delete
self.wpalt[wpidx] = -999.
else:
parser = Argparser(['alt'], [False], alttxt)
if parser.parse():
self.wpalt[wpidx] = parser.arglist[0]
else:
success = False
# Edit waypoint speed constraint
if spdtxt.count('-') > 1: # "----" = delete
self.wpspd[wpidx] = -999.
else:
parser = Argparser(['spd'], [False], spdtxt)
if parser.parse():
self.wpspd[wpidx] = parser.arglist[0]
else:
success = False
if not success:
return False,"Could not parse "+args[1]+" as alt / spd"
# If success: update flight plan and guidance
self.calcfp()
self.direct(idx, self.wpname[self.iactwp])
#acid AT wpinroute ALT/SPD alt/spd
elif len(args)==3 :
swalt = args[1].upper()=="ALT"
swspd = args[1].upper() in ("SPD","SPEED")
# Use parse from stack.py to interpret alt & speed
# Edit waypoint altitude constraint
if swalt:
parser = Argparser(['alt'], [False], args[2])
if parser.parse():
self.wpalt[wpidx] = parser.arglist[0]
else:
return False,'Could not parse "' + args[2] + '" as altitude'
# Edit waypoint speed constraint
elif swspd:
parser = Argparser(['spd'], [False], args[2])
if parser.parse():
self.wpspd[wpidx] = parser.arglist[0]
else:
return False,'Could not parse "' + args[2] + '" as speed'
# Delete a constraint (or both) at this waypoint
elif args[1]=="DEL" or args[1]=="DELETE":
swalt = args[2].upper()=="ALT"
swspd = args[2].upper() in ("SPD","SPEED")
both = args[2].upper() in ("ALL","BOTH")
if swspd or both:
self.wpspd[wpidx] = -999.
if swalt or both:
self.wpalt[wpidx] = -999.
else:
return False,"No "+args[1]+" at ",name
# If success: update flight plan and guidance
self.calcfp()
self.direct(idx, self.wpname[self.iactwp])
# Waypoint not found in route
else:
return False, name + " not found in route " + bs.traf.id[idx]
return True
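    # Illustrative AT syntax handled above (hypothetical call sign/waypoint):
    #   KL204 AT SPY             (show alt & spd constraints at SPY)
    #   KL204 AT SPY FL100/250   (set both constraints at once)
    #   KL204 AT SPY DEL BOTH    (delete both constraints)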
def overwrite_wpt_data(self, wpidx, wpname, wplat, wplon, wptype, wpalt,
wpspd, swflyby):
"""
Overwrites information for a waypoint, via addwpt_data/9
"""
self.addwpt_data(True, wpidx, wpname, wplat, wplon, wptype, wpalt,
wpspd, swflyby)
def insert_wpt_data(self, wpidx, wpname, wplat, wplon, wptype, wpalt,
wpspd, swflyby):
"""
Inserts information for a waypoint, via addwpt_data/9
"""
self.addwpt_data(False, wpidx, wpname, wplat, wplon, wptype, wpalt,
wpspd, swflyby)
def addwpt_data(self, overwrt, wpidx, wpname, wplat, wplon, wptype,
wpalt, wpspd, swflyby):
"""
Overwrites or inserts information for a waypoint
"""
wplat = (wplat + 90.) % 180. - 90.
wplon = (wplon + 180.) % 360. - 180.
if overwrt:
self.wpname[wpidx] = wpname
self.wplat[wpidx] = wplat
self.wplon[wpidx] = wplon
self.wpalt[wpidx] = wpalt
self.wpspd[wpidx] = wpspd
self.wptype[wpidx] = wptype
self.wpflyby[wpidx] = swflyby
else:
self.wpname.insert(wpidx, wpname)
self.wplat.insert(wpidx, wplat)
self.wplon.insert(wpidx, wplon)
self.wpalt.insert(wpidx, wpalt)
self.wpspd.insert(wpidx, wpspd)
self.wptype.insert(wpidx, wptype)
self.wpflyby.insert(wpidx, swflyby)
def addwpt(self, iac, name, wptype, lat, lon, alt=-999., spd=-999., afterwp="", beforewp=""):
"""Adds waypoint an returns index of waypoint, lat/lon [deg], alt[m]"""
# print ("addwpt:")
# print ("iac = ",iac)
# print ("name = "+name)
# print ("alt = ",alt)
# print ("spd = ",spd)
# print ("afterwp ="+afterwp)
# print
self.iac = iac # a/c to which this route belongs
# For safety
self.nwp = len(self.wplat)
name = name.upper().strip()
wplat = lat
wplon = lon
        # By default we trust; distrust needs to be earned
wpok = True # switch for waypoint check
# Check if name already exists, if so add integer 01, 02, 03 etc.
wprtename = Route.get_available_name(
self.wpname, name)
# Select on wptype
# ORIGIN: Wptype is origin/destination?
if wptype == Route.orig or wptype == Route.dest:
orig = wptype == Route.orig
wpidx = 0 if orig else -1
suffix = "ORIG" if orig else "DEST"
if not name == bs.traf.id[iac] + suffix: # published identifier
i = bs.navdb.getaptidx(name)
if i >= 0:
wplat = bs.navdb.aptlat[i]
wplon = bs.navdb.aptlon[i]
if not orig and alt < 0:
alt = 0
# Overwrite existing origin/dest
if self.nwp > 0 and self.wptype[wpidx] == wptype:
self.overwrite_wpt_data(
wpidx, wprtename, wplat, wplon, wptype, alt, spd,
self.swflyby)
# Or add before first waypoint/append to end
else:
if not orig:
wpidx = len(self.wplat)
self.insert_wpt_data(
wpidx, wprtename, wplat, wplon, wptype, alt, spd,
self.swflyby)
self.nwp += 1
if orig and self.iactwp > 0:
self.iactwp += 1
elif not orig and self.iactwp < 0 and self.nwp == 1:
# When only waypoint: adjust pointer to point to destination
self.iactwp = 0
idx = 0 if orig else self.nwp - 1
# NORMAL: Wptype is normal waypoint? (lat/lon or nav)
else:
# Lat/lon: wpname is then call sign of aircraft: add number
if wptype == Route.wplatlon:
newname = Route.get_available_name(
self.wpname, name, 3)
# Else make data complete with nav database and closest to given lat,lon
            else: # so wptype == wpnav
newname = wprtename
if not wptype == Route.runway:
i = bs.navdb.getwpidx(name, lat, lon)
wpok = (i >= 0)
if wpok:
wplat = bs.navdb.wplat[i]
wplon = bs.navdb.wplon[i]
else:
i = bs.navdb.getaptidx(name)
wpok = (i >= 0)
if wpok:
wplat = bs.navdb.aptlat[i]
wplon = bs.navdb.aptlon[i]
# Check if afterwp or beforewp is specified and found:
aftwp = afterwp.upper().strip() # Remove space, upper case
bfwp = beforewp.upper().strip()
if wpok:
if (afterwp and self.wpname.count(aftwp) > 0) or \
(beforewp and self.wpname.count(bfwp) > 0):
wpidx = self.wpname.index(aftwp) + 1 if afterwp else \
self.wpname.index(bfwp)
self.insert_wpt_data(
wpidx, newname, wplat, wplon, wptype, alt, spd,
self.swflyby)
if afterwp and self.iactwp >= wpidx:
self.iactwp += 1
# No afterwp: append, just before dest if there is a dest
else:
# Is there a destination?
if self.nwp > 0 and self.wptype[-1] == Route.dest:
wpidx = self.nwp - 1
else:
wpidx = self.nwp
self.addwpt_data(
False, wpidx, newname, wplat, wplon, wptype, alt, spd,
self.swflyby)
idx = wpidx
self.nwp += 1
else:
idx = -1
if len(self.wplat) == 1:
self.iactwp = 0
#update qdr in traffic
bs.traf.actwp.next_qdr[iac] = self.getnextqdr()
# Update waypoints
if not (wptype == Route.calcwp):
self.calcfp()
# Update autopilot settings
if wpok and 0 <= self.iactwp < self.nwp:
self.direct(iac, self.wpname[self.iactwp])
return idx
def beforeaddwptStack(self, idx, *args): # args: all arguments of addwpt
# BEFORE acid, wpinroute ADDWPT acid, (wpname/lat,lon),[alt],[spd]"
if len(args) < 3:
return False, "BEFORE needs more arguments"
# Change order of arguments
arglst = [args[2], None, None, None, args[0]] # postxt,,,,beforewp
# Add alt when given
if len(args) > 3:
arglst[1] = args[3] # alt
# Add speed when given
if len(args) > 4:
arglst[2] = args[4] # spd
result = self.addwptStack(idx, *arglst) # args: all arguments of addwpt
return result
def direct(self, idx, wpnam):
"""Set active point to a waypoint by name"""
name = wpnam.upper().strip()
if name != "" and self.wpname.count(name) > 0:
wpidx = self.wpname.index(name)
self.iactwp = wpidx
bs.traf.actwp.lat[idx] = self.wplat[wpidx]
bs.traf.actwp.lon[idx] = self.wplon[wpidx]
bs.traf.actwp.flyby[idx] = self.wpflyby[wpidx]
self.calcfp()
bs.traf.ap.ComputeVNAV(idx, self.wptoalt[wpidx], self.wpxtoalt[wpidx])
# If there is a speed specified, process it
if self.wpspd[wpidx]>0.:
# Set target speed for autopilot
if self.wpalt[wpidx] < 0.0:
alt = bs.traf.alt[idx]
else:
alt = self.wpalt[wpidx]
# Check for valid Mach or CAS
if self.wpspd[wpidx] <2.0:
cas = mach2cas(self.wpspd[wpidx], alt)
else:
cas = self.wpspd[wpidx]
# Save it for next leg
bs.traf.actwp.spd[idx] = cas
# When already in VNAV: fly it
if bs.traf.swvnav[idx]:
bs.traf.selspd[idx]=cas
# No speed specified for next leg
else:
bs.traf.actwp.spd[idx] = -999.
qdr, dist = geo.qdrdist(bs.traf.lat[idx], bs.traf.lon[idx],
bs.traf.actwp.lat[idx], bs.traf.actwp.lon[idx])
turnrad = bs.traf.tas[idx]*bs.traf.tas[idx]/tan(radians(25.)) / g0 / nm # [nm]default bank angle 25 deg
bs.traf.actwp.turndist[idx] = (bs.traf.actwp.flyby[idx] > 0.5) * \
turnrad*abs(tan(0.5*radians(max(5., abs(degto180(qdr -
self.wpdirfrom[self.iactwp])))))) # [nm]
bs.traf.swlnav[idx] = True
return True
else:
return False, "Waypoint " + wpnam + " not found"
def listrte(self, idx, ipage=0):
"""LISTRTE command: output route to screen"""
if self.nwp <= 0:
return False, "Aircraft has no route."
if idx<0:
return False, "Aircraft id not found."
for i in range(ipage * 7, ipage * 7 + 7):
if 0 <= i < self.nwp:
# Name
if i == self.iactwp:
txt = "*" + self.wpname[i] + " : "
else:
txt = " " + self.wpname[i] + " : "
# Altitude
if self.wpalt[i] < 0:
txt += "-----/"
elif self.wpalt[i] > 4500 * ft:
fl = int(round((self.wpalt[i] / (100. * ft))))
txt += "FL" + str(fl) + "/"
else:
txt += str(int(round(self.wpalt[i] / ft))) + "/"
# Speed
if self.wpspd[i] < 0.:
txt += "---"
elif self.wpspd[i] > 2.0:
txt += str(int(round(self.wpspd[i] / kts)))
else:
txt += "M" + str(self.wpspd[i])
# Type
if self.wptype[i] == Route.orig:
txt += "[orig]"
elif self.wptype[i] == Route.dest:
txt += "[dest]"
# Display message
bs.scr.echo(txt)
# Add command for next page to screen command line
npages = int((self.nwp + 6) / 7)
if ipage + 1 < npages:
bs.scr.cmdline("LISTRTE " + bs.traf.id[idx] + "," + str(ipage + 1))
def getnextwp(self):
"""Go to next waypoint and return data"""
if self.flag_landed_runway:
# when landing, LNAV is switched off
lnavon = False
# no further waypoint
nextqdr = -999.
# and the aircraft just needs a fixed heading to
# remain on the runway
# syntax: HDG acid,hdg (deg,True)
name = self.wpname[self.iactwp]
if "RWY" in name:
rwykey = name[8:]
# if it is only RW
else:
rwykey = name[7:]
wphdg = bs.navdb.rwythresholds[name[:4]][rwykey][2]
# keep constant runway heading
stack.stack("HDG " + str(bs.traf.id[self.iac]) + " " + str(wphdg))
# start decelerating
stack.stack("DELAY " + "10 " + "SPD " + str(
bs.traf.id[self.iac]) + " " + "10")
# delete aircraft
stack.stack("DELAY " + "42 " + "DEL " + str(bs.traf.id[self.iac]))
return self.wplat[self.iactwp],self.wplon[self.iactwp], \
self.wpalt[self.iactwp],self.wpspd[self.iactwp], \
self.wpxtoalt[self.iactwp],self.wptoalt[self.iactwp],\
lnavon,self.wpflyby[self.iactwp], nextqdr
lnavon = self.iactwp +1 < self.nwp
if lnavon:
self.iactwp += 1
nextqdr = self.getnextqdr()
# in case that there is a runway, the aircraft should remain on it
# instead of deviating to the airport centre
# When there is a destination: current = runway, next = Dest
# Else: current = runway and this is also the last waypoint
if (self.wptype[self.iactwp] == 5 and
self.wpname[self.iactwp] == self.wpname[-1]) or \
(self.wptype[self.iactwp] == 5 and
self.wptype[self.iactwp + 1] == 3):
self.flag_landed_runway = True
# print ("getnextwp:",self.wpname[self.iactwp])
return self.wplat[self.iactwp],self.wplon[self.iactwp], \
self.wpalt[self.iactwp],self.wpspd[self.iactwp], \
self.wpxtoalt[self.iactwp],self.wptoalt[self.iactwp],\
lnavon,self.wpflyby[self.iactwp], nextqdr
def delrte(self):
"""Delete complete route"""
        # Simply re-initialize this route as empty
self.__init__()
return True
def delwpt(self, delwpname):
"""Delete waypoint"""
# Delete complete route?
if delwpname =="*":
return self.delrte()
# Look up waypoint
idx = -1
i = len(self.wpname)
while idx == -1 and i > 0:
i -= 1
if self.wpname[i].upper() == delwpname.upper():
idx = i
# Delete waypoint
if idx == -1:
return False, "Waypoint " + delwpname + " not found"
self.nwp -= 1
del self.wpname[idx]
del self.wplat[idx]
del self.wplon[idx]
del self.wpalt[idx]
del self.wpspd[idx]
del self.wptype[idx]
if self.iactwp > idx:
self.iactwp = max(0, self.iactwp - 1)
self.iactwp = min(self.iactwp, self.nwp - 1)
return True
def newcalcfp(self):
"""Do flight plan calculations"""
# Remove old top of descents and old top of climbs
while self.wpname.count("T/D")>0:
self.delwpt("T/D")
while self.wpname.count("T/C")>0:
self.delwpt("T/C")
# Remove old actual position waypoints
while self.wpname.count("A/C")>0:
self.delwpt("A/C")
# Insert actual position as A/C waypoint
idx = self.iactwp
self.insertcalcwp(idx,"A/C")
self.wplat[idx] = bs.traf.lat[self.iac] # deg
self.wplon[idx] = bs.traf.lon[self.iac] # deg
self.wpalt[idx] = bs.traf.alt[self.iac] # m
self.wpspd[idx] = bs.traf.tas[self.iac] # m/s
# Calculate distance to last waypoint in route
nwp = len(self.wpname)
dist2go = [0.0]
for i in range(nwp - 2, -1, -1):
qdr, dist = geo.qdrdist(self.wplat[i], self.wplon[i],
self.wplat[i + 1], self.wplon[i + 1])
dist2go = [dist2go[0] + dist] + dist2go
# Make VNAV WP list with only waypoints with altitude constraints
# This list we will use to find where to insert t/c and t/d
alt = []
x = []
name = []
for i in range(nwp):
if self.wpalt[i]>-1.:
alt.append(self.wpalt[i])
x.append(dist2go[i])
name.append(self.wpname[i]+" ") # space for check first 2 chars later
# Find where to insert cruise segment (if any)
# Find longest segment without altitude constraints
desslope = clbslope = 1.
crzalt = bs.traf.crzalt[self.iac]
if crzalt>0.:
ilong = -1
dxlong = 0.0
nvwp = len(alt)
for i in range(nvwp-1):
if x[i]-x[i+1]> dxlong:
ilong = i
dxlong = x[i]-x[i+1]
# VNAV parameters to insert T/Cs and T/Ds
crzdist = 20.*nm # minimally required distance at cruise level
clbslope = 3000.*ft/(10.*nm) # 1:3 rule for now
desslope = clbslope # 1:3 rule for now
# Can we get a sufficient distance at cruise altitude?
if max(alt[ilong],alt[ilong+1]) < crzalt :
dxclimb = (crzalt-alt[ilong])*clbslope
dxdesc = (crzalt-alt[ilong+1])*desslope
if x[ilong] - x[ilong+1] > dxclimb + crzdist + dxdesc:
# Insert T/C (top of climb) at cruise level
name.insert(ilong+1,"T/C")
alt.insert(ilong+1,crzalt)
x.insert(ilong+1,x[ilong]+dxclimb)
# Insert T/D (top of descent) at cruise level
name.insert(ilong+2,"T/D")
alt.insert(ilong+2,crzalt)
x.insert(ilong+2,x[ilong+1]-dxdesc)
# Compare angles to rates:
epsh = 50.*ft # Nothing to be done for small altitude changes
epsx = 1.*nm # [m] Nothing to be done at this short range
i = 0
while i<len(alt)-1:
if name[i][:2]=="T/":
continue
dy = alt[i+1]-alt[i] # alt change (pos = climb)
dx = x[i]-x[i+1] # distance (positive)
dxdes = abs(dy)/desslope
dxclb = abs(dy)/clbslope
            if dy < -epsh and dx + epsx > dxdes: # insert T/D?
name.insert(i+1,"T/D")
alt.insert(i+1,alt[i])
x.insert(i+1,x[i+1]-dxdes)
i += 1
elif dy>epsh and dx + epsx > dxclb: # insert T/C?
name.insert(i+1,"T/C")
alt.insert(i+1,alt[i+1])
x.insert(i+1,x[i]+dxclb)
i += 2
else:
i += 1
# Now insert T/Cs and T/Ds in actual flight plan
nvwp = len(alt)
        for i in range(nvwp-1,-1,-1):
# Copy all new waypoints (which are all named T/C or T/D)
if name[i][:2]=="T/":
# Find place in flight plan to insert T/C or T/D
j = nvwp-1
while dist2go[j]<x[i] and j>1:
j=j-1
# Interpolation factor for position on leg
f = (x[i]-dist2go[j+1])/(dist2go[j]-dist2go[j+1])
lat = f*self.wplat[j]+(1.-f)*self.wplat[j+1]
lon = f*self.wplon[j]+(1.-f)*self.wplon[j+1]
self.wpname.insert(j,name[i])
self.wptype.insert(j,Route.calcwp)
self.wplat.insert(j,lat)
self.wplon.insert(j,lon)
self.wpalt.insert(j,alt[i])
self.wpspd.insert(j,-999.)
def insertcalcwp(self, i, name):
"""Insert empty wp with no attributes at location i"""
self.wpname.insert(i,name)
self.wplat.insert(i,0.)
self.wplon.insert(i,0.)
self.wpalt.insert(i,-999.)
self.wpspd.insert(i,-999.)
self.wptype.insert(i,Route.calcwp)
def calcfp(self):
"""Do flight plan calculations"""
# self.delwpt("T/D")
# self.delwpt("T/C")
# Direction to waypoint
self.nwp = len(self.wpname)
# Create flight plan calculation table
self.wpdirfrom = self.nwp*[0.]
self.wpdistto = self.nwp*[0.]
self.wpialt = self.nwp*[-1]
self.wptoalt = self.nwp*[-999.]
self.wpxtoalt = self.nwp*[1.]
# No waypoints: make empty variables to be safe and return: nothing to do
if self.nwp==0:
return
# Calculate lateral leg data
# LNAV: Calculate leg distances and directions
for i in range(0, self.nwp - 1):
qdr,dist = geo.qdrdist(self.wplat[i] ,self.wplon[i],
self.wplat[i+1],self.wplon[i+1])
self.wpdirfrom[i] = qdr
self.wpdistto[i+1] = dist #[nm] distto is in nautical miles
if self.nwp>1:
self.wpdirfrom[-1] = self.wpdirfrom[-2]
        # Calculate longitudinal leg data
# VNAV: calc next altitude constraint: index, altitude and distance to it
ialt = -1
toalt = -999.
xtoalt = 0.
for i in range(self.nwp-1,-1,-1):
            # waypoint with altitude constraint (dest or alt specified)
if self.wptype[i]==Route.dest:
ialt = i
toalt = 0.
xtoalt = 0. # [m]
elif self.wpalt[i] >= 0:
ialt = i
toalt = self.wpalt[i]
xtoalt = 0. # [m]
            # waypoint with no altitude constraint: keep counting
else:
if i!=self.nwp-1:
xtoalt += self.wpdistto[i+1]*nm # [m] xtoalt is in meters!
else:
xtoalt = 0.0
self.wpialt[i] = ialt
self.wptoalt[i] = toalt #[m]
self.wpxtoalt[i] = xtoalt #[m]
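        # Illustrative reading of the VNAV table built above (hypothetical
        # route): if wp3 carries the next constraint at FL100, then for wp1
        # wptoalt = FL100 (in meters) and wpxtoalt = leg(wp1->wp2) +
        # leg(wp2->wp3) in meters, i.e. the distance left to make the change.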
def findact(self,i):
""" Find best default active waypoint.
This function is called during route creation"""
# print "findact is called.!"
# Check for easy answers first
if self.nwp<=0:
return -1
elif self.nwp == 1:
return 0
# Find closest
wplat = array(self.wplat)
wplon = array(self.wplon)
dy = (wplat - bs.traf.lat[i])
dx = (wplon - bs.traf.lon[i]) * bs.traf.coslat[i]
dist2 = dx*dx + dy*dy
iwpnear = argmin(dist2)
        # Unless it is behind us, take the next waypoint?
if iwpnear+1<self.nwp:
qdr = degrees(arctan2(dx[iwpnear],dy[iwpnear]))
delhdg = abs(degto180(bs.traf.trk[i]-qdr))
# we only turn to the first waypoint if we can reach the required
# heading before reaching the waypoint
time_turn = max(0.01,bs.traf.tas[i])*radians(delhdg)/(g0*tan(bs.traf.bank[i]))
time_straight= sqrt(dist2[iwpnear])*60.*nm/max(0.01,bs.traf.tas[i])
if time_turn > time_straight:
iwpnear += 1
return iwpnear
def dumpRoute(self, idx):
acid = bs.traf.id[idx]
# Open file in append mode, write header
with open(path.join(bs.settings.output, 'routelog.txt'), "a") as f:
f.write("\nRoute "+acid+":\n")
f.write("(name,type,lat,lon,alt,spd,toalt,xtoalt) ")
f.write("type: 0=latlon 1=navdb 2=orig 3=dest 4=calwp\n")
# write flight plan VNAV data (Lateral is visible on screen)
for j in range(self.nwp):
f.write( str(( j, self.wpname[j], self.wptype[j],
round(self.wplat[j], 4), round(self.wplon[j], 4),
int(0.5+self.wpalt[j]/ft), int(0.5+self.wpspd[j]/kts),
int(0.5+self.wptoalt[j]/ft), round(self.wpxtoalt[j]/nm, 3)
)) + "\n")
            # End of data (the with-block closes the file)
            f.write("----\n")
def getnextqdr(self):
# get qdr for next leg
if -1 < self.iactwp < self.nwp - 1:
nextqdr, dist = geo.qdrdist(\
self.wplat[self.iactwp], self.wplon[self.iactwp],\
self.wplat[self.iactwp+1],self.wplon[self.iactwp+1])
else:
nextqdr = -999.
return nextqdr
| gpl-3.0 |
upndwn4par/android_kernel_lge_hammerhead | scripts/gcc-wrapper.py | 1276 | 3382 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"return_address.c:62",
])
# Capture the name of the object file, so we can remove it if a forbidden warning is found.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
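# Example invocation (hypothetical; a kernel build would typically set
# something like CC="scripts/gcc-wrapper.py gcc" so every compile is checked):
#   scripts/gcc-wrapper.py gcc -Wall -c foo.c -o foo.o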
| gpl-2.0 |
flh/odoo | addons/note/tests/__init__.py | 159 | 1105 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_note
checks = [
test_note,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mdaif/olympia | apps/reviews/tasks.py | 11 | 3537 | import logging
from django.db.models import Count, Avg, F
import caching.base as caching
from addons.models import Addon
from amo.celery import task
from .models import Review, GroupedRating
log = logging.getLogger('z.task')
@task(rate_limit='50/m')
def update_denorm(*pairs, **kw):
"""
Takes a bunch of (addon, user) pairs and sets the denormalized fields for
all reviews matching that pair.
"""
log.info('[%s@%s] Updating review denorms.' %
(len(pairs), update_denorm.rate_limit))
using = kw.get('using')
for addon, user in pairs:
reviews = list(Review.objects.valid().no_cache().using(using)
.filter(addon=addon, user=user).order_by('created'))
if not reviews:
continue
for idx, review in enumerate(reviews):
review.previous_count = idx
review.is_latest = False
reviews[-1].is_latest = True
for review in reviews:
review.save()
@task
def addon_review_aggregates(addons, **kw):
if isinstance(addons, (int, long)): # Got passed a single addon id.
addons = [addons]
log.info('[%s@%s] Updating total reviews and average ratings.' %
(len(addons), addon_review_aggregates.rate_limit))
using = kw.get('using')
addon_objs = list(Addon.objects.filter(pk__in=addons))
# The following returns something like
# [{'rating': 2.0, 'addon': 7L, 'count': 5},
# {'rating': 3.75, 'addon': 6L, 'count': 8}, ...]
qs = (Review.objects.valid().no_cache().using(using)
.values('addon') # Group by addon id.
.annotate(rating=Avg('rating'), count=Count('addon'))) # Aggregates.
stats = dict((x['addon'], (x['rating'], x['count'])) for x in qs)
for addon in addon_objs:
rating, reviews = stats.get(addon.id, [0, 0])
addon.update(total_reviews=reviews, average_rating=rating)
# Delay bayesian calculations to avoid slave lag.
addon_bayesian_rating.apply_async(args=addons, countdown=5)
addon_grouped_rating.apply_async(args=addons, kwargs={'using': using})
@task
def addon_bayesian_rating(*addons, **kw):
def addon_aggregates():
return Addon.objects.aggregate(rating=Avg('average_rating'),
reviews=Avg('total_reviews'))
log.info('[%s@%s] Updating bayesian ratings.' %
(len(addons), addon_bayesian_rating.rate_limit))
avg = caching.cached(addon_aggregates, 'task.bayes.avg', 60 * 60 * 60)
# Rating can be NULL in the DB, so don't update it if it's not there.
if avg['rating'] is None:
return
mc = avg['reviews'] * avg['rating']
for addon in Addon.objects.no_cache().filter(id__in=addons):
if addon.average_rating is None:
# Ignoring addons with no average rating.
continue
q = Addon.objects.filter(id=addon.id)
if addon.total_reviews:
num = mc + F('total_reviews') * F('average_rating')
denom = avg['reviews'] + F('total_reviews')
q.update(bayesian_rating=num / denom)
else:
q.update(bayesian_rating=0)
@task
def addon_grouped_rating(*addons, **kw):
"""Roll up add-on ratings for the bar chart."""
# We stick this all in memcached since it's not critical.
log.info('[%s@%s] Updating addon grouped ratings.' %
(len(addons), addon_grouped_rating.rate_limit))
using = kw.get('using')
for addon in addons:
GroupedRating.set(addon, using=using)
| bsd-3-clause |
SUSE/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/virtual_network.py | 2 | 3725 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetwork(Resource):
"""Virtual Network resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict
:param address_space: The AddressSpace that contains an array of IP
address ranges that can be used by subnets.
:type address_space: :class:`AddressSpace
<azure.mgmt.network.v2016_09_01.models.AddressSpace>`
:param dhcp_options: The dhcpOptions that contains an array of DNS servers
available to VMs deployed in the virtual network.
:type dhcp_options: :class:`DhcpOptions
<azure.mgmt.network.v2016_09_01.models.DhcpOptions>`
:param subnets: A list of subnets in a Virtual Network.
:type subnets: list of :class:`Subnet
<azure.mgmt.network.v2016_09_01.models.Subnet>`
:param virtual_network_peerings: A list of peerings in a Virtual Network.
:type virtual_network_peerings: list of :class:`VirtualNetworkPeering
<azure.mgmt.network.v2016_09_01.models.VirtualNetworkPeering>`
:param resource_guid: The resourceGuid property of the Virtual Network
resource.
:type resource_guid: str
    :param provisioning_state: The provisioning state of the virtual network
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'address_space': {'key': 'properties.addressSpace', 'type': 'AddressSpace'},
'dhcp_options': {'key': 'properties.dhcpOptions', 'type': 'DhcpOptions'},
'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
'virtual_network_peerings': {'key': 'properties.virtualNetworkPeerings', 'type': '[VirtualNetworkPeering]'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, location=None, tags=None, address_space=None, dhcp_options=None, subnets=None, virtual_network_peerings=None, resource_guid=None, provisioning_state=None, etag=None):
super(VirtualNetwork, self).__init__(id=id, location=location, tags=tags)
self.address_space = address_space
self.dhcp_options = dhcp_options
self.subnets = subnets
self.virtual_network_peerings = virtual_network_peerings
self.resource_guid = resource_guid
self.provisioning_state = provisioning_state
self.etag = etag
| mit |
m-sanders/wagtail | wagtail/wagtailcore/blocks/field_block.py | 1 | 15150 | from __future__ import absolute_import, unicode_literals
import datetime
import six
from django import forms
from django.db.models.fields import BLANK_CHOICE_DASH
from django.template.loader import render_to_string
from django.utils.encoding import force_text
from django.utils.dateparse import parse_date, parse_time, parse_datetime
from django.utils.functional import cached_property
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from wagtail.wagtailcore.rich_text import RichText
from .base import Block
class FieldBlock(Block):
"""A block that wraps a Django form field"""
class Meta:
default = None
def id_for_label(self, prefix):
return self.field.widget.id_for_label(prefix)
def render_form(self, value, prefix='', errors=None):
widget = self.field.widget
widget_attrs = {'id': prefix, 'placeholder': self.label}
field_value = self.value_for_form(value)
if hasattr(widget, 'render_with_errors'):
widget_html = widget.render_with_errors(prefix, field_value, attrs=widget_attrs, errors=errors)
widget_has_rendered_errors = True
else:
widget_html = widget.render(prefix, field_value, attrs=widget_attrs)
widget_has_rendered_errors = False
return render_to_string('wagtailadmin/block_forms/field.html', {
'name': self.name,
'classes': self.meta.classname,
'widget': widget_html,
'field': self.field,
'errors': errors if (not widget_has_rendered_errors) else None
})
def value_from_form(self, value):
"""
The value that we get back from the form field might not be the type
that this block works with natively; for example, the block may want to
wrap a simple value such as a string in an object that provides a fancy
HTML rendering (e.g. EmbedBlock).
We therefore provide this method to perform any necessary conversion
from the form field value to the block's native value. As standard,
this returns the form field value unchanged.
"""
return value
def value_for_form(self, value):
"""
Reverse of value_from_form; convert a value of this block's native value type
to one that can be rendered by the form field
"""
return value
def value_from_datadict(self, data, files, prefix):
return self.value_from_form(self.field.widget.value_from_datadict(data, files, prefix))
def clean(self, value):
# We need an annoying value_for_form -> value_from_form round trip here to account for
# the possibility that the form field is set up to validate a different value type to
# the one this block works with natively
return self.value_from_form(self.field.clean(self.value_for_form(value)))
class CharBlock(FieldBlock):
def __init__(self, required=True, help_text=None, max_length=None, min_length=None, **kwargs):
# CharField's 'label' and 'initial' parameters are not exposed, as Block handles that functionality natively (via 'label' and 'default')
self.field = forms.CharField(required=required, help_text=help_text, max_length=max_length, min_length=min_length)
super(CharBlock, self).__init__(**kwargs)
def get_searchable_content(self, value):
return [force_text(value)]
class TextBlock(FieldBlock):
def __init__(self, required=True, help_text=None, rows=1, max_length=None, min_length=None, **kwargs):
self.field_options = {'required': required, 'help_text': help_text, 'max_length': max_length, 'min_length': min_length}
self.rows = rows
super(TextBlock, self).__init__(**kwargs)
@cached_property
def field(self):
from wagtail.wagtailadmin.widgets import AdminAutoHeightTextInput
field_kwargs = {'widget': AdminAutoHeightTextInput(attrs={'rows': self.rows})}
field_kwargs.update(self.field_options)
return forms.CharField(**field_kwargs)
def get_searchable_content(self, value):
return [force_text(value)]
class URLBlock(FieldBlock):
def __init__(self, required=True, help_text=None, max_length=None, min_length=None, **kwargs):
self.field = forms.URLField(required=required, help_text=help_text, max_length=max_length, min_length=min_length)
super(URLBlock, self).__init__(**kwargs)
class BooleanBlock(FieldBlock):
def __init__(self, required=True, help_text=None, **kwargs):
# NOTE: As with forms.BooleanField, the default of required=True means that the checkbox
# must be ticked to pass validation (i.e. it's equivalent to an "I agree to the terms and
# conditions" box). To get the conventional yes/no behaviour, you must explicitly pass
# required=False.
self.field = forms.BooleanField(required=required, help_text=help_text)
super(BooleanBlock, self).__init__(**kwargs)
class DateBlock(FieldBlock):
def __init__(self, required=True, help_text=None, **kwargs):
self.field_options = {'required': required, 'help_text': help_text}
super(DateBlock, self).__init__(**kwargs)
@cached_property
def field(self):
from wagtail.wagtailadmin.widgets import AdminDateInput
field_kwargs = {'widget': AdminDateInput}
field_kwargs.update(self.field_options)
return forms.DateField(**field_kwargs)
def to_python(self, value):
# Serialising to JSON uses DjangoJSONEncoder, which converts date/time objects to strings.
# The reverse does not happen on decoding, because there's no way to know which strings
# should be decoded; we have to convert strings back to dates here instead.
if value is None or isinstance(value, datetime.date):
return value
else:
return parse_date(value)
class TimeBlock(FieldBlock):
def __init__(self, required=True, help_text=None, **kwargs):
self.field_options = {'required': required, 'help_text': help_text}
super(TimeBlock, self).__init__(**kwargs)
@cached_property
def field(self):
from wagtail.wagtailadmin.widgets import AdminTimeInput
field_kwargs = {'widget': AdminTimeInput}
field_kwargs.update(self.field_options)
return forms.TimeField(**field_kwargs)
def to_python(self, value):
if value is None or isinstance(value, datetime.time):
return value
else:
return parse_time(value)
class DateTimeBlock(FieldBlock):
def __init__(self, required=True, help_text=None, **kwargs):
self.field_options = {'required': required, 'help_text': help_text}
super(DateTimeBlock, self).__init__(**kwargs)
@cached_property
def field(self):
from wagtail.wagtailadmin.widgets import AdminDateTimeInput
field_kwargs = {'widget': AdminDateTimeInput}
field_kwargs.update(self.field_options)
return forms.DateTimeField(**field_kwargs)
def to_python(self, value):
if value is None or isinstance(value, datetime.datetime):
return value
else:
return parse_datetime(value)
class ChoiceBlock(FieldBlock):
choices = ()
def __init__(self, choices=None, required=True, help_text=None, **kwargs):
if choices is None:
# no choices specified, so pick up the choice list defined at the class level
choices = list(self.choices)
else:
choices = list(choices)
# keep a copy of all kwargs (including our normalised choices list) for deconstruct()
self._constructor_kwargs = kwargs.copy()
self._constructor_kwargs['choices'] = choices
if required is not True:
self._constructor_kwargs['required'] = required
if help_text is not None:
self._constructor_kwargs['help_text'] = help_text
# If choices does not already contain a blank option, insert one
# (to match Django's own behaviour for modelfields: https://github.com/django/django/blob/1.7.5/django/db/models/fields/__init__.py#L732-744)
has_blank_choice = False
for v1, v2 in choices:
if isinstance(v2, (list, tuple)):
# this is a named group, and v2 is the value list
has_blank_choice = any([value in ('', None) for value, label in v2])
if has_blank_choice:
break
else:
# this is an individual choice; v1 is the value
if v1 in ('', None):
has_blank_choice = True
break
if not has_blank_choice:
choices = BLANK_CHOICE_DASH + choices
self.field = forms.ChoiceField(choices=choices, required=required, help_text=help_text)
super(ChoiceBlock, self).__init__(**kwargs)
def deconstruct(self):
"""
Always deconstruct ChoiceBlock instances as if they were plain ChoiceBlocks with their
choice list passed in the constructor, even if they are actually subclasses. This allows
users to define subclasses of ChoiceBlock in their models.py, with specific choice lists
passed in, without references to those classes ending up frozen into migrations.
"""
return ('wagtail.wagtailcore.blocks.ChoiceBlock', [], self._constructor_kwargs)
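    # Minimal usage sketch (hypothetical subclass, not part of the original
    # source): thanks to deconstruct() above, migrations record this as a
    # plain ChoiceBlock with the choices passed in, never as SizeChoiceBlock.
    #
    #   class SizeChoiceBlock(ChoiceBlock):
    #       choices = [('S', 'Small'), ('M', 'Medium'), ('L', 'Large')]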
class RichTextBlock(FieldBlock):
def __init__(self, required=True, help_text=None, **kwargs):
self.field_options = {'required': required, 'help_text': help_text}
super(RichTextBlock, self).__init__(**kwargs)
def get_default(self):
if isinstance(self.meta.default, RichText):
return self.meta.default
else:
return RichText(self.meta.default)
def to_python(self, value):
# convert a source-HTML string from the JSONish representation
# to a RichText object
return RichText(value)
def get_prep_value(self, value):
# convert a RichText object back to a source-HTML string to go into
# the JSONish representation
return value.source
@cached_property
def field(self):
from wagtail.wagtailcore.fields import RichTextArea
return forms.CharField(widget=RichTextArea, **self.field_options)
def value_for_form(self, value):
# RichTextArea takes the source-HTML string as input (and takes care
# of expanding it for the purposes of the editor)
return value.source
def value_from_form(self, value):
        # RichTextArea returns a source-HTML string; convert to a RichText object
return RichText(value)
def get_searchable_content(self, value):
return [force_text(value.source)]
class RawHTMLBlock(FieldBlock):
def __init__(self, required=True, help_text=None, max_length=None, min_length=None, **kwargs):
self.field = forms.CharField(
required=required, help_text=help_text, max_length=max_length, min_length=min_length,
widget=forms.Textarea)
super(RawHTMLBlock, self).__init__(**kwargs)
def get_default(self):
return mark_safe(self.meta.default or '')
def to_python(self, value):
return mark_safe(value)
def get_prep_value(self, value):
# explicitly convert to a plain string, just in case we're using some serialisation method
# that doesn't cope with SafeText values correctly
return six.text_type(value)
def value_for_form(self, value):
# need to explicitly mark as unsafe, or it'll output unescaped HTML in the textarea
return six.text_type(value)
def value_from_form(self, value):
return mark_safe(value)
class Meta:
icon = 'code'
class ChooserBlock(FieldBlock):
    """Abstract superclass for fields that implement a chooser interface (page, image, snippet etc)"""
    def __init__(self, required=True, help_text=None, **kwargs):
        self.required = required
        self.help_text = help_text
        super(ChooserBlock, self).__init__(**kwargs)
@cached_property
def field(self):
return forms.ModelChoiceField(
queryset=self.target_model.objects.all(), widget=self.widget, required=self.required,
help_text=self.help_text)
def to_python(self, value):
# the incoming serialised value should be None or an ID
if value is None:
return value
else:
try:
return self.target_model.objects.get(pk=value)
except self.target_model.DoesNotExist:
return None
def get_prep_value(self, value):
# the native value (a model instance or None) should serialise to an ID or None
if value is None:
return None
else:
return value.id
def value_from_form(self, value):
# ModelChoiceField sometimes returns an ID, and sometimes an instance; we want the instance
if value is None or isinstance(value, self.target_model):
return value
else:
try:
return self.target_model.objects.get(pk=value)
except self.target_model.DoesNotExist:
return None
def clean(self, value):
# ChooserBlock works natively with model instances as its 'value' type (because that's what you
# want to work with when doing front-end templating), but ModelChoiceField.clean expects an ID
# as the input value (and returns a model instance as the result). We don't want to bypass
# ModelChoiceField.clean entirely (it might be doing relevant validation, such as checking page
# type) so we convert our instance back to an ID here. It means we have a wasted round-trip to
# the database when ModelChoiceField.clean promptly does its own lookup, but there's no easy way
# around that...
if isinstance(value, self.target_model):
value = value.pk
return super(ChooserBlock, self).clean(value)
class PageChooserBlock(ChooserBlock):
@cached_property
def target_model(self):
from wagtail.wagtailcore.models import Page # TODO: allow limiting to specific page types
return Page
@cached_property
def widget(self):
from wagtail.wagtailadmin.widgets import AdminPageChooser
return AdminPageChooser
def render_basic(self, value):
if value:
return format_html('<a href="{0}">{1}</a>', value.url, value.title)
else:
return ''
# Ensure that the blocks defined here get deconstructed as wagtailcore.blocks.FooBlock
# rather than wagtailcore.blocks.field.FooBlock
block_classes = [
FieldBlock, CharBlock, URLBlock, RichTextBlock, RawHTMLBlock, ChooserBlock, PageChooserBlock,
TextBlock, BooleanBlock, DateBlock, TimeBlock, DateTimeBlock, ChoiceBlock,
]
DECONSTRUCT_ALIASES = {
cls: 'wagtail.wagtailcore.blocks.%s' % cls.__name__
for cls in block_classes
}
__all__ = [cls.__name__ for cls in block_classes]
| bsd-3-clause |
kalov/ShapePFCN | caffe-ours/python/caffe/io.py | 1 | 12728 | import numpy as np
import skimage.io
from scipy.ndimage import zoom
from skimage.transform import resize
try:
# Python3 will most likely not be able to load protobuf
from caffe.proto import caffe_pb2
except:
import sys
if sys.version_info >= (3, 0):
print("Failed to include caffe_pb2, things might go wrong!")
else:
raise
## proto / datum / ndarray conversion
def blobproto_to_array(blob, return_diff=False):
"""
Convert a blob proto to an array. In default, we will just return the data,
unless return_diff is True, in which case we will return the diff.
"""
# Read the data into an array
if return_diff:
data = np.array(blob.diff)
else:
data = np.array(blob.data)
# Reshape the array
if blob.HasField('num') or blob.HasField('channels') or blob.HasField('height') or blob.HasField('width'):
# Use legacy 4D shape
return data.reshape(blob.num, blob.channels, blob.height, blob.width)
else:
return data.reshape(blob.shape.dim)
def array_to_blobproto(arr, diff=None):
"""Converts a N-dimensional array to blob proto. If diff is given, also
convert the diff. You need to make sure that arr and diff have the same
shape, and this function does not do sanity check.
"""
blob = caffe_pb2.BlobProto()
blob.shape.dim.extend(arr.shape)
blob.data.extend(arr.astype(float).flat)
if diff is not None:
blob.diff.extend(diff.astype(float).flat)
return blob
def arraylist_to_blobprotovector_str(arraylist):
"""Converts a list of arrays to a serialized blobprotovec, which could be
then passed to a network for processing.
"""
vec = caffe_pb2.BlobProtoVector()
vec.blobs.extend([array_to_blobproto(arr) for arr in arraylist])
return vec.SerializeToString()
def blobprotovector_str_to_arraylist(str):
"""Converts a serialized blobprotovec to a list of arrays.
"""
vec = caffe_pb2.BlobProtoVector()
vec.ParseFromString(str)
return [blobproto_to_array(blob) for blob in vec.blobs]
def array_to_datum(arr, label=None):
"""Converts a 3-dimensional array to datum. If the array has dtype uint8,
the output data will be encoded as a string. Otherwise, the output data
will be stored in float format.
"""
if arr.ndim != 3:
raise ValueError('Incorrect array shape.')
datum = caffe_pb2.Datum()
datum.channels, datum.height, datum.width = arr.shape
if arr.dtype == np.uint8:
datum.data = arr.tostring()
else:
datum.float_data.extend(arr.flat)
if label is not None:
datum.label = label
return datum
def datum_to_array(datum):
"""Converts a datum to an array. Note that the label is not returned,
as one can easily get it by calling datum.label.
"""
if len(datum.data):
return np.fromstring(datum.data, dtype=np.uint8).reshape(
datum.channels, datum.height, datum.width)
else:
return np.array(datum.float_data).astype(float).reshape(
datum.channels, datum.height, datum.width)
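# Round-trip sketch for the datum helpers above (illustrative only, e.g. when
# filling an LMDB by hand):
#
#   arr = (np.random.rand(3, 32, 32) * 255).astype(np.uint8)
#   datum = array_to_datum(arr, label=1)
#   restored = datum_to_array(datum)
#   assert (arr == restored).all() and datum.label == 1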
## Pre-processing
class Transformer:
"""
Transform input for feeding into a Net.
Note: this is mostly for illustrative purposes and it is likely better
to define your own input preprocessing routine for your needs.
Parameters
----------
net : a Net for which the input should be prepared
"""
def __init__(self, inputs):
self.inputs = inputs
self.transpose = {}
self.channel_swap = {}
self.raw_scale = {}
self.mean = {}
self.input_scale = {}
def __check_input(self, in_):
if in_ not in self.inputs:
raise Exception('{} is not one of the net inputs: {}'.format(
in_, self.inputs))
def preprocess(self, in_, data):
"""
Format input for Caffe:
- convert to single
- resize to input dimensions (preserving number of channels)
- transpose dimensions to K x H x W
- reorder channels (for instance color to BGR)
- scale raw input (e.g. from [0, 1] to [0, 255] for ImageNet models)
- subtract mean
- scale feature
Parameters
----------
in_ : name of input blob to preprocess for
data : (H' x W' x K) ndarray
Returns
-------
caffe_in : (K x H x W) ndarray for input to a Net
"""
self.__check_input(in_)
caffe_in = data.astype(np.float32, copy=False)
transpose = self.transpose.get(in_)
channel_swap = self.channel_swap.get(in_)
raw_scale = self.raw_scale.get(in_)
mean = self.mean.get(in_)
input_scale = self.input_scale.get(in_)
in_dims = self.inputs[in_][2:]
if caffe_in.shape[:2] != in_dims:
caffe_in = resize_image(caffe_in, in_dims)
if transpose is not None:
caffe_in = caffe_in.transpose(transpose)
if channel_swap is not None:
caffe_in = caffe_in[channel_swap, :, :]
if raw_scale is not None:
caffe_in *= raw_scale
if mean is not None:
caffe_in -= mean
if input_scale is not None:
caffe_in *= input_scale
return caffe_in
def deprocess(self, in_, data):
"""
Invert Caffe formatting; see preprocess().
"""
self.__check_input(in_)
decaf_in = data.copy().squeeze()
transpose = self.transpose.get(in_)
channel_swap = self.channel_swap.get(in_)
raw_scale = self.raw_scale.get(in_)
mean = self.mean.get(in_)
input_scale = self.input_scale.get(in_)
if input_scale is not None:
decaf_in /= input_scale
if mean is not None:
decaf_in += mean
if raw_scale is not None:
decaf_in /= raw_scale
if channel_swap is not None:
decaf_in = decaf_in[np.argsort(channel_swap), :, :]
if transpose is not None:
decaf_in = decaf_in.transpose(np.argsort(transpose))
return decaf_in
def set_transpose(self, in_, order):
"""
Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
Parameters
----------
in_ : which input to assign this channel order
order : the order to transpose the dimensions
"""
self.__check_input(in_)
if len(order) != len(self.inputs[in_]) - 1:
raise Exception('Transpose order needs to have the same number of '
'dimensions as the input.')
self.transpose[in_] = order
def set_channel_swap(self, in_, order):
"""
Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
N.B. this assumes the channels are the first dimension AFTER transpose.
Parameters
----------
in_ : which input to assign this channel order
order : the order to take the channels.
(2,1,0) maps RGB to BGR for example.
"""
self.__check_input(in_)
if len(order) != self.inputs[in_][1]:
raise Exception('Channel swap needs to have the same number of '
'dimensions as the input channels.')
self.channel_swap[in_] = order
def set_raw_scale(self, in_, scale):
"""
Set the scale of raw features s.t. the input blob = input * scale.
While Python represents images in [0, 1], certain Caffe models
like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale
of these models must be 255.
Parameters
----------
in_ : which input to assign this scale factor
scale : scale coefficient
"""
self.__check_input(in_)
self.raw_scale[in_] = scale
def set_mean(self, in_, mean):
"""
Set the mean to subtract for centering the data.
Parameters
----------
in_ : which input to assign this mean.
mean : mean ndarray (input dimensional or broadcastable)
"""
self.__check_input(in_)
ms = mean.shape
if mean.ndim == 1:
# broadcast channels
if ms[0] != self.inputs[in_][1]:
raise ValueError('Mean channels incompatible with input.')
mean = mean[:, np.newaxis, np.newaxis]
else:
# elementwise mean
if len(ms) == 2:
ms = (1,) + ms
if len(ms) != 3:
raise ValueError('Mean shape invalid')
if ms != self.inputs[in_][1:]:
raise ValueError('Mean shape incompatible with input shape.')
self.mean[in_] = mean
def set_input_scale(self, in_, scale):
"""
Set the scale of preprocessed inputs s.t. the blob = blob * scale.
N.B. input_scale is done AFTER mean subtraction and other preprocessing
while raw_scale is done BEFORE.
Parameters
----------
in_ : which input to assign this scale factor
scale : scale coefficient
"""
self.__check_input(in_)
self.input_scale[in_] = scale
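# Typical Transformer setup for a CaffeNet-style model (illustrative sketch;
# assumes a net with one input blob named 'data' and a channel-mean array mu):
#
#   transformer = Transformer({'data': net.blobs['data'].data.shape})
#   transformer.set_transpose('data', (2, 0, 1))     # H x W x K -> K x H x W
#   transformer.set_mean('data', mu)                 # subtract the mean
#   transformer.set_raw_scale('data', 255)           # [0, 1] -> [0, 255]
#   transformer.set_channel_swap('data', (2, 1, 0))  # RGB -> BGR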
## Image IO
def load_image(filename, color=True):
"""
Load an image converting from grayscale or alpha as needed.
Parameters
----------
filename : string
color : boolean
flag for color format. True (default) loads as RGB while False
loads as intensity (if image is already grayscale).
Returns
-------
image : an image with type np.float32 in range [0, 1]
of size (H x W x 3) in RGB or
of size (H x W x 1) in grayscale.
"""
img = skimage.img_as_float(skimage.io.imread(filename, as_grey=not color)).astype(np.float32)
if img.ndim == 2:
img = img[:, :, np.newaxis]
if color:
img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def resize_image(im, new_dims, interp_order=1):
"""
Resize an image array with interpolation.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
if im.shape[-1] == 1 or im.shape[-1] == 3:
im_min, im_max = im.min(), im.max()
if im_max > im_min:
# skimage is fast but only understands {1,3} channel images
# in [0, 1].
im_std = (im - im_min) / (im_max - im_min)
resized_std = resize(im_std, new_dims, order=interp_order)
resized_im = resized_std * (im_max - im_min) + im_min
else:
# the image is a constant -- avoid divide by 0
ret = np.empty((new_dims[0], new_dims[1], im.shape[-1]),
dtype=np.float32)
ret.fill(im_min)
return ret
else:
# ndimage interpolates anything but more slowly.
scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2]))
resized_im = zoom(im, scale + (1,), order=interp_order)
return resized_im.astype(np.float32)
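# Usage sketch (illustrative only):
#
#   im = np.random.rand(256, 256, 3).astype(np.float32)
#   small = resize_image(im, (128, 128))  # shape (128, 128, 3)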
def oversample(images, crop_dims):
"""
Crop images into the four corners, center, and their mirrored versions.
Parameters
----------
image : iterable of (H x W x K) ndarrays
crop_dims : (height, width) tuple for the crops.
Returns
-------
crops : (10*N x H x W x K) ndarray of crops for number of inputs N.
"""
# Dimensions and center.
im_shape = np.array(images[0].shape)
crop_dims = np.array(crop_dims)
im_center = im_shape[:2] / 2.0
# Make crop coordinates
h_indices = (0, im_shape[0] - crop_dims[0])
w_indices = (0, im_shape[1] - crop_dims[1])
crops_ix = np.empty((5, 4), dtype=int)
curr = 0
for i in h_indices:
for j in w_indices:
crops_ix[curr] = (i, j, i + crop_dims[0], j + crop_dims[1])
curr += 1
crops_ix[4] = np.tile(im_center, (1, 2)) + np.concatenate([
-crop_dims / 2.0,
crop_dims / 2.0
])
crops_ix = np.tile(crops_ix, (2, 1))
# Extract crops
crops = np.empty((10 * len(images), crop_dims[0], crop_dims[1],
im_shape[-1]), dtype=np.float32)
ix = 0
for im in images:
for crop in crops_ix:
crops[ix] = im[crop[0]:crop[2], crop[1]:crop[3], :]
ix += 1
crops[ix-5:ix] = crops[ix-5:ix, :, ::-1, :] # flip for mirrors
return crops
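# Usage sketch (illustrative only): build the standard 10-crop input for a
# 227 x 227 model from one loaded image:
#
#   crops = oversample([load_image('cat.jpg')], (227, 227))
#   assert crops.shape[0] == 10  # 4 corners + center, each plus its mirror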
| gpl-3.0 |
Dawny33/luigi | examples/spark_als.py | 55 | 4373 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import luigi
import luigi.format
import luigi.contrib.hdfs
from luigi.contrib.spark import SparkSubmitTask
class UserItemMatrix(luigi.Task):
#: the size of the data being generated
data_size = luigi.IntParameter()
def run(self):
"""
Generates :py:attr:`~.UserItemMatrix.data_size` elements.
        Writes this data in tab separated value format into the target :py:func:`~/.UserItemMatrix.output`.
        The data has the following elements:
        * `user`: a user id,
        * `track`: a randomly chosen track id,
        * `rating`: an implicit rating, fixed to 1.0 here.
"""
        w = self.output().open('w')
        for user in range(self.data_size):
            track = int(random.random() * self.data_size)
            w.write('%d\t%d\t%f\n' % (user, track, 1.0))
        w.close()
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file in HDFS.
:return: the target output for this task.
:rtype: object (:py:class:`~luigi.target.Target`)
"""
return luigi.contrib.hdfs.HdfsTarget('data-matrix', format=luigi.format.Gzip)
class SparkALS(SparkSubmitTask):
"""
This task runs a :py:class:`luigi.contrib.spark.SparkSubmitTask` task
    over the target data returned by :py:meth:`~.UserItemMatrix.output` and
writes the result into its :py:meth:`~.SparkALS.output` target (a file in HDFS).
This class uses :py:meth:`luigi.contrib.spark.SparkSubmitTask.run`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: yarn-client
"""
data_size = luigi.IntParameter(default=1000)
driver_memory = '2g'
executor_memory = '3g'
num_executors = luigi.IntParameter(default=100)
app = 'my-spark-assembly.jar'
entry_class = 'com.spotify.spark.ImplicitALS'
def app_options(self):
# These are passed to the Spark main args in the defined order.
return [self.input().path, self.output().path]
def requires(self):
"""
This task's dependencies:
* :py:class:`~.UserItemMatrix`
:return: object (:py:class:`luigi.task.Task`)
"""
return UserItemMatrix(self.data_size)
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file in HDFS.
:return: the target output for this task.
:rtype: object (:py:class:`~luigi.target.Target`)
"""
# The corresponding Spark job outputs as GZip format.
        return luigi.contrib.hdfs.HdfsTarget('als-output/*', format=luigi.format.Gzip)
'''
// Corresponding example Spark Job, a wrapper around the MLLib ALS job.
// This class would have to be jarred into my-spark-assembly.jar
// using sbt assembly (or package) and made available to the Luigi job
// above.
package com.spotify.spark
import org.apache.spark._
import org.apache.spark.mllib.recommendation.{Rating, ALS}
import org.apache.hadoop.io.compress.GzipCodec
object ImplicitALS {
def main(args: Array[String]) {
val sc = new SparkContext(args(0), "ImplicitALS")
val input = args(1)
val output = args(2)
val ratings = sc.textFile(input)
.map { l: String =>
val t = l.split('\t')
Rating(t(0).toInt, t(1).toInt, t(2).toFloat)
}
val model = ALS.trainImplicit(ratings, 40, 20, 0.8, 150)
model
.productFeatures
.map { case (id, vec) =>
id + "\t" + vec.map(d => "%.6f".format(d)).mkString(" ")
}
.saveAsTextFile(output, classOf[GzipCodec])
sc.stop()
}
}
'''
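# Usage sketch (not in the original file): with luigi on the path and the
# assembly jar built, the pipeline above could be launched with, e.g.:
#   luigi --module spark_als SparkALS --data-size 1000
# (the module name `spark_als` is an assumption based on this file's path).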
| apache-2.0 |
gautamMalu/XenInBox | pyanaconda/ui/gui/spokes/advstorage/dasd.py | 9 | 4925 | # DASD configuration dialog
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Samantha N. Bueno <[email protected]>
#
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.utils import gtk_action_nowait
from blivet.devicelibs.dasd import sanitize_dasd_dev_input, online_dasd
__all__ = ["DASDDialog"]
class DASDDialog(GUIObject):
""" Gtk dialog which allows users to manually add DASD devices without
having previously specified them in a parm file.
"""
builderObjects = ["dasdDialog"]
mainWidgetName = "dasdDialog"
uiFile = "spokes/advstorage/dasd.glade"
def __init__(self, data, storage):
GUIObject.__init__(self, data)
self.storage = storage
self.dasd = self.storage.dasd
self._discoveryError = None
self._update_devicetree = False
# grab all of the ui objects
self._dasdNotebook = self.builder.get_object("dasdNotebook")
self._configureGrid = self.builder.get_object("configureGrid")
self._conditionNotebook = self.builder.get_object("conditionNotebook")
self._startButton = self.builder.get_object("startButton")
self._okButton = self.builder.get_object("okButton")
self._cancelButton = self.builder.get_object("cancelButton")
self._deviceEntry = self.builder.get_object("deviceEntry")
self._spinner = self.builder.get_object("waitSpinner")
def refresh(self):
self._deviceEntry.set_text("")
self._deviceEntry.set_sensitive(True)
self._startButton.set_sensitive(True)
def run(self):
rc = self.window.run()
self.window.destroy()
# We need to call this to get the device nodes to show up
# in our devicetree.
if self._update_devicetree:
self.storage.devicetree.populate()
return rc
def on_start_clicked(self, *args):
""" Go through the process of validating entry contents and then
attempt to add the device.
"""
# First update widgets
self._startButton.hide()
self._cancelButton.set_sensitive(False)
self._okButton.set_sensitive(False)
self._conditionNotebook.set_current_page(1)
try:
device = sanitize_dasd_dev_input(self._deviceEntry.get_text())
except ValueError as e:
_config_error = str(e)
self.builder.get_object("deviceErrorLabel").set_text(_config_error)
self._conditionNotebook.set_current_page(2)
self._configureGrid.set_sensitive(True)
self._cancelButton.set_sensitive(True)
return
self._spinner.start()
self._discover(device)
self._check_discover()
@gtk_action_nowait
def _check_discover(self):
""" After the DASD discover thread runs, check to see whether a valid
device was discovered. Display an error message if not.
"""
self._spinner.stop()
if self._discoveryError:
# Failure, display a message and leave the user on the dialog so
# they can try again (or cancel)
self.builder.get_object("deviceErrorLabel").set_text(self._discoveryError)
self._discoveryError = None
self._conditionNotebook.set_current_page(2)
else:
# Great success. Just return to the advanced storage window and let the
# UI update with the newly-added device
self.window.response(1)
return True
self._cancelButton.set_sensitive(True)
return False
def _discover(self, device):
""" Given the configuration options from a user, attempt to discover
a DASD device. This includes searching black-listed devices.
"""
# attempt to add the device
try:
online_dasd(device)
self._update_devicetree = True
except ValueError as e:
self._discoveryError = str(e)
return
| gpl-2.0 |
Gabotero/GNURadioNext | gr-uhd/examples/python/usrp_nbfm_ptt.py | 58 | 17843 | #!/usr/bin/env python
#
# Copyright 2005,2007,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import math
import sys
import wx
from optparse import OptionParser
from gnuradio import gr, audio, uhd
from gnuradio import analog
from gnuradio import blocks
from gnuradio import filter
from gnuradio.eng_option import eng_option
from gnuradio.wxgui import stdgui2, fftsink2, scopesink2, slider, form
from numpy import convolve, array
#import os
#print "pid =", os.getpid()
#raw_input('Press Enter to continue: ')
# ////////////////////////////////////////////////////////////////////////
# Control Stuff
# ////////////////////////////////////////////////////////////////////////
class ptt_block(stdgui2.std_top_block):
def __init__(self, frame, panel, vbox, argv):
stdgui2.std_top_block.__init__ (self, frame, panel, vbox, argv)
self.frame = frame
self.space_bar_pressed = False
parser = OptionParser (option_class=eng_option)
parser.add_option("-a", "--args", type="string", default="",
help="UHD device address args [default=%default]")
parser.add_option("", "--spec", type="string", default=None,
help="Subdevice of UHD device where appropriate")
parser.add_option("-A", "--antenna", type="string", default=None,
help="select Rx Antenna where appropriate")
parser.add_option ("-f", "--freq", type="eng_float", default=442.1e6,
help="set Tx and Rx frequency to FREQ", metavar="FREQ")
parser.add_option ("-g", "--rx-gain", type="eng_float", default=None,
help="set rx gain [default=midpoint in dB]")
parser.add_option ("", "--tx-gain", type="eng_float", default=None,
help="set tx gain [default=midpoint in dB]")
parser.add_option("-I", "--audio-input", type="string", default="default",
help="pcm input device name. E.g., hw:0,0 or /dev/dsp")
parser.add_option("-O", "--audio-output", type="string", default="default",
help="pcm output device name. E.g., hw:0,0 or /dev/dsp")
parser.add_option ("-N", "--no-gui", action="store_true", default=False)
(options, args) = parser.parse_args ()
if len(args) != 0:
parser.print_help()
sys.exit(1)
if options.freq < 1e6:
options.freq *= 1e6
self.txpath = transmit_path(options.args, options.spec,
options.antenna, options.tx_gain,
options.audio_input)
self.rxpath = receive_path(options.args, options.spec,
options.antenna, options.rx_gain,
options.audio_output)
self.connect(self.txpath)
self.connect(self.rxpath)
self._build_gui(frame, panel, vbox, argv, options.no_gui)
self.set_transmit(False)
self.set_freq(options.freq)
self.set_rx_gain(self.rxpath.gain) # update gui
self.set_volume(self.rxpath.volume) # update gui
self.set_squelch(self.rxpath.threshold()) # update gui
def set_transmit(self, enabled):
self.txpath.set_enable(enabled)
self.rxpath.set_enable(not(enabled))
if enabled:
self.frame.SetStatusText ("Transmitter ON", 1)
else:
self.frame.SetStatusText ("Receiver ON", 1)
def set_rx_gain(self, gain):
self.myform['rx_gain'].set_value(gain) # update displayed value
self.rxpath.set_gain(gain)
def set_tx_gain(self, gain):
self.txpath.set_gain(gain)
def set_squelch(self, threshold):
self.rxpath.set_squelch(threshold)
self.myform['squelch'].set_value(self.rxpath.threshold())
def set_volume (self, vol):
self.rxpath.set_volume(vol)
self.myform['volume'].set_value(self.rxpath.volume)
#self.update_status_bar ()
def set_freq(self, freq):
r1 = self.txpath.set_freq(freq)
r2 = self.rxpath.set_freq(freq)
#print "txpath.set_freq =", r1
#print "rxpath.set_freq =", r2
if r1 and r2:
self.myform['freq'].set_value(freq) # update displayed value
return r1 and r2
def _build_gui(self, frame, panel, vbox, argv, no_gui):
def _form_set_freq(kv):
return self.set_freq(kv['freq'])
self.panel = panel
# FIXME This REALLY needs to be replaced with a hand-crafted button
# that sends both button down and button up events
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((10,0), 1)
self.status_msg = wx.StaticText(panel, -1, "Press Space Bar to Transmit")
of = self.status_msg.GetFont()
self.status_msg.SetFont(wx.Font(15, of.GetFamily(), of.GetStyle(), of.GetWeight()))
hbox.Add(self.status_msg, 0, wx.ALIGN_CENTER)
hbox.Add((10,0), 1)
vbox.Add(hbox, 0, wx.EXPAND | wx.ALIGN_CENTER)
panel.Bind(wx.EVT_KEY_DOWN, self._on_key_down)
panel.Bind(wx.EVT_KEY_UP, self._on_key_up)
panel.Bind(wx.EVT_KILL_FOCUS, self._on_kill_focus)
panel.SetFocus()
if 1 and not(no_gui):
rx_fft = fftsink2.fft_sink_c(panel, title="Rx Input", fft_size=512,
sample_rate=self.rxpath.if_rate,
ref_level=80, y_per_div=20)
self.connect (self.rxpath.u, rx_fft)
vbox.Add (rx_fft.win, 1, wx.EXPAND)
if 1 and not(no_gui):
rx_fft = fftsink2.fft_sink_c(panel, title="Post s/w Resampler",
fft_size=512, sample_rate=self.rxpath.quad_rate,
ref_level=80, y_per_div=20)
self.connect (self.rxpath.resamp, rx_fft)
vbox.Add (rx_fft.win, 1, wx.EXPAND)
if 0 and not(no_gui):
foo = scopesink2.scope_sink_f(panel, title="Squelch",
sample_rate=32000)
self.connect (self.rxpath.fmrx.div, (foo,0))
self.connect (self.rxpath.fmrx.gate, (foo,1))
self.connect (self.rxpath.fmrx.squelch_lpf, (foo,2))
vbox.Add (foo.win, 1, wx.EXPAND)
if 0 and not(no_gui):
tx_fft = fftsink2.fft_sink_c(panel, title="Tx Output",
fft_size=512, sample_rate=self.txpath.usrp_rate)
self.connect (self.txpath.amp, tx_fft)
vbox.Add (tx_fft.win, 1, wx.EXPAND)
# add control area at the bottom
self.myform = myform = form.form()
# first row
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((5,0), 0, 0)
myform['freq'] = form.float_field(
parent=panel, sizer=hbox, label="Freq", weight=1,
callback=myform.check_input_and_call(_form_set_freq, self._set_status_msg))
hbox.Add((5,0), 0, 0)
vbox.Add(hbox, 0, wx.EXPAND)
# second row
hbox = wx.BoxSizer(wx.HORIZONTAL)
myform['volume'] = \
form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Volume",
weight=3, range=self.rxpath.volume_range(),
callback=self.set_volume)
hbox.Add((5,0), 0)
myform['squelch'] = \
form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Squelch",
weight=3, range=self.rxpath.squelch_range(),
callback=self.set_squelch)
g = self.rxpath.u.get_gain_range()
hbox.Add((5,0), 0)
myform['rx_gain'] = \
form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Rx Gain",
weight=3, range=(g.start(), g.stop(), g.step()),
callback=self.set_rx_gain)
hbox.Add((5,0), 0)
vbox.Add(hbox, 0, wx.EXPAND)
self._build_subpanel(vbox)
def _build_subpanel(self, vbox_arg):
# build a secondary information panel (sometimes hidden)
# FIXME figure out how to have this be a subpanel that is always
# created, but has its visibility controlled by foo.Show(True/False)
#if not(self.show_debug_info):
# return
panel = self.panel
vbox = vbox_arg
myform = self.myform
#panel = wx.Panel(self.panel, -1)
#vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((5,0), 0)
#myform['decim'] = form.static_float_field(
# parent=panel, sizer=hbox, label="Decim")
#hbox.Add((5,0), 1)
#myform['fs@usb'] = form.static_float_field(
# parent=panel, sizer=hbox, label="Fs@USB")
#hbox.Add((5,0), 1)
#myform['dbname'] = form.static_text_field(
# parent=panel, sizer=hbox)
hbox.Add((5,0), 0)
vbox.Add(hbox, 0, wx.EXPAND)
def _set_status_msg(self, msg, which=0):
self.frame.GetStatusBar().SetStatusText(msg, which)
def _on_key_down(self, evt):
# print "key_down:", evt.m_keyCode
if evt.m_keyCode == wx.WXK_SPACE and not(self.space_bar_pressed):
self.space_bar_pressed = True
self.set_transmit(True)
def _on_key_up(self, evt):
# print "key_up", evt.m_keyCode
if evt.m_keyCode == wx.WXK_SPACE:
self.space_bar_pressed = False
self.set_transmit(False)
def _on_kill_focus(self, evt):
# if we lose the keyboard focus, turn off the transmitter
self.space_bar_pressed = False
self.set_transmit(False)
# ////////////////////////////////////////////////////////////////////////
# Transmit Path
# ////////////////////////////////////////////////////////////////////////
class transmit_path(gr.hier_block2):
def __init__(self, args, spec, antenna, gain, audio_input):
gr.hier_block2.__init__(self, "transmit_path",
gr.io_signature(0, 0, 0), # Input signature
gr.io_signature(0, 0, 0)) # Output signature
self.u = uhd.usrp_sink(device_addr=args, stream_args=uhd.stream_args('fc32'))
# Set the subdevice spec
if(spec):
self.u.set_subdev_spec(spec, 0)
# Set the antenna
if(antenna):
self.u.set_antenna(antenna, 0)
self.if_rate = 320e3
self.audio_rate = 32e3
self.u.set_samp_rate(self.if_rate)
dev_rate = self.u.get_samp_rate()
self.audio_gain = 10
self.normal_gain = 32000
self.audio = audio.source(int(self.audio_rate), audio_input)
self.audio_amp = blocks.multiply_const_ff(self.audio_gain)
lpf = filter.firdes.low_pass(1, # gain
self.audio_rate, # sampling rate
3800, # low pass cutoff freq
300, # width of trans. band
filter.firdes.WIN_HANN) # filter type
hpf = filter.firdes.high_pass(1, # gain
self.audio_rate, # sampling rate
325, # low pass cutoff freq
50, # width of trans. band
filter.firdes.WIN_HANN) # filter type
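        # Convolving the low-pass taps with the high-pass taps cascades the
        # two filters into a single band-pass FIR (roughly 325 Hz - 3800 Hz),
        # which keeps the transmitted audio inside the voice band.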
audio_taps = convolve(array(lpf),array(hpf))
self.audio_filt = filter.fir_filter_fff(1,audio_taps)
self.pl = analog.ctcss_gen_f(self.audio_rate,123.0)
self.add_pl = blocks.add_ff()
self.connect(self.pl,(self.add_pl,1))
self.fmtx = analog.nbfm_tx(self.audio_rate, self.if_rate)
self.amp = blocks.multiply_const_cc (self.normal_gain)
rrate = dev_rate / self.if_rate
self.resamp = filter.pfb.arb_resampler_ccf(rrate)
self.connect(self.audio, self.audio_amp, self.audio_filt,
(self.add_pl,0), self.fmtx, self.amp,
self.resamp, self.u)
if gain is None:
# if no gain was specified, use the mid-point in dB
g = self.u.get_gain_range()
gain = float(g.start() + g.stop())/2.0
self.set_gain(gain)
self.set_enable(False)
def set_freq(self, target_freq):
"""
Set the center frequency we're interested in.
Args:
target_freq: frequency in Hz
        @rtype: bool
"""
r = self.u.set_center_freq(target_freq)
if r:
return True
return False
def set_gain(self, gain):
self.gain = gain
self.u.set_gain(gain)
def set_enable(self, enable):
if enable:
self.amp.set_k (self.normal_gain)
else:
self.amp.set_k (0)
# ////////////////////////////////////////////////////////////////////////
# Receive Path
# ////////////////////////////////////////////////////////////////////////
class receive_path(gr.hier_block2):
def __init__(self, args, spec, antenna, gain, audio_output):
gr.hier_block2.__init__(self, "receive_path",
gr.io_signature(0, 0, 0), # Input signature
gr.io_signature(0, 0, 0)) # Output signature
self.u = uhd.usrp_source(device_addr=args,
io_type=uhd.io_type.COMPLEX_FLOAT32,
num_channels=1)
self.if_rate = 256e3
self.quad_rate = 64e3
self.audio_rate = 32e3
self.u.set_samp_rate(self.if_rate)
dev_rate = self.u.get_samp_rate()
# Create filter to get actual channel we want
nfilts = 32
chan_coeffs = filter.firdes.low_pass(nfilts, # gain
nfilts*dev_rate, # sampling rate
13e3, # low pass cutoff freq
4e3, # width of trans. band
filter.firdes.WIN_HANN) # filter type
rrate = self.quad_rate / dev_rate
self.resamp = filter.pfb.arb_resampler_ccf(rrate, chan_coeffs, nfilts)
# instantiate the guts of the single channel receiver
self.fmrx = analog.nbfm_rx(self.audio_rate, self.quad_rate)
# standard squelch block
self.squelch = analog.standard_squelch(self.audio_rate)
# audio gain / mute block
self._audio_gain = blocks.multiply_const_ff(1.0)
# sound card as final sink
audio_sink = audio.sink(int(self.audio_rate), audio_output)
# now wire it all together
self.connect(self.u, self.resamp, self.fmrx, self.squelch,
self._audio_gain, audio_sink)
if gain is None:
# if no gain was specified, use the mid-point in dB
g = self.u.get_gain_range()
gain = float(g.start() + g.stop())/2.0
self.enabled = True
self.set_gain(gain)
v = self.volume_range()
self.set_volume((v[0]+v[1])/2)
s = self.squelch_range()
self.set_squelch((s[0]+s[1])/2)
# Set the subdevice spec
if(spec):
self.u.set_subdev_spec(spec, 0)
# Set the antenna
if(antenna):
self.u.set_antenna(antenna, 0)
def volume_range(self):
return (-20.0, 0.0, 0.5)
def set_volume (self, vol):
g = self.volume_range()
self.volume = max(g[0], min(g[1], vol))
self._update_audio_gain()
def set_enable(self, enable):
self.enabled = enable
self._update_audio_gain()
def _update_audio_gain(self):
if self.enabled:
self._audio_gain.set_k(10**(self.volume/10))
else:
self._audio_gain.set_k(0)
def squelch_range(self):
return self.squelch.squelch_range()
def set_squelch(self, threshold):
print "SQL =", threshold
self.squelch.set_threshold(threshold)
def threshold(self):
return self.squelch.threshold()
def set_freq(self, target_freq):
"""
Set the center frequency we're interested in.
Args:
target_freq: frequency in Hz
        @rtype: bool
"""
r = self.u.set_center_freq(target_freq)
if r:
return True
return False
def set_gain(self, gain):
self.gain = gain
self.u.set_gain(gain)
# ////////////////////////////////////////////////////////////////////////
# Main
# ////////////////////////////////////////////////////////////////////////
def main():
app = stdgui2.stdapp(ptt_block, "NBFM Push to Talk")
app.MainLoop()
if __name__ == '__main__':
main()
| gpl-3.0 |
mgagne/nova | nova/tests/unit/scheduler/test_scheduler.py | 4 | 6317 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler
"""
import mock
from oslo.config import cfg
from nova.compute import api as compute_api
from nova import context
from nova import db
from nova import exception
from nova.image import glance
from nova.scheduler import driver
from nova.scheduler import manager
from nova import servicegroup
from nova import test
from nova.tests.unit import fake_server_actions
from nova.tests.unit.image import fake as fake_image
from nova.tests.unit.scheduler import fakes
CONF = cfg.CONF
class SchedulerManagerTestCase(test.NoDBTestCase):
"""Test case for scheduler manager."""
manager_cls = manager.SchedulerManager
driver_cls = driver.Scheduler
driver_cls_name = 'nova.scheduler.driver.Scheduler'
def setUp(self):
super(SchedulerManagerTestCase, self).setUp()
self.flags(scheduler_driver=self.driver_cls_name)
self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
self.manager = self.manager_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'
self.fake_args = (1, 2, 3)
self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
fake_server_actions.stub_out_action_events(self.stubs)
def test_1_correct_init(self):
# Correct scheduler driver
manager = self.manager
self.assertIsInstance(manager.driver, self.driver_cls)
def _mox_schedule_method_helper(self, method_name):
# Make sure the method exists that we're going to test call
def stub_method(*args, **kwargs):
pass
setattr(self.manager.driver, method_name, stub_method)
self.mox.StubOutWithMock(self.manager.driver,
method_name)
def test_select_destination(self):
with mock.patch.object(self.manager, 'select_destinations'
) as select_destinations:
self.manager.select_destinations(None, None, {})
select_destinations.assert_called_once_with(None, None, {})
class SchedulerV3PassthroughTestCase(test.TestCase):
def setUp(self):
super(SchedulerV3PassthroughTestCase, self).setUp()
self.manager = manager.SchedulerManager()
self.proxy = manager._SchedulerManagerV3Proxy(self.manager)
def test_select_destination(self):
with mock.patch.object(self.manager, 'select_destinations'
) as select_destinations:
self.proxy.select_destinations(None, None, {})
select_destinations.assert_called_once_with(None, None, {})
class SchedulerTestCase(test.NoDBTestCase):
"""Test case for base scheduler driver class."""
# So we can subclass this test and re-use tests if we need.
driver_cls = driver.Scheduler
def setUp(self):
super(SchedulerTestCase, self).setUp()
self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
def fake_show(meh, context, id, **kwargs):
if id:
return {'id': id, 'min_disk': None, 'min_ram': None,
'name': 'fake_name',
'status': 'active',
'properties': {'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id',
'something_else': 'meow'}}
else:
raise exception.ImageNotFound(image_id=id)
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
self.image_service = glance.get_default_image_service()
self.driver = self.driver_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'
self.servicegroup_api = servicegroup.API()
def test_hosts_up(self):
service1 = {'host': 'host1'}
service2 = {'host': 'host2'}
services = [service1, service2]
self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
db.service_get_all_by_topic(self.context,
self.topic).AndReturn(services)
self.servicegroup_api.service_is_up(service1).AndReturn(False)
self.servicegroup_api.service_is_up(service2).AndReturn(True)
self.mox.ReplayAll()
result = self.driver.hosts_up(self.context, self.topic)
self.assertEqual(result, ['host2'])
class SchedulerDriverBaseTestCase(SchedulerTestCase):
"""Test cases for base scheduler driver class methods
that will fail if the driver is changed.
"""
def test_unimplemented_select_destinations(self):
self.assertRaises(NotImplementedError,
self.driver.select_destinations, self.context, {}, {})
class SchedulerInstanceGroupData(test.TestCase):
driver_cls = driver.Scheduler
def setUp(self):
super(SchedulerInstanceGroupData, self).setUp()
self.user_id = 'fake_user'
self.project_id = 'fake_project'
self.context = context.RequestContext(self.user_id, self.project_id)
self.driver = self.driver_cls()
def _get_default_values(self):
return {'name': 'fake_name',
'user_id': self.user_id,
'project_id': self.project_id}
def _create_instance_group(self, context, values, policies=None,
metadata=None, members=None):
return db.instance_group_create(context, values, policies=policies,
metadata=metadata, members=members)
| apache-2.0 |
killabytenow/chirribackup | chirribackup/chunk.py | 1 | 17877 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
###############################################################################
# chirribackup/chunk.py
#
# Represents a chunk (in disk and database)
#
# -----------------------------------------------------------------------------
# Chirri Backup - Cheap and ugly backup tool
# Copyright (C) 2016 Gerardo Garcia Peña <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import absolute_import
import exceptions
import os
import re
import sys
import chirribackup.compression
import chirribackup.crypto
from chirribackup.Logger import logger
from chirribackup.exceptions import \
ChirriException, \
BadValueException, \
ChunkBadFilenameException, \
ChunkBadHashException, \
ChunkNotFoundException
# CONSTANTS
READ_BLOCKSIZE = (1024*1024)
# CHUNK CLASS
class Chunk(object):
ldb = None
hash = None
size = None
csize = None
first_seen_as = None
status = None
refcount = None
compression = None
def __init__(self, ldb, hash = None):
self.ldb = ldb
if hash is not None:
self.load(hash)
def load(self, hash):
if not chirribackup.crypto.ChirriHasher.hash_check(hash):
raise ChunkBadHashException("Bad hash '%s'." % hash)
c = self.ldb.connection.execute(
"SELECT * FROM file_data WHERE hash = :hash",
{ "hash" : hash }).fetchone()
if c is None:
raise ChunkNotFoundException("Chunk '%s' does not exist." % hash)
self.hash = c["hash"]
self.size = c["size"]
self.csize = c["csize"]
self.first_seen_as = c["first_seen_as"]
self.status = c["status"]
self.refcount = c["refcount"]
self.compression = c["compression"]
return self
def new(self, source_file):
# local_file (source_file in ldb.db_path)
local_file = os.path.join(self.ldb.db_path, source_file)
tmp_file = os.path.join(self.ldb.chunks_dir, "tmp.%s" % os.getpid())
# hash target file
sh = chirribackup.crypto.ChirriHasher()
try:
with open(local_file, 'rb') as ifile:
buf = ifile.read(READ_BLOCKSIZE)
while len(buf) > 0:
sh.update(buf)
buf = ifile.read(READ_BLOCKSIZE)
except exceptions.IOError, ex:
raise ChirriException("Cannot hash file '%s': %s" % (source_file, ex))
# set basic attribs
self.hash = sh.hash
self.size = sh.nbytes
self.csize = None
self.first_seen_as = source_file
self.status = 0
self.refcount = 0
self.compression = None
# configure target_file path
target_file = os.path.join(self.ldb.chunks_dir, self.get_filename())
# check if this chunk exists already in local database
oc = self.ldb.connection.execute(
"SELECT * FROM file_data WHERE hash = :hashkey",
{ "hashkey" : sh.hash }).fetchone()
if oc is not None:
# check the improbability
if sh.nbytes != oc["size"]:
raise ChirriException("OMG! File '%s' matches with chunk %s, but it differs in size."
% (target_file, self.hash_format()))
# hash already processed, load and return
logger.debug("Chunk %s already exists for file %s" \
% (self.hash_format(), source_file))
return self.load(sh.hash)
# if the target_file already exists (was generated, but not reg in db),
# delete it
if os.path.exists(target_file):
logger.warning("A local chunk '%s' was already created -- deleting it." \
% self.hash_format())
os.unlink(target_file)
# create the file snapshot 'target_file' (without compression)
compressor = chirribackup.compression.Compressor(None, tmp_file)
sh = chirribackup.crypto.ChirriHasher()
try:
with open(local_file, 'rb') as ifile:
buf = ifile.read(READ_BLOCKSIZE)
while len(buf) > 0:
compressor.compress(buf)
sh.update(buf)
buf = ifile.read(READ_BLOCKSIZE)
compressor.close()
except exceptions.IOError, ex:
os.unlink(tmp_file)
raise ChirriException("Cannot hash & copy file '%s': %s" % (source_file, ex))
# check hash and update csize
if sh.hash != self.hash or sh.nbytes != self.size:
            raise ChirriException("Chunk %s changed during snapshot" % source_file)
if sh.nbytes != compressor.bytes_out:
raise ChirriException(
"Null compressor bytes %d do not match with hash bytes %d" \
% (compressor.bytes_out, sh.nbytes))
self.csize = compressor.bytes_out
# commit target_file and register chunk in database
os.rename(tmp_file, target_file)
self.ldb.connection.execute(
"""
INSERT INTO file_data
(hash, size, csize, first_seen_as, status, refcount, compression)
VALUES (:hash, :size, :csize, :path, :status, 0, :compression)
""", {
"hash" : self.hash,
"size" : self.size,
"csize" : self.csize,
"path" : self.first_seen_as,
"status" : self.status,
"compression" : self.compression,
})
return self
def compress(self, compression):
# sanity checks
if self.status != 0:
raise ChirriException(
"Chunk cannot be compressed in status %d" % self.status)
# trivial case => user do not want compression
if compression is None \
or compression == self.compression:
# leave chunk in the current state (probably uncompressed)
logger.debug("%s: %s, not applying compression %s." \
% (self.get_filename(),
("Compressed with " + self.compression) \
if self.compression is not None else "Uncompressed",
"NONE" if compression is None else compression))
return False
# get paths
old_chunk_file = os.path.join(self.ldb.chunks_dir, self.get_filename())
tmp_file = os.path.join(self.ldb.chunks_dir, "tmp.%s" % os.getpid())
# try compressing it using 'compression' algorithm
# NOTE: we must decompress the existing chunk using the current
# compression algorithm (probably None)
decompressor = chirribackup.compression.Decompressor(self.compression)
compressor = chirribackup.compression.Compressor(compression, tmp_file)
sh = chirribackup.crypto.ChirriHasher()
try:
# read, write & hash
with open(old_chunk_file, 'rb') as ifile:
# read first block
buf = ifile.read(READ_BLOCKSIZE)
while len(buf) > 0:
# decompress data
buf = decompressor.decompress(buf)
# compress data & hash
compressor.compress(buf)
sh.update(buf)
# read more data
buf = ifile.read(READ_BLOCKSIZE)
# last pending bytes
buf = decompressor.close()
compressor.compress(buf)
sh.update(buf)
compressor.close()
except exceptions.IOError, ex:
os.unlink(tmp_file)
raise ChirriException("Cannot recompress chunk %s: %s" \
% (self.hash_format(), ex))
# check hashes
if sh.hash != self.hash:
os.unlink(tmp_file)
raise ChirriException(
"Data in file '%s' does not match with chunk %s" \
% (sh.hash, self.hash))
# check if compression has worked
if compressor.bytes_out >= self.csize:
if self.csize == 0:
logger.warning("Found zero bytes chunk '%s'." % self.hash_format())
else:
logger.warning("Storing '%s' uncompressed (uncompressed=%d < %s=%d; ratio %.2f)" \
% (self.hash_format(),
self.csize,
compression, compressor.bytes_out,
float(compressor.bytes_out) / float(self.csize)))
os.unlink(tmp_file)
return False
# ok .. proceed to update chunk with compressed version
# update chunk info
logger.debug("Chunk %s compressed (%d < %d)" \
% (self.hash_format(), compressor.bytes_out, self.csize))
self.compression = compression
self.csize = compressor.bytes_out
self.ldb.connection.execute(
"""
UPDATE file_data
SET compression = :compression, csize = :csize
WHERE hash = :hash
""", {
"compression" : self.compression,
"csize" : self.csize,
"hash" : self.hash,
})
# calculate new file name
new_chunk_file = os.path.join(self.ldb.chunks_dir, self.get_filename())
# rename tmp_file
if os.path.exists(new_chunk_file):
logger.warning("A local chunk '%s' was already created -- deleting it." \
% self.hash_format())
os.unlink(new_chunk_file)
os.rename(tmp_file, new_chunk_file)
self.ldb.connection.commit()
os.unlink(old_chunk_file)
return True
def download(self, sm, target_file, overwrite = False):
remote_chunk = "chunks/%s" % self.get_filename()
tmp_file = target_file + ".download"
# if file exists, try
if os.path.exists(target_file):
# overwrite check
if not overwrite:
raise ChirriException("Chunk file '%s' already exists." % target_file)
# yep! chunk is already on disk.. decide what to do...
            eh = chirribackup.crypto.hash_file(target_file)
            if eh.hash != self.hash:
                # hashes don't match... it is surely an old partial chunk of
                # a previous restore operation (cancelled), delete it from disk
                logger.warning("Old tmp download '%s' found but corrupt. "
                               "Downloading it again." % target_file)
                os.unlink(target_file)
            else:
                # hashes match, so it is the file that we need -- continue as
                # usual without downloading anything
                logger.info("Found previous temp download '%s' with matching "
                            "hash. Recycling it." % target_file)
                return
# ok... download raw chunk
sm.download_file(remote_chunk, tmp_file)
# decompress it
if self.compression is None:
os.rename(tmp_file, target_file)
else:
try:
decompressor = chirribackup.compression.Decompressor(self.compression, target_file)
sh = chirribackup.crypto.ChirriHasher()
with open(tmp_file, 'rb') as ifile:
buf = ifile.read(READ_BLOCKSIZE)
while len(buf) > 0:
sh.update(decompressor.decompress(buf))
buf = ifile.read(READ_BLOCKSIZE)
sh.update(decompressor.close())
if sh.hash != self.hash:
raise ChirriException("Bad data recovered (%s, %s)" \
% (target_file, self.first_seen_as))
except exceptions.IOError, ex:
os.unlink(target_file)
raise ChirriException("Cannot hash & copy file '%s': %s" % (source_file, ex))
finally:
os.unlink(tmp_file)
def __refcount_sum(self, value):
self.ldb.connection.execute(
"""
UPDATE file_data
SET refcount = refcount + :value
WHERE hash = :hash
""", {
"value" : value,
"hash" : self.hash,
})
c = self.ldb.connection.execute(
"SELECT refcount FROM file_data WHERE hash = :hash",
{ "hash" : self.hash }).fetchone()
if c is None:
raise ChirriException("Chunk %s does not exists." % self.hash_format())
c = c[0]
if c < 0:
raise ChirriException("Negative ref in chunk %s." % self.hash_format())
return c
def refcount_inc(self):
return self.__refcount_sum(1)
def refcount_dec(self):
return self.__refcount_sum(-1)
def set_status(self, value):
if value < 0 or value > 2:
raise BadValueException("Bad chunk status value %s" % value)
self.status = value;
self.ldb.connection.execute(
"""
UPDATE file_data
SET status = :value
WHERE hash = :hash
""", {
"hash" : self.hash,
"value" : value,
})
def hash_format(self):
return chirribackup.crypto.ChirriHasher.hash_format(self.hash)
def get_filename(self, prefix = "", postfix = ""):
return Chunk.compose_filename(self.hash, self.size, self.compression, prefix, postfix)
@classmethod
def parse_filename(cls, fname):
m = re.compile("^([a-f0-9]+)((\.[a-zA-Z0-9_]+)+)$").match(fname)
if m is None:
raise ChunkBadFilenameException("Bad chunk file name '%s'." % fname)
hash = m.group(1)
if len(m.group(2)) == 0:
raise ChunkBadFilenameException("Expected extensions in '%s'" % fname)
exts = m.group(2)[1:]
exts = exts.split(".")
if len(exts) == 0:
raise ChunkBadFilenameException("Expected chunk size in '%s'" % fname)
size = exts.pop(0)
if len(exts) > 0:
compression = exts.pop(0)
else:
compression = None
if len(exts) > 0:
raise ChunkBadFilenameException("Too many extensions in chunk '%s'" % fname)
return {
"hash" : hash,
"size" : int(size),
"compression" : compression,
}
@classmethod
def compose_filename(cls, hash, size, compression, prefix = "", postfix = ""):
r = "%s.%d" % (hash, size)
if compression is not None:
r = r + "." + compression
r = "%s%s%s" % (prefix, r, postfix)
return r
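    # Illustrative example (not part of the original module):
    #   Chunk.compose_filename('ab12', 2048, 'lzma') -> 'ab12.2048.lzma'
    # and parse_filename('ab12.2048.lzma') recovers the same hash, size
    # and compression fields.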
@classmethod
def list(cls, ldb, status = None, refcount = None):
l = []
wl = []
if status is not None:
wl.append("status = :status")
if refcount is not None:
wl.append("refcount <= :refcount")
select = "SELECT hash FROM file_data"
if len(wl) > 0:
select = select + " WHERE " + " AND ".join(wl)
for row in ldb.connection.execute(
select,
{
"status" : status,
"refcount" : refcount,
}):
l.append(Chunk(ldb, row["hash"]))
return l
@classmethod
def insert(cls, ldb, hash, size, csize, first_seen_as, status, refcount, compression):
# insert in database
oc = ldb.connection.execute(
"SELECT * FROM file_data WHERE hash = :hash",
{ "hash" : hash }).fetchone()
if oc is None:
# insert new hash
ldb.connection.execute(
"""
INSERT INTO file_data
(hash, size, csize, first_seen_as, status, refcount, compression)
VALUES (:hash, :size, :csize, :first_seen_as, :status, :refcount, :compression)
""", {
"hash" : hash,
"size" : size,
"csize" : csize,
"first_seen_as" : first_seen_as,
"status" : status,
"refcount" : refcount,
"compression" : compression,
})
else:
raise ChirriException("Cannot add existing chunk %s" \
% chirribackup.crypto.ChirriHasher.hash_format(hash))
return Chunk(ldb, hash)
| gpl-3.0 |
sudosurootdev/external_chromium_org | tools/json_to_struct/struct_generator.py | 129 | 1501 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def _GenerateArrayField(field_info):
"""Generate a string defining an array field in a C structure.
"""
contents = field_info['contents']
contents['field'] = '* ' + field_info['field']
if contents['type'] == 'array':
raise RuntimeError('Nested arrays are not supported.')
  return (GenerateField(contents) + ';\n' +
          '  const size_t %s_size') % field_info['field']
def GenerateField(field_info):
"""Generate a string defining a field of the type specified by
field_info['type'] in a C structure.
"""
field = field_info['field']
type = field_info['type']
if type == 'int':
return 'const int %s' % field
elif type == 'string':
return 'const char* const %s' % field
elif type == 'string16':
return 'const wchar_t* const %s' % field
elif type == 'enum':
return 'const %s %s' % (field_info['ctype'], field)
elif type == 'array':
return _GenerateArrayField(field_info)
else:
raise RuntimeError('Unknown field type "%s"' % type)
def GenerateStruct(type_name, schema):
"""Generate a string defining a structure containing the fields specified in
the schema list.
"""
  lines = []
  lines.append('struct %s {' % type_name)
  for field_info in schema:
    lines.append('  ' + GenerateField(field_info) + ';')
  lines.append('};')
  return '\n'.join(lines) + '\n'
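# Illustrative sketch (not part of the original file): given a schema list,
# GenerateStruct emits the matching C declaration, e.g.
#   GenerateStruct('Item', [{'field': 'id', 'type': 'int'},
#                           {'field': 'label', 'type': 'string'}])
# returns:
#   struct Item {
#     const int id;
#     const char* const label;
#   };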
| bsd-3-clause |
DomenicD/dom_ml_playground | python/src/learning_algorithms/backpropagation.py | 1 | 3948 | from python.src.neural_networks.neural_network_utils import NeuralNetworkUtils
from python.src.neurons.neurons import NeuronType
import math
import random
# http://home.agh.edu.pl/~vlsi/AI/backp_t_en/backprop.html
# http://www.cse.unsw.edu.au/~cs9417ml/MLP2/
class TrainingResult(object):
def __init__(self, epochs, error):
self.epochs = epochs
self.error = error
# wonder how to compute the contribution of each node
# during feed forward phase?
class Backpropagator(object):
def teach(self, neural_network, expectations,
acceptable_error = .001, max_iterations = 100,
time_limit = None, learning_rate = 0.5, callback_rate = 10,
callback_func = None):
epochs = 0
error = 0.0
within_acceptable_error = False
while (epochs < max_iterations and not within_acceptable_error):
sample = list(expectations)
#random.shuffle(sample)
for expectation in sample:
self.learn(neural_network, expectation, learning_rate)
epochs += 1
if epochs % callback_rate == 0:
error = 0.0
for exp in expectations:
error += self.calculate_error(
neural_network.receive_inputs(exp.inputs),
exp.outputs)
within_acceptable_error = error < acceptable_error
if callback_func != None:
callback_func(neural_network, expectations,
TrainingResult(epochs, error))
return TrainingResult(epochs, error)
def calculate_error(self, actual, correct):
errors = [math.pow(actual[i] - correct[i], 2)
for i in range(len(actual))]
return sum(errors)
def learn(self, neural_network, expectation, learning_rate):
"""This is the actual backpropagation part of the code
here is where we will perform one propagation iteration
adjusting the networks's node weights
"""
neural_network.receive_inputs(expectation.inputs)
denorm_expectation = [
neural_network.normalizer.denorm_output(exp_out)
for exp_out in expectation.outputs]
NeuralNetworkUtils.OutputBreadthTraversal(
neural_network,
lambda neuron: self.propagate_errors(
neural_network, neuron, denorm_expectation, learning_rate))
def propagate_errors(self, network, neuron, expectations,
learning_rate):
if neuron.type == NeuronType.OUTPUT:
index = network.neuron_index(neuron).index
expectation = expectations[index]
neuron.error = self.output_error(
neuron, expectation)
else:
# Calculate the error for the current layer
neuron.error = self.hidden_error(neuron)
# Adjust the weights of the prior layer
for outbound in neuron.out_connections:
self.adjust_weight(outbound, learning_rate)
def adjust_weight(self, connection, learning_rate):
connection.weight += (learning_rate *
connection.receiver.error *
connection.signal_received)
def output_error(self, neuron, expectation):
return ((expectation - neuron.output) *
neuron.activation.derivative(
neuron.accumulated_input_signals))
def hidden_error(self, neuron):
weightedErrorSum = reduce(
lambda sum, c: sum + c.weight * c.receiver.error,
neuron.out_connections, 0.0)
return (weightedErrorSum *
neuron.activation.derivative(
neuron.accumulated_input_signals))
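# Illustrative sketch (not part of the original module): calculate_error is
# a plain sum of squared differences over the output vector, e.g.
#   Backpropagator().calculate_error([0.2, 0.5], [0.0, 1.0])  # -> 0.29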
| mit |
mattt416/ansible-lxc-rpc | scripts/elsa_repo.py | 4 | 26976 | #!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------------
# Rackspace Private Cloud Frozen Repo Tool (Elsa)
# This tool does the following:
# - Create dist containing all the required packages at specific versions
# Example: python elsa_repo.py config.yml dist_from_config repo-123
# - Check for new/updated packages and generate a new config file
# Example: python elsa_repo.py config.yml\
# generate_updated_config --add-new newconfig.yml
#
# Requirements:
# - Pip:
# * python-apt
# * pyyaml
# - OS tools:
# * curl (apt/curl)
# * aptly http://www.aptly.info/download/
# - Lots of space for mirrors of potentially large repos.
# The repo mirrors will be stored in ~/.aptly.
#
# Example YAML Config File:
# ---
# config:
# # only packages matching this arch or 'all' will be imported
# architecture: amd64
# # hash of package name: version
# packages:
# compat-libstdc: 5-1
# libargtable2-0: 12-1
# ...
# # list of upstreams to search for packages
# upstream_repos:
# - component: main
# dist: testing
# key_url: http://www.rabbitmq.com/rabbitmq-signing-key-public.asc
# name: rabbit
# url: http://www.rabbitmq.com/debian/
# - component: main
# dist: stable
# key_url: http://packages.elasticsearch.org/GPG-KEY-elasticsearch
# name: elasticsearch
# url: http://packages.elasticsearch.org/logstash/1.4/debian
# - key_id: 5234BF2B
# name: rsyslog_ppa_v8
# ppa: adiscon/v8-stable
# - key_id: 1285491434D8786F
# name: openmanage
# url: http://linux.dell.com/repo/community/deb/latest/
# - component: main
# dist: cloudmonitoring
# key_url: https://monitoring.api.rackspacecloud.com/pki/agent/linux.asc
# name: cloud_monitoring_agent
# url: >
# http://stable.packages.cloudmonitoring.rackspace.com/
# ubuntu-14.04-x86_64
# Notes: to generate a full mirror set packages: {} in config then use
# generate_updated_config --add-new
# Standard Lib Imports
import argparse
import copy
import logging
import re
import subprocess
import sys
# External imports
import apt_pkg
import yaml
# Log configuration
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
stdout_channel = logging.StreamHandler(sys.stdout)
LOGGER.addHandler(stdout_channel)
class PackageStore(object):
"""Store a list of PackageLists and provide search"""
def __init__(self):
self.package_lists = {}
def __iter__(self):
return self.package_lists.values().__iter__()
def __contains__(self, list_name):
return list_name in self.package_lists
def __getitem__(self, item):
return self.package_lists.get(item)
def __setitem__(self, key, value):
self.package_lists[key] = value
def package_query(self, package, newer_only=False):
"""Search for a package in all package lists"""
results = []
for pl in self.package_lists.values():
results.extend(pl.package_query(package, newer_only))
return sorted(set(results))
def all_packages(self):
"""return all packages in a flat list"""
results = PackageList('all_packages')
for pl in self.package_lists.values():
for package in pl.all_packages():
results.add_package(package)
return results
def distinct_packages(self):
"""return newest version for each package in flat list"""
results = PackageList('all_packages')
for pl in self.package_lists.values():
for package in pl.distinct_packages():
results.add_package(package)
return results
class PackageList(object):
"""Store packages and versions from a single source"""
def __init__(self, name):
# data contains a hash for fast lookup
self.data = {}
self.name = name
def __iter__(self):
return self.all_packages().__iter__()
def __len__(self):
return len(self.all_packages())
def __contains__(self, other):
return other.name in self.data
def __sub__(self, other):
"""subtract another package list from this one
returns a new package list with the result"""
result_packages = set(self) - set(other)
new_list = PackageList("%s - %s" % (self.name, other.name))
for package in result_packages:
new_list.add_package(package)
return new_list
    def __add__(self, other):
        """Add two package lists, return a new list"""
        new_list = PackageList("%s + %s" % (self.name, other.name))
        new_packages = set(self) | set(other)
        for package in new_packages:
            new_list.add_package(package)
        return new_list
def add_package(self, package):
"""Add a package to this package list, if its not already present"""
if package.name not in self.data:
self.data[package.name] = {}
versions = self.data[package.name]
if package.version not in versions:
versions[package.version] = package
package.last_list = self.name
LOGGER.debug('adding %s to %s' % (package, self.name))
def package_query(self, package, newer_only=False):
"""Search for a package in this package list
if package.version is None, multiple versions may be returned
if newer_only is true, then only versions newer than the specified
version will be returned
Always returns a list.
"""
if package.name not in self.data:
return []
versions = sorted(set(self.data[package.name].values()))
if package.version is None:
return versions
else:
if not newer_only:
if package.version in self.data[package.name]:
return [self.data[package.name].get(package.version)]
else:
return []
else:
return [p for p in versions if p > package]
def all_packages(self):
"""return flat list of all packages"""
packages = []
for versions in self.data.values():
packages.extend(versions.values())
return list(set(packages))
def distinct_packages(self):
"""return flat list of newest version of each package"""
packages = []
for versions in self.data.values():
packages.append(sorted(versions.values())[-1])
return list(set(packages))
class Package(object):
"""Represents a single package"""
def __init__(self, name, version=None, source=None):
self.name = name
self.version = version
self.source = source
def __eq__(self, other):
"""Packages are 'equal' if there name is the same"""
return self.name == other.name and self.version == other.version
def __hash__(self):
"""hash matches equality by hashing name and version"""
return hash(self.name) ^ hash(str(self.version))
def __cmp__(self, other):
""" packages are sorted by name then version """
if self.name == other.name:
return apt_pkg.version_compare(self.version, other.version)
else:
return self.name.__cmp__(other.name)
def __repr__(self):
return "Package: %s, Version: %s" % (self.name, self.version)
class Aptly(object):
"""Wrapper around the aptly tool"""
def run(self, cmd_string, insert_aptly=True, shell=False):
""" execute a single command
if insert_aptly is true, the path to aptly will be
inserted as the first argument.
returns string for stdout
"""
args = cmd_string.split(' ')
if insert_aptly:
args.insert(0, '/usr/bin/aptly')
# remove spaces and empty args
args = [i.strip() for i in args if i.strip()]
if shell:
args = " ".join(args)
LOGGER.debug("run args: %s" % args)
return subprocess.check_output(args, shell=shell)
def mirror_list(self):
"""List aptly mirrors"""
return self.run('mirror list -raw').splitlines()
def mirror_create(self, name, architecture, dist="./",
component="", url=None, key_id=None, key_url=None,
ppa=None):
"""Create a repo mirror via aptly.
These are metadata only, packages arent pulled till update
"""
# check mirror doesn't exist
if name in self.mirror_list():
raise ValueError("a mirror with name %(name)s already exists"
% name)
# check we have a key for the mirror
if key_id is None and key_url is None:
raise ValueError("mirror create requires key_id, key_url")
LOGGER.info("Creating mirror: %s" % name)
got_key = False
for attempt in range(3):
try:
if key_id is not None:
self.run('gpg --no-default-keyring --keyring '
'trustedkeys.gpg --recv-keys %(key_id)s'
% {'key_id': key_id}, insert_aptly=False)
else:
self.run('curl %(url)s | gpg --no-default-keyring '
'--keyring trustedkeys.gpg --import'
% {'url': key_url}, insert_aptly=False,
shell=True)
got_key = True
break
except Exception as e:
LOGGER.warning("gpg import failed: %(exception)s "
"Attempt: %(attempt)s"
% {'exception': e, 'attempt': attempt})
continue # try again
if not got_key:
raise Exception("Failed to import gpg key for mirror %s"
% name)
if ppa:
self.run("mirror create -architectures %(arch)s %(name)s "
"ppa:%(ppa)s" % {'arch': architecture,
'name': name,
'ppa': ppa})
else:
self.run("mirror create -architectures %(arch)s %(name)s %(url)s "
"%(dist)s %(component)s" % {'arch': architecture,
'name': name,
'url': url,
'dist': dist,
'component': component})
def mirror_update(self, name):
"""Update (refresh) an aptly mirror"""
if name not in self.mirror_list():
raise ValueError("mirror %(name)s not found"
% {'name': name})
LOGGER.info("Updating mirror %s" % name)
self.run("mirror update %(name)s" % {'name': name})
def mirror_get_packages(self, name, arch):
"""Get list of packages from a mirror"""
lines = self.run("mirror show -with-packages %(name)s"
% {'name': name}).splitlines()
packages = self.parse_aptly_package_list(lines, source=name,
arch=arch)
LOGGER.debug("Found %(num_packages)s packages for mirror %(name)s"
% {'num_packages': len(packages),
'name': name})
return packages
def parse_aptly_package_list(self, lines, source=None, arch=None):
"""Generate a PackageList() from aptly package list output"""
# match package list from aptly ... show -with-packages name
line_re = re.compile('\s{2,}(?P<name>[^_]*)_(?P<version>[^_]*)'
'_(?P<arch>[^_]*)')
package_list = PackageList(name=source)
for line in lines:
match = line_re.match(line)
if match:
gd = match.groupdict()
if gd['arch'] in [arch, 'all', None]:
package_list.add_package(Package(name=gd['name'],
version=gd['version'],
source=source))
else:
LOGGER.debug('rejecting package %s %s due to invalid '
'architecture %s' % (gd['name'],
gd['version'],
gd['arch']))
return package_list
def repo_list(self):
"""Get list of repos known to aptly"""
return self.run("-raw repo list").splitlines()
def repo_create(self, name):
"""Create an aptly repo"""
self.run("repo create %(name)s" % {'name': name})
def repo_get_packages(self, name):
"""Generate PackageList representing an atply repo"""
lines = self.run('repo show -with-packages %(name)s'
% {'name': name})
return self.parse_aptly_package_list(lines, source=name)
def package_query(self, package):
"""return aptly query string for a package name & version"""
return '"%(name)s (=%(version)s)"' % {'name': package.name,
'version': package.version}
    def repo_import_package(self, mirror, repo, package):
        """Pull a package from a mirror into a repo"""
        self.run('repo import %(mirror)s %(repo)s %(query)s'
                 % {'mirror': mirror,
                    'repo': repo,
                    'query': self.package_query(package)
                   }, shell=True)
def repo_import_packages(self, repo, package_list, batch_size=200):
"""Batch import packages from a mirror into a repo"""
packages = package_list.all_packages()
packages_copy = copy.deepcopy(packages)
packages_reconstruct = []
LOGGER.info('importing %s packages from %s' % (len(packages),
package_list.name))
while packages:
batch = packages[:batch_size]
packages_reconstruct.extend(batch)
if not batch:
break
packages = packages[batch_size:]
query_string = " ".join([self.package_query(p) for p in batch])
self.run('repo import %(mirror)s %(repo)s %(query)s'
% {'mirror': package_list.name,
'repo': repo,
'query': query_string},
shell=True)
assert packages_reconstruct == packages_copy
def repo_publish(self, name):
"""Create on disk distribution metata for an aptly internal repo"""
self.run('publish repo -distribution %(name)s %(name)s'
% {'name': name})
LOGGER.info("Published repo %(name)s" % {'name': name})
    def snapshot_list(self):
        """List repo snapshots known to aptly"""
        return self.run("-raw snapshot list").splitlines()
    def snapshot_create(self, name):
        """Create an empty aptly snapshot"""
        self.run("snapshot create %(name)s empty" % {'name': name})
class Config(object):
""" Class representing YAML config files"""
def __init__(self, path):
self.path = path
self.read(path)
def read(self, path=None):
"""Read yaml from self.path into self.data"""
if path is None:
path = self.path
self.data = yaml.load(open(path).read())
def write(self, path=None):
"""Write yaml version of self.data to self.path"""
if path is None:
path = self.path
with open(path, 'w') as f:
f.write(yaml.dump(self.data, default_flow_style=False))
def __getitem__(self, key):
"""pass subscript requests to self.data"""
return self.data.get(key)
def __setitem__(self, key, value):
"""pass subscript requests to self.data"""
self.data[key] = value
class AptlyOrechestrator(object):
""" Class which uses the Aptly wrapper to achieve RPC aims
This is mostly creating repos from a supplied config file.
"""
def __init__(self, args):
self.config_path = args.config_path
self.args = args
self.aptly = Aptly()
self.config = Config(args.config_path)
# needed in order to use apt_pkg.version_compare
apt_pkg.init()
def ensure_mirrors(self, required_mirrors):
""" Check mirrors list, create any that are missing"""
current_mirrors = self.aptly.mirror_list()
self.mirrors = PackageStore()
for required_mirror in required_mirrors:
# Create mirror if necessary
mirror_name = required_mirror['name']
if mirror_name not in current_mirrors:
required_mirror['architecture'] =\
self.config['config']['architecture']
self.aptly.mirror_create(**required_mirror)
# Store list of available packages for each mirror
self.aptly.mirror_update(mirror_name)
self.mirrors[mirror_name] = \
self.aptly.mirror_get_packages(
mirror_name,
self.config['config']['architecture'])
def packages_from_config(self):
"""get list of required packages from config
"""
pl = PackageList(name='config')
for name, version in self.config['packages'].iteritems():
pl.add_package(Package(name=name, version=version,
source='config'))
return pl
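    # The 'packages' section of the YAML config that this method reads
    # would look like this (hypothetical entries):
    #
    #   packages:
    #     vim: 2:7.4.000-1
    #     curl: 7.35.0-1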
def ensure_packages(self, dist_name):
"""Add all packages from config file to repo dist_name"""
# convert name: version dict to [(name,version),..]
required_packages = self.packages_from_config()
repo_packages = self.aptly.repo_get_packages(dist_name)
# packages that aren't in this repo already so need to be added
missing_packages = required_packages - repo_packages
LOGGER.debug("Packages to add: %s" % missing_packages)
# list of packages we don't find in any upstreams
unavailable_packages = PackageList('unavailable')
# map of mirror to package list for found packages
packages_to_import = PackageStore()
for package in missing_packages:
result = self.mirrors.package_query(package)
if not result:
unavailable_packages.add_package(package)
LOGGER.debug("Failed to find package %(name)s %(version)s"
% {'name': package.name,
'version': package.version})
else:
package = result[0]
if package.source not in packages_to_import:
packages_to_import[package.source] = \
PackageList(package.source)
packages_to_import[package.source].add_package(package)
LOGGER.debug("Found %(pname)s in %(mname)s"
% {'pname': package, 'mname': package.source})
# Batch import all the packages that are known to be available
for package_list in packages_to_import:
self.aptly.repo_import_packages(dist_name, package_list)
# Return list of packages that were not found
return unavailable_packages
def create_dist_from_package_list(self):
"""High level function for creating a dist from
a supplied config file"""
dist_name = self.args.dist_name
if dist_name in self.aptly.repo_list():
raise ValueError("dist name must be unique, %s already exists"
% dist_name)
LOGGER.info("Creating repo %(name)s" % {'name': dist_name})
self.ensure_mirrors(copy.deepcopy(self.config['upstream_repos']))
self.aptly.repo_create(dist_name)
unavailable_packages = self.ensure_packages(dist_name)
if unavailable_packages:
LOGGER.warning("The following packages are not available")
for package in unavailable_packages:
LOGGER.warning("Not Found: %(package)s"
% {'package': package})
self.aptly.repo_publish(dist_name)
def add_package_to_config(self):
""" Add a package to the supplied config file"""
self.config['packages'][self.args.name] = self.args.version
def delete_package_from_config(self):
""" Remove a package from the supplied config file"""
del self.config['packages'][self.args.name]
    def list_upstream_packages(self):
        """Print all packages available in the configured upstream mirrors"""
        self.ensure_mirrors(self.config['upstream_repos'])
        for package in self.mirrors.all_packages():
            print "%(mirror_name)s,%(package)s"\
                % {'mirror_name': package.source,
                   'package': package}
def check_for_new_and_updated(self):
"""Check for new and updated packages print them and optionally
generate a new config file"""
self.ensure_mirrors(self.config['upstream_repos'])
# Count updated packages
package_updates = 0
config_packages = self.packages_from_config()
        # Iterate over packages in config and check for newer versions.
for package in config_packages:
updates = self.mirrors.package_query(package, newer_only=True)
if updates:
self.config['packages'][package.name] = updates[-1].version
package_updates += 1
LOGGER.info("Package %(name)s Config Version: %(current)s"
" Updates: %(updates)s"
% {'name': package.name,
'current': package.version,
'updates': [p.version for p in updates]})
# Find packages that are available upstream but not in the config file
all_upstream_packages = self.mirrors.all_packages()
new_packages = PackageList('new_packages')
for package in all_upstream_packages:
if package not in config_packages:
new_packages.add_package(package)
LOGGER.info("New %s" % (package))
self.config['packages'][package.name] = package.version
# write new config file with updated versions and new packages
# if requested
if 'new_config_path' in self.args:
self.config.write(self.args.new_config_path)
LOGGER.info("updated config containing %s packages written to %s"
% (len(self.config['packages']),
self.args.new_config_path))
LOGGER.info("Input Config Packages: %s, Upstream Packages: %s, "
"New Packages: %s, Package Updates: %s" %
(len(config_packages),
len(self.mirrors.distinct_packages()),
len(new_packages.distinct_packages()), package_updates))
def main(args):
parser = argparse.ArgumentParser()
# arguments that are relevant to all subcommands
parser.add_argument('config_path')
parser.add_argument('-v', '--verbose', action='store_true',
help="more logging")
# Add subparsers for subcommands
subparsers = parser.add_subparsers()
# dfc = distribution from config
parser_dfc = subparsers.add_parser(
'dist_from_config',
help="Create distribution from config file")
parser_dfc.add_argument('dist_name', help="must be unique")
parser_dfc.set_defaults(func="dfc")
# subcommand to add packages to config
parser_add_package = subparsers.add_parser(
'add_package_to_config',
help="Add a package to the config file")
parser_add_package.add_argument('name')
parser_add_package.add_argument('version')
parser_add_package.set_defaults(func="add_package")
# subcommand to remove packages from config
parser_del_package = subparsers.add_parser(
'delete_package_from_config',
help="Delete package from config file")
parser_del_package.add_argument('name')
parser_del_package.set_defaults(func="del_package")
# List all packages available in configured upstreams
parser_list_upstream_pkgs = subparsers.add_parser(
'list_upstream_packages',
help="List all packages from upstreams in config file")
parser_list_upstream_pkgs.set_defaults(func='list_upstream')
# List all packages in the package list that have updates available
parser_list_upstream_pkgs = subparsers.add_parser(
'list_package_updates',
help="List packages which have newer versions available upstream"
" than listed in the config file")
parser_list_upstream_pkgs.set_defaults(func='list_updates')
# Generate new config file with updated package versions
parser_generate_updated_config = subparsers.add_parser(
'generate_updated_config',
help="Generate new config file with package versions "
"updated to latest available")
parser_generate_updated_config.add_argument(
'new_config_path',
help="New config with updated package verions will be generated"
" and written to this path")
parser_generate_updated_config.add_argument(
'--add-new', dest="add_new", action='store_true',
help="Add packages available upstream but not listed in"
" the specified config file to the new config file")
parser_generate_updated_config.set_defaults(func='list_updates')
args = parser.parse_args(args=args[1:])
if args.verbose:
LOGGER.setLevel(logging.DEBUG)
apt_pkg.init() # Only needs to be done once to read the apt configs
    ao = AptlyOrchestrator(args)
# Each subparser sets the func arg, call the appropriate function
{'dfc': ao.create_dist_from_package_list,
'add_package': ao.add_package_to_config,
'del_package': ao.delete_package_from_config,
'list_upstream': ao.list_upstream_packages,
'list_updates': ao.check_for_new_and_updated}[args.func]()
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 |
orgito/ansible | test/runner/lib/sanity/pslint.py | 35 | 5231 | """Sanity test using PSScriptAnalyzer."""
from __future__ import absolute_import, print_function
import collections
import json
import os
import re
from lib.sanity import (
SanitySingleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SanitySkipped,
)
from lib.util import (
SubprocessError,
run_command,
find_executable,
read_lines_without_comments,
)
from lib.config import (
SanityConfig,
)
from lib.test import (
calculate_confidence,
calculate_best_confidence,
)
PSLINT_SKIP_PATH = 'test/sanity/pslint/skip.txt'
PSLINT_IGNORE_PATH = 'test/sanity/pslint/ignore.txt'
class PslintTest(SanitySingleVersion):
"""Sanity test using PSScriptAnalyzer."""
def test(self, args, targets):
"""
:type args: SanityConfig
:type targets: SanityTargets
:rtype: TestResult
"""
skip_paths = read_lines_without_comments(PSLINT_SKIP_PATH)
invalid_ignores = []
ignore_entries = read_lines_without_comments(PSLINT_IGNORE_PATH)
ignore = collections.defaultdict(dict)
line = 0
for ignore_entry in ignore_entries:
line += 1
if not ignore_entry:
continue
if ' ' not in ignore_entry:
invalid_ignores.append((line, 'Invalid syntax'))
continue
path, code = ignore_entry.split(' ', 1)
if not os.path.exists(path):
invalid_ignores.append((line, 'Remove "%s" since it does not exist' % path))
continue
ignore[path][code] = line
paths = sorted(i.path for i in targets.include if os.path.splitext(i.path)[1] in ('.ps1', '.psm1', '.psd1') and i.path not in skip_paths)
if not paths:
return SanitySkipped(self.name)
if not find_executable('pwsh', required='warning'):
return SanitySkipped(self.name)
cmd = ['test/sanity/pslint/pslint.ps1'] + paths
try:
stdout, stderr = run_command(args, cmd, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stderr:
raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
if args.explain:
return SanitySuccess(self.name)
severity = [
'Information',
'Warning',
'Error',
]
cwd = os.getcwd() + '/'
# replace unicode smart quotes with ascii versions
stdout = re.sub(u'[\u2018\u2019]', "'", stdout)
stdout = re.sub(u'[\u201c\u201d]', '"', stdout)
messages = json.loads(stdout)
errors = [SanityMessage(
code=m['RuleName'],
message=m['Message'],
path=m['ScriptPath'].replace(cwd, ''),
line=m['Line'] or 0,
column=m['Column'] or 0,
level=severity[m['Severity']],
) for m in messages]
line = 0
filtered = []
for error in errors:
if error.code in ignore[error.path]:
ignore[error.path][error.code] = None # error ignored, clear line number of ignore entry to track usage
else:
filtered.append(error) # error not ignored
errors = filtered
for invalid_ignore in invalid_ignores:
errors.append(SanityMessage(
code='A201',
message=invalid_ignore[1],
path=PSLINT_IGNORE_PATH,
line=invalid_ignore[0],
column=1,
                confidence=calculate_confidence(PSLINT_IGNORE_PATH, invalid_ignore[0], args.metadata) if args.metadata.changes else None,
))
for path in skip_paths:
line += 1
if not path:
continue
if not os.path.exists(path):
# Keep files out of the list which no longer exist in the repo.
errors.append(SanityMessage(
code='A101',
message='Remove "%s" since it does not exist' % path,
path=PSLINT_SKIP_PATH,
line=line,
column=1,
confidence=calculate_best_confidence(((PSLINT_SKIP_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None,
))
for path in paths:
if path not in ignore:
continue
for code in ignore[path]:
line = ignore[path][code]
if not line:
continue
errors.append(SanityMessage(
code='A102',
message='Remove since "%s" passes "%s" test' % (path, code),
path=PSLINT_IGNORE_PATH,
line=line,
column=1,
confidence=calculate_best_confidence(((PSLINT_IGNORE_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None,
))
if errors:
return SanityFailure(self.name, messages=errors)
return SanitySuccess(self.name)
| gpl-3.0 |
nhippenmeyer/django | django/contrib/gis/db/backends/postgis/const.py | 528 | 1484 | """
PostGIS to GDAL conversion constant definitions
"""
# Lookup to convert pixel type values from GDAL to PostGIS
GDAL_TO_POSTGIS = [None, 4, 6, 5, 8, 7, 10, 11, None, None, None, None]
# Lookup to convert pixel type values from PostGIS to GDAL
POSTGIS_TO_GDAL = [1, 1, 1, 3, 1, 3, 2, 5, 4, None, 6, 7, None, None]
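# Example round trip through the two lookups above: GDAL band type 2
# (UInt16) maps to PostGIS pixel type 6, and back again:
#
#   assert GDAL_TO_POSTGIS[2] == 6
#   assert POSTGIS_TO_GDAL[6] == 2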
# Struct pack structure for raster header, the raster header has the
# following structure:
#
# Endianness, PostGIS raster version, number of bands, scale, origin,
# skew, srid, width, and height.
#
# Scale, origin, and skew have x and y values. PostGIS currently uses
# a fixed endianness (1) and there is only one version (0).
POSTGIS_HEADER_STRUCTURE = 'B H H d d d d d d i H H'
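# Minimal sketch of unpacking a header with this format string (assumes a
# little-endian blob; `raster_bytes` is a hypothetical bytes value, and real
# data carries its endianness flag in the first byte):
#
#   import struct
#   fmt = '<' + POSTGIS_HEADER_STRUCTURE.replace(' ', '')
#   header = struct.unpack(fmt, raster_bytes[:struct.calcsize(fmt)])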
# Lookup values to convert GDAL pixel types to struct characters. This is
# used to pack and unpack the pixel values of PostGIS raster bands.
GDAL_TO_STRUCT = [
None, 'B', 'H', 'h', 'L', 'l', 'f', 'd',
None, None, None, None,
]
# Size of the packed value in bytes for different numerical types.
# This is needed to cut chunks of band data out of PostGIS raster strings
# when decomposing them into GDALRasters.
# See https://docs.python.org/3/library/struct.html#format-characters
STRUCT_SIZE = {
'b': 1, # Signed char
'B': 1, # Unsigned char
'?': 1, # _Bool
'h': 2, # Short
'H': 2, # Unsigned short
'i': 4, # Integer
'I': 4, # Unsigned Integer
'l': 4, # Long
'L': 4, # Unsigned Long
'f': 4, # Float
'd': 8, # Double
}
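# Example: a band of 100 pixels of type 'H' (unsigned short) occupies
# 100 * STRUCT_SIZE['H'] == 200 bytes of band data.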
| bsd-3-clause |
ColOfAbRiX/ansible | lib/ansible/modules/identity/ipa/ipa_sudocmd.py | 28 | 6618 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ipa_sudocmd
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA sudo command
description:
- Add, modify or delete sudo command within FreeIPA server using FreeIPA API.
options:
sudocmd:
description:
- Sudo Command.
aliases: ['name']
required: true
description:
description:
- A description of this command.
required: false
state:
description: State to ensure
required: false
default: present
choices: ['present', 'absent']
ipa_port:
description: Port of IPA server
required: false
default: 443
ipa_host:
description: IP or hostname of IPA server
required: false
default: "ipa.example.com"
ipa_user:
description: Administrative account used on IPA server
required: false
default: "admin"
ipa_pass:
description: Password of administrative user
required: true
ipa_prot:
description: Protocol used by IPA server
required: false
default: "https"
choices: ["http", "https"]
validate_certs:
description:
- This only applies if C(ipa_prot) is I(https).
- If set to C(no), the SSL certificates will not be validated.
    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
required: false
default: true
version_added: "2.3"
'''
EXAMPLES = '''
# Ensure sudo command exists
- ipa_sudocmd:
name: su
description: Allow to run su via sudo
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure sudo command does not exist
- ipa_sudocmd:
name: su
state: absent
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
'''
RETURN = '''
sudocmd:
description: Sudo command as return from IPA API
returned: always
type: dict
'''
from ansible.module_utils.ipa import IPAClient
class SudoCmdIPAClient(IPAClient):
def __init__(self, module, host, port, protocol):
super(SudoCmdIPAClient, self).__init__(module, host, port, protocol)
def sudocmd_find(self, name):
return self._post_json(method='sudocmd_find', name=None, item={'all': True, 'sudocmd': name})
def sudocmd_add(self, name, item):
return self._post_json(method='sudocmd_add', name=name, item=item)
def sudocmd_mod(self, name, item):
return self._post_json(method='sudocmd_mod', name=name, item=item)
def sudocmd_del(self, name):
return self._post_json(method='sudocmd_del', name=name)
def get_sudocmd_dict(description=None):
data = {}
if description is not None:
data['description'] = description
return data
def get_sudocmd_diff(ipa_sudocmd, module_sudocmd):
data = []
for key in module_sudocmd.keys():
module_value = module_sudocmd.get(key, None)
ipa_value = ipa_sudocmd.get(key, None)
if isinstance(ipa_value, list) and not isinstance(module_value, list):
module_value = [module_value]
if isinstance(ipa_value, list) and isinstance(module_value, list):
ipa_value = sorted(ipa_value)
module_value = sorted(module_value)
if ipa_value != module_value:
data.append(key)
return data
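# Example (hypothetical values): IPA returns attribute values wrapped in
# lists, so a changed description is detected like this:
#
#   get_sudocmd_diff({'description': ['old text']},
#                    {'description': 'new text'})  # -> ['description']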
def ensure(module, client):
name = module.params['sudocmd']
state = module.params['state']
module_sudocmd = get_sudocmd_dict(description=module.params['description'])
ipa_sudocmd = client.sudocmd_find(name=name)
changed = False
if state == 'present':
if not ipa_sudocmd:
changed = True
if not module.check_mode:
client.sudocmd_add(name=name, item=module_sudocmd)
else:
diff = get_sudocmd_diff(ipa_sudocmd, module_sudocmd)
if len(diff) > 0:
changed = True
if not module.check_mode:
data = {}
for key in diff:
data[key] = module_sudocmd.get(key)
client.sudocmd_mod(name=name, item=data)
else:
if ipa_sudocmd:
changed = True
if not module.check_mode:
client.sudocmd_del(name=name)
return changed, client.sudocmd_find(name=name)
def main():
module = AnsibleModule(
argument_spec=dict(
description=dict(type='str', required=False),
state=dict(type='str', required=False, default='present',
choices=['present', 'absent', 'enabled', 'disabled']),
sudocmd=dict(type='str', required=True, aliases=['name']),
ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
ipa_host=dict(type='str', required=False, default='ipa.example.com'),
ipa_port=dict(type='int', required=False, default=443),
ipa_user=dict(type='str', required=False, default='admin'),
ipa_pass=dict(type='str', required=True, no_log=True),
validate_certs=dict(type='bool', required=False, default=True),
),
supports_check_mode=True,
)
client = SudoCmdIPAClient(module=module,
host=module.params['ipa_host'],
port=module.params['ipa_port'],
protocol=module.params['ipa_prot'])
try:
client.login(username=module.params['ipa_user'],
password=module.params['ipa_pass'])
changed, sudocmd = ensure(module, client)
module.exit_json(changed=changed, sudocmd=sudocmd)
except Exception:
e = get_exception()
module.fail_json(msg=str(e))
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main()
| gpl-3.0 |
sserrot/champion_relationships | venv/Lib/site-packages/tornado/escape.py | 4 | 13174 | #
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Escaping/unescaping methods for HTML, JSON, URLs, and others.
Also includes a few other miscellaneous string manipulation functions that
have crept in over time.
"""
import html.entities
import json
import re
import urllib.parse
from tornado.util import unicode_type
import typing
from typing import Union, Any, Optional, Dict, List, Callable
_XHTML_ESCAPE_RE = re.compile("[&<>\"']")
_XHTML_ESCAPE_DICT = {
"&": "&",
"<": "<",
">": ">",
'"': """,
"'": "'",
}
def xhtml_escape(value: Union[str, bytes]) -> str:
"""Escapes a string so it is valid within HTML or XML.
Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``.
When used in attribute values the escaped strings must be enclosed
in quotes.
.. versionchanged:: 3.2
Added the single quote to the list of escaped characters.
"""
return _XHTML_ESCAPE_RE.sub(
lambda match: _XHTML_ESCAPE_DICT[match.group(0)], to_basestring(value)
)
def xhtml_unescape(value: Union[str, bytes]) -> str:
"""Un-escapes an XML-escaped string."""
return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))
# The fact that json_encode wraps json.dumps is an implementation detail.
# Please see https://github.com/tornadoweb/tornado/pull/706
# before sending a pull request that adds **kwargs to this function.
def json_encode(value: Any) -> str:
"""JSON-encodes the given Python object."""
# JSON permits but does not require forward slashes to be escaped.
# This is useful when json data is emitted in a <script> tag
# in HTML, as it prevents </script> tags from prematurely terminating
# the javascript. Some json libraries do this escaping by default,
# although python's standard library does not, so we do it here.
# http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped
return json.dumps(value).replace("</", "<\\/")
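# Example: json_encode("</script>") returns '"<\\/script>"' (a literal
# backslash before the slash), so the closing tag cannot terminate an
# enclosing <script> block early.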
def json_decode(value: Union[str, bytes]) -> Any:
"""Returns Python objects for the given JSON string.
Supports both `str` and `bytes` inputs.
"""
return json.loads(to_basestring(value))
def squeeze(value: str) -> str:
"""Replace all sequences of whitespace chars with a single space."""
return re.sub(r"[\x00-\x20]+", " ", value).strip()
def url_escape(value: Union[str, bytes], plus: bool = True) -> str:
"""Returns a URL-encoded version of the given value.
If ``plus`` is true (the default), spaces will be represented
as "+" instead of "%20". This is appropriate for query strings
but not for the path component of a URL. Note that this default
is the reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument
"""
quote = urllib.parse.quote_plus if plus else urllib.parse.quote
return quote(utf8(value))
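# Example: url_escape("a b&c") gives 'a+b%26c', while
# url_escape("a b&c", plus=False) gives 'a%20b%26c'; the latter form is the
# one to use for path components.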
@typing.overload
def url_unescape(value: Union[str, bytes], encoding: None, plus: bool = True) -> bytes:
pass
@typing.overload # noqa: F811
def url_unescape(
value: Union[str, bytes], encoding: str = "utf-8", plus: bool = True
) -> str:
pass
def url_unescape( # noqa: F811
value: Union[str, bytes], encoding: Optional[str] = "utf-8", plus: bool = True
) -> Union[str, bytes]:
"""Decodes the given value from a URL.
The argument may be either a byte or unicode string.
If encoding is None, the result will be a byte string. Otherwise,
the result is a unicode string in the specified encoding.
If ``plus`` is true (the default), plus signs will be interpreted
as spaces (literal plus signs must be represented as "%2B"). This
is appropriate for query strings and form-encoded values but not
for the path component of a URL. Note that this default is the
reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument
"""
if encoding is None:
if plus:
# unquote_to_bytes doesn't have a _plus variant
value = to_basestring(value).replace("+", " ")
return urllib.parse.unquote_to_bytes(value)
else:
unquote = urllib.parse.unquote_plus if plus else urllib.parse.unquote
return unquote(to_basestring(value), encoding=encoding)
def parse_qs_bytes(
qs: str, keep_blank_values: bool = False, strict_parsing: bool = False
) -> Dict[str, List[bytes]]:
"""Parses a query string like urlparse.parse_qs, but returns the
values as byte strings.
Keys still become type str (interpreted as latin1 in python3!)
because it's too painful to keep them as byte strings in
python3 and in practice they're nearly always ascii anyway.
"""
# This is gross, but python3 doesn't give us another way.
# Latin1 is the universal donor of character encodings.
result = urllib.parse.parse_qs(
qs, keep_blank_values, strict_parsing, encoding="latin1", errors="strict"
)
encoded = {}
for k, v in result.items():
encoded[k] = [i.encode("latin1") for i in v]
return encoded
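# Example: parse_qs_bytes('x=a&x=b') returns {'x': [b'a', b'b']}; keys are
# native strings while values stay as byte strings.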
_UTF8_TYPES = (bytes, type(None))
@typing.overload
def utf8(value: bytes) -> bytes:
pass
@typing.overload # noqa: F811
def utf8(value: str) -> bytes:
pass
@typing.overload # noqa: F811
def utf8(value: None) -> None:
pass
def utf8(value: Union[None, str, bytes]) -> Optional[bytes]: # noqa: F811
"""Converts a string argument to a byte string.
If the argument is already a byte string or None, it is returned unchanged.
Otherwise it must be a unicode string and is encoded as utf8.
"""
if isinstance(value, _UTF8_TYPES):
return value
if not isinstance(value, unicode_type):
raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
return value.encode("utf-8")
_TO_UNICODE_TYPES = (unicode_type, type(None))
@typing.overload
def to_unicode(value: str) -> str:
pass
@typing.overload # noqa: F811
def to_unicode(value: bytes) -> str:
pass
@typing.overload # noqa: F811
def to_unicode(value: None) -> None:
pass
def to_unicode(value: Union[None, str, bytes]) -> Optional[str]: # noqa: F811
"""Converts a string argument to a unicode string.
If the argument is already a unicode string or None, it is returned
unchanged. Otherwise it must be a byte string and is decoded as utf8.
"""
if isinstance(value, _TO_UNICODE_TYPES):
return value
if not isinstance(value, bytes):
raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
return value.decode("utf-8")
# to_unicode was previously named _unicode not because it was private,
# but to avoid conflicts with the built-in unicode() function/type
_unicode = to_unicode
# When dealing with the standard library across python 2 and 3 it is
# sometimes useful to have a direct conversion to the native string type
native_str = to_unicode
to_basestring = to_unicode
def recursive_unicode(obj: Any) -> Any:
"""Walks a simple data structure, converting byte strings to unicode.
Supports lists, tuples, and dictionaries.
"""
if isinstance(obj, dict):
return dict(
(recursive_unicode(k), recursive_unicode(v)) for (k, v) in obj.items()
)
elif isinstance(obj, list):
return list(recursive_unicode(i) for i in obj)
elif isinstance(obj, tuple):
return tuple(recursive_unicode(i) for i in obj)
elif isinstance(obj, bytes):
return to_unicode(obj)
else:
return obj
# I originally used the regex from
# http://daringfireball.net/2010/07/improved_regex_for_matching_urls
# but it gets all exponential on certain patterns (such as too many trailing
# dots), causing the regex matcher to never return.
# This regex should avoid those problems.
# Use to_unicode instead of tornado.util.u - we don't want backslashes getting
# processed as escapes.
_URL_RE = re.compile(
to_unicode(
r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&|")*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&|")*\)))+)""" # noqa: E501
)
)
def linkify(
text: Union[str, bytes],
shorten: bool = False,
extra_params: Union[str, Callable[[str], str]] = "",
require_protocol: bool = False,
permitted_protocols: List[str] = ["http", "https"],
) -> str:
"""Converts plain text into HTML with links.
For example: ``linkify("Hello http://tornadoweb.org!")`` would return
``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``
Parameters:
* ``shorten``: Long urls will be shortened for display.
* ``extra_params``: Extra text to include in the link tag, or a callable
taking the link as an argument and returning the extra text
e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
or::
def extra_params_cb(url):
if url.startswith("http://example.com"):
return 'class="internal"'
else:
return 'class="external" rel="nofollow"'
linkify(text, extra_params=extra_params_cb)
* ``require_protocol``: Only linkify urls which include a protocol. If
this is False, urls such as www.facebook.com will also be linkified.
* ``permitted_protocols``: List (or set) of protocols which should be
linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
"mailto"])``. It is very unsafe to include protocols such as
``javascript``.
"""
if extra_params and not callable(extra_params):
extra_params = " " + extra_params.strip()
def make_link(m: typing.Match) -> str:
url = m.group(1)
proto = m.group(2)
if require_protocol and not proto:
            return url  # no protocol, no linkify
if proto and proto not in permitted_protocols:
return url # bad protocol, no linkify
href = m.group(1)
if not proto:
href = "http://" + href # no proto specified, use http
if callable(extra_params):
params = " " + extra_params(href).strip()
else:
params = extra_params
# clip long urls. max_len is just an approximation
max_len = 30
if shorten and len(url) > max_len:
before_clip = url
if proto:
proto_len = len(proto) + 1 + len(m.group(3) or "") # +1 for :
else:
proto_len = 0
parts = url[proto_len:].split("/")
if len(parts) > 1:
# Grab the whole host part plus the first bit of the path
# The path is usually not that interesting once shortened
# (no more slug, etc), so it really just provides a little
# extra indication of shortening.
url = (
url[:proto_len]
+ parts[0]
+ "/"
+ parts[1][:8].split("?")[0].split(".")[0]
)
if len(url) > max_len * 1.5: # still too long
url = url[:max_len]
if url != before_clip:
amp = url.rfind("&")
# avoid splitting html char entities
if amp > max_len - 5:
url = url[:amp]
url += "..."
if len(url) >= len(before_clip):
url = before_clip
else:
# full url is visible on mouse-over (for those who don't
# have a status bar, such as Safari by default)
params += ' title="%s"' % href
return u'<a href="%s"%s>%s</a>' % (href, params, url)
# First HTML-escape so that our strings are all safe.
    # The regex is modified to avoid character entities other than &amp; so
    # that we won't pick up &quot;, etc.
text = _unicode(xhtml_escape(text))
return _URL_RE.sub(make_link, text)
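# Example: linkify("see www.example.com") produces
# 'see <a href="http://www.example.com">www.example.com</a>' because
# require_protocol defaults to False; with require_protocol=True the bare
# host is left unlinked.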
def _convert_entity(m: typing.Match) -> str:
if m.group(1) == "#":
try:
if m.group(2)[:1].lower() == "x":
return chr(int(m.group(2)[1:], 16))
else:
return chr(int(m.group(2)))
except ValueError:
return "&#%s;" % m.group(2)
try:
return _HTML_UNICODE_MAP[m.group(2)]
except KeyError:
return "&%s;" % m.group(2)
def _build_unicode_map() -> Dict[str, str]:
unicode_map = {}
for name, value in html.entities.name2codepoint.items():
unicode_map[name] = chr(value)
return unicode_map
_HTML_UNICODE_MAP = _build_unicode_map()
| mit |
smaffulli/libcloud | libcloud/common/brightbox.py | 55 | 3413 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from libcloud.common.base import ConnectionUserAndKey, JsonResponse
from libcloud.compute.types import InvalidCredsError
from libcloud.utils.py3 import b
from libcloud.utils.py3 import httplib
try:
import simplejson as json
except ImportError:
import json
class BrightboxResponse(JsonResponse):
def success(self):
return self.status >= httplib.OK and self.status < httplib.BAD_REQUEST
def parse_body(self):
if self.headers['content-type'].split(';')[0] == 'application/json':
return super(BrightboxResponse, self).parse_body()
else:
return self.body
def parse_error(self):
response = super(BrightboxResponse, self).parse_body()
if 'error' in response:
if response['error'] in ['invalid_client', 'unauthorized_client']:
raise InvalidCredsError(response['error'])
return response['error']
elif 'error_name' in response:
return '%s: %s' % (response['error_name'], response['errors'][0])
return self.body
class BrightboxConnection(ConnectionUserAndKey):
"""
Connection class for the Brightbox driver
"""
host = 'api.gb1.brightbox.com'
responseCls = BrightboxResponse
def _fetch_oauth_token(self):
body = json.dumps({'client_id': self.user_id, 'grant_type': 'none'})
authorization = 'Basic ' + str(base64.encodestring(b('%s:%s' %
(self.user_id, self.key)))).rstrip()
self.connect()
headers = {
'Host': self.host,
'User-Agent': self._user_agent(),
'Authorization': authorization,
'Content-Type': 'application/json',
'Content-Length': str(len(body))
}
response = self.connection.request(method='POST', url='/token',
body=body, headers=headers)
response = self.connection.getresponse()
if response.status == httplib.OK:
return json.loads(response.read())['access_token']
else:
responseCls = BrightboxResponse(response=response, connection=self)
message = responseCls.parse_error()
raise InvalidCredsError(message)
def add_default_headers(self, headers):
try:
headers['Authorization'] = 'OAuth ' + self.token
except AttributeError:
self.token = self._fetch_oauth_token()
headers['Authorization'] = 'OAuth ' + self.token
return headers
def encode_data(self, data):
return json.dumps(data)
| apache-2.0 |
gregcaporaso/qiime | tests/test_add_alpha_to_mapping_file.py | 15 | 14308 | #!/usr/bin/env python
# File created on 02 Nov 2012
from __future__ import division
__author__ = "Yoshiki Vazquez-Baeza"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Yoshiki Vazquez-Baeza"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Yoshiki Vazquez-Baeza"
__email__ = "[email protected]"
from numpy import array, median
from unittest import TestCase, main
from qiime.add_alpha_to_mapping_file import (
add_alpha_diversity_values_to_mapping_file,
_get_level, mean_alpha)
class TopLevelTests(TestCase):
def setUp(self):
self.metrics = ['chao1', 'PD_whole_tree']
        self.alpha_diversity_data = array([[173., 6.39901], [332.5, 7.48089],
                                           [189.9375, 5.5103],
                                           [223.58333333, 6.26648],
                                           [176.8, 5.40341], [90., 4.84129],
                                           [127., 4.50866], [211., 7.3172],
                                           [146., 6.57543]])
self.sample_ids = ['PC.636', 'PC.635', 'PC.356', 'PC.481', 'PC.354',
'PC.593', 'PC.355', 'PC.607', 'PC.634']
self.collated_alpha_dict_a = COLLATED_ALPHA_DICT_A
self.collated_alpha_dict_b = COLLATED_ALPHA_DICT_B
self.mapping_file_data = MAPPING_FILE_DATA
self.mapping_file_headers = ['SampleID', 'BarcodeSequence',
'LinkerPrimerSequence', 'Treatment', 'DOB', 'Description']
def test_add_alpha_diversity_values_to_mapping_file(self):
"""checks a mapping file is added with the proper fields """
# regular case no special cases for avg method
expected_mapping_file_data = MAPPING_FILE_DATA_WITH_ALPHA_A
expected_mapping_file_headers = ['SampleID', 'BarcodeSequence',
'LinkerPrimerSequence', 'Treatment', 'DOB', 'Description',
'chao1_alpha', 'chao1_normalized_alpha', 'chao1_alpha_label',
'PD_whole_tree_alpha', 'PD_whole_tree_normalized_alpha',
'PD_whole_tree_alpha_label']
out_mapping_file_data, out_mapping_file_headers =\
add_alpha_diversity_values_to_mapping_file(self.metrics,
self.sample_ids, self.alpha_diversity_data,
self.mapping_file_headers, self.mapping_file_data, 4, 'equal')
self.assertEquals(out_mapping_file_data, expected_mapping_file_data)
self.assertEquals(
out_mapping_file_headers,
expected_mapping_file_headers)
# regular case no special cases for quantile method
expected_mapping_file_data = MAPPING_FILE_DATA_WITH_ALPHA_B
out_mapping_file_data, out_mapping_file_headers =\
add_alpha_diversity_values_to_mapping_file(self.metrics,
self.sample_ids, self.alpha_diversity_data,
self.mapping_file_headers, self.mapping_file_data, 4, 'quantile')
self.assertEquals(out_mapping_file_data, expected_mapping_file_data)
self.assertEquals(
out_mapping_file_headers,
expected_mapping_file_headers)
def test__get_level(self):
""" checks the level assignment is done correctly """
# check regular case with and without prefix tags
expected_output = 1
output = _get_level(0.20, [0.25, 0.5, 0.75])
self.assertEquals(output, expected_output)
expected_output = 'level_bin_1_of_4'
output = _get_level(0.20, [0.25, 0.5, 0.75], 'level_bin')
self.assertEquals(output, expected_output)
expected_output = 'level_bin_3_of_6'
output = _get_level(0.20, [0.05, 0.15, 0.35, 0.8, 0.95], 'level_bin')
self.assertEquals(output, expected_output)
# edge cases with and without prefix tags
expected_output = 2
output = _get_level(0.25, [0.25, 0.5, 0.75])
self.assertEquals(output, expected_output)
expected_output = 4
output = _get_level(1, [0.25, 0.5, 0.75])
self.assertEquals(output, expected_output)
expected_output = 'testing_bin_2_of_4'
output = _get_level(0.25, [0.25, 0.5, 0.75], 'testing_bin')
self.assertEquals(output, expected_output)
expected_output = 'testing_bin_4_of_4'
output = _get_level(1, [0.25, 0.5, 0.75], 'testing_bin')
self.assertEquals(output, expected_output)
# unwanted cases, greater than one and negative values
with self.assertRaises(ValueError):
output = _get_level(1.3, [0.5])
with self.assertRaises(ValueError):
output = _get_level(-1, [0.25, 0.5, 0.75])
def test_mean_alpha(self):
"""checks data is being correctly averaged"""
# regular use-cases for this function
expected_data = [[9.441785, 82.93],
[0.42877, 5.2006], [9.625995, 8.18]]
expected_metrics = ['PD_whole_tree_even_310', 'chao1_even_310']
expected_sample_ids = ['s1', 's2', 's3']
o_metrics, o_sample_ids, o_data = mean_alpha(
self.collated_alpha_dict_a, 310)
self.assertEquals(o_metrics, expected_metrics)
self.assertEquals(o_sample_ids, expected_sample_ids)
self.assertEquals(o_data, expected_data)
expected_data = [[12.508435, 11.6105],
[0.42877, 8.42], [11.58785, 1.0]]
expected_metrics = ['PD_whole_tree_even_610', 'chao1_even_610']
o_metrics, o_sample_ids, o_data = mean_alpha(
self.collated_alpha_dict_a, 610)
self.assertEquals(o_metrics, expected_metrics)
self.assertEquals(o_sample_ids, expected_sample_ids)
self.assertEquals(o_data, expected_data)
# should default to the highest depth
o_metrics, o_sample_ids, o_data = mean_alpha(
self.collated_alpha_dict_a,
None)
self.assertEquals(o_metrics, expected_metrics)
self.assertEquals(o_sample_ids, expected_sample_ids)
self.assertEquals(o_data, expected_data)
        # non-existent depth
with self.assertRaises(ValueError):
o_metrics, o_sample_ids, o_data = mean_alpha(
self.collated_alpha_dict_b, 111111)
# files with non-matching sample ids should raise an exception
with self.assertRaises(ValueError):
o_metrics, o_sample_ids, o_data = mean_alpha(
self.collated_alpha_dict_b, 310)
# input types that should not be processed
with self.assertRaises(AssertionError):
output = mean_alpha([1, 2, 3], 5)
with self.assertRaises(AssertionError):
output = mean_alpha({'a': 'b'}, -1.4)
MAPPING_FILE_DATA = [
['PC.354',
'AGCACGAGCCTA',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061218',
'Control_mouse_I.D._354'],
['PC.355',
'AACTCGTCGATG',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061218',
'Control_mouse_I.D._355'],
['PC.356',
'ACAGACCACTCA',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061126',
'Control_mouse_I.D._356'],
['PC.481',
'ACCAGCGACTAG',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20070314',
'Control_mouse_I.D._481'],
['PC.593',
'AGCAGCACTTGT',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20071210',
'Control_mouse_I.D._593'],
['PC.607',
'AACTGTGCGTAC',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20071112',
'Fasting_mouse_I.D._607'],
['PC.634',
'ACAGAGTCGGCT',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20080116',
'Fasting_mouse_I.D._634'],
['PC.635',
'ACCGCAGAGTCA',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20080116',
'Fasting_mouse_I.D._635'],
['PC.636', 'ACGGTGAGTGTC', 'YATGCTGCCTCCCGTAGGAGT', 'Fast', '20080116', 'Fasting_mouse_I.D._636']]
MAPPING_FILE_DATA_WITH_ALPHA_A = [
['PC.354',
'AGCACGAGCCTA',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061218',
'Control_mouse_I.D._354',
'176.8',
'0.35793814433',
'bin_2_of_4',
'5.40341',
'0.301036595418',
'bin_2_of_4'],
['PC.355',
'AACTCGTCGATG',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061218',
'Control_mouse_I.D._355',
'127.0',
'0.152577319588',
'bin_1_of_4',
'4.50866',
'0.0',
'bin_1_of_4'],
['PC.356',
'ACAGACCACTCA',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061126',
'Control_mouse_I.D._356',
'189.9375',
'0.412113402062',
'bin_2_of_4',
'5.5103',
'0.336999491964',
'bin_2_of_4'],
['PC.481',
'ACCAGCGACTAG',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20070314',
'Control_mouse_I.D._481',
'223.58333333',
'0.550859106515',
'bin_3_of_4',
'6.26648',
'0.59141452714',
'bin_3_of_4'],
['PC.593',
'AGCAGCACTTGT',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20071210',
'Control_mouse_I.D._593',
'90.0',
'0.0',
'bin_1_of_4',
'4.84129',
'0.111912604341',
'bin_1_of_4'],
['PC.607',
'AACTGTGCGTAC',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20071112',
'Fasting_mouse_I.D._607',
'211.0',
'0.498969072165',
'bin_2_of_4',
'7.3172',
'0.944926873089',
'bin_4_of_4'],
['PC.634',
'ACAGAGTCGGCT',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20080116',
'Fasting_mouse_I.D._634',
'146.0',
'0.230927835052',
'bin_1_of_4',
'6.57543',
'0.695360049525',
'bin_3_of_4'],
['PC.635',
'ACCGCAGAGTCA',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20080116',
'Fasting_mouse_I.D._635',
'332.5',
'1.0',
'bin_4_of_4',
'7.48089',
'1.0',
'bin_4_of_4'],
['PC.636', 'ACGGTGAGTGTC', 'YATGCTGCCTCCCGTAGGAGT', 'Fast', '20080116', 'Fasting_mouse_I.D._636', '173.0', '0.342268041237', 'bin_2_of_4', '6.39901', '0.636003943167', 'bin_3_of_4']]
MAPPING_FILE_DATA_WITH_ALPHA_B = [
['PC.354',
'AGCACGAGCCTA',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061218',
'Control_mouse_I.D._354',
'176.8',
'0.35793814433',
'bin_3_of_4',
'5.40341',
'0.301036595418',
'bin_2_of_4'],
['PC.355',
'AACTCGTCGATG',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061218',
'Control_mouse_I.D._355',
'127.0',
'0.152577319588',
'bin_1_of_4',
'4.50866',
'0.0',
'bin_1_of_4'],
['PC.356',
'ACAGACCACTCA',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061126',
'Control_mouse_I.D._356',
'189.9375',
'0.412113402062',
'bin_3_of_4',
'5.5103',
'0.336999491964',
'bin_2_of_4'],
['PC.481',
'ACCAGCGACTAG',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20070314',
'Control_mouse_I.D._481',
'223.58333333',
'0.550859106515',
'bin_4_of_4',
'6.26648',
'0.59141452714',
'bin_3_of_4'],
['PC.593',
'AGCAGCACTTGT',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20071210',
'Control_mouse_I.D._593',
'90.0',
'0.0',
'bin_1_of_4',
'4.84129',
'0.111912604341',
'bin_1_of_4'],
['PC.607',
'AACTGTGCGTAC',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20071112',
'Fasting_mouse_I.D._607',
'211.0',
'0.498969072165',
'bin_4_of_4',
'7.3172',
'0.944926873089',
'bin_4_of_4'],
['PC.634',
'ACAGAGTCGGCT',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20080116',
'Fasting_mouse_I.D._634',
'146.0',
'0.230927835052',
'bin_2_of_4',
'6.57543',
'0.695360049525',
'bin_4_of_4'],
['PC.635',
'ACCGCAGAGTCA',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20080116',
'Fasting_mouse_I.D._635',
'332.5',
'1.0',
'bin_4_of_4',
'7.48089',
'1.0',
'bin_4_of_4'],
['PC.636', 'ACGGTGAGTGTC', 'YATGCTGCCTCCCGTAGGAGT', 'Fast', '20080116', 'Fasting_mouse_I.D._636', '173.0', '0.342268041237', 'bin_2_of_4', '6.39901', '0.636003943167', 'bin_3_of_4']]
COLLATED_ALPHA_DICT_A = {
'PD_whole_tree': ['\tsequences per sample\titeration\ts1\ts2\ts3',
'rare10.txt\t10\t0\t1.99181\t0.42877\t2.13996',
'rare10.txt\t10\t1\t2.07163\t0.42877\t2.37055',
'rare310.txt\t310\t0\t8.83115\t0.42877\t11.00725',
'rare310.txt\t310\t1\t10.05242\t0.42877\t8.24474',
'rare610.txt\t610\t0\t12.03067\t0.42877\t11.58928',
'rare610.txt\t610\t1\t12.9862\t0.42877\t11.58642'],
'chao1': ['\tsequences per sample\titeration\ts1\ts2\ts3',
'rare10.txt\t10\t0\t4.2\t3.1415\t9.11',
'rare10.txt\t10\t1\t5.6\t3.15\t9.62',
'rare310.txt\t310\t0\t83.11\t5.2012\t8.12',
'rare310.txt\t310\t1\t82.75\t5.2000\t8.24',
'rare610.txt\t610\t0\t11.11\t8.42\t1',
'rare610.txt\t610\t1\t12.111\t8.42\t1']
}
COLLATED_ALPHA_DICT_B = {
'PD_whole_tree': ['\tsequences per sample\titeration\ts1\ts2\ts3',
'rare10.txt\t10\t0\t1.99181\t0.42877\t2.13996',
'rare10.txt\t10\t1\t2.07163\t0.42877\t2.37055',
'rare310.txt\t310\t0\t8.83115\t0.42877\t11.00725',
'rare310.txt\t310\t1\t10.05242\t0.42877\t8.24474',
'rare610.txt\t610\t0\t12.03067\t0.42877\t11.58928',
'rare610.txt\t610\t1\t12.9862\t0.42877\t11.58642'],
'chao1': ['\tsequences per sample\titeration\ts511\ts512\ts3',
'rare10.txt\t10\t0\t4.2\t3.1415\t9.11',
'rare10.txt\t10\t1\t5.6\t3.15\t9.62',
'rare310.txt\t310\t0\t83.11\t5.2012\t8.12',
'rare310.txt\t310\t1\t82.75\t5.2000\t8.24',
'rare610.txt\t610\t0\t11.11\t8.42\t1',
'rare610.txt\t610\t1\t12.111\t8.42\t1']
}
if __name__ == "__main__":
main()
| gpl-2.0 |
qiuzhong/crosswalk-test-suite | apptools/apptools-android-tests/apptools/multiple_icons.py | 3 | 6492 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Liu, Yun <[email protected]>
import unittest
import os
import comm
from xml.etree import ElementTree
import json
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
def test_icon_change_size(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
jsonfile = open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "r")
jsons = jsonfile.read()
jsonfile.close()
jsonDict = json.loads(jsons)
jsonDict["icons"][0]["sizes"] = "528x528"
json.dump(jsonDict, open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "w"))
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
buildstatus = os.system(buildcmd)
comm.clear("org.xwalk.test")
self.assertEquals(buildstatus, 0)
def test_icon_change_any_size(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
jsonfile = open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "r")
jsons = jsonfile.read()
jsonfile.close()
jsonDict = json.loads(jsons)
jsonDict["icons"][0]["sizes"] = "any"
json.dump(jsonDict, open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "w"))
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
buildstatus = os.system(buildcmd)
comm.clear("org.xwalk.test")
self.assertEquals(buildstatus, 0)
def test_icon_gif(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
jsonfile = open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "r")
jsons = jsonfile.read()
jsonfile.close()
jsonDict = json.loads(jsons)
jsonDict["icons"][0]["src"] = "../../../icon/icon.gif"
json.dump(jsonDict, open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "w"))
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
buildstatus = os.system(buildcmd)
comm.clear("org.xwalk.test")
self.assertEquals(buildstatus, 0)
def test_icon_jpg(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
jsonfile = open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "r")
jsons = jsonfile.read()
jsonfile.close()
jsonDict = json.loads(jsons)
jsonDict["icons"][0]["src"] = "../../../icon/icon.jpg"
json.dump(jsonDict, open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "w"))
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
buildstatus = os.system(buildcmd)
comm.clear("org.xwalk.test")
self.assertEquals(buildstatus, 0)
def test_icon_bmp(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
jsonfile = open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "r")
jsons = jsonfile.read()
jsonfile.close()
jsonDict = json.loads(jsons)
jsonDict["icons"][0]["src"] = "../../../icon/icon.bmp"
json.dump(jsonDict, open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "w"))
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
buildstatus = os.system(buildcmd)
comm.clear("org.xwalk.test")
self.assertEquals(buildstatus, 0)
def test_multiple_icons(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
jsonfile = open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "r")
jsons = jsonfile.read()
jsonfile.close()
jsonDict = json.loads(jsons)
jsonDict["icons"] = [{"src":"icon.png","sizes":"72x72"},{"src": "../../../icon/icon.gif","sizes": "82x82"},{"src": "../../../icon/icon.jpg","sizes": "97x97"},{"src": "../../../icon/icon.bmp","sizes": "117x117"}]
json.dump(jsonDict, open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "w"))
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
buildstatus = os.system(buildcmd)
comm.clear("org.xwalk.test")
self.assertEquals(buildstatus, 0)
def test_icons_default(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
jsonfile = open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "r")
jsons = jsonfile.read()
jsonfile.close()
jsonDict = json.loads(jsons)
jsonDict["icons"] = []
json.dump(jsonDict, open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "w"))
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
buildstatus = os.system(buildcmd)
comm.clear("org.xwalk.test")
self.assertEquals(buildstatus, 0)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
TEAM-Gummy/platform_external_chromium_org | tools/run-bisect-perf-regression.py | 26 | 14140 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run Performance Test Bisect Tool
This script is used by a trybot to run the src/tools/bisect-perf-regression.py
script with the parameters specified in run-bisect-perf-regression.cfg. It will
check out a copy of the depot in a subdirectory 'bisect' of the working
directory provided, and run the bisect-perf-regression.py script there.
"""
import imp
import optparse
import os
import subprocess
import sys
import traceback
import bisect_utils
bisect = imp.load_source('bisect-perf-regression',
os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])),
'bisect-perf-regression.py'))
CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'
class Goma(object):
def __init__(self, path_to_goma):
self._abs_path_to_goma = None
self._abs_path_to_goma_file = None
if path_to_goma:
self._abs_path_to_goma = os.path.abspath(path_to_goma)
self._abs_path_to_goma_file = self._GetExecutablePath(
self._abs_path_to_goma)
def __enter__(self):
if self._HasGOMAPath():
self._SetupAndStart()
return self
def __exit__(self, *_):
if self._HasGOMAPath():
self._Stop()
def _HasGOMAPath(self):
return bool(self._abs_path_to_goma)
def _GetExecutablePath(self, path_to_goma):
if os.name == 'nt':
return os.path.join(path_to_goma, 'goma_ctl.bat')
else:
return os.path.join(path_to_goma, 'goma_ctl.sh')
def _SetupEnvVars(self):
if os.name == 'nt':
os.environ['CC'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
' cl.exe')
os.environ['CXX'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
' cl.exe')
else:
os.environ['PATH'] = os.pathsep.join([self._abs_path_to_goma,
os.environ['PATH']])
def _SetupAndStart(self):
"""Sets up GOMA and launches it.
Args:
path_to_goma: Path to goma directory.
Returns:
True if successful."""
self._SetupEnvVars()
# Sometimes goma is lingering around if something went bad on a previous
# run. Stop it before starting a new process. Can ignore the return code
# since it will return an error if it wasn't running.
self._Stop()
if subprocess.call([self._abs_path_to_goma_file, 'start']):
raise RuntimeError('GOMA failed to start.')
def _Stop(self):
subprocess.call([self._abs_path_to_goma_file, 'stop'])
def _LoadConfigFile(path_to_file):
"""Attempts to load the specified config file as a module
and grab the global config dict.
Args:
path_to_file: Path to the file.
Returns:
The config dict which should be formatted as follows:
{'command': string, 'good_revision': string, 'bad_revision': string
'metric': string, etc...}.
Returns None on failure.
"""
try:
local_vars = {}
execfile(path_to_file, local_vars)
return local_vars['config']
except:
print
traceback.print_exc()
print
return {}
def _OutputFailedResults(text_to_print):
bisect_utils.OutputAnnotationStepStart('Results - Failed')
print
print text_to_print
print
bisect_utils.OutputAnnotationStepClosed()
def _CreateBisectOptionsFromConfig(config):
opts_dict = {}
opts_dict['command'] = config['command']
opts_dict['metric'] = config['metric']
if config['repeat_count']:
opts_dict['repeat_test_count'] = int(config['repeat_count'])
if config['truncate_percent']:
opts_dict['truncate_percent'] = int(config['truncate_percent'])
if config['max_time_minutes']:
opts_dict['max_time_minutes'] = int(config['max_time_minutes'])
if config.has_key('use_goma'):
opts_dict['use_goma'] = config['use_goma']
opts_dict['build_preference'] = 'ninja'
opts_dict['output_buildbot_annotations'] = True
if '--browser=cros' in config['command']:
opts_dict['target_platform'] = 'cros'
if os.environ[CROS_BOARD_ENV] and os.environ[CROS_IP_ENV]:
opts_dict['cros_board'] = os.environ[CROS_BOARD_ENV]
opts_dict['cros_remote_ip'] = os.environ[CROS_IP_ENV]
else:
      raise RuntimeError('Cros build selected, but BISECT_CROS_IP or '
                         'BISECT_CROS_BOARD undefined.')
elif 'android' in config['command']:
if 'android-chrome' in config['command']:
opts_dict['target_platform'] = 'android-chrome'
else:
opts_dict['target_platform'] = 'android'
return bisect.BisectOptions.FromDict(opts_dict)
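# A hypothetical config dict accepted by _CreateBisectOptionsFromConfig
# (keys mirror the reads above; the values are illustrative only):
#
#   config = {
#       'command': 'tools/perf/run_benchmark -v --browser=release smoothness',
#       'metric': 'frame_times/frame_times',
#       'repeat_count': '20',
#       'max_time_minutes': '20',
#       'truncate_percent': '25',
#   }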
def _RunPerformanceTest(config, path_to_file):
# Bisect script expects to be run from src
os.chdir(os.path.join(path_to_file, '..'))
bisect_utils.OutputAnnotationStepStart('Building With Patch')
opts = _CreateBisectOptionsFromConfig(config)
b = bisect.BisectPerformanceMetrics(None, opts)
if bisect_utils.RunGClient(['runhooks']):
raise RuntimeError('Failed to run gclient runhooks')
if not b.BuildCurrentRevision('chromium'):
raise RuntimeError('Patched version failed to build.')
bisect_utils.OutputAnnotationStepClosed()
bisect_utils.OutputAnnotationStepStart('Running With Patch')
results_with_patch = b.RunPerformanceTestAndParseResults(
opts.command, opts.metric, reset_on_first_run=True, results_label='Patch')
if results_with_patch[1]:
raise RuntimeError('Patched version failed to run performance test.')
bisect_utils.OutputAnnotationStepClosed()
bisect_utils.OutputAnnotationStepStart('Reverting Patch')
if bisect_utils.RunGClient(['revert']):
    raise RuntimeError('Failed to run gclient revert')
bisect_utils.OutputAnnotationStepClosed()
bisect_utils.OutputAnnotationStepStart('Building Without Patch')
if bisect_utils.RunGClient(['runhooks']):
raise RuntimeError('Failed to run gclient runhooks')
if not b.BuildCurrentRevision('chromium'):
raise RuntimeError('Unpatched version failed to build.')
bisect_utils.OutputAnnotationStepClosed()
bisect_utils.OutputAnnotationStepStart('Running Without Patch')
results_without_patch = b.RunPerformanceTestAndParseResults(
opts.command, opts.metric, upload_on_last_run=True, results_label='ToT')
if results_without_patch[1]:
raise RuntimeError('Unpatched version failed to run performance test.')
# Find the link to the cloud stored results file.
output = results_without_patch[2]
cloud_file_link = [t for t in output.splitlines()
if 'storage.googleapis.com/chromium-telemetry/html-results/' in t]
if cloud_file_link:
# What we're getting here is basically "View online at http://..." so parse
# out just the url portion.
cloud_file_link = cloud_file_link[0]
cloud_file_link = [t for t in cloud_file_link.split(' ')
if 'storage.googleapis.com/chromium-telemetry/html-results/' in t]
assert cloud_file_link, "Couldn't parse url from output."
cloud_file_link = cloud_file_link[0]
else:
cloud_file_link = ''
# Calculate the % difference in the means of the 2 runs.
percent_diff_in_means = (results_with_patch[0]['mean'] /
max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
std_err = bisect.CalculatePooledStandardError(
[results_with_patch[0]['values'], results_without_patch[0]['values']])
bisect_utils.OutputAnnotationStepClosed()
bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
(percent_diff_in_means, std_err))
print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
'Std. Error'.center(20, ' '))
print ' %s %s %s' % ('Patch'.center(10, ' '),
('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
print ' %s %s %s' % ('No Patch'.center(10, ' '),
('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
if cloud_file_link:
bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
bisect_utils.OutputAnnotationStepClosed()
def _SetupAndRunPerformanceTest(config, path_to_file, path_to_goma):
"""Attempts to build and run the current revision with and without the
current patch, with the parameters passed in.
Args:
config: The config read from run-perf-test.cfg.
path_to_file: Path to the bisect-perf-regression.py script.
path_to_goma: Path to goma directory.
Returns:
0 on success, otherwise 1.
"""
try:
with Goma(path_to_goma) as goma:
config['use_goma'] = bool(path_to_goma)
_RunPerformanceTest(config, path_to_file)
return 0
except RuntimeError, e:
bisect_utils.OutputAnnotationStepClosed()
_OutputFailedResults('Error: %s' % e.message)
return 1
def _RunBisectionScript(config, working_directory, path_to_file, path_to_goma,
path_to_extra_src, dry_run):
"""Attempts to execute src/tools/bisect-perf-regression.py with the parameters
passed in.
Args:
config: A dict containing the parameters to pass to the script.
working_directory: A working directory to provide to the
        bisect-perf-regression.py script, where it will store its own copy of
the depot.
path_to_file: Path to the bisect-perf-regression.py script.
path_to_goma: Path to goma directory.
path_to_extra_src: Path to extra source file.
dry_run: Do a dry run, skipping sync, build, and performance testing steps.
Returns:
0 on success, otherwise 1.
"""
bisect_utils.OutputAnnotationStepStart('Config')
print
for k, v in config.iteritems():
print ' %s : %s' % (k, v)
print
bisect_utils.OutputAnnotationStepClosed()
cmd = ['python', os.path.join(path_to_file, 'bisect-perf-regression.py'),
'-c', config['command'],
'-g', config['good_revision'],
'-b', config['bad_revision'],
'-m', config['metric'],
'--working_directory', working_directory,
'--output_buildbot_annotations']
if config['repeat_count']:
cmd.extend(['-r', config['repeat_count']])
if config['truncate_percent']:
cmd.extend(['-t', config['truncate_percent']])
if config['max_time_minutes']:
cmd.extend(['--max_time_minutes', config['max_time_minutes']])
cmd.extend(['--build_preference', 'ninja'])
if '--browser=cros' in config['command']:
cmd.extend(['--target_platform', 'cros'])
    if os.environ.get(CROS_BOARD_ENV) and os.environ.get(CROS_IP_ENV):
cmd.extend(['--cros_board', os.environ[CROS_BOARD_ENV]])
cmd.extend(['--cros_remote_ip', os.environ[CROS_IP_ENV]])
else:
      print 'Error: Cros build selected, but BISECT_CROS_IP or '\
            'BISECT_CROS_BOARD undefined.'
print
return 1
if 'android' in config['command']:
if 'android-chrome' in config['command']:
cmd.extend(['--target_platform', 'android-chrome'])
else:
cmd.extend(['--target_platform', 'android'])
if path_to_goma:
cmd.append('--use_goma')
if path_to_extra_src:
cmd.extend(['--extra_src', path_to_extra_src])
if dry_run:
cmd.extend(['--debug_ignore_build', '--debug_ignore_sync',
'--debug_ignore_perf_test'])
cmd = [str(c) for c in cmd]
with Goma(path_to_goma) as goma:
return_code = subprocess.call(cmd)
if return_code:
print 'Error: bisect-perf-regression.py returned with error %d' %\
return_code
print
return return_code
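# Illustrative only (not part of the original script): a config dict carrying
# the keys _RunBisectionScript reads above. Every value here is a hypothetical
# placeholder, not a real regression range.
_EXAMPLE_BISECT_CONFIG = {
    'command': './tools/perf/run_benchmark --browser=release sunspider',
    'good_revision': '212345',
    'bad_revision': '212399',
    'metric': 'Total/Total',
    'repeat_count': '20',
    'max_time_minutes': '20',
    'truncate_percent': '25',
}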
def main():
usage = ('%prog [options] [-- chromium-options]\n'
'Used by a trybot to run the bisection script using the parameters'
' provided in the run-bisect-perf-regression.cfg file.')
parser = optparse.OptionParser(usage=usage)
parser.add_option('-w', '--working_directory',
type='str',
help='A working directory to supply to the bisection '
'script, which will use it as the location to checkout '
'a copy of the chromium depot.')
parser.add_option('-p', '--path_to_goma',
type='str',
help='Path to goma directory. If this is supplied, goma '
'builds will be enabled.')
parser.add_option('--extra_src',
type='str',
help='Path to extra source file. If this is supplied, '
'bisect script will use this to override default behavior.')
parser.add_option('--dry_run',
action="store_true",
help='The script will perform the full bisect, but '
'without syncing, building, or running the performance '
'tests.')
(opts, args) = parser.parse_args()
path_to_current_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
path_to_bisect_cfg = os.path.join(path_to_current_directory,
'run-bisect-perf-regression.cfg')
config = _LoadConfigFile(path_to_bisect_cfg)
# Check if the config is empty
config_has_values = [v for v in config.values() if v]
if config and config_has_values:
if not opts.working_directory:
print 'Error: missing required parameter: --working_directory'
print
parser.print_help()
return 1
return _RunBisectionScript(config, opts.working_directory,
path_to_current_directory, opts.path_to_goma, opts.extra_src,
opts.dry_run)
else:
perf_cfg_files = ['run-perf-test.cfg', os.path.join('..', 'third_party',
'WebKit', 'Tools', 'run-perf-test.cfg')]
for current_perf_cfg_file in perf_cfg_files:
path_to_perf_cfg = os.path.join(
os.path.abspath(os.path.dirname(sys.argv[0])), current_perf_cfg_file)
config = _LoadConfigFile(path_to_perf_cfg)
config_has_values = [v for v in config.values() if v]
if config and config_has_values:
return _SetupAndRunPerformanceTest(config, path_to_current_directory,
opts.path_to_goma)
print 'Error: Could not load config file. Double check your changes to '\
'run-bisect-perf-regression.cfg/run-perf-test.cfg for syntax errors.'
print
return 1
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
HenryTheHamster/cloud-init | cloudinit/config/cc_disk_setup.py | 5 | 27517 | # vi: ts=4 expandtab
#
# Copyright (C) 2009-2010 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Ben Howard <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
import logging
import os
import shlex
frequency = PER_INSTANCE
# Define the commands to use
UDEVADM_CMD = util.which('udevadm')
SFDISK_CMD = util.which("sfdisk")
SGDISK_CMD = util.which("sgdisk")
LSBLK_CMD = util.which("lsblk")
BLKID_CMD = util.which("blkid")
BLKDEV_CMD = util.which("blockdev")
WIPEFS_CMD = util.which("wipefs")
LOG = logging.getLogger(__name__)
def handle(_name, cfg, cloud, log, _args):
"""
See doc/examples/cloud-config_disk-setup.txt for documentation on the
format.
"""
disk_setup = cfg.get("disk_setup")
if isinstance(disk_setup, dict):
update_disk_setup_devices(disk_setup, cloud.device_name_to_device)
log.debug("Partitioning disks: %s", str(disk_setup))
for disk, definition in disk_setup.items():
if not isinstance(definition, dict):
log.warn("Invalid disk definition for %s" % disk)
continue
try:
log.debug("Creating new partition table/disk")
util.log_time(logfunc=LOG.debug,
msg="Creating partition on %s" % disk,
func=mkpart, args=(disk, definition))
except Exception as e:
util.logexc(LOG, "Failed partitioning operation\n%s" % e)
fs_setup = cfg.get("fs_setup")
if isinstance(fs_setup, list):
log.debug("setting up filesystems: %s", str(fs_setup))
update_fs_setup_devices(fs_setup, cloud.device_name_to_device)
for definition in fs_setup:
if not isinstance(definition, dict):
log.warn("Invalid file system definition: %s" % definition)
continue
try:
log.debug("Creating new filesystem.")
device = definition.get('device')
util.log_time(logfunc=LOG.debug,
msg="Creating fs for %s" % device,
func=mkfs, args=(definition,))
except Exception as e:
util.logexc(LOG, "Failed during filesystem operation\n%s" % e)
def update_disk_setup_devices(disk_setup, tformer):
    # update 'disk_setup' dictionary anywhere where a device may occur
# update it with the response from 'tformer'
for origname in disk_setup.keys():
transformed = tformer(origname)
if transformed is None or transformed == origname:
continue
if transformed in disk_setup:
LOG.info("Replacing %s in disk_setup for translation of %s",
origname, transformed)
del disk_setup[transformed]
disk_setup[transformed] = disk_setup[origname]
disk_setup[transformed]['_origname'] = origname
del disk_setup[origname]
LOG.debug("updated disk_setup device entry '%s' to '%s'",
origname, transformed)
def update_fs_setup_devices(disk_setup, tformer):
    # update 'fs_setup' dictionary anywhere where a device may occur
# update it with the response from 'tformer'
for definition in disk_setup:
if not isinstance(definition, dict):
LOG.warn("entry in disk_setup not a dict: %s", definition)
continue
origname = definition.get('device')
if origname is None:
continue
(dev, part) = util.expand_dotted_devname(origname)
tformed = tformer(dev)
if tformed is not None:
dev = tformed
LOG.debug("%s is mapped to disk=%s part=%s",
origname, tformed, part)
definition['_origname'] = origname
definition['device'] = tformed
if part and 'partition' in definition:
definition['_partition'] = definition['partition']
definition['partition'] = part
def value_splitter(values, start=None):
"""
    Yields the key/value pairs of output sent as a string,
    like: FOO='BAR' HOME='127.0.0.1'
"""
_values = shlex.split(values)
if start:
_values = _values[start:]
    for key, value in [x.split('=', 1) for x in _values]:  # split on first '=' only
yield key, value
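# Example (illustrative): parsing one line of `lsblk --pairs` output.
#   dict(value_splitter('NAME="sda1" TYPE="part" FSTYPE="ext4" LABEL=""'))
#   -> {'NAME': 'sda1', 'TYPE': 'part', 'FSTYPE': 'ext4', 'LABEL': ''}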
def enumerate_disk(device, nodeps=False):
"""
Enumerate the elements of a child device.
Parameters:
device: the kernel device name
nodeps <BOOL>: don't enumerate children devices
Return a dict describing the disk:
        type: the entry type, i.e. disk or part
fstype: the filesystem type, if it exists
label: file system label, if it exists
name: the device name, i.e. sda
"""
lsblk_cmd = [LSBLK_CMD, '--pairs', '--output', 'NAME,TYPE,FSTYPE,LABEL',
device]
if nodeps:
lsblk_cmd.append('--nodeps')
info = None
try:
info, _err = util.subp(lsblk_cmd)
except Exception as e:
raise Exception("Failed during disk check for %s\n%s" % (device, e))
parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0]
for part in parts:
d = {'name': None,
'type': None,
'fstype': None,
'label': None,
}
for key, value in value_splitter(part):
d[key.lower()] = value
yield d
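# Illustrative: for a disk holding one ext4 partition, enumerate_disk('/dev/sda')
# yields dicts resembling
#   {'name': 'sda', 'type': 'disk', 'fstype': '', 'label': ''}
#   {'name': 'sda1', 'type': 'part', 'fstype': 'ext4', 'label': 'root'}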
def device_type(device):
"""
Return the device type of the device by calling lsblk.
"""
for d in enumerate_disk(device, nodeps=True):
if "type" in d:
return d["type"].lower()
return None
def is_device_valid(name, partition=False):
"""
Check if the device is a valid device.
"""
d_type = ""
try:
d_type = device_type(name)
    except Exception:
LOG.warn("Query against device %s failed" % name)
return False
if partition and d_type == 'part':
return True
elif not partition and d_type == 'disk':
return True
return False
def check_fs(device):
"""
Check if the device has a filesystem on it
Output of blkid is generally something like:
/dev/sda: LABEL="Backup500G" UUID="..." TYPE="ext4"
Return values are device, label, type, uuid
"""
out, label, fs_type, uuid = None, None, None, None
blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device]
try:
out, _err = util.subp(blkid_cmd, rcs=[0, 2])
except Exception as e:
raise Exception("Failed during disk check for %s\n%s" % (device, e))
if out:
if len(out.splitlines()) == 1:
for key, value in value_splitter(out, start=1):
if key.lower() == 'label':
label = value
elif key.lower() == 'type':
fs_type = value
elif key.lower() == 'uuid':
uuid = value
return label, fs_type, uuid
def is_filesystem(device):
"""
Returns true if the device has a file system.
"""
_, fs_type, _ = check_fs(device)
return fs_type
def find_device_node(device, fs_type=None, label=None, valid_targets=None,
label_match=True, replace_fs=None):
"""
    Find a device that either matches the spec, or the first suitable one.
    The return value is (<device>, <bool>) where the device is the
device to use and the bool is whether the device matches the
fs_type and label.
Note: This works with GPT partition tables!
"""
# label of None is same as no label
if label is None:
label = ""
if not valid_targets:
valid_targets = ['disk', 'part']
raw_device_used = False
for d in enumerate_disk(device):
if d['fstype'] == replace_fs and label_match is False:
# We found a device where we want to replace the FS
return ('/dev/%s' % d['name'], False)
if (d['fstype'] == fs_type and
((label_match and d['label'] == label) or not label_match)):
# If we find a matching device, we return that
return ('/dev/%s' % d['name'], True)
if d['type'] in valid_targets:
if d['type'] != 'disk' or d['fstype']:
raw_device_used = True
if d['type'] == 'disk':
                # Skip the raw disk, it's the default
pass
elif not d['fstype']:
return ('/dev/%s' % d['name'], False)
if not raw_device_used:
return (device, False)
LOG.warn("Failed to find device during available device search.")
return (None, False)
def is_disk_used(device):
"""
    Check if the device is currently used. Returns true if the device
    has either a file system or a partition entry.
"""
    # If the child count is higher than 1, then there are child nodes
# such as partition or device mapper nodes
if len(list(enumerate_disk(device))) > 1:
return True
    # If we see a file system, then it's used
_, check_fstype, _ = check_fs(device)
if check_fstype:
return True
return False
def get_dyn_func(*args):
"""
Call the appropriate function.
    The first value is the template for the function name
    The second value is the template replacement
    The remaining values are passed to the function
For example: get_dyn_func("foo_%s", 'bar', 1, 2, 3,)
would call "foo_bar" with args of 1, 2, 3
"""
if len(args) < 2:
raise Exception("Unable to determine dynamic funcation name")
func_name = (args[0] % args[1])
func_args = args[2:]
try:
if func_args:
return globals()[func_name](*func_args)
else:
return globals()[func_name]
except KeyError:
raise Exception("No such function %s to call!" % func_name)
def get_mbr_hdd_size(device):
size_cmd = [SFDISK_CMD, '--show-size', device]
size = None
try:
size, _err = util.subp(size_cmd)
except Exception as e:
raise Exception("Failed to get %s size\n%s" % (device, e))
return int(size.strip())
def get_gpt_hdd_size(device):
out, _ = util.subp([SGDISK_CMD, '-p', device])
return out.splitlines()[0].split()[2]
def get_hdd_size(table_type, device):
"""
Returns the hard disk size.
This works with any disk type, including GPT.
"""
return get_dyn_func("get_%s_hdd_size", table_type, device)
def check_partition_mbr_layout(device, layout):
"""
Returns true if the partition layout matches the one on the disk
Layout should be a list of values. At this time, this only
verifies that the number of partitions and their labels is correct.
"""
read_parttbl(device)
prt_cmd = [SFDISK_CMD, "-l", device]
try:
out, _err = util.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
raise Exception("Error running partition command on %s\n%s" % (
device, e))
found_layout = []
for line in out.splitlines():
_line = line.split()
if len(_line) == 0:
continue
if device in _line[0]:
# We don't understand extended partitions yet
if _line[-1].lower() in ['extended', 'empty']:
continue
# Find the partition types
type_label = None
for x in sorted(range(1, len(_line)), reverse=True):
if _line[x].isdigit() and _line[x] != '/':
type_label = _line[x]
break
found_layout.append(type_label)
return found_layout
def check_partition_gpt_layout(device, layout):
prt_cmd = [SGDISK_CMD, '-p', device]
try:
out, _err = util.subp(prt_cmd)
except Exception as e:
raise Exception("Error running partition command on %s\n%s" % (
device, e))
out_lines = iter(out.splitlines())
# Skip header
for line in out_lines:
if line.strip().startswith('Number'):
break
return [line.strip().split()[-1] for line in out_lines]
def check_partition_layout(table_type, device, layout):
"""
    See if the partition layout matches.
    This is a future-proofing function. In order
to add support for other disk layout schemes, add a
function called check_partition_%s_layout
"""
found_layout = get_dyn_func(
"check_partition_%s_layout", table_type, device, layout)
if isinstance(layout, bool):
        # if we are using auto partitioning, or "True", be happy
# if a single partition exists.
if layout and len(found_layout) >= 1:
return True
return False
else:
if len(found_layout) != len(layout):
return False
else:
# This just makes sure that the number of requested
# partitions and the type labels are right
for x in range(1, len(layout) + 1):
if isinstance(layout[x - 1], tuple):
                    _, part_type = layout[x - 1]  # layout is 0-indexed
                    if int(found_layout[x - 1]) != int(part_type):
return False
return True
return False
def get_partition_mbr_layout(size, layout):
"""
Calculate the layout of the partition table. Partition sizes
are defined as percentage values or a tuple of percentage and
partition type.
For example:
        [ 33, [66, 82] ]
    Defines the first partition to be a size of 1/3 the disk,
    while the remaining 2/3 will be of type Linux Swap (type 82).
"""
if not isinstance(layout, list) and isinstance(layout, bool):
# Create a single partition
return "0,"
if ((len(layout) == 0 and isinstance(layout, list)) or
not isinstance(layout, list)):
raise Exception("Partition layout is invalid")
last_part_num = len(layout)
if last_part_num > 4:
raise Exception("Only simply partitioning is allowed.")
part_definition = []
part_num = 0
for part in layout:
part_type = 83 # Default to Linux
percent = part
part_num += 1
if isinstance(part, list):
if len(part) != 2:
raise Exception("Partition was incorrectly defined: %s" % part)
percent, part_type = part
part_size = int((float(size) * (float(percent) / 100)) / 1024)
if part_num == last_part_num:
part_definition.append(",,%s" % part_type)
else:
part_definition.append(",%s,%s" % (part_size, part_type))
sfdisk_definition = "\n".join(part_definition)
if len(part_definition) > 4:
raise Exception("Calculated partition definition is too big\n%s" %
sfdisk_definition)
return sfdisk_definition
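# Worked example (illustrative): for a disk reported as 1048576 one-KiB blocks
# (1 GiB), layout [33, [66, 82]] produces the sfdisk input
#   ",337,83\n,,82"
# i.e. a ~337 MB Linux (type 83) partition, then swap (82) taking the rest.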
def get_partition_gpt_layout(size, layout):
if isinstance(layout, bool):
return [(None, [0, 0])]
partition_specs = []
for partition in layout:
if isinstance(partition, list):
if len(partition) != 2:
raise Exception(
"Partition was incorrectly defined: %s" % partition)
percent, partition_type = partition
else:
percent = partition
partition_type = None
part_size = int(float(size) * (float(percent) / 100))
partition_specs.append((partition_type, [0, '+{}'.format(part_size)]))
# The last partition should use up all remaining space
partition_specs[-1][-1][-1] = 0
return partition_specs
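# Worked example (illustrative): layout [33, [66, 82]] on a 1,000,000-byte
# disk yields [(None, [0, '+330000']), (82, [0, 0])] -- the trailing 0 end
# lets sgdisk give the last partition all remaining space.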
def purge_disk_ptable(device):
# wipe the first and last megabyte of a disk (or file)
# gpt stores partition table both at front and at end.
null = '\0'
start_len = 1024 * 1024
end_len = 1024 * 1024
with open(device, "rb+") as fp:
fp.write(null * (start_len))
fp.seek(-end_len, os.SEEK_END)
fp.write(null * end_len)
fp.flush()
read_parttbl(device)
def purge_disk(device):
"""
    Remove partition table entries
"""
# wipe any file systems first
for d in enumerate_disk(device):
if d['type'] not in ["disk", "crypt"]:
wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']]
try:
LOG.info("Purging filesystem on /dev/%s" % d['name'])
util.subp(wipefs_cmd)
except Exception:
raise Exception("Failed FS purge of /dev/%s" % d['name'])
purge_disk_ptable(device)
def get_partition_layout(table_type, size, layout):
"""
Call the appropriate function for creating the table
definition. Returns the table definition
    This is a future-proofing function. To add support for
other layouts, simply add a "get_partition_%s_layout"
function.
"""
return get_dyn_func("get_partition_%s_layout", table_type, size, layout)
def read_parttbl(device):
"""
    Re-read the partition table using 'blockdev --rereadpt', running
    'udevadm settle' before and after so device events don't race it.
"""
blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
udev_cmd = [UDEVADM_CMD, 'settle']
try:
util.subp(udev_cmd)
util.subp(blkdev_cmd)
util.subp(udev_cmd)
except Exception as e:
util.logexc(LOG, "Failed reading the partition table %s" % e)
def exec_mkpart_mbr(device, layout):
"""
Break out of mbr partition to allow for future partition
types, i.e. gpt
"""
# Create the partitions
prt_cmd = [SFDISK_CMD, "--Linux", "-uM", device]
try:
util.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
raise Exception("Failed to partition device %s\n%s" % (device, e))
read_parttbl(device)
def exec_mkpart_gpt(device, layout):
try:
util.subp([SGDISK_CMD, '-Z', device])
for index, (partition_type, (start, end)) in enumerate(layout):
index += 1
util.subp([SGDISK_CMD,
'-n', '{}:{}:{}'.format(index, start, end), device])
if partition_type is not None:
util.subp(
[SGDISK_CMD,
'-t', '{}:{}'.format(index, partition_type), device])
except Exception:
LOG.warn("Failed to partition device %s" % device)
raise
def exec_mkpart(table_type, device, layout):
"""
Fetches the function for creating the table type.
    This allows us to dynamically find which function to call.
    Parameters:
table_type: type of partition table to use
device: the device to work on
layout: layout definition specific to partition table
"""
return get_dyn_func("exec_mkpart_%s", table_type, device, layout)
def mkpart(device, definition):
"""
Creates the partition table.
Parameters:
definition: dictionary describing how to create the partition.
The following are supported values in the dict:
overwrite: Should the partition table be created regardless
                            of any pre-existing data?
layout: the layout of the partition table
table_type: Which partition table to use, defaults to MBR
device: the device to work on.
"""
# ensure that we get a real device rather than a symbolic link
device = os.path.realpath(device)
LOG.debug("Checking values for %s definition" % device)
overwrite = definition.get('overwrite', False)
layout = definition.get('layout', False)
table_type = definition.get('table_type', 'mbr')
# Check if the default device is a partition or not
LOG.debug("Checking against default devices")
if (isinstance(layout, bool) and not layout) or not layout:
LOG.debug("Device is not to be partitioned, skipping")
return # Device is not to be partitioned
# This prevents you from overwriting the device
LOG.debug("Checking if device %s is a valid device", device)
if not is_device_valid(device):
raise Exception("Device %s is not a disk device!", device)
# Remove the partition table entries
if isinstance(layout, str) and layout.lower() == "remove":
LOG.debug("Instructed to remove partition table entries")
purge_disk(device)
return
LOG.debug("Checking if device layout matches")
if check_partition_layout(table_type, device, layout):
LOG.debug("Device partitioning layout matches")
return True
LOG.debug("Checking if device is safe to partition")
if not overwrite and (is_disk_used(device) or is_filesystem(device)):
LOG.debug("Skipping partitioning on configured device %s" % device)
return
LOG.debug("Checking for device size")
device_size = get_hdd_size(table_type, device)
LOG.debug("Calculating partition layout")
part_definition = get_partition_layout(table_type, device_size, layout)
LOG.debug(" Layout is: %s" % part_definition)
LOG.debug("Creating partition table on %s", device)
exec_mkpart(table_type, device, part_definition)
LOG.debug("Partition table created for %s", device)
def lookup_force_flag(fs):
"""
    A force flag might be -F or -f; this looks it up
"""
flags = {'ext': '-F',
'btrfs': '-f',
'xfs': '-f',
'reiserfs': '-f',
}
if 'ext' in fs.lower():
fs = 'ext'
if fs.lower() in flags:
return flags[fs]
LOG.warn("Force flag for %s is unknown." % fs)
return ''
def mkfs(fs_cfg):
"""
Create a file system on the device.
label: defines the label to use on the device
fs_cfg: defines how the filesystem is to look
The following values are required generally:
device: which device or cloud defined default_device
filesystem: which file system type
overwrite: indiscriminately create the file system
partition: when device does not define a partition,
setting this to a number will mean
device + partition. When set to 'auto', the
first free device or the first device which
matches both label and type will be used.
'any' means the first filesystem that matches
on the device.
When 'cmd' is provided then no other parameter is required.
"""
label = fs_cfg.get('label')
device = fs_cfg.get('device')
partition = str(fs_cfg.get('partition', 'any'))
fs_type = fs_cfg.get('filesystem')
fs_cmd = fs_cfg.get('cmd', [])
fs_opts = fs_cfg.get('extra_opts', [])
fs_replace = fs_cfg.get('replace_fs', False)
overwrite = fs_cfg.get('overwrite', False)
# ensure that we get a real device rather than a symbolic link
device = os.path.realpath(device)
# This allows you to define the default ephemeral or swap
LOG.debug("Checking %s against default devices", device)
if not partition or partition.isdigit():
# Handle manual definition of partition
if partition.isdigit():
device = "%s%s" % (device, partition)
LOG.debug("Manual request of partition %s for %s",
partition, device)
# Check to see if the fs already exists
LOG.debug("Checking device %s", device)
check_label, check_fstype, _ = check_fs(device)
LOG.debug("Device %s has %s %s", device, check_label, check_fstype)
if check_label == label and check_fstype == fs_type:
LOG.debug("Existing file system found at %s", device)
if not overwrite:
LOG.debug("Device %s has required file system", device)
return
else:
LOG.warn("Destroying filesystem on %s", device)
else:
LOG.debug("Device %s is cleared for formating", device)
elif partition and str(partition).lower() in ('auto', 'any'):
# For auto devices, we match if the filesystem does exist
odevice = device
LOG.debug("Identifying device to create %s filesytem on", label)
        # 'any' means pick the first match on the device with matching fs_type
label_match = True
if partition.lower() == 'any':
label_match = False
device, reuse = find_device_node(device, fs_type=fs_type, label=label,
label_match=label_match,
replace_fs=fs_replace)
LOG.debug("Automatic device for %s identified as %s", odevice, device)
if reuse:
LOG.debug("Found filesystem match, skipping formating.")
return
if not reuse and fs_replace and device:
LOG.debug("Replacing file system on %s as instructed." % device)
if not device:
LOG.debug("No device aviable that matches request. "
"Skipping fs creation for %s", fs_cfg)
return
elif not partition or str(partition).lower() == 'none':
LOG.debug("Using the raw device to place filesystem %s on" % label)
else:
LOG.debug("Error in device identification handling.")
return
LOG.debug("File system %s will be created on %s", label, device)
# Make sure the device is defined
if not device:
LOG.warn("Device is not known: %s", device)
return
# Check that we can create the FS
if not (fs_type or fs_cmd):
raise Exception("No way to create filesystem '%s'. fs_type or fs_cmd "
"must be set.", label)
# Create the commands
if fs_cmd:
fs_cmd = fs_cfg['cmd'] % {'label': label,
'filesystem': fs_type,
'device': device,
}
else:
# Find the mkfs command
mkfs_cmd = util.which("mkfs.%s" % fs_type)
if not mkfs_cmd:
mkfs_cmd = util.which("mk%s" % fs_type)
if not mkfs_cmd:
LOG.warn("Cannot create fstype '%s'. No mkfs.%s command", fs_type,
fs_type)
return
fs_cmd = [mkfs_cmd, device]
if label:
fs_cmd.extend(["-L", label])
        # File systems that support a force flag (e.g. -F or -f)
        if overwrite or device_type(device) == "disk":
            fs_cmd.append(lookup_force_flag(fs_type))
        # Add the extra FS options
if fs_opts:
fs_cmd.extend(fs_opts)
LOG.debug("Creating file system %s on %s", label, device)
LOG.debug(" Using cmd: %s", " ".join(fs_cmd))
try:
util.subp(fs_cmd)
except Exception as e:
raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e))
| gpl-3.0 |
zestrada/nova-cs498cc | nova/consoleauth/rpcapi.py | 5 | 2856 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the consoleauth RPC API.
"""
from oslo.config import cfg
import nova.openstack.common.rpc.proxy
CONF = cfg.CONF
class ConsoleAuthAPI(nova.openstack.common.rpc.proxy.RpcProxy):
'''Client side of the consoleauth rpc API.
API version history:
1.0 - Initial version.
1.1 - Added get_backdoor_port()
1.2 - Added instance_uuid to authorize_console, and
delete_tokens_for_instance
'''
#
# NOTE(russellb): This is the default minimum version that the server
# (manager) side must implement unless otherwise specified using a version
# argument to self.call()/cast()/etc. here. It should be left as X.0 where
# X is the current major API version (1.0, 2.0, ...). For more information
# about rpc API versioning, see the docs in
# openstack/common/rpc/dispatcher.py.
#
BASE_RPC_API_VERSION = '1.0'
def __init__(self):
super(ConsoleAuthAPI, self).__init__(
topic=CONF.consoleauth_topic,
default_version=self.BASE_RPC_API_VERSION)
def authorize_console(self, ctxt, token, console_type, host, port,
internal_access_path, instance_uuid=None):
# The remote side doesn't return anything, but we want to block
# until it completes.
return self.call(ctxt,
self.make_msg('authorize_console',
token=token, console_type=console_type,
host=host, port=port,
internal_access_path=internal_access_path,
instance_uuid=instance_uuid),
version="1.2")
def check_token(self, ctxt, token):
return self.call(ctxt, self.make_msg('check_token', token=token))
def delete_tokens_for_instance(self, ctxt, instance_uuid):
return self.cast(ctxt,
self.make_msg('delete_tokens_for_instance',
instance_uuid=instance_uuid),
version="1.2")
def get_backdoor_port(self, ctxt, host):
return self.call(ctxt, self.make_msg('get_backdoor_port'),
version='1.1')
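# Usage sketch (illustrative): how a caller would drive this API. The console
# type and token values here are hypothetical.
#   api = ConsoleAuthAPI()
#   api.authorize_console(context, token, 'novnc', host, port,
#                         internal_access_path, instance_uuid=instance_uuid)
#   api.check_token(context, token)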
| apache-2.0 |
ojengwa/talk | venv/lib/python2.7/site-packages/django/contrib/staticfiles/finders.py | 106 | 9852 | from collections import OrderedDict
import os
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import default_storage, Storage, FileSystemStorage
from django.utils.functional import empty, LazyObject
from django.utils.module_loading import import_string
from django.utils._os import safe_join
from django.utils import six, lru_cache
from django.contrib.staticfiles import utils
# To keep track of which directories the finder has already searched for static files.
searched_locations = []
class BaseFinder(object):
"""
A base file finder to be used for custom staticfiles finder classes.
"""
def find(self, path, all=False):
"""
Given a relative file path this ought to find an
absolute file path.
If the ``all`` parameter is ``False`` (default) only
the first found file path will be returned; if set
to ``True`` a list of all found files paths is returned.
"""
raise NotImplementedError('subclasses of BaseFinder must provide a find() method')
def list(self, ignore_patterns):
"""
Given an optional list of paths to ignore, this should return
a two item iterable consisting of the relative path and storage
instance.
"""
raise NotImplementedError('subclasses of BaseFinder must provide a list() method')
class FileSystemFinder(BaseFinder):
"""
A static files finder that uses the ``STATICFILES_DIRS`` setting
to locate files.
"""
def __init__(self, app_names=None, *args, **kwargs):
# List of locations with static files
self.locations = []
# Maps dir paths to an appropriate storage instance
self.storages = OrderedDict()
if not isinstance(settings.STATICFILES_DIRS, (list, tuple)):
raise ImproperlyConfigured(
"Your STATICFILES_DIRS setting is not a tuple or list; "
"perhaps you forgot a trailing comma?")
for root in settings.STATICFILES_DIRS:
if isinstance(root, (list, tuple)):
prefix, root = root
else:
prefix = ''
if settings.STATIC_ROOT and os.path.abspath(settings.STATIC_ROOT) == os.path.abspath(root):
raise ImproperlyConfigured(
"The STATICFILES_DIRS setting should "
"not contain the STATIC_ROOT setting")
if (prefix, root) not in self.locations:
self.locations.append((prefix, root))
for prefix, root in self.locations:
filesystem_storage = FileSystemStorage(location=root)
filesystem_storage.prefix = prefix
self.storages[root] = filesystem_storage
super(FileSystemFinder, self).__init__(*args, **kwargs)
def find(self, path, all=False):
"""
Looks for files in the extra locations
as defined in ``STATICFILES_DIRS``.
"""
matches = []
for prefix, root in self.locations:
if root not in searched_locations:
searched_locations.append(root)
matched_path = self.find_location(root, path, prefix)
if matched_path:
if not all:
return matched_path
matches.append(matched_path)
return matches
def find_location(self, root, path, prefix=None):
"""
Finds a requested static file in a location, returning the found
absolute path (or ``None`` if no match).
"""
if prefix:
prefix = '%s%s' % (prefix, os.sep)
if not path.startswith(prefix):
return None
path = path[len(prefix):]
path = safe_join(root, path)
if os.path.exists(path):
return path
def list(self, ignore_patterns):
"""
List all files in all locations.
"""
for prefix, root in self.locations:
storage = self.storages[root]
for path in utils.get_files(storage, ignore_patterns):
yield path, storage
class AppDirectoriesFinder(BaseFinder):
"""
A static files finder that looks in the directory of each app as
specified in the source_dir attribute.
"""
storage_class = FileSystemStorage
source_dir = 'static'
def __init__(self, app_names=None, *args, **kwargs):
# The list of apps that are handled
self.apps = []
# Mapping of app names to storage instances
self.storages = OrderedDict()
app_configs = apps.get_app_configs()
if app_names:
app_names = set(app_names)
app_configs = [ac for ac in app_configs if ac.name in app_names]
for app_config in app_configs:
app_storage = self.storage_class(
os.path.join(app_config.path, self.source_dir))
if os.path.isdir(app_storage.location):
self.storages[app_config.name] = app_storage
if app_config.name not in self.apps:
self.apps.append(app_config.name)
super(AppDirectoriesFinder, self).__init__(*args, **kwargs)
def list(self, ignore_patterns):
"""
List all files in all app storages.
"""
for storage in six.itervalues(self.storages):
if storage.exists(''): # check if storage location exists
for path in utils.get_files(storage, ignore_patterns):
yield path, storage
def find(self, path, all=False):
"""
Looks for files in the app directories.
"""
matches = []
for app in self.apps:
app_location = self.storages[app].location
if app_location not in searched_locations:
searched_locations.append(app_location)
match = self.find_in_app(app, path)
if match:
if not all:
return match
matches.append(match)
return matches
def find_in_app(self, app, path):
"""
Find a requested static file in an app's static locations.
"""
storage = self.storages.get(app, None)
if storage:
# only try to find a file if the source dir actually exists
if storage.exists(path):
matched_path = storage.path(path)
if matched_path:
return matched_path
class BaseStorageFinder(BaseFinder):
"""
    A base static files finder intended to be extended
    with a custom storage class.
"""
storage = None
def __init__(self, storage=None, *args, **kwargs):
if storage is not None:
self.storage = storage
if self.storage is None:
raise ImproperlyConfigured("The staticfiles storage finder %r "
"doesn't have a storage class "
"assigned." % self.__class__)
        # Make sure we have a storage instance here.
if not isinstance(self.storage, (Storage, LazyObject)):
self.storage = self.storage()
super(BaseStorageFinder, self).__init__(*args, **kwargs)
def find(self, path, all=False):
"""
Looks for files in the default file storage, if it's local.
"""
try:
self.storage.path('')
except NotImplementedError:
pass
else:
if self.storage.location not in searched_locations:
searched_locations.append(self.storage.location)
if self.storage.exists(path):
match = self.storage.path(path)
if all:
match = [match]
return match
return []
def list(self, ignore_patterns):
"""
List all files of the storage.
"""
for path in utils.get_files(self.storage, ignore_patterns):
yield path, self.storage
class DefaultStorageFinder(BaseStorageFinder):
"""
A static files finder that uses the default storage backend.
"""
storage = default_storage
def __init__(self, *args, **kwargs):
super(DefaultStorageFinder, self).__init__(*args, **kwargs)
base_location = getattr(self.storage, 'base_location', empty)
if not base_location:
raise ImproperlyConfigured("The storage backend of the "
"staticfiles finder %r doesn't have "
"a valid location." % self.__class__)
def find(path, all=False):
"""
Find a static file with the given path using all enabled finders.
If ``all`` is ``False`` (default), return the first matching
absolute path (or ``None`` if no match). Otherwise return a list.
"""
searched_locations[:] = []
matches = []
for finder in get_finders():
result = finder.find(path, all=all)
if not all and result:
return result
if not isinstance(result, (list, tuple)):
result = [result]
matches.extend(result)
if matches:
return matches
# No match.
return [] if all else None
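# Usage sketch (illustrative): locating a static asset through this module.
#   from django.contrib.staticfiles import finders
#   first_match = finders.find('css/base.css')            # path or None
#   every_match = finders.find('css/base.css', all=True)  # list of paths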
def get_finders():
for finder_path in settings.STATICFILES_FINDERS:
yield get_finder(finder_path)
@lru_cache.lru_cache(maxsize=None)
def get_finder(import_path):
"""
Imports the staticfiles finder class described by import_path, where
import_path is the full Python path to the class.
"""
Finder = import_string(import_path)
if not issubclass(Finder, BaseFinder):
raise ImproperlyConfigured('Finder "%s" is not a subclass of "%s"' %
(Finder, BaseFinder))
return Finder()
| mit |
adailtonjn68/pwm-control | pwm_control.py | 1 | 4004 | #!/usr/bin/python3
'''
This program was created by Adailton Braga Júnior in July/2015.
Its main purpose is to allow the user to control the Arduino's PWM ports.
'''
from tkinter import *
import serial
from time import sleep
class Serialport(object):
'''This class takes care of the arduino connection using the serial port'''
def __init__(self, port, baudrate):
self.port = port
self.baudrate = baudrate
self.connection = serial.Serial(self.port, self.baudrate)
def send_data(self):
self.connection.write(b'\n')
def close_port(self):
self.connection.write(b'0')
self.send_data()
self.connection.close()
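# Usage sketch (illustrative): driving the PWM pin manually with this class.
#   port = Serialport('/dev/ttyACM0', 9600)
#   port.connection.write(b'128')  # duty cycle on the 0-255 scale
#   port.send_data()               # terminate the value with '\n'
#   port.close_port()              # writes 0 first, then closes the port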
class Window_settings(object):
'''In this class is set the window features'''
def __init__(self, master):
self.master = master
self.master.title('PWM Control')
self.master.resizable(0,0)
self.master.geometry('300x200')
self.scale_value = IntVar()
self.scale = Scale(self.master, from_=0, to=255, orient=HORIZONTAL, sliderlength=8, showvalue=0, variable=self.scale_value)
        self.state_button = Button(self.master, text='Turn PWM On')
self.quit_button = Button(self.master, text='Quit')
self.intensity = Label(self.master, text='Intensity: 0%')
self.image_function()
self.scale.pack(fill=X)
self.intensity.pack()
self.state_button.pack()
self.quit_button.pack(side=BOTTOM)
def image_function(self):
self.image = PhotoImage(file='arduino.gif')
Label(self.master, image=self.image).pack(side=TOP)
def close_window(self):
self.master.destroy()
class Everything_happens(object):
'''Everything happens in this class'''
def __init__(self, serial_part, window_part):
self.serial_part = serial_part
self.window_part = window_part
self.window_part.master.protocol('WM_DELETE_WINDOW', self.close_all)
self.window_part.scale.configure(command=self.callback_scale)
self.window_part.quit_button.configure(command=self.close_all)
self.window_part.state_button.configure(command=self.button_clicked)
self.state = 0
def callback_scale(self, *args):
# This method is called when the scale's value is changed
self.value = self.window_part.scale_value.get()
if self.state==0 and self.value!=0:
self.serial_part.connection.write(str.encode(str(self.value)))
self.serial_part.send_data()
self.window_part.intensity.configure(text='Intensity: ' + str(int(self.value*100/255)) + '%')
self.state = 1
            self.window_part.state_button.configure(text='Turn PWM Off')
elif self.state==0 and self.value==0:
self.serial_part.connection.write(b'0')
self.serial_part.send_data()
self.window_part.intensity.configure(text='Intensity: 0%')
            self.window_part.state_button.configure(text='Turn PWM On')
elif self.state==1 and self.value==0:
self.serial_part.connection.write(b'0')
self.serial_part.send_data()
            self.window_part.state_button.configure(text='Turn PWM On')
self.window_part.intensity.configure(text='Intensity: 0%')
self.state = 0
elif self.state==1 and self.value!=0:
self.serial_part.connection.write(str.encode(str(self.value)))
self.serial_part.send_data()
            self.window_part.state_button.configure(text='Turn PWM Off')
self.window_part.intensity.configure(text='Intensity: ' + str(int(self.value*100/255)) + '%')
sleep(0.05)
def button_clicked(self):
# This method is called when the button is clicked
if self.state==0:
self.window_part.scale.set(255)
self.state = 1
elif self.state==1:
self.window_part.scale.set(0)
self.state=0
sleep(0.05)
def close_all(self):
# This method is used to call the close method of each class
self.serial_part.close_port()
self.window_part.close_window()
def main():
arduino = Serialport('/dev/ttyACM0', 9600)
window_root = Tk()
window = Window_settings(window_root)
run = Everything_happens(arduino, window)
window_root.mainloop()
main()
| gpl-2.0 |
hdinsight/hue | desktop/core/ext-py/pytz-2015.2/pytz/lazy.py | 514 | 5263 | from threading import RLock
try:
from UserDict import DictMixin
except ImportError:
from collections import Mapping as DictMixin
# With lazy loading, we might end up with multiple threads triggering
# it at the same time. We need a lock.
_fill_lock = RLock()
class LazyDict(DictMixin):
"""Dictionary populated on first use."""
data = None
def __getitem__(self, key):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return self.data[key.upper()]
def __contains__(self, key):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return key in self.data
def __iter__(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return iter(self.data)
def __len__(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return len(self.data)
def keys(self):
if self.data is None:
_fill_lock.acquire()
try:
if self.data is None:
self._fill()
finally:
_fill_lock.release()
return self.data.keys()
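# Illustrative subclass (hypothetical, not part of pytz): a concrete LazyDict
# only has to provide _fill(), which runs once under _fill_lock on first use.
class _ExampleLazyDict(LazyDict):
    def _fill(self):
        self.data = {'UTC': 'Coordinated Universal Time'}
# _ExampleLazyDict()['utc'] returns 'Coordinated Universal Time', because
# __getitem__ upper-cases the key before the lookup.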
class LazyList(list):
"""List populated on first use."""
_props = [
'__str__', '__repr__', '__unicode__',
'__hash__', '__sizeof__', '__cmp__',
'__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
'append', 'count', 'index', 'extend', 'insert', 'pop', 'remove',
'reverse', 'sort', '__add__', '__radd__', '__iadd__', '__mul__',
'__rmul__', '__imul__', '__contains__', '__len__', '__nonzero__',
'__getitem__', '__setitem__', '__delitem__', '__iter__',
'__reversed__', '__getslice__', '__setslice__', '__delslice__']
def __new__(cls, fill_iter=None):
if fill_iter is None:
return list()
# We need a new class as we will be dynamically messing with its
# methods.
class LazyList(list):
pass
fill_iter = [fill_iter]
def lazy(name):
def _lazy(self, *args, **kw):
_fill_lock.acquire()
try:
if len(fill_iter) > 0:
list.extend(self, fill_iter.pop())
for method_name in cls._props:
delattr(LazyList, method_name)
finally:
_fill_lock.release()
return getattr(list, name)(self, *args, **kw)
return _lazy
for name in cls._props:
setattr(LazyList, name, lazy(name))
new_list = LazyList()
return new_list
# Not all versions of Python declare the same magic methods.
# Filter out properties that don't exist in this version of Python
# from the list.
LazyList._props = [prop for prop in LazyList._props if hasattr(list, prop)]
class LazySet(set):
"""Set populated on first use."""
_props = (
'__str__', '__repr__', '__unicode__',
'__hash__', '__sizeof__', '__cmp__',
'__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
'__contains__', '__len__', '__nonzero__',
'__getitem__', '__setitem__', '__delitem__', '__iter__',
'__sub__', '__and__', '__xor__', '__or__',
'__rsub__', '__rand__', '__rxor__', '__ror__',
'__isub__', '__iand__', '__ixor__', '__ior__',
'add', 'clear', 'copy', 'difference', 'difference_update',
'discard', 'intersection', 'intersection_update', 'isdisjoint',
'issubset', 'issuperset', 'pop', 'remove',
'symmetric_difference', 'symmetric_difference_update',
'union', 'update')
def __new__(cls, fill_iter=None):
if fill_iter is None:
return set()
class LazySet(set):
pass
fill_iter = [fill_iter]
def lazy(name):
def _lazy(self, *args, **kw):
_fill_lock.acquire()
try:
if len(fill_iter) > 0:
for i in fill_iter.pop():
set.add(self, i)
for method_name in cls._props:
delattr(LazySet, method_name)
finally:
_fill_lock.release()
return getattr(set, name)(self, *args, **kw)
return _lazy
for name in cls._props:
setattr(LazySet, name, lazy(name))
new_set = LazySet()
return new_set
# Not all versions of Python declare the same magic methods.
# Filter out properties that don't exist in this version of Python
# from the list.
LazySet._props = [prop for prop in LazySet._props if hasattr(set, prop)]
| apache-2.0 |
doug-willowit/Pentaho-reports-for-OpenERP | openerp_addon/pentaho_reports/core.py | 1 | 8394 | # Todo:
# multiple prpt files for one action - allows for alternate formats.
import io
import os
import logging
import subprocess
import xmlrpclib
import base64
import netsvc
import pooler
import report
from osv import osv, fields
from tools.translate import _
from datetime import datetime
from .java_oe import JAVA_MAPPING, check_java_list, PARAM_VALUES, RESERVED_PARAMS
from tools import config
SERVICE_NAME_PREFIX = 'report.'
DEFAULT_OUTPUT_TYPE = 'pdf'
def get_proxy_args(cr, uid, prpt_content):
"""Return the arguments needed by Pentaho server proxy.
@return: Tuple with:
[0]: Has the url for the Pentaho server.
[1]: Has dict with basic arguments to pass to Pentaho server. This
includes the connection settings and report definition but does
not include any report parameter values.
"""
pool = pooler.get_pool(cr.dbname)
current_user = pool.get("res.users").browse(cr, uid, uid)
config_obj = pool.get('ir.config_parameter')
proxy_url = config_obj.get_param(cr, uid, 'pentaho.server.url', default='http://localhost:8080/pentaho-reports-for-openerp')
proxy_argument = {
"prpt_file_content": xmlrpclib.Binary(prpt_content),
"connection_settings" : {'openerp' : {"host": config["xmlrpc_interface"] or "localhost",
"port": str(config["xmlrpc_port"]),
"db": cr.dbname,
"login": current_user.login,
"password": current_user.password,
}},
}
postgresconfig_host = config_obj.get_param(cr, uid, 'pentaho.postgres.host', default='localhost')
postgresconfig_port = config_obj.get_param(cr, uid, 'pentaho.postgres.port', default='5432')
postgresconfig_login = config_obj.get_param(cr, uid, 'pentaho.postgres.login')
postgresconfig_password = config_obj.get_param(cr, uid, 'pentaho.postgres.password')
if postgresconfig_host and postgresconfig_port and postgresconfig_login and postgresconfig_password:
proxy_argument['connection_settings'].update({'postgres' : {'host': postgresconfig_host,
'port': postgresconfig_port,
'db': cr.dbname,
'login': postgresconfig_login,
'password': postgresconfig_password,
}})
return proxy_url, proxy_argument
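# Usage sketch (illustrative): the same call sequence execute_report() uses
# further below to talk to the Pentaho XML-RPC proxy.
#   proxy_url, proxy_argument = get_proxy_args(cr, uid, prpt_content)
#   proxy = xmlrpclib.ServerProxy(proxy_url)
#   parameters = proxy.report.getParameterInfo(proxy_argument)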
class Report(object):
def __init__(self, name, cr, uid, ids, data, context):
self.name = name
self.cr = cr
self.uid = uid
self.ids = ids
self.data = data
self.context = context or {}
self.pool = pooler.get_pool(self.cr.dbname)
self.prpt_content = None
self.default_output_type = DEFAULT_OUTPUT_TYPE
def setup_report(self):
ids = self.pool.get("ir.actions.report.xml").search(self.cr, self.uid, [("report_name", "=", self.name[7:]), ("is_pentaho_report", "=", True)], context = self.context)
if not ids:
raise osv.except_osv(_('Error'), _("Report service name '%s' is not a Pentaho report.") % self.name[7:])
data = self.pool.get("ir.actions.report.xml").read(self.cr, self.uid, ids[0], ["pentaho_report_output_type", "pentaho_file"])
self.default_output_type = data["pentaho_report_output_type"] or DEFAULT_OUTPUT_TYPE
self.prpt_content = base64.decodestring(data["pentaho_file"])
def execute(self):
self.setup_report()
# returns report and format
return self.execute_report()
def fetch_report_parameters(self):
"""Return the parameters object for this report.
Returns the parameters object as returned by the Pentaho
server.
"""
self.setup_report()
proxy_url, proxy_argument = get_proxy_args(self.cr, self.uid, self.prpt_content)
proxy = xmlrpclib.ServerProxy(proxy_url)
return proxy.report.getParameterInfo(proxy_argument)
def execute_report(self):
proxy_url, proxy_argument = get_proxy_args(self.cr, self.uid, self.prpt_content)
proxy = xmlrpclib.ServerProxy(proxy_url)
proxy_parameter_info = proxy.report.getParameterInfo(proxy_argument)
output_type = self.data and self.data.get('output_type', False) or self.default_output_type or DEFAULT_OUTPUT_TYPE
proxy_argument.update({
'output_type' : output_type,
'report_parameters' : dict([(param_name, param_formula(self)) for (param_name, param_formula) in RESERVED_PARAMS.iteritems() if param_formula(self)]),
})
if self.data and self.data.get('variables', False):
proxy_argument['report_parameters'].update(self.data['variables'])
for parameter in proxy_parameter_info:
if parameter['name'] in proxy_argument['report_parameters'].keys():
value_type = parameter['value_type']
java_list, value_type = check_java_list(value_type)
if not value_type == 'java.lang.Object' and PARAM_VALUES[JAVA_MAPPING[value_type](parameter['attributes'].get('data-format', False))].get('convert',False):
# convert from string types to correct types for reporter
proxy_argument['report_parameters'][parameter['name']] = PARAM_VALUES[JAVA_MAPPING[value_type](parameter['attributes'].get('data-format', False))]['convert'](proxy_argument['report_parameters'][parameter['name']])
# turn in to list
if java_list:
proxy_argument['report_parameters'][parameter['name']] = [proxy_argument['report_parameters'][parameter['name']]]
rendered_report = proxy.report.execute(proxy_argument).data
if len(rendered_report) == 0:
raise osv.except_osv(_('Error'), _("Pentaho returned no data for the report '%s'. Check report definition and parameters.") % self.name[7:])
return (rendered_report, output_type)
class PentahoReportOpenERPInterface(report.interface.report_int):
def __init__(self, name):
if name in netsvc.Service._services:
del netsvc.Service._services[name]
super(PentahoReportOpenERPInterface, self).__init__(name)
def create(self, cr, uid, ids, data, context):
name = self.name
report_instance = Report(name, cr, uid, ids, data, context)
return report_instance.execute()
def register_pentaho_report(report_name):
if not report_name.startswith(SERVICE_NAME_PREFIX):
name = "%s%s" % (SERVICE_NAME_PREFIX, report_name)
else:
name = report_name
if name in netsvc.Service._services:
if isinstance(netsvc.Service._services[name], PentahoReportOpenERPInterface):
return
del netsvc.Service._services[name]
PentahoReportOpenERPInterface(name)
def fetch_report_parameters(cr, uid, report_name, context=None):
"""Return the parameters object for this report.
Returns the parameters object as returned by the Pentaho
server.
@param report_name: The service name for the report.
"""
if not report_name.startswith(SERVICE_NAME_PREFIX):
name = "%s%s" % (SERVICE_NAME_PREFIX, report_name)
else:
name = report_name
return Report(name, cr, uid, [1], {}, context).fetch_report_parameters()
#Following OpenERP's (messed up) naming convention
class ir_actions_report_xml(osv.osv):
_inherit = "ir.actions.report.xml"
def register_all(self, cr):
cr.execute("SELECT * FROM ir_act_report_xml WHERE is_pentaho_report = 'TRUE' ORDER BY id")
records = cr.dictfetchall()
for record in records:
register_pentaho_report(record["report_name"])
return super(ir_actions_report_xml, self).register_all(cr)
ir_actions_report_xml()
| gpl-2.0 |
k3nnyfr/s2a_fr-nsis | s2a/Python/Lib/decimal.py | 19 | 220995 | # Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz <aahz at pobox.com>
# and Tim Peters
# This module is currently Py2.3 compatible and should be kept that way
# unless a major compelling advantage arises. IOW, 2.3 compatibility is
# strongly preferred, but not guaranteed.
# Also, this module should be kept in sync with the latest updates of
# the IBM specification as it evolves. Those updates will be treated
# as bug fixes (deviation from the spec is a compatibility, usability
# bug) and will be backported. At this point the spec is stabilizing
# and the updates are becoming fewer, smaller, and less significant.
"""
This is a Py2.3 implementation of decimal floating point arithmetic based on
the General Decimal Arithmetic Specification:
http://speleotrove.com/decimal/decarith.html
and IEEE standard 854-1987:
http://en.wikipedia.org/wiki/IEEE_854-1987
Decimal floating point has finite precision with arbitrarily large bounds.
The purpose of this module is to support arithmetic using familiar
"schoolhouse" rules and to avoid some of the tricky representation
issues associated with binary floating point. The package is especially
useful for financial applications or for contexts where users have
expectations that are at odds with binary floating point (for instance,
in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
of the expected Decimal('0.00') returned by decimal floating point).
Here are some examples of using the decimal module:
>>> from decimal import *
>>> setcontext(ExtendedContext)
>>> Decimal(0)
Decimal('0')
>>> Decimal('1')
Decimal('1')
>>> Decimal('-.0123')
Decimal('-0.0123')
>>> Decimal(123456)
Decimal('123456')
>>> Decimal('123.45e12345678901234567890')
Decimal('1.2345E+12345678901234567892')
>>> Decimal('1.33') + Decimal('1.27')
Decimal('2.60')
>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41')
Decimal('-2.20')
>>> dig = Decimal(1)
>>> print dig / Decimal(3)
0.333333333
>>> getcontext().prec = 18
>>> print dig / Decimal(3)
0.333333333333333333
>>> print dig.sqrt()
1
>>> print Decimal(3).sqrt()
1.73205080756887729
>>> print Decimal(3) ** 123
4.85192780976896427E+58
>>> inf = Decimal(1) / Decimal(0)
>>> print inf
Infinity
>>> neginf = Decimal(-1) / Decimal(0)
>>> print neginf
-Infinity
>>> print neginf + inf
NaN
>>> print neginf * inf
-Infinity
>>> print dig / 0
Infinity
>>> getcontext().traps[DivisionByZero] = 1
>>> print dig / 0
Traceback (most recent call last):
...
...
...
DivisionByZero: x / 0
>>> c = Context()
>>> c.traps[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> c.divide(Decimal(0), Decimal(0))
Decimal('NaN')
>>> c.traps[InvalidOperation] = 1
>>> print c.flags[InvalidOperation]
1
>>> c.flags[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> print c.divide(Decimal(0), Decimal(0))
Traceback (most recent call last):
...
...
...
InvalidOperation: 0 / 0
>>> print c.flags[InvalidOperation]
1
>>> c.flags[InvalidOperation] = 0
>>> c.traps[InvalidOperation] = 0
>>> print c.divide(Decimal(0), Decimal(0))
NaN
>>> print c.flags[InvalidOperation]
1
>>>
"""
__all__ = [
# Two major classes
'Decimal', 'Context',
# Contexts
'DefaultContext', 'BasicContext', 'ExtendedContext',
# Exceptions
'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
# Constants for use in setting up contexts
'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP',
# Functions for manipulating contexts
'setcontext', 'getcontext', 'localcontext'
]
__version__ = '1.70' # Highest version of the spec this complies with
import copy as _copy
import math as _math
import numbers as _numbers
try:
from collections import namedtuple as _namedtuple
DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent')
except ImportError:
DecimalTuple = lambda *args: args
# Rounding
ROUND_DOWN = 'ROUND_DOWN'
ROUND_HALF_UP = 'ROUND_HALF_UP'
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
ROUND_CEILING = 'ROUND_CEILING'
ROUND_FLOOR = 'ROUND_FLOOR'
ROUND_UP = 'ROUND_UP'
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
ROUND_05UP = 'ROUND_05UP'
# Errors
class DecimalException(ArithmeticError):
"""Base exception class.
Used exceptions derive from this.
If an exception derives from another exception besides this (such as
Underflow, which derives from Inexact, Rounded, and Subnormal), that
indicates that it is only raised if the others are present. This isn't
actually used for anything, though.
handle -- Called when context._raise_error is called and the
trap_enabler is not set. First argument is self, second is the
context. More arguments can be given, those being after
the explanation in _raise_error (For example,
context._raise_error(NewError, '(-x)!', self._sign) would
call NewError().handle(context, self._sign).)
To define a new exception, it should be sufficient to have it derive
from DecimalException.
"""
def handle(self, context, *args):
pass
class Clamped(DecimalException):
"""Exponent of a 0 changed to fit bounds.
This occurs and signals clamped if the exponent of a result has been
altered in order to fit the constraints of a specific concrete
representation. This may occur when the exponent of a zero result would
be outside the bounds of a representation, or when a large normal
number would have an encoded exponent that cannot be represented. In
this latter case, the exponent is reduced to fit and the corresponding
number of zero digits is appended to the coefficient ("fold-down").
"""
class InvalidOperation(DecimalException):
"""An invalid operation was performed.
Various bad things cause this:
Something creates a signaling NaN
-INF + INF
0 * (+-)INF
(+-)INF / (+-)INF
x % 0
(+-)INF % x
x._rescale( non-integer )
sqrt(-x) , x > 0
0 ** 0
x ** (non-integer)
x ** (+-)INF
An operand is invalid
The result of the operation after these is a quiet positive NaN,
except when the cause is a signaling NaN, in which case the result is
also a quiet NaN, but with the original sign and optional diagnostic
information.
"""
def handle(self, context, *args):
if args:
ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True)
return ans._fix_nan(context)
return _NaN
class ConversionSyntax(InvalidOperation):
"""Trying to convert badly formed string.
This occurs and signals invalid-operation if an string is being
converted to a number and it does not conform to the numeric string
syntax. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionByZero(DecimalException, ZeroDivisionError):
"""Division by 0.
This occurs and signals division-by-zero if division of a finite number
by zero was attempted (during a divide-integer or divide operation, or a
power operation with negative right-hand operand), and the dividend was
not zero.
The result of the operation is [sign,inf], where sign is the exclusive
or of the signs of the operands for divide, or is 1 for an odd power of
-0, for power.
"""
def handle(self, context, sign, *args):
return _SignedInfinity[sign]
class DivisionImpossible(InvalidOperation):
"""Cannot perform the division adequately.
This occurs and signals invalid-operation if the integer result of a
divide-integer or remainder operation had too many digits (would be
longer than precision). The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionUndefined(InvalidOperation, ZeroDivisionError):
"""Undefined result of division.
This occurs and signals invalid-operation if division by zero was
attempted (during a divide-integer, divide, or remainder operation), and
the dividend is also zero. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Inexact(DecimalException):
"""Had to round, losing information.
This occurs and signals inexact whenever the result of an operation is
not exact (that is, it needed to be rounded and any discarded digits
were non-zero), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The inexact signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) was inexact.
"""
class InvalidContext(InvalidOperation):
"""Invalid context. Unknown rounding, for example.
This occurs and signals invalid-operation if an invalid context was
detected during an operation. This can occur if contexts are not checked
on creation and either the precision exceeds the capability of the
underlying concrete representation or an unknown or unsupported rounding
was specified. These aspects of the context need only be checked when
the values are required to be used. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Rounded(DecimalException):
"""Number got rounded (not necessarily changed during rounding).
This occurs and signals rounded whenever the result of an operation is
rounded (that is, some zero or non-zero digits were discarded from the
coefficient), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The rounded signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) caused a loss of precision.
"""
class Subnormal(DecimalException):
"""Exponent < Emin before rounding.
This occurs and signals subnormal whenever the result of a conversion or
operation is subnormal (that is, its adjusted exponent is less than
Emin, before any rounding). The result in all cases is unchanged.
The subnormal signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) yielded a subnormal result.
"""
class Overflow(Inexact, Rounded):
"""Numerical overflow.
This occurs and signals overflow if the adjusted exponent of a result
(from a conversion or from an operation that is not an attempt to divide
by zero), after rounding, would be greater than the largest value that
can be handled by the implementation (the value Emax).
The result depends on the rounding mode:
For round-half-up and round-half-even (and for round-half-down and
round-up, if implemented), the result of the operation is [sign,inf],
where sign is the sign of the intermediate result. For round-down, the
result is the largest finite number that can be represented in the
current precision, with the sign of the intermediate result. For
round-ceiling, the result is the same as for round-down if the sign of
the intermediate result is 1, or is [0,inf] otherwise. For round-floor,
the result is the same as for round-down if the sign of the intermediate
result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded
will also be raised.
"""
def handle(self, context, sign, *args):
if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
ROUND_HALF_DOWN, ROUND_UP):
return _SignedInfinity[sign]
if sign == 0:
if context.rounding == ROUND_CEILING:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
if sign == 1:
if context.rounding == ROUND_FLOOR:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
class Underflow(Inexact, Rounded, Subnormal):
"""Numerical underflow with result rounded to 0.
This occurs and signals underflow if a result is inexact and the
adjusted exponent of the result would be smaller (more negative) than
the smallest value that can be handled by the implementation (the value
Emin). That is, the result is both inexact and subnormal.
The result after an underflow will be a subnormal number rounded, if
necessary, so that its exponent is not less than Etiny. This may result
in 0 with the sign of the intermediate result and an exponent of Etiny.
In all cases, Inexact, Rounded, and Subnormal will also be raised.
"""
# List of public traps and flags
_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
Underflow, InvalidOperation, Subnormal]
# Map conditions (per the spec) to signals
_condition_map = {ConversionSyntax:InvalidOperation,
DivisionImpossible:InvalidOperation,
DivisionUndefined:InvalidOperation,
InvalidContext:InvalidOperation}
##### Context Functions ##################################################
# The getcontext() and setcontext() functions manage access to a thread-local
# current context. Py2.4 offers direct support for thread locals. If that
# is not available, use threading.currentThread() which is slower but will
# work for older Pythons. If threads are not part of the build, create a
# mock threading object with threading.local() returning the module namespace.
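# A minimal usage sketch (illustrative only, echoing the examples in the
# module docstring above):
#
#   from decimal import Decimal, getcontext
#   getcontext().prec = 6          # this thread now rounds to 6 digits
#   Decimal(1) / Decimal(7)        # Decimal('0.142857')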
try:
import threading
except ImportError:
# Python was compiled without threads; create a mock object instead
import sys
class MockThreading(object):
def local(self, sys=sys):
return sys.modules[__name__]
threading = MockThreading()
del sys, MockThreading
try:
threading.local
except AttributeError:
# To fix reloading, force it to create a new context.
# Old contexts have different exceptions in their dicts, which causes
# problems.
if hasattr(threading.currentThread(), '__decimal_context__'):
del threading.currentThread().__decimal_context__
def setcontext(context):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
threading.currentThread().__decimal_context__ = context
def getcontext():
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return threading.currentThread().__decimal_context__
except AttributeError:
context = Context()
threading.currentThread().__decimal_context__ = context
return context
else:
local = threading.local()
if hasattr(local, '__decimal_context__'):
del local.__decimal_context__
def getcontext(_local=local):
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return _local.__decimal_context__
except AttributeError:
context = Context()
_local.__decimal_context__ = context
return context
def setcontext(context, _local=local):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
_local.__decimal_context__ = context
del threading, local # Don't contaminate the namespace
def localcontext(ctx=None):
"""Return a context manager for a copy of the supplied context
Uses a copy of the current context if no context is specified
The returned context manager creates a local decimal context
in a with statement:
def sin(x):
with localcontext() as ctx:
ctx.prec += 2
# Rest of sin calculation algorithm
# uses a precision 2 greater than normal
return +s # Convert result to normal precision
def sin(x):
with localcontext(ExtendedContext):
# Rest of sin calculation algorithm
# uses the Extended Context from the
# General Decimal Arithmetic Specification
return +s # Convert result to normal context
>>> setcontext(DefaultContext)
>>> print getcontext().prec
28
>>> with localcontext():
... ctx = getcontext()
... ctx.prec += 2
... print ctx.prec
...
30
>>> with localcontext(ExtendedContext):
... print getcontext().prec
...
9
>>> print getcontext().prec
28
"""
if ctx is None: ctx = getcontext()
return _ContextManager(ctx)
##### Decimal class #######################################################
class Decimal(object):
"""Floating point class for decimal arithmetic."""
__slots__ = ('_exp','_int','_sign', '_is_special')
# Generally, the value of the Decimal instance is given by
# (-1)**_sign * _int * 10**_exp
# Special values are signified by _is_special == True
# We're immutable, so use __new__ not __init__
def __new__(cls, value="0", context=None):
"""Create a decimal point instance.
>>> Decimal('3.14') # string input
Decimal('3.14')
>>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent)
Decimal('3.14')
>>> Decimal(314) # int or long
Decimal('314')
>>> Decimal(Decimal(314)) # another decimal instance
Decimal('314')
>>> Decimal(' 3.14 \\n') # leading and trailing whitespace okay
Decimal('3.14')
"""
# Note that the coefficient, self._int, is actually stored as
# a string rather than as a tuple of digits. This speeds up
# the "digits to integer" and "integer to digits" conversions
# that are used in almost every arithmetic operation on
# Decimals. This is an internal detail: the as_tuple function
# and the Decimal constructor still deal with tuples of
# digits.
self = object.__new__(cls)
# From a string
# REs insist on real strings, so we can too.
if isinstance(value, basestring):
m = _parser(value.strip())
if m is None:
if context is None:
context = getcontext()
return context._raise_error(ConversionSyntax,
"Invalid literal for Decimal: %r" % value)
if m.group('sign') == "-":
self._sign = 1
else:
self._sign = 0
intpart = m.group('int')
if intpart is not None:
# finite number
fracpart = m.group('frac') or ''
exp = int(m.group('exp') or '0')
self._int = str(int(intpart+fracpart))
self._exp = exp - len(fracpart)
self._is_special = False
else:
diag = m.group('diag')
if diag is not None:
# NaN
self._int = str(int(diag or '0')).lstrip('0')
if m.group('signal'):
self._exp = 'N'
else:
self._exp = 'n'
else:
# infinity
self._int = '0'
self._exp = 'F'
self._is_special = True
return self
# From an integer
if isinstance(value, (int,long)):
if value >= 0:
self._sign = 0
else:
self._sign = 1
self._exp = 0
self._int = str(abs(value))
self._is_special = False
return self
# From another decimal
if isinstance(value, Decimal):
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
# From an internal working value
if isinstance(value, _WorkRep):
self._sign = value.sign
self._int = str(value.int)
self._exp = int(value.exp)
self._is_special = False
return self
# tuple/list conversion (possibly from as_tuple())
if isinstance(value, (list,tuple)):
if len(value) != 3:
raise ValueError('Invalid tuple size in creation of Decimal '
'from list or tuple. The list or tuple '
'should have exactly three elements.')
# process sign. The isinstance test rejects floats
if not (isinstance(value[0], (int, long)) and value[0] in (0,1)):
raise ValueError("Invalid sign. The first value in the tuple "
"should be an integer; either 0 for a "
"positive number or 1 for a negative number.")
self._sign = value[0]
if value[2] == 'F':
# infinity: value[1] is ignored
self._int = '0'
self._exp = value[2]
self._is_special = True
else:
# process and validate the digits in value[1]
digits = []
for digit in value[1]:
if isinstance(digit, (int, long)) and 0 <= digit <= 9:
# skip leading zeros
if digits or digit != 0:
digits.append(digit)
else:
raise ValueError("The second value in the tuple must "
"be composed of integers in the range "
"0 through 9.")
if value[2] in ('n', 'N'):
# NaN: digits form the diagnostic
self._int = ''.join(map(str, digits))
self._exp = value[2]
self._is_special = True
elif isinstance(value[2], (int, long)):
# finite number: digits give the coefficient
self._int = ''.join(map(str, digits or [0]))
self._exp = value[2]
self._is_special = False
else:
raise ValueError("The third value in the tuple must "
"be an integer, or one of the "
"strings 'F', 'n', 'N'.")
return self
if isinstance(value, float):
value = Decimal.from_float(value)
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
raise TypeError("Cannot convert %r to Decimal" % value)
# @classmethod, but @decorator is not valid Python 2.3 syntax, so
# don't use it (see notes on Py2.3 compatibility at top of file)
def from_float(cls, f):
"""Converts a float to a decimal number, exactly.
Note that Decimal.from_float(0.1) is not the same as Decimal('0.1').
Since 0.1 is not exactly representable in binary floating point, the
value is stored as the nearest representable value which is
0x1.999999999999ap-4. The exact equivalent of the value in decimal
is 0.1000000000000000055511151231257827021181583404541015625.
>>> Decimal.from_float(0.1)
Decimal('0.1000000000000000055511151231257827021181583404541015625')
>>> Decimal.from_float(float('nan'))
Decimal('NaN')
>>> Decimal.from_float(float('inf'))
Decimal('Infinity')
>>> Decimal.from_float(-float('inf'))
Decimal('-Infinity')
>>> Decimal.from_float(-0.0)
Decimal('-0')
"""
if isinstance(f, (int, long)): # handle integer inputs
return cls(f)
if _math.isinf(f) or _math.isnan(f): # raises TypeError if not a float
return cls(repr(f))
if _math.copysign(1.0, f) == 1.0:
sign = 0
else:
sign = 1
n, d = abs(f).as_integer_ratio()
k = d.bit_length() - 1
result = _dec_from_triple(sign, str(n*5**k), -k)
if cls is Decimal:
return result
else:
return cls(result)
from_float = classmethod(from_float)
def _isnan(self):
"""Returns whether the number is not actually one.
0 if a number
1 if NaN
2 if sNaN
"""
if self._is_special:
exp = self._exp
if exp == 'n':
return 1
elif exp == 'N':
return 2
return 0
def _isinfinity(self):
"""Returns whether the number is infinite
0 if finite or not a number
1 if +INF
-1 if -INF
"""
if self._exp == 'F':
if self._sign:
return -1
return 1
return 0
def _check_nans(self, other=None, context=None):
"""Returns whether the number is not actually one.
if self, other are sNaN, signal
if self, other are NaN return nan
return 0
Done before operations.
"""
self_is_nan = self._isnan()
if other is None:
other_is_nan = False
else:
other_is_nan = other._isnan()
if self_is_nan or other_is_nan:
if context is None:
context = getcontext()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if self_is_nan:
return self._fix_nan(context)
return other._fix_nan(context)
return 0
def _compare_check_nans(self, other, context):
"""Version of _check_nans used for the signaling comparisons
compare_signal, __le__, __lt__, __ge__, __gt__.
Signal InvalidOperation if either self or other is a (quiet
or signaling) NaN. Signaling NaNs take precedence over quiet
NaNs.
Return 0 if neither operand is a NaN.
"""
if context is None:
context = getcontext()
if self._is_special or other._is_special:
if self.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
self)
elif other.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
other)
elif self.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
self)
elif other.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
other)
return 0
def __nonzero__(self):
"""Return True if self is nonzero; otherwise return False.
NaNs and infinities are considered nonzero.
"""
return self._is_special or self._int != '0'
def _cmp(self, other):
"""Compare the two non-NaN decimal instances self and other.
Returns -1 if self < other, 0 if self == other and 1
if self > other. This routine is for internal use only."""
if self._is_special or other._is_special:
self_inf = self._isinfinity()
other_inf = other._isinfinity()
if self_inf == other_inf:
return 0
elif self_inf < other_inf:
return -1
else:
return 1
# check for zeros; Decimal('0') == Decimal('-0')
if not self:
if not other:
return 0
else:
return -((-1)**other._sign)
if not other:
return (-1)**self._sign
# If different signs, neg one is less
if other._sign < self._sign:
return -1
if self._sign < other._sign:
return 1
self_adjusted = self.adjusted()
other_adjusted = other.adjusted()
if self_adjusted == other_adjusted:
self_padded = self._int + '0'*(self._exp - other._exp)
other_padded = other._int + '0'*(other._exp - self._exp)
if self_padded == other_padded:
return 0
elif self_padded < other_padded:
return -(-1)**self._sign
else:
return (-1)**self._sign
elif self_adjusted > other_adjusted:
return (-1)**self._sign
else: # self_adjusted < other_adjusted
return -((-1)**self._sign)
# Note: The Decimal standard doesn't cover rich comparisons for
# Decimals. In particular, the specification is silent on the
# subject of what should happen for a comparison involving a NaN.
# We take the following approach:
#
# == comparisons involving a quiet NaN always return False
# != comparisons involving a quiet NaN always return True
# == or != comparisons involving a signaling NaN signal
# InvalidOperation, and return False or True as above if the
# InvalidOperation is not trapped.
# <, >, <= and >= comparisons involving a (quiet or signaling)
# NaN signal InvalidOperation, and return False if the
# InvalidOperation is not trapped.
#
# This behavior is designed to conform as closely as possible to
# that specified by IEEE 754.
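# For illustration (assuming InvalidOperation is not trapped in the
# active context):
#
#   Decimal('NaN') == Decimal('1')   # False
#   Decimal('NaN') != Decimal('1')   # True
#   Decimal('NaN') < Decimal('1')    # False, and InvalidOperation is
#                                    # signaled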
def __eq__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return False
return self._cmp(other) == 0
def __ne__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return True
return self._cmp(other) != 0
def __lt__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) < 0
def __le__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) <= 0
def __gt__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) > 0
def __ge__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) >= 0
def compare(self, other, context=None):
"""Compares one to another.
-1 => a < b
0 => a = b
1 => a > b
NaN => one is NaN
Like __cmp__, but returns Decimal instances.
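For example (illustrative, under the default context):
>>> Decimal('1').compare(Decimal('2'))
Decimal('-1')
>>> Decimal('1').compare(Decimal('1'))
Decimal('0')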
"""
other = _convert_other(other, raiseit=True)
# Compare(NaN, NaN) = NaN
if (self._is_special or other and other._is_special):
ans = self._check_nans(other, context)
if ans:
return ans
return Decimal(self._cmp(other))
def __hash__(self):
"""x.__hash__() <==> hash(x)"""
# Decimal integers must hash the same as the ints
#
# The hash of a nonspecial noninteger Decimal must depend only
# on the value of that Decimal, and not on its representation.
# For example: hash(Decimal('100E-1')) == hash(Decimal('10')).
# Equality comparisons involving signaling nans can raise an
# exception; since equality checks are implicitly and
# unpredictably used when checking set and dict membership, we
# prevent signaling nans from being used as set elements or
# dict keys by making __hash__ raise an exception.
if self._is_special:
if self.is_snan():
raise TypeError('Cannot hash a signaling NaN value.')
elif self.is_nan():
# 0 to match hash(float('nan'))
return 0
else:
# values chosen to match hash(float('inf')) and
# hash(float('-inf')).
if self._sign:
return -271828
else:
return 314159
# In Python 2.7, we're allowing comparisons (but not
# arithmetic operations) between floats and Decimals; so if
# a Decimal instance is exactly representable as a float then
# its hash should match that of the float.
self_as_float = float(self)
if Decimal.from_float(self_as_float) == self:
return hash(self_as_float)
if self._isinteger():
op = _WorkRep(self.to_integral_value())
# to make computation feasible for Decimals with large
# exponent, we use the fact that hash(n) == hash(m) for
# any two nonzero integers n and m such that (i) n and m
# have the same sign, and (ii) n is congruent to m modulo
# 2**64-1. So we can replace hash((-1)**s*c*10**e) with
# hash((-1)**s*c*pow(10, e, 2**64-1)).
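# For example (illustrative), hash(Decimal('1E100')) is computed as
# hash(1 * pow(10, 100, 2**64-1)) instead of via the 101-digit
# integer 10**100.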
return hash((-1)**op.sign*op.int*pow(10, op.exp, 2**64-1))
# The value of a nonzero nonspecial Decimal instance is
# faithfully represented by the triple consisting of its sign,
# its adjusted exponent, and its coefficient with trailing
# zeros removed.
return hash((self._sign,
self._exp+len(self._int),
self._int.rstrip('0')))
def as_tuple(self):
"""Represents the number as a triple tuple.
To show the internals exactly as they are.
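For example (illustrative; assumes namedtuple support, without which a
plain tuple is returned):
>>> Decimal('-3.14').as_tuple()
DecimalTuple(sign=1, digits=(3, 1, 4), exponent=-2)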
"""
return DecimalTuple(self._sign, tuple(map(int, self._int)), self._exp)
def __repr__(self):
"""Represents the number as an instance of Decimal."""
# Invariant: eval(repr(d)) == d
return "Decimal('%s')" % str(self)
def __str__(self, eng=False, context=None):
"""Return string representation of the number in scientific notation.
Captures all of the information in the underlying representation.
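For example (illustrative, with the default context):
>>> str(Decimal('1.23E+5'))
'1.23E+5'
>>> str(Decimal('0.000001'))
'0.000001'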
"""
sign = ['', '-'][self._sign]
if self._is_special:
if self._exp == 'F':
return sign + 'Infinity'
elif self._exp == 'n':
return sign + 'NaN' + self._int
else: # self._exp == 'N'
return sign + 'sNaN' + self._int
# number of digits of self._int to left of decimal point
leftdigits = self._exp + len(self._int)
# dotplace is number of digits of self._int to the left of the
# decimal point in the mantissa of the output string (that is,
# after adjusting the exponent)
if self._exp <= 0 and leftdigits > -6:
# no exponent required
dotplace = leftdigits
elif not eng:
# usual scientific notation: 1 digit on left of the point
dotplace = 1
elif self._int == '0':
# engineering notation, zero
dotplace = (leftdigits + 1) % 3 - 1
else:
# engineering notation, nonzero
dotplace = (leftdigits - 1) % 3 + 1
if dotplace <= 0:
intpart = '0'
fracpart = '.' + '0'*(-dotplace) + self._int
elif dotplace >= len(self._int):
intpart = self._int+'0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace]
fracpart = '.' + self._int[dotplace:]
if leftdigits == dotplace:
exp = ''
else:
if context is None:
context = getcontext()
exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace)
return sign + intpart + fracpart + exp
def to_eng_string(self, context=None):
"""Convert to engineering-type string.
Engineering notation has an exponent which is a multiple of 3, so there
are up to 3 digits left of the decimal place.
The same rules as __str__ determine when exponential notation is used.
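For example (illustrative):
>>> Decimal('1.23E+4').to_eng_string()
'12.3E+3'
>>> Decimal('0.000001').to_eng_string()
'0.000001'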
"""
return self.__str__(eng=True, context=context)
def __neg__(self, context=None):
"""Returns a copy with the sign switched.
Rounds the result if necessary.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# -Decimal('0') is Decimal('0'), not Decimal('-0'), except
# in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = self.copy_negate()
return ans._fix(context)
def __pos__(self, context=None):
"""Returns a copy, unless it is a sNaN.
Rounds the number (if it has more than precision digits).
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# + (-0) = 0, except in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = Decimal(self)
return ans._fix(context)
def __abs__(self, round=True, context=None):
"""Returns the absolute value of self.
If the keyword argument 'round' is false, do not round. The
expression self.__abs__(round=False) is equivalent to
self.copy_abs().
"""
if not round:
return self.copy_abs()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._sign:
ans = self.__neg__(context=context)
else:
ans = self.__pos__(context=context)
return ans
def __add__(self, other, context=None):
"""Returns self + other.
-INF + INF (or the reverse) causes an InvalidOperation error.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
# If both INF, same sign => same as both, opposite => error.
if self._sign != other._sign and other._isinfinity():
return context._raise_error(InvalidOperation, '-INF + INF')
return Decimal(self)
if other._isinfinity():
return Decimal(other) # Can't both be infinity here
exp = min(self._exp, other._exp)
negativezero = 0
if context.rounding == ROUND_FLOOR and self._sign != other._sign:
# If the answer is 0, the sign should be negative in this case.
negativezero = 1
if not self and not other:
sign = min(self._sign, other._sign)
if negativezero:
sign = 1
ans = _dec_from_triple(sign, '0', exp)
ans = ans._fix(context)
return ans
if not self:
exp = max(exp, other._exp - context.prec-1)
ans = other._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
if not other:
exp = max(exp, self._exp - context.prec-1)
ans = self._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
op1, op2 = _normalize(op1, op2, context.prec)
result = _WorkRep()
if op1.sign != op2.sign:
# Equal and opposite
if op1.int == op2.int:
ans = _dec_from_triple(negativezero, '0', exp)
ans = ans._fix(context)
return ans
if op1.int < op2.int:
op1, op2 = op2, op1
# OK, now abs(op1) > abs(op2)
if op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = op2.sign, op1.sign
else:
result.sign = 0
# So we know the sign, and op1 > 0.
elif op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = (0, 0)
else:
result.sign = 0
# Now, op1 > abs(op2) > 0
if op2.sign == 0:
result.int = op1.int + op2.int
else:
result.int = op1.int - op2.int
result.exp = op1.exp
ans = Decimal(result)
ans = ans._fix(context)
return ans
__radd__ = __add__
def __sub__(self, other, context=None):
"""Return self - other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context=context)
if ans:
return ans
# self - other is computed as self + other.copy_negate()
return self.__add__(other.copy_negate(), context=context)
def __rsub__(self, other, context=None):
"""Return other - self"""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__sub__(self, context=context)
def __mul__(self, other, context=None):
"""Return self * other.
(+-) INF * 0 (or its reverse) raises InvalidOperation.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
resultsign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if not other:
return context._raise_error(InvalidOperation, '(+-)INF * 0')
return _SignedInfinity[resultsign]
if other._isinfinity():
if not self:
return context._raise_error(InvalidOperation, '0 * (+-)INF')
return _SignedInfinity[resultsign]
resultexp = self._exp + other._exp
# Special case for multiplying by zero
if not self or not other:
ans = _dec_from_triple(resultsign, '0', resultexp)
# Fixing in case the exponent is out of bounds
ans = ans._fix(context)
return ans
# Special case for multiplying by power of 10
if self._int == '1':
ans = _dec_from_triple(resultsign, other._int, resultexp)
ans = ans._fix(context)
return ans
if other._int == '1':
ans = _dec_from_triple(resultsign, self._int, resultexp)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp)
ans = ans._fix(context)
return ans
__rmul__ = __mul__
def __truediv__(self, other, context=None):
"""Return self / other."""
other = _convert_other(other)
if other is NotImplemented:
return NotImplemented
if context is None:
context = getcontext()
sign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity() and other._isinfinity():
return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')
if self._isinfinity():
return _SignedInfinity[sign]
if other._isinfinity():
context._raise_error(Clamped, 'Division by infinity')
return _dec_from_triple(sign, '0', context.Etiny())
# Special cases for zeroes
if not other:
if not self:
return context._raise_error(DivisionUndefined, '0 / 0')
return context._raise_error(DivisionByZero, 'x / 0', sign)
if not self:
exp = self._exp - other._exp
coeff = 0
else:
# OK, so neither = 0, INF or NaN
shift = len(other._int) - len(self._int) + context.prec + 1
exp = self._exp - other._exp - shift
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if shift >= 0:
coeff, remainder = divmod(op1.int * 10**shift, op2.int)
else:
coeff, remainder = divmod(op1.int, op2.int * 10**-shift)
if remainder:
# result is not exact; adjust to ensure correct rounding
if coeff % 5 == 0:
coeff += 1
else:
# result is exact; get as close to ideal exponent as possible
ideal_exp = self._exp - other._exp
while exp < ideal_exp and coeff % 10 == 0:
coeff //= 10
exp += 1
ans = _dec_from_triple(sign, str(coeff), exp)
return ans._fix(context)
def _divide(self, other, context):
"""Return (self // other, self % other), to context.prec precision.
Assumes that neither self nor other is a NaN, that self is not
infinite and that other is nonzero.
"""
sign = self._sign ^ other._sign
if other._isinfinity():
ideal_exp = self._exp
else:
ideal_exp = min(self._exp, other._exp)
expdiff = self.adjusted() - other.adjusted()
if not self or other._isinfinity() or expdiff <= -2:
return (_dec_from_triple(sign, '0', 0),
self._rescale(ideal_exp, context.rounding))
if expdiff <= context.prec:
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
if q < 10**context.prec:
return (_dec_from_triple(sign, str(q), 0),
_dec_from_triple(self._sign, str(r), ideal_exp))
# Here the quotient is too large to be representable
ans = context._raise_error(DivisionImpossible,
'quotient too large in //, % or divmod')
return ans, ans
def __rtruediv__(self, other, context=None):
"""Swaps self/other and returns __truediv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__truediv__(self, context=context)
__div__ = __truediv__
__rdiv__ = __rtruediv__
def __divmod__(self, other, context=None):
"""
Return (self // other, self % other)
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return (ans, ans)
sign = self._sign ^ other._sign
if self._isinfinity():
if other._isinfinity():
ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)')
return ans, ans
else:
return (_SignedInfinity[sign],
context._raise_error(InvalidOperation, 'INF % x'))
if not other:
if not self:
ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)')
return ans, ans
else:
return (context._raise_error(DivisionByZero, 'x // 0', sign),
context._raise_error(InvalidOperation, 'x % 0'))
quotient, remainder = self._divide(other, context)
remainder = remainder._fix(context)
return quotient, remainder
def __rdivmod__(self, other, context=None):
"""Swaps self/other and returns __divmod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__divmod__(self, context=context)
def __mod__(self, other, context=None):
"""
self % other
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
return context._raise_error(InvalidOperation, 'INF % x')
elif not other:
if self:
return context._raise_error(InvalidOperation, 'x % 0')
else:
return context._raise_error(DivisionUndefined, '0 % 0')
remainder = self._divide(other, context)[1]
remainder = remainder._fix(context)
return remainder
def __rmod__(self, other, context=None):
"""Swaps self/other and returns __mod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__mod__(self, context=context)
def remainder_near(self, other, context=None):
"""
Remainder nearest to 0: the result r satisfies abs(r) <= abs(other)/2.
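For example (illustrative):
>>> Decimal(18).remainder_near(Decimal(10))
Decimal('-2')
>>> Decimal(12).remainder_near(Decimal(10))
Decimal('2')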
"""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
# self == +/-infinity -> InvalidOperation
if self._isinfinity():
return context._raise_error(InvalidOperation,
'remainder_near(infinity, x)')
# other == 0 -> either InvalidOperation or DivisionUndefined
if not other:
if self:
return context._raise_error(InvalidOperation,
'remainder_near(x, 0)')
else:
return context._raise_error(DivisionUndefined,
'remainder_near(0, 0)')
# other = +/-infinity -> remainder = self
if other._isinfinity():
ans = Decimal(self)
return ans._fix(context)
# self = 0 -> remainder = self, with ideal exponent
ideal_exponent = min(self._exp, other._exp)
if not self:
ans = _dec_from_triple(self._sign, '0', ideal_exponent)
return ans._fix(context)
# catch most cases of large or small quotient
expdiff = self.adjusted() - other.adjusted()
if expdiff >= context.prec + 1:
# expdiff >= prec+1 => abs(self/other) > 10**prec
return context._raise_error(DivisionImpossible)
if expdiff <= -2:
# expdiff <= -2 => abs(self/other) < 0.1
ans = self._rescale(ideal_exponent, context.rounding)
return ans._fix(context)
# adjust both arguments to have the same exponent, then divide
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
# remainder is r*10**ideal_exponent; other is +/-op2.int *
# 10**ideal_exponent. Apply correction to ensure that
# abs(remainder) <= abs(other)/2
if 2*r + (q&1) > op2.int:
r -= op2.int
q += 1
if q >= 10**context.prec:
return context._raise_error(DivisionImpossible)
# result has same sign as self unless r is negative
sign = self._sign
if r < 0:
sign = 1-sign
r = -r
ans = _dec_from_triple(sign, str(r), ideal_exponent)
return ans._fix(context)
def __floordiv__(self, other, context=None):
"""self // other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if other._isinfinity():
return context._raise_error(InvalidOperation, 'INF // INF')
else:
return _SignedInfinity[self._sign ^ other._sign]
if not other:
if self:
return context._raise_error(DivisionByZero, 'x // 0',
self._sign ^ other._sign)
else:
return context._raise_error(DivisionUndefined, '0 // 0')
return self._divide(other, context)[0]
def __rfloordiv__(self, other, context=None):
"""Swaps self/other and returns __floordiv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__floordiv__(self, context=context)
def __float__(self):
"""Float representation."""
if self._isnan():
if self.is_snan():
raise ValueError("Cannot convert signaling NaN to float")
s = "-nan" if self._sign else "nan"
else:
s = str(self)
return float(s)
def __int__(self):
"""Converts self to an int, truncating if necessary."""
if self._is_special:
if self._isnan():
raise ValueError("Cannot convert NaN to integer")
elif self._isinfinity():
raise OverflowError("Cannot convert infinity to integer")
s = (-1)**self._sign
if self._exp >= 0:
return s*int(self._int)*10**self._exp
else:
return s*int(self._int[:self._exp] or '0')
__trunc__ = __int__
def real(self):
return self
real = property(real)
def imag(self):
return Decimal(0)
imag = property(imag)
def conjugate(self):
return self
def __complex__(self):
return complex(float(self))
def __long__(self):
"""Converts to a long.
Equivalent to long(int(self))
"""
return long(self.__int__())
def _fix_nan(self, context):
"""Decapitate the payload of a NaN to fit the context"""
payload = self._int
# maximum length of payload is precision if _clamp=0,
# precision-1 if _clamp=1.
max_payload_len = context.prec - context._clamp
if len(payload) > max_payload_len:
payload = payload[len(payload)-max_payload_len:].lstrip('0')
return _dec_from_triple(self._sign, payload, self._exp, True)
return Decimal(self)
def _fix(self, context):
"""Round if it is necessary to keep self within prec precision.
Rounds and fixes the exponent. Does not raise on a sNaN.
Arguments:
self - Decimal instance
context - context used.
"""
if self._is_special:
if self._isnan():
# decapitate payload if necessary
return self._fix_nan(context)
else:
# self is +/-Infinity; return unaltered
return Decimal(self)
# if self is zero then exponent should be between Etiny and
# Emax if _clamp==0, and between Etiny and Etop if _clamp==1.
Etiny = context.Etiny()
Etop = context.Etop()
if not self:
exp_max = [context.Emax, Etop][context._clamp]
new_exp = min(max(self._exp, Etiny), exp_max)
if new_exp != self._exp:
context._raise_error(Clamped)
return _dec_from_triple(self._sign, '0', new_exp)
else:
return Decimal(self)
# exp_min is the smallest allowable exponent of the result,
# equal to max(self.adjusted()-context.prec+1, Etiny)
exp_min = len(self._int) + self._exp - context.prec
if exp_min > Etop:
# overflow: exp_min > Etop iff self.adjusted() > Emax
ans = context._raise_error(Overflow, 'above Emax', self._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
self_is_subnormal = exp_min < Etiny
if self_is_subnormal:
exp_min = Etiny
# round if self has too many digits
if self._exp < exp_min:
digits = len(self._int) + self._exp - exp_min
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp_min-1)
digits = 0
rounding_method = self._pick_rounding_function[context.rounding]
changed = rounding_method(self, digits)
coeff = self._int[:digits] or '0'
if changed > 0:
coeff = str(int(coeff)+1)
if len(coeff) > context.prec:
coeff = coeff[:-1]
exp_min += 1
# check whether the rounding pushed the exponent out of range
if exp_min > Etop:
ans = context._raise_error(Overflow, 'above Emax', self._sign)
else:
ans = _dec_from_triple(self._sign, coeff, exp_min)
# raise the appropriate signals, taking care to respect
# the precedence described in the specification
if changed and self_is_subnormal:
context._raise_error(Underflow)
if self_is_subnormal:
context._raise_error(Subnormal)
if changed:
context._raise_error(Inexact)
context._raise_error(Rounded)
if not ans:
# raise Clamped on underflow to 0
context._raise_error(Clamped)
return ans
if self_is_subnormal:
context._raise_error(Subnormal)
# fold down if _clamp == 1 and self has too few digits
if context._clamp == 1 and self._exp > Etop:
context._raise_error(Clamped)
self_padded = self._int + '0'*(self._exp - Etop)
return _dec_from_triple(self._sign, self_padded, Etop)
# here self was representable to begin with; return unchanged
return Decimal(self)
# for each of the rounding functions below:
# self is a finite, nonzero Decimal
# prec is an integer satisfying 0 <= prec < len(self._int)
#
# each function returns either -1, 0, or 1, as follows:
# 1 indicates that self should be rounded up (away from zero)
# 0 indicates that self should be truncated, and that all the
# digits to be truncated are zeros (so the value is unchanged)
# -1 indicates that there are nonzero digits to be truncated
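# For example (illustrative): with self._int == '102' and prec == 2,
# _round_down returns -1 (the nonzero digit '2' would be discarded),
# while with self._int == '100' it returns 0 (only a zero is discarded).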
def _round_down(self, prec):
"""Also known as round-towards-0, truncate."""
if _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_up(self, prec):
"""Rounds away from 0."""
return -self._round_down(prec)
def _round_half_up(self, prec):
"""Rounds 5 up (away from 0)"""
if self._int[prec] in '56789':
return 1
elif _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_half_down(self, prec):
"""Round 5 down"""
if _exact_half(self._int, prec):
return -1
else:
return self._round_half_up(prec)
def _round_half_even(self, prec):
"""Round 5 to even, rest to nearest."""
if _exact_half(self._int, prec) and \
(prec == 0 or self._int[prec-1] in '02468'):
return -1
else:
return self._round_half_up(prec)
def _round_ceiling(self, prec):
"""Rounds up (not away from 0 if negative.)"""
if self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_floor(self, prec):
"""Rounds down (not towards 0 if negative)"""
if not self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_05up(self, prec):
"""Round down unless digit prec-1 is 0 or 5."""
if prec and self._int[prec-1] not in '05':
return self._round_down(prec)
else:
return -self._round_down(prec)
_pick_rounding_function = dict(
ROUND_DOWN = _round_down,
ROUND_UP = _round_up,
ROUND_HALF_UP = _round_half_up,
ROUND_HALF_DOWN = _round_half_down,
ROUND_HALF_EVEN = _round_half_even,
ROUND_CEILING = _round_ceiling,
ROUND_FLOOR = _round_floor,
ROUND_05UP = _round_05up,
)
def fma(self, other, third, context=None):
"""Fused multiply-add.
Returns self*other+third with no rounding of the intermediate
product self*other.
self and other are multiplied together, with no rounding of
the result. The third operand is then added to the result,
and a single final rounding is performed.
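For example (illustrative):
>>> Decimal(2).fma(3, 5)
Decimal('11')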
"""
other = _convert_other(other, raiseit=True)
# compute product; raise InvalidOperation if either operand is
# a signaling NaN or if the product is zero times infinity.
if self._is_special or other._is_special:
if context is None:
context = getcontext()
if self._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', self)
if other._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', other)
if self._exp == 'n':
product = self
elif other._exp == 'n':
product = other
elif self._exp == 'F':
if not other:
return context._raise_error(InvalidOperation,
'INF * 0 in fma')
product = _SignedInfinity[self._sign ^ other._sign]
elif other._exp == 'F':
if not self:
return context._raise_error(InvalidOperation,
'0 * INF in fma')
product = _SignedInfinity[self._sign ^ other._sign]
else:
product = _dec_from_triple(self._sign ^ other._sign,
str(int(self._int) * int(other._int)),
self._exp + other._exp)
third = _convert_other(third, raiseit=True)
return product.__add__(third, context)
def _power_modulo(self, other, modulo, context=None):
"""Three argument version of __pow__"""
# if can't convert other and modulo to Decimal, raise
# TypeError; there's no point returning NotImplemented (no
# equivalent of __rpow__ for three argument pow)
other = _convert_other(other, raiseit=True)
modulo = _convert_other(modulo, raiseit=True)
if context is None:
context = getcontext()
# deal with NaNs: if there are any sNaNs then first one wins,
# (i.e. behaviour for NaNs is identical to that of fma)
self_is_nan = self._isnan()
other_is_nan = other._isnan()
modulo_is_nan = modulo._isnan()
if self_is_nan or other_is_nan or modulo_is_nan:
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if modulo_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
modulo)
if self_is_nan:
return self._fix_nan(context)
if other_is_nan:
return other._fix_nan(context)
return modulo._fix_nan(context)
# check inputs: we apply same restrictions as Python's pow()
if not (self._isinteger() and
other._isinteger() and
modulo._isinteger()):
return context._raise_error(InvalidOperation,
'pow() 3rd argument not allowed '
'unless all arguments are integers')
if other < 0:
return context._raise_error(InvalidOperation,
'pow() 2nd argument cannot be '
'negative when 3rd argument specified')
if not modulo:
return context._raise_error(InvalidOperation,
'pow() 3rd argument cannot be 0')
# additional restriction for decimal: the modulus must be less
# than 10**prec in absolute value
if modulo.adjusted() >= context.prec:
return context._raise_error(InvalidOperation,
'insufficient precision: pow() 3rd '
'argument must not have more than '
'precision digits')
# define 0**0 == NaN, for consistency with two-argument pow
# (even though it hurts!)
if not other and not self:
return context._raise_error(InvalidOperation,
'at least one of pow() 1st argument '
'and 2nd argument must be nonzero; '
'0**0 is not defined')
# compute sign of result
if other._iseven():
sign = 0
else:
sign = self._sign
# convert modulo to a Python integer, and self and other to
# Decimal integers (i.e. force their exponents to be >= 0)
modulo = abs(int(modulo))
base = _WorkRep(self.to_integral_value())
exponent = _WorkRep(other.to_integral_value())
# compute result using integer pow()
base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo
for i in xrange(exponent.exp):
base = pow(base, 10, modulo)
base = pow(base, exponent.int, modulo)
return _dec_from_triple(sign, str(base), 0)
def _power_exact(self, other, p):
"""Attempt to compute self**other exactly.
Given Decimals self and other and an integer p, attempt to
compute an exact result for the power self**other, with p
digits of precision. Return None if self**other is not
exactly representable in p digits.
Assumes that elimination of special cases has already been
performed: self and other must both be nonspecial; self must
be positive and not numerically equal to 1; other must be
nonzero. For efficiency, other._exp should not be too large,
so that 10**abs(other._exp) is a feasible calculation."""
# In the comments below, we write x for the value of self and y for the
# value of other. Write x = xc*10**xe and abs(y) = yc*10**ye, with xc
# and yc positive integers not divisible by 10.
# The main purpose of this method is to identify the *failure*
# of x**y to be exactly representable with as little effort as
# possible. So we look for cheap and easy tests that
# eliminate the possibility of x**y being exact. Only if all
# these tests are passed do we go on to actually compute x**y.
# Here's the main idea. Express y as a rational number m/n, with m and
# n relatively prime and n>0. Then for x**y to be exactly
# representable (at *any* precision), xc must be the nth power of a
# positive integer and xe must be divisible by n. If y is negative
# then additionally xc must be a power of either 2 or 5, hence a power
# of 2**n or 5**n.
#
# There's a limit to how small |y| can be: if y=m/n as above
# then:
#
# (1) if xc != 1 then for the result to be representable we
# need xc**(1/n) >= 2, and hence also xc**|y| >= 2. So
# if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <=
# 2**(1/|y|), hence xc**|y| < 2 and the result is not
# representable.
#
# (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1. Hence if
# |y| < 1/|xe| then the result is not representable.
#
# Note that since x is not equal to 1, at least one of (1) and
# (2) must apply. Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) <
# 10**-ye iff len(str(|yc|*nbits(xc))) <= -ye.
#
# There's also a limit to how large y can be, at least if it's
# positive: the normalized result will have coefficient xc**y,
# so if it's representable then xc**y < 10**p, and y <
# p/log10(xc). Hence if y*log10(xc) >= p then the result is
# not exactly representable.
# if len(str(abs(yc*xe))) <= -ye then abs(yc*xe) < 10**-ye,
# so |y| < 1/|xe| and the result is not representable.
# Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y|
# < 1/nbits(xc).
x = _WorkRep(self)
xc, xe = x.int, x.exp
while xc % 10 == 0:
xc //= 10
xe += 1
y = _WorkRep(other)
yc, ye = y.int, y.exp
while yc % 10 == 0:
yc //= 10
ye += 1
# case where xc == 1: result is 10**(xe*y), with xe*y
# required to be an integer
if xc == 1:
xe *= yc
# result is now 10**(xe * 10**ye); xe * 10**ye must be integral
while xe % 10 == 0:
xe //= 10
ye += 1
if ye < 0:
return None
exponent = xe * 10**ye
if y.sign == 1:
exponent = -exponent
# if other is a nonnegative integer, use ideal exponent
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(exponent-ideal_exponent, p-1)
else:
zeros = 0
return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros)
# case where y is negative: xc must be either a power
# of 2 or a power of 5.
if y.sign == 1:
last_digit = xc % 10
if last_digit in (2,4,6,8):
# quick test for power of 2
if xc & -xc != xc:
return None
# now xc is a power of 2; e is its exponent
e = _nbits(xc)-1
# We now have:
#
# x = 2**e * 10**xe, e > 0, and y < 0.
#
# The exact result is:
#
# x**y = 5**(-e*y) * 10**(e*y + xe*y)
#
# provided that both e*y and xe*y are integers. Note that if
# 5**(-e*y) >= 10**p, then the result can't be expressed
# exactly with p digits of precision.
#
# Using the above, we can guard against large values of ye.
# 93/65 is an upper bound for log(10)/log(5), so if
#
# ye >= len(str(93*p//65))
#
# then
#
# -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5),
#
# so 5**(-e*y) >= 10**p, and the coefficient of the result
# can't be expressed in p digits.
# emax >= largest e such that 5**e < 10**p.
emax = p*93//65
if ye >= len(str(emax)):
return None
# Find -e*y and -xe*y; both must be integers
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 5**e
elif last_digit == 5:
# e >= log_5(xc) if xc is a power of 5; we have
# equality all the way up to xc=5**2658
e = _nbits(xc)*28//65
xc, remainder = divmod(5**e, xc)
if remainder:
return None
while xc % 5 == 0:
xc //= 5
e -= 1
# Guard against large values of ye, using the same logic as in
# the 'xc is a power of 2' branch. 10/3 is an upper bound for
# log(10)/log(2).
emax = p*10//3
if ye >= len(str(emax)):
return None
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 2**e
else:
return None
if xc >= 10**p:
return None
xe = -e-xe
return _dec_from_triple(0, str(xc), xe)
# now y is positive; find m and n such that y = m/n
if ye >= 0:
m, n = yc*10**ye, 1
else:
if xe != 0 and len(str(abs(yc*xe))) <= -ye:
return None
xc_bits = _nbits(xc)
if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye:
return None
m, n = yc, 10**(-ye)
while m % 2 == n % 2 == 0:
m //= 2
n //= 2
while m % 5 == n % 5 == 0:
m //= 5
n //= 5
# compute nth root of xc*10**xe
if n > 1:
# if 1 < xc < 2**n then xc isn't an nth power
if xc != 1 and xc_bits <= n:
return None
xe, rem = divmod(xe, n)
if rem != 0:
return None
# compute nth root of xc using Newton's method
a = 1L << -(-_nbits(xc)//n) # initial estimate
while True:
q, r = divmod(xc, a**(n-1))
if a <= q:
break
else:
a = (a*(n-1) + q)//n
if not (a == q and r == 0):
return None
xc = a
# now xc*10**xe is the nth root of the original xc*10**xe
# compute mth power of xc*10**xe
# if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m >
# 10**p and the result is not representable.
if xc > 1 and m > p*100//_log10_lb(xc):
return None
xc = xc**m
xe *= m
if xc > 10**p:
return None
# by this point the result *is* exactly representable
# adjust the exponent to get as close as possible to the ideal
# exponent, if necessary
str_xc = str(xc)
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(xe-ideal_exponent, p-len(str_xc))
else:
zeros = 0
return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros)
def __pow__(self, other, modulo=None, context=None):
"""Return self ** other [ % modulo].
With two arguments, compute self**other.
With three arguments, compute (self**other) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- other must be nonnegative
- either self or other (or both) must be nonzero
- modulo must be nonzero and must have at most p digits,
where p is the context precision.
If any of these restrictions is violated the InvalidOperation
flag is raised.
The result of pow(self, other, modulo) is identical to the
result that would be obtained by computing (self**other) %
modulo with unbounded precision, but is computed more
efficiently. It is always exact.
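For example (illustrative):
>>> pow(Decimal(2), Decimal(10), Decimal(1000))
Decimal('24')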
"""
if modulo is not None:
return self._power_modulo(other, modulo, context)
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
# either argument is a NaN => result is NaN
ans = self._check_nans(other, context)
if ans:
return ans
# 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity)
if not other:
if not self:
return context._raise_error(InvalidOperation, '0 ** 0')
else:
return _One
# result has sign 1 iff self._sign is 1 and other is an odd integer
result_sign = 0
if self._sign == 1:
if other._isinteger():
if not other._iseven():
result_sign = 1
else:
# -ve**noninteger = NaN
# (-0)**noninteger = 0**noninteger
if self:
return context._raise_error(InvalidOperation,
'x ** y with x negative and y not an integer')
# negate self, without doing any unwanted rounding
self = self.copy_negate()
# 0**(+ve or Inf)= 0; 0**(-ve or -Inf) = Infinity
if not self:
if other._sign == 0:
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0
if self._isinfinity():
if other._sign == 0:
return _SignedInfinity[result_sign]
else:
return _dec_from_triple(result_sign, '0', 0)
# 1**other = 1, but the choice of exponent and the flags
# depend on the exponent of self, and on whether other is a
# positive integer, a negative integer, or neither
if self == _One:
if other._isinteger():
# exp = max(self._exp*max(int(other), 0),
# 1-context.prec) but evaluating int(other) directly
# is dangerous until we know other is small (other
# could be 1e999999999)
if other._sign == 1:
multiplier = 0
elif other > context.prec:
multiplier = context.prec
else:
multiplier = int(other)
exp = self._exp * multiplier
if exp < 1-context.prec:
exp = 1-context.prec
context._raise_error(Rounded)
else:
context._raise_error(Inexact)
context._raise_error(Rounded)
exp = 1-context.prec
return _dec_from_triple(result_sign, '1'+'0'*-exp, exp)
# compute adjusted exponent of self
self_adj = self.adjusted()
# self ** infinity is infinity if self > 1, 0 if self < 1
# self ** -infinity is infinity if self < 1, 0 if self > 1
if other._isinfinity():
if (other._sign == 0) == (self_adj < 0):
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# from here on, the result always goes through the call
# to _fix at the end of this function.
ans = None
exact = False
# crude test to catch cases of extreme overflow/underflow. If
# log10(self)*other >= 10**bound and bound >= len(str(Emax))
# then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence
# self**other >= 10**(Emax+1), so overflow occurs. The test
# for underflow is similar.
bound = self._log10_exp_bound() + other.adjusted()
if (self_adj >= 0) == (other._sign == 0):
# self > 1 and other +ve, or self < 1 and other -ve
# possibility of overflow
if bound >= len(str(context.Emax)):
ans = _dec_from_triple(result_sign, '1', context.Emax+1)
else:
# self > 1 and other -ve, or self < 1 and other +ve
# possibility of underflow to 0
Etiny = context.Etiny()
if bound >= len(str(-Etiny)):
ans = _dec_from_triple(result_sign, '1', Etiny-1)
# try for an exact result with precision +1
if ans is None:
ans = self._power_exact(other, context.prec + 1)
if ans is not None:
if result_sign == 1:
ans = _dec_from_triple(1, ans._int, ans._exp)
exact = True
# usual case: inexact result, x**y computed directly as exp(y*log(x))
if ans is None:
p = context.prec
x = _WorkRep(self)
xc, xe = x.int, x.exp
y = _WorkRep(other)
yc, ye = y.int, y.exp
if y.sign == 1:
yc = -yc
# compute correctly rounded result: start with precision +3,
# then increase precision until result is unambiguously roundable
extra = 3
while True:
coeff, exp = _dpower(xc, xe, yc, ye, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(result_sign, str(coeff), exp)
# unlike exp, ln and log10, the power function respects the
# rounding mode; no need to switch to ROUND_HALF_EVEN here
# There's a difficulty here when 'other' is not an integer and
# the result is exact. In this case, the specification
# requires that the Inexact flag be raised (in spite of
# exactness), but since the result is exact _fix won't do this
# for us. (Correspondingly, the Underflow signal should also
# be raised for subnormal results.) We can't directly raise
# these signals either before or after calling _fix, since
# that would violate the precedence for signals. So we wrap
# the ._fix call in a temporary context, and reraise
# afterwards.
if exact and not other._isinteger():
# pad with zeros up to length context.prec+1 if necessary; this
# ensures that the Rounded signal will be raised.
if len(ans._int) <= context.prec:
expdiff = context.prec + 1 - len(ans._int)
ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff,
ans._exp-expdiff)
# create a copy of the current context, with cleared flags/traps
newcontext = context.copy()
newcontext.clear_flags()
for exception in _signals:
newcontext.traps[exception] = 0
# round in the new context
ans = ans._fix(newcontext)
# raise Inexact, and if necessary, Underflow
newcontext._raise_error(Inexact)
if newcontext.flags[Subnormal]:
newcontext._raise_error(Underflow)
# propagate signals to the original context; _fix could
# have raised any of Overflow, Underflow, Subnormal,
# Inexact, Rounded, Clamped. Overflow needs the correct
# arguments. Note that the order of the exceptions is
# important here.
if newcontext.flags[Overflow]:
context._raise_error(Overflow, 'above Emax', ans._sign)
for exception in Underflow, Subnormal, Inexact, Rounded, Clamped:
if newcontext.flags[exception]:
context._raise_error(exception)
else:
ans = ans._fix(context)
return ans
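# Usage sketch (illustrative; default context assumed):
#
#   Decimal(2) ** Decimal(8)            ->  Decimal('256')
#   pow(Decimal(2), Decimal(10), 1000)  ->  Decimal('24')   # (2**10) % 1000, exact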
def __rpow__(self, other, context=None):
"""Swaps self/other and returns __pow__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__pow__(self, context=context)
def normalize(self, context=None):
"""Normalize- strip trailing 0s, change anything equal to 0 to 0e0"""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
dup = self._fix(context)
if dup._isinfinity():
return dup
if not dup:
return _dec_from_triple(dup._sign, '0', 0)
exp_max = [context.Emax, context.Etop()][context._clamp]
end = len(dup._int)
exp = dup._exp
while dup._int[end-1] == '0' and exp < exp_max:
exp += 1
end -= 1
return _dec_from_triple(dup._sign, dup._int[:end], exp)
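# Example (illustrative; default context assumed):
#
#   Decimal('120.00').normalize()  ->  Decimal('1.2E+2')
#   Decimal('-0.00').normalize()   ->  Decimal('-0')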
def quantize(self, exp, rounding=None, context=None, watchexp=True):
"""Quantize self so its exponent is the same as that of exp.
Similar to self._rescale(exp._exp) but with error checking.
"""
exp = _convert_other(exp, raiseit=True)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special or exp._is_special:
ans = self._check_nans(exp, context)
if ans:
return ans
if exp._isinfinity() or self._isinfinity():
if exp._isinfinity() and self._isinfinity():
return Decimal(self) # if both are inf, it is OK
return context._raise_error(InvalidOperation,
'quantize with one INF')
# if we're not watching exponents, do a simple rescale
if not watchexp:
ans = self._rescale(exp._exp, rounding)
# raise Inexact and Rounded where appropriate
if ans._exp > self._exp:
context._raise_error(Rounded)
if ans != self:
context._raise_error(Inexact)
return ans
# exp._exp should be between Etiny and Emax
if not (context.Etiny() <= exp._exp <= context.Emax):
return context._raise_error(InvalidOperation,
'target exponent out of bounds in quantize')
if not self:
ans = _dec_from_triple(self._sign, '0', exp._exp)
return ans._fix(context)
self_adjusted = self.adjusted()
if self_adjusted > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if self_adjusted - exp._exp + 1 > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
ans = self._rescale(exp._exp, rounding)
if ans.adjusted() > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if len(ans._int) > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
# raise appropriate flags
if ans and ans.adjusted() < context.Emin:
context._raise_error(Subnormal)
if ans._exp > self._exp:
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
# call to fix takes care of any necessary folddown, and
# signals Clamped if necessary
ans = ans._fix(context)
return ans
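# Example (illustrative; default context, prec=28):
#
#   Decimal('1.41421356').quantize(Decimal('1.000'))  ->  Decimal('1.414')
#   Decimal('1.41421356').quantize(Decimal('1e-30'))  ->  raises
#       InvalidOperation: the result would need more than prec digits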
def same_quantum(self, other):
"""Return True if self and other have the same exponent; otherwise
return False.
If either operand is a special value, the following rules are used:
* return True if both operands are infinities
* return True if both operands are NaNs
* otherwise, return False.
"""
other = _convert_other(other, raiseit=True)
if self._is_special or other._is_special:
return (self.is_nan() and other.is_nan() or
self.is_infinite() and other.is_infinite())
return self._exp == other._exp
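# Example:
#
#   Decimal('2.17').same_quantum(Decimal('0.01'))   ->  True
#   Decimal('2.17').same_quantum(Decimal('0.001'))  ->  False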
def _rescale(self, exp, rounding):
"""Rescale self so that the exponent is exp, either by padding with zeros
or by truncating digits, using the given rounding mode.
Specials are returned without change. This operation is
quiet: it raises no flags, and uses no information from the
context.
exp = exp to scale to (an integer)
rounding = rounding mode
"""
if self._is_special:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', exp)
if self._exp >= exp:
# pad answer with zeros if necessary
return _dec_from_triple(self._sign,
self._int + '0'*(self._exp - exp), exp)
# too many digits; round and lose data. If self.adjusted() <
# exp-1, replace self by 10**(exp-1) before rounding
digits = len(self._int) + self._exp - exp
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp-1)
digits = 0
this_function = self._pick_rounding_function[rounding]
changed = this_function(self, digits)
coeff = self._int[:digits] or '0'
if changed == 1:
coeff = str(int(coeff)+1)
return _dec_from_triple(self._sign, coeff, exp)
def _round(self, places, rounding):
"""Round a nonzero, nonspecial Decimal to a fixed number of
significant figures, using the given rounding mode.
Infinities, NaNs and zeros are returned unaltered.
This operation is quiet: it raises no flags, and uses no
information from the context.
"""
if places <= 0:
raise ValueError("argument should be at least 1 in _round")
if self._is_special or not self:
return Decimal(self)
ans = self._rescale(self.adjusted()+1-places, rounding)
# it can happen that the rescale alters the adjusted exponent;
# for example when rounding 99.97 to 3 significant figures.
# When this happens we end up with an extra 0 at the end of
# the number; a second rescale fixes this.
if ans.adjusted() != self.adjusted():
ans = ans._rescale(ans.adjusted()+1-places, rounding)
return ans
def to_integral_exact(self, rounding=None, context=None):
"""Rounds to a nearby integer.
If no rounding mode is specified, take the rounding mode from
the context. This method raises the Rounded and Inexact flags
when appropriate.
See also: to_integral_value, which does exactly the same as
this method except that it doesn't raise Inexact or Rounded.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', 0)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
ans = self._rescale(0, rounding)
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
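# Example (default rounding is ROUND_HALF_EVEN):
#
#   Decimal('2.5').to_integral_exact()  ->  Decimal('2'); the Inexact
#   and Rounded flags are set on the context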
def to_integral_value(self, rounding=None, context=None):
"""Rounds to the nearest integer, without raising inexact, rounded."""
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
else:
return self._rescale(0, rounding)
# the method name changed, but we also provide the old one for compatibility
to_integral = to_integral_value
def sqrt(self, context=None):
"""Return the square root of self."""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() and self._sign == 0:
return Decimal(self)
if not self:
# exponent = self._exp // 2. sqrt(-0) = -0
ans = _dec_from_triple(self._sign, '0', self._exp // 2)
return ans._fix(context)
if self._sign == 1:
return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')
# At this point self represents a positive number. Let p be
# the desired precision and express self in the form c*100**e
# with c a positive real number and e an integer, c and e
# being chosen so that 100**(p-1) <= c < 100**p. Then the
# (exact) square root of self is sqrt(c)*10**e, and 10**(p-1)
# <= sqrt(c) < 10**p, so the closest representable Decimal at
# precision p is n*10**e where n = round_half_even(sqrt(c)),
# the closest integer to sqrt(c) with the even integer chosen
# in the case of a tie.
#
# To ensure correct rounding in all cases, we use the
# following trick: we compute the square root to an extra
# place (precision p+1 instead of precision p), rounding down.
# Then, if the result is inexact and its last digit is 0 or 5,
# we increase the last digit to 1 or 6 respectively; if it's
# exact we leave the last digit alone. Now the final round to
# p places (or fewer in the case of underflow) will round
# correctly and raise the appropriate flags.
# use an extra digit of precision
prec = context.prec+1
# write argument in the form c*100**e where e = self._exp//2
# is the 'ideal' exponent, to be used if the square root is
# exactly representable. l is the number of 'digits' of c in
# base 100, so that 100**(l-1) <= c < 100**l.
op = _WorkRep(self)
e = op.exp >> 1
if op.exp & 1:
c = op.int * 10
l = (len(self._int) >> 1) + 1
else:
c = op.int
l = len(self._int)+1 >> 1
# rescale so that c has exactly prec base 100 'digits'
shift = prec-l
if shift >= 0:
c *= 100**shift
exact = True
else:
c, remainder = divmod(c, 100**-shift)
exact = not remainder
e -= shift
# find n = floor(sqrt(c)) using Newton's method
n = 10**prec
while True:
q = c//n
if n <= q:
break
else:
n = n + q >> 1
exact = exact and n*n == c
if exact:
# result is exact; rescale to use ideal exponent e
if shift >= 0:
# assert n % 10**shift == 0
n //= 10**shift
else:
n *= 10**-shift
e += shift
else:
# result is not exact; fix last digit as described above
if n % 5 == 0:
n += 1
ans = _dec_from_triple(0, str(n), e)
# round, and fit to current context
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
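# Example (illustrative; default context, prec=28):
#
#   Decimal(2).sqrt()       ->  Decimal('1.414213562373095048801688724')
#   Decimal('1.44').sqrt()  ->  Decimal('1.2')   # exact; ideal exponent used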
def max(self, other, context=None):
"""Returns the larger value.
Like max(self, other), except that a quiet NaN compared with a
number yields the number (an sNaN signals). The result is rounded.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then
# the number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
# If both operands are finite and equal in numerical value
# then an ordering is applied:
#
# If the signs differ then max returns the operand with the
# positive sign and min returns the operand with the negative sign
#
# If the signs are the same then the exponent is used to select
# the result. This is exactly the ordering used in compare_total.
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min(self, other, context=None):
"""Returns the smaller value.
Like min(self, other), except that a quiet NaN compared with a
number yields the number (an sNaN signals). The result is rounded.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then
# the number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
def _isinteger(self):
"""Returns whether self is an integer"""
if self._is_special:
return False
if self._exp >= 0:
return True
rest = self._int[self._exp:]
return rest == '0'*len(rest)
def _iseven(self):
"""Returns True if self is even. Assumes self is an integer."""
if not self or self._exp > 0:
return True
return self._int[-1+self._exp] in '02468'
def adjusted(self):
"""Return the adjusted exponent of self"""
try:
return self._exp + len(self._int) - 1
# If NaN or Infinity, self._exp is string
except TypeError:
return 0
def canonical(self, context=None):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
"""
return self
def compare_signal(self, other, context=None):
"""Compares self to the other operand numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
"""
other = _convert_other(other, raiseit=True)
ans = self._compare_check_nans(other, context)
if ans:
return ans
return self.compare(other, context=context)
def compare_total(self, other):
"""Compares self to other using the abstract representations.
This is not like the standard compare, which uses the numerical
values. Note that a total ordering is defined for all possible abstract
representations.
"""
other = _convert_other(other, raiseit=True)
# if one is negative and the other is positive, it's easy
if self._sign and not other._sign:
return _NegativeOne
if not self._sign and other._sign:
return _One
sign = self._sign
# let's handle both NaN types
self_nan = self._isnan()
other_nan = other._isnan()
if self_nan or other_nan:
if self_nan == other_nan:
# compare payloads as though they're integers
self_key = len(self._int), self._int
other_key = len(other._int), other._int
if self_key < other_key:
if sign:
return _One
else:
return _NegativeOne
if self_key > other_key:
if sign:
return _NegativeOne
else:
return _One
return _Zero
if sign:
if self_nan == 1:
return _NegativeOne
if other_nan == 1:
return _One
if self_nan == 2:
return _NegativeOne
if other_nan == 2:
return _One
else:
if self_nan == 1:
return _One
if other_nan == 1:
return _NegativeOne
if self_nan == 2:
return _One
if other_nan == 2:
return _NegativeOne
if self < other:
return _NegativeOne
if self > other:
return _One
if self._exp < other._exp:
if sign:
return _One
else:
return _NegativeOne
if self._exp > other._exp:
if sign:
return _NegativeOne
else:
return _One
return _Zero
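# Example: numerically equal values are ordered by exponent, e.g.
#
#   Decimal('12.0').compare_total(Decimal('12'))  ->  Decimal('-1')
#   Decimal('12').compare_total(Decimal('12.0'))  ->  Decimal('1')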
def compare_total_mag(self, other):
"""Compares self to other using abstract repr., ignoring sign.
Like compare_total, but with the operands' signs ignored and assumed to be 0.
"""
other = _convert_other(other, raiseit=True)
s = self.copy_abs()
o = other.copy_abs()
return s.compare_total(o)
def copy_abs(self):
"""Returns a copy with the sign set to 0. """
return _dec_from_triple(0, self._int, self._exp, self._is_special)
def copy_negate(self):
"""Returns a copy with the sign inverted."""
if self._sign:
return _dec_from_triple(0, self._int, self._exp, self._is_special)
else:
return _dec_from_triple(1, self._int, self._exp, self._is_special)
def copy_sign(self, other):
"""Returns self with the sign of other."""
other = _convert_other(other, raiseit=True)
return _dec_from_triple(other._sign, self._int,
self._exp, self._is_special)
def exp(self, context=None):
"""Returns e ** self."""
if context is None:
context = getcontext()
# exp(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# exp(-Infinity) = 0
if self._isinfinity() == -1:
return _Zero
# exp(0) = 1
if not self:
return _One
# exp(Infinity) = Infinity
if self._isinfinity() == 1:
return Decimal(self)
# the result is now guaranteed to be inexact (the true
# mathematical result is transcendental). There's no need to
# raise Rounded and Inexact here---they'll always be raised as
# a result of the call to _fix.
p = context.prec
adj = self.adjusted()
# we only need to do any computation for quite a small range
# of adjusted exponents---for example, -29 <= adj <= 10 for
# the default context. For smaller exponent the result is
# indistinguishable from 1 at the given precision, while for
# larger exponent the result either overflows or underflows.
if self._sign == 0 and adj > len(str((context.Emax+1)*3)):
# overflow
ans = _dec_from_triple(0, '1', context.Emax+1)
elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)):
# underflow to 0
ans = _dec_from_triple(0, '1', context.Etiny()-1)
elif self._sign == 0 and adj < -p:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p)
elif self._sign == 1 and adj < -p-1:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '9'*(p+1), -p-1)
# general case
else:
op = _WorkRep(self)
c, e = op.int, op.exp
if op.sign == 1:
c = -c
# compute correctly rounded result: increase precision by
# 3 digits at a time until we get an unambiguously
# roundable result
extra = 3
while True:
coeff, exp = _dexp(c, e, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(0, str(coeff), exp)
# at this stage, ans should round correctly with *any*
# rounding mode, not just with ROUND_HALF_EVEN
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
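# Example (illustrative; default context, prec=28):
#
#   Decimal(1).exp()   ->  Decimal('2.718281828459045235360287471')
#   Decimal(-1).exp()  ->  Decimal('0.3678794411714423215955237702')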
def is_canonical(self):
"""Return True if self is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
"""
return True
def is_finite(self):
"""Return True if self is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
"""
return not self._is_special
def is_infinite(self):
"""Return True if self is infinite; otherwise return False."""
return self._exp == 'F'
def is_nan(self):
"""Return True if self is a qNaN or sNaN; otherwise return False."""
return self._exp in ('n', 'N')
def is_normal(self, context=None):
"""Return True if self is a normal number; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return context.Emin <= self.adjusted()
def is_qnan(self):
"""Return True if self is a quiet NaN; otherwise return False."""
return self._exp == 'n'
def is_signed(self):
"""Return True if self is negative; otherwise return False."""
return self._sign == 1
def is_snan(self):
"""Return True if self is a signaling NaN; otherwise return False."""
return self._exp == 'N'
def is_subnormal(self, context=None):
"""Return True if self is subnormal; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return self.adjusted() < context.Emin
def is_zero(self):
"""Return True if self is a zero; otherwise return False."""
return not self._is_special and self._int == '0'
def _ln_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.ln().
In other words, compute r such that self.ln() >= 10**r. Assumes
that self is finite and positive and that self != 1.
"""
# for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1
adj = self._exp + len(self._int) - 1
if adj >= 1:
# argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)
return len(str(adj*23//10)) - 1
if adj <= -2:
# argument <= 0.1
return len(str((-1-adj)*23//10)) - 1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(c)
return len(num) - len(den) - (num < den)
# adj == -1, 0.1 <= self < 1
return e + len(str(10**-e - c)) - 1
def ln(self, context=None):
"""Returns the natural (base e) logarithm of self."""
if context is None:
context = getcontext()
# ln(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# ln(0.0) == -Infinity
if not self:
return _NegativeInfinity
# ln(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# ln(1.0) == 0.0
if self == _One:
return _Zero
# ln(negative) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'ln of a negative value')
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision by 3
# until we get an unambiguously roundable result
places = p - self._ln_exp_bound() + 2 # at least p+3 places
while True:
coeff = _dlog(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
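# Example (illustrative; default context, prec=28):
#
#   Decimal(10).ln()  ->  Decimal('2.302585092994045684017991455')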
def _log10_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.log10().
In other words, find r such that self.log10() >= 10**r.
Assumes that self is finite and positive and that self != 1.
"""
# For x >= 10 or x < 0.1 we only need a bound on the integer
# part of log10(self), and this comes directly from the
# exponent of x. For 0.1 <= x <= 10 we use the inequalities
# 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| >
# (1-1/x)/2.31 > 0. If x < 1 then |log10(x)| > (1-x)/2.31 > 0
adj = self._exp + len(self._int) - 1
if adj >= 1:
# self >= 10
return len(str(adj))-1
if adj <= -2:
# self < 0.1
return len(str(-1-adj))-1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(231*c)
return len(num) - len(den) - (num < den) + 2
# adj == -1, 0.1 <= self < 1
num = str(10**-e-c)
return len(num) + e - (num < "231") - 1
def log10(self, context=None):
"""Returns the base 10 logarithm of self."""
if context is None:
context = getcontext()
# log10(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# log10(0.0) == -Infinity
if not self:
return _NegativeInfinity
# log10(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# log10(negative or -Infinity) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'log10 of a negative value')
# log10(10**n) = n
if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1):
# answer may need rounding
ans = Decimal(self._exp + len(self._int) - 1)
else:
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision
# until result is unambiguously roundable
places = p-self._log10_exp_bound()+2
while True:
coeff = _dlog10(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
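# Example: exact powers of ten take the shortcut above and give an
# exact integer result; everything else is correctly rounded, e.g.
#
#   Decimal('100').log10()    ->  Decimal('2')
#   Decimal('0.001').log10()  ->  Decimal('-3')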
def logb(self, context=None):
""" Returns the exponent of the magnitude of self's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of self (as though it were truncated
to a single digit while maintaining the value of that digit and
without limiting the resulting exponent).
"""
# logb(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
# logb(+/-Inf) = +Inf
if self._isinfinity():
return _Infinity
# logb(0) = -Inf, DivisionByZero
if not self:
return context._raise_error(DivisionByZero, 'logb(0)', 1)
# otherwise, simply return the adjusted exponent of self, as a
# Decimal. Note that no attempt is made to fit the result
# into the current context.
ans = Decimal(self.adjusted())
return ans._fix(context)
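# Example:
#
#   Decimal('250').logb()   ->  Decimal('2')
#   Decimal('0.03').logb()  ->  Decimal('-2')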
def _islogical(self):
"""Return True if self is a logical operand.
For being logical, it must be a finite number with a sign of 0,
an exponent of 0, and a coefficient whose digits must all be
either 0 or 1.
"""
if self._sign != 0 or self._exp != 0:
return False
for dig in self._int:
if dig not in '01':
return False
return True
def _fill_logical(self, context, opa, opb):
dif = context.prec - len(opa)
if dif > 0:
opa = '0'*dif + opa
elif dif < 0:
opa = opa[-context.prec:]
dif = context.prec - len(opb)
if dif > 0:
opb = '0'*dif + opb
elif dif < 0:
opb = opb[-context.prec:]
return opa, opb
def logical_and(self, other, context=None):
"""Applies an 'and' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# perform the operation, then strip leading zeros
result = "".join([str(int(a)&int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_invert(self, context=None):
"""Invert all its digits."""
if context is None:
context = getcontext()
return self.logical_xor(_dec_from_triple(0,'1'*context.prec,0),
context)
def logical_or(self, other, context=None):
"""Applies an 'or' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# perform the operation, then strip leading zeros
result = "".join([str(int(a)|int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_xor(self, other, context=None):
"""Applies an 'xor' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# perform the operation, then strip leading zeros
result = "".join([str(int(a)^int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
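# Example: the logical operations work digit-wise on operands whose
# digits are all 0 or 1, e.g.
#
#   Decimal('1100').logical_and(Decimal('1010'))  ->  Decimal('1000')
#   Decimal('1100').logical_or(Decimal('1010'))   ->  Decimal('1110')
#   Decimal('1100').logical_xor(Decimal('1010'))  ->  Decimal('110')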
def max_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then
# the number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then
# the number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
def next_minus(self, context=None):
"""Returns the largest representable number smaller than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == -1:
return _NegativeInfinity
if self._isinfinity() == 1:
return _dec_from_triple(0, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_FLOOR)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__sub__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_plus(self, context=None):
"""Returns the smallest representable number larger than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == 1:
return _Infinity
if self._isinfinity() == -1:
return _dec_from_triple(1, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_CEILING)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__add__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_toward(self, other, context=None):
"""Returns the number closest to self, in the direction towards other.
The result is the closest representable number to self
(excluding self) that is in the direction towards other,
unless both have the same value. If the two operands are
numerically equal, then the result is a copy of self with the
sign set to be the same as the sign of other.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
comparison = self._cmp(other)
if comparison == 0:
return self.copy_sign(other)
if comparison == -1:
ans = self.next_plus(context)
else: # comparison == 1
ans = self.next_minus(context)
# decide which flags to raise using value of ans
if ans._isinfinity():
context._raise_error(Overflow,
'Infinite result from next_toward',
ans._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
elif ans.adjusted() < context.Emin:
context._raise_error(Underflow)
context._raise_error(Subnormal)
context._raise_error(Inexact)
context._raise_error(Rounded)
# if precision == 1 then we don't raise Clamped for a
# result 0E-Etiny.
if not ans:
context._raise_error(Clamped)
return ans
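# Usage sketch (illustrative; a small precision keeps the output short):
#
#   Decimal(1).next_plus(Context(prec=3))                 ->  Decimal('1.01')
#   Decimal(1).next_toward(Decimal(2), Context(prec=3))   ->  Decimal('1.01')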
def number_class(self, context=None):
"""Returns an indication of the class of self.
The class is one of the following strings:
sNaN
NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
"""
if self.is_snan():
return "sNaN"
if self.is_qnan():
return "NaN"
inf = self._isinfinity()
if inf == 1:
return "+Infinity"
if inf == -1:
return "-Infinity"
if self.is_zero():
if self._sign:
return "-Zero"
else:
return "+Zero"
if context is None:
context = getcontext()
if self.is_subnormal(context=context):
if self._sign:
return "-Subnormal"
else:
return "+Subnormal"
# just a normal, regular, boring number, :)
if self._sign:
return "-Normal"
else:
return "+Normal"
def radix(self):
"""Just returns 10, as this is Decimal, :)"""
return Decimal(10)
def rotate(self, other, context=None):
"""Returns a rotated copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's rotate!
rotated = rotdig[torot:] + rotdig[:torot]
return _dec_from_triple(self._sign,
rotated.lstrip('0') or '0', self._exp)
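# Example (illustrative; assuming context.prec == 9):
#
#   Decimal('123456789').rotate(2)   ->  Decimal('345678912')
#   Decimal('123456789').rotate(-2)  ->  Decimal('891234567')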
def scaleb(self, other, context=None):
"""Returns self operand after adding the second value to its exp."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
liminf = -2 * (context.Emax + context.prec)
limsup = 2 * (context.Emax + context.prec)
if not (liminf <= int(other) <= limsup):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
d = _dec_from_triple(self._sign, self._int, self._exp + int(other))
d = d._fix(context)
return d
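# Example:
#
#   Decimal('7.50').scaleb(-2)  ->  Decimal('0.0750')
#   Decimal('7.50').scaleb(3)   ->  Decimal('7.50E+3')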
def shift(self, other, context=None):
"""Returns a shifted copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's shift!
if torot < 0:
shifted = rotdig[:torot]
else:
shifted = rotdig + '0'*torot
shifted = shifted[-context.prec:]
return _dec_from_triple(self._sign,
shifted.lstrip('0') or '0', self._exp)
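# Example (illustrative; assuming context.prec == 9): unlike rotate,
# digits shifted past either end are discarded:
#
#   Decimal('123456789').shift(2)   ->  Decimal('345678900')
#   Decimal('123456789').shift(-2)  ->  Decimal('1234567')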
# Support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) is Decimal:
return self # I'm immutable; therefore I am my own clone
return self.__class__(str(self))
def __deepcopy__(self, memo):
if type(self) is Decimal:
return self # My components are also immutable
return self.__class__(str(self))
# PEP 3101 support. The _localeconv keyword argument should be
# considered private: it's provided for ease of testing only.
def __format__(self, specifier, context=None, _localeconv=None):
"""Format a Decimal instance according to the given specifier.
The specifier should be a standard format specifier, with the
form described in PEP 3101. Formatting types 'e', 'E', 'f',
'F', 'g', 'G', 'n' and '%' are supported. If the formatting
type is omitted it defaults to 'g' or 'G', depending on the
value of context.capitals.
"""
# Note: PEP 3101 says that if the type is not present then
# there should be at least one digit after the decimal point.
# We take the liberty of ignoring this requirement for
# Decimal---it's presumably there to make sure that
# format(float, '') behaves similarly to str(float).
if context is None:
context = getcontext()
spec = _parse_format_specifier(specifier, _localeconv=_localeconv)
# special values don't care about the type or precision
if self._is_special:
sign = _format_sign(self._sign, spec)
body = str(self.copy_abs())
return _format_align(sign, body, spec)
# a type of None defaults to 'g' or 'G', depending on context
if spec['type'] is None:
spec['type'] = ['g', 'G'][context.capitals]
# if type is '%', adjust exponent of self accordingly
if spec['type'] == '%':
self = _dec_from_triple(self._sign, self._int, self._exp+2)
# round if necessary, taking rounding mode from the context
rounding = context.rounding
precision = spec['precision']
if precision is not None:
if spec['type'] in 'eE':
self = self._round(precision+1, rounding)
elif spec['type'] in 'fF%':
self = self._rescale(-precision, rounding)
elif spec['type'] in 'gG' and len(self._int) > precision:
self = self._round(precision, rounding)
# special case: zeros with a positive exponent can't be
# represented in fixed point; rescale them to 0e0.
if not self and self._exp > 0 and spec['type'] in 'fF%':
self = self._rescale(0, rounding)
# figure out placement of the decimal point
leftdigits = self._exp + len(self._int)
if spec['type'] in 'eE':
if not self and precision is not None:
dotplace = 1 - precision
else:
dotplace = 1
elif spec['type'] in 'fF%':
dotplace = leftdigits
elif spec['type'] in 'gG':
if self._exp <= 0 and leftdigits > -6:
dotplace = leftdigits
else:
dotplace = 1
# find digits before and after decimal point, and get exponent
if dotplace < 0:
intpart = '0'
fracpart = '0'*(-dotplace) + self._int
elif dotplace > len(self._int):
intpart = self._int + '0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace] or '0'
fracpart = self._int[dotplace:]
exp = leftdigits-dotplace
# done with the decimal-specific stuff; hand over the rest
# of the formatting to the _format_number function
return _format_number(self._sign, intpart, fracpart, exp, spec)
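# Example (illustrative; default context, so rounding is ROUND_HALF_EVEN):
#
#   format(Decimal('3.14159'), '.3f')  ->  '3.142'
#   format(Decimal('0.1'), '.2%')      ->  '10.00%'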
def _dec_from_triple(sign, coefficient, exponent, special=False):
"""Create a decimal instance directly, without any validation,
normalization (e.g. removal of leading zeros) or argument
conversion.
This function is for *internal use only*.
"""
self = object.__new__(Decimal)
self._sign = sign
self._int = coefficient
self._exp = exponent
self._is_special = special
return self
# Register Decimal as a kind of Number (an abstract base class).
# However, do not register it as Real (because Decimals are not
# interoperable with floats).
_numbers.Number.register(Decimal)
##### Context class #######################################################
class _ContextManager(object):
"""Context manager class to support localcontext().
Sets a copy of the supplied context in __enter__() and restores
the previous decimal context in __exit__()
"""
def __init__(self, new_context):
self.new_context = new_context.copy()
def __enter__(self):
self.saved_context = getcontext()
setcontext(self.new_context)
return self.new_context
def __exit__(self, t, v, tb):
setcontext(self.saved_context)
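# Usage sketch (illustrative): localcontext() returns one of these
# managers, so context changes are scoped to the with-block:
#
#   with localcontext() as ctx:
#       ctx.prec = 5
#       s = Decimal(1) / Decimal(7)   # computed with 5-digit precision
#   # the previous context is automatically restored here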
class Context(object):
"""Contains the context for a Decimal instance.
Contains:
prec - precision (for use in rounding, division, square roots..)
rounding - rounding type (how you round)
traps - If traps[exception] = 1, then the exception is
raised when it is caused. Otherwise, a value is
substituted in.
flags - When an exception is caused, flags[exception] is set.
(Whether or not the trap_enabler is set)
Should be reset by user of Decimal instance.
Emin - Minimum exponent
Emax - Maximum exponent
capitals - If 1, 1*10^1 is printed as 1E+1.
If 0, printed as 1e1
_clamp - If 1, change exponents if too high (Default 0)
"""
def __init__(self, prec=None, rounding=None,
traps=None, flags=None,
Emin=None, Emax=None,
capitals=None, _clamp=0,
_ignored_flags=None):
# Set defaults; for everything except flags and _ignored_flags,
# inherit from DefaultContext.
try:
dc = DefaultContext
except NameError:
pass
self.prec = prec if prec is not None else dc.prec
self.rounding = rounding if rounding is not None else dc.rounding
self.Emin = Emin if Emin is not None else dc.Emin
self.Emax = Emax if Emax is not None else dc.Emax
self.capitals = capitals if capitals is not None else dc.capitals
self._clamp = _clamp if _clamp is not None else dc._clamp
if _ignored_flags is None:
self._ignored_flags = []
else:
self._ignored_flags = _ignored_flags
if traps is None:
self.traps = dc.traps.copy()
elif not isinstance(traps, dict):
self.traps = dict((s, int(s in traps)) for s in _signals)
else:
self.traps = traps
if flags is None:
self.flags = dict.fromkeys(_signals, 0)
elif not isinstance(flags, dict):
self.flags = dict((s, int(s in flags)) for s in _signals)
else:
self.flags = flags
def __repr__(self):
"""Show the current context."""
s = []
s.append('Context(prec=%(prec)d, rounding=%(rounding)s, '
'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d'
% vars(self))
names = [f.__name__ for f, v in self.flags.items() if v]
s.append('flags=[' + ', '.join(names) + ']')
names = [t.__name__ for t, v in self.traps.items() if v]
s.append('traps=[' + ', '.join(names) + ']')
return ', '.join(s) + ')'
def clear_flags(self):
"""Reset all flags to zero"""
for flag in self.flags:
self.flags[flag] = 0
def _shallow_copy(self):
"""Returns a shallow copy from self."""
nc = Context(self.prec, self.rounding, self.traps,
self.flags, self.Emin, self.Emax,
self.capitals, self._clamp, self._ignored_flags)
return nc
def copy(self):
"""Returns a deep copy from self."""
nc = Context(self.prec, self.rounding, self.traps.copy(),
self.flags.copy(), self.Emin, self.Emax,
self.capitals, self._clamp, self._ignored_flags)
return nc
__copy__ = copy
def _raise_error(self, condition, explanation = None, *args):
"""Handles an error
If the flag is in _ignored_flags, returns the default response.
Otherwise it sets the flag and, if the corresponding
trap_enabler is set, raises the exception; if the trap is not
set, it returns the default value after setting the flag.
"""
error = _condition_map.get(condition, condition)
if error in self._ignored_flags:
# Don't touch the flag
return error().handle(self, *args)
self.flags[error] = 1
if not self.traps[error]:
# The errors define how to handle themselves.
return condition().handle(self, *args)
# Errors should only be risked on copies of the context
# self._ignored_flags = []
raise error(explanation)
def _ignore_all_flags(self):
"""Ignore all flags, if they are raised"""
return self._ignore_flags(*_signals)
def _ignore_flags(self, *flags):
"""Ignore the flags, if they are raised"""
# Do not mutate-- This way, copies of a context leave the original
# alone.
self._ignored_flags = (self._ignored_flags + list(flags))
return list(flags)
def _regard_flags(self, *flags):
"""Stop ignoring the flags, if they are raised"""
if flags and isinstance(flags[0], (tuple,list)):
flags = flags[0]
for flag in flags:
self._ignored_flags.remove(flag)
# We inherit object.__hash__, so we must deny this explicitly
__hash__ = None
def Etiny(self):
"""Returns Etiny (= Emin - prec + 1)"""
return int(self.Emin - self.prec + 1)
def Etop(self):
"""Returns maximum exponent (= Emax - prec + 1)"""
return int(self.Emax - self.prec + 1)
def _set_rounding(self, type):
"""Sets the rounding type.
Sets the rounding type, and returns the current (previous)
rounding type. Often used like:
context = context.copy()
# so you don't change the calling context
# if an error occurs in the middle.
rounding = context._set_rounding(ROUND_UP)
val = self.__sub__(other, context=context)
context._set_rounding(rounding)
This will make it round up for that operation.
"""
rounding = self.rounding
self.rounding = type
return rounding
def create_decimal(self, num='0'):
"""Creates a new Decimal instance but using self as context.
This method implements the to-number operation of the
IBM Decimal specification."""
if isinstance(num, basestring) and num != num.strip():
return self._raise_error(ConversionSyntax,
"no trailing or leading whitespace is "
"permitted.")
d = Decimal(num, context=self)
if d._isnan() and len(d._int) > self.prec - self._clamp:
return self._raise_error(ConversionSyntax,
"diagnostic info too long in NaN")
return d._fix(self)
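# Example (illustrative; DefaultContext rounding, ROUND_HALF_EVEN):
#
#   Context(prec=5).create_decimal('3.1415926')  ->  Decimal('3.1416')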
def create_decimal_from_float(self, f):
"""Creates a new Decimal instance from a float but rounding using self
as the context.
>>> context = Context(prec=5, rounding=ROUND_DOWN)
>>> context.create_decimal_from_float(3.1415926535897932)
Decimal('3.1415')
>>> context = Context(prec=5, traps=[Inexact])
>>> context.create_decimal_from_float(3.1415926535897932)
Traceback (most recent call last):
...
Inexact: None
"""
d = Decimal.from_float(f) # An exact conversion
return d._fix(self) # Apply the context rounding
# Methods
def abs(self, a):
"""Returns the absolute value of the operand.
If the operand is negative, the result is the same as using the minus
operation on the operand. Otherwise, the result is the same as using
the plus operation on the operand.
>>> ExtendedContext.abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.abs(Decimal('101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.__abs__(context=self)
def add(self, a, b):
"""Return the sum of the two operands.
>>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
Decimal('19.00')
>>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
Decimal('1.02E+4')
>>> ExtendedContext.add(1, Decimal(2))
Decimal('3')
>>> ExtendedContext.add(Decimal(8), 5)
Decimal('13')
>>> ExtendedContext.add(5, 5)
Decimal('10')
"""
a = _convert_other(a, raiseit=True)
r = a.__add__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def _apply(self, a):
return str(a._fix(self))
def canonical(self, a):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
>>> ExtendedContext.canonical(Decimal('2.50'))
Decimal('2.50')
"""
return a.canonical(context=self)
def compare(self, a, b):
"""Compares values numerically.
If the signs of the operands differ, a value representing each operand
('-1' if the operand is less than zero, '0' if the operand is zero or
negative zero, or '1' if the operand is greater than zero) is used in
place of that operand for the comparison instead of the actual
operand.
The comparison is then effected by subtracting the second operand from
the first and then returning a value according to the result of the
subtraction: '-1' if the result is less than zero, '0' if the result is
zero or negative zero, or '1' if the result is greater than zero.
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
Decimal('-1')
>>> ExtendedContext.compare(1, 2)
Decimal('-1')
>>> ExtendedContext.compare(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare(b, context=self)
def compare_signal(self, a, b):
"""Compares the values of the two operands numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
>>> c = ExtendedContext
>>> c.compare_signal(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> c.compare_signal(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> c.flags[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> c.compare_signal(Decimal('NaN'), Decimal('2.1'))
Decimal('NaN')
>>> print c.flags[InvalidOperation]
1
>>> c.flags[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> c.compare_signal(Decimal('sNaN'), Decimal('2.1'))
Decimal('NaN')
>>> print c.flags[InvalidOperation]
1
>>> c.compare_signal(-1, 2)
Decimal('-1')
>>> c.compare_signal(Decimal(-1), 2)
Decimal('-1')
>>> c.compare_signal(-1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_signal(b, context=self)
def compare_total(self, a, b):
"""Compares two operands using their abstract representation.
This is not like the standard compare, which uses the numerical
values. Note that a total ordering is defined for all possible abstract
representations.
>>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30'))
Decimal('0')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300'))
Decimal('1')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN'))
Decimal('-1')
>>> ExtendedContext.compare_total(1, 2)
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare_total(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_total(b)
def compare_total_mag(self, a, b):
"""Compares two operands using their abstract representation ignoring sign.
Like compare_total, but with the operands' signs ignored and assumed to be 0.
"""
a = _convert_other(a, raiseit=True)
return a.compare_total_mag(b)
def copy_abs(self, a):
"""Returns a copy of the operand with the sign set to 0.
>>> ExtendedContext.copy_abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.copy_abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_abs()
def copy_decimal(self, a):
"""Returns a copy of the decimal object.
>>> ExtendedContext.copy_decimal(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_decimal(Decimal('-1.00'))
Decimal('-1.00')
>>> ExtendedContext.copy_decimal(1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return Decimal(a)
def copy_negate(self, a):
"""Returns a copy of the operand with the sign inverted.
>>> ExtendedContext.copy_negate(Decimal('101.5'))
Decimal('-101.5')
>>> ExtendedContext.copy_negate(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.copy_negate(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_negate()
def copy_sign(self, a, b):
"""Copies the second operand's sign to the first one.
In detail, it returns a copy of the first operand with the sign
equal to the sign of the second operand.
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(1, -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(Decimal(1), -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(1, Decimal(-2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_sign(b)
def divide(self, a, b):
"""Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
Decimal('0.333333333')
>>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
Decimal('0.666666667')
>>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
Decimal('2.5')
>>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
Decimal('0.1')
>>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
Decimal('1')
>>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
Decimal('4.00')
>>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
Decimal('1.20')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
Decimal('10')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
Decimal('1000')
>>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
Decimal('1.20E+6')
>>> ExtendedContext.divide(5, 5)
Decimal('1')
>>> ExtendedContext.divide(Decimal(5), 5)
Decimal('1')
>>> ExtendedContext.divide(5, Decimal(5))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__div__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divide_int(self, a, b):
"""Divides two numbers and returns the integer part of the result.
>>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
Decimal('0')
>>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
Decimal('3')
>>> ExtendedContext.divide_int(10, 3)
Decimal('3')
>>> ExtendedContext.divide_int(Decimal(10), 3)
Decimal('3')
>>> ExtendedContext.divide_int(10, Decimal(3))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__floordiv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divmod(self, a, b):
"""Return (a // b, a % b).
>>> ExtendedContext.divmod(Decimal(8), Decimal(3))
(Decimal('2'), Decimal('2'))
>>> ExtendedContext.divmod(Decimal(8), Decimal(4))
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(Decimal(8), 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, Decimal(4))
(Decimal('2'), Decimal('0'))
"""
a = _convert_other(a, raiseit=True)
r = a.__divmod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def exp(self, a):
"""Returns e ** a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.exp(Decimal('-Infinity'))
Decimal('0')
>>> c.exp(Decimal('-1'))
Decimal('0.367879441')
>>> c.exp(Decimal('0'))
Decimal('1')
>>> c.exp(Decimal('1'))
Decimal('2.71828183')
>>> c.exp(Decimal('0.693147181'))
Decimal('2.00000000')
>>> c.exp(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.exp(10)
Decimal('22026.4658')
"""
a = _convert_other(a, raiseit=True)
return a.exp(context=self)
def fma(self, a, b, c):
"""Returns a multiplied by b, plus c.
The first two operands are multiplied together, using multiply,
the third operand is then added to the result of that
multiplication, using add, all with only one final rounding.
>>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7'))
Decimal('22')
>>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7'))
Decimal('-8')
>>> ExtendedContext.fma(Decimal('888565290'), Decimal('1557.96930'), Decimal('-86087.7578'))
Decimal('1.38435736E+12')
>>> ExtendedContext.fma(1, 3, 4)
Decimal('7')
>>> ExtendedContext.fma(1, Decimal(3), 4)
Decimal('7')
>>> ExtendedContext.fma(1, 3, Decimal(4))
Decimal('7')
"""
a = _convert_other(a, raiseit=True)
return a.fma(b, c, context=self)
def is_canonical(self, a):
"""Return True if the operand is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
>>> ExtendedContext.is_canonical(Decimal('2.50'))
True
"""
return a.is_canonical()
def is_finite(self, a):
"""Return True if the operand is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
>>> ExtendedContext.is_finite(Decimal('2.50'))
True
>>> ExtendedContext.is_finite(Decimal('-0.3'))
True
>>> ExtendedContext.is_finite(Decimal('0'))
True
>>> ExtendedContext.is_finite(Decimal('Inf'))
False
>>> ExtendedContext.is_finite(Decimal('NaN'))
False
>>> ExtendedContext.is_finite(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_finite()
def is_infinite(self, a):
"""Return True if the operand is infinite; otherwise return False.
>>> ExtendedContext.is_infinite(Decimal('2.50'))
False
>>> ExtendedContext.is_infinite(Decimal('-Inf'))
True
>>> ExtendedContext.is_infinite(Decimal('NaN'))
False
>>> ExtendedContext.is_infinite(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_infinite()
def is_nan(self, a):
"""Return True if the operand is a qNaN or sNaN;
otherwise return False.
>>> ExtendedContext.is_nan(Decimal('2.50'))
False
>>> ExtendedContext.is_nan(Decimal('NaN'))
True
>>> ExtendedContext.is_nan(Decimal('-sNaN'))
True
>>> ExtendedContext.is_nan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_nan()
def is_normal(self, a):
"""Return True if the operand is a normal number;
otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_normal(Decimal('2.50'))
True
>>> c.is_normal(Decimal('0.1E-999'))
False
>>> c.is_normal(Decimal('0.00'))
False
>>> c.is_normal(Decimal('-Inf'))
False
>>> c.is_normal(Decimal('NaN'))
False
>>> c.is_normal(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_normal(context=self)
def is_qnan(self, a):
"""Return True if the operand is a quiet NaN; otherwise return False.
>>> ExtendedContext.is_qnan(Decimal('2.50'))
False
>>> ExtendedContext.is_qnan(Decimal('NaN'))
True
>>> ExtendedContext.is_qnan(Decimal('sNaN'))
False
>>> ExtendedContext.is_qnan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_qnan()
def is_signed(self, a):
"""Return True if the operand is negative; otherwise return False.
>>> ExtendedContext.is_signed(Decimal('2.50'))
False
>>> ExtendedContext.is_signed(Decimal('-12'))
True
>>> ExtendedContext.is_signed(Decimal('-0'))
True
>>> ExtendedContext.is_signed(8)
False
>>> ExtendedContext.is_signed(-8)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_signed()
def is_snan(self, a):
"""Return True if the operand is a signaling NaN;
otherwise return False.
>>> ExtendedContext.is_snan(Decimal('2.50'))
False
>>> ExtendedContext.is_snan(Decimal('NaN'))
False
>>> ExtendedContext.is_snan(Decimal('sNaN'))
True
>>> ExtendedContext.is_snan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_snan()
def is_subnormal(self, a):
"""Return True if the operand is subnormal; otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_subnormal(Decimal('2.50'))
False
>>> c.is_subnormal(Decimal('0.1E-999'))
True
>>> c.is_subnormal(Decimal('0.00'))
False
>>> c.is_subnormal(Decimal('-Inf'))
False
>>> c.is_subnormal(Decimal('NaN'))
False
>>> c.is_subnormal(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_subnormal(context=self)
def is_zero(self, a):
"""Return True if the operand is a zero; otherwise return False.
>>> ExtendedContext.is_zero(Decimal('0'))
True
>>> ExtendedContext.is_zero(Decimal('2.50'))
False
>>> ExtendedContext.is_zero(Decimal('-0E+2'))
True
>>> ExtendedContext.is_zero(1)
False
>>> ExtendedContext.is_zero(0)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_zero()
def ln(self, a):
"""Returns the natural (base e) logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.ln(Decimal('0'))
Decimal('-Infinity')
>>> c.ln(Decimal('1.000'))
Decimal('0')
>>> c.ln(Decimal('2.71828183'))
Decimal('1.00000000')
>>> c.ln(Decimal('10'))
Decimal('2.30258509')
>>> c.ln(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.ln(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.ln(context=self)
def log10(self, a):
"""Returns the base 10 logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.log10(Decimal('0'))
Decimal('-Infinity')
>>> c.log10(Decimal('0.001'))
Decimal('-3')
>>> c.log10(Decimal('1.000'))
Decimal('0')
>>> c.log10(Decimal('2'))
Decimal('0.301029996')
>>> c.log10(Decimal('10'))
Decimal('1')
>>> c.log10(Decimal('70'))
Decimal('1.84509804')
>>> c.log10(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.log10(0)
Decimal('-Infinity')
>>> c.log10(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.log10(context=self)
def logb(self, a):
""" Returns the exponent of the magnitude of the operand's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of the operand (as though the
operand were truncated to a single digit while maintaining the
value of that digit and without limiting the resulting exponent).
>>> ExtendedContext.logb(Decimal('250'))
Decimal('2')
>>> ExtendedContext.logb(Decimal('2.50'))
Decimal('0')
>>> ExtendedContext.logb(Decimal('0.03'))
Decimal('-2')
>>> ExtendedContext.logb(Decimal('0'))
Decimal('-Infinity')
>>> ExtendedContext.logb(1)
Decimal('0')
>>> ExtendedContext.logb(10)
Decimal('1')
>>> ExtendedContext.logb(100)
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.logb(context=self)
def logical_and(self, a, b):
"""Applies the logical operation 'and' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010'))
Decimal('1000')
>>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10'))
Decimal('10')
>>> ExtendedContext.logical_and(110, 1101)
Decimal('100')
>>> ExtendedContext.logical_and(Decimal(110), 1101)
Decimal('100')
>>> ExtendedContext.logical_and(110, Decimal(1101))
Decimal('100')
"""
a = _convert_other(a, raiseit=True)
return a.logical_and(b, context=self)
def logical_invert(self, a):
"""Invert all the digits in the operand.
The operand must be a logical number.
>>> ExtendedContext.logical_invert(Decimal('0'))
Decimal('111111111')
>>> ExtendedContext.logical_invert(Decimal('1'))
Decimal('111111110')
>>> ExtendedContext.logical_invert(Decimal('111111111'))
Decimal('0')
>>> ExtendedContext.logical_invert(Decimal('101010101'))
Decimal('10101010')
>>> ExtendedContext.logical_invert(1101)
Decimal('111110010')
"""
a = _convert_other(a, raiseit=True)
return a.logical_invert(context=self)
def logical_or(self, a, b):
"""Applies the logical operation 'or' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010'))
Decimal('1110')
>>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10'))
Decimal('1110')
>>> ExtendedContext.logical_or(110, 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(Decimal(110), 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(110, Decimal(1101))
Decimal('1111')
"""
a = _convert_other(a, raiseit=True)
return a.logical_or(b, context=self)
def logical_xor(self, a, b):
"""Applies the logical operation 'xor' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010'))
Decimal('110')
>>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10'))
Decimal('1101')
>>> ExtendedContext.logical_xor(110, 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(Decimal(110), 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(110, Decimal(1101))
Decimal('1011')
"""
a = _convert_other(a, raiseit=True)
return a.logical_xor(b, context=self)
def max(self, a, b):
"""max compares two values numerically and returns the maximum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the maximum (closer to positive
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.max(Decimal('3'), Decimal('2'))
Decimal('3')
>>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.max(Decimal('1.0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max(1, 2)
Decimal('2')
>>> ExtendedContext.max(Decimal(1), 2)
Decimal('2')
>>> ExtendedContext.max(1, Decimal(2))
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.max(b, context=self)
def max_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10'))
Decimal('-10')
>>> ExtendedContext.max_mag(1, -2)
Decimal('-2')
>>> ExtendedContext.max_mag(Decimal(1), -2)
Decimal('-2')
>>> ExtendedContext.max_mag(1, Decimal(-2))
Decimal('-2')
"""
a = _convert_other(a, raiseit=True)
return a.max_mag(b, context=self)
def min(self, a, b):
"""min compares two values numerically and returns the minimum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the minimum (closer to negative
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.min(Decimal('3'), Decimal('2'))
Decimal('2')
>>> ExtendedContext.min(Decimal('-10'), Decimal('3'))
Decimal('-10')
>>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
Decimal('1.0')
>>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.min(1, 2)
Decimal('1')
>>> ExtendedContext.min(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.min(1, Decimal(29))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min(b, context=self)
def min_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2'))
Decimal('-2')
>>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN'))
Decimal('-3')
>>> ExtendedContext.min_mag(1, -2)
Decimal('1')
>>> ExtendedContext.min_mag(Decimal(1), -2)
Decimal('1')
>>> ExtendedContext.min_mag(1, Decimal(-2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min_mag(b, context=self)
def minus(self, a):
"""Minus corresponds to unary prefix minus in Python.
The operation is evaluated using the same rules as subtract; the
operation minus(a) is calculated as subtract('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.minus(Decimal('1.3'))
Decimal('-1.3')
>>> ExtendedContext.minus(Decimal('-1.3'))
Decimal('1.3')
>>> ExtendedContext.minus(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__neg__(context=self)
def multiply(self, a, b):
"""multiply multiplies two operands.
If either operand is a special value then the general rules apply.
Otherwise, the operands are multiplied together
('long multiplication'), resulting in a number which may be as long as
the sum of the lengths of the two operands.
>>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
Decimal('3.60')
>>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))
Decimal('21')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))
Decimal('0.72')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
Decimal('-0.0')
>>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
Decimal('4.28135971E+11')
>>> ExtendedContext.multiply(7, 7)
Decimal('49')
>>> ExtendedContext.multiply(Decimal(7), 7)
Decimal('49')
>>> ExtendedContext.multiply(7, Decimal(7))
Decimal('49')
"""
a = _convert_other(a, raiseit=True)
r = a.__mul__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def next_minus(self, a):
"""Returns the largest representable number smaller than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_minus(Decimal('1'))
Decimal('0.999999999')
>>> c.next_minus(Decimal('1E-1007'))
Decimal('0E-1007')
>>> ExtendedContext.next_minus(Decimal('-1.00000003'))
Decimal('-1.00000004')
>>> c.next_minus(Decimal('Infinity'))
Decimal('9.99999999E+999')
>>> c.next_minus(1)
Decimal('0.999999999')
"""
a = _convert_other(a, raiseit=True)
return a.next_minus(context=self)
def next_plus(self, a):
"""Returns the smallest representable number larger than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_plus(Decimal('1'))
Decimal('1.00000001')
>>> c.next_plus(Decimal('-1E-1007'))
Decimal('-0E-1007')
>>> ExtendedContext.next_plus(Decimal('-1.00000003'))
Decimal('-1.00000002')
>>> c.next_plus(Decimal('-Infinity'))
Decimal('-9.99999999E+999')
>>> c.next_plus(1)
Decimal('1.00000001')
"""
a = _convert_other(a, raiseit=True)
return a.next_plus(context=self)
def next_toward(self, a, b):
"""Returns the number closest to a, in direction towards b.
The result is the closest representable number from the first
operand (but not the first operand) that is in the direction
towards the second operand, unless the operands have the same
value.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.next_toward(Decimal('1'), Decimal('2'))
Decimal('1.00000001')
>>> c.next_toward(Decimal('-1E-1007'), Decimal('1'))
Decimal('-0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('0'))
Decimal('-1.00000002')
>>> c.next_toward(Decimal('1'), Decimal('0'))
Decimal('0.999999999')
>>> c.next_toward(Decimal('1E-1007'), Decimal('-100'))
Decimal('0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('-10'))
Decimal('-1.00000004')
>>> c.next_toward(Decimal('0.00'), Decimal('-0.0000'))
Decimal('-0.00')
>>> c.next_toward(0, 1)
Decimal('1E-1007')
>>> c.next_toward(Decimal(0), 1)
Decimal('1E-1007')
>>> c.next_toward(0, Decimal(1))
Decimal('1E-1007')
"""
a = _convert_other(a, raiseit=True)
return a.next_toward(b, context=self)
def normalize(self, a):
"""normalize reduces an operand to its simplest form.
Essentially a plus operation with all trailing zeros removed from the
result.
>>> ExtendedContext.normalize(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.normalize(Decimal('-2.0'))
Decimal('-2')
>>> ExtendedContext.normalize(Decimal('1.200'))
Decimal('1.2')
>>> ExtendedContext.normalize(Decimal('-120'))
Decimal('-1.2E+2')
>>> ExtendedContext.normalize(Decimal('120.00'))
Decimal('1.2E+2')
>>> ExtendedContext.normalize(Decimal('0.00'))
Decimal('0')
>>> ExtendedContext.normalize(6)
Decimal('6')
"""
a = _convert_other(a, raiseit=True)
return a.normalize(context=self)
def number_class(self, a):
"""Returns an indication of the class of the operand.
The class is one of the following strings:
-sNaN
-NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
>>> c = Context(ExtendedContext)
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.number_class(Decimal('Infinity'))
'+Infinity'
>>> c.number_class(Decimal('1E-10'))
'+Normal'
>>> c.number_class(Decimal('2.50'))
'+Normal'
>>> c.number_class(Decimal('0.1E-999'))
'+Subnormal'
>>> c.number_class(Decimal('0'))
'+Zero'
>>> c.number_class(Decimal('-0'))
'-Zero'
>>> c.number_class(Decimal('-0.1E-999'))
'-Subnormal'
>>> c.number_class(Decimal('-1E-10'))
'-Normal'
>>> c.number_class(Decimal('-2.50'))
'-Normal'
>>> c.number_class(Decimal('-Infinity'))
'-Infinity'
>>> c.number_class(Decimal('NaN'))
'NaN'
>>> c.number_class(Decimal('-NaN'))
'NaN'
>>> c.number_class(Decimal('sNaN'))
'sNaN'
>>> c.number_class(123)
'+Normal'
"""
a = _convert_other(a, raiseit=True)
return a.number_class(context=self)
def plus(self, a):
"""Plus corresponds to unary prefix plus in Python.
The operation is evaluated using the same rules as add; the
operation plus(a) is calculated as add('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.plus(Decimal('1.3'))
Decimal('1.3')
>>> ExtendedContext.plus(Decimal('-1.3'))
Decimal('-1.3')
>>> ExtendedContext.plus(-1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__pos__(context=self)
def power(self, a, b, modulo=None):
"""Raises a to the power of b, to modulo if given.
With two arguments, compute a**b. If a is negative then b
must be integral. The result will be inexact unless b is
integral and the result is finite and can be expressed exactly
in 'precision' digits.
With three arguments, compute (a**b) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- b must be nonnegative
- at least one of a or b must be nonzero
- modulo must be nonzero and have at most 'precision' digits
The result of pow(a, b, modulo) is identical to the result
that would be obtained by computing (a**b) % modulo with
unbounded precision, but is computed more efficiently. It is
always exact.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.power(Decimal('2'), Decimal('3'))
Decimal('8')
>>> c.power(Decimal('-2'), Decimal('3'))
Decimal('-8')
>>> c.power(Decimal('2'), Decimal('-3'))
Decimal('0.125')
>>> c.power(Decimal('1.7'), Decimal('8'))
Decimal('69.7575744')
>>> c.power(Decimal('10'), Decimal('0.301029996'))
Decimal('2.00000000')
>>> c.power(Decimal('Infinity'), Decimal('-1'))
Decimal('0')
>>> c.power(Decimal('Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('Infinity'), Decimal('1'))
Decimal('Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('-1'))
Decimal('-0')
>>> c.power(Decimal('-Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('-Infinity'), Decimal('1'))
Decimal('-Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('2'))
Decimal('Infinity')
>>> c.power(Decimal('0'), Decimal('0'))
Decimal('NaN')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('16'))
Decimal('11')
>>> c.power(Decimal('-3'), Decimal('7'), Decimal('16'))
Decimal('-11')
>>> c.power(Decimal('-3'), Decimal('8'), Decimal('16'))
Decimal('1')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('-16'))
Decimal('11')
>>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789'))
Decimal('11729830')
>>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729'))
Decimal('-0')
>>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537'))
Decimal('1')
>>> ExtendedContext.power(7, 7)
Decimal('823543')
>>> ExtendedContext.power(Decimal(7), 7)
Decimal('823543')
>>> ExtendedContext.power(7, Decimal(7), 2)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__pow__(b, modulo, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def quantize(self, a, b):
"""Returns a value equal to 'a' (rounded), having the exponent of 'b'.
The coefficient of the result is derived from that of the left-hand
operand. It may be rounded using the current rounding setting (if the
exponent is being increased), multiplied by a positive power of ten (if
the exponent is being decreased), or is unchanged (if the exponent is
already equal to that of the right-hand operand).
Unlike other operations, if the length of the coefficient after the
quantize operation would be greater than precision then an Invalid
operation condition is raised. This guarantees that, unless there is
an error condition, the exponent of the result of a quantize is always
equal to that of the right-hand operand.
Also unlike other operations, quantize will never raise Underflow, even
if the result is subnormal and inexact.
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
Decimal('2.170')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
Decimal('2.17')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
Decimal('2.2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
Decimal('2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
Decimal('0E+1')
>>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
Decimal('-Infinity')
>>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
Decimal('-0')
>>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
Decimal('-0E+5')
>>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
Decimal('217.0')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
Decimal('217')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
Decimal('2.2E+2')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
Decimal('2E+2')
>>> ExtendedContext.quantize(1, 2)
Decimal('1')
>>> ExtendedContext.quantize(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.quantize(1, Decimal(2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.quantize(b, context=self)
def radix(self):
"""Just returns 10, as this is Decimal, :)
>>> ExtendedContext.radix()
Decimal('10')
"""
return Decimal(10)
def remainder(self, a, b):
"""Returns the remainder from integer division.
The result is the residue of the dividend after the operation of
calculating integer division as described for divide-integer, rounded
to precision digits if necessary. The sign of the result, if
non-zero, is the same as that of the original dividend.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
Decimal('2.1')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
Decimal('1.0')
>>> ExtendedContext.remainder(22, 6)
Decimal('4')
>>> ExtendedContext.remainder(Decimal(22), 6)
Decimal('4')
>>> ExtendedContext.remainder(22, Decimal(6))
Decimal('4')
"""
a = _convert_other(a, raiseit=True)
r = a.__mod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def remainder_near(self, a, b):
"""Returns to be "a - b * n", where n is the integer nearest the exact
value of "x / b" (if two integers are equally near then the even one
is chosen). If the result is equal to 0 then its sign will be the
sign of a.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
Decimal('-0.9')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
Decimal('-2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
Decimal('-0.3')
>>> ExtendedContext.remainder_near(3, 11)
Decimal('3')
>>> ExtendedContext.remainder_near(Decimal(3), 11)
Decimal('3')
>>> ExtendedContext.remainder_near(3, Decimal(11))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
return a.remainder_near(b, context=self)
def rotate(self, a, b):
"""Returns a rotated copy of a, b times.
The coefficient of the result is a rotated copy of the digits in
the coefficient of the first operand. The number of places of
rotation is taken from the absolute value of the second operand,
with the rotation being to the left if the second operand is
positive or to the right otherwise.
>>> ExtendedContext.rotate(Decimal('34'), Decimal('8'))
Decimal('400000003')
>>> ExtendedContext.rotate(Decimal('12'), Decimal('9'))
Decimal('12')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2'))
Decimal('891234567')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2'))
Decimal('345678912')
>>> ExtendedContext.rotate(1333333, 1)
Decimal('13333330')
>>> ExtendedContext.rotate(Decimal(1333333), 1)
Decimal('13333330')
>>> ExtendedContext.rotate(1333333, Decimal(1))
Decimal('13333330')
"""
a = _convert_other(a, raiseit=True)
return a.rotate(b, context=self)
def same_quantum(self, a, b):
"""Returns True if the two operands have the same exponent.
The result is never affected by either the sign or the coefficient of
either operand.
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
False
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
True
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
False
>>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
True
>>> ExtendedContext.same_quantum(10000, -1)
True
>>> ExtendedContext.same_quantum(Decimal(10000), -1)
True
>>> ExtendedContext.same_quantum(10000, Decimal(-1))
True
"""
a = _convert_other(a, raiseit=True)
return a.same_quantum(b)
    def scaleb(self, a, b):
        """Returns the first operand after adding the second value to its exponent.
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2'))
Decimal('0.0750')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0'))
Decimal('7.50')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3'))
Decimal('7.50E+3')
>>> ExtendedContext.scaleb(1, 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(Decimal(1), 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(1, Decimal(4))
Decimal('1E+4')
"""
a = _convert_other(a, raiseit=True)
return a.scaleb(b, context=self)
def shift(self, a, b):
"""Returns a shifted copy of a, b times.
The coefficient of the result is a shifted copy of the digits
in the coefficient of the first operand. The number of places
to shift is taken from the absolute value of the second operand,
with the shift being to the left if the second operand is
positive or to the right otherwise. Digits shifted into the
coefficient are zeros.
>>> ExtendedContext.shift(Decimal('34'), Decimal('8'))
Decimal('400000000')
>>> ExtendedContext.shift(Decimal('12'), Decimal('9'))
Decimal('0')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2'))
Decimal('1234567')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2'))
Decimal('345678900')
>>> ExtendedContext.shift(88888888, 2)
Decimal('888888800')
>>> ExtendedContext.shift(Decimal(88888888), 2)
Decimal('888888800')
>>> ExtendedContext.shift(88888888, Decimal(2))
Decimal('888888800')
"""
a = _convert_other(a, raiseit=True)
return a.shift(b, context=self)
def sqrt(self, a):
"""Square root of a non-negative number to context precision.
If the result must be inexact, it is rounded using the round-half-even
algorithm.
>>> ExtendedContext.sqrt(Decimal('0'))
Decimal('0')
>>> ExtendedContext.sqrt(Decimal('-0'))
Decimal('-0')
>>> ExtendedContext.sqrt(Decimal('0.39'))
Decimal('0.624499800')
>>> ExtendedContext.sqrt(Decimal('100'))
Decimal('10')
>>> ExtendedContext.sqrt(Decimal('1'))
Decimal('1')
>>> ExtendedContext.sqrt(Decimal('1.0'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('1.00'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('7'))
Decimal('2.64575131')
>>> ExtendedContext.sqrt(Decimal('10'))
Decimal('3.16227766')
>>> ExtendedContext.sqrt(2)
Decimal('1.41421356')
>>> ExtendedContext.prec
9
"""
a = _convert_other(a, raiseit=True)
return a.sqrt(context=self)
def subtract(self, a, b):
"""Return the difference between the two operands.
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
Decimal('0.23')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
Decimal('0.00')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
Decimal('-0.77')
>>> ExtendedContext.subtract(8, 5)
Decimal('3')
>>> ExtendedContext.subtract(Decimal(8), 5)
Decimal('3')
>>> ExtendedContext.subtract(8, Decimal(5))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__sub__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def to_eng_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.to_eng_string(context=self)
def to_sci_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.__str__(context=self)
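    # Hedged doctest-style examples for the two conversions above; the values
    # follow from Decimal's string forms (engineering notation forces the
    # exponent to a multiple of 3):
    #   ExtendedContext.to_sci_string(Decimal('1E+1'))  -> '1E+1'
    #   ExtendedContext.to_eng_string(Decimal('1E+1'))  -> '10'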
def to_integral_exact(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting; Inexact and Rounded flags
are allowed in this operation. The rounding mode is taken from the
context.
>>> ExtendedContext.to_integral_exact(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_exact(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_exact(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_exact(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_exact(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_exact(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_exact(context=self)
def to_integral_value(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting, except that no flags will
be set. The rounding mode is taken from the context.
>>> ExtendedContext.to_integral_value(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_value(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_value(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_value(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_value(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_value(context=self)
# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
class _WorkRep(object):
__slots__ = ('sign','int','exp')
# sign: 0 or 1
# int: int or long
# exp: None, int, or string
def __init__(self, value=None):
if value is None:
self.sign = None
self.int = 0
self.exp = None
elif isinstance(value, Decimal):
self.sign = value._sign
self.int = int(value._int)
self.exp = value._exp
else:
# assert isinstance(value, tuple)
self.sign = value[0]
self.int = value[1]
self.exp = value[2]
def __repr__(self):
return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
__str__ = __repr__
def _normalize(op1, op2, prec = 0):
"""Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
if op1.exp < op2.exp:
tmp = op2
other = op1
else:
tmp = op1
other = op2
# Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).
# Then adding 10**exp to tmp has the same effect (after rounding)
# as adding any positive quantity smaller than 10**exp; similarly
# for subtraction. So if other is smaller than 10**exp we replace
# it with 10**exp. This avoids tmp.exp - other.exp getting too large.
tmp_len = len(str(tmp.int))
other_len = len(str(other.int))
exp = tmp.exp + min(-1, tmp_len - prec - 2)
if other_len + other.exp - 1 < exp:
other.int = 1
other.exp = exp
tmp.int *= 10 ** (tmp.exp - other.exp)
tmp.exp = other.exp
return op1, op2
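# Illustrative sketch of _normalize (values traced from the code above, not
# authoritative output):
#   op1 = _WorkRep(Decimal('1.23'))   # sign=0, int=123, exp=-2
#   op2 = _WorkRep(Decimal('4.5'))    # sign=0, int=45,  exp=-1
#   _normalize(op1, op2, prec=9)      # pads op2 to int=450, exp=-2
# Both coefficients now share exp=-2 and can be added digit-for-digit.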
##### Integer arithmetic functions used by ln, log10, exp and __pow__ #####
# This function from Tim Peters was taken from here:
# http://mail.python.org/pipermail/python-list/1999-July/007758.html
# The correction table in the function definition is there for speed, and
# math.log is deliberately avoided so that the whole computation stays in
# integers, with no floats involved.
def _nbits(n, correction = {
'0': 4, '1': 3, '2': 2, '3': 2,
'4': 1, '5': 1, '6': 1, '7': 1,
'8': 0, '9': 0, 'a': 0, 'b': 0,
'c': 0, 'd': 0, 'e': 0, 'f': 0}):
"""Number of bits in binary representation of the positive integer n,
or 0 if n == 0.
"""
if n < 0:
raise ValueError("The argument to _nbits should be nonnegative.")
hex_n = "%x" % n
return 4*len(hex_n) - correction[hex_n[0]]
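# Worked example for _nbits: n = 1000 gives hex_n = '3e8' (3 hex digits);
# correction['3'] == 2, so the result is 4*3 - 2 = 10, and indeed
# 1000 == 0b1111101000 needs exactly 10 bits.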
def _decimal_lshift_exact(n, e):
""" Given integers n and e, return n * 10**e if it's an integer, else None.
The computation is designed to avoid computing large powers of 10
unnecessarily.
>>> _decimal_lshift_exact(3, 4)
30000
>>> _decimal_lshift_exact(300, -999999999) # returns None
"""
if n == 0:
return 0
elif e >= 0:
return n * 10**e
else:
# val_n = largest power of 10 dividing n.
str_n = str(abs(n))
val_n = len(str_n) - len(str_n.rstrip('0'))
return None if val_n < -e else n // 10**-e
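# Additional illustrative cases for _decimal_lshift_exact:
#   _decimal_lshift_exact(12300, -2) == 123    # 12300 ends in two zeros
#   _decimal_lshift_exact(12345, -2) is None   # 1234.5 is not an integer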
def _sqrt_nearest(n, a):
"""Closest integer to the square root of the positive integer n. a is
an initial approximation to the square root. Any positive integer
will do for a, but the closer a is to the square root of n the
faster convergence will be.
"""
if n <= 0 or a <= 0:
raise ValueError("Both arguments to _sqrt_nearest should be positive.")
    b = 0
    while a != b:
        b, a = a, (a - (-n // a)) >> 1  # midpoint of a and ceil(n/a): a Newton step
return a
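# Example trace for _sqrt_nearest(10, 3): the update computes
# (a + ceil(n/a)) >> 1, so a = (3 + 4) >> 1 = 3 on the first pass; the loop
# then stops and returns 3, the closest integer to sqrt(10) = 3.162...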
def _rshift_nearest(x, shift):
"""Given an integer x and a nonnegative integer shift, return closest
integer to x / 2**shift; use round-to-even in case of a tie.
"""
b, q = 1L << shift, x >> shift
return q + (2*(x & (b-1)) + (q&1) > b)
def _div_nearest(a, b):
"""Closest integer to a/b, a and b positive integers; rounds to even
in the case of a tie.
"""
q, r = divmod(a, b)
return q + (2*r + (q&1) > b)
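# Round-half-even in action (illustrative):
#   _div_nearest(5, 2) == 2   # 2.5 ties towards the even neighbour 2
#   _div_nearest(7, 2) == 4   # 3.5 ties towards the even neighbour 4
#   _div_nearest(9, 4) == 2   # 2.25 rounds down as usual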
def _ilog(x, M, L = 8):
"""Integer approximation to M*log(x/M), with absolute error boundable
in terms only of x/M.
Given positive integers x and M, return an integer approximation to
M * log(x/M). For L = 8 and 0.1 <= x/M <= 10 the difference
between the approximation and the exact result is at most 22. For
L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15. In
both cases these are upper bounds on the error; it will usually be
much smaller."""
# The basic algorithm is the following: let log1p be the function
# log1p(x) = log(1+x). Then log(x/M) = log1p((x-M)/M). We use
# the reduction
#
# log1p(y) = 2*log1p(y/(1+sqrt(1+y)))
#
# repeatedly until the argument to log1p is small (< 2**-L in
# absolute value). For small y we can use the Taylor series
# expansion
#
# log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T
#
# truncating at T such that y**T is small enough. The whole
# computation is carried out in a form of fixed-point arithmetic,
# with a real number z being represented by an integer
# approximation to z*M. To avoid loss of precision, the y below
# is actually an integer approximation to 2**R*y*M, where R is the
# number of reductions performed so far.
y = x-M
# argument reduction; R = number of reductions performed
R = 0
while (R <= L and long(abs(y)) << L-R >= M or
R > L and abs(y) >> R-L >= M):
y = _div_nearest(long(M*y) << 1,
M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M))
R += 1
# Taylor series with T terms
T = -int(-10*len(str(M))//(3*L))
yshift = _rshift_nearest(y, R)
w = _div_nearest(M, T)
for k in xrange(T-1, 0, -1):
w = _div_nearest(M, k) - _div_nearest(yshift*w, M)
return _div_nearest(w*y, M)
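# Hedged sanity check: _ilog(271828, 10**5) approximates
# 10**5 * log(2.71828) ~ 99999.9 to within the error bound of 22 quoted in
# the docstring (0.1 <= x/M <= 10 holds here).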
def _dlog10(c, e, p):
"""Given integers c, e and p with c > 0, p >= 0, compute an integer
approximation to 10**p * log10(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# increase precision by 2; compensate for this by dividing
# final result by 100
p += 2
# write c*10**e as d*10**f with either:
# f >= 0 and 1 <= d <= 10, or
# f <= 0 and 0.1 <= d <= 1.
# Thus for c*10**e close to 1, f = 0
l = len(str(c))
f = e+l - (e+l >= 1)
if p > 0:
M = 10**p
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k)
log_d = _ilog(c, M) # error < 5 + 22 = 27
log_10 = _log10_digits(p) # error < 1
log_d = _div_nearest(log_d*M, log_10)
log_tenpower = f*M # exact
else:
log_d = 0 # error < 2.31
log_tenpower = _div_nearest(f, 10**-p) # error < 0.5
return _div_nearest(log_tenpower+log_d, 100)
def _dlog(c, e, p):
"""Given integers c, e and p with c > 0, compute an integer
approximation to 10**p * log(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# Increase precision by 2. The precision increase is compensated
# for at the end with a division by 100.
p += 2
# rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10,
# or f <= 0 and 0.1 <= d <= 1. Then we can compute 10**p * log(c*10**e)
# as 10**p * log(d) + 10**p*f * log(10).
l = len(str(c))
f = e+l - (e+l >= 1)
# compute approximation to 10**p*log(d), with error < 27
if p > 0:
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k) # error of <= 0.5 in c
# _ilog magnifies existing error in c by a factor of at most 10
log_d = _ilog(c, 10**p) # error < 5 + 22 = 27
else:
# p <= 0: just approximate the whole thing by 0; error < 2.31
log_d = 0
# compute approximation to f*10**p*log(10), with error < 11.
if f:
extra = len(str(abs(f)))-1
if p + extra >= 0:
# error in f * _log10_digits(p+extra) < |f| * 1 = |f|
# after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11
f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra)
else:
f_log_ten = 0
else:
f_log_ten = 0
# error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1
return _div_nearest(f_log_ten + log_d, 100)
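# Hedged examples for the two log helpers above; each result carries an
# absolute error of at most 1:
#   _dlog10(2, 0, 3) is within 1 of 1000 * log10(2) ~ 301.03
#   _dlog(2, 0, 3)   is within 1 of 1000 * log(2)   ~ 693.15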
class _Log10Memoize(object):
"""Class to compute, store, and allow retrieval of, digits of the
constant log(10) = 2.302585.... This constant is needed by
Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__."""
def __init__(self):
self.digits = "23025850929940456840179914546843642076011014886"
def getdigits(self, p):
"""Given an integer p >= 0, return floor(10**p)*log(10).
For example, self.getdigits(3) returns 2302.
"""
# digits are stored as a string, for quick conversion to
# integer in the case that we've already computed enough
# digits; the stored digits should always be correct
# (truncated, not rounded to nearest).
if p < 0:
raise ValueError("p should be nonnegative")
if p >= len(self.digits):
# compute p+3, p+6, p+9, ... digits; continue until at
# least one of the extra digits is nonzero
extra = 3
while True:
# compute p+extra digits, correct to within 1ulp
M = 10**(p+extra+2)
digits = str(_div_nearest(_ilog(10*M, M), 100))
if digits[-extra:] != '0'*extra:
break
extra += 3
# keep all reliable digits so far; remove trailing zeros
# and next nonzero digit
self.digits = digits.rstrip('0')[:-1]
return int(self.digits[:p+1])
_log10_digits = _Log10Memoize().getdigits
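# Example: _log10_digits(3) == 2302, i.e. floor(10**3 * log(10)) with
# log(10) = 2.302585...; asking for more digits than are cached triggers the
# lazy recomputation inside getdigits above.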
def _iexp(x, M, L=8):
"""Given integers x and M, M > 0, such that x/M is small in absolute
value, compute an integer approximation to M*exp(x/M). For 0 <=
x/M <= 2.4, the absolute error in the result is bounded by 60 (and
is usually much smaller)."""
# Algorithm: to compute exp(z) for a real number z, first divide z
# by a suitable power R of 2 so that |z/2**R| < 2**-L. Then
# compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor
# series
#
# expm1(x) = x + x**2/2! + x**3/3! + ...
#
# Now use the identity
#
# expm1(2x) = expm1(x)*(expm1(x)+2)
#
# R times to compute the sequence expm1(z/2**R),
# expm1(z/2**(R-1)), ... , exp(z/2), exp(z).
# Find R such that x/2**R/M <= 2**-L
R = _nbits((long(x)<<L)//M)
# Taylor series. (2**L)**T > M
T = -int(-10*len(str(M))//(3*L))
y = _div_nearest(x, T)
Mshift = long(M)<<R
for i in xrange(T-1, 0, -1):
y = _div_nearest(x*(Mshift + y), Mshift * i)
# Expansion
for k in xrange(R-1, -1, -1):
Mshift = long(M)<<(k+2)
y = _div_nearest(y*(y+Mshift), Mshift)
return M+y
def _dexp(c, e, p):
"""Compute an approximation to exp(c*10**e), with p decimal places of
precision.
Returns integers d, f such that:
10**(p-1) <= d <= 10**p, and
(d-1)*10**f < exp(c*10**e) < (d+1)*10**f
In other words, d*10**f is an approximation to exp(c*10**e) with p
digits of precision, and with an error in d of at most 1. This is
almost, but not quite, the same as the error being < 1ulp: when d
= 10**(p-1) the error could be up to 10 ulp."""
# we'll call iexp with M = 10**(p+2), giving p+3 digits of precision
p += 2
# compute log(10) with extra precision = adjusted exponent of c*10**e
extra = max(0, e + len(str(c)) - 1)
q = p + extra
# compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),
# rounding down
shift = e+q
if shift >= 0:
cshift = c*10**shift
else:
cshift = c//10**-shift
quot, rem = divmod(cshift, _log10_digits(q))
# reduce remainder back to original precision
rem = _div_nearest(rem, 10**extra)
# error in result of _iexp < 120; error after division < 0.62
return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3
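# Hedged example: _dexp(1, 0, 5) approximates exp(1) = 2.71828... to five
# digits, returning a pair (d, f) with d within 1 of 27183 and f == -4,
# so that d*10**f ~ 2.7183.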
def _dpower(xc, xe, yc, ye, p):
"""Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and
y = yc*10**ye, compute x**y. Returns a pair of integers (c, e) such that:
10**(p-1) <= c <= 10**p, and
(c-1)*10**e < x**y < (c+1)*10**e
in other words, c*10**e is an approximation to x**y with p digits
of precision, and with an error in c of at most 1. (This is
almost, but not quite, the same as the error being < 1ulp: when c
== 10**(p-1) we can only guarantee error < 10ulp.)
We assume that: x is positive and not equal to 1, and y is nonzero.
"""
# Find b such that 10**(b-1) <= |y| <= 10**b
b = len(str(abs(yc))) + ye
# log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point
lxc = _dlog(xc, xe, p+b+1)
# compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1)
shift = ye-b
if shift >= 0:
pc = lxc*yc*10**shift
else:
pc = _div_nearest(lxc*yc, 10**-shift)
if pc == 0:
# we prefer a result that isn't exactly 1; this makes it
# easier to compute a correctly rounded result in __pow__
if ((len(str(xc)) + xe >= 1) == (yc > 0)): # if x**y > 1:
coeff, exp = 10**(p-1)+1, 1-p
else:
coeff, exp = 10**p-1, -p
else:
coeff, exp = _dexp(pc, -(p+1), p+1)
coeff = _div_nearest(coeff, 10)
exp += 1
return coeff, exp
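# Hedged example: for x = 2 (xc=2, xe=0), y = 10 (yc=1, ye=1) and p = 5,
# _dpower approximates 2**10 = 1024, returning (c, e) with c within 1 of
# 10240 and e == -1, i.e. five significant digits of 1024.0.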
def _log10_lb(c, correction = {
'1': 100, '2': 70, '3': 53, '4': 40, '5': 31,
'6': 23, '7': 16, '8': 10, '9': 5}):
"""Compute a lower bound for 100*log10(c) for a positive integer c."""
if c <= 0:
raise ValueError("The argument to _log10_lb should be nonnegative.")
str_c = str(c)
return 100*len(str_c) - correction[str_c[0]]
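# Worked example: _log10_lb(2) == 100*1 - 70 == 30, a valid lower bound
# because 100*log10(2) ~ 30.103.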
##### Helper Functions ####################################################
def _convert_other(other, raiseit=False, allow_float=False):
"""Convert other to Decimal.
Verifies that it's ok to use in an implicit construction.
If allow_float is true, allow conversion from float; this
is used in the comparison methods (__eq__ and friends).
"""
if isinstance(other, Decimal):
return other
if isinstance(other, (int, long)):
return Decimal(other)
if allow_float and isinstance(other, float):
return Decimal.from_float(other)
if raiseit:
raise TypeError("Unable to convert %s to Decimal" % other)
return NotImplemented
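# Behaviour sketch for _convert_other (illustrative):
#   _convert_other(Decimal('1.5'))         # returned unchanged
#   _convert_other(7)                      # wrapped as Decimal(7)
#   _convert_other(1.5)                    # NotImplemented: floats are refused
#   _convert_other(1.5, allow_float=True)  # Decimal.from_float(1.5)
#   _convert_other('1.5', raiseit=True)    # raises TypeError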
##### Setup Specific Contexts ############################################
# The default context prototype used by Context()
# Is mutable, so that new contexts can have different default values
DefaultContext = Context(
prec=28, rounding=ROUND_HALF_EVEN,
traps=[DivisionByZero, Overflow, InvalidOperation],
flags=[],
Emax=999999999,
Emin=-999999999,
capitals=1
)
# Pre-made alternate contexts offered by the specification
# Don't change these; the user should be able to select these
# contexts and be able to reproduce results from other implementations
# of the spec.
BasicContext = Context(
prec=9, rounding=ROUND_HALF_UP,
traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
flags=[],
)
ExtendedContext = Context(
prec=9, rounding=ROUND_HALF_EVEN,
traps=[],
flags=[],
)
##### crud for parsing strings #############################################
#
# Regular expression used for parsing numeric strings. Additional
# comments:
#
# 1. Uncomment the two '\s*' lines to allow leading and/or trailing
# whitespace. But note that the specification disallows whitespace in
# a numeric string.
#
# 2. For finite numbers (not infinities and NaNs) the body of the
# number between the optional sign and the optional exponent must have
# at least one decimal digit, possibly after the decimal point. The
# lookahead expression '(?=\d|\.\d)' checks this.
import re
_parser = re.compile(r""" # A numeric string consists of:
# \s*
(?P<sign>[-+])? # an optional sign, followed by either...
(
(?=\d|\.\d) # ...a number (with at least one digit)
(?P<int>\d*) # having a (possibly empty) integer part
(\.(?P<frac>\d*))? # followed by an optional fractional part
(E(?P<exp>[-+]?\d+))? # followed by an optional exponent, or...
|
Inf(inity)? # ...an infinity, or...
|
(?P<signal>s)? # ...an (optionally signaling)
NaN # NaN
(?P<diag>\d*) # with (possibly empty) diagnostic info.
)
# \s*
\Z
""", re.VERBOSE | re.IGNORECASE | re.UNICODE).match
_all_zeros = re.compile('0*$').match
_exact_half = re.compile('50*$').match
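# Illustrative match against the pattern above:
#   m = _parser('-12.34E+5')
#   m.group('sign') == '-', m.group('int') == '12',
#   m.group('frac') == '34', m.group('exp') == '+5'
# _all_zeros('000') and _exact_half('5000') both return match objects.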
##### PEP3101 support functions ##############################################
# The functions in this section have little to do with the Decimal
# class, and could potentially be reused or adapted for other pure
# Python numeric classes that want to implement __format__
#
# A format specifier for Decimal looks like:
#
# [[fill]align][sign][0][minimumwidth][,][.precision][type]
_parse_format_specifier_regex = re.compile(r"""\A
(?:
(?P<fill>.)?
(?P<align>[<>=^])
)?
(?P<sign>[-+ ])?
(?P<zeropad>0)?
(?P<minimumwidth>(?!0)\d+)?
(?P<thousands_sep>,)?
(?:\.(?P<precision>0|(?!0)\d+))?
(?P<type>[eEfFgGn%])?
\Z
""", re.VERBOSE)
del re
# The locale module is only needed for the 'n' format specifier. The
# rest of the PEP 3101 code functions quite happily without it, so we
# don't care too much if locale isn't present.
try:
import locale as _locale
except ImportError:
pass
def _parse_format_specifier(format_spec, _localeconv=None):
"""Parse and validate a format specifier.
Turns a standard numeric format specifier into a dict, with the
following entries:
fill: fill character to pad field to minimum width
align: alignment type, either '<', '>', '=' or '^'
sign: either '+', '-' or ' '
minimumwidth: nonnegative integer giving minimum width
zeropad: boolean, indicating whether to pad with zeros
thousands_sep: string to use as thousands separator, or ''
grouping: grouping for thousands separators, in format
used by localeconv
decimal_point: string to use for decimal point
precision: nonnegative integer giving precision, or None
type: one of the characters 'eEfFgG%', or None
unicode: boolean (always True for Python 3.x)
"""
m = _parse_format_specifier_regex.match(format_spec)
if m is None:
raise ValueError("Invalid format specifier: " + format_spec)
# get the dictionary
format_dict = m.groupdict()
# zeropad; defaults for fill and alignment. If zero padding
# is requested, the fill and align fields should be absent.
fill = format_dict['fill']
align = format_dict['align']
format_dict['zeropad'] = (format_dict['zeropad'] is not None)
if format_dict['zeropad']:
if fill is not None:
raise ValueError("Fill character conflicts with '0'"
" in format specifier: " + format_spec)
if align is not None:
raise ValueError("Alignment conflicts with '0' in "
"format specifier: " + format_spec)
format_dict['fill'] = fill or ' '
# PEP 3101 originally specified that the default alignment should
# be left; it was later agreed that right-aligned makes more sense
# for numeric types. See http://bugs.python.org/issue6857.
format_dict['align'] = align or '>'
# default sign handling: '-' for negative, '' for positive
if format_dict['sign'] is None:
format_dict['sign'] = '-'
# minimumwidth defaults to 0; precision remains None if not given
format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0')
if format_dict['precision'] is not None:
format_dict['precision'] = int(format_dict['precision'])
# if format type is 'g' or 'G' then a precision of 0 makes little
# sense; convert it to 1. Same if format type is unspecified.
if format_dict['precision'] == 0:
if format_dict['type'] is None or format_dict['type'] in 'gG':
format_dict['precision'] = 1
# determine thousands separator, grouping, and decimal separator, and
# add appropriate entries to format_dict
if format_dict['type'] == 'n':
# apart from separators, 'n' behaves just like 'g'
format_dict['type'] = 'g'
if _localeconv is None:
_localeconv = _locale.localeconv()
if format_dict['thousands_sep'] is not None:
raise ValueError("Explicit thousands separator conflicts with "
"'n' type in format specifier: " + format_spec)
format_dict['thousands_sep'] = _localeconv['thousands_sep']
format_dict['grouping'] = _localeconv['grouping']
format_dict['decimal_point'] = _localeconv['decimal_point']
else:
if format_dict['thousands_sep'] is None:
format_dict['thousands_sep'] = ''
format_dict['grouping'] = [3, 0]
format_dict['decimal_point'] = '.'
# record whether return type should be str or unicode
format_dict['unicode'] = isinstance(format_spec, unicode)
return format_dict
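# Hedged example showing the most relevant keys for the spec '>10.2f':
#   d = _parse_format_specifier('>10.2f')
#   d['align'] == '>', d['minimumwidth'] == 10, d['precision'] == 2,
#   d['type'] == 'f', d['fill'] == ' ', d['thousands_sep'] == ''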
def _format_align(sign, body, spec):
"""Given an unpadded, non-aligned numeric string 'body' and sign
string 'sign', add padding and alignment conforming to the given
format specifier dictionary 'spec' (as produced by
parse_format_specifier).
Also converts result to unicode if necessary.
"""
# how much extra space do we have to play with?
minimumwidth = spec['minimumwidth']
fill = spec['fill']
padding = fill*(minimumwidth - len(sign) - len(body))
align = spec['align']
if align == '<':
result = sign + body + padding
elif align == '>':
result = padding + sign + body
elif align == '=':
result = sign + padding + body
elif align == '^':
half = len(padding)//2
result = padding[:half] + sign + body + padding[half:]
else:
raise ValueError('Unrecognised alignment field')
# make sure that result is unicode if necessary
if spec['unicode']:
result = unicode(result)
return result
def _group_lengths(grouping):
"""Convert a localeconv-style grouping into a (possibly infinite)
iterable of integers representing group lengths.
"""
# The result from localeconv()['grouping'], and the input to this
# function, should be a list of integers in one of the
# following three forms:
#
    #   (1) an empty list, or
    #   (2) a nonempty list of positive integers + [0], or
    #   (3) a list of positive integers + [locale.CHAR_MAX].
from itertools import chain, repeat
if not grouping:
return []
elif grouping[-1] == 0 and len(grouping) >= 2:
return chain(grouping[:-1], repeat(grouping[-2]))
elif grouping[-1] == _locale.CHAR_MAX:
return grouping[:-1]
else:
raise ValueError('unrecognised format for grouping')
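# Examples (illustrative):
#   _group_lengths([])        -> []                 # no grouping at all
#   _group_lengths([3, 0])    -> 3, 3, 3, ...       # usual thousands groups
#   _group_lengths([3, 2, 0]) -> 3, 2, 2, 2, ...    # e.g. Indian-style grouping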
def _insert_thousands_sep(digits, spec, min_width=1):
"""Insert thousands separators into a digit string.
spec is a dictionary whose keys should include 'thousands_sep' and
'grouping'; typically it's the result of parsing the format
specifier using _parse_format_specifier.
The min_width keyword argument gives the minimum length of the
result, which will be padded on the left with zeros if necessary.
If necessary, the zero padding adds an extra '0' on the left to
avoid a leading thousands separator. For example, inserting
commas every three digits in '123456', with min_width=8, gives
'0,123,456', even though that has length 9.
"""
sep = spec['thousands_sep']
grouping = spec['grouping']
groups = []
for l in _group_lengths(grouping):
if l <= 0:
raise ValueError("group length should be positive")
# max(..., 1) forces at least 1 digit to the left of a separator
l = min(max(len(digits), min_width, 1), l)
groups.append('0'*(l - len(digits)) + digits[-l:])
digits = digits[:-l]
min_width -= l
if not digits and min_width <= 0:
break
min_width -= len(sep)
else:
l = max(len(digits), min_width, 1)
groups.append('0'*(l - len(digits)) + digits[-l:])
return sep.join(reversed(groups))
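# Example matching the docstring; spec here is a minimal dict carrying just
# the two keys this function reads:
#   spec = {'thousands_sep': ',', 'grouping': [3, 0]}
#   _insert_thousands_sep('123456', spec, min_width=8) == '0,123,456'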
def _format_sign(is_negative, spec):
"""Determine sign character."""
if is_negative:
return '-'
elif spec['sign'] in ' +':
return spec['sign']
else:
return ''
def _format_number(is_negative, intpart, fracpart, exp, spec):
"""Format a number, given the following data:
is_negative: true if the number is negative, else false
intpart: string of digits that must appear before the decimal point
fracpart: string of digits that must come after the point
exp: exponent, as an integer
spec: dictionary resulting from parsing the format specifier
This function uses the information in spec to:
insert separators (decimal separator and thousands separators)
format the sign
format the exponent
add trailing '%' for the '%' type
zero-pad if necessary
fill and align if necessary
"""
sign = _format_sign(is_negative, spec)
if fracpart:
fracpart = spec['decimal_point'] + fracpart
if exp != 0 or spec['type'] in 'eE':
echar = {'E': 'E', 'e': 'e', 'G': 'E', 'g': 'e'}[spec['type']]
fracpart += "{0}{1:+}".format(echar, exp)
if spec['type'] == '%':
fracpart += '%'
if spec['zeropad']:
min_width = spec['minimumwidth'] - len(fracpart) - len(sign)
else:
min_width = 0
intpart = _insert_thousands_sep(intpart, spec, min_width)
return _format_align(sign, intpart+fracpart, spec)
##### Useful Constants (internal use only) ################################
# Reusable defaults
_Infinity = Decimal('Inf')
_NegativeInfinity = Decimal('-Inf')
_NaN = Decimal('NaN')
_Zero = Decimal(0)
_One = Decimal(1)
_NegativeOne = Decimal(-1)
# _SignedInfinity[sign] is infinity w/ that sign
_SignedInfinity = (_Infinity, _NegativeInfinity)
if __name__ == '__main__':
import doctest, sys
doctest.testmod(sys.modules[__name__])
| gpl-3.0 |
abudnik/elliptics | bindings/python/src/log.py | 6 | 3314 | # =============================================================================
# 2013+ Copyright (c) Kirill Smorodinnikov <[email protected]>
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# =============================================================================
import logging
import elliptics
log = logging.getLogger("elliptics")
formatter = logging.Formatter(
fmt="%(asctime)s.%(msecs)-6d %(thread)d/%(process)d %(levelname)s: %(message)s,"
" attrs: ['thread': '%(threadName)s', process': '%(processName)s']",
datefmt='%F %R:%S')
def logged_class(klass):
"""
This decorator adds 'log' method to passed class
"""
klass.log = logging.getLogger("elliptics")
return klass
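# Usage sketch (the class below is hypothetical; the decorator simply
# attaches the shared "elliptics" logger as a class attribute):
# @logged_class
# class Node(object):
#     def ping(self):
#         self.log.info("pinging remote node")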
def convert_elliptics_log_level(level):
'''
Converts elliptics.log_level into logging log level
'''
if level <= elliptics.log_level.debug:
return logging.DEBUG
elif level <= elliptics.log_level.info:
return logging.INFO
elif level <= elliptics.log_level.warning:
return logging.WARNING
elif level <= elliptics.log_level.error:
return logging.ERROR
else:
return logging.ERROR
def convert_logging_log_level(level):
'''
Converts logging log level into elliptics.log_level
'''
if level <= logging.DEBUG:
return elliptics.log_level.debug
elif level <= logging.INFO:
return elliptics.log_level.info
elif level <= logging.WARNING:
return elliptics.log_level.warning
elif level <= logging.CRITICAL:
        return elliptics.log_level.error
else:
return elliptics.log_level.error
class Handler(logging.Handler):
def __init__(self, path, level):
logging.Handler.__init__(self)
        if level == elliptics.log_level.error:
            logging.Handler.setLevel(self, logging.ERROR)
        elif level == elliptics.log_level.warning:
            logging.Handler.setLevel(self, logging.WARNING)
        elif level == elliptics.log_level.info:
            logging.Handler.setLevel(self, logging.INFO)
elif level == elliptics.log_level.notice:
logging.Handler.setLevel(self, logging.INFO)
elif level == elliptics.log_level.debug:
logging.Handler.setLevel(self, logging.DEBUG)
self.logger = elliptics.Logger(path, level)
def get_logger(self):
return self.logger
def emit(self, record):
level = elliptics.log_level.error
if record.levelno <= logging.DEBUG:
level = elliptics.log_level.debug
        elif record.levelno <= logging.INFO:
            level = elliptics.log_level.info
        elif record.levelno <= logging.WARNING:
            level = elliptics.log_level.warning
        else:
            level = elliptics.log_level.error
self.logger.log(level, record.msg.format(*record.args))
def init_logger():
import sys
log.setLevel(logging.ERROR)
ch = logging.StreamHandler(sys.stderr)
ch.setFormatter(formatter)
ch.setLevel(logging.ERROR)
log.addHandler(ch)
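# Hedged usage sketch (not part of the original module). The log path is an
# assumption; any path writable by elliptics.Logger works. Note that emit()
# formats records with str.format, so use '{}'-style placeholders.
if __name__ == '__main__':
    handler = Handler('/tmp/elliptics-demo.log', elliptics.log_level.info)
    log.addHandler(handler)
    log.setLevel(logging.INFO)
    log.info("elliptics logging bridge initialised")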
| lgpl-3.0 |
jiangzhuo/kbengine | kbe/res/scripts/common/Lib/idlelib/ScrolledList.py | 76 | 4159 | from tkinter import *
class ScrolledList:
default = "(None)"
def __init__(self, master, **options):
# Create top frame, with scrollbar and listbox
self.master = master
self.frame = frame = Frame(master)
self.frame.pack(fill="both", expand=1)
self.vbar = vbar = Scrollbar(frame, name="vbar")
self.vbar.pack(side="right", fill="y")
self.listbox = listbox = Listbox(frame, exportselection=0,
background="white")
if options:
listbox.configure(options)
listbox.pack(expand=1, fill="both")
# Tie listbox and scrollbar together
vbar["command"] = listbox.yview
listbox["yscrollcommand"] = vbar.set
# Bind events to the list box
listbox.bind("<ButtonRelease-1>", self.click_event)
listbox.bind("<Double-ButtonRelease-1>", self.double_click_event)
listbox.bind("<ButtonPress-3>", self.popup_event)
listbox.bind("<Key-Up>", self.up_event)
listbox.bind("<Key-Down>", self.down_event)
# Mark as empty
self.clear()
def close(self):
self.frame.destroy()
def clear(self):
self.listbox.delete(0, "end")
self.empty = 1
self.listbox.insert("end", self.default)
def append(self, item):
if self.empty:
self.listbox.delete(0, "end")
self.empty = 0
self.listbox.insert("end", str(item))
def get(self, index):
return self.listbox.get(index)
def click_event(self, event):
self.listbox.activate("@%d,%d" % (event.x, event.y))
index = self.listbox.index("active")
self.select(index)
self.on_select(index)
return "break"
def double_click_event(self, event):
index = self.listbox.index("active")
self.select(index)
self.on_double(index)
return "break"
menu = None
def popup_event(self, event):
if not self.menu:
self.make_menu()
menu = self.menu
self.listbox.activate("@%d,%d" % (event.x, event.y))
index = self.listbox.index("active")
self.select(index)
menu.tk_popup(event.x_root, event.y_root)
def make_menu(self):
menu = Menu(self.listbox, tearoff=0)
self.menu = menu
self.fill_menu()
def up_event(self, event):
index = self.listbox.index("active")
if self.listbox.selection_includes(index):
index = index - 1
else:
index = self.listbox.size() - 1
if index < 0:
self.listbox.bell()
else:
self.select(index)
self.on_select(index)
return "break"
def down_event(self, event):
index = self.listbox.index("active")
if self.listbox.selection_includes(index):
index = index + 1
else:
index = 0
if index >= self.listbox.size():
self.listbox.bell()
else:
self.select(index)
self.on_select(index)
return "break"
def select(self, index):
self.listbox.focus_set()
self.listbox.activate(index)
self.listbox.selection_clear(0, "end")
self.listbox.selection_set(index)
self.listbox.see(index)
# Methods to override for specific actions
def fill_menu(self):
pass
def on_select(self, index):
pass
def on_double(self, index):
pass
def _scrolled_list(parent):
    import re
    root = Tk()
root.title("Test ScrolledList")
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 150))
class MyScrolledList(ScrolledList):
def fill_menu(self): self.menu.add_command(label="right click")
def on_select(self, index): print("select", self.get(index))
def on_double(self, index): print("double", self.get(index))
scrolled_list = MyScrolledList(root)
for i in range(30):
scrolled_list.append("Item %02d" % i)
root.mainloop()
if __name__ == '__main__':
from idlelib.idle_test.htest import run
run(_scrolled_list)
| lgpl-3.0 |
a-x-/httpie | tests/test_auth.py | 7 | 2318 | """HTTP authentication-related tests."""
import requests
import pytest
from utils import http, add_auth, HTTP_OK, TestEnvironment
import httpie.input
import httpie.cli
class TestAuth:
def test_basic_auth(self, httpbin):
r = http('--auth=user:password',
'GET', httpbin.url + '/basic-auth/user/password')
assert HTTP_OK in r
assert r.json == {'authenticated': True, 'user': 'user'}
@pytest.mark.skipif(
requests.__version__ == '0.13.6',
reason='Redirects with prefetch=False are broken in Requests 0.13.6')
def test_digest_auth(self, httpbin):
r = http('--auth-type=digest', '--auth=user:password',
'GET', httpbin.url + '/digest-auth/auth/user/password')
assert HTTP_OK in r
assert r.json == {'authenticated': True, 'user': 'user'}
def test_password_prompt(self, httpbin):
httpie.input.AuthCredentials._getpass = lambda self, prompt: 'password'
r = http('--auth', 'user',
'GET', httpbin.url + '/basic-auth/user/password')
assert HTTP_OK in r
assert r.json == {'authenticated': True, 'user': 'user'}
def test_credentials_in_url(self, httpbin):
url = add_auth(httpbin.url + '/basic-auth/user/password',
auth='user:password')
r = http('GET', url)
assert HTTP_OK in r
assert r.json == {'authenticated': True, 'user': 'user'}
def test_credentials_in_url_auth_flag_has_priority(self, httpbin):
"""When credentials are passed in URL and via -a at the same time,
then the ones from -a are used."""
url = add_auth(httpbin.url + '/basic-auth/user/password',
auth='user:wrong')
r = http('--auth=user:password', 'GET', url)
assert HTTP_OK in r
assert r.json == {'authenticated': True, 'user': 'user'}
@pytest.mark.parametrize('url', [
        'username@example.org',
'username:@example.org',
])
def test_only_username_in_url(self, url):
"""
https://github.com/jakubroztocil/httpie/issues/242
"""
args = httpie.cli.parser.parse_args(args=[url], env=TestEnvironment())
assert args.auth
assert args.auth.key == 'username'
assert args.auth.value == ''
| bsd-3-clause |
scorphus/django | tests/extra_regress/models.py | 281 | 1401 | from __future__ import unicode_literals
import copy
import datetime
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class RevisionableModel(models.Model):
base = models.ForeignKey('self', models.SET_NULL, null=True)
title = models.CharField(blank=True, max_length=255)
when = models.DateTimeField(default=datetime.datetime.now)
def __str__(self):
return "%s (%s, %s)" % (self.title, self.id, self.base.id)
def save(self, *args, **kwargs):
super(RevisionableModel, self).save(*args, **kwargs)
if not self.base:
self.base = self
kwargs.pop('force_insert', None)
kwargs.pop('force_update', None)
super(RevisionableModel, self).save(*args, **kwargs)
def new_revision(self):
new_revision = copy.copy(self)
new_revision.pk = None
return new_revision
class Order(models.Model):
created_by = models.ForeignKey(User, models.CASCADE)
text = models.TextField()
@python_2_unicode_compatible
class TestObject(models.Model):
first = models.CharField(max_length=20)
second = models.CharField(max_length=20)
third = models.CharField(max_length=20)
def __str__(self):
return 'TestObject: %s,%s,%s' % (self.first, self.second, self.third)
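# Illustrative usage (not part of the original test models; assumes a
# configured Django settings module and a migrated database):
#
#     rm = RevisionableModel.objects.create(title="first draft")
#     rev = rm.new_revision()
#     rev.title = "second draft"
#     rev.save()   # the copy gets a fresh pk; base keeps pointing at rm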
| bsd-3-clause |
cloudbase/cloudbase-init-ci | argus/action_manager/base.py | 3 | 5043 | # Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from argus import log as argus_log
LOG = argus_log.LOG
@six.add_metaclass(abc.ABCMeta)
class BaseActionManager(object):
"""Get a Action Manager that can handle basic actions.
:param client:
A Windows client to send command to the instance.
:param conf:
Argus config options.
"""
def __init__(self, client, os_type):
self._client = client
self._os_type = os_type
self._config_os_type_on_logger()
@property
def os_type(self):
return self._os_type
def _config_os_type_on_logger(self):
"""Set the OS type on the logger."""
LOG.debug("Update the logger with the following OS version: %s",
self._os_type)
LOG.extra["os_type"] = self._os_type
argus_log.add_new_handler(LOG)
@abc.abstractmethod
def download(self, uri, location):
"""Download the resource located at a specific URI in the location.
:param uri:
Remote URL where the data is found.
:param location:
Path from the instance in which we should download the
remote resource.
"""
pass
@abc.abstractmethod
def get_installation_script(self):
"""Get installation script for CloudbaseInit."""
pass
@abc.abstractmethod
def install_cbinit(self):
"""Install Cloudbase-Init."""
pass
@abc.abstractmethod
def sysprep(self):
"""Run the sysprep."""
pass
@abc.abstractmethod
def wait_cbinit_service(self):
"""Wait if the Cloudbase-Init Service to stop."""
pass
@abc.abstractmethod
def check_cbinit_service(self, searched_paths=None):
"""Check if the Cloudbase-Init service started.
:param searched_paths:
Paths to files that should exist if the heartbeat patch is
applied.
"""
pass
@abc.abstractmethod
def git_clone(self, repo_url, location, count, delay):
"""Clone from a remote repository to a specified location.
:param repo_url: The remote repository URL.
:param location: The target location for where to clone the repository.
:param count:
The number of tries that should be attempted in case it fails.
:param delay: The time delay before retrying.
:returns: True if the clone was successful, False if not.
:raises: ArgusCLIError if the path is not valid.
:rtype: bool
"""
pass
@abc.abstractmethod
def wait_boot_completion(self):
"""Wait for the instance to be booted a reasonable period."""
pass
@abc.abstractmethod
def specific_prepare(self):
"""Prepare some OS specific resources."""
pass
@abc.abstractmethod
def remove(self, path):
"""Remove a file."""
pass
@abc.abstractmethod
    def rmdir(self, path):
        """Remove a directory."""
        pass
@abc.abstractmethod
def exists(self, path):
"""Check if the path exists."""
pass
@abc.abstractmethod
def is_file(self, path):
"""Check if the file exists."""
pass
@abc.abstractmethod
def is_dir(self, path):
"""Check if the directory exists."""
pass
@abc.abstractmethod
def mkdir(self, path):
"""Create a directory in the instance if the path is valid.
:param path:
Remote path where the new directory should be created.
"""
pass
@abc.abstractmethod
def mkfile(self, path):
"""Create a file in the instance if the path is valid.
:param path:
Remote path where the new file should be created.
"""
pass
@abc.abstractmethod
def touch(self, path):
"""Update the access and modification time.
If the file doesn't exist, an empty file will be created
as side effect.
"""
pass
# TODO(mmicu): Make a Cloudbase-Init Action Manager and move
# specific methods
@abc.abstractmethod
def prepare_config(self, cbinit_config, cbinit_unattend_conf):
"""Prepare Cloudbase-Init config for every OS.
:param cbinit_config:
Cloudbase-Init config file.
:param cbinit_unattend_conf:
Cloudbase-Init Unattend config file.
"""
pass
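# Hedged sketch (not part of argus): a skeletal subclass showing how a
# concrete manager fills in this contract. The client API used below
# (run_command) is hypothetical -- the real interface depends on the
# client passed to __init__ -- and a usable subclass must override every
# abstract method, not just the two spelled out here.
class _SketchActionManager(BaseActionManager):

    def download(self, uri, location):
        # Fetch a remote resource on the instance; curl is an illustrative
        # choice and would depend on self.os_type in practice.
        self._client.run_command("curl -o {0} {1}".format(location, uri))

    def exists(self, path):
        # Probe the path and report existence via the exit code.
        exit_code = self._client.run_command("test -e {0}".format(path))
        return exit_code == 0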
| apache-2.0 |
wilmarcardonac/fisher-mcmc | class_montanari-lensing/external_Pk/generate_Pk_example_w_tensors.py | 3 | 1753 | #!/usr/bin/python
import sys
from math import exp
# README:
#
# This is an example python script for the external_Pk mode of Class.
# It generates the primordial spectrum of LambdaCDM.
# It can be edited and used directly, though keeping a copy of it is recommended.
#
# Two (maybe three) things need to be edited:
#
# 1. The name of the parameters needed for the calculation of Pk.
# "sys.argv[1]" corresponds to "custom1" in Class, an so on
try :
k_0 = float(sys.argv[1])
A_s = float(sys.argv[2])
n_s = float(sys.argv[3])
A_t = float(sys.argv[4])
n_t = float(sys.argv[5])
# Error control, no need to touch
except IndexError :
raise IndexError("It seems you are calling this script with too few arguments.")
except ValueError :
raise ValueError("It seems some of the arguments are not correctly formatted. "+
"Remember that they must be floating point numbers.")
# 2. The function giving P(k), including the necessary import statements.
# Inside this function, you can use the parameters named in the previous step.
def P_s(k) :
return A_s * (k/k_0)**(n_s-1.)
def P_t(k) :
return A_t * (k/k_0)**(n_t)
# 3. Limits for k and precision:
# Check that the boundaries are correct for your case.
# It is safer to set k_per_decade primordial slightly bigger than that of Class.
k_min = 1.e-6
k_max = 10.
k_per_decade_primordial = 200.
#
# And nothing should need to be edited from here on.
#
# Filling the array of k's
ks = [float(k_min)]
while ks[-1] <= float(k_max) :
ks.append(ks[-1]*10.**(1./float(k_per_decade_primordial)))
# Filling the array of Pk's
for k in ks :
print "%.18g %.18g %.18g" % (k, P_s(k), P_t(k))
| gpl-2.0 |
emilroz/openmicroscopy | components/tools/OmeroPy/test/integration/test_model42.py | 5 | 2560 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Integration test focused on the model changes in 4.2
Copyright 2010-2013 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import test.integration.library as lib
import omero
import omero.model
from omero.rtypes import *
class TestModel42(lib.ITest):
def testNs(self):
ns = omero.model.NamespaceI()
ns.name = rstring(self.uuid())
ns.keywords = ["a", "b"]
ns = self.root.sf.getUpdateService().saveAndReturnObject(ns)
def testRoi(self):
img = self.new_image("testRoi")
roi = omero.model.RoiI()
roi.namespaces = [self.uuid(), self.uuid()]
roi.keywords = [["a", "b"], ["c", "d"]]
roi.image = img
roi = self.update.saveAndReturnObject(roi)
class TestTicket2290(lib.ITest):
def testEmptyArrays2290(self):
img = self.new_image("testRoi")
roi = omero.model.RoiI()
roi.image = img
roi.keywords = []
roi.namespaces = []
roi = self.update.saveAndReturnObject(roi)
# Then resave
roi = self.update.saveAndReturnObject(roi)
def testNoArrays2290(self):
img = self.new_image("testRoi")
roi = omero.model.RoiI()
roi.image = img
roi = self.update.saveAndReturnObject(roi)
# Then resave
roi = self.update.saveAndReturnObject(roi)
def testRect2290(self):
img = self.new_image("testRoi")
roi = omero.model.RoiI()
roi.namespaces = [self.uuid(), self.uuid()]
roi.keywords = [["a", "b"], ["c", "d"]]
roi.image = img
rect = omero.model.RectI()
roi.addShape(rect)
roi = self.update.saveAndReturnObject(roi)
def testRectB2290(self):
img = self.new_image("testRoi")
roi = omero.model.RoiI()
roi.namespaces = [self.uuid(), self.uuid()]
roi.keywords = [["a", "b"], ["c", "d"]]
roi.image = img
roi = self.update.saveAndReturnObject(roi)
rect = omero.model.RectI()
roi.addShape(rect)
rect.roi = roi
rect = self.update.saveAndReturnObject(rect)
def testResave2290(self):
img = self.new_image("testRoi")
roi = omero.model.RoiI()
roi.namespaces = [self.uuid(), self.uuid()]
roi.keywords = [["a", "b"], ["c", "d"]]
roi.image = img
roi = self.update.saveAndReturnObject(roi)
rect = omero.model.RectI()
roi.addShape(rect)
roi = self.update.saveAndReturnObject(roi)
| gpl-2.0 |
romankagan/DDBWorkbench | python/helpers/docutils/parsers/rst/languages/ru.py | 57 | 5207 | # $Id: ru.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Roman Suzi <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Russian-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
u'\u0431\u043b\u043e\u043a-\u0441\u0442\u0440\u043e\u043a': u'line-block',
u'meta': u'meta',
u'\u043e\u0431\u0440\u0430\u0431\u043e\u0442\u0430\u043d\u043d\u044b\u0439-\u043b\u0438\u0442\u0435\u0440\u0430\u043b':
u'parsed-literal',
u'\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u043d\u0430\u044f-\u0446\u0438\u0442\u0430\u0442\u0430':
u'pull-quote',
u'compound (translation required)': 'compound',
u'container (translation required)': 'container',
u'table (translation required)': 'table',
u'csv-table (translation required)': 'csv-table',
u'list-table (translation required)': 'list-table',
u'\u0441\u044b\u0440\u043e\u0439': u'raw',
u'\u0437\u0430\u043c\u0435\u043d\u0430': u'replace',
u'\u0442\u0435\u0441\u0442\u043e\u0432\u0430\u044f-\u0434\u0438\u0440\u0435\u043a\u0442\u0438\u0432\u0430-restructuredtext':
u'restructuredtext-test-directive',
u'\u0446\u0435\u043b\u0435\u0432\u044b\u0435-\u0441\u043d\u043e\u0441\u043a\u0438':
u'target-notes',
u'unicode': u'unicode',
u'\u0434\u0430\u0442\u0430': u'date',
u'\u0431\u043e\u043a\u043e\u0432\u0430\u044f-\u043f\u043e\u043b\u043e\u0441\u0430':
u'sidebar',
u'\u0432\u0430\u0436\u043d\u043e': u'important',
u'\u0432\u043a\u043b\u044e\u0447\u0430\u0442\u044c': u'include',
u'\u0432\u043d\u0438\u043c\u0430\u043d\u0438\u0435': u'attention',
u'\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u0438\u0435': u'highlights',
u'\u0437\u0430\u043c\u0435\u0447\u0430\u043d\u0438\u0435': u'admonition',
u'\u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435':
u'image',
u'\u043a\u043b\u0430\u0441\u0441': u'class',
u'role (translation required)': 'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
u'\u043d\u043e\u043c\u0435\u0440-\u0440\u0430\u0437\u0434\u0435\u043b\u0430':
u'sectnum',
u'\u043d\u0443\u043c\u0435\u0440\u0430\u0446\u0438\u044f-\u0440\u0430\u0437'
u'\u0434\u0435\u043b\u043e\u0432': u'sectnum',
u'\u043e\u043f\u0430\u0441\u043d\u043e': u'danger',
u'\u043e\u0441\u0442\u043e\u0440\u043e\u0436\u043d\u043e': u'caution',
u'\u043e\u0448\u0438\u0431\u043a\u0430': u'error',
u'\u043f\u043e\u0434\u0441\u043a\u0430\u0437\u043a\u0430': u'tip',
u'\u043f\u0440\u0435\u0434\u0443\u043f\u0440\u0435\u0436\u0434\u0435\u043d'
u'\u0438\u0435': u'warning',
u'\u043f\u0440\u0438\u043c\u0435\u0447\u0430\u043d\u0438\u0435': u'note',
u'\u0440\u0438\u0441\u0443\u043d\u043e\u043a': u'figure',
u'\u0440\u0443\u0431\u0440\u0438\u043a\u0430': u'rubric',
u'\u0441\u043e\u0432\u0435\u0442': u'hint',
u'\u0441\u043e\u0434\u0435\u0440\u0436\u0430\u043d\u0438\u0435': u'contents',
u'\u0442\u0435\u043c\u0430': u'topic',
u'\u044d\u043f\u0438\u0433\u0440\u0430\u0444': u'epigraph',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',}
"""Russian name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
u'\u0430\u043a\u0440\u043e\u043d\u0438\u043c': 'acronym',
u'\u0430\u043d\u043e\u043d\u0438\u043c\u043d\u0430\u044f-\u0441\u0441\u044b\u043b\u043a\u0430':
'anonymous-reference',
u'\u0431\u0443\u043a\u0432\u0430\u043b\u044c\u043d\u043e': 'literal',
u'\u0432\u0435\u0440\u0445\u043d\u0438\u0439-\u0438\u043d\u0434\u0435\u043a\u0441':
'superscript',
u'\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u0438\u0435': 'emphasis',
u'\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u043d\u0430\u044f-\u0441\u0441\u044b\u043b\u043a\u0430':
'named-reference',
u'\u0438\u043d\u0434\u0435\u043a\u0441': 'index',
u'\u043d\u0438\u0436\u043d\u0438\u0439-\u0438\u043d\u0434\u0435\u043a\u0441':
'subscript',
u'\u0441\u0438\u043b\u044c\u043d\u043e\u0435-\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u0438\u0435':
'strong',
u'\u0441\u043e\u043a\u0440\u0430\u0449\u0435\u043d\u0438\u0435':
'abbreviation',
u'\u0441\u0441\u044b\u043b\u043a\u0430-\u0437\u0430\u043c\u0435\u043d\u0430':
'substitution-reference',
u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-pep': 'pep-reference',
u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-rfc': 'rfc-reference',
u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-uri': 'uri-reference',
u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-\u0437\u0430\u0433\u043b\u0430\u0432\u0438\u0435':
'title-reference',
u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-\u0441\u043d\u043e\u0441\u043a\u0443':
'footnote-reference',
u'\u0446\u0438\u0442\u0430\u0442\u043d\u0430\u044f-\u0441\u0441\u044b\u043b\u043a\u0430':
'citation-reference',
u'\u0446\u0435\u043b\u044c': 'target',
u'raw (translation required)': 'raw',}
"""Mapping of Russian role names to canonical role names for interpreted text.
"""
| apache-2.0 |
zeeman/cyder | lib/log.py | 2 | 2154 | """Taken from funfactory (funfactory/log.py) on a380a54"""
import logging
from django.conf import settings
from django.http import HttpRequest
import commonware
class AreciboHandler(logging.Handler):
"""An exception log handler that sends tracebacks to Arecibo."""
def emit(self, record):
from django.conf import settings
arecibo = getattr(settings, 'ARECIBO_SERVER_URL', '')
if arecibo and hasattr(record, 'request'):
if getattr(settings, 'ARECIBO_USES_CELERY', False):
from django_arecibo.tasks import post
else:
from django_arecibo.wrapper import post
post(record.request, 500)
def log_cef(name, severity=logging.INFO, env=None, username='none',
signature=None, **kwargs):
"""
Wraps cef logging function so we don't need to pass in the config
dictionary every time. See bug 707060. ``env`` can be either a request
object or just the request.META dictionary.
"""
cef_logger = commonware.log.getLogger('cef')
c = {'product': settings.CEF_PRODUCT,
'vendor': settings.CEF_VENDOR,
'version': settings.CEF_VERSION,
'device_version': settings.CEF_DEVICE_VERSION,}
# The CEF library looks for some things in the env object like
# REQUEST_METHOD and any REMOTE_ADDR stuff. Django not only doesn't send
# half the stuff you'd expect, but it specifically doesn't implement
# readline on its FakePayload object so these things fail. I have no idea
# if that's outdated code in Django or not, but andym made this
# <strike>awesome</strike> less crappy so the tests will actually pass.
# In theory, the last part of this if() will never be hit except in the
# test runner. Good luck with that.
if isinstance(env, HttpRequest):
r = env.META.copy()
elif isinstance(env, dict):
r = env
else:
r = {}
# Drop kwargs into CEF config array, then log.
c['environ'] = r
c.update({
'username': username,
'signature': signature,
'data': kwargs,
})
cef_logger.log(severity, name, c)
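# Hedged usage sketch (not part of the original module): assumes the CEF_*
# settings referenced above are configured and that a Django request (or
# its META dict) is available.
#
#     def login_view(request):
#         ...
#         log_cef('auth.failure', logging.WARNING, request,
#                 username=request.POST.get('username', 'none'),
#                 signature='AUTHFAIL')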
| bsd-3-clause |
michaelgugino/web_keyer | werkzeug/testsuite/compat.py | 102 | 1117 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.compat
~~~~~~~~~~~~~~~~~~~~~~~~~
Ensure that old stuff does not break on update.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
import warnings
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.wrappers import Response
from werkzeug.test import create_environ
class CompatTestCase(WerkzeugTestCase):
def test_old_imports(self):
        from werkzeug.utils import Headers, MultiDict, CombinedMultiDict, \
            EnvironHeaders
from werkzeug.http import Accept, MIMEAccept, CharsetAccept, \
LanguageAccept, ETags, HeaderSet, WWWAuthenticate, \
Authorization
def test_exposed_werkzeug_mod(self):
import werkzeug
for key in werkzeug.__all__:
# deprecated, skip it
if key in ('templates', 'Template'):
continue
getattr(werkzeug, key)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(CompatTestCase))
return suite
| gpl-3.0 |
Hawaii-Smart-Energy-Project/Maui-Smart-Grid | src/static-data-insert/insertSCADAWeatherData.py | 1 | 3206 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script for inserting Kihei SCADA temperature and humidity data.
Usage:
With the current working directory set to the path containing the data files,
python insertSCADAWeatherData.py
"""
__author__ = 'Daniel Zhang (張道博)'
__copyright__ = 'Copyright (c) 2013, University of Hawaii Smart Energy Project'
__license__ = 'https://raw.github' \
'.com/Hawaii-Smart-Energy-Project/Maui-Smart-Grid/master/BSD' \
'-LICENSE.txt'
from msg_db_connector import MSGDBConnector
from msg_db_util import MSGDBUtil
import csv
import re
if __name__ == '__main__':
connector = MSGDBConnector()
conn = connector.connectDB()
dbUtil = MSGDBUtil()
cursor = conn.cursor()
tFiles = ['Kihei AirTemp F 2013_07.csv', 'Kihei AirTemp F 2013_08.csv',
'Kihei AirTemp F 2013_09.csv', 'Kihei AirTemp F 2013_10.csv']
hFiles = ['Kihei_Rel_Humid 2013_07.csv', 'Kihei_Rel_Humid 2013_08.csv',
'Kihei_Rel_Humid 2013_09.csv', 'Kihei_Rel_Humid 2013_10.csv']
table = 'KiheiSCADATemperatureHumidity'
tCols = ['timestamp', 'met_air_temp_degf']
hCols = ['met_rel_humid_pct']
cnt = 0
temps = []
t_i = 0
h_i = 0
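    # Illustrative input row (an assumption based on the parsing below): the
    # first column is a timestamp tagged 'GMT-1000', the second the reading.
    #
    #     7/1/2013 13:05:00 GMT-1000,77.3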
for i in range(4):
with open(tFiles[i], 'rb') as csvfile:
print "Reading %s" % (tFiles[i])
myReader = csv.reader(csvfile, delimiter = ',')
for row in myReader:
if cnt == 0:
cnt += 1
continue
#temps[t_i] = [row[0], row[1]]
row[0] = row[0].replace('GMT-1000', '')
                row[0] = re.sub(r'\s24:', ' 00:', row[0])
if row[1] == '':
row[1] = 'NULL'
sql = """INSERT INTO "%s" (%s) VALUES (TIMESTAMP %s,%s)""" % (
table, ','.join(tCols), "'" + row[0].strip() + "'",
"'" + row[1].strip() + "'")
sql = sql.replace("'NULL'", 'NULL')
#print sql
dbUtil.executeSQL(cursor, sql)
cnt += 1
if cnt % 10000 == 0:
conn.commit()
conn.commit()
cnt = 0
with open(hFiles[i], 'rb') as csvfile:
print "Reading %s" % (hFiles[i])
myReader = csv.reader(csvfile, delimiter = ',')
for row in myReader:
if cnt == 0:
cnt += 1
continue
row[0] = row[0].replace('GMT-1000', '')
                row[0] = re.sub(r'\s24:', ' 00:', row[0])
if row[1] == '':
row[1] = 'NULL'
sql = """UPDATE "%s" SET %s = %s WHERE timestamp =
TIMESTAMP '%s'""" % (
table, ','.join(hCols), "'" + row[1].strip() + "'",
row[0].strip())
sql = sql.replace("'NULL'", 'NULL')
#print sql
dbUtil.executeSQL(cursor, sql)
cnt += 1
if cnt % 10000 == 0:
conn.commit()
pass
conn.commit()
cnt = 0
| bsd-3-clause |
matthaywardwebdesign/rethinkdb | test/rql_test/connections/http_support/werkzeug/testsuite/contrib/sessions.py | 146 | 2325 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.sessions
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Added tests for the sessions.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import unittest
import shutil
from tempfile import mkdtemp, gettempdir
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.contrib.sessions import FilesystemSessionStore
class SessionTestCase(WerkzeugTestCase):
def setup(self):
self.session_folder = mkdtemp()
def teardown(self):
shutil.rmtree(self.session_folder)
def test_default_tempdir(self):
store = FilesystemSessionStore()
assert store.path == gettempdir()
def test_basic_fs_sessions(self):
store = FilesystemSessionStore(self.session_folder)
x = store.new()
assert x.new
assert not x.modified
x['foo'] = [1, 2, 3]
assert x.modified
store.save(x)
x2 = store.get(x.sid)
assert not x2.new
assert not x2.modified
assert x2 is not x
assert x2 == x
x2['test'] = 3
assert x2.modified
assert not x2.new
store.save(x2)
x = store.get(x.sid)
store.delete(x)
x2 = store.get(x.sid)
# the session is not new when it was used previously.
assert not x2.new
def test_non_urandom(self):
urandom = os.urandom
del os.urandom
try:
store = FilesystemSessionStore(self.session_folder)
store.new()
finally:
os.urandom = urandom
def test_renewing_fs_session(self):
store = FilesystemSessionStore(self.session_folder, renew_missing=True)
x = store.new()
store.save(x)
store.delete(x)
x2 = store.get(x.sid)
assert x2.new
    def test_fs_session_listing(self):
store = FilesystemSessionStore(self.session_folder, renew_missing=True)
sessions = set()
for x in range(10):
sess = store.new()
store.save(sess)
sessions.add(sess.sid)
listed_sessions = set(store.list())
assert sessions == listed_sessions
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SessionTestCase))
return suite
| agpl-3.0 |